get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
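
Below is a minimal sketch of driving this endpoint from Python with the requests library. The URL is the one shown in the example response that follows; the API token, the "superseded" state value, and token-based authentication for write access are assumptions for illustration, not taken from this page.

import requests

URL = "http://patches.dpdk.org/api/patches/53588/"
TOKEN = "0123456789abcdef"  # hypothetical token; obtain a real one from your Patchwork profile

# GET: show the patch (read access needs no authentication)
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# PATCH: partially update the patch, e.g. change its state
# (assumed to require maintainer rights on the project)
resp = requests.patch(
    URL,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded"},
)
resp.raise_for_status()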

GET /api/patches/53588/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53588,
    "url": "http://patches.dpdk.org/api/patches/53588/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190521161315.25500-6-olivier.matz@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190521161315.25500-6-olivier.matz@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190521161315.25500-6-olivier.matz@6wind.com",
    "date": "2019-05-21T16:13:05",
    "name": "[05/15] net: add rte prefix to ether defines",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "1d9b748e614f619643fccbdf83d0b8cc300cb3a7",
    "submitter": {
        "id": 8,
        "url": "http://patches.dpdk.org/api/people/8/?format=api",
        "name": "Olivier Matz",
        "email": "olivier.matz@6wind.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190521161315.25500-6-olivier.matz@6wind.com/mbox/",
    "series": [
        {
            "id": 4733,
            "url": "http://patches.dpdk.org/api/series/4733/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4733",
            "date": "2019-05-21T16:13:01",
            "name": "prefix network structures",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4733/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53588/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/53588/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id F0E595592;\n\tTue, 21 May 2019 18:37:57 +0200 (CEST)",
            "from proxy.6wind.com (host.76.145.23.62.rev.coltfrance.com\n\t[62.23.145.76]) by dpdk.org (Postfix) with ESMTP id 1DBDD4C80\n\tfor <dev@dpdk.org>; Tue, 21 May 2019 18:13:25 +0200 (CEST)",
            "from glumotte.dev.6wind.com. (unknown [10.16.0.195])\n\tby proxy.6wind.com (Postfix) with ESMTP id D29D72B83D8;\n\tTue, 21 May 2019 18:13:24 +0200 (CEST)"
        ],
        "From": "Olivier Matz <olivier.matz@6wind.com>",
        "To": "dev@dpdk.org",
        "Cc": "stephen@networkplumber.org,\n\tferruh.yigit@intel.com",
        "Date": "Tue, 21 May 2019 18:13:05 +0200",
        "Message-Id": "<20190521161315.25500-6-olivier.matz@6wind.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "In-Reply-To": "<20190521161315.25500-1-olivier.matz@6wind.com>",
        "References": "<20190410083218.17531-1-olivier.matz@6wind.com>\n\t<20190521161315.25500-1-olivier.matz@6wind.com>",
        "X-Mailman-Approved-At": "Tue, 21 May 2019 18:37:53 +0200",
        "Subject": "[dpdk-dev] [PATCH 05/15] net: add rte prefix to ether defines",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add 'RTE_' prefix to defines:\n- rename ETHER_ADDR_LEN as RTE_ETHER_ADDR_LEN.\n- rename ETHER_TYPE_LEN as RTE_ETHER_TYPE_LEN.\n- rename ETHER_CRC_LEN as RTE_ETHER_CRC_LEN.\n- rename ETHER_HDR_LEN as RTE_ETHER_HDR_LEN.\n- rename ETHER_MIN_LEN as RTE_ETHER_MIN_LEN.\n- rename ETHER_MAX_LEN as RTE_ETHER_MAX_LEN.\n- rename ETHER_MTU as RTE_ETHER_MTU.\n- rename ETHER_MAX_VLAN_FRAME_LEN as RTE_ETHER_MAX_VLAN_FRAME_LEN.\n- rename ETHER_MAX_VLAN_ID as RTE_ETHER_MAX_VLAN_ID.\n- rename ETHER_MAX_JUMBO_FRAME_LEN as RTE_ETHER_MAX_JUMBO_FRAME_LEN.\n- rename ETHER_MIN_MTU as RTE_ETHER_MIN_MTU.\n- rename ETHER_LOCAL_ADMIN_ADDR as RTE_ETHER_LOCAL_ADMIN_ADDR.\n- rename ETHER_GROUP_ADDR as RTE_ETHER_GROUP_ADDR.\n- rename ETHER_TYPE_IPv4 as RTE_ETHER_TYPE_IPv4.\n- rename ETHER_TYPE_IPv6 as RTE_ETHER_TYPE_IPv6.\n- rename ETHER_TYPE_ARP as RTE_ETHER_TYPE_ARP.\n- rename ETHER_TYPE_VLAN as RTE_ETHER_TYPE_VLAN.\n- rename ETHER_TYPE_RARP as RTE_ETHER_TYPE_RARP.\n- rename ETHER_TYPE_QINQ as RTE_ETHER_TYPE_QINQ.\n- rename ETHER_TYPE_ETAG as RTE_ETHER_TYPE_ETAG.\n- rename ETHER_TYPE_1588 as RTE_ETHER_TYPE_1588.\n- rename ETHER_TYPE_SLOW as RTE_ETHER_TYPE_SLOW.\n- rename ETHER_TYPE_TEB as RTE_ETHER_TYPE_TEB.\n- rename ETHER_TYPE_LLDP as RTE_ETHER_TYPE_LLDP.\n- rename ETHER_TYPE_MPLS as RTE_ETHER_TYPE_MPLS.\n- rename ETHER_TYPE_MPLSM as RTE_ETHER_TYPE_MPLSM.\n- rename ETHER_VXLAN_HLEN as RTE_ETHER_VXLAN_HLEN.\n- rename ETHER_ADDR_FMT_SIZE as RTE_ETHER_ADDR_FMT_SIZE.\n- rename VXLAN_GPE_TYPE_IPV4 as RTE_VXLAN_GPE_TYPE_IPV4.\n- rename VXLAN_GPE_TYPE_IPV6 as RTE_VXLAN_GPE_TYPE_IPV6.\n- rename VXLAN_GPE_TYPE_ETH as RTE_VXLAN_GPE_TYPE_ETH.\n- rename VXLAN_GPE_TYPE_NSH as RTE_VXLAN_GPE_TYPE_NSH.\n- rename VXLAN_GPE_TYPE_MPLS as RTE_VXLAN_GPE_TYPE_MPLS.\n- rename VXLAN_GPE_TYPE_GBP as RTE_VXLAN_GPE_TYPE_GBP.\n- rename VXLAN_GPE_TYPE_VBNG as RTE_VXLAN_GPE_TYPE_VBNG.\n- rename ETHER_VXLAN_GPE_HLEN as RTE_ETHER_VXLAN_GPE_HLEN.\n\nDo not update the command line library to avoid adding a dependency to\nlibrte_net.\n\nSigned-off-by: Olivier Matz <olivier.matz@6wind.com>\n---\n app/test-eventdev/test_perf_common.c              |   2 +-\n app/test-eventdev/test_pipeline_common.c          |   2 +-\n app/test-pmd/cmdline.c                            |  34 +++---\n app/test-pmd/cmdline_flow.c                       |  80 +++++++-------\n app/test-pmd/config.c                             |   4 +-\n app/test-pmd/csumonly.c                           |  46 ++++----\n app/test-pmd/flowgen.c                            |   2 +-\n app/test-pmd/icmpecho.c                           |  12 +--\n app/test-pmd/ieee1588fwd.c                        |   4 +-\n app/test-pmd/parameters.c                         |   6 +-\n app/test-pmd/testpmd.c                            |  11 +-\n app/test-pmd/testpmd.h                            |  20 ++--\n app/test-pmd/txonly.c                             |   2 +-\n app/test-pmd/util.c                               |   4 +-\n app/test/packet_burst_generator.c                 |  14 +--\n app/test/test_cmdline_etheraddr.c                 |   2 +-\n app/test/test_flow_classify.c                     |   6 +-\n app/test/test_link_bonding.c                      | 111 ++++++++++----------\n app/test/test_link_bonding_mode4.c                |  30 +++---\n app/test/test_link_bonding_rssconf.c              |   4 +-\n app/test/test_pmd_perf.c                          |   8 +-\n app/test/test_sched.c                             |   2 +-\n app/test/virtual_pmd.c                            |   2 +-\n 
doc/guides/nics/kni.rst                           |   2 +-\n doc/guides/prog_guide/rte_flow.rst                |   4 +-\n doc/guides/sample_app_ug/flow_classify.rst        |   2 +-\n doc/guides/sample_app_ug/ipv4_multicast.rst       |   2 +-\n doc/guides/sample_app_ug/skeleton.rst             |   2 +-\n drivers/bus/dpaa/include/compat.h                 |   2 +\n drivers/net/af_xdp/rte_eth_af_xdp.c               |   4 +-\n drivers/net/ark/ark_ethdev.c                      |   5 +-\n drivers/net/atlantic/atl_ethdev.c                 |  13 ++-\n drivers/net/avp/avp_ethdev.c                      |   9 +-\n drivers/net/avp/rte_avp_common.h                  |   2 +-\n drivers/net/axgbe/axgbe_dev.c                     |   4 +-\n drivers/net/axgbe/axgbe_ethdev.c                  |   4 +-\n drivers/net/axgbe/axgbe_ethdev.h                  |   2 +-\n drivers/net/axgbe/axgbe_rxtx.c                    |   2 +-\n drivers/net/bnx2x/bnx2x.c                         |   4 +-\n drivers/net/bnx2x/ecore_sp.h                      |   2 +-\n drivers/net/bnxt/bnxt.h                           |   4 +-\n drivers/net/bnxt/bnxt_ethdev.c                    |  73 ++++++-------\n drivers/net/bnxt/bnxt_filter.c                    |   4 +-\n drivers/net/bnxt/bnxt_filter.h                    |   8 +-\n drivers/net/bnxt/bnxt_flow.c                      |  15 +--\n drivers/net/bnxt/bnxt_hwrm.c                      |  35 ++++---\n drivers/net/bnxt/bnxt_ring.c                      |   8 +-\n drivers/net/bnxt/bnxt_rxq.c                       |   2 +-\n drivers/net/bnxt/bnxt_rxr.c                       |   4 +-\n drivers/net/bnxt/bnxt_vnic.c                      |   2 +-\n drivers/net/bnxt/rte_pmd_bnxt.c                   |   8 +-\n drivers/net/bonding/rte_eth_bond_8023ad.c         |   2 +-\n drivers/net/bonding/rte_eth_bond_alb.c            |   8 +-\n drivers/net/bonding/rte_eth_bond_pmd.c            |  42 ++++----\n drivers/net/cxgbe/cxgbe.h                         |   5 +-\n drivers/net/cxgbe/cxgbe_compat.h                  |   1 +\n drivers/net/cxgbe/cxgbe_ethdev.c                  |  12 +--\n drivers/net/cxgbe/cxgbe_filter.h                  |   2 +-\n drivers/net/cxgbe/cxgbe_flow.c                    |   6 +-\n drivers/net/cxgbe/cxgbe_main.c                    |   4 +-\n drivers/net/cxgbe/cxgbevf_main.c                  |   2 +-\n drivers/net/cxgbe/l2t.c                           |   8 +-\n drivers/net/cxgbe/l2t.h                           |   2 +-\n drivers/net/cxgbe/mps_tcam.c                      |  14 +--\n drivers/net/cxgbe/mps_tcam.h                      |   4 +-\n drivers/net/cxgbe/sge.c                           |   9 +-\n drivers/net/dpaa/dpaa_ethdev.c                    |  14 +--\n drivers/net/dpaa/dpaa_rxtx.c                      |   8 +-\n drivers/net/dpaa2/dpaa2_ethdev.c                  |  10 +-\n drivers/net/e1000/e1000_ethdev.h                  |   5 +-\n drivers/net/e1000/em_ethdev.c                     |  22 ++--\n drivers/net/e1000/em_rxtx.c                       |  18 ++--\n drivers/net/e1000/igb_ethdev.c                    |  46 ++++----\n drivers/net/e1000/igb_flow.c                      |   4 +-\n drivers/net/e1000/igb_pf.c                        |  11 +-\n drivers/net/e1000/igb_rxtx.c                      |  14 +--\n drivers/net/ena/ena_ethdev.h                      |   2 +-\n drivers/net/enetc/base/enetc_hw.h                 |   4 +-\n drivers/net/enetc/enetc_ethdev.c                  |  19 ++--\n drivers/net/enic/enic.h                           |   2 +-\n drivers/net/enic/enic_ethdev.c          
          |   8 +-\n drivers/net/enic/enic_flow.c                      |  14 +--\n drivers/net/enic/enic_res.c                       |   5 +-\n drivers/net/failsafe/failsafe_args.c              |   2 +-\n drivers/net/failsafe/failsafe_ether.c             |   5 +-\n drivers/net/fm10k/fm10k.h                         |   2 +-\n drivers/net/fm10k/fm10k_ethdev.c                  |   4 +-\n drivers/net/i40e/i40e_ethdev.c                    |  54 +++++-----\n drivers/net/i40e/i40e_ethdev.h                    |   2 +-\n drivers/net/i40e/i40e_ethdev_vf.c                 |  25 ++---\n drivers/net/i40e/i40e_fdir.c                      |  14 +--\n drivers/net/i40e/i40e_flow.c                      |  26 ++---\n drivers/net/i40e/i40e_pf.c                        |   2 +-\n drivers/net/i40e/i40e_rxtx.c                      |  22 ++--\n drivers/net/i40e/rte_pmd_i40e.c                   |   6 +-\n drivers/net/iavf/iavf.h                           |   2 +-\n drivers/net/iavf/iavf_ethdev.c                    |  22 ++--\n drivers/net/iavf/iavf_rxtx.c                      |   8 +-\n drivers/net/ice/ice_ethdev.c                      |  21 ++--\n drivers/net/ice/ice_ethdev.h                      |   2 +-\n drivers/net/ice/ice_rxtx.c                        |  22 ++--\n drivers/net/ipn3ke/ipn3ke_ethdev.h                |   2 +-\n drivers/net/ipn3ke/ipn3ke_flow.c                  |   4 +-\n drivers/net/ipn3ke/ipn3ke_representor.c           |  10 +-\n drivers/net/ixgbe/ixgbe_ethdev.c                  |  49 +++++----\n drivers/net/ixgbe/ixgbe_ethdev.h                  |   4 +-\n drivers/net/ixgbe/ixgbe_flow.c                    |  14 +--\n drivers/net/ixgbe/ixgbe_pf.c                      |  11 +-\n drivers/net/ixgbe/ixgbe_rxtx.c                    |   7 +-\n drivers/net/ixgbe/rte_pmd_ixgbe.c                 |   6 +-\n drivers/net/kni/rte_eth_kni.c                     |   2 +-\n drivers/net/liquidio/lio_ethdev.c                 |  20 ++--\n drivers/net/mlx4/mlx4.c                           |   2 +-\n drivers/net/mlx4/mlx4.h                           |   2 +-\n drivers/net/mlx4/mlx4_ethdev.c                    |   4 +-\n drivers/net/mlx4/mlx4_flow.c                      |  10 +-\n drivers/net/mlx4/mlx4_rxtx.c                      |   2 +-\n drivers/net/mlx5/mlx5.c                           |   2 +-\n drivers/net/mlx5/mlx5.h                           |   2 +-\n drivers/net/mlx5/mlx5_flow_dv.c                   |  12 +--\n drivers/net/mlx5/mlx5_flow_tcf.c                  |  20 ++--\n drivers/net/mlx5/mlx5_flow_verbs.c                |  14 ++-\n drivers/net/mlx5/mlx5_mac.c                       |   4 +-\n drivers/net/mlx5/mlx5_nl.c                        |   8 +-\n drivers/net/mlx5/mlx5_rxtx.c                      |   6 +-\n drivers/net/mlx5/mlx5_rxtx_vec_neon.h             |   8 +-\n drivers/net/mlx5/mlx5_rxtx_vec_sse.h              |  10 +-\n drivers/net/mlx5/mlx5_trigger.c                   |   2 +-\n drivers/net/mvneta/mvneta_ethdev.c                |  12 +--\n drivers/net/mvneta/mvneta_ethdev.h                |   6 +-\n drivers/net/mvpp2/mrvl_ethdev.c                   |  12 +--\n drivers/net/mvpp2/mrvl_ethdev.h                   |   6 +-\n drivers/net/netvsc/hn_ethdev.c                    |   2 +-\n drivers/net/netvsc/hn_nvs.c                       |   2 +-\n drivers/net/netvsc/hn_rndis.c                     |   2 +-\n drivers/net/netvsc/hn_rxtx.c                      |   4 +-\n drivers/net/nfb/nfb_ethdev.c                      |   2 +-\n drivers/net/nfp/nfp_net.c                         |  11 +-\n 
drivers/net/nfp/nfp_net_pmd.h                     |   2 +-\n drivers/net/octeontx/octeontx_ethdev.c            |   7 +-\n drivers/net/octeontx/octeontx_ethdev.h            |   2 +-\n drivers/net/pcap/rte_eth_pcap.c                   |  18 ++--\n drivers/net/qede/base/bcm_osal.h                  |   4 +-\n drivers/net/qede/qede_ethdev.c                    |  18 ++--\n drivers/net/qede/qede_filter.c                    |  24 +++--\n drivers/net/qede/qede_if.h                        |   2 +-\n drivers/net/qede/qede_main.c                      |   6 +-\n drivers/net/qede/qede_rxtx.c                      |   6 +-\n drivers/net/qede/qede_rxtx.h                      |   2 +-\n drivers/net/sfc/sfc_ethdev.c                      |   6 +-\n drivers/net/sfc/sfc_flow.c                        |   2 +-\n drivers/net/softnic/parser.c                      |   6 +-\n drivers/net/tap/rte_eth_tap.c                     |  24 +++--\n drivers/net/tap/tap_flow.c                        |  12 ++-\n drivers/net/thunderx/base/nicvf_plat.h            |   4 +-\n drivers/net/thunderx/nicvf_ethdev.c               |  13 +--\n drivers/net/thunderx/nicvf_struct.h               |   2 +-\n drivers/net/virtio/virtio_ethdev.c                |  52 ++++++----\n drivers/net/virtio/virtio_pci.h                   |   4 +-\n drivers/net/virtio/virtio_rxtx.c                  |  10 +-\n drivers/net/virtio/virtio_user/vhost_kernel_tap.c |   2 +-\n drivers/net/virtio/virtio_user/virtio_user_dev.c  |   6 +-\n drivers/net/virtio/virtio_user/virtio_user_dev.h  |   2 +-\n drivers/net/virtio/virtio_user_ethdev.c           |   8 +-\n drivers/net/virtio/virtqueue.h                    |   2 +-\n drivers/net/vmxnet3/vmxnet3_ethdev.c              |   4 +-\n drivers/net/vmxnet3/vmxnet3_ethdev.h              |   2 +-\n examples/bbdev_app/main.c                         |   2 +-\n examples/bond/main.c                              |  22 ++--\n examples/distributor/main.c                       |   2 +-\n examples/ethtool/ethtool-app/ethapp.c             |   4 +-\n examples/eventdev_pipeline/main.c                 |   2 +-\n examples/flow_classify/flow_classify.c            |   2 +-\n examples/flow_filtering/main.c                    |   4 +-\n examples/ip_fragmentation/main.c                  |  21 ++--\n examples/ip_pipeline/kni.c                        |   2 +-\n examples/ip_pipeline/parser.c                     |   6 +-\n examples/ip_reassembly/main.c                     |   8 +-\n examples/ipsec-secgw/ipsec-secgw.c                |  24 ++---\n examples/ipv4_multicast/main.c                    |   6 +-\n examples/kni/main.c                               |   6 +-\n examples/l2fwd-cat/l2fwd-cat.c                    |   2 +-\n examples/l2fwd-crypto/main.c                      |   4 +-\n examples/l3fwd-acl/main.c                         |   8 +-\n examples/l3fwd-power/main.c                       |  12 +--\n examples/l3fwd-vf/main.c                          |   6 +-\n examples/l3fwd/l3fwd_em.c                         |   4 +-\n examples/l3fwd/l3fwd_lpm.c                        |   4 +-\n examples/l3fwd/main.c                             |  10 +-\n examples/performance-thread/l3fwd-thread/main.c   |  16 +--\n examples/ptpclient/ptpclient.c                    |   2 +-\n examples/qos_meter/main.c                         |   2 +-\n examples/qos_sched/init.c                         |   2 +-\n examples/rxtx_callbacks/main.c                    |   2 +-\n examples/skeleton/basicfwd.c                      |   2 +-\n examples/tep_termination/vxlan.c                 
 |  20 ++--\n examples/tep_termination/vxlan_setup.c            |   6 +-\n examples/vhost/main.c                             |   6 +-\n examples/vm_power_manager/channel_monitor.c       |   6 +-\n examples/vm_power_manager/main.c                  |   2 +-\n lib/librte_ethdev/rte_ethdev.c                    |  19 ++--\n lib/librte_ethdev/rte_ethdev.h                    |   2 +-\n lib/librte_ethdev/rte_flow.h                      |   8 +-\n lib/librte_eventdev/rte_event_eth_rx_adapter.c    |  10 +-\n lib/librte_kni/rte_kni.c                          |   2 +-\n lib/librte_kni/rte_kni.h                          |   2 +-\n lib/librte_net/rte_arp.c                          |   8 +-\n lib/librte_net/rte_ether.h                        | 121 +++++++++++-----------\n lib/librte_net/rte_net.c                          |  30 +++---\n lib/librte_pipeline/rte_table_action.c            |  34 +++---\n lib/librte_port/rte_port_source_sink.c            |   6 +-\n lib/librte_vhost/virtio_net.c                     |   6 +-\n 212 files changed, 1150 insertions(+), 1077 deletions(-)",
    "diff": "diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c\nindex 8dbfd8181..01f782820 100644\n--- a/app/test-eventdev/test_perf_common.c\n+++ b/app/test-eventdev/test_perf_common.c\n@@ -658,7 +658,7 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)\n \tstruct rte_eth_conf port_conf = {\n \t\t.rxmode = {\n \t\t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t\t.split_hdr_size = 0,\n \t\t},\n \t\t.rx_adv_conf = {\ndiff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c\nindex 1e525643d..16c49b860 100644\n--- a/app/test-eventdev/test_pipeline_common.c\n+++ b/app/test-eventdev/test_pipeline_common.c\n@@ -165,7 +165,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)\n \tstruct rte_eth_conf port_conf = {\n \t\t.rxmode = {\n \t\t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t},\n \t\t.rx_adv_conf = {\n \t\t\t.rss_conf = {\ndiff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c\nindex 1fb609940..d1e0d4402 100644\n--- a/app/test-pmd/cmdline.c\n+++ b/app/test-pmd/cmdline.c\n@@ -1925,16 +1925,16 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,\n \t\tuint64_t rx_offloads = port->dev_conf.rxmode.offloads;\n \n \t\tif (!strcmp(res->name, \"max-pkt-len\")) {\n-\t\t\tif (res->value < ETHER_MIN_LEN) {\n+\t\t\tif (res->value < RTE_ETHER_MIN_LEN) {\n \t\t\t\tprintf(\"max-pkt-len can not be less than %d\\n\",\n-\t\t\t\t\t\tETHER_MIN_LEN);\n+\t\t\t\t\t\tRTE_ETHER_MIN_LEN);\n \t\t\t\treturn;\n \t\t\t}\n \t\t\tif (res->value == port->dev_conf.rxmode.max_rx_pkt_len)\n \t\t\t\treturn;\n \n \t\t\tport->dev_conf.rxmode.max_rx_pkt_len = res->value;\n-\t\t\tif (res->value > ETHER_MAX_LEN)\n+\t\t\tif (res->value > RTE_ETHER_MAX_LEN)\n \t\t\t\trx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \t\t\telse\n \t\t\t\trx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;\n@@ -1996,8 +1996,8 @@ cmd_config_mtu_parsed(void *parsed_result,\n {\n \tstruct cmd_config_mtu_result *res = parsed_result;\n \n-\tif (res->value < ETHER_MIN_LEN) {\n-\t\tprintf(\"mtu cannot be less than %d\\n\", ETHER_MIN_LEN);\n+\tif (res->value < RTE_ETHER_MIN_LEN) {\n+\t\tprintf(\"mtu cannot be less than %d\\n\", RTE_ETHER_MIN_LEN);\n \t\treturn;\n \t}\n \tport_mtu_set(res->port_id, res->value);\n@@ -8236,7 +8236,7 @@ cmd_set_vf_macvlan_parsed(void *parsed_result,\n \n \tmemset(&filter, 0, sizeof(struct rte_eth_mac_filter));\n \n-\trte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);\n+\trte_memcpy(&filter.mac_addr, &res->address, RTE_ETHER_ADDR_LEN);\n \n \t/* set VF MAC filter */\n \tfilter.is_vf = 1;\n@@ -9210,7 +9210,7 @@ cmd_set_mirror_mask_parsed(void *parsed_result,\n \t\t\treturn;\n \n \t\tfor (i = 0; i < nb_item; i++) {\n-\t\t\tif (vlan_list[i] > ETHER_MAX_VLAN_ID) {\n+\t\t\tif (vlan_list[i] > RTE_ETHER_MAX_VLAN_ID) {\n \t\t\t\tprintf(\"Invalid vlan_id: must be < 4096\\n\");\n \t\t\t\treturn;\n \t\t\t}\n@@ -15301,9 +15301,9 @@ static void cmd_set_vxlan_parsed(void *parsed_result,\n \tif (vxlan_encap_conf.select_vlan)\n \t\tvxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);\n \trte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n \trte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n }\n \n cmdline_parse_inst_t cmd_set_vxlan = {\n@@ 
-15492,9 +15492,9 @@ static void cmd_set_nvgre_parsed(void *parsed_result,\n \tif (nvgre_encap_conf.select_vlan)\n \t\tnvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);\n \trte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n \trte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n }\n \n cmdline_parse_inst_t cmd_set_nvgre = {\n@@ -15609,9 +15609,9 @@ static void cmd_set_l2_encap_parsed(void *parsed_result,\n \tif (l2_encap_conf.select_vlan)\n \t\tl2_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);\n \trte_memcpy(l2_encap_conf.eth_src, res->eth_src.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n \trte_memcpy(l2_encap_conf.eth_dst, res->eth_dst.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n }\n \n cmdline_parse_inst_t cmd_set_l2_encap = {\n@@ -15801,9 +15801,9 @@ static void cmd_set_mplsogre_encap_parsed(void *parsed_result,\n \tif (mplsogre_encap_conf.select_vlan)\n \t\tmplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);\n \trte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n \trte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n }\n \n cmdline_parse_inst_t cmd_set_mplsogre_encap = {\n@@ -16039,9 +16039,9 @@ static void cmd_set_mplsoudp_encap_parsed(void *parsed_result,\n \tif (mplsoudp_encap_conf.select_vlan)\n \t\tmplsoudp_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);\n \trte_memcpy(mplsoudp_encap_conf.eth_src, res->eth_src.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n \trte_memcpy(mplsoudp_encap_conf.eth_dst, res->eth_dst.addr_bytes,\n-\t\t   ETHER_ADDR_LEN);\n+\t\t   RTE_ETHER_ADDR_LEN);\n }\n \n cmdline_parse_inst_t cmd_set_mplsoudp_encap = {\ndiff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c\nindex 2e274f9b1..7a1c5aba9 100644\n--- a/app/test-pmd/cmdline_flow.c\n+++ b/app/test-pmd/cmdline_flow.c\n@@ -3493,9 +3493,9 @@ parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,\n \t\t.item_vxlan.flags = 0,\n \t};\n \tmemcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,\n-\t       vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);\n+\t       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,\n-\t       vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tif (!vxlan_encap_conf.select_ipv4) {\n \t\tmemcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,\n \t\t       &vxlan_encap_conf.ipv6_src,\n@@ -3616,9 +3616,9 @@ parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,\n \t\t.item_nvgre.flow_id = 0,\n \t};\n \tmemcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,\n-\t       nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);\n+\t       nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,\n-\t       nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tif (!nvgre_encap_conf.select_ipv4) {\n \t\tmemcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,\n \t\t       &nvgre_encap_conf.ipv6_src,\n@@ -3680,22 +3680,22 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,\n \t};\n \theader = action_encap_data->data;\n \tif 
(l2_encap_conf.select_vlan)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \telse if (l2_encap_conf.select_ipv4)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \telse\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \tmemcpy(eth.dst.addr_bytes,\n-\t       l2_encap_conf.eth_dst, ETHER_ADDR_LEN);\n+\t       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(eth.src.addr_bytes,\n-\t       l2_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tmemcpy(header, &eth, sizeof(eth));\n \theader += sizeof(eth);\n \tif (l2_encap_conf.select_vlan) {\n \t\tif (l2_encap_conf.select_ipv4)\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\telse\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tmemcpy(header, &vlan, sizeof(vlan));\n \t\theader += sizeof(vlan);\n \t}\n@@ -3744,7 +3744,7 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,\n \t};\n \theader = action_decap_data->data;\n \tif (l2_decap_conf.select_vlan)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \tmemcpy(header, &eth, sizeof(eth));\n \theader += sizeof(eth);\n \tif (l2_decap_conf.select_vlan) {\n@@ -3815,22 +3815,22 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,\n \t};\n \theader = action_encap_data->data;\n \tif (mplsogre_encap_conf.select_vlan)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \telse if (mplsogre_encap_conf.select_ipv4)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \telse\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \tmemcpy(eth.dst.addr_bytes,\n-\t       mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);\n+\t       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(eth.src.addr_bytes,\n-\t       mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tmemcpy(header, &eth, sizeof(eth));\n \theader += sizeof(eth);\n \tif (mplsogre_encap_conf.select_vlan) {\n \t\tif (mplsogre_encap_conf.select_ipv4)\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\telse\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tmemcpy(header, &vlan, sizeof(vlan));\n \t\theader += sizeof(vlan);\n \t}\n@@ -3910,22 +3910,22 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,\n \t};\n \theader = action_decap_data->data;\n \tif (mplsogre_decap_conf.select_vlan)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \telse if (mplsogre_encap_conf.select_ipv4)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \telse\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \tmemcpy(eth.dst.addr_bytes,\n-\t       mplsogre_encap_conf.eth_dst, 
ETHER_ADDR_LEN);\n+\t       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(eth.src.addr_bytes,\n-\t       mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tmemcpy(header, &eth, sizeof(eth));\n \theader += sizeof(eth);\n \tif (mplsogre_encap_conf.select_vlan) {\n \t\tif (mplsogre_encap_conf.select_ipv4)\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\telse\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tmemcpy(header, &vlan, sizeof(vlan));\n \t\theader += sizeof(vlan);\n \t}\n@@ -4006,22 +4006,22 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,\n \t};\n \theader = action_encap_data->data;\n \tif (mplsoudp_encap_conf.select_vlan)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \telse if (mplsoudp_encap_conf.select_ipv4)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \telse\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \tmemcpy(eth.dst.addr_bytes,\n-\t       mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);\n+\t       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(eth.src.addr_bytes,\n-\t       mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tmemcpy(header, &eth, sizeof(eth));\n \theader += sizeof(eth);\n \tif (mplsoudp_encap_conf.select_vlan) {\n \t\tif (mplsoudp_encap_conf.select_ipv4)\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\telse\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tmemcpy(header, &vlan, sizeof(vlan));\n \t\theader += sizeof(vlan);\n \t}\n@@ -4103,22 +4103,22 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,\n \t};\n \theader = action_decap_data->data;\n \tif (mplsoudp_decap_conf.select_vlan)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \telse if (mplsoudp_encap_conf.select_ipv4)\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \telse\n-\t\teth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\teth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \tmemcpy(eth.dst.addr_bytes,\n-\t       mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);\n+\t       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);\n \tmemcpy(eth.src.addr_bytes,\n-\t       mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);\n+\t       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);\n \tmemcpy(header, &eth, sizeof(eth));\n \theader += sizeof(eth);\n \tif (mplsoudp_encap_conf.select_vlan) {\n \t\tif (mplsoudp_encap_conf.select_ipv4)\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\telse\n-\t\t\tvlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t\tvlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tmemcpy(header, &vlan, sizeof(vlan));\n \t\theader += sizeof(vlan);\n \t}\ndiff --git a/app/test-pmd/config.c b/app/test-pmd/config.c\nindex 7a67690f2..ab458c8d2 100644\n--- 
a/app/test-pmd/config.c\n+++ b/app/test-pmd/config.c\n@@ -110,8 +110,8 @@ const struct rss_type_info rss_type_table[] = {\n static void\n print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c\nindex 8de37c87e..d1344b923 100644\n--- a/app/test-pmd/csumonly.c\n+++ b/app/test-pmd/csumonly.c\n@@ -92,9 +92,9 @@ struct simple_gre_hdr {\n static uint16_t\n get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)\n {\n-\tif (ethertype == _htons(ETHER_TYPE_IPv4))\n+\tif (ethertype == _htons(RTE_ETHER_TYPE_IPv4))\n \t\treturn rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);\n-\telse /* assume ethertype == ETHER_TYPE_IPv6 */\n+\telse /* assume ethertype == RTE_ETHER_TYPE_IPv6 */\n \t\treturn rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);\n }\n \n@@ -150,7 +150,7 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)\n \tinfo->l2_len = sizeof(struct rte_ether_hdr);\n \tinfo->ethertype = eth_hdr->ether_type;\n \n-\tif (info->ethertype == _htons(ETHER_TYPE_VLAN)) {\n+\tif (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN)) {\n \t\tstruct rte_vlan_hdr *vlan_hdr = (\n \t\t\tstruct rte_vlan_hdr *)(eth_hdr + 1);\n \n@@ -159,11 +159,11 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)\n \t}\n \n \tswitch (info->ethertype) {\n-\tcase _htons(ETHER_TYPE_IPv4):\n+\tcase _htons(RTE_ETHER_TYPE_IPv4):\n \t\tipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);\n \t\tparse_ipv4(ipv4_hdr, info);\n \t\tbreak;\n-\tcase _htons(ETHER_TYPE_IPv6):\n+\tcase _htons(RTE_ETHER_TYPE_IPv6):\n \t\tipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);\n \t\tparse_ipv6(ipv6_hdr, info);\n \t\tbreak;\n@@ -201,7 +201,7 @@ parse_vxlan(struct udp_hdr *udp_hdr,\n \t\tsizeof(struct rte_vxlan_hdr));\n \n \tparse_ethernet(eth_hdr, info);\n-\tinfo->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */\n+\tinfo->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */\n }\n \n /* Parse a vxlan-gpe header */\n@@ -223,7 +223,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,\n \t\t\t\tsizeof(struct udp_hdr));\n \n \tif (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==\n-\t    VXLAN_GPE_TYPE_IPV4) {\n+\t    RTE_VXLAN_GPE_TYPE_IPV4) {\n \t\tinfo->is_tunnel = 1;\n \t\tinfo->outer_ethertype = info->ethertype;\n \t\tinfo->outer_l2_len = info->l2_len;\n@@ -234,10 +234,10 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,\n \t\t\t   vxlan_gpe_len);\n \n \t\tparse_ipv4(ipv4_hdr, info);\n-\t\tinfo->ethertype = _htons(ETHER_TYPE_IPv4);\n+\t\tinfo->ethertype = _htons(RTE_ETHER_TYPE_IPv4);\n \t\tinfo->l2_len = 0;\n \n-\t} else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {\n+\t} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {\n \t\tinfo->is_tunnel = 1;\n \t\tinfo->outer_ethertype = info->ethertype;\n \t\tinfo->outer_l2_len = info->l2_len;\n@@ -247,11 +247,11 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,\n \t\tipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +\n \t\t\t   vxlan_gpe_len);\n \n-\t\tinfo->ethertype = _htons(ETHER_TYPE_IPv6);\n+\t\tinfo->ethertype = _htons(RTE_ETHER_TYPE_IPv6);\n \t\tparse_ipv6(ipv6_hdr, info);\n \t\tinfo->l2_len = 0;\n \n-\t} else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {\n+\t} else if (vxlan_gpe_hdr->proto == 
RTE_VXLAN_GPE_TYPE_ETH) {\n \t\tinfo->is_tunnel = 1;\n \t\tinfo->outer_ethertype = info->ethertype;\n \t\tinfo->outer_l2_len = info->l2_len;\n@@ -265,7 +265,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,\n \t} else\n \t\treturn;\n \n-\tinfo->l2_len += ETHER_VXLAN_GPE_HLEN;\n+\tinfo->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;\n }\n \n /* Parse a gre header */\n@@ -286,7 +286,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)\n \tif (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))\n \t\tgre_len += GRE_EXT_LEN;\n \n-\tif (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {\n+\tif (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPv4)) {\n \t\tinfo->is_tunnel = 1;\n \t\tinfo->outer_ethertype = info->ethertype;\n \t\tinfo->outer_l2_len = info->l2_len;\n@@ -296,10 +296,10 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)\n \t\tipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);\n \n \t\tparse_ipv4(ipv4_hdr, info);\n-\t\tinfo->ethertype = _htons(ETHER_TYPE_IPv4);\n+\t\tinfo->ethertype = _htons(RTE_ETHER_TYPE_IPv4);\n \t\tinfo->l2_len = 0;\n \n-\t} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {\n+\t} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPv6)) {\n \t\tinfo->is_tunnel = 1;\n \t\tinfo->outer_ethertype = info->ethertype;\n \t\tinfo->outer_l2_len = info->l2_len;\n@@ -308,11 +308,11 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)\n \n \t\tipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);\n \n-\t\tinfo->ethertype = _htons(ETHER_TYPE_IPv6);\n+\t\tinfo->ethertype = _htons(RTE_ETHER_TYPE_IPv6);\n \t\tparse_ipv6(ipv6_hdr, info);\n \t\tinfo->l2_len = 0;\n \n-\t} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {\n+\t} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {\n \t\tinfo->is_tunnel = 1;\n \t\tinfo->outer_ethertype = info->ethertype;\n \t\tinfo->outer_l2_len = info->l2_len;\n@@ -349,10 +349,10 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)\n \n \tif (ip_version == 4) {\n \t\tparse_ipv4(ipv4_hdr, info);\n-\t\tinfo->ethertype = _htons(ETHER_TYPE_IPv4);\n+\t\tinfo->ethertype = _htons(RTE_ETHER_TYPE_IPv4);\n \t} else {\n \t\tparse_ipv6(ipv6_hdr, info);\n-\t\tinfo->ethertype = _htons(ETHER_TYPE_IPv6);\n+\t\tinfo->ethertype = _htons(RTE_ETHER_TYPE_IPv6);\n \t}\n \tinfo->l2_len = 0;\n }\n@@ -384,7 +384,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,\n \t\t\ttso_segsz = info->tunnel_tso_segsz;\n \t}\n \n-\tif (info->ethertype == _htons(ETHER_TYPE_IPv4)) {\n+\tif (info->ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {\n \t\tipv4_hdr = l3_hdr;\n \t\tipv4_hdr->hdr_checksum = 0;\n \n@@ -398,7 +398,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,\n \t\t\t\tipv4_hdr->hdr_checksum =\n \t\t\t\t\trte_ipv4_cksum(ipv4_hdr);\n \t\t}\n-\t} else if (info->ethertype == _htons(ETHER_TYPE_IPv6))\n+\t} else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPv6))\n \t\tol_flags |= PKT_TX_IPV6;\n \telse\n \t\treturn 0; /* packet type not supported, nothing to do */\n@@ -459,7 +459,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,\n \tstruct udp_hdr *udp_hdr;\n \tuint64_t ol_flags = 0;\n \n-\tif (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {\n+\tif (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {\n \t\tipv4_hdr->hdr_checksum = 0;\n \t\tol_flags |= PKT_TX_OUTER_IPV4;\n \n@@ -495,7 +495,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,\n \t/* do not recalculate udp cksum 
if it was 0 */\n \tif (udp_hdr->dgram_cksum != 0) {\n \t\tudp_hdr->dgram_cksum = 0;\n-\t\tif (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))\n+\t\tif (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4))\n \t\t\tudp_hdr->dgram_cksum =\n \t\t\t\trte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);\n \t\telse\ndiff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c\nindex 3f29e9408..ccc5dd85c 100644\n--- a/app/test-pmd/flowgen.c\n+++ b/app/test-pmd/flowgen.c\n@@ -173,7 +173,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)\n \t\teth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n \t\trte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);\n \t\trte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);\n-\t\teth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\teth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \n \t\t/* Initialize IP header. */\n \t\tip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);\ndiff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c\nindex f41179208..c6c7cb6bd 100644\n--- a/app/test-pmd/icmpecho.c\n+++ b/app/test-pmd/icmpecho.c\n@@ -223,9 +223,9 @@ ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf)\n static void\n ether_addr_dump(const char *what, const struct rte_ether_addr *ea)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, ea);\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, ea);\n \tif (what)\n \t\tprintf(\"%s\", what);\n \tprintf(\"%s\", buf);\n@@ -330,7 +330,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)\n \t\t\tether_addr_dump(\"  ETH:  src=\", &eth_h->s_addr);\n \t\t\tether_addr_dump(\" dst=\", &eth_h->d_addr);\n \t\t}\n-\t\tif (eth_type == ETHER_TYPE_VLAN) {\n+\t\tif (eth_type == RTE_ETHER_TYPE_VLAN) {\n \t\t\tvlan_h = (struct rte_vlan_hdr *)\n \t\t\t\t((char *)eth_h + sizeof(struct rte_ether_hdr));\n \t\t\tl2_len  += sizeof(struct rte_vlan_hdr);\n@@ -346,7 +346,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)\n \t\t}\n \n \t\t/* Reply to ARP requests */\n-\t\tif (eth_type == ETHER_TYPE_ARP) {\n+\t\tif (eth_type == RTE_ETHER_TYPE_ARP) {\n \t\t\tarp_h = (struct rte_arp_hdr *) ((char *)eth_h + l2_len);\n \t\t\tarp_op = RTE_BE_TO_CPU_16(arp_h->arp_opcode);\n \t\t\tarp_pro = RTE_BE_TO_CPU_16(arp_h->arp_protocol);\n@@ -360,7 +360,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)\n \t\t\t}\n \t\t\tif ((RTE_BE_TO_CPU_16(arp_h->arp_hardware) !=\n \t\t\t     RTE_ARP_HRD_ETHER) ||\n-\t\t\t    (arp_pro != ETHER_TYPE_IPv4) ||\n+\t\t\t    (arp_pro != RTE_ETHER_TYPE_IPv4) ||\n \t\t\t    (arp_h->arp_hlen != 6) ||\n \t\t\t    (arp_h->arp_plen != 4)\n \t\t\t    ) {\n@@ -414,7 +414,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)\n \t\t\tcontinue;\n \t\t}\n \n-\t\tif (eth_type != ETHER_TYPE_IPv4) {\n+\t\tif (eth_type != RTE_ETHER_TYPE_IPv4) {\n \t\t\trte_pktmbuf_free(pkt);\n \t\t\tcontinue;\n \t\t}\ndiff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c\nindex 2b7003be4..e3b98e3e0 100644\n--- a/app/test-pmd/ieee1588fwd.c\n+++ b/app/test-pmd/ieee1588fwd.c\n@@ -115,7 +115,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)\n \teth_type = rte_be_to_cpu_16(eth_hdr->ether_type);\n \n \tif (! 
(mb->ol_flags & PKT_RX_IEEE1588_PTP)) {\n-\t\tif (eth_type == ETHER_TYPE_1588) {\n+\t\tif (eth_type == RTE_ETHER_TYPE_1588) {\n \t\t\tprintf(\"Port %u Received PTP packet not filtered\"\n \t\t\t       \" by hardware\\n\",\n \t\t\t       fs->rx_port);\n@@ -128,7 +128,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)\n \t\trte_pktmbuf_free(mb);\n \t\treturn;\n \t}\n-\tif (eth_type != ETHER_TYPE_1588) {\n+\tif (eth_type != RTE_ETHER_TYPE_1588) {\n \t\tprintf(\"Port %u Received NON PTP packet incorrectly\"\n \t\t       \" detected by hardware\\n\",\n \t\t       fs->rx_port);\ndiff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c\nindex 41eb3ffa8..245b61064 100644\n--- a/app/test-pmd/parameters.c\n+++ b/app/test-pmd/parameters.c\n@@ -879,15 +879,15 @@ launch_args_parse(int argc, char** argv)\n \t\t\t}\n \t\t\tif (!strcmp(lgopts[opt_idx].name, \"max-pkt-len\")) {\n \t\t\t\tn = atoi(optarg);\n-\t\t\t\tif (n >= ETHER_MIN_LEN) {\n+\t\t\t\tif (n >= RTE_ETHER_MIN_LEN) {\n \t\t\t\t\trx_mode.max_rx_pkt_len = (uint32_t) n;\n-\t\t\t\t\tif (n > ETHER_MAX_LEN)\n+\t\t\t\t\tif (n > RTE_ETHER_MAX_LEN)\n \t\t\t\t\t\trx_offloads |=\n \t\t\t\t\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \t\t\t\t} else\n \t\t\t\t\trte_exit(EXIT_FAILURE,\n \t\t\t\t\t\t \"Invalid max-pkt-len=%d - should be > %d\\n\",\n-\t\t\t\t\t\t n, ETHER_MIN_LEN);\n+\t\t\t\t\t\t n, RTE_ETHER_MIN_LEN);\n \t\t\t}\n \t\t\tif (!strcmp(lgopts[opt_idx].name, \"pkt-filter-mode\")) {\n \t\t\t\tif (!strcmp(optarg, \"signature\"))\ndiff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c\nindex 2c736ecac..55e1b9d27 100644\n--- a/app/test-pmd/testpmd.c\n+++ b/app/test-pmd/testpmd.c\n@@ -413,7 +413,8 @@ lcoreid_t latencystats_lcore_id = -1;\n  * Ethernet device configuration.\n  */\n struct rte_eth_rxmode rx_mode = {\n-\t.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */\n+\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n+\t\t/**< Default maximum frame length. 
*/\n };\n \n struct rte_eth_txmode tx_mode = {\n@@ -526,7 +527,7 @@ static void dev_event_callback(const char *device_name,\n static int all_ports_started(void);\n \n struct gso_status gso_ports[RTE_MAX_ETHPORTS];\n-uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;\n+uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;\n \n /*\n  * Helper function to check if socket is already discovered.\n@@ -582,7 +583,7 @@ set_def_peer_eth_addrs(void)\n \tportid_t i;\n \n \tfor (i = 0; i < RTE_MAX_ETHPORTS; i++) {\n-\t\tpeer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;\n+\t\tpeer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;\n \t\tpeer_eth_addrs[i].addr_bytes[5] = i;\n \t}\n }\n@@ -1223,8 +1224,8 @@ init_config(void)\n \t\tfwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;\n \t\tfwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;\n \t\tfwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;\n-\t\tfwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -\n-\t\t\tETHER_CRC_LEN;\n+\t\tfwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -\n+\t\t\tRTE_ETHER_CRC_LEN;\n \t\tfwd_lcores[lc_id]->gso_ctx.flag = 0;\n \t}\n \ndiff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h\nindex abee0a952..e3a6f7c71 100644\n--- a/app/test-pmd/testpmd.h\n+++ b/app/test-pmd/testpmd.h\n@@ -501,8 +501,8 @@ struct vxlan_encap_conf {\n \trte_be16_t vlan_tci;\n \tuint8_t ip_tos;\n \tuint8_t ip_ttl;\n-\tuint8_t eth_src[ETHER_ADDR_LEN];\n-\tuint8_t eth_dst[ETHER_ADDR_LEN];\n+\tuint8_t eth_src[RTE_ETHER_ADDR_LEN];\n+\tuint8_t eth_dst[RTE_ETHER_ADDR_LEN];\n };\n struct vxlan_encap_conf vxlan_encap_conf;\n \n@@ -516,8 +516,8 @@ struct nvgre_encap_conf {\n \tuint8_t ipv6_src[16];\n \tuint8_t ipv6_dst[16];\n \trte_be16_t vlan_tci;\n-\tuint8_t eth_src[ETHER_ADDR_LEN];\n-\tuint8_t eth_dst[ETHER_ADDR_LEN];\n+\tuint8_t eth_src[RTE_ETHER_ADDR_LEN];\n+\tuint8_t eth_dst[RTE_ETHER_ADDR_LEN];\n };\n struct nvgre_encap_conf nvgre_encap_conf;\n \n@@ -526,8 +526,8 @@ struct l2_encap_conf {\n \tuint32_t select_ipv4:1;\n \tuint32_t select_vlan:1;\n \trte_be16_t vlan_tci;\n-\tuint8_t eth_src[ETHER_ADDR_LEN];\n-\tuint8_t eth_dst[ETHER_ADDR_LEN];\n+\tuint8_t eth_src[RTE_ETHER_ADDR_LEN];\n+\tuint8_t eth_dst[RTE_ETHER_ADDR_LEN];\n };\n struct l2_encap_conf l2_encap_conf;\n \n@@ -547,8 +547,8 @@ struct mplsogre_encap_conf {\n \tuint8_t ipv6_src[16];\n \tuint8_t ipv6_dst[16];\n \trte_be16_t vlan_tci;\n-\tuint8_t eth_src[ETHER_ADDR_LEN];\n-\tuint8_t eth_dst[ETHER_ADDR_LEN];\n+\tuint8_t eth_src[RTE_ETHER_ADDR_LEN];\n+\tuint8_t eth_dst[RTE_ETHER_ADDR_LEN];\n };\n struct mplsogre_encap_conf mplsogre_encap_conf;\n \n@@ -571,8 +571,8 @@ struct mplsoudp_encap_conf {\n \tuint8_t ipv6_src[16];\n \tuint8_t ipv6_dst[16];\n \trte_be16_t vlan_tci;\n-\tuint8_t eth_src[ETHER_ADDR_LEN];\n-\tuint8_t eth_dst[ETHER_ADDR_LEN];\n+\tuint8_t eth_src[RTE_ETHER_ADDR_LEN];\n+\tuint8_t eth_dst[RTE_ETHER_ADDR_LEN];\n };\n struct mplsoudp_encap_conf mplsoudp_encap_conf;\n \ndiff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c\nindex eb3a245d2..91fb1f6fb 100644\n--- a/app/test-pmd/txonly.c\n+++ b/app/test-pmd/txonly.c\n@@ -268,7 +268,7 @@ pkt_burst_transmit(struct fwd_stream *fs)\n \t */\n \trte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);\n \trte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);\n-\teth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\teth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \n \tif (rte_mempool_get_bulk(mbp, (void **)pkts_burst,\n \t\t\t\tnb_pkt_per_burst) == 0) 
{\ndiff --git a/app/test-pmd/util.c b/app/test-pmd/util.c\nindex 55f3844dd..105c56090 100644\n--- a/app/test-pmd/util.c\n+++ b/app/test-pmd/util.c\n@@ -16,8 +16,8 @@\n static inline void\n print_ether_addr(const char *what, struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", what, buf);\n }\n \ndiff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c\nindex ba94e6193..0999b87e1 100644\n--- a/app/test/packet_burst_generator.c\n+++ b/app/test/packet_burst_generator.c\n@@ -65,7 +65,7 @@ initialize_eth_header(struct rte_ether_hdr *eth_hdr,\n \t\tstruct rte_vlan_hdr *vhdr = (struct rte_vlan_hdr *)(\n \t\t\t(uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr));\n \n-\t\teth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \n \t\tvhdr->eth_proto =  rte_cpu_to_be_16(ether_type);\n \t\tvhdr->vlan_tci = van_id;\n@@ -82,8 +82,8 @@ initialize_arp_header(struct rte_arp_hdr *arp_hdr,\n \t\tuint32_t opcode)\n {\n \tarp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);\n-\tarp_hdr->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n-\tarp_hdr->arp_hlen = ETHER_ADDR_LEN;\n+\tarp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n+\tarp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;\n \tarp_hdr->arp_plen = sizeof(uint32_t);\n \tarp_hdr->arp_opcode = rte_cpu_to_be_16(opcode);\n \trte_ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);\n@@ -322,10 +322,10 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,\n \t\tpkt->l2_len = eth_hdr_size;\n \n \t\tif (ipv4) {\n-\t\t\tpkt->vlan_tci  = ETHER_TYPE_IPv4;\n+\t\t\tpkt->vlan_tci  = RTE_ETHER_TYPE_IPv4;\n \t\t\tpkt->l3_len = sizeof(struct ipv4_hdr);\n \t\t} else {\n-\t\t\tpkt->vlan_tci  = ETHER_TYPE_IPv6;\n+\t\t\tpkt->vlan_tci  = RTE_ETHER_TYPE_IPv6;\n \t\t\tpkt->l3_len = sizeof(struct ipv6_hdr);\n \t\t}\n \n@@ -437,10 +437,10 @@ generate_packet_burst_proto(struct rte_mempool *mp,\n \t\tpkt->l2_len = eth_hdr_size;\n \n \t\tif (ipv4) {\n-\t\t\tpkt->vlan_tci  = ETHER_TYPE_IPv4;\n+\t\t\tpkt->vlan_tci  = RTE_ETHER_TYPE_IPv4;\n \t\t\tpkt->l3_len = sizeof(struct ipv4_hdr);\n \t\t} else {\n-\t\t\tpkt->vlan_tci  = ETHER_TYPE_IPv6;\n+\t\t\tpkt->vlan_tci  = RTE_ETHER_TYPE_IPv6;\n \t\t\tpkt->l3_len = sizeof(struct ipv6_hdr);\n \t\t}\n \ndiff --git a/app/test/test_cmdline_etheraddr.c b/app/test/test_cmdline_etheraddr.c\nindex 90943c2b4..9a32fd7ec 100644\n--- a/app/test/test_cmdline_etheraddr.c\n+++ b/app/test/test_cmdline_etheraddr.c\n@@ -85,7 +85,7 @@ static int\n is_addr_different(const struct rte_ether_addr addr, uint64_t num)\n {\n \tint i;\n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++, num >>= 8)\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++, num >>= 8)\n \t\tif (addr.addr_bytes[i] != (num & 0xFF)) {\n \t\t\treturn 1;\n \t\t}\ndiff --git a/app/test/test_flow_classify.c b/app/test/test_flow_classify.c\nindex f81bbba3a..3f06f3005 100644\n--- a/app/test/test_flow_classify.c\n+++ b/app/test/test_flow_classify.c\n@@ -504,7 +504,7 @@ init_ipv4_udp_traffic(struct rte_mempool *mp,\n \tprintf(\"Set up IPv4 UDP traffic\\n\");\n \tinitialize_eth_header(&pkt_eth_hdr,\n \t\t(struct rte_ether_addr *)src_mac,\n-\t\t(struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);\n+\t\t(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);\n \tpktlen = 
(uint16_t)(sizeof(struct rte_ether_hdr));\n \tprintf(\"ETH  pktlen %u\\n\", pktlen);\n \n@@ -541,7 +541,7 @@ init_ipv4_tcp_traffic(struct rte_mempool *mp,\n \tprintf(\"Set up IPv4 TCP traffic\\n\");\n \tinitialize_eth_header(&pkt_eth_hdr,\n \t\t(struct rte_ether_addr *)src_mac,\n-\t\t(struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);\n+\t\t(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);\n \tpktlen = (uint16_t)(sizeof(struct rte_ether_hdr));\n \tprintf(\"ETH  pktlen %u\\n\", pktlen);\n \n@@ -578,7 +578,7 @@ init_ipv4_sctp_traffic(struct rte_mempool *mp,\n \tprintf(\"Set up IPv4 SCTP traffic\\n\");\n \tinitialize_eth_header(&pkt_eth_hdr,\n \t\t(struct rte_ether_addr *)src_mac,\n-\t\t(struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);\n+\t\t(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);\n \tpktlen = (uint16_t)(sizeof(struct rte_ether_hdr));\n \tprintf(\"ETH  pktlen %u\\n\", pktlen);\n \ndiff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c\nindex 5a7f45f94..eb6f1a9f1 100644\n--- a/app/test/test_link_bonding.c\n+++ b/app/test/test_link_bonding.c\n@@ -136,7 +136,7 @@ static struct rte_eth_conf default_pmd_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_NONE,\n \t\t.split_hdr_size = 0,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n \t.txmode = {\n \t\t.mq_mode = ETH_MQ_TX_NONE,\n@@ -237,7 +237,7 @@ test_setup(void)\n \t\tfor (i = 0; i < TEST_MAX_NUMBER_OF_PORTS; i++) {\n \t\t\tchar pmd_name[RTE_ETH_NAME_MAX_LEN];\n \n-\t\t\tmac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;\n+\t\t\tmac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;\n \n \t\t\tsnprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, \"eth_virt_%d\", i);\n \n@@ -396,7 +396,7 @@ test_remove_slave_from_bonded_device(void)\n \n \n \tmac_addr = (struct rte_ether_addr *)slave_mac;\n-\tmac_addr->addr_bytes[ETHER_ADDR_LEN-1] =\n+\tmac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] =\n \t\t\ttest_params->bonded_slave_count-1;\n \n \trte_eth_macaddr_get(\n@@ -752,7 +752,7 @@ test_set_primary_slave(void)\n \t\t\t\ttest_params->bonded_port_id);\n \n \t\texpected_mac_addr = (struct rte_ether_addr *)&slave_mac;\n-\t\texpected_mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;\n+\t\texpected_mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;\n \n \t\t/* Check primary slave MAC */\n \t\trte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr);\n@@ -902,7 +902,8 @@ test_set_bonded_port_initialization_mac_assignment(void)\n \t\tfor (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) {\n \t\t\tchar pmd_name[RTE_ETH_NAME_MAX_LEN];\n \n-\t\t\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = i + 100;\n+\t\t\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n+\t\t\t\ti + 100;\n \n \t\t\tsnprintf(pmd_name, RTE_ETH_NAME_MAX_LEN,\n \t\t\t\t\"eth_slave_%d\", i);\n@@ -942,8 +943,8 @@ test_set_bonded_port_initialization_mac_assignment(void)\n \t/*\n \t * 3. 
Set explicit MAC address on bonded ethdev\n \t */\n-\tbonded_mac_addr.addr_bytes[ETHER_ADDR_LEN-2] = 0xFF;\n-\tbonded_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0xAA;\n+\tbonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-2] = 0xFF;\n+\tbonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0xAA;\n \n \tTEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set(\n \t\t\tbonded_port_id, &bonded_mac_addr),\n@@ -974,13 +975,13 @@ test_set_bonded_port_initialization_mac_assignment(void)\n \t\t\tsizeof(read_mac_addr)),\n \t\t\t\"slave port 0 mac address not as expected\");\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;\n \trte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n \t\t\t\"slave port 1 mac address not as expected\");\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 2 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100;\n \trte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n@@ -1005,13 +1006,13 @@ test_set_bonded_port_initialization_mac_assignment(void)\n \t\t\tsizeof(read_mac_addr)),\n \t\t\t\"bonded port mac address not as expected\");\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100;\n \trte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n \t\t\t\"slave port 0 mac address not as expected\");\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;\n \trte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n@@ -1042,19 +1043,19 @@ test_set_bonded_port_initialization_mac_assignment(void)\n \t\t\t\"Number of slaves (%d) is great than expected (%d).\",\n \t\t\tslave_count, 0);\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100;\n \trte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n \t\t\t\"slave port 0 mac address not as expected\");\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;\n \trte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n \t\t\t\"slave port 1 mac address not as expected\");\n \n-\tslave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 2 + 100;\n+\tslave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100;\n \trte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr);\n \tTEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,\n \t\t\tsizeof(read_mac_addr)),\n@@ -1271,9 +1272,9 @@ generate_test_burst(struct rte_mbuf **pkts_burst, uint16_t burst_size,\n \tvoid *ip_hdr;\n \n \tif (ipv4)\n-\t\tether_type = ETHER_TYPE_IPv4;\n+\t\tether_type = RTE_ETHER_TYPE_IPv4;\n \telse\n-\t\tether_type = ETHER_TYPE_IPv6;\n+\t\tether_type = RTE_ETHER_TYPE_IPv6;\n \n \tif (toggle_dst_mac)\n \t\tinitialize_eth_header(test_params->pkt_eth_hdr,\n@@ -1953,7 +1954,7 @@ test_roundrobin_verfiy_polling_slave_link_status_change(void)\n \tfor (i = 0; i < 
TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT; i++) {\n \t\t/* Generate slave name / MAC address */\n \t\tsnprintf(slave_name, RTE_ETH_NAME_MAX_LEN, \"eth_virt_poll_%d\", i);\n-\t\tmac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;\n+\t\tmac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;\n \n \t\t/* Create slave devices with no ISR Support */\n \t\tif (polling_test_slaves[i] == -1) {\n@@ -2046,7 +2047,7 @@ test_activebackup_tx_burst(void)\n \tinitialize_eth_header(test_params->pkt_eth_hdr,\n \t\t\t(struct rte_ether_addr *)src_mac,\n \t\t\t(struct rte_ether_addr *)dst_mac_0,\n-\t\t\tETHER_TYPE_IPv4,  0, 0);\n+\t\t\tRTE_ETHER_TYPE_IPv4,  0, 0);\n \tpktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,\n \t\t\tdst_port_0, 16);\n \tpktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr,\n@@ -2583,7 +2584,7 @@ test_balance_l2_tx_burst(void)\n \tinitialize_eth_header(test_params->pkt_eth_hdr,\n \t\t\t(struct rte_ether_addr *)src_mac,\n \t\t\t(struct rte_ether_addr *)dst_mac_0,\n-\t\t\tETHER_TYPE_IPv4, 0, 0);\n+\t\t\tRTE_ETHER_TYPE_IPv4, 0, 0);\n \tpktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,\n \t\t\tdst_port_0, 16);\n \tpktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr,\n@@ -2599,7 +2600,7 @@ test_balance_l2_tx_burst(void)\n \tinitialize_eth_header(test_params->pkt_eth_hdr,\n \t\t\t(struct rte_ether_addr *)src_mac,\n \t\t\t(struct rte_ether_addr *)dst_mac_1,\n-\t\t\tETHER_TYPE_IPv4, 0, 0);\n+\t\t\tRTE_ETHER_TYPE_IPv4, 0, 0);\n \n \t/* Generate a burst 2 of packets to transmit */\n \tTEST_ASSERT_EQUAL(generate_packet_burst(test_params->mbuf_pool, &pkts_burst[1][0],\n@@ -3425,7 +3426,7 @@ test_broadcast_tx_burst(void)\n \tinitialize_eth_header(test_params->pkt_eth_hdr,\n \t\t\t(struct rte_ether_addr *)src_mac,\n \t\t\t(struct rte_ether_addr *)dst_mac_0,\n-\t\t\tETHER_TYPE_IPv4, 0, 0);\n+\t\t\tRTE_ETHER_TYPE_IPv4, 0, 0);\n \n \tpktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,\n \t\t\tdst_port_0, 16);\n@@ -4011,12 +4012,12 @@ test_tlb_tx_burst(void)\n \t\t\tinitialize_eth_header(test_params->pkt_eth_hdr,\n \t\t\t\t\t(struct rte_ether_addr *)src_mac,\n \t\t\t\t\t(struct rte_ether_addr *)dst_mac_0,\n-\t\t\t\t\tETHER_TYPE_IPv4, 0, 0);\n+\t\t\t\t\tRTE_ETHER_TYPE_IPv4, 0, 0);\n \t\t} else {\n \t\t\tinitialize_eth_header(test_params->pkt_eth_hdr,\n \t\t\t\t\t(struct rte_ether_addr *)test_params->default_slave_mac,\n \t\t\t\t\t(struct rte_ether_addr *)dst_mac_0,\n-\t\t\t\t\tETHER_TYPE_IPv4, 0, 0);\n+\t\t\t\t\tRTE_ETHER_TYPE_IPv4, 0, 0);\n \t\t}\n \t\tpktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,\n \t\t\t\tdst_port_0, 16);\n@@ -4519,10 +4520,10 @@ test_alb_change_mac_in_reply_sent(void)\n \t * them through the bonding port.\n \t */\n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client1,\n@@ -4530,10 +4531,10 @@ test_alb_change_mac_in_reply_sent(void)\n \trte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);\n \n \tpkt = 
rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client2,\n@@ -4541,10 +4542,10 @@ test_alb_change_mac_in_reply_sent(void)\n \trte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);\n \n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client3,\n@@ -4552,10 +4553,10 @@ test_alb_change_mac_in_reply_sent(void)\n \trte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);\n \n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client4,\n@@ -4640,10 +4641,10 @@ test_alb_reply_from_client(void)\n \t * them in the rx queue to be received by the bonding driver on rx_burst.\n \t */\n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host,\n@@ -4652,10 +4653,10 @@ test_alb_reply_from_client(void)\n \t\t\t1);\n \n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client2, ip_host,\n@@ -4664,10 +4665,10 @@ 
test_alb_reply_from_client(void)\n \t\t\t1);\n \n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client3, ip_host,\n@@ -4676,10 +4677,10 @@ test_alb_reply_from_client(void)\n \t\t\t1);\n \n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_ARP, 0, 0);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +\n \t\t\t\t\tsizeof(struct rte_ether_hdr));\n \tinitialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client4, ip_host,\n@@ -4774,16 +4775,16 @@ test_alb_receive_vlan_reply(void)\n \t * Generating packet with double VLAN header and placing it in the rx queue.\n \t */\n \tpkt = rte_pktmbuf_alloc(test_params->mbuf_pool);\n-\tmemcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);\n+\tmemcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);\n \teth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_VLAN, 0,\n-\t\t\t0);\n+\tinitialize_eth_header(eth_pkt, &bond_mac, &client_mac,\n+\t\t\tRTE_ETHER_TYPE_VLAN, 0, 0);\n \tvlan_pkt = (struct rte_vlan_hdr *)((char *)(eth_pkt + 1));\n \tvlan_pkt->vlan_tci = rte_cpu_to_be_16(1);\n-\tvlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\tvlan_pkt->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \tvlan_pkt = vlan_pkt+1;\n \tvlan_pkt->vlan_tci = rte_cpu_to_be_16(2);\n-\tvlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_ARP);\n+\tvlan_pkt->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);\n \tarp_pkt = (struct rte_arp_hdr *)((char *)(vlan_pkt + 1));\n \tinitialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host,\n \t\t\tRTE_ARP_OP_REPLY);\n@@ -4810,7 +4811,8 @@ test_alb_receive_vlan_reply(void)\n \t\t\t\tretval = -1;\n \t\t\t\tgoto test_end;\n \t\t\t}\n-\t\t\tif (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {\n+\t\t\tif (vlan_pkt->eth_proto != rte_cpu_to_be_16(\n+\t\t\t\t\tRTE_ETHER_TYPE_VLAN)) {\n \t\t\t\tretval = -1;\n \t\t\t\tgoto test_end;\n \t\t\t}\n@@ -4819,7 +4821,8 @@ test_alb_receive_vlan_reply(void)\n \t\t\t\tretval = -1;\n \t\t\t\tgoto test_end;\n \t\t\t}\n-\t\t\tif (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_ARP)) {\n+\t\t\tif (vlan_pkt->eth_proto != rte_cpu_to_be_16(\n+\t\t\t\t\tRTE_ETHER_TYPE_ARP)) {\n \t\t\t\tretval = -1;\n \t\t\t\tgoto test_end;\n \t\t\t}\ndiff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c\nindex 0bb81fd2e..96466a1ac 100644\n--- a/app/test/test_link_bonding_mode4.c\n+++ b/app/test/test_link_bonding_mode4.c\n@@ -108,7 +108,7 @@ static struct link_bonding_unittest_params test_params  = {\n static struct rte_eth_conf default_pmd_conf = {\n \t.rxmode = {\n \t\t.mq_mode = 
ETH_MQ_RX_NONE,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\n@@ -233,7 +233,7 @@ add_slave(struct slave_conf *slave, uint8_t start)\n \tRTE_VERIFY(slave->port_id != INVALID_PORT_ID);\n \n \trte_ether_addr_copy(&slave_mac_default, &addr);\n-\taddr.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;\n+\taddr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;\n \n \trte_eth_dev_mac_addr_remove(slave->port_id, &addr);\n \n@@ -299,7 +299,7 @@ lacp_recv_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt)\n \tRTE_VERIFY(lacp_pkt != NULL);\n \n \thdr = rte_pktmbuf_mtod(lacp_pkt, struct rte_ether_hdr *);\n-\tRTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_SLOW));\n+\tRTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW));\n \n \tslow_hdr = rte_pktmbuf_mtod(lacp_pkt, struct slow_protocol_frame *);\n \tRTE_VERIFY(slow_hdr->slow_protocol.subtype == SLOW_SUBTYPE_LACP);\n@@ -480,7 +480,7 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt)\n \n \t/* look for LACP */\n \thdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tif (hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_SLOW))\n+\tif (hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW))\n \t\treturn 1;\n \n \tslow_hdr = rte_pktmbuf_mtod(pkt, struct slow_protocol_frame *);\n@@ -492,7 +492,8 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt)\n \n \t/* Change source address to partner address */\n \trte_ether_addr_copy(&parnter_mac_default, &slow_hdr->eth_hdr.s_addr);\n-\tslow_hdr->eth_hdr.s_addr.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;\n+\tslow_hdr->eth_hdr.s_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n+\t\tslave->port_id;\n \n \tlacp = (struct lacpdu *) &slow_hdr->slow_protocol;\n \t/* Save last received state */\n@@ -930,11 +931,11 @@ test_mode4_rx(void)\n \tFOR_EACH_SLAVE(i, slave) {\n \t\tvoid *pkt = NULL;\n \n-\t\tdst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;\n+\t\tdst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;\n \t\tretval = generate_and_put_packets(slave, &src_mac, &dst_mac, 1);\n \t\tTEST_ASSERT_SUCCESS(retval, \"Failed to generate test packet burst.\");\n \n-\t\tsrc_mac.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;\n+\t\tsrc_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;\n \t\tretval = generate_and_put_packets(slave, &src_mac, &bonded_mac, 1);\n \t\tTEST_ASSERT_SUCCESS(retval, \"Failed to generate test packet burst.\");\n \n@@ -995,7 +996,7 @@ test_mode4_tx_burst(void)\n \n \t/* Prepare burst */\n \tfor (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) {\n-\t\tdst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = pkts_cnt;\n+\t\tdst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt;\n \t\tretval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]);\n \n \t\tif (retval != 1)\n@@ -1068,7 +1069,7 @@ test_mode4_tx_burst(void)\n \n \t/* Prepare burst. 
*/\n \tfor (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) {\n-\t\tdst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = pkts_cnt;\n+\t\tdst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt;\n \t\tretval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]);\n \n \t\tif (retval != 1)\n@@ -1140,9 +1141,10 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)\n \n \t/* Init source address */\n \trte_ether_addr_copy(&parnter_mac_default, &marker_hdr->eth_hdr.s_addr);\n-\tmarker_hdr->eth_hdr.s_addr.addr_bytes[ETHER_ADDR_LEN-1] = slave->port_id;\n+\tmarker_hdr->eth_hdr.s_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n+\t\tslave->port_id;\n \n-\tmarker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);\n+\tmarker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);\n \n \tmarker_hdr->marker.subtype = SLOW_SUBTYPE_MARKER;\n \tmarker_hdr->marker.version_number = 1;\n@@ -1168,7 +1170,7 @@ test_mode4_marker(void)\n \tint retval;\n \tuint16_t nb_pkts;\n \tuint8_t i, j;\n-\tconst uint16_t ethtype_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);\n+\tconst uint16_t ethtype_slow_be = rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);\n \n \tretval = initialize_bonded_device_with_slaves(TEST_MARKER_SLAVE_COUT,\n \t\t\t\t\t\t      0);\n@@ -1368,7 +1370,7 @@ test_mode4_ext_ctrl(void)\n \trte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);\n \n \tinitialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,\n-\t\t\t      ETHER_TYPE_SLOW, 0, 0);\n+\t\t\t      RTE_ETHER_TYPE_SLOW, 0, 0);\n \n \tfor (i = 0; i < SLAVE_COUNT; i++) {\n \t\tlacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);\n@@ -1422,7 +1424,7 @@ test_mode4_ext_lacp(void)\n \trte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);\n \n \tinitialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,\n-\t\t\t      ETHER_TYPE_SLOW, 0, 0);\n+\t\t\t      RTE_ETHER_TYPE_SLOW, 0, 0);\n \n \tfor (i = 0; i < SLAVE_COUNT; i++) {\n \t\tlacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);\ndiff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c\nindex 4392522ea..65de3b98d 100644\n--- a/app/test/test_link_bonding_rssconf.c\n+++ b/app/test/test_link_bonding_rssconf.c\n@@ -81,7 +81,7 @@ static struct link_bonding_rssconf_unittest_params test_params  = {\n static struct rte_eth_conf default_pmd_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_NONE,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\n@@ -93,7 +93,7 @@ static struct rte_eth_conf default_pmd_conf = {\n static struct rte_eth_conf rss_pmd_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\ndiff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c\nindex a0c3eb956..4b973ffc4 100644\n--- a/app/test/test_pmd_perf.c\n+++ b/app/test/test_pmd_perf.c\n@@ -63,7 +63,7 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_NONE,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\n@@ -173,8 +173,8 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, 
eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \n@@ -192,7 +192,7 @@ init_traffic(struct rte_mempool *mp,\n \n \tinitialize_eth_header(&pkt_eth_hdr,\n \t\t(struct rte_ether_addr *)src_mac,\n-\t\t(struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);\n+\t\t(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);\n \n \tpktlen = initialize_ipv4_header(&pkt_ipv4_hdr,\n \t\t\t\t\tIPV4_ADDR(10, 0, 0, 1),\ndiff --git a/app/test/test_sched.c b/app/test/test_sched.c\nindex a782bf3e5..a08e1294c 100644\n--- a/app/test/test_sched.c\n+++ b/app/test/test_sched.c\n@@ -95,7 +95,7 @@ prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)\n \n \tvlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);\n \tvlan2->vlan_tci = rte_cpu_to_be_16(PIPE);\n-\teth_hdr->ether_type =  rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\teth_hdr->ether_type =  rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \tip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);\n \n \ndiff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c\nindex 15ce64445..55c2bdb98 100644\n--- a/app/test/virtual_pmd.c\n+++ b/app/test/virtual_pmd.c\n@@ -566,7 +566,7 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,\n \teth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;\n \teth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;\n \n-\teth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL)\n \t\tgoto err;\n \ndiff --git a/doc/guides/nics/kni.rst b/doc/guides/nics/kni.rst\nindex 602a06b5f..593564d13 100644\n--- a/doc/guides/nics/kni.rst\n+++ b/doc/guides/nics/kni.rst\n@@ -56,7 +56,7 @@ configuration:\n         Interface name: kni#\n         force bind kernel thread to a core : NO\n         mbuf size: (rte_pktmbuf_data_room_size(pktmbuf_pool) - RTE_PKTMBUF_HEADROOM)\n-        mtu: (conf.mbuf_size - ETHER_HDR_LEN)\n+        mtu: (conf.mbuf_size - RTE_ETHER_HDR_LEN)\n \n KNI control path is not supported with the PMD, since there is no physical\n backend device by default.\ndiff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst\nindex 937f52bce..a34d012e5 100644\n--- a/doc/guides/prog_guide/rte_flow.rst\n+++ b/doc/guides/prog_guide/rte_flow.rst\n@@ -863,7 +863,7 @@ Item: ``VLAN``\n Matches an 802.1Q/ad VLAN tag.\n \n The corresponding standard outer EtherType (TPID) values are\n-``ETHER_TYPE_VLAN`` or ``ETHER_TYPE_QINQ``. It can be overridden by the\n+``RTE_ETHER_TYPE_VLAN`` or ``RTE_ETHER_TYPE_QINQ``. It can be overridden by the\n preceding pattern item.\n \n - ``tci``: tag control information.\n@@ -940,7 +940,7 @@ Item: ``E_TAG``\n Matches an IEEE 802.1BR E-Tag header.\n \n The corresponding standard outer EtherType (TPID) value is\n-``ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.\n+``RTE_ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.\n \n - ``epcp_edei_in_ecid_b``: E-Tag control information (E-TCI), E-PCP (3b),\n   E-DEI (1b), ingress E-CID base (12b).\ndiff --git a/doc/guides/sample_app_ug/flow_classify.rst b/doc/guides/sample_app_ug/flow_classify.rst\nindex 9582b9376..762c3844e 100644\n--- a/doc/guides/sample_app_ug/flow_classify.rst\n+++ b/doc/guides/sample_app_ug/flow_classify.rst\n@@ -326,7 +326,7 @@ The Ethernet ports are configured with default settings using the\n .. 
code-block:: c\n \n     static const struct rte_eth_conf port_conf_default = {\n-        .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }\n+        .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }\n     };\n \n For this example the ports are set up with 1 RX and 1 TX queue using the\ndiff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst\nindex 0b44ab90b..a4cb4377f 100644\n--- a/doc/guides/sample_app_ug/ipv4_multicast.rst\n+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst\n@@ -229,7 +229,7 @@ The actual packet transmission is done in the mcast_send_pkt() function:\n \n         rte_ether_addr_copy(dest_addr, &ethdr->d_addr);\n         rte_ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);\n-        ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);\n+        ethdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);\n \n         /* Put new packet into the output queue */\n \ndiff --git a/doc/guides/sample_app_ug/skeleton.rst b/doc/guides/sample_app_ug/skeleton.rst\nindex 715f5e91a..59ca511d3 100644\n--- a/doc/guides/sample_app_ug/skeleton.rst\n+++ b/doc/guides/sample_app_ug/skeleton.rst\n@@ -160,7 +160,7 @@ The Ethernet ports are configured with default settings using the\n .. code-block:: c\n \n     static const struct rte_eth_conf port_conf_default = {\n-        .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }\n+        .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }\n     };\n \n For this example the ports are set up with 1 RX and 1 TX queue using the\ndiff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h\nindex 412265779..86cecf67b 100644\n--- a/drivers/bus/dpaa/include/compat.h\n+++ b/drivers/bus/dpaa/include/compat.h\n@@ -109,6 +109,8 @@ typedef uint32_t\tphandle;\n typedef uint32_t\tgfp_t;\n typedef uint32_t\tirqreturn_t;\n \n+#define ETHER_ADDR_LEN 6\n+\n #define IRQ_HANDLED\t0\n #define request_irq\tqbman_request_irq\n #define free_irq\tqbman_free_irq\ndiff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c\nindex 893c9ed9f..28e6aa27e 100644\n--- a/drivers/net/af_xdp/rte_eth_af_xdp.c\n+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c\n@@ -352,7 +352,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->max_rx_queues = 1;\n \tdev_info->max_tx_queues = 1;\n \n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \tdev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;\n \n \tdev_info->default_rxportconf.nb_queues = 1;\n@@ -816,7 +816,7 @@ get_iface_info(const char *if_name,\n \tif (ioctl(sock, SIOCGIFHWADDR, &ifr))\n \t\tgoto error;\n \n-\trte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);\n+\trte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);\n \n \tclose(sock);\n \treturn 0;\ndiff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c\nindex 6a1530aa2..7692aa2d9 100644\n--- a/drivers/net/ark/ark_ethdev.c\n+++ b/drivers/net/ark/ark_ethdev.c\n@@ -318,7 +318,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)\n \n \tdev->dev_ops = &ark_eth_dev_ops;\n \n-\tdev->data->mac_addrs = rte_zmalloc(\"ark\", ETHER_ADDR_LEN, 0);\n+\tdev->data->mac_addrs = rte_zmalloc(\"ark\", RTE_ETHER_ADDR_LEN, 0);\n \tif (!dev->data->mac_addrs) {\n \t\tPMD_DRV_LOG(ERR,\n \t\t\t    \"Failed to allocated memory for storing mac address\"\n@@ -385,7 +385,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)\n \n \t\trte_eth_copy_pci_info(eth_dev, pci_dev);\n \n-\t\teth_dev->data->mac_addrs = 
rte_zmalloc(name, ETHER_ADDR_LEN, 0);\n+\t\teth_dev->data->mac_addrs = rte_zmalloc(name,\n+\t\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \t\tif (!eth_dev->data->mac_addrs) {\n \t\t\tPMD_DRV_LOG(ERR,\n \t\t\t\t    \"Memory allocation for MAC failed!\"\ndiff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c\nindex 6b395806c..d110e351e 100644\n--- a/drivers/net/atlantic/atl_ethdev.c\n+++ b/drivers/net/atlantic/atl_ethdev.c\n@@ -416,7 +416,8 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)\n \tatl_disable_intr(hw);\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"atlantic\", ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"atlantic\",\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"MAC Malloc failed\");\n \t\treturn -ENOMEM;\n@@ -897,7 +898,8 @@ int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)\n \t\tATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);\n \n \tmemset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));\n-\tmemcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);\n+\tmemcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,\n+\t\tRTE_ETHER_ADDR_LEN);\n \n \treturn 0;\n }\n@@ -909,7 +911,8 @@ int atl_macsec_config_rxsc(struct rte_eth_dev *dev,\n \t\tATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);\n \n \tmemset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));\n-\tmemcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);\n+\tmemcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,\n+\t\tRTE_ETHER_ADDR_LEN);\n \tcfg->aq_macsec.rxsc.pi = pi;\n \n \treturn 0;\n@@ -1604,11 +1607,11 @@ static int\n atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n {\n \tstruct rte_eth_dev_info dev_info;\n-\tuint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tuint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;\n \n \tatl_dev_info_get(dev, &dev_info);\n \n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)\n \t\treturn -EINVAL;\n \n \t/* update max frame size */\ndiff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c\nindex 38e5eae05..bd5eba4a0 100644\n--- a/drivers/net/avp/avp_ethdev.c\n+++ b/drivers/net/avp/avp_ethdev.c\n@@ -88,7 +88,7 @@ static void avp_dev_stats_reset(struct rte_eth_dev *dev);\n #define AVP_MAX_RX_BURST 64\n #define AVP_MAX_TX_BURST 64\n #define AVP_MAX_MAC_ADDRS 1\n-#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN\n+#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN\n \n \n /*\n@@ -867,7 +867,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,\n \t\tavp->host_features = host_info->features;\n \t\trte_spinlock_init(&avp->lock);\n \t\tmemcpy(&avp->ethaddr.addr_bytes[0],\n-\t\t       host_info->ethaddr, ETHER_ADDR_LEN);\n+\t\t       host_info->ethaddr, RTE_ETHER_ADDR_LEN);\n \t\t/* adjust max values to not exceed our max */\n \t\tavp->max_tx_queues =\n \t\t\tRTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);\n@@ -1006,10 +1006,11 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"avp_ethdev\", ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"avp_ethdev\",\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to allocate %d bytes needed to store MAC addresses\\n\",\n-\t\t\t    
ETHER_ADDR_LEN);\n+\t\t\t    RTE_ETHER_ADDR_LEN);\n \t\treturn -ENOMEM;\n \t}\n \ndiff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h\nindex aa95159c4..478c01a16 100644\n--- a/drivers/net/avp/rte_avp_common.h\n+++ b/drivers/net/avp/rte_avp_common.h\n@@ -345,7 +345,7 @@ struct rte_avp_device_info {\n \t/* Ethernet info */\n \tchar ethaddr[ETH_ALEN];\n #else\n-\tchar ethaddr[ETHER_ADDR_LEN];\n+\tchar ethaddr[RTE_ETHER_ADDR_LEN];\n #endif\n \n \tuint8_t mode; /**< device mode, i.e guest, host, trace */\ndiff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c\nindex 707f1ee9d..b1f0bbc8e 100644\n--- a/drivers/net/axgbe/axgbe_dev.c\n+++ b/drivers/net/axgbe/axgbe_dev.c\n@@ -10,8 +10,8 @@\n \n static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)\n {\n-\treturn pdata->eth_dev->data->mtu + ETHER_HDR_LEN +\n-\t\tETHER_CRC_LEN + VLAN_HLEN;\n+\treturn pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\tRTE_ETHER_CRC_LEN + VLAN_HLEN;\n }\n \n /* query busy bit */\ndiff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c\nindex 237b75c5d..221979c5e 100644\n--- a/drivers/net/axgbe/axgbe_ethdev.c\n+++ b/drivers/net/axgbe/axgbe_ethdev.c\n@@ -626,11 +626,11 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)\n \tpdata->mac_addr.addr_bytes[5] = (mac_hi >> 8)  &  0xff;\n \n \teth_dev->data->mac_addrs = rte_zmalloc(\"axgbe_mac_addr\",\n-\t\t\t\t\t       ETHER_ADDR_LEN, 0);\n+\t\t\t\t\t       RTE_ETHER_ADDR_LEN, 0);\n \tif (!eth_dev->data->mac_addrs) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t     \"Failed to alloc %u bytes needed to store MAC addr tbl\",\n-\t\t\t     ETHER_ADDR_LEN);\n+\t\t\t     RTE_ETHER_ADDR_LEN);\n \t\treturn -ENOMEM;\n \t}\n \ndiff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h\nindex 810ac4a74..e3cfaf36f 100644\n--- a/drivers/net/axgbe/axgbe_ethdev.h\n+++ b/drivers/net/axgbe/axgbe_ethdev.h\n@@ -15,7 +15,7 @@\n \n #define AXGBE_TX_MAX_BUF_SIZE\t\t(0x3fff & ~(64 - 1))\n #define AXGBE_RX_MAX_BUF_SIZE\t\t(0x3fff & ~(64 - 1))\n-#define AXGBE_RX_MIN_BUF_SIZE\t\t(ETHER_MAX_LEN + VLAN_HLEN)\n+#define AXGBE_RX_MIN_BUF_SIZE\t\t(RTE_ETHER_MAX_LEN + VLAN_HLEN)\n #define AXGBE_MAX_MAC_ADDRS\t\t1\n \n #define AXGBE_RX_BUF_ALIGN\t\t64\ndiff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c\nindex b5a29a95f..e76601d14 100644\n--- a/drivers/net/axgbe/axgbe_rxtx.c\n+++ b/drivers/net/axgbe/axgbe_rxtx.c\n@@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \trxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +\n \t\t\t\t\t\t  DMA_CH_RDTR_LO);\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \ndiff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c\nindex 39d30913b..e771e40cb 100644\n--- a/drivers/net/bnx2x/bnx2x.c\n+++ b/drivers/net/bnx2x/bnx2x.c\n@@ -9787,13 +9787,13 @@ int bnx2x_attach(struct bnx2x_softc *sc)\n \t\tbnx2x_get_phy_info(sc);\n \t} else {\n \t\t/* Left mac of VF unfilled, PF should set it for VF */\n-\t\tmemset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);\n+\t\tmemset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN);\n \t}\n \n \tsc->wol = 0;\n \n \t/* set the default MTU (changed via ifconfig) */\n-\tsc->mtu = ETHER_MTU;\n+\tsc->mtu = RTE_ETHER_MTU;\n \n \tbnx2x_set_modes_bitmap(sc);\n \ndiff --git a/drivers/net/bnx2x/ecore_sp.h 
b/drivers/net/bnx2x/ecore_sp.h\nindex 7126097db..fce715b6d 100644\n--- a/drivers/net/bnx2x/ecore_sp.h\n+++ b/drivers/net/bnx2x/ecore_sp.h\n@@ -38,7 +38,7 @@ typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */\n typedef volatile int ecore_atomic_t;\n \n \n-#define ETH_ALEN ETHER_ADDR_LEN /* 6 */\n+#define ETH_ALEN RTE_ETHER_ADDR_LEN /* 6 */\n \n #define ECORE_SWCID_SHIFT   17\n #define ECORE_SWCID_MASK    ((0x1 << ECORE_SWCID_SHIFT) - 1)\ndiff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h\nindex 5535c376e..caacc7258 100644\n--- a/drivers/net/bnxt/bnxt.h\n+++ b/drivers/net/bnxt/bnxt.h\n@@ -309,7 +309,7 @@ struct bnxt {\n \tstruct bnxt_irq         *irq_tbl;\n \n #define MAX_NUM_MAC_ADDR\t32\n-\tuint8_t\t\t\tmac_addr[ETHER_ADDR_LEN];\n+\tuint8_t\t\t\tmac_addr[RTE_ETHER_ADDR_LEN];\n \n \tuint16_t\t\t\thwrm_cmd_seq;\n \tuint16_t\t\t\tkong_cmd_seq;\n@@ -326,7 +326,7 @@ struct bnxt {\n \tuint8_t\t\t\ttx_cosq_id;\n \n \tuint16_t\t\tfw_fid;\n-\tuint8_t\t\t\tdflt_mac_addr[ETHER_ADDR_LEN];\n+\tuint8_t\t\t\tdflt_mac_addr[RTE_ETHER_ADDR_LEN];\n \tuint16_t\t\tmax_rsscos_ctx;\n \tuint16_t\t\tmax_cp_rings;\n \tuint16_t\t\tmax_tx_rings;\ndiff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c\nindex c64315053..b0244d6d1 100644\n--- a/drivers/net/bnxt/bnxt_ethdev.c\n+++ b/drivers/net/bnxt/bnxt_ethdev.c\n@@ -214,7 +214,7 @@ static int bnxt_init_chip(struct bnxt *bp)\n \t/* disable uio/vfio intr/eventfd mapping */\n \trte_intr_disable(intr_handle);\n \n-\tif (bp->eth_dev->data->mtu > ETHER_MTU) {\n+\tif (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {\n \t\tbp->eth_dev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \t\tbp->flags |= BNXT_FLAG_JUMBO;\n@@ -462,8 +462,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,\n \n \t/* Fast path specifics */\n \tdev_info->min_rx_bufsize = 1;\n-\tdev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN\n-\t\t\t\t  + VLAN_TAG_SIZE * 2;\n+\tdev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\n+\t\tRTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;\n \n \tdev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;\n \tif (bp->flags & BNXT_FLAG_PTP_SUPPORTED)\n@@ -595,9 +595,9 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)\n \n \tif (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n \t\teth_dev->data->mtu =\n-\t\t\t\teth_dev->data->dev_conf.rxmode.max_rx_pkt_len -\n-\t\t\t\tETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *\n-\t\t\t\tBNXT_NUM_VLANS;\n+\t\t\teth_dev->data->dev_conf.rxmode.max_rx_pkt_len -\n+\t\t\tRTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *\n+\t\t\tBNXT_NUM_VLANS;\n \t\tbnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);\n \t}\n \treturn 0;\n@@ -750,7 +750,7 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,\n \t\t\t\t\t\tbnxt_filter_info, next);\n \t\t\t\tbnxt_hwrm_clear_l2_filter(bp, filter);\n \t\t\t\tfilter->mac_index = INVALID_MAC_INDEX;\n-\t\t\t\tmemset(&filter->l2_addr, 0, ETHER_ADDR_LEN);\n+\t\t\t\tmemset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);\n \t\t\t\tSTAILQ_INSERT_TAIL(&bp->free_filter_list,\n \t\t\t\t\t\t   filter, next);\n \t\t\t}\n@@ -791,7 +791,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,\n \t}\n \tSTAILQ_INSERT_TAIL(&vnic->filter, filter, next);\n \tfilter->mac_index = index;\n-\tmemcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);\n+\tmemcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);\n \treturn bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);\n }\n \n@@ -1312,7 +1312,7 @@ static int 
bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)\n \t\t\t\tnew_filter->mac_index =\n \t\t\t\t\tfilter->mac_index;\n \t\t\t\tmemcpy(new_filter->l2_addr, filter->l2_addr,\n-\t\t\t\t       ETHER_ADDR_LEN);\n+\t\t\t\t       RTE_ETHER_ADDR_LEN);\n \t\t\t\t/* MAC only filter */\n \t\t\t\trc = bnxt_hwrm_set_l2_filter(bp,\n \t\t\t\t\t\t\t     vnic->fw_vnic_id,\n@@ -1381,7 +1381,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)\n \t\t\t/* Inherit MAC from the previous filter */\n \t\t\tnew_filter->mac_index = filter->mac_index;\n \t\t\tmemcpy(new_filter->l2_addr, filter->l2_addr,\n-\t\t\t       ETHER_ADDR_LEN);\n+\t\t\t       RTE_ETHER_ADDR_LEN);\n \t\t\t/* MAC + VLAN ID filter */\n \t\t\tnew_filter->l2_ivlan = vlan_id;\n \t\t\tnew_filter->l2_ivlan_mask = 0xF000;\n@@ -1472,8 +1472,8 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,\n \t\trc = bnxt_hwrm_clear_l2_filter(bp, filter);\n \t\tif (rc)\n \t\t\treturn rc;\n-\t\tmemcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);\n-\t\tmemset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);\n+\t\tmemcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);\n+\t\tmemset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);\n \t\tfilter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;\n \t\tfilter->enables |=\n \t\t\tHWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n@@ -1508,8 +1508,9 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,\n \t/* TODO Check for Duplicate mcast addresses */\n \tvnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;\n \tfor (i = 0; i < nb_mc_addr; i++) {\n-\t\tmemcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);\n-\t\toff += ETHER_ADDR_LEN;\n+\t\tmemcpy(vnic->mc_list + off, &mc_addr_list[i],\n+\t\t\tRTE_ETHER_ADDR_LEN);\n+\t\toff += RTE_ETHER_ADDR_LEN;\n \t}\n \n \tvnic->mc_addr_cnt = i;\n@@ -1582,13 +1583,13 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)\n \n \tbnxt_dev_info_get_op(eth_dev, &dev_info);\n \n-\tif (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {\n+\tif (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {\n \t\tPMD_DRV_LOG(ERR, \"MTU requested must be within (%d, %d)\\n\",\n-\t\t\tETHER_MIN_MTU, BNXT_MAX_MTU);\n+\t\t\tRTE_ETHER_MIN_MTU, BNXT_MAX_MTU);\n \t\treturn -EINVAL;\n \t}\n \n-\tif (new_mtu > ETHER_MTU) {\n+\tif (new_mtu > RTE_ETHER_MTU) {\n \t\tbp->flags |= BNXT_FLAG_JUMBO;\n \t\tbp->eth_dev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n@@ -1599,7 +1600,8 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)\n \t}\n \n \teth_dev->data->dev_conf.rxmode.max_rx_pkt_len =\n-\t\tnew_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;\n+\t\tnew_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +\n+\t\tVLAN_TAG_SIZE * 2;\n \n \teth_dev->data->mtu = new_mtu;\n \tPMD_DRV_LOG(INFO, \"New MTU is %d\\n\", eth_dev->data->mtu);\n@@ -1608,8 +1610,8 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)\n \t\tstruct bnxt_vnic_info *vnic = &bp->vnic_info[i];\n \t\tuint16_t size = 0;\n \n-\t\tvnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +\n-\t\t\t\t\tETHER_CRC_LEN + VLAN_TAG_SIZE * 2;\n+\t\tvnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\t\t\t\tRTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;\n \t\trc = bnxt_hwrm_vnic_cfg(bp, vnic);\n \t\tif (rc)\n \t\t\tbreak;\n@@ -1794,8 +1796,8 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,\n \tint match = 0;\n \t*ret = 0;\n \n-\tif (efilter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\tefilter->ether_type == ETHER_TYPE_IPv6) 
{\n+\tif (efilter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\tefilter->ether_type == RTE_ETHER_TYPE_IPv6) {\n \t\tPMD_DRV_LOG(ERR, \"invalid ether_type(0x%04x) in\"\n \t\t\t\" ethertype filter.\", efilter->ether_type);\n \t\t*ret = -EINVAL;\n@@ -1818,7 +1820,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,\n \tif (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {\n \t\tSTAILQ_FOREACH(mfilter, &vnic0->filter, next) {\n \t\t\tif ((!memcmp(efilter->mac_addr.addr_bytes,\n-\t\t\t\t     mfilter->l2_addr, ETHER_ADDR_LEN) &&\n+\t\t\t\t     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&\n \t\t\t     mfilter->flags ==\n \t\t\t     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&\n \t\t\t     mfilter->ethertype == efilter->ether_type)) {\n@@ -1829,7 +1831,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,\n \t} else {\n \t\tSTAILQ_FOREACH(mfilter, &vnic->filter, next)\n \t\t\tif ((!memcmp(efilter->mac_addr.addr_bytes,\n-\t\t\t\t     mfilter->l2_addr, ETHER_ADDR_LEN) &&\n+\t\t\t\t     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&\n \t\t\t     mfilter->ethertype == efilter->ether_type &&\n \t\t\t     mfilter->flags ==\n \t\t\t     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {\n@@ -1884,9 +1886,9 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,\n \t\t}\n \t\tbfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;\n \t\tmemcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \t\tmemcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \t\tbfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;\n \t\tbfilter->ethertype = efilter->ether_type;\n \t\tbfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;\n@@ -2397,7 +2399,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,\n \t\t//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n \t} else {\n \t\tfilter->dst_id = vnic->fw_vnic_id;\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n+\t\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n \t\t\tif (filter->dst_macaddr[i] == 0x00)\n \t\t\t\tfilter1 = STAILQ_FIRST(&vnic0->filter);\n \t\t\telse\n@@ -2441,13 +2443,14 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,\n \t\t\t    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&\n \t\t\t    mf->l2_ivlan == nf->l2_ivlan &&\n \t\t\t    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&\n-\t\t\t    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&\n+\t\t\t    !memcmp(mf->l2_addr, nf->l2_addr,\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->src_macaddr, nf->src_macaddr,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->dst_macaddr, nf->dst_macaddr,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->src_ipaddr, nf->src_ipaddr,\n \t\t\t\t    sizeof(nf->src_ipaddr)) &&\n \t\t\t    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,\n@@ -3354,16 +3357,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \t\tgoto error_free;\n \t}\n \teth_dev->data->mac_addrs = rte_zmalloc(\"bnxt_mac_addr_tbl\",\n-\t\t\t\t\tETHER_ADDR_LEN * bp->max_l2_ctx, 0);\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN * bp->max_l2_ctx, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_DRV_LOG(ERR,\n \t\t\t\"Failed to alloc %u bytes needed to store MAC addr tbl\",\n-\t\t\tETHER_ADDR_LEN * bp->max_l2_ctx);\n+\t\t\tRTE_ETHER_ADDR_LEN * bp->max_l2_ctx);\n \t\trc = -ENOMEM;\n \t\tgoto 
error_free;\n \t}\n \n-\tif (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {\n+\tif (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {\n \t\tPMD_DRV_LOG(ERR,\n \t\t\t    \"Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\\n\",\n \t\t\t    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],\n@@ -3374,7 +3377,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \t/* Copy the permanent MAC from the qcap response address now. */\n \tmemcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));\n-\tmemcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);\n+\tmemcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);\n \n \tif (bp->max_ring_grps < bp->rx_cp_nr_rings) {\n \t\t/* 1 ring is for default completion ring */\ndiff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c\nindex f43fe0db0..0aed29fb0 100644\n--- a/drivers/net/bnxt/bnxt_filter.c\n+++ b/drivers/net/bnxt/bnxt_filter.c\n@@ -39,8 +39,8 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)\n \tfilter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n \t\t\tHWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;\n \tmemcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,\n-\t       ETHER_ADDR_LEN);\n-\tmemset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);\n+\t       RTE_ETHER_ADDR_LEN);\n+\tmemset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);\n \treturn filter;\n }\n \ndiff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h\nindex a1ecfb19d..f8bad29de 100644\n--- a/drivers/net/bnxt/bnxt_filter.h\n+++ b/drivers/net/bnxt/bnxt_filter.h\n@@ -25,14 +25,14 @@ struct bnxt_filter_info {\n \t/* Filter Characteristics */\n \tuint32_t\t\tflags;\n \tuint32_t\t\tenables;\n-\tuint8_t\t\t\tl2_addr[ETHER_ADDR_LEN];\n-\tuint8_t\t\t\tl2_addr_mask[ETHER_ADDR_LEN];\n+\tuint8_t\t\t\tl2_addr[RTE_ETHER_ADDR_LEN];\n+\tuint8_t\t\t\tl2_addr_mask[RTE_ETHER_ADDR_LEN];\n \tuint16_t\t\tl2_ovlan;\n \tuint16_t\t\tl2_ovlan_mask;\n \tuint16_t\t\tl2_ivlan;\n \tuint16_t\t\tl2_ivlan_mask;\n-\tuint8_t\t\t\tt_l2_addr[ETHER_ADDR_LEN];\n-\tuint8_t\t\t\tt_l2_addr_mask[ETHER_ADDR_LEN];\n+\tuint8_t\t\t\tt_l2_addr[RTE_ETHER_ADDR_LEN];\n+\tuint8_t\t\t\tt_l2_addr_mask[RTE_ETHER_ADDR_LEN];\n \tuint16_t\t\tt_l2_ovlan;\n \tuint16_t\t\tt_l2_ovlan_mask;\n \tuint16_t\t\tt_l2_ivlan;\ndiff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c\nindex a1f527d4f..bb7f672fc 100644\n--- a/drivers/net/bnxt/bnxt_flow.c\n+++ b/drivers/net/bnxt/bnxt_flow.c\n@@ -682,7 +682,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,\n \tf0 = STAILQ_FIRST(&vnic0->filter);\n \n \t/* This flow has same DST MAC as the port/l2 filter. 
*/\n-\tif (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)\n+\tif (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)\n \t\treturn f0;\n \n \t/* This flow needs DST MAC which is not same as port/l2 */\n@@ -694,8 +694,8 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,\n \tfilter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;\n \tfilter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n \t\t\tL2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;\n-\tmemcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);\n-\tmemset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);\n+\tmemcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);\n+\tmemset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);\n \trc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,\n \t\t\t\t     filter1);\n \tif (rc) {\n@@ -951,13 +951,14 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)\n \t\t\t    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&\n \t\t\t    mf->l2_ivlan == nf->l2_ivlan &&\n \t\t\t    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&\n-\t\t\t    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&\n+\t\t\t    !memcmp(mf->l2_addr, nf->l2_addr,\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->src_macaddr, nf->src_macaddr,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->dst_macaddr, nf->dst_macaddr,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) &&\n \t\t\t    !memcmp(mf->src_ipaddr, nf->src_ipaddr,\n \t\t\t\t    sizeof(nf->src_ipaddr)) &&\n \t\t\t    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c\nindex e2fe4f7a0..4f0142cdb 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.c\n+++ b/drivers/net/bnxt/bnxt_hwrm.c\n@@ -393,11 +393,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,\n \tif (enables &\n \t    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)\n \t\tmemcpy(req.l2_addr, filter->l2_addr,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \tif (enables &\n \t    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)\n \t\tmemcpy(req.l2_addr_mask, filter->l2_addr_mask,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \tif (enables &\n \t    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)\n \t\treq.l2_ovlan = filter->l2_ovlan;\n@@ -571,7 +571,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)\n \t}\n \n \tbp->fw_fid = rte_le_to_cpu_32(resp->fid);\n-\tmemcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);\n+\tmemcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);\n \tbp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);\n \tbp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);\n \tbp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);\n@@ -1329,8 +1329,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \tvnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;\n \tvnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;\n \tvnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;\n-\tvnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +\n-\t\t\t\tETHER_CRC_LEN + VLAN_TAG_SIZE;\n+\tvnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\t\t\tRTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;\n \tHWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);\n \n \tif (vnic->func_default)\n@@ -2516,8 +2516,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int 
tx_rings)\n \t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);\n \treq.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);\n \treq.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);\n-\treq.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +\n-\t\t\t\t   ETHER_CRC_LEN + VLAN_TAG_SIZE *\n+\treq.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\t\t\t   RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *\n \t\t\t\t   BNXT_NUM_VLANS);\n \treq.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);\n \treq.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);\n@@ -2554,11 +2554,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,\n \t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |\n \t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);\n \n-\treq->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +\n-\t\t\t\t    ETHER_CRC_LEN + VLAN_TAG_SIZE *\n+\treq->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\t\t\t    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *\n \t\t\t\t    BNXT_NUM_VLANS);\n-\treq->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +\n-\t\t\t\t    ETHER_CRC_LEN + VLAN_TAG_SIZE *\n+\treq->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\t\t\t    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *\n \t\t\t\t    BNXT_NUM_VLANS);\n \treq->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /\n \t\t\t\t\t\t(num_vfs + 1));\n@@ -2589,7 +2589,8 @@ static void add_random_mac_if_needed(struct bnxt *bp,\n \t\trte_eth_random_addr(cfg_req->dflt_mac_addr);\n \t\tbp->pf.vf_info[vf].random_mac = true;\n \t} else {\n-\t\tmemcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t}\n }\n \n@@ -3125,7 +3126,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,\n \n \tHWRM_CHECK_RESULT();\n \n-\tmemcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);\n+\tmemcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);\n \n \tHWRM_UNLOCK();\n \n@@ -3696,11 +3697,11 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp,\n \tif (enables &\n \t    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)\n \t\tmemcpy(req.src_macaddr, filter->src_macaddr,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \tif (enables &\n \t    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)\n \t\tmemcpy(req.dst_macaddr, filter->dst_macaddr,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \tif (enables &\n \t    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)\n \t\treq.ovlan_vid = filter->l2_ovlan;\n@@ -3799,11 +3800,11 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,\n \tif (enables &\n \t    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)\n \t\tmemcpy(req.src_macaddr, filter->src_macaddr,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \t//if (enables &\n \t    //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)\n \t\t//memcpy(req.dst_macaddr, filter->dst_macaddr,\n-\t\t       //ETHER_ADDR_LEN);\n+\t\t       //RTE_ETHER_ADDR_LEN);\n \tif (enables &\n \t    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)\n \t\treq.ethertype = rte_cpu_to_be_16(filter->ethertype);\ndiff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c\nindex fcbd6bc6e..51fe35f3e 100644\n--- a/drivers/net/bnxt/bnxt_ring.c\n+++ b/drivers/net/bnxt/bnxt_ring.c\n@@ -344,8 +344,8 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)\n \tbp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;\n 
\tB_RX_DB(rxr->ag_doorbell, rxr->ag_prod);\n \n-\trxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +\n-\t\tETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);\n+\trxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\n+\t\tRTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);\n \n \tif (bp->eth_dev->data->rx_queue_state[queue_index] ==\n \t    RTE_ETH_QUEUE_STATE_STARTED) {\n@@ -452,8 +452,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)\n \t\tbp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;\n \t\tB_RX_DB(rxr->ag_doorbell, rxr->ag_prod);\n \n-\t\trxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +\n-\t\t\t\t\tETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);\n+\t\trxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\n+\t\t\t\t\tRTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);\n \t\tif (bnxt_init_one_rx_ring(rxq)) {\n \t\t\tPMD_DRV_LOG(ERR, \"bnxt_init_one_rx_ring failed!\\n\");\n \t\t\tbnxt_rx_queue_release_op(rxq);\ndiff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c\nindex 17e2909a7..f1ee9eede 100644\n--- a/drivers/net/bnxt/bnxt_rxq.c\n+++ b/drivers/net/bnxt/bnxt_rxq.c\n@@ -334,7 +334,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \trxq->queue_id = queue_idx;\n \trxq->port_id = eth_dev->data->port_id;\n \tif (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \ndiff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c\nindex dc695e177..303549b5d 100644\n--- a/drivers/net/bnxt/bnxt_rxr.c\n+++ b/drivers/net/bnxt/bnxt_rxr.c\n@@ -640,8 +640,8 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)\n \tstruct bnxt_rx_ring_info *rxr;\n \tstruct bnxt_ring *ring;\n \n-\trxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +\n-\t\t\t       (2 * VLAN_TAG_SIZE);\n+\trxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\n+\t\tRTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);\n \trxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);\n \n \trxr = rte_zmalloc_socket(\"bnxt_rx_ring\",\ndiff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c\nindex aebfb1f1c..2cf5f0b5b 100644\n--- a/drivers/net/bnxt/bnxt_vnic.c\n+++ b/drivers/net/bnxt/bnxt_vnic.c\n@@ -116,7 +116,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)\n \tuint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(\n \t\t\t\tHW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +\n \t\t\t\tHW_HASH_KEY_SIZE +\n-\t\t\t\tBNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);\n+\t\t\t\tBNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN);\n \tuint16_t max_vnics;\n \tint i;\n \trte_iova_t mz_phys_addr;\ndiff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c\nindex 5e3d1bfb1..f07789670 100644\n--- a/drivers/net/bnxt/rte_pmd_bnxt.c\n+++ b/drivers/net/bnxt/rte_pmd_bnxt.c\n@@ -698,7 +698,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,\n \t\t    filter->enables ==\n \t\t    (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n \t\t     HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&\n-\t\t    memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {\n+\t\t    memcmp(addr, filter->l2_addr, RTE_ETHER_ADDR_LEN) == 0) {\n \t\t\tbnxt_hwrm_clear_l2_filter(bp, filter);\n \t\t\tbreak;\n \t\t}\n@@ -711,12 +711,12 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,\n \tfilter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;\n \tfilter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n 
\t\t\tHWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;\n-\tmemcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);\n-\tmemset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);\n+\tmemcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN);\n+\tmemset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);\n \n \t/* Do not add a filter for the default MAC */\n \tif (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||\n-\t    memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))\n+\t    memcmp(filter->l2_addr, dflt_mac.addr_bytes, RTE_ETHER_ADDR_LEN))\n \t\trc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter);\n \n exit:\ndiff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c\nindex 6077c3dc8..d764dad33 100644\n--- a/drivers/net/bonding/rte_eth_bond_8023ad.c\n+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c\n@@ -577,7 +577,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)\n \t/* Source and destination MAC */\n \trte_ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);\n \trte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);\n-\thdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);\n+\thdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);\n \n \tlacpdu = &hdr->lacpdu;\n \tmemset(lacpdu, 0, sizeof(*lacpdu));\ndiff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c\nindex 00d6b50e4..a1c577b9e 100644\n--- a/drivers/net/bonding/rte_eth_bond_alb.c\n+++ b/drivers/net/bonding/rte_eth_bond_alb.c\n@@ -216,9 +216,9 @@ bond_mode_alb_arp_upd(struct client_data *client_info,\n \trte_ether_addr_copy(&client_info->app_mac, &eth_h->s_addr);\n \trte_ether_addr_copy(&client_info->cli_mac, &eth_h->d_addr);\n \tif (client_info->vlan_count > 0)\n-\t\teth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\t\teth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \telse\n-\t\teth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);\n+\t\teth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);\n \n \tarp_h = (struct rte_arp_hdr *)(\n \t\t(char *)eth_h + sizeof(struct rte_ether_hdr)\n@@ -233,8 +233,8 @@ bond_mode_alb_arp_upd(struct client_data *client_info,\n \tarp_h->arp_data.arp_tip = client_info->cli_ip;\n \n \tarp_h->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);\n-\tarp_h->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n-\tarp_h->arp_hlen = ETHER_ADDR_LEN;\n+\tarp_h->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n+\tarp_h->arp_hlen = RTE_ETHER_ADDR_LEN;\n \tarp_h->arp_plen = sizeof(uint32_t);\n \tarp_h->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);\n \ndiff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c\nindex e83e668ad..79962b43a 100644\n--- a/drivers/net/bonding/rte_eth_bond_pmd.c\n+++ b/drivers/net/bonding/rte_eth_bond_pmd.c\n@@ -37,15 +37,15 @@ get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)\n {\n \tsize_t vlan_offset = 0;\n \n-\tif (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||\n-\t\trte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {\n+\tif (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||\n+\t\trte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {\n \t\tstruct rte_vlan_hdr *vlan_hdr =\n \t\t\t(struct rte_vlan_hdr *)(eth_hdr + 1);\n \n \t\tvlan_offset = sizeof(struct rte_vlan_hdr);\n \t\t*proto = vlan_hdr->eth_proto;\n \n-\t\tif (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {\n+\t\tif (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {\n \t\t\tvlan_hdr = vlan_hdr + 1;\n \t\t\t*proto = vlan_hdr->eth_proto;\n 
\t\t\tvlan_offset += sizeof(struct rte_vlan_hdr);\n@@ -108,7 +108,8 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,\n static inline uint8_t\n is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)\n {\n-\tconst uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);\n+\tconst uint16_t ether_type_slow_be =\n+\t\trte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);\n \n \treturn !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&\n \t\t(ethertype == ether_type_slow_be &&\n@@ -122,7 +123,7 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)\n static struct rte_flow_item_eth flow_item_eth_type_8023ad = {\n \t.dst.addr_bytes = { 0 },\n \t.src.addr_bytes = { 0 },\n-\t.type = RTE_BE16(ETHER_TYPE_SLOW),\n+\t.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),\n };\n \n static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {\n@@ -398,7 +399,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \tstruct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;\n \tstruct rte_ether_hdr *hdr;\n \n-\tconst uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);\n+\tconst uint16_t ether_type_slow_be =\n+\t\trte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);\n \tuint16_t num_rx_total = 0;\t/* Total number of received packets */\n \tuint16_t slaves[RTE_MAX_ETHPORTS];\n \tuint16_t slave_count, idx;\n@@ -605,7 +607,7 @@ mode6_debug(const char __attribute__((unused)) *info,\n \tstrlcpy(buf, info, 16);\n #endif\n \n-\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {\n \t\tipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);\n \t\tipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);\n #ifdef RTE_LIBRTE_BOND_DEBUG_ALB\n@@ -615,7 +617,7 @@ mode6_debug(const char __attribute__((unused)) *info,\n \t\tupdate_client_stats(ipv4_h->src_addr, port, burstnumber);\n \t}\n #ifdef RTE_LIBRTE_BOND_DEBUG_ALB\n-\telse if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {\n+\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {\n \t\tarp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);\n \t\tipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);\n \t\tipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);\n@@ -644,14 +646,14 @@ bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\tether_type = eth_h->ether_type;\n \t\toffset = get_vlan_offset(eth_h, &ether_type);\n \n-\t\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {\n+\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {\n #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)\n \t\t\tmode6_debug(\"RX ARP:\", eth_h, bufs[i]->port, &burstnumberRX);\n #endif\n \t\t\tbond_mode_alb_arp_recv(eth_h, offset, internals);\n \t\t}\n #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)\n-\t\telse if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\t\tmode6_debug(\"RX IPv4:\", eth_h, bufs[i]->port, &burstnumberRX);\n #endif\n \t}\n@@ -809,12 +811,12 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,\n \n \t\tvlan_offset = get_vlan_offset(eth_hdr, &proto);\n \n-\t\tif (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {\n+\t\tif (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {\n \t\t\tstruct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)\n \t\t\t\t\t((char *)(eth_hdr + 1) + vlan_offset);\n \t\t\tl3hash = 
ipv4_hash(ipv4_hdr);\n \n-\t\t} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {\n+\t\t} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {\n \t\t\tstruct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)\n \t\t\t\t\t((char *)(eth_hdr + 1) + vlan_offset);\n \t\t\tl3hash = ipv6_hash(ipv6_hdr);\n@@ -849,7 +851,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,\n \t\tl3hash = 0;\n \t\tl4hash = 0;\n \n-\t\tif (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {\n+\t\tif (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {\n \t\t\tstruct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)\n \t\t\t\t\t((char *)(eth_hdr + 1) + vlan_offset);\n \t\t\tsize_t ip_hdr_offset;\n@@ -880,7 +882,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,\n \t\t\t\t\t\tl4hash = HASH_L4_PORTS(udp_hdr);\n \t\t\t\t}\n \t\t\t}\n-\t\t} else if  (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {\n+\t\t} else if  (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {\n \t\t\tstruct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)\n \t\t\t\t\t((char *)(eth_hdr + 1) + vlan_offset);\n \t\t\tl3hash = ipv6_hash(ipv6_hdr);\n@@ -1107,7 +1109,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\tether_type = eth_h->ether_type;\n \t\toffset = get_vlan_offset(eth_h, &ether_type);\n \n-\t\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {\n+\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {\n \t\t\tslave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);\n \n \t\t\t/* Change src mac in eth header */\n@@ -2252,7 +2254,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \n \tdev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?\n \t\t\tinternals->candidate_max_rx_pktlen :\n-\t\t\tETHER_MAX_JUMBO_FRAME_LEN;\n+\t\t\tRTE_ETHER_MAX_JUMBO_FRAME_LEN;\n \n \t/* Max number of tx/rx queues that the bonded device can support is the\n \t * minimum values of the bonded slaves, as all slaves must be capable\n@@ -3084,12 +3086,12 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)\n \teth_dev->data->nb_tx_queues = (uint16_t)1;\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *\n+\teth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *\n \t\t\tBOND_MAX_MAC_ADDRS, 0, socket_id);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tRTE_BOND_LOG(ERR,\n \t\t\t     \"Failed to allocate %u bytes needed to store MAC addresses\",\n-\t\t\t     ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);\n+\t\t\t     RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);\n \t\tgoto err;\n \t}\n \n@@ -3148,7 +3150,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)\n \t}\n \n \tvlan_filter_bmp_size =\n-\t\trte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);\n+\t\trte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);\n \tinternals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,\n \t\t\t\t\t\t   RTE_CACHE_LINE_SIZE);\n \tif (internals->vlan_filter_bmpmem == NULL) {\n@@ -3158,7 +3160,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)\n \t\tgoto err;\n \t}\n \n-\tinternals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,\n+\tinternals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,\n \t\t\tinternals->vlan_filter_bmpmem, vlan_filter_bmp_size);\n \tif (internals->vlan_filter_bmp == NULL) {\n \t\tRTE_BOND_LOG(ERR,\ndiff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h\nindex 6af3ee79b..3f97fa58b 100644\n--- a/drivers/net/cxgbe/cxgbe.h\n+++ 
b/drivers/net/cxgbe/cxgbe.h\n@@ -15,8 +15,9 @@\n #define CXGBE_DEFAULT_TX_DESC_SIZE    1024 /* Default TX ring size */\n #define CXGBE_DEFAULT_RX_DESC_SIZE    1024 /* Default RX ring size */\n \n-#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */\n-#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */\n+#define CXGBE_MIN_RX_BUFSIZE RTE_ETHER_MIN_MTU /* min buf size */\n+#define CXGBE_MAX_RX_PKTLEN (9000 + RTE_ETHER_HDR_LEN + \\\n+\t\t\t\tRTE_ETHER_CRC_LEN) /* max pkt */\n \n /* Max poll time is 100 * 100msec = 10 sec */\n #define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */\ndiff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h\nindex edc8ea57d..cce51c2f9 100644\n--- a/drivers/net/cxgbe/cxgbe_compat.h\n+++ b/drivers/net/cxgbe/cxgbe_compat.h\n@@ -101,6 +101,7 @@\n #define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a)))\n \n #define VLAN_HLEN 4\n+#define ETHER_ADDR_LEN 6\n \n #define rmb()     rte_rmb() /* dpdk rte provided rmb */\n #define wmb()     rte_wmb() /* dpdk rte provided wmb */\ndiff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c\nindex e36936959..c03559640 100644\n--- a/drivers/net/cxgbe/cxgbe_ethdev.c\n+++ b/drivers/net/cxgbe/cxgbe_ethdev.c\n@@ -277,16 +277,16 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)\n \tstruct adapter *adapter = pi->adapter;\n \tstruct rte_eth_dev_info dev_info;\n \tint err;\n-\tuint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tuint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;\n \n \tcxgbe_dev_info_get(eth_dev, &dev_info);\n \n-\t/* Must accommodate at least ETHER_MIN_MTU */\n-\tif ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))\n+\t/* Must accommodate at least RTE_ETHER_MIN_MTU */\n+\tif (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)\n \t\treturn -EINVAL;\n \n \t/* set to jumbo mode if needed */\n-\tif (new_mtu > ETHER_MAX_LEN)\n+\tif (new_mtu > RTE_ETHER_MAX_LEN)\n \t\teth_dev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -587,7 +587,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \n \tcxgbe_dev_info_get(eth_dev, &dev_info);\n \n-\t/* Must accommodate at least ETHER_MIN_MTU */\n+\t/* Must accommodate at least RTE_ETHER_MIN_MTU */\n \tif ((pkt_len < dev_info.min_rx_bufsize) ||\n \t    (pkt_len > dev_info.max_rx_pktlen)) {\n \t\tdev_err(adap, \"%s: max pkt len must be > %d and <= %d\\n\",\n@@ -626,7 +626,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \t\trxq->fl.size = temp_nb_desc;\n \n \t/* Set to jumbo mode if necessary */\n-\tif (pkt_len > ETHER_MAX_LEN)\n+\tif (pkt_len > RTE_ETHER_MAX_LEN)\n \t\teth_dev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\ndiff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h\nindex 6f6e25c63..0c67d2d15 100644\n--- a/drivers/net/cxgbe/cxgbe_filter.h\n+++ b/drivers/net/cxgbe/cxgbe_filter.h\n@@ -102,7 +102,7 @@ struct ch_filter_specification {\n \tuint32_t eport:2;\t/* egress port to switch packet out */\n \tuint32_t swapmac:1;     /* swap SMAC/DMAC for loopback packet */\n \tuint32_t newvlan:2;     /* rewrite VLAN Tag */\n-\tuint8_t dmac[ETHER_ADDR_LEN];   /* new destination MAC address */\n+\tuint8_t dmac[RTE_ETHER_ADDR_LEN];   /* new destination MAC address */\n \tuint16_t vlan;          /* VLAN Tag to insert */\n \n \t/*\ndiff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c\nindex 
a7b053b4a..4f23468b8 100644\n--- a/drivers/net/cxgbe/cxgbe_flow.c\n+++ b/drivers/net/cxgbe/cxgbe_flow.c\n@@ -233,7 +233,7 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,\n \t\t\t\t\t  item, \"ttl/tos are not supported\");\n \n \tfs->type = FILTER_TYPE_IPV4;\n-\tCXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);\n+\tCXGBE_FILL_FS(RTE_ETHER_TYPE_IPv4, 0xffff, ethtype);\n \tif (!val)\n \t\treturn 0; /* ipv4 wild card */\n \n@@ -262,7 +262,7 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,\n \t\t\t\t\t  \"tc/flow/hop are not supported\");\n \n \tfs->type = FILTER_TYPE_IPV6;\n-\tCXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);\n+\tCXGBE_FILL_FS(RTE_ETHER_TYPE_IPv6, 0xffff, ethtype);\n \tif (!val)\n \t\treturn 0; /* ipv6 wild card */\n \n@@ -448,7 +448,7 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,\n \tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\n \t\tpushvlan = (const struct rte_flow_action_of_push_vlan *)\n \t\t\t    a->conf;\n-\t\tif (pushvlan->ethertype != ETHER_TYPE_VLAN)\n+\t\tif (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN)\n \t\t\treturn rte_flow_error_set(e, EINVAL,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, a,\n \t\t\t\t\t\t  \"only ethertype 0x8100 \"\ndiff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c\nindex 28c3c66ba..b5d5cd081 100644\n--- a/drivers/net/cxgbe/cxgbe_main.c\n+++ b/drivers/net/cxgbe/cxgbe_main.c\n@@ -1348,7 +1348,7 @@ int cxgbe_link_start(struct port_info *pi)\n \tint ret;\n \n \tmtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -\n-\t      (ETHER_HDR_LEN + ETHER_CRC_LEN);\n+\t      (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);\n \n \tconf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;\n \n@@ -1841,7 +1841,7 @@ int cxgbe_probe(struct adapter *adapter)\n \t\trte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);\n \n \t\tpi->eth_dev->data->mac_addrs = rte_zmalloc(name,\n-\t\t\t\t\t\t\t   ETHER_ADDR_LEN, 0);\n+\t\t\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \t\tif (!pi->eth_dev->data->mac_addrs) {\n \t\t\tdev_err(adapter, \"%s: Mem allocation failed for storing mac addr, aborting\\n\",\n \t\t\t\t__func__);\ndiff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c\nindex 0ac2d98eb..d3345508f 100644\n--- a/drivers/net/cxgbe/cxgbevf_main.c\n+++ b/drivers/net/cxgbe/cxgbevf_main.c\n@@ -245,7 +245,7 @@ int cxgbevf_probe(struct adapter *adapter)\n \n \t\trte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);\n \t\tpi->eth_dev->data->mac_addrs = rte_zmalloc(name,\n-\t\t\t\t\t\t\t   ETHER_ADDR_LEN, 0);\n+\t\t\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \t\tif (!pi->eth_dev->data->mac_addrs) {\n \t\t\tdev_err(adapter, \"%s: Mem allocation failed for storing mac addr, aborting\\n\",\n \t\t\t\t__func__);\ndiff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c\nindex 27cdf6f33..6faf624f7 100644\n--- a/drivers/net/cxgbe/l2t.c\n+++ b/drivers/net/cxgbe/l2t.c\n@@ -81,10 +81,10 @@ static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,\n \t\t\t\t  V_L2T_W_NOREPLY(!sync));\n \treq->l2t_idx = cpu_to_be16(l2t_idx);\n \treq->vlan = cpu_to_be16(e->vlan);\n-\trte_memcpy(req->dst_mac, e->dmac, ETHER_ADDR_LEN);\n+\trte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN);\n \n \tif (loopback)\n-\t\tmemset(req->dst_mac, 0, ETHER_ADDR_LEN);\n+\t\tmemset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN);\n \n \tt4_mgmt_tx(ctrlq, mbuf);\n \n@@ -116,7 +116,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,\n \t\t\t\tfirst_free = e;\n \t\t} else {\n \t\t\tif 
(e->state == L2T_STATE_SWITCHING) {\n-\t\t\t\tif ((!memcmp(e->dmac, dmac, ETHER_ADDR_LEN)) &&\n+\t\t\t\tif ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) &&\n \t\t\t\t    e->vlan == vlan && e->lport == port)\n \t\t\t\t\tgoto exists;\n \t\t\t}\n@@ -154,7 +154,7 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,\n \t\t\te->state = L2T_STATE_SWITCHING;\n \t\t\te->vlan = vlan;\n \t\t\te->lport = port;\n-\t\t\trte_memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);\n+\t\t\trte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);\n \t\t\trte_atomic32_set(&e->refcnt, 1);\n \t\t\tret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);\n \t\t\tif (ret < 0)\ndiff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h\nindex ee40dc1f6..326abfde4 100644\n--- a/drivers/net/cxgbe/l2t.h\n+++ b/drivers/net/cxgbe/l2t.h\n@@ -28,7 +28,7 @@ struct l2t_entry {\n \tu16 idx;                    /* entry index within in-memory table */\n \tu16 vlan;                   /* VLAN TCI (id: bits 0-11, prio: 13-15 */\n \tu8  lport;                  /* destination port */\n-\tu8  dmac[ETHER_ADDR_LEN];   /* destination MAC address */\n+\tu8  dmac[RTE_ETHER_ADDR_LEN];   /* destination MAC address */\n \trte_spinlock_t lock;        /* entry lock */\n \trte_atomic32_t refcnt;      /* entry reference count */\n };\ndiff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c\nindex 71c8070b3..5302d1343 100644\n--- a/drivers/net/cxgbe/mps_tcam.c\n+++ b/drivers/net/cxgbe/mps_tcam.c\n@@ -8,8 +8,8 @@\n static inline bool\n match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask)\n {\n-\tif (!memcmp(eth_addr, entry->eth_addr, ETHER_ADDR_LEN) &&\n-\t    !memcmp(mask, entry->mask, ETHER_ADDR_LEN))\n+\tif (!memcmp(eth_addr, entry->eth_addr, RTE_ETHER_ADDR_LEN) &&\n+\t    !memcmp(mask, entry->mask, RTE_ETHER_ADDR_LEN))\n \t\treturn true;\n \treturn false;\n }\n@@ -95,8 +95,8 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,\n \n \t/* Fill in the new values */\n \tentry = &mpstcam->entry[ret];\n-\tmemcpy(entry->eth_addr, eth_addr, ETHER_ADDR_LEN);\n-\tmemcpy(entry->mask, mask, ETHER_ADDR_LEN);\n+\tmemcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);\n+\tmemcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);\n \trte_atomic32_set(&entry->refcnt, 1);\n \tentry->state = MPS_ENTRY_USED;\n \n@@ -139,7 +139,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)\n \n \t/* idx can now be different from what user provided */\n \tentry = &mpstcam->entry[idx];\n-\tmemcpy(entry->eth_addr, addr, ETHER_ADDR_LEN);\n+\tmemcpy(entry->eth_addr, addr, RTE_ETHER_ADDR_LEN);\n \t/* NOTE: we have considered the case that idx returned by t4_change_mac\n \t * will be different from the user provided value only if user\n \t * provided value is -1\n@@ -161,8 +161,8 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)\n  */\n static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)\n {\n-\tmemset(entry->eth_addr, 0, ETHER_ADDR_LEN);\n-\tmemset(entry->mask, 0, ETHER_ADDR_LEN);\n+\tmemset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);\n+\tmemset(entry->mask, 0, RTE_ETHER_ADDR_LEN);\n \trte_atomic32_clear(&entry->refcnt);\n \tentry->state = MPS_ENTRY_UNUSED;\n }\ndiff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h\nindex f86bac7bd..3d1e8d3db 100644\n--- a/drivers/net/cxgbe/mps_tcam.h\n+++ b/drivers/net/cxgbe/mps_tcam.h\n@@ -24,8 +24,8 @@ struct mps_tcam_entry {\n \tu16 idx;\n \n \t/* add data here which uniquely defines an entry */\n-\tu8 
eth_addr[ETHER_ADDR_LEN];\n-\tu8 mask[ETHER_ADDR_LEN];\n+\tu8 eth_addr[RTE_ETHER_ADDR_LEN];\n+\tu8 mask[RTE_ETHER_ADDR_LEN];\n \n \tstruct mpstcam_table *mpstcam; /* backptr */\n \trte_atomic32_t refcnt;\ndiff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c\nindex 3a0eba5df..b3c4ec2eb 100644\n--- a/drivers/net/cxgbe/sge.c\n+++ b/drivers/net/cxgbe/sge.c\n@@ -73,7 +73,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,\n {\n \tstruct sge *s = &adapter->sge;\n \n-\treturn CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,\n+\treturn CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,\n \t\t\t   s->fl_align);\n }\n \n@@ -1128,7 +1128,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n \t * The chip min packet length is 10 octets but play safe and reject\n \t * anything shorter than an Ethernet header.\n \t */\n-\tif (unlikely(m->pkt_len < ETHER_HDR_LEN)) {\n+\tif (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {\n out_free:\n \t\trte_pktmbuf_free(m);\n \t\treturn 0;\n@@ -1145,7 +1145,8 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n \t/* align the end of coalesce WR to a 512 byte boundary */\n \ttxq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;\n \n-\tif (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {\n+\tif (!((m->ol_flags & PKT_TX_TCP_SEG) ||\n+\t\t\tm->pkt_len > RTE_ETHER_MAX_LEN)) {\n \t\tif (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {\n \t\t\tif (unlikely(map_mbuf(mbuf, addr) < 0)) {\n \t\t\t\tdev_warn(adap, \"%s: mapping err for coalesce\\n\",\n@@ -1230,7 +1231,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n \t\tv6 = (m->ol_flags & PKT_TX_IPV6) != 0;\n \t\tl3hdr_len = m->l3_len;\n \t\tl4hdr_len = m->l4_len;\n-\t\teth_xtra_len = m->l2_len - ETHER_HDR_LEN;\n+\t\teth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;\n \t\tlen += sizeof(*lso);\n \t\twr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?\n \t\t\t\t\t\t  FW_ETH_TX_PKT_WR :\ndiff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c\nindex 6716dd0cf..4b0027929 100644\n--- a/drivers/net/dpaa/dpaa_ethdev.c\n+++ b/drivers/net/dpaa/dpaa_ethdev.c\n@@ -146,13 +146,13 @@ static int\n dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n {\n \tstruct dpaa_if *dpaa_intf = dev->data->dev_private;\n-\tuint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN\n+\tuint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN\n \t\t\t\t+ VLAN_TAG_SIZE;\n \tuint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\tif (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)\n \t\treturn -EINVAL;\n \t/*\n \t * Refuse mtu that requires the support of scattered packets\n@@ -172,7 +172,7 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EINVAL;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev->data->dev_conf.rxmode.offloads &=\n \t\t\t\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -230,7 +230,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)\n \n \t\tfman_if_set_maxfrm(dpaa_intf->fif, max_len);\n \t\tdev->data->mtu = max_len\n-\t\t\t\t- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;\n+\t\t\t- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;\n \t}\n \n \tif (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {\n@@ -1364,11 +1364,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \n \t/* Allocate memory for storing MAC 
addresses */\n \teth_dev->data->mac_addrs = rte_zmalloc(\"mac_addr\",\n-\t\tETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);\n+\t\tRTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tDPAA_PMD_ERR(\"Failed to allocate %d bytes needed to \"\n \t\t\t\t\t\t\"store MAC addresses\",\n-\t\t\t\tETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);\n+\t\t\t\tRTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);\n \t\tret = -ENOMEM;\n \t\tgoto free_tx;\n \t}\n@@ -1396,7 +1396,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \tfman_if_stats_reset(fman_intf);\n \t/* Disable SG by default */\n \tfman_if_set_sg(fman_intf, 0);\n-\tfman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);\n+\tfman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);\n \n \treturn 0;\n \ndiff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c\nindex 7f174f7fb..6699b768c 100644\n--- a/drivers/net/dpaa/dpaa_rxtx.c\n+++ b/drivers/net/dpaa/dpaa_rxtx.c\n@@ -222,10 +222,10 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf)\n \t\tstruct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +\n \t\t\t\t\t  mbuf->l3_len);\n \t\ttcp_hdr->cksum = 0;\n-\t\tif (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))\n+\t\tif (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))\n \t\t\ttcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,\n \t\t\t\t\t\t\t       tcp_hdr);\n-\t\telse /* assume ethertype == ETHER_TYPE_IPv6 */\n+\t\telse /* assume ethertype == RTE_ETHER_TYPE_IPv6 */\n \t\t\ttcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,\n \t\t\t\t\t\t\t       tcp_hdr);\n \t} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==\n@@ -233,10 +233,10 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf)\n \t\tstruct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +\n \t\t\t\t\t\t\t     mbuf->l3_len);\n \t\tudp_hdr->dgram_cksum = 0;\n-\t\tif (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))\n+\t\tif (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))\n \t\t\tudp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,\n \t\t\t\t\t\t\t\t     udp_hdr);\n-\t\telse /* assume ethertype == ETHER_TYPE_IPv6 */\n+\t\telse /* assume ethertype == RTE_ETHER_TYPE_IPv6 */\n \t\t\tudp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,\n \t\t\t\t\t\t\t\t     udp_hdr);\n \t}\ndiff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c\nindex 1afeaa919..d668f3e5d 100644\n--- a/drivers/net/dpaa2/dpaa2_ethdev.c\n+++ b/drivers/net/dpaa2/dpaa2_ethdev.c\n@@ -1086,7 +1086,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tint ret;\n \tstruct dpaa2_dev_priv *priv = dev->data->dev_private;\n \tstruct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;\n-\tuint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN\n+\tuint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN\n \t\t\t\t+ VLAN_TAG_SIZE;\n \n \tPMD_INIT_FUNC_TRACE();\n@@ -1097,10 +1097,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t}\n \n \t/* check that mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)\n \t\treturn -EINVAL;\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev->data->dev_conf.rxmode.offloads &=\n \t\t\t\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -2186,11 +2186,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)\n \t * can add MAC entries when rte_eth_dev_mac_addr_add is called.\n \t */\n \teth_dev->data->mac_addrs = 
rte_zmalloc(\"dpni\",\n-\t\tETHER_ADDR_LEN * attr.mac_filter_entries, 0);\n+\t\tRTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tDPAA2_PMD_ERR(\n \t\t   \"Failed to allocate %d bytes needed to store MAC addresses\",\n-\t\t   ETHER_ADDR_LEN * attr.mac_filter_entries);\n+\t\t   RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);\n \t\tret = -ENOMEM;\n \t\tgoto init_err;\n \t}\ndiff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h\nindex 3e74cd8fe..67acb7317 100644\n--- a/drivers/net/e1000/e1000_ethdev.h\n+++ b/drivers/net/e1000/e1000_ethdev.h\n@@ -92,7 +92,8 @@\n  * The overhead from MTU to max frame size.\n  * Considering VLAN so a tag needs to be counted.\n  */\n-#define E1000_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE)\n+#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \\\n+\t\t\t\tVLAN_TAG_SIZE)\n \n /*\n  * Maximum number of Ring Descriptors.\n@@ -155,7 +156,7 @@ struct e1000_vfta {\n  */\n #define E1000_MAX_VF_MC_ENTRIES         30\n struct e1000_vf_info {\n-\tuint8_t vf_mac_addresses[ETHER_ADDR_LEN];\n+\tuint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];\n \tuint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];\n \tuint16_t num_vf_mc_hashes;\n \tuint16_t default_vf_vlan_id;\ndiff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c\nindex cd70f2802..dc886613a 100644\n--- a/drivers/net/e1000/em_ethdev.c\n+++ b/drivers/net/e1000/em_ethdev.c\n@@ -284,12 +284,12 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"e1000\", ETHER_ADDR_LEN *\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"e1000\", RTE_ETHER_ADDR_LEN *\n \t\t\thw->mac.rar_entry_count, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d bytes needed to \"\n \t\t\t\"store MAC addresses\",\n-\t\t\tETHER_ADDR_LEN * hw->mac.rar_entry_count);\n+\t\t\tRTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -576,7 +576,7 @@ eth_em_start(struct rte_eth_dev *dev)\n \t\treturn -EIO;\n \t}\n \n-\tE1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);\n+\tE1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);\n \n \t/* Configure for OS presence */\n \tem_init_manageability(hw);\n@@ -821,7 +821,8 @@ em_hardware_init(struct e1000_hw *hw)\n \t */\n \trx_buf_size = em_get_rx_buffer_size(hw);\n \n-\thw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);\n+\thw->fc.high_water = rx_buf_size -\n+\t\tPMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);\n \thw->fc.low_water = hw->fc.high_water - 1500;\n \n \tif (hw->mac.type == e1000_80003es2lan)\n@@ -1037,7 +1038,7 @@ em_get_max_pktlen(struct rte_eth_dev *dev)\n \t\treturn 0x1000;\n \t/* Adapters that do not support jumbo frames */\n \tcase e1000_ich8lan:\n-\t\treturn ETHER_MAX_LEN;\n+\t\treturn RTE_ETHER_MAX_LEN;\n \tdefault:\n \t\treturn MAX_JUMBO_FRAME_SIZE;\n \t}\n@@ -1697,7 +1698,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n \tPMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x\", rx_buf_size);\n \n \t/* At least reserve one Ethernet frame for watermark */\n-\tmax_high_water = rx_buf_size - ETHER_MAX_LEN;\n+\tmax_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;\n \tif ((fc_conf->high_water > max_high_water) ||\n \t    (fc_conf->high_water < fc_conf->low_water)) {\n \t\tPMD_INIT_LOG(ERR, \"e1000 incorrect high/low water value\");\n@@ -1747,7 +1748,7 @@ eth_em_rar_set(struct 
rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n static void\n eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)\n {\n-\tuint8_t addr[ETHER_ADDR_LEN];\n+\tuint8_t addr[RTE_ETHER_ADDR_LEN];\n \tstruct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tmemset(addr, 0, sizeof(addr));\n@@ -1773,10 +1774,11 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tuint32_t rctl;\n \n \teth_em_infos_get(dev, &dev_info);\n-\tframe_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;\n+\tframe_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +\n+\t\tVLAN_TAG_SIZE;\n \n \t/* check that mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)\n \t\treturn -EINVAL;\n \n \t/* refuse mtu that requires the support of scattered packets when this\n@@ -1789,7 +1791,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \trctl = E1000_READ_REG(hw, E1000_RCTL);\n \n \t/* switch to jumbo mode if needed */\n-\tif (frame_size > ETHER_MAX_LEN) {\n+\tif (frame_size > RTE_ETHER_MAX_LEN) {\n \t\tdev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \t\trctl |= E1000_RCTL_LPE;\ndiff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c\nindex 005e1ea96..1da56b229 100644\n--- a/drivers/net/e1000/em_rxtx.c\n+++ b/drivers/net/e1000/em_rxtx.c\n@@ -1005,17 +1005,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t */\n \t\trxm->next = NULL;\n \t\tif (unlikely(rxq->crc_len > 0)) {\n-\t\t\tfirst_seg->pkt_len -= ETHER_CRC_LEN;\n-\t\t\tif (data_len <= ETHER_CRC_LEN) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (data_len <= RTE_ETHER_CRC_LEN) {\n \t\t\t\trte_pktmbuf_free_seg(rxm);\n \t\t\t\tfirst_seg->nb_segs--;\n \t\t\t\tlast_seg->data_len = (uint16_t)\n \t\t\t\t\t(last_seg->data_len -\n-\t\t\t\t\t (ETHER_CRC_LEN - data_len));\n+\t\t\t\t\t (RTE_ETHER_CRC_LEN - data_len));\n \t\t\t\tlast_seg->next = NULL;\n \t\t\t} else\n-\t\t\t\trxm->data_len =\n-\t\t\t\t\t(uint16_t) (data_len - ETHER_CRC_LEN);\n+\t\t\t\trxm->data_len = (uint16_t)\n+\t\t\t\t\t(data_len - RTE_ETHER_CRC_LEN);\n \t\t}\n \n \t\t/*\n@@ -1368,7 +1368,7 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)\n \t\tDEV_RX_OFFLOAD_TCP_CKSUM   |\n \t\tDEV_RX_OFFLOAD_KEEP_CRC    |\n \t\tDEV_RX_OFFLOAD_SCATTER;\n-\tif (max_rx_pktlen > ETHER_MAX_LEN)\n+\tif (max_rx_pktlen > RTE_ETHER_MAX_LEN)\n \t\trx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \n \treturn rx_offload_capa;\n@@ -1463,7 +1463,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->queue_id = queue_idx;\n \trxq->port_id = dev->data->port_id;\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \n@@ -1799,7 +1799,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)\n \t\t *  call to configure\n \t\t */\n \t\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \t\telse\n \t\t\trxq->crc_len = 0;\n \n@@ -1832,7 +1832,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)\n \t\t * one buffer.\n \t\t */\n \t\tif (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||\n-\t\t\t\trctl_bsize < ETHER_MAX_LEN) {\n+\t\t\t\trctl_bsize < RTE_ETHER_MAX_LEN) {\n \t\t\tif (!dev->data->scattered_rx)\n \t\t\t\tPMD_INIT_LOG(DEBUG, \"forcing scatter mode\");\n 
\t\t\tdev->rx_pkt_burst =\ndiff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c\nindex 03047afa7..bd1c64c03 100644\n--- a/drivers/net/e1000/igb_ethdev.c\n+++ b/drivers/net/e1000/igb_ethdev.c\n@@ -830,11 +830,11 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)\n \n \t/* Allocate memory for storing MAC addresses */\n \teth_dev->data->mac_addrs = rte_zmalloc(\"e1000\",\n-\t\tETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);\n+\t\tRTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d bytes needed to \"\n \t\t\t\t\t\t\"store MAC addresses\",\n-\t\t\t\tETHER_ADDR_LEN * hw->mac.rar_entry_count);\n+\t\t\t\tRTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);\n \t\terror = -ENOMEM;\n \t\tgoto err_late;\n \t}\n@@ -1028,13 +1028,13 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)\n \tdiag = hw->mac.ops.reset_hw(hw);\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"igbvf\", ETHER_ADDR_LEN *\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"igbvf\", RTE_ETHER_ADDR_LEN *\n \t\thw->mac.rar_entry_count, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t\"Failed to allocate %d bytes needed to store MAC \"\n \t\t\t\"addresses\",\n-\t\t\tETHER_ADDR_LEN * hw->mac.rar_entry_count);\n+\t\t\tRTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1322,7 +1322,8 @@ eth_igb_start(struct rte_eth_dev *dev)\n \t}\n \tadapter->stopped = 0;\n \n-\tE1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);\n+\tE1000_WRITE_REG(hw, E1000_VET,\n+\t\t\tRTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);\n \n \tctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);\n \t/* Set PF Reset Done bit so PF/VF Mail Ops can work */\n@@ -1689,7 +1690,7 @@ igb_hardware_init(struct e1000_hw *hw)\n \t */\n \trx_buf_size = igb_get_rx_buffer_size(hw);\n \n-\thw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);\n+\thw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);\n \thw->fc.low_water = hw->fc.high_water - 1500;\n \thw->fc.pause_time = IGB_FC_PAUSE_TIME;\n \thw->fc.send_xon = 1;\n@@ -1708,7 +1709,8 @@ igb_hardware_init(struct e1000_hw *hw)\n \tif (diag < 0)\n \t\treturn diag;\n \n-\tE1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);\n+\tE1000_WRITE_REG(hw, E1000_VET,\n+\t\t\tRTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);\n \te1000_get_phy_info(hw);\n \te1000_check_for_link(hw);\n \n@@ -1772,10 +1774,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)\n \t/* Workaround CRC bytes included in size, take away 4 bytes/packet */\n \tstats->gorc += E1000_READ_REG(hw, E1000_GORCL);\n \tstats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);\n-\tstats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;\n+\tstats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;\n \tstats->gotc += E1000_READ_REG(hw, E1000_GOTCL);\n \tstats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);\n-\tstats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;\n+\tstats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;\n \n \tstats->rnbc += E1000_READ_REG(hw, E1000_RNBC);\n \tstats->ruc += E1000_READ_REG(hw, E1000_RUC);\n@@ -1788,10 +1790,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)\n \n \tstats->tor += E1000_READ_REG(hw, E1000_TORL);\n \tstats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);\n-\tstats->tor -= (stats->tpr - old_tpr) 
* ETHER_CRC_LEN;\n+\tstats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;\n \tstats->tot += E1000_READ_REG(hw, E1000_TOTL);\n \tstats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);\n-\tstats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;\n+\tstats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;\n \n \tstats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);\n \tstats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);\n@@ -1825,10 +1827,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)\n \tstats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);\n \tstats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);\n \tstats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);\n-\tstats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;\n+\tstats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;\n \tstats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);\n \tstats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);\n-\tstats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;\n+\tstats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;\n \tstats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);\n \tstats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);\n \tstats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);\n@@ -2288,7 +2290,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t\tETH_LINK_SPEED_1G;\n \n \tdev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;\n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \n }\n \n@@ -3081,7 +3083,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n \tPMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x\", rx_buf_size);\n \n \t/* At least reserve one Ethernet frame for watermark */\n-\tmax_high_water = rx_buf_size - ETHER_MAX_LEN;\n+\tmax_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;\n \tif ((fc_conf->high_water > max_high_water) ||\n \t    (fc_conf->high_water < fc_conf->low_water)) {\n \t\tPMD_INIT_LOG(ERR, \"e1000 incorrect high/low water value\");\n@@ -3137,7 +3139,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n static void\n eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)\n {\n-\tuint8_t addr[ETHER_ADDR_LEN];\n+\tuint8_t addr[RTE_ETHER_ADDR_LEN];\n \tstruct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tmemset(addr, 0, sizeof(addr));\n@@ -4485,8 +4487,8 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \teth_igb_infos_get(dev, &dev_info);\n \n \t/* check that mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) ||\n-\t    (frame_size > dev_info.max_rx_pktlen))\n+\tif (mtu < RTE_ETHER_MIN_MTU ||\n+\t\t\tframe_size > dev_info.max_rx_pktlen)\n \t\treturn -EINVAL;\n \n \t/* refuse mtu that requires the support of scattered packets when this\n@@ -4498,7 +4500,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \trctl = E1000_READ_REG(hw, E1000_RCTL);\n \n \t/* switch to jumbo mode if needed */\n-\tif (frame_size > ETHER_MAX_LEN) {\n+\tif (frame_size > RTE_ETHER_MAX_LEN) {\n \t\tdev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \t\trctl |= E1000_RCTL_LPE;\n@@ -4744,8 +4746,8 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,\n \tuint32_t etqf = 0;\n \tint ret;\n \n-\tif (filter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\tfilter->ether_type == ETHER_TYPE_IPv6) {\n+\tif (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\tfilter->ether_type == RTE_ETHER_TYPE_IPv6) {\n 
\t\tPMD_DRV_LOG(ERR, \"unsupported ether_type(0x%04x) in\"\n \t\t\t\" ethertype filter.\", filter->ether_type);\n \t\treturn -EINVAL;\n@@ -5156,7 +5158,7 @@ igb_timesync_enable(struct rte_eth_dev *dev)\n \n \t/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n \tE1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),\n-\t\t\t(ETHER_TYPE_1588 |\n+\t\t\t(RTE_ETHER_TYPE_1588 |\n \t\t\t E1000_ETQF_FILTER_ENABLE |\n \t\t\t E1000_ETQF_1588));\n \ndiff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c\nindex 8dcfc71c5..342986121 100644\n--- a/drivers/net/e1000/igb_flow.c\n+++ b/drivers/net/e1000/igb_flow.c\n@@ -700,8 +700,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,\n \t\t}\n \t}\n \n-\tif (filter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\tfilter->ether_type == ETHER_TYPE_IPv6) {\n+\tif (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\tfilter->ether_type == RTE_ETHER_TYPE_IPv6) {\n \t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n \t\trte_flow_error_set(error, EINVAL,\n \t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\ndiff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c\nindex ab48a269f..9d74c08ab 100644\n--- a/drivers/net/e1000/igb_pf.c\n+++ b/drivers/net/e1000/igb_pf.c\n@@ -37,7 +37,7 @@ dev_num_vf(struct rte_eth_dev *eth_dev)\n static inline\n int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)\n {\n-\tunsigned char vf_mac_addr[ETHER_ADDR_LEN];\n+\tunsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];\n \tstruct e1000_vf_info *vfinfo =\n \t\t*E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);\n \tuint16_t vfn;\n@@ -46,7 +46,7 @@ int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)\n \t\trte_eth_random_addr(vf_mac_addr);\n \t\t/* keep the random address as default */\n \t\tmemcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,\n-\t\t\t\tETHER_ADDR_LEN);\n+\t\t\t\tRTE_ETHER_ADDR_LEN);\n \t}\n \n \treturn 0;\n@@ -290,7 +290,7 @@ igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)\n \n \t/* reply to reset with ack and vf mac address */\n \tmsgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;\n-\trte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);\n+\trte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);\n \te1000_write_mbx(hw, msgbuf, 3, vf);\n \n \treturn 0;\n@@ -400,10 +400,11 @@ igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)\n {\n \tstruct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \tuint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK;\n-\tuint32_t max_frame = rlpml + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tuint32_t max_frame = rlpml + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;\n \tuint32_t vmolr;\n \n-\tif ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))\n+\tif (max_frame < RTE_ETHER_MIN_LEN ||\n+\t\t\tmax_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)\n \t\treturn -1;\n \n \tvmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));\ndiff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c\nindex ab0a80e15..01f5e735b 100644\n--- a/drivers/net/e1000/igb_rxtx.c\n+++ b/drivers/net/e1000/igb_rxtx.c\n@@ -1147,17 +1147,17 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t */\n \t\trxm->next = NULL;\n \t\tif (unlikely(rxq->crc_len > 0)) {\n-\t\t\tfirst_seg->pkt_len -= ETHER_CRC_LEN;\n-\t\t\tif (data_len <= ETHER_CRC_LEN) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (data_len <= RTE_ETHER_CRC_LEN) {\n \t\t\t\trte_pktmbuf_free_seg(rxm);\n \t\t\t\tfirst_seg->nb_segs--;\n \t\t\t\tlast_seg->data_len = (uint16_t)\n 
\t\t\t\t\t(last_seg->data_len -\n-\t\t\t\t\t (ETHER_CRC_LEN - data_len));\n+\t\t\t\t\t (RTE_ETHER_CRC_LEN - data_len));\n \t\t\t\tlast_seg->next = NULL;\n \t\t\t} else\n-\t\t\t\trxm->data_len =\n-\t\t\t\t\t(uint16_t) (data_len - ETHER_CRC_LEN);\n+\t\t\t\trxm->data_len = (uint16_t)\n+\t\t\t\t\t(data_len - RTE_ETHER_CRC_LEN);\n \t\t}\n \n \t\t/*\n@@ -1725,7 +1725,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,\n \t\tqueue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);\n \trxq->port_id = dev->data->port_id;\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \n@@ -2378,7 +2378,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)\n \t\t *  call to configure\n \t\t */\n \t\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \t\telse\n \t\t\trxq->crc_len = 0;\n \ndiff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h\nindex 221760c62..dcc8690a5 100644\n--- a/drivers/net/ena/ena_ethdev.h\n+++ b/drivers/net/ena/ena_ethdev.h\n@@ -198,7 +198,7 @@ struct ena_adapter {\n \n \tint id_number;\n \tchar name[ENA_NAME_MAX_LEN];\n-\tu8 mac_addr[ETHER_ADDR_LEN];\n+\tu8 mac_addr[RTE_ETHER_ADDR_LEN];\n \n \tvoid *regs;\n \tvoid *dev_mem_base;\ndiff --git a/drivers/net/enetc/base/enetc_hw.h b/drivers/net/enetc/base/enetc_hw.h\nindex ff2bda592..fd71155ee 100644\n--- a/drivers/net/enetc/base/enetc_hw.h\n+++ b/drivers/net/enetc/base/enetc_hw.h\n@@ -215,8 +215,8 @@ struct enetc_hw {\n };\n \n struct enetc_eth_mac_info {\n-\tuint8_t addr[ETHER_ADDR_LEN];\n-\tuint8_t perm_addr[ETHER_ADDR_LEN];\n+\tuint8_t addr[RTE_ETHER_ADDR_LEN];\n+\tuint8_t perm_addr[RTE_ETHER_ADDR_LEN];\n \tuint8_t get_link_status;\n };\n \ndiff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c\nindex e637c8628..6c5501acb 100644\n--- a/drivers/net/enetc/enetc_ethdev.c\n+++ b/drivers/net/enetc/enetc_ethdev.c\n@@ -417,7 +417,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \trx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?\n-\t\t\t\t     ETHER_CRC_LEN : 0);\n+\t\t\t\t     RTE_ETHER_CRC_LEN : 0);\n \n \treturn 0;\n fail:\n@@ -595,7 +595,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tstruct enetc_eth_hw *hw =\n \t\tENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \tstruct enetc_hw *enetc_hw = &hw->hw;\n-\tuint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tuint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;\n \n \t/* check that mtu is within the allowed range */\n \tif (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)\n@@ -612,7 +612,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EINVAL;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev->data->dev_conf.rxmode.offloads &=\n \t\t\t\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -654,7 +654,8 @@ enetc_dev_configure(struct rte_eth_dev *dev)\n \t\t\t      ENETC_MAC_MAXFRM_SIZE);\n \t\tenetc_port_wr(enetc_hw, ENETC_PTXMBAR,\n \t\t\t      2 * ENETC_MAC_MAXFRM_SIZE);\n-\t\tdev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;\n+\t\tdev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -\n+\t\t\tRTE_ETHER_CRC_LEN;\n \t}\n \n \tif (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {\n@@ -830,11 +831,12 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \n \t/* Allocate 
memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"enetc_eth\", ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"enetc_eth\",\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \tif (!eth_dev->data->mac_addrs) {\n \t\tENETC_PMD_ERR(\"Failed to allocate %d bytes needed to \"\n \t\t\t      \"store MAC addresses\",\n-\t\t\t      ETHER_ADDR_LEN * 1);\n+\t\t\t      RTE_ETHER_ADDR_LEN * 1);\n \t\terror = -ENOMEM;\n \t\treturn -1;\n \t}\n@@ -845,8 +847,9 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)\n \n \t/* Set MTU */\n \tenetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,\n-\t\t      ENETC_SET_MAXFRM(ETHER_MAX_LEN));\n-\teth_dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;\n+\t\t      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));\n+\teth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -\n+\t\tRTE_ETHER_CRC_LEN;\n \n \tENETC_PMD_DEBUG(\"port_id %d vendorID=0x%x deviceID=0x%x\",\n \t\t\teth_dev->data->port_id, pci_dev->id.vendor_id,\ndiff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h\nindex 20080af6f..bd2783430 100644\n--- a/drivers/net/enic/enic.h\n+++ b/drivers/net/enic/enic.h\n@@ -204,7 +204,7 @@ struct enic {\n static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)\n {\n \t/* ethdev max size includes eth whereas NIC MTU does not */\n-\treturn mtu + ETHER_HDR_LEN;\n+\treturn mtu + RTE_ETHER_HDR_LEN;\n }\n \n /* Get the CQ index from a Start of Packet(SOP) RQ index */\ndiff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c\nindex 3b55ebad4..80fbf595e 100644\n--- a/drivers/net/enic/enic_ethdev.c\n+++ b/drivers/net/enic/enic_ethdev.c\n@@ -646,9 +646,9 @@ static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,\n \n static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)\n {\n-\tchar mac_str[ETHER_ADDR_FMT_SIZE];\n+\tchar mac_str[RTE_ETHER_ADDR_FMT_SIZE];\n \n-\trte_ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);\n+\trte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);\n \tPMD_INIT_LOG(DEBUG, \" %s address %s\\n\",\n \t\t     add ? 
\"add\" : \"remove\", mac_str);\n }\n@@ -658,7 +658,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,\n \t\t\t\t    uint32_t nb_mc_addr)\n {\n \tstruct enic *enic = pmd_priv(eth_dev);\n-\tchar mac_str[ETHER_ADDR_FMT_SIZE];\n+\tchar mac_str[RTE_ETHER_ADDR_FMT_SIZE];\n \tstruct rte_ether_addr *addr;\n \tuint32_t i, j;\n \tint ret;\n@@ -671,7 +671,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,\n \t\tif (!rte_is_multicast_ether_addr(addr) ||\n \t\t    rte_is_broadcast_ether_addr(addr)) {\n \t\t\trte_ether_format_addr(mac_str,\n-\t\t\t\t\tETHER_ADDR_FMT_SIZE, addr);\n+\t\t\t\t\tRTE_ETHER_ADDR_FMT_SIZE, addr);\n \t\t\tPMD_INIT_LOG(ERR, \" invalid multicast address %s\\n\",\n \t\t\t\t     mac_str);\n \t\t\treturn -EINVAL;\ndiff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c\nindex 9bddfb941..bdac1f755 100644\n--- a/drivers/net/enic/enic_flow.c\n+++ b/drivers/net/enic/enic_flow.c\n@@ -593,7 +593,7 @@ enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)\n \targ->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);\n \treturn copy_inner_common(&arg->filter->u.generic_1, off,\n \t\targ->item->spec, mask, sizeof(struct rte_vlan_hdr),\n-\t\teth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);\n+\t\teth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);\n }\n \n static int\n@@ -609,7 +609,7 @@ enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)\n \targ->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);\n \treturn copy_inner_common(&arg->filter->u.generic_1, off,\n \t\targ->item->spec, mask, sizeof(struct ipv4_hdr),\n-\t\targ->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);\n+\t\targ->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4), 2);\n }\n \n static int\n@@ -625,7 +625,7 @@ enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)\n \targ->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);\n \treturn copy_inner_common(&arg->filter->u.generic_1, off,\n \t\targ->item->spec, mask, sizeof(struct ipv6_hdr),\n-\t\targ->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);\n+\t\targ->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6), 2);\n }\n \n static int\n@@ -679,14 +679,14 @@ enic_copy_item_eth_v2(struct copy_item_args *arg)\n \t\tmask = &rte_flow_item_eth_mask;\n \n \tmemcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,\n-\t       ETHER_ADDR_LEN);\n+\t       RTE_ETHER_ADDR_LEN);\n \tmemcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,\n-\t       ETHER_ADDR_LEN);\n+\t       RTE_ETHER_ADDR_LEN);\n \n \tmemcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,\n-\t       ETHER_ADDR_LEN);\n+\t       RTE_ETHER_ADDR_LEN);\n \tmemcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,\n-\t       ETHER_ADDR_LEN);\n+\t       RTE_ETHER_ADDR_LEN);\n \tenic_spec.ether_type = spec->type;\n \tenic_mask.ether_type = mask->type;\n \ndiff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c\nindex f7cbc90fb..21cfbb8c5 100644\n--- a/drivers/net/enic/enic_res.c\n+++ b/drivers/net/enic/enic_res.c\n@@ -61,9 +61,10 @@ int enic_get_vnic_config(struct enic *enic)\n \t * and will be 0 for legacy firmware and VICs\n \t */\n \tif (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)\n-\t\tenic->max_mtu = c->max_pkt_size - ETHER_HDR_LEN;\n+\t\tenic->max_mtu = c->max_pkt_size - RTE_ETHER_HDR_LEN;\n \telse\n-\t\tenic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - ETHER_HDR_LEN;\n+\t\tenic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE -\n+\t\t\tRTE_ETHER_HDR_LEN;\n \tif (c->mtu == 0)\n \t\tc->mtu = 1500;\n \ndiff 
--git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c\nindex 5fc6be403..d2e725bce 100644\n--- a/drivers/net/failsafe/failsafe_args.c\n+++ b/drivers/net/failsafe/failsafe_args.c\n@@ -376,7 +376,7 @@ fs_get_mac_addr_arg(const char *key __rte_unused,\n \t\t&ea->addr_bytes[0], &ea->addr_bytes[1],\n \t\t&ea->addr_bytes[2], &ea->addr_bytes[3],\n \t\t&ea->addr_bytes[4], &ea->addr_bytes[5]);\n-\treturn ret != ETHER_ADDR_LEN;\n+\treturn ret != RTE_ETHER_ADDR_LEN;\n }\n \n int\ndiff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c\nindex 325c67ca9..4746fad36 100644\n--- a/drivers/net/failsafe/failsafe_ether.c\n+++ b/drivers/net/failsafe/failsafe_ether.c\n@@ -172,9 +172,10 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,\n \t\tret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,\n \t\t\t\tPRIV(dev)->mac_addr_pool[i]);\n \t\tif (ret) {\n-\t\t\tchar ea_fmt[ETHER_ADDR_FMT_SIZE];\n+\t\t\tchar ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];\n \n-\t\t\trte_ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);\n+\t\t\trte_ether_format_addr(ea_fmt,\n+\t\t\t\t\tRTE_ETHER_ADDR_FMT_SIZE, ea);\n \t\t\tERROR(\"Adding MAC address %s failed\", ea_fmt);\n \t\t\treturn ret;\n \t\t}\ndiff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h\nindex dc814855d..99dedd98d 100644\n--- a/drivers/net/fm10k/fm10k.h\n+++ b/drivers/net/fm10k/fm10k.h\n@@ -305,7 +305,7 @@ fm10k_addr_alignment_valid(struct rte_mbuf *mb)\n \t/* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */\n \tif (RTE_ALIGN(addr, 8) == addr) {\n \t\tboundary1 = RTE_ALIGN_FLOOR(addr, 4096);\n-\t\tboundary2 = RTE_ALIGN_FLOOR(addr + ETHER_MAX_VLAN_FRAME_LEN,\n+\t\tboundary2 = RTE_ALIGN_FLOOR(addr + RTE_ETHER_MAX_VLAN_FRAME_LEN,\n \t\t\t\t\t\t4096);\n \t\tif (boundary1 == boundary2)\n \t\t\treturn 1;\ndiff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c\nindex e6956cbdb..b6bcebea9 100644\n--- a/drivers/net/fm10k/fm10k_ethdev.c\n+++ b/drivers/net/fm10k/fm10k_ethdev.c\n@@ -613,7 +613,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)\n \n \t/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */\n \tmemset(dev->data->mac_addrs, 0,\n-\t\tETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);\n+\t\tRTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);\n \trte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,\n \t\t&dev->data->mac_addrs[0]);\n \tmemset(macvlan, 0, sizeof(*macvlan));\n@@ -3072,7 +3072,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)\n \n \t/* Initialize MAC address(es) */\n \tdev->data->mac_addrs = rte_zmalloc(\"fm10k\",\n-\t\t\tETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);\n+\t\t\tRTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);\n \tif (dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Cannot allocate memory for MAC addresses\");\n \t\treturn -ENOMEM;\ndiff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c\nindex b16d5c02a..0815eb6fc 100644\n--- a/drivers/net/i40e/i40e_ethdev.c\n+++ b/drivers/net/i40e/i40e_ethdev.c\n@@ -1478,7 +1478,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)\n \t/* Set the global registers with default ether type value */\n \tif (!pf->support_multi_driver) {\n \t\tret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,\n-\t\t\t\t\t ETHER_TYPE_VLAN);\n+\t\t\t\t\t RTE_ETHER_TYPE_VLAN);\n \t\tif (ret != I40E_SUCCESS) {\n \t\t\tPMD_INIT_LOG(ERR,\n \t\t\t\t     \"Failed to set the default outer \"\n@@ -1509,9 +1509,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params 
__rte_unused)\n \t}\n \n \tif (!vsi->max_macaddrs)\n-\t\tlen = ETHER_ADDR_LEN;\n+\t\tlen = RTE_ETHER_ADDR_LEN;\n \telse\n-\t\tlen = ETHER_ADDR_LEN * vsi->max_macaddrs;\n+\t\tlen = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;\n \n \t/* Should be after VSI initialized */\n \tdev->data->mac_addrs = rte_zmalloc(\"i40e\", len, 0);\n@@ -2834,7 +2834,7 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)\n \t\t\t    &nes->rx_broadcast);\n \t/* exclude CRC bytes */\n \tnes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +\n-\t\tnes->rx_broadcast) * ETHER_CRC_LEN;\n+\t\tnes->rx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \ti40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,\n \t\t\t    &oes->rx_discards, &nes->rx_discards);\n@@ -2934,7 +2934,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)\n \t/* exclude CRC size */\n \tpf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +\n \t\tpf->internal_stats.rx_multicast +\n-\t\tpf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;\n+\t\tpf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \t/* Get statistics of struct i40e_eth_stats */\n \ti40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),\n@@ -2954,10 +2954,11 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)\n \t\t\t    pf->offset_loaded, &os->eth.rx_broadcast,\n \t\t\t    &ns->eth.rx_broadcast);\n \t/* Workaround: CRC size should not be included in byte statistics,\n-\t * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.\n+\t * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx\n+\t * packet.\n \t */\n \tns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +\n-\t\tns->eth.rx_broadcast) * ETHER_CRC_LEN;\n+\t\tns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \t/* exclude internal rx bytes\n \t * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before\n@@ -3011,7 +3012,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)\n \t\t\t    pf->offset_loaded, &os->eth.tx_broadcast,\n \t\t\t    &ns->eth.tx_broadcast);\n \tns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +\n-\t\tns->eth.tx_broadcast) * ETHER_CRC_LEN;\n+\t\tns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \t/* exclude internal tx bytes\n \t * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before\n@@ -3510,7 +3511,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->max_mac_addrs = vsi->max_macaddrs;\n \tdev_info->max_vfs = pci_dev->max_vfs;\n \tdev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;\n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \tdev_info->rx_queue_offload_capa = 0;\n \tdev_info->rx_offload_capa =\n \t\tDEV_RX_OFFLOAD_VLAN_STRIP |\n@@ -3776,9 +3777,9 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)\n \t\t\ti40e_vsi_config_double_vlan(vsi, TRUE);\n \t\t\t/* Set global registers with default ethertype. 
*/\n \t\t\ti40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,\n-\t\t\t\t\t   ETHER_TYPE_VLAN);\n+\t\t\t\t\t   RTE_ETHER_TYPE_VLAN);\n \t\t\ti40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,\n-\t\t\t\t\t   ETHER_TYPE_VLAN);\n+\t\t\t\t\t   RTE_ETHER_TYPE_VLAN);\n \t\t}\n \t\telse\n \t\t\ti40e_vsi_config_double_vlan(vsi, FALSE);\n@@ -4036,7 +4037,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,\n \t\treturn -EINVAL;\n \t}\n \n-\trte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);\n+\trte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);\n \tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)\n \t\tmac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;\n \telse\n@@ -4141,11 +4142,11 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf,\n \t}\n \n \tif (add) {\n-\t\trte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);\n+\t\trte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);\n \t\trte_memcpy(hw->mac.addr, new_mac->addr_bytes,\n-\t\t\t\tETHER_ADDR_LEN);\n+\t\t\t\tRTE_ETHER_ADDR_LEN);\n \t\trte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,\n-\t\t\t\t ETHER_ADDR_LEN);\n+\t\t\t\t RTE_ETHER_ADDR_LEN);\n \n \t\tmac_filter.filter_type = filter->filter_type;\n \t\tret = i40e_vsi_add_mac(vf->vsi, &mac_filter);\n@@ -4156,7 +4157,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf,\n \t\trte_ether_addr_copy(new_mac, &pf->dev_addr);\n \t} else {\n \t\trte_memcpy(hw->mac.addr, hw->mac.perm_addr,\n-\t\t\t\tETHER_ADDR_LEN);\n+\t\t\t\tRTE_ETHER_ADDR_LEN);\n \t\tret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);\n \t\tif (ret != I40E_SUCCESS) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to delete MAC filter.\");\n@@ -5824,7 +5825,7 @@ i40e_vsi_setup(struct i40e_pf *pf,\n \t}\n \n \t/* MAC/VLAN configuration */\n-\trte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);\n+\trte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n \tfilter.filter_type = RTE_MACVLAN_PERFECT_MATCH;\n \n \tret = i40e_vsi_add_mac(vsi, &filter);\n@@ -7110,7 +7111,7 @@ i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)\n \tint mac_num;\n \tint ret = I40E_SUCCESS;\n \n-\tif (!vsi || vlan > ETHER_MAX_VLAN_ID)\n+\tif (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)\n \t\treturn I40E_ERR_PARAM;\n \n \t/* If it's already set, just return */\n@@ -7161,7 +7162,7 @@ i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)\n \t * Vlan 0 is the generic filter for untagged packets\n \t * and can't be removed.\n \t */\n-\tif (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)\n+\tif (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)\n \t\treturn I40E_ERR_PARAM;\n \n \t/* If can't find it, just return */\n@@ -8622,7 +8623,7 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,\n \t\treturn -EINVAL;\n \t}\n \n-\tif (filter->inner_vlan > ETHER_MAX_VLAN_ID) {\n+\tif (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {\n \t\tPMD_DRV_LOG(ERR, \"Invalid inner VLAN ID\");\n \t\treturn -EINVAL;\n \t}\n@@ -9902,7 +9903,8 @@ static int\n i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,\n \t\t\t      struct i40e_ethertype_filter *filter)\n {\n-\trte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);\n+\trte_memcpy(&filter->input.mac_addr, &input->mac_addr,\n+\t\tRTE_ETHER_ADDR_LEN);\n \tfilter->input.ether_type = input->ether_type;\n \tfilter->flags = input->flags;\n \tfilter->queue = input->queue;\n@@ -9994,14 +9996,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,\n \t\tPMD_DRV_LOG(ERR, \"Invalid queue ID\");\n \t\treturn -EINVAL;\n \t}\n-\tif (filter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\tfilter->ether_type == ETHER_TYPE_IPv6) 
{\n+\tif (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\tfilter->ether_type == RTE_ETHER_TYPE_IPv6) {\n \t\tPMD_DRV_LOG(ERR,\n \t\t\t\"unsupported ether_type(0x%04x) in control packet filter.\",\n \t\t\tfilter->ether_type);\n \t\treturn -EINVAL;\n \t}\n-\tif (filter->ether_type == ETHER_TYPE_VLAN)\n+\tif (filter->ether_type == RTE_ETHER_TYPE_VLAN)\n \t\tPMD_DRV_LOG(WARNING,\n \t\t\t\"filter vlan ether_type in first tag is not supported.\");\n \n@@ -12010,7 +12012,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tint ret = 0;\n \n \t/* check if mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)\n \t\treturn -EINVAL;\n \n \t/* mtu setting is forbidden if port is start */\n@@ -12020,7 +12022,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EBUSY;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev_data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\ndiff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h\nindex ffaa7b949..6f2949e88 100644\n--- a/drivers/net/i40e/i40e_ethdev.h\n+++ b/drivers/net/i40e/i40e_ethdev.h\n@@ -268,7 +268,7 @@ enum i40e_flxpld_layer_idx {\n  * Considering QinQ packet, the VLAN tag needs to be counted twice.\n  */\n #define I40E_ETH_OVERHEAD \\\n-\t(ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)\n \n struct i40e_adapter;\n \ndiff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c\nindex f7f193c1f..63dbe14c3 100644\n--- a/drivers/net/i40e/i40e_ethdev_vf.c\n+++ b/drivers/net/i40e/i40e_ethdev_vf.c\n@@ -1505,12 +1505,12 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)\n \n \t/* copy mac addr */\n \teth_dev->data->mac_addrs = rte_zmalloc(\"i40evf_mac\",\n-\t\t\t\t\tETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,\n-\t\t\t\t\t0);\n+\t\t\t\tRTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,\n+\t\t\t\t0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d bytes needed to\"\n \t\t\t\t\" store MAC addresses\",\n-\t\t\t\tETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);\n+\t\t\t\tRTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);\n \t\treturn -ENOMEM;\n \t}\n \trte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,\n@@ -1767,21 +1767,22 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)\n \t * Check if the jumbo frame and maximum packet length are set correctly\n \t */\n \tif (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n-\t\tif (rxq->max_pkt_len <= ETHER_MAX_LEN ||\n+\t\tif (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||\n \t\t    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n \t\t\t\t\"larger than %u and smaller than %u, as jumbo \"\n-\t\t\t\t\"frame is enabled\", (uint32_t)ETHER_MAX_LEN,\n+\t\t\t\t\"frame is enabled\", (uint32_t)RTE_ETHER_MAX_LEN,\n \t\t\t\t\t(uint32_t)I40E_FRAME_SIZE_MAX);\n \t\t\treturn I40E_ERR_CONFIG;\n \t\t}\n \t} else {\n-\t\tif (rxq->max_pkt_len < ETHER_MIN_LEN ||\n-\t\t    rxq->max_pkt_len > ETHER_MAX_LEN) {\n+\t\tif (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||\n+\t\t    rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n \t\t\t\t\"larger than %u and smaller than %u, as jumbo \"\n-\t\t\t\t\"frame is disabled\", 
(uint32_t)ETHER_MIN_LEN,\n-\t\t\t\t\t\t(uint32_t)ETHER_MAX_LEN);\n+\t\t\t\t\"frame is disabled\",\n+\t\t\t\t(uint32_t)RTE_ETHER_MIN_LEN,\n+\t\t\t\t(uint32_t)RTE_ETHER_MAX_LEN);\n \t\t\treturn I40E_ERR_CONFIG;\n \t\t}\n \t}\n@@ -2218,7 +2219,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;\n \tdev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;\n \tdev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;\n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \tdev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);\n \tdev_info->reta_size = ETH_RSS_RETA_SIZE_64;\n \tdev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;\n@@ -2680,7 +2681,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tint ret = 0;\n \n \t/* check if mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)\n \t\treturn -EINVAL;\n \n \t/* mtu setting is forbidden if port is start */\n@@ -2690,7 +2691,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EBUSY;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev_data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\ndiff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c\nindex dd940cecb..4aaf27f7c 100644\n--- a/drivers/net/i40e/i40e_fdir.c\n+++ b/drivers/net/i40e/i40e_fdir.c\n@@ -113,7 +113,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)\n #endif\n \trx_ctx.dtype = i40e_header_split_none;\n \trx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;\n-\trx_ctx.rxmax = ETHER_MAX_LEN;\n+\trx_ctx.rxmax = RTE_ETHER_MAX_LEN;\n \trx_ctx.tphrdesc_ena = 1;\n \trx_ctx.tphwdesc_ena = 1;\n \trx_ctx.tphdata_ena = 1;\n@@ -725,7 +725,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,\n \tcase RTE_ETH_FLOW_FRAG_IPV4:\n \t\tip = (struct ipv4_hdr *)raw_pkt;\n \n-\t\t*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\tip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;\n \t\t/* set len to by default */\n \t\tip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);\n@@ -752,7 +752,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,\n \tcase RTE_ETH_FLOW_FRAG_IPV6:\n \t\tip6 = (struct ipv6_hdr *)raw_pkt;\n \n-\t\t*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tip6->vtc_flow =\n \t\t\trte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |\n \t\t\t\t\t (fdir_input->flow.ipv6_flow.tc <<\n@@ -910,7 +910,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,\n \t\t * starts after the whole ARP header\n \t\t */\n \t\tif (fdir_input->flow.l2_flow.ether_type ==\n-\t\t\t\trte_cpu_to_be_16(ETHER_TYPE_ARP))\n+\t\t\t\trte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))\n \t\t\tpayload += sizeof(struct rte_arp_hdr);\n \t\tset_idx = I40E_FLXPLD_L2_IDX;\n \t\tbreak;\n@@ -1009,7 +1009,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,\n \t\t is_customized_pctype) {\n \t\tip = (struct ipv4_hdr *)raw_pkt;\n \n-\t\t*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\tip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;\n \t\t/* set len to by default */\n \t\tip->total_length = 
rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);\n@@ -1042,7 +1042,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,\n \t\t   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {\n \t\tip6 = (struct ipv6_hdr *)raw_pkt;\n \n-\t\t*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\tip6->vtc_flow =\n \t\t\trte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |\n \t\t\t\t\t (fdir_input->flow.ipv6_flow.tc <<\n@@ -1196,7 +1196,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,\n \t\t * starts after the whole ARP header\n \t\t */\n \t\tif (fdir_input->flow.l2_flow.ether_type ==\n-\t\t\t\trte_cpu_to_be_16(ETHER_TYPE_ARP))\n+\t\t\t\trte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))\n \t\t\tpayload += sizeof(struct rte_arp_hdr);\n \t\tset_idx = I40E_FLXPLD_L2_IDX;\n \t} else if (fdir_input->flow_ext.customized_pctype) {\ndiff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c\nindex 6feaaa818..f7d77f3b6 100644\n--- a/drivers/net/i40e/i40e_flow.c\n+++ b/drivers/net/i40e/i40e_flow.c\n@@ -2035,9 +2035,9 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,\n \t\t\t}\n \t\t\tfilter->ether_type = rte_be_to_cpu_16(eth_spec->type);\n \n-\t\t\tif (filter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\t\t    filter->ether_type == ETHER_TYPE_IPv6 ||\n-\t\t\t    filter->ether_type == ETHER_TYPE_LLDP ||\n+\t\t\tif (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\t\t    filter->ether_type == RTE_ETHER_TYPE_IPv6 ||\n+\t\t\t    filter->ether_type == RTE_ETHER_TYPE_LLDP ||\n \t\t\t    filter->ether_type == outer_tpid) {\n \t\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n@@ -2507,9 +2507,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,\n \t\t\t\tether_type = rte_be_to_cpu_16(eth_spec->type);\n \n \t\t\t\tif (next == RTE_FLOW_ITEM_TYPE_VLAN ||\n-\t\t\t\t    ether_type == ETHER_TYPE_IPv4 ||\n-\t\t\t\t    ether_type == ETHER_TYPE_IPv6 ||\n-\t\t\t\t    ether_type == ETHER_TYPE_ARP ||\n+\t\t\t\t    ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\t\t\t    ether_type == RTE_ETHER_TYPE_IPv6 ||\n+\t\t\t\t    ether_type == RTE_ETHER_TYPE_ARP ||\n \t\t\t\t    ether_type == outer_tpid) {\n \t\t\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\t\t\t     RTE_FLOW_ERROR_TYPE_ITEM,\n@@ -2552,9 +2552,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,\n \t\t\t\tether_type =\n \t\t\t\t\trte_be_to_cpu_16(vlan_spec->inner_type);\n \n-\t\t\t\tif (ether_type == ETHER_TYPE_IPv4 ||\n-\t\t\t\t    ether_type == ETHER_TYPE_IPv6 ||\n-\t\t\t\t    ether_type == ETHER_TYPE_ARP ||\n+\t\t\t\tif (ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\t\t\t    ether_type == RTE_ETHER_TYPE_IPv6 ||\n+\t\t\t\t    ether_type == RTE_ETHER_TYPE_ARP ||\n \t\t\t\t    ether_type == outer_tpid) {\n \t\t\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\t\t\t     RTE_FLOW_ERROR_TYPE_ITEM,\n@@ -3338,12 +3338,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,\n \t\t\t\tif (!vxlan_flag) {\n \t\t\t\t\trte_memcpy(&filter->outer_mac,\n \t\t\t\t\t\t   &eth_spec->dst,\n-\t\t\t\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t\t\t\t   RTE_ETHER_ADDR_LEN);\n \t\t\t\t\tfilter_type |= ETH_TUNNEL_FILTER_OMAC;\n \t\t\t\t} else {\n \t\t\t\t\trte_memcpy(&filter->inner_mac,\n \t\t\t\t\t\t   &eth_spec->dst,\n-\t\t\t\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t\t\t\t   RTE_ETHER_ADDR_LEN);\n \t\t\t\t\tfilter_type |= ETH_TUNNEL_FILTER_IMAC;\n \t\t\t\t}\n \t\t\t}\n@@ -3568,12 +3568,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,\n \t\t\t\tif (!nvgre_flag) {\n 
\t\t\t\t\trte_memcpy(&filter->outer_mac,\n \t\t\t\t\t\t   &eth_spec->dst,\n-\t\t\t\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t\t\t\t   RTE_ETHER_ADDR_LEN);\n \t\t\t\t\tfilter_type |= ETH_TUNNEL_FILTER_OMAC;\n \t\t\t\t} else {\n \t\t\t\t\trte_memcpy(&filter->inner_mac,\n \t\t\t\t\t\t   &eth_spec->dst,\n-\t\t\t\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t\t\t\t   RTE_ETHER_ADDR_LEN);\n \t\t\t\t\tfilter_type |= ETH_TUNNEL_FILTER_IMAC;\n \t\t\t\t}\n \t\t\t}\ndiff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c\nindex b7636c7fa..b28d02ec2 100644\n--- a/drivers/net/i40e/i40e_pf.c\n+++ b/drivers/net/i40e/i40e_pf.c\n@@ -843,7 +843,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,\n \n \tfor (i = 0; i < addr_list->num_elements; i++) {\n \t\tmac = (struct rte_ether_addr *)(addr_list->list[i].addr);\n-\t\trte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);\n+\t\trte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);\n \t\tfilter.filter_type = RTE_MACVLAN_PERFECT_MATCH;\n \t\tif (rte_is_zero_ether_addr(mac) ||\n \t\t    i40e_vsi_add_mac(vf->vsi, &filter)) {\ndiff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c\nindex 1489552da..c0ded1d26 100644\n--- a/drivers/net/i40e/i40e_rxtx.c\n+++ b/drivers/net/i40e/i40e_rxtx.c\n@@ -889,17 +889,17 @@ i40e_recv_scattered_pkts(void *rx_queue,\n \t\t */\n \t\trxm->next = NULL;\n \t\tif (unlikely(rxq->crc_len > 0)) {\n-\t\t\tfirst_seg->pkt_len -= ETHER_CRC_LEN;\n-\t\t\tif (rx_packet_len <= ETHER_CRC_LEN) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (rx_packet_len <= RTE_ETHER_CRC_LEN) {\n \t\t\t\trte_pktmbuf_free_seg(rxm);\n \t\t\t\tfirst_seg->nb_segs--;\n \t\t\t\tlast_seg->data_len =\n \t\t\t\t\t(uint16_t)(last_seg->data_len -\n-\t\t\t\t\t(ETHER_CRC_LEN - rx_packet_len));\n+\t\t\t\t\t(RTE_ETHER_CRC_LEN - rx_packet_len));\n \t\t\t\tlast_seg->next = NULL;\n \t\t\t} else\n \t\t\t\trxm->data_len = (uint16_t)(rx_packet_len -\n-\t\t\t\t\t\t\t\tETHER_CRC_LEN);\n+\t\t\t\t\t\t\tRTE_ETHER_CRC_LEN);\n \t\t}\n \n \t\tfirst_seg->port = rxq->port_id;\n@@ -1839,7 +1839,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->reg_idx = reg_idx;\n \trxq->port_id = dev->data->port_id;\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \trxq->drop_en = rx_conf->rx_drop_en;\n@@ -2619,23 +2619,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)\n \tlen = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;\n \trxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);\n \tif (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n-\t\tif (rxq->max_pkt_len <= ETHER_MAX_LEN ||\n+\t\tif (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||\n \t\t\trxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must \"\n \t\t\t\t    \"be larger than %u and smaller than %u,\"\n \t\t\t\t    \"as jumbo frame is enabled\",\n-\t\t\t\t    (uint32_t)ETHER_MAX_LEN,\n+\t\t\t\t    (uint32_t)RTE_ETHER_MAX_LEN,\n \t\t\t\t    (uint32_t)I40E_FRAME_SIZE_MAX);\n \t\t\treturn I40E_ERR_CONFIG;\n \t\t}\n \t} else {\n-\t\tif (rxq->max_pkt_len < ETHER_MIN_LEN ||\n-\t\t\trxq->max_pkt_len > ETHER_MAX_LEN) {\n+\t\tif (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||\n+\t\t\trxq->max_pkt_len > RTE_ETHER_MAX_LEN) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n \t\t\t\t    \"larger than %u and smaller than %u, \"\n \t\t\t\t    \"as jumbo frame is disabled\",\n-\t\t\t\t    
(uint32_t)ETHER_MIN_LEN,\n-\t\t\t\t    (uint32_t)ETHER_MAX_LEN);\n+\t\t\t\t    (uint32_t)RTE_ETHER_MIN_LEN,\n+\t\t\t\t    (uint32_t)RTE_ETHER_MAX_LEN);\n \t\t\treturn I40E_ERR_CONFIG;\n \t\t}\n \t}\ndiff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c\nindex 3d66b635c..0fbf79ca3 100644\n--- a/drivers/net/i40e/rte_pmd_i40e.c\n+++ b/drivers/net/i40e/rte_pmd_i40e.c\n@@ -663,7 +663,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,\n \n \tRTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);\n \n-\tif (vlan_id > ETHER_MAX_VLAN_ID) {\n+\tif (vlan_id > RTE_ETHER_MAX_VLAN_ID) {\n \t\tPMD_DRV_LOG(ERR, \"Invalid VLAN ID.\");\n \t\treturn -EINVAL;\n \t}\n@@ -765,7 +765,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,\n \t}\n \n \tif (on) {\n-\t\trte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);\n+\t\trte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n \t\tfilter.filter_type = RTE_MACVLAN_PERFECT_MATCH;\n \t\tret = i40e_vsi_add_mac(vsi, &filter);\n \t} else {\n@@ -893,7 +893,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,\n \tif (!is_i40e_supported(dev))\n \t\treturn -ENOTSUP;\n \n-\tif (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {\n+\tif (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) {\n \t\tPMD_DRV_LOG(ERR, \"Invalid VLAN ID.\");\n \t\treturn -EINVAL;\n \t}\ndiff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h\nindex 1227259f5..bbd4d75d0 100644\n--- a/drivers/net/iavf/iavf.h\n+++ b/drivers/net/iavf/iavf.h\n@@ -57,7 +57,7 @@\n  */\n #define IAVF_VLAN_TAG_SIZE               4\n #define IAVF_ETH_OVERHEAD \\\n-\t(ETHER_HDR_LEN + ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)\n \n #define IAVF_32_BIT_WIDTH (CHAR_BIT * 4)\n #define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)\ndiff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c\nindex f545bd907..53dc05c78 100644\n--- a/drivers/net/iavf/iavf_ethdev.c\n+++ b/drivers/net/iavf/iavf_ethdev.c\n@@ -225,23 +225,23 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)\n \t * correctly.\n \t */\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n-\t\tif (max_pkt_len <= ETHER_MAX_LEN ||\n+\t\tif (max_pkt_len <= RTE_ETHER_MAX_LEN ||\n \t\t    max_pkt_len > IAVF_FRAME_SIZE_MAX) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n \t\t\t\t    \"larger than %u and smaller than %u, \"\n \t\t\t\t    \"as jumbo frame is enabled\",\n-\t\t\t\t    (uint32_t)ETHER_MAX_LEN,\n+\t\t\t\t    (uint32_t)RTE_ETHER_MAX_LEN,\n \t\t\t\t    (uint32_t)IAVF_FRAME_SIZE_MAX);\n \t\t\treturn -EINVAL;\n \t\t}\n \t} else {\n-\t\tif (max_pkt_len < ETHER_MIN_LEN ||\n-\t\t    max_pkt_len > ETHER_MAX_LEN) {\n+\t\tif (max_pkt_len < RTE_ETHER_MIN_LEN ||\n+\t\t    max_pkt_len > RTE_ETHER_MAX_LEN) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n \t\t\t\t    \"larger than %u and smaller than %u, \"\n \t\t\t\t    \"as jumbo frame is disabled\",\n-\t\t\t\t    (uint32_t)ETHER_MIN_LEN,\n-\t\t\t\t    (uint32_t)ETHER_MAX_LEN);\n+\t\t\t\t    (uint32_t)RTE_ETHER_MIN_LEN,\n+\t\t\t\t    (uint32_t)RTE_ETHER_MAX_LEN);\n \t\t\treturn -EINVAL;\n \t\t}\n \t}\n@@ -917,7 +917,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tuint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;\n \tint ret = 0;\n \n-\tif (mtu < ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)\n \t\treturn -EINVAL;\n \n \t/* 
mtu setting is forbidden if port is start */\n@@ -926,7 +926,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EBUSY;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev->data->dev_conf.rxmode.offloads |=\n \t\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -1305,13 +1305,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)\n \n \t/* copy mac addr */\n \teth_dev->data->mac_addrs = rte_zmalloc(\n-\t\t\t\t\t\"iavf_mac\",\n-\t\t\t\t\tETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX,\n-\t\t\t\t\t0);\n+\t\t\"iavf_mac\", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);\n \tif (!eth_dev->data->mac_addrs) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d bytes needed to\"\n \t\t\t     \" store MAC addresses\",\n-\t\t\t     ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);\n+\t\t\t     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);\n \t\treturn -ENOMEM;\n \t}\n \t/* If the MAC address is not configured by host,\ndiff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c\nindex db7070fb5..3b83605e8 100644\n--- a/drivers/net/iavf/iavf_rxtx.c\n+++ b/drivers/net/iavf/iavf_rxtx.c\n@@ -1025,17 +1025,17 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t */\n \t\trxm->next = NULL;\n \t\tif (unlikely(rxq->crc_len > 0)) {\n-\t\t\tfirst_seg->pkt_len -= ETHER_CRC_LEN;\n-\t\t\tif (rx_packet_len <= ETHER_CRC_LEN) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (rx_packet_len <= RTE_ETHER_CRC_LEN) {\n \t\t\t\trte_pktmbuf_free_seg(rxm);\n \t\t\t\tfirst_seg->nb_segs--;\n \t\t\t\tlast_seg->data_len =\n \t\t\t\t\t(uint16_t)(last_seg->data_len -\n-\t\t\t\t\t(ETHER_CRC_LEN - rx_packet_len));\n+\t\t\t\t\t(RTE_ETHER_CRC_LEN - rx_packet_len));\n \t\t\t\tlast_seg->next = NULL;\n \t\t\t} else\n \t\t\t\trxm->data_len = (uint16_t)(rx_packet_len -\n-\t\t\t\t\t\t\t\tETHER_CRC_LEN);\n+\t\t\t\t\t\t\tRTE_ETHER_CRC_LEN);\n \t\t}\n \n \t\tfirst_seg->port = rxq->port_id;\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 61624d76f..5f89310f1 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -651,7 +651,7 @@ ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)\n \tstruct ice_hw *hw;\n \tint ret = 0;\n \n-\tif (!vsi || vlan_id > ETHER_MAX_VLAN_ID)\n+\tif (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)\n \t\treturn -EINVAL;\n \n \thw = ICE_VSI_TO_HW(vsi);\n@@ -722,7 +722,7 @@ ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)\n \t * Vlan 0 is the generic filter for untagged packets\n \t * and can't be removed.\n \t */\n-\tif (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)\n+\tif (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)\n \t\treturn -EINVAL;\n \n \thw = ICE_VSI_TO_HW(vsi);\n@@ -1224,12 +1224,12 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\t   hw->port_info->mac.perm_addr,\n \t\t   ETH_ADDR_LEN);\n \n-\trte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);\n+\trte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);\n \tret = ice_add_mac_filter(vsi, &mac_addr);\n \tif (ret != ICE_SUCCESS)\n \t\tPMD_INIT_LOG(ERR, \"Failed to add dflt MAC filter\");\n \n-\trte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);\n+\trte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n \tret = ice_add_mac_filter(vsi, &mac_addr);\n \tif (ret != ICE_SUCCESS)\n \t\tPMD_INIT_LOG(ERR, \"Failed to add MAC filter\");\n@@ -2233,7 +2233,7 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tuint32_t frame_size = mtu + ICE_ETH_OVERHEAD;\n \n \t/* check if mtu 
is within the allowed range */\n-\tif (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)\n \t\treturn -EINVAL;\n \n \t/* mtu setting is forbidden if port is start */\n@@ -2244,7 +2244,7 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EBUSY;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev_data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -3101,7 +3101,7 @@ ice_update_vsi_stats(struct ice_vsi *vsi)\n \t\t\t   &nes->rx_broadcast);\n \t/* exclude CRC bytes */\n \tnes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +\n-\t\t\t  nes->rx_broadcast) * ETHER_CRC_LEN;\n+\t\t\t  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \tice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,\n \t\t\t   &oes->rx_discards, &nes->rx_discards);\n@@ -3174,10 +3174,11 @@ ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)\n \t\t\t   &ns->eth.rx_discards);\n \n \t/* Workaround: CRC size should not be included in byte statistics,\n-\t * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.\n+\t * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx\n+\t * packet.\n \t */\n \tns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +\n-\t\t\t     ns->eth.rx_broadcast) * ETHER_CRC_LEN;\n+\t\t\t     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \t/* GLPRT_REPC not supported */\n \t/* GLPRT_RMPC not supported */\n@@ -3202,7 +3203,7 @@ ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)\n \t\t\t   pf->offset_loaded, &os->eth.tx_broadcast,\n \t\t\t   &ns->eth.tx_broadcast);\n \tns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +\n-\t\t\t     ns->eth.tx_broadcast) * ETHER_CRC_LEN;\n+\t\t\t     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;\n \n \t/* GLPRT_TEPC not supported */\n \ndiff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h\nindex 4baaf9498..1385afac3 100644\n--- a/drivers/net/ice/ice_ethdev.h\n+++ b/drivers/net/ice/ice_ethdev.h\n@@ -122,7 +122,7 @@\n  * Considering QinQ packet, the VLAN tag needs to be counted twice.\n  */\n #define ICE_ETH_OVERHEAD \\\n-\t(ETHER_HDR_LEN + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)\n \n struct ice_adapter;\n \ndiff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex ace766b1d..40e5db767 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -47,23 +47,23 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)\n \t\t\t\t   dev->data->dev_conf.rxmode.max_rx_pkt_len);\n \n \tif (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n-\t\tif (rxq->max_pkt_len <= ETHER_MAX_LEN ||\n+\t\tif (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||\n \t\t    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must \"\n \t\t\t\t    \"be larger than %u and smaller than %u,\"\n \t\t\t\t    \"as jumbo frame is enabled\",\n-\t\t\t\t    (uint32_t)ETHER_MAX_LEN,\n+\t\t\t\t    (uint32_t)RTE_ETHER_MAX_LEN,\n \t\t\t\t    (uint32_t)ICE_FRAME_SIZE_MAX);\n \t\t\treturn -EINVAL;\n \t\t}\n \t} else {\n-\t\tif (rxq->max_pkt_len < ETHER_MIN_LEN ||\n-\t\t    rxq->max_pkt_len > ETHER_MAX_LEN) {\n+\t\tif (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||\n+\t\t    rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {\n \t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n \t\t\t\t    \"larger than %u and smaller than %u, \"\n \t\t\t\t    \"as jumbo frame 
is disabled\",\n-\t\t\t\t    (uint32_t)ETHER_MIN_LEN,\n-\t\t\t\t    (uint32_t)ETHER_MAX_LEN);\n+\t\t\t\t    (uint32_t)RTE_ETHER_MIN_LEN,\n+\t\t\t\t    (uint32_t)RTE_ETHER_MAX_LEN);\n \t\t\treturn -EINVAL;\n \t\t}\n \t}\n@@ -629,7 +629,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->reg_idx = vsi->base_queue + queue_idx;\n \trxq->port_id = dev->data->port_id;\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \n@@ -1412,17 +1412,17 @@ ice_recv_scattered_pkts(void *rx_queue,\n \t\t */\n \t\trxm->next = NULL;\n \t\tif (unlikely(rxq->crc_len > 0)) {\n-\t\t\tfirst_seg->pkt_len -= ETHER_CRC_LEN;\n-\t\t\tif (rx_packet_len <= ETHER_CRC_LEN) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (rx_packet_len <= RTE_ETHER_CRC_LEN) {\n \t\t\t\trte_pktmbuf_free_seg(rxm);\n \t\t\t\tfirst_seg->nb_segs--;\n \t\t\t\tlast_seg->data_len =\n \t\t\t\t\t(uint16_t)(last_seg->data_len -\n-\t\t\t\t\t(ETHER_CRC_LEN - rx_packet_len));\n+\t\t\t\t\t(RTE_ETHER_CRC_LEN - rx_packet_len));\n \t\t\t\tlast_seg->next = NULL;\n \t\t\t} else\n \t\t\t\trxm->data_len = (uint16_t)(rx_packet_len -\n-\t\t\t\t\t\t\t   ETHER_CRC_LEN);\n+\t\t\t\t\t\t\t   RTE_ETHER_CRC_LEN);\n \t\t}\n \n \t\tfirst_seg->port = rxq->port_id;\ndiff --git a/drivers/net/ipn3ke/ipn3ke_ethdev.h b/drivers/net/ipn3ke/ipn3ke_ethdev.h\nindex 566bc7607..af2da05c5 100644\n--- a/drivers/net/ipn3ke/ipn3ke_ethdev.h\n+++ b/drivers/net/ipn3ke/ipn3ke_ethdev.h\n@@ -642,7 +642,7 @@ ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,\n  * Considering QinQ packet, the VLAN tag needs to be counted twice.\n  */\n #define IPN3KE_ETH_OVERHEAD \\\n-\t\t(ETHER_HDR_LEN + ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2)\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2)\n \n #define IPN3KE_MAC_FRAME_SIZE_MAX    9728\n #define IPN3KE_MAC_RX_FRAME_MAXLENGTH    0x00AE\ndiff --git a/drivers/net/ipn3ke/ipn3ke_flow.c b/drivers/net/ipn3ke/ipn3ke_flow.c\nindex e5937df9b..300d8f97a 100644\n--- a/drivers/net/ipn3ke/ipn3ke_flow.c\n+++ b/drivers/net/ipn3ke/ipn3ke_flow.c\n@@ -96,7 +96,7 @@ ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],\n \n \t\t\trte_memcpy(&parser->key[0],\n \t\t\t\t\teth->src.addr_bytes,\n-\t\t\t\t\tETHER_ADDR_LEN);\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN);\n \t\t\tbreak;\n \n \t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n@@ -160,7 +160,7 @@ ipn3ke_pattern_mac(const struct rte_flow_item patterns[],\n \n \t\t\trte_memcpy(parser->key,\n \t\t\t\t\teth->src.addr_bytes,\n-\t\t\t\t\tETHER_ADDR_LEN);\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN);\n \t\t\tbreak;\n \n \t\tdefault:\ndiff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c\nindex b18d183f0..f4529f63f 100644\n--- a/drivers/net/ipn3ke/ipn3ke_representor.c\n+++ b/drivers/net/ipn3ke/ipn3ke_representor.c\n@@ -128,7 +128,7 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev)\n \t\t\t&rpst->mac_addr);\n \n \trte_ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);\n-\tdev->data->mac_addrs->addr_bytes[ETHER_ADDR_LEN - 1] =\n+\tdev->data->mac_addrs->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n \t\t(uint8_t)rpst->port_id + 1;\n \n \tif (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {\n@@ -692,7 +692,7 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)\n \tuint32_t frame_size = mtu  + IPN3KE_ETH_OVERHEAD;\n \n \t/* check if mtu is within the allowed range */\n-\tif (mtu < ETHER_MIN_MTU ||\n+\tif (mtu < RTE_ETHER_MIN_MTU ||\n 
\t\tframe_size > IPN3KE_MAC_FRAME_SIZE_MAX)\n \t\treturn -EINVAL;\n \n@@ -712,7 +712,7 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)\n \t\treturn -EBUSY;\n \t}\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\tdev_data->dev_conf.rxmode.offloads |=\n \t\t\t(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);\n \telse\n@@ -837,7 +837,7 @@ ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)\n \trpst->i40e_pf_eth = NULL;\n \trpst->i40e_pf_eth_port_id = 0xFFFF;\n \n-\tethdev->data->mac_addrs = rte_zmalloc(\"ipn3ke\", ETHER_ADDR_LEN, 0);\n+\tethdev->data->mac_addrs = rte_zmalloc(\"ipn3ke\", RTE_ETHER_ADDR_LEN, 0);\n \tif (!ethdev->data->mac_addrs) {\n \t\tIPN3KE_AFU_PMD_ERR(\"Failed to \"\n \t\t\t\"allocated memory for storing mac address\");\n@@ -860,7 +860,7 @@ ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)\n \tethdev->data->nb_tx_queues = 1;\n \n \tethdev->data->mac_addrs = rte_zmalloc(\"ipn3ke_afu_representor\",\n-\t\t\t\t\t\tETHER_ADDR_LEN,\n+\t\t\t\t\t\tRTE_ETHER_ADDR_LEN,\n \t\t\t\t\t\t0);\n \tif (!ethdev->data->mac_addrs) {\n \t\tIPN3KE_AFU_PMD_ERR(\"Failed to \"\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c\nindex 2f97f0f8d..581056d19 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.c\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.c\n@@ -1213,13 +1213,13 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)\n \tixgbe_reset_qstat_mappings(hw);\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"ixgbe\", ETHER_ADDR_LEN *\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"ixgbe\", RTE_ETHER_ADDR_LEN *\n \t\t\t\t\t       hw->mac.num_rar_entries, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t     \"Failed to allocate %u bytes needed to store \"\n \t\t\t     \"MAC addresses\",\n-\t\t\t     ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n+\t\t\t     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n \t\treturn -ENOMEM;\n \t}\n \t/* Copy the permanent MAC address */\n@@ -1227,12 +1227,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)\n \t\t\t&eth_dev->data->mac_addrs[0]);\n \n \t/* Allocate memory for storing hash filter MAC addresses */\n-\teth_dev->data->hash_mac_addrs = rte_zmalloc(\"ixgbe\", ETHER_ADDR_LEN *\n-\t\t\t\t\t\t    IXGBE_VMDQ_NUM_UC_MAC, 0);\n+\teth_dev->data->hash_mac_addrs = rte_zmalloc(\n+\t\t\"ixgbe\", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);\n \tif (eth_dev->data->hash_mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t     \"Failed to allocate %d bytes needed to store MAC addresses\",\n-\t\t\t     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);\n+\t\t\t     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1499,7 +1499,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)\n \t}\n \tl2_tn_info->e_tag_en = FALSE;\n \tl2_tn_info->e_tag_fwd_en = FALSE;\n-\tl2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;\n+\tl2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;\n \n \treturn 0;\n }\n@@ -1540,7 +1540,7 @@ generate_random_mac_addr(struct rte_ether_addr *mac_addr)\n \tmac_addr->addr_bytes[1] = 0x09;\n \tmac_addr->addr_bytes[2] = 0xC0;\n \t/* Force indication of locally assigned MAC address. */\n-\tmac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;\n+\tmac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;\n \t/* Generate the last 3 bytes of the MAC address with a random number. 
*/\n \trandom = rte_rand();\n \tmemcpy(&mac_addr->addr_bytes[3], &random, 3);\n@@ -1647,13 +1647,13 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)\n \tixgbevf_get_queues(hw, &tcs, &tc);\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"ixgbevf\", ETHER_ADDR_LEN *\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"ixgbevf\", RTE_ETHER_ADDR_LEN *\n \t\t\t\t\t       hw->mac.num_rar_entries, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t     \"Failed to allocate %u bytes needed to store \"\n \t\t\t     \"MAC addresses\",\n-\t\t\t     ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n+\t\t\t     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -3052,7 +3052,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,\n \t\thw_stats->qbrc[i] +=\n \t\t    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);\n \t\tif (crc_strip == 0)\n-\t\t\thw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;\n+\t\t\thw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;\n \n \t\thw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));\n \t\thw_stats->qbtc[i] +=\n@@ -3097,12 +3097,12 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,\n \thw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);\n \n \tif (crc_strip == 0)\n-\t\thw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;\n+\t\thw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;\n \n \tuint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);\n \thw_stats->gptc += delta_gptc;\n-\thw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;\n-\thw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;\n+\thw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;\n+\thw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;\n \n \t/*\n \t * Workaround: mprc hardware is incorrectly counting\n@@ -3132,7 +3132,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,\n \thw_stats->gptc -= total;\n \thw_stats->mptc -= total;\n \thw_stats->ptc64 -= total;\n-\thw_stats->gotc -= total * ETHER_MIN_LEN;\n+\thw_stats->gotc -= total * RTE_ETHER_MIN_LEN;\n \n \thw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);\n \thw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);\n@@ -3754,7 +3754,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \telse\n \t\tdev_info->max_vmdq_pools = ETH_64_POOLS;\n \tdev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;\n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \tdev_info->vmdq_queue_num = dev_info->max_rx_queues;\n \tdev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);\n \tdev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |\n@@ -4555,7 +4555,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n \t * At least reserve one Ethernet frame for watermark\n \t * high_water/low_water in kilo bytes for ixgbe\n \t */\n-\tmax_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;\n+\tmax_high_water = (rx_buf_size -\n+\t\t\tRTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;\n \tif ((fc_conf->high_water > max_high_water) ||\n \t\t(fc_conf->high_water < fc_conf->low_water)) {\n \t\tPMD_INIT_LOG(ERR, \"Invalid high/low water setup value in KB\");\n@@ -4776,7 +4777,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p\n \t * At least reserve one Ethernet frame for watermark\n \t * high_water/low_water in kilo bytes for ixgbe\n \t */\n-\tmax_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;\n+\tmax_high_water = 
(rx_buf_size -\n+\t\t\tRTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;\n \tif ((pfc_conf->fc.high_water > max_high_water) ||\n \t    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {\n \t\tPMD_INIT_LOG(ERR, \"Invalid high/low water setup value in KB\");\n@@ -4957,7 +4959,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \tixgbe_dev_info_get(dev, &dev_info);\n \n \t/* check that mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)\n \t\treturn -EINVAL;\n \n \t/* If device is started, refuse mtu that requires the support of\n@@ -4974,7 +4976,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \thlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);\n \n \t/* switch to jumbo mode if needed */\n-\tif (frame_size > ETHER_MAX_LEN) {\n+\tif (frame_size > RTE_ETHER_MAX_LEN) {\n \t\tdev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \t\thlreg0 |= IXGBE_HLREG0_JUMBOEN;\n@@ -6363,7 +6365,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \n \thw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n-\tif ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))\n+\tif (mtu < RTE_ETHER_MIN_MTU ||\n+\t\t\tmax_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)\n \t\treturn -EINVAL;\n \n \t/* If device is started, refuse mtu that requires the support of\n@@ -6660,8 +6663,8 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,\n \tif (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)\n \t\treturn -EINVAL;\n \n-\tif (filter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\tfilter->ether_type == ETHER_TYPE_IPv6) {\n+\tif (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\tfilter->ether_type == RTE_ETHER_TYPE_IPv6) {\n \t\tPMD_DRV_LOG(ERR, \"unsupported ether_type(0x%04x) in\"\n \t\t\t\" ethertype filter.\", filter->ether_type);\n \t\treturn -EINVAL;\n@@ -7069,7 +7072,7 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev)\n \n \t/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n \tIXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),\n-\t\t\t(ETHER_TYPE_1588 |\n+\t\t\t(RTE_ETHER_TYPE_1588 |\n \t\t\t IXGBE_ETQF_FILTER_EN |\n \t\t\t IXGBE_ETQF_1588));\n \ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h\nindex d1f61e85e..fdad94d58 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.h\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.h\n@@ -102,7 +102,7 @@\n #define IXGBE_5TUPLE_MIN_PRI            1\n \n /* The overhead from MTU to max frame size. 
*/\n-#define IXGBE_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN)\n+#define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)\n \n /* bit of VXLAN tunnel type | 7 bits of zeros  | 8 bits of zeros*/\n #define IXGBE_FDIR_VXLAN_TUNNEL_TYPE    0x8000\n@@ -258,7 +258,7 @@ struct ixgbe_mirror_info {\n };\n \n struct ixgbe_vf_info {\n-\tuint8_t vf_mac_addresses[ETHER_ADDR_LEN];\n+\tuint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];\n \tuint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];\n \tuint16_t num_vf_mc_hashes;\n \tuint16_t default_vf_vlan_id;\ndiff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c\nindex 7024354cd..23aba0a47 100644\n--- a/drivers/net/ixgbe/ixgbe_flow.c\n+++ b/drivers/net/ixgbe/ixgbe_flow.c\n@@ -887,8 +887,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,\n \t\treturn -rte_errno;\n \t}\n \n-\tif (filter->ether_type == ETHER_TYPE_IPv4 ||\n-\t\tfilter->ether_type == ETHER_TYPE_IPv6) {\n+\tif (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||\n+\t\tfilter->ether_type == RTE_ETHER_TYPE_IPv6) {\n \t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n \t\trte_flow_error_set(error, EINVAL,\n \t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n@@ -1705,7 +1705,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,\n \t\t\teth_spec = item->spec;\n \n \t\t\t/* Get the dst MAC. */\n-\t\t\tfor (j = 0; j < ETHER_ADDR_LEN; j++) {\n+\t\t\tfor (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {\n \t\t\t\trule->ixgbe_fdir.formatted.inner_mac[j] =\n \t\t\t\t\teth_spec->dst.addr_bytes[j];\n \t\t\t}\n@@ -1734,7 +1734,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,\n \t\t\t * src MAC address must be masked,\n \t\t\t * and don't support dst MAC address mask.\n \t\t\t */\n-\t\t\tfor (j = 0; j < ETHER_ADDR_LEN; j++) {\n+\t\t\tfor (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {\n \t\t\t\tif (eth_mask->src.addr_bytes[j] ||\n \t\t\t\t\teth_mask->dst.addr_bytes[j] != 0xFF) {\n \t\t\t\t\tmemset(rule, 0,\n@@ -2660,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,\n \t}\n \n \t/* src MAC address should be masked. */\n-\tfor (j = 0; j < ETHER_ADDR_LEN; j++) {\n+\tfor (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {\n \t\tif (eth_mask->src.addr_bytes[j]) {\n \t\t\tmemset(rule, 0,\n \t\t\t       sizeof(struct ixgbe_fdir_rule));\n@@ -2671,7 +2671,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,\n \t\t}\n \t}\n \trule->mask.mac_addr_byte_mask = 0;\n-\tfor (j = 0; j < ETHER_ADDR_LEN; j++) {\n+\tfor (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {\n \t\t/* It's a per byte mask. */\n \t\tif (eth_mask->dst.addr_bytes[j] == 0xFF) {\n \t\t\trule->mask.mac_addr_byte_mask |= 0x1 << j;\n@@ -2692,7 +2692,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,\n \t\teth_spec = item->spec;\n \n \t\t/* Get the dst MAC. 
*/\n-\t\tfor (j = 0; j < ETHER_ADDR_LEN; j++) {\n+\t\tfor (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {\n \t\t\trule->ixgbe_fdir.formatted.inner_mac[j] =\n \t\t\t\teth_spec->dst.addr_bytes[j];\n \t\t}\ndiff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c\nindex a2ae703df..c88d56e24 100644\n--- a/drivers/net/ixgbe/ixgbe_pf.c\n+++ b/drivers/net/ixgbe/ixgbe_pf.c\n@@ -40,7 +40,7 @@ dev_num_vf(struct rte_eth_dev *eth_dev)\n static inline\n int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)\n {\n-\tunsigned char vf_mac_addr[ETHER_ADDR_LEN];\n+\tunsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];\n \tstruct ixgbe_vf_info *vfinfo =\n \t\t*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);\n \tuint16_t vfn;\n@@ -49,7 +49,7 @@ int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)\n \t\trte_eth_random_addr(vf_mac_addr);\n \t\t/* keep the random address as default */\n \t\tmemcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,\n-\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t   RTE_ETHER_ADDR_LEN);\n \t}\n \n \treturn 0;\n@@ -443,7 +443,7 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)\n \n \t/* reply to reset with ack and vf mac address */\n \tmsgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;\n-\trte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);\n+\trte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);\n \t/*\n \t * Piggyback the multicast filter type so VF can compute the\n \t * correct vectors\n@@ -547,7 +547,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms\n \tstruct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \tuint32_t new_mtu = msgbuf[1];\n \tuint32_t max_frs;\n-\tint max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tint max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;\n \n \t/* X540 and X550 support jumbo frames in IOV mode */\n \tif (hw->mac.type != ixgbe_mac_X540 &&\n@@ -556,7 +556,8 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms\n \t\thw->mac.type != ixgbe_mac_X550EM_a)\n \t\treturn -1;\n \n-\tif ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))\n+\tif (max_frame < RTE_ETHER_MIN_LEN ||\n+\t\t\tmax_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)\n \t\treturn -1;\n \n \tmax_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex 1fbc754ae..ebe197852 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -2940,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\tqueue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);\n \trxq->port_id = dev->data->port_id;\n \tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \telse\n \t\trxq->crc_len = 0;\n \trxq->drop_en = rx_conf->rx_drop_en;\n@@ -3965,7 +3965,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,\n \tuint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};\n \tuint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};\n \tstruct ixgbe_dcb_tc_config *tc;\n-\tuint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tuint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +\n+\t\tRTE_ETHER_CRC_LEN;\n \tstruct ixgbe_hw *hw =\n \t\t\tIXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \tstruct ixgbe_bw_conf *bw_conf =\n@@ -4948,7 +4949,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\t * call to configure.\n \t\t */\n \t\tif (rx_conf->offloads & 
DEV_RX_OFFLOAD_KEEP_CRC)\n-\t\t\trxq->crc_len = ETHER_CRC_LEN;\n+\t\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n \t\telse\n \t\t\trxq->crc_len = 0;\n \ndiff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c\nindex db2191862..077afab0e 100644\n--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c\n+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c\n@@ -38,7 +38,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,\n \tif (rte_is_valid_assigned_ether_addr(\n \t\t\t(struct rte_ether_addr *)new_mac)) {\n \t\trte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,\n-\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t   RTE_ETHER_ADDR_LEN);\n \t\treturn hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,\n \t\t\t\t\t   IXGBE_RAH_AV);\n \t}\n@@ -155,7 +155,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)\n \tif (vf >= pci_dev->max_vfs)\n \t\treturn -EINVAL;\n \n-\tif (vlan_id > ETHER_MAX_VLAN_ID)\n+\tif (vlan_id > RTE_ETHER_MAX_VLAN_ID)\n \t\treturn -EINVAL;\n \n \thw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n@@ -477,7 +477,7 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,\n \tif (!is_ixgbe_supported(dev))\n \t\treturn -ENOTSUP;\n \n-\tif ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))\n+\tif (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0)\n \t\treturn -EINVAL;\n \n \thw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\ndiff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c\nindex 1cb1f067f..42a45333d 100644\n--- a/drivers/net/kni/rte_eth_kni.c\n+++ b/drivers/net/kni/rte_eth_kni.c\n@@ -20,7 +20,7 @@\n #define MAX_KNI_PORTS 8\n \n #define KNI_ETHER_MTU(mbuf_size)       \\\n-\t((mbuf_size) - ETHER_HDR_LEN) /**< Ethernet MTU. */\n+\t((mbuf_size) - RTE_ETHER_HDR_LEN) /**< Ethernet MTU. */\n \n #define ETH_KNI_NO_REQUEST_THREAD_ARG\t\"no_request_thread\"\n static const char * const valid_arguments[] = {\ndiff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c\nindex 21ded9ef3..c25dab00c 100644\n--- a/drivers/net/liquidio/lio_ethdev.c\n+++ b/drivers/net/liquidio/lio_ethdev.c\n@@ -430,7 +430,7 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)\n {\n \tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n \tuint16_t pf_mtu = lio_dev->linfo.link.s.mtu;\n-\tuint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;\n+\tuint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;\n \tstruct lio_dev_ctrl_cmd ctrl_cmd;\n \tstruct lio_ctrl_pkt ctrl_pkt;\n \n@@ -445,9 +445,9 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)\n \t/* check if VF MTU is within allowed range.\n \t * New value should not exceed PF MTU.\n \t */\n-\tif ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {\n+\tif (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {\n \t\tlio_dev_err(lio_dev, \"VF MTU should be >= %d and <= %d\\n\",\n-\t\t\t    ETHER_MIN_MTU, pf_mtu);\n+\t\t\t    RTE_ETHER_MIN_MTU, pf_mtu);\n \t\treturn -EINVAL;\n \t}\n \n@@ -476,7 +476,7 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)\n \t\treturn -1;\n \t}\n \n-\tif (frame_len > ETHER_MAX_LEN)\n+\tif (frame_len > RTE_ETHER_MAX_LEN)\n \t\teth_dev->data->dev_conf.rxmode.offloads |=\n \t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n@@ -1429,9 +1429,9 @@ lio_dev_start(struct rte_eth_dev *eth_dev)\n \t\tgoto dev_mtu_set_error;\n \t}\n \n-\tmtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);\n-\tif (mtu < ETHER_MIN_MTU)\n-\t\tmtu = ETHER_MIN_MTU;\n+\tmtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);\n+\tif (mtu < RTE_ETHER_MIN_MTU)\n+\t\tmtu = 
RTE_ETHER_MIN_MTU;\n \n \tif (eth_dev->data->mtu != mtu) {\n \t\tret = lio_dev_mtu_set(eth_dev, mtu);\n@@ -1712,7 +1712,7 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)\n \tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n \tuint16_t timeout = LIO_MAX_CMD_TIMEOUT;\n \tint retval, num_iqueues, num_oqueues;\n-\tuint8_t mac[ETHER_ADDR_LEN], i;\n+\tuint8_t mac[RTE_ETHER_ADDR_LEN], i;\n \tstruct lio_if_cfg_resp *resp;\n \tstruct lio_soft_command *sc;\n \tunion lio_if_cfg if_cfg;\n@@ -1830,7 +1830,7 @@ lio_dev_configure(struct rte_eth_dev *eth_dev)\n \n \t/* 64-bit swap required on LE machines */\n \tlio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);\n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n \t\tmac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +\n \t\t\t\t       2 + i));\n \n@@ -2090,7 +2090,7 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \n \teth_dev->dev_ops = &liovf_eth_dev_ops;\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"lio\", ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"lio\", RTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tlio_dev_err(lio_dev,\n \t\t\t    \"MAC addresses memory allocation failed\\n\");\ndiff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c\nindex 460c10f89..640e06f74 100644\n--- a/drivers/net/mlx4/mlx4.c\n+++ b/drivers/net/mlx4/mlx4.c\n@@ -945,7 +945,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \t\tpriv->device_attr = device_attr;\n \t\tpriv->port = port;\n \t\tpriv->pd = pd;\n-\t\tpriv->mtu = ETHER_MTU;\n+\t\tpriv->mtu = RTE_ETHER_MTU;\n \t\tpriv->vf = vf;\n \t\tpriv->hw_csum =\t!!(device_attr.device_cap_flags &\n \t\t\t\t   IBV_DEVICE_RAW_IP_CSUM);\ndiff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h\nindex 4e9bb9c1a..d4e56e879 100644\n--- a/drivers/net/mlx4/mlx4.h\n+++ b/drivers/net/mlx4/mlx4.h\n@@ -199,7 +199,7 @@ struct mlx4_priv {\n /* mlx4_ethdev.c */\n \n int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);\n-int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);\n+int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);\n int mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu);\n int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);\n int mlx4_dev_set_link_down(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c\nindex 6639037f0..3d18aa4fc 100644\n--- a/drivers/net/mlx4/mlx4_ethdev.c\n+++ b/drivers/net/mlx4/mlx4_ethdev.c\n@@ -176,14 +176,14 @@ mlx4_ifreq(const struct mlx4_priv *priv, int req, struct ifreq *ifr)\n  *   0 on success, negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])\n+mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])\n {\n \tstruct ifreq request;\n \tint ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request);\n \n \tif (ret)\n \t\treturn ret;\n-\tmemcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);\n+\tmemcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);\n \treturn 0;\n }\n \ndiff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c\nindex 244f19311..96479b83d 100644\n--- a/drivers/net/mlx4/mlx4_flow.c\n+++ b/drivers/net/mlx4/mlx4_flow.c\n@@ -224,7 +224,7 @@ mlx4_flow_merge_eth(struct rte_flow *flow,\n \t\t\t\tgoto error;\n \t\t\t}\n \t\t\tflow->allmulti = 1;\n-\t\t} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {\n+\t\t} 
else if (sum_dst != (UINT8_C(0xff) * RTE_ETHER_ADDR_LEN)) {\n \t\t\tmsg = \"mlx4 does not support matching partial\"\n \t\t\t\t\" Ethernet fields\";\n \t\t\tgoto error;\n@@ -252,12 +252,12 @@ mlx4_flow_merge_eth(struct rte_flow *flow,\n \t\tflow->promisc = 1;\n \t\treturn 0;\n \t}\n-\tmemcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);\n-\tmemcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);\n+\tmemcpy(eth->val.dst_mac, spec->dst.addr_bytes, RTE_ETHER_ADDR_LEN);\n+\tmemcpy(eth->mask.dst_mac, mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);\n \t/* Remove unwanted bits from values. */\n-\tfor (i = 0; i < ETHER_ADDR_LEN; ++i) {\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)\n \t\teth->val.dst_mac[i] &= eth->mask.dst_mac[i];\n-\t}\n+\n \treturn 0;\n error:\n \treturn rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\ndiff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c\nindex 391271a61..a941a8a2f 100644\n--- a/drivers/net/mlx4/mlx4_rxtx.c\n+++ b/drivers/net/mlx4/mlx4_rxtx.c\n@@ -1281,7 +1281,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\t\tpkt->ol_flags = PKT_RX_RSS_HASH;\n \t\t\tpkt->hash.rss = cqe->immed_rss_invalid;\n \t\t\tif (rxq->crc_present)\n-\t\t\t\tlen -= ETHER_CRC_LEN;\n+\t\t\t\tlen -= RTE_ETHER_CRC_LEN;\n \t\t\tpkt->pkt_len = len;\n \t\t\tif (rxq->csum | rxq->csum_l2tun) {\n \t\t\t\tuint32_t flags =\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 2798e0e95..57a62215a 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1282,7 +1282,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t}\n \tpriv->sh = sh;\n \tpriv->ibv_port = spawn->ibv_port;\n-\tpriv->mtu = ETHER_MTU;\n+\tpriv->mtu = RTE_ETHER_MTU;\n #ifndef RTE_ARCH_64\n \t/* Initialize UAR access locks for 32bit implementations. 
*/\n \trte_spinlock_init(&priv->uar_lock_cq);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 80df971e5..6738a5067 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -440,7 +440,7 @@ void mlx5_translate_port_name(const char *port_name_in,\n \n /* mlx5_mac.c */\n \n-int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]);\n+int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);\n void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);\n int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,\n \t\t      uint32_t index, uint32_t vmdq);\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex e8560fd42..6a706fdfe 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -1247,7 +1247,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,\n \t\t\t\t\t\t(void *)items->type,\n \t\t\t\t\t\t\"eth header not found\");\n \t\t\tif (!eth->ether_type)\n-\t\t\t\teth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);\n+\t\t\t\teth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n \t\t\tipv4 = (struct ipv4_hdr *)&buf[temp_size];\n@@ -1258,9 +1258,9 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,\n \t\t\t\t\t\t\"neither eth nor vlan\"\n \t\t\t\t\t\t\" header found\");\n \t\t\tif (vlan && !vlan->eth_proto)\n-\t\t\t\tvlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);\n+\t\t\t\tvlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv4);\n \t\t\telse if (eth && !eth->ether_type)\n-\t\t\t\teth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);\n+\t\t\t\teth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv4);\n \t\t\tif (!ipv4->version_ihl)\n \t\t\t\tipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |\n \t\t\t\t\t\t    MLX5_ENCAP_IPV4_IHL_MIN;\n@@ -1276,9 +1276,9 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,\n \t\t\t\t\t\t\"neither eth nor vlan\"\n \t\t\t\t\t\t\" header found\");\n \t\t\tif (vlan && !vlan->eth_proto)\n-\t\t\t\tvlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);\n+\t\t\t\tvlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv6);\n \t\t\telse if (eth && !eth->ether_type)\n-\t\t\t\teth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);\n+\t\t\t\teth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv6);\n \t\t\tif (!ipv6->vtc_flow)\n \t\t\t\tipv6->vtc_flow =\n \t\t\t\t\tRTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);\n@@ -3094,7 +3094,7 @@ flow_dv_translate_item_mpls(void *matcher, void *key,\n \tcase MLX5_FLOW_LAYER_GRE:\n \t\tMLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);\n \t\tMLX5_SET(fte_match_set_misc, misc_v, gre_protocol,\n-\t\t\t ETHER_TYPE_MPLS);\n+\t\t\t RTE_ETHER_TYPE_MPLS);\n \t\tbreak;\n \tdefault:\n \t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);\ndiff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c\nindex 48d54e8de..7b9a15e7e 100644\n--- a/drivers/net/mlx5/mlx5_flow_tcf.c\n+++ b/drivers/net/mlx5/mlx5_flow_tcf.c\n@@ -707,7 +707,7 @@ flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,\n \tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\n \tmemcpy(&p_parser->keys[idx].val,\n \t\tconf->mac_addr + SZ_PEDIT_KEY_VAL,\n-\t\tETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);\n+\t\tRTE_ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);\n \tp_parser->sel.nkeys = (++idx);\n }\n \n@@ -984,11 +984,11 @@ flow_tcf_get_pedit_actions_size(const struct rte_flow_action **actions,\n \t\t\tflags |= MLX5_FLOW_ACTION_DEC_TTL;\n 
\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);\n+\t\t\tkeys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);\n \t\t\tflags |= MLX5_FLOW_ACTION_SET_MAC_SRC;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);\n+\t\t\tkeys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);\n \t\t\tflags |= MLX5_FLOW_ACTION_SET_MAC_DST;\n \t\t\tbreak;\n \t\tdefault:\n@@ -2521,7 +2521,7 @@ flow_tcf_get_items_size(const struct rte_flow_attr *attr,\n \t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n-\t\t\tsize += SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;\n+\t\t\tsize += SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) * 4;\n \t\t\t\t/* dst/src MAC addr and mask. */\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n@@ -3336,18 +3336,18 @@ flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,\n \t\t\t}\n \t\t\tif (!rte_is_zero_ether_addr(&mask.eth->dst)) {\n \t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,\n-\t\t\t\t\t     ETHER_ADDR_LEN,\n+\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\n \t\t\t\t\t     spec.eth->dst.addr_bytes);\n \t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,\n-\t\t\t\t\t     ETHER_ADDR_LEN,\n+\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\n \t\t\t\t\t     mask.eth->dst.addr_bytes);\n \t\t\t}\n \t\t\tif (!rte_is_zero_ether_addr(&mask.eth->src)) {\n \t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,\n-\t\t\t\t\t     ETHER_ADDR_LEN,\n+\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\n \t\t\t\t\t     spec.eth->src.addr_bytes);\n \t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,\n-\t\t\t\t\t     ETHER_ADDR_LEN,\n+\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\n \t\t\t\t\t     mask.eth->src.addr_bytes);\n \t\t\t}\n \t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\n@@ -4395,7 +4395,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)\n \t/* Neigh rule with permanent attribute found. */\n \tsize = MNL_ALIGN(sizeof(struct nlmsghdr)) +\n \t       MNL_ALIGN(sizeof(struct ndmsg)) +\n-\t       SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +\n+\t       SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) +\n \t       (family == AF_INET6 ? 
SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)\n \t\t\t\t   : SZ_NLATTR_TYPE_OF(uint32_t));\n \tcmd = flow_tcf_alloc_nlcmd(ctx, size);\n@@ -4419,7 +4419,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)\n \t\tmnl_attr_put(cmd, NDA_DST, IPV6_ADDR_LEN,\n \t\t\t     mnl_attr_get_payload(na_ip));\n \t}\n-\tmnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,\n+\tmnl_attr_put(cmd, NDA_LLADDR, RTE_ETHER_ADDR_LEN,\n \t\t     mnl_attr_get_payload(na_mac));\n \tassert(size == cmd->nlmsg_len);\n \treturn 1;\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex c44e15dfb..55aa8054f 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -290,14 +290,18 @@ flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,\n \tif (spec) {\n \t\tunsigned int i;\n \n-\t\tmemcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);\n-\t\tmemcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(&eth.val.dst_mac, spec->dst.addr_bytes,\n+\t\t\tRTE_ETHER_ADDR_LEN);\n+\t\tmemcpy(&eth.val.src_mac, spec->src.addr_bytes,\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t\teth.val.ether_type = spec->type;\n-\t\tmemcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);\n-\t\tmemcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,\n+\t\t\tRTE_ETHER_ADDR_LEN);\n+\t\tmemcpy(&eth.mask.src_mac, mask->src.addr_bytes,\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t\teth.mask.ether_type = mask->type;\n \t\t/* Remove unwanted bits from values. */\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; ++i) {\n+\t\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {\n \t\t\teth.val.dst_mac[i] &= eth.mask.dst_mac[i];\n \t\t\teth.val.src_mac[i] &= eth.mask.src_mac[i];\n \t\t}\ndiff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c\nindex 9204c5f52..0ffef5c5d 100644\n--- a/drivers/net/mlx5/mlx5_mac.c\n+++ b/drivers/net/mlx5/mlx5_mac.c\n@@ -44,7 +44,7 @@\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])\n+mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])\n {\n \tstruct ifreq request;\n \tint ret;\n@@ -52,7 +52,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])\n \tret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);\n \tif (ret)\n \t\treturn ret;\n-\tmemcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);\n+\tmemcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);\n \treturn 0;\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c\nindex 50cd32a09..5773fa75c 100644\n--- a/drivers/net/mlx5/mlx5_nl.c\n+++ b/drivers/net/mlx5/mlx5_nl.c\n@@ -344,7 +344,7 @@ mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg)\n \t\t\tDRV_LOG(DEBUG, \"bridge MAC address %s\", m);\n #endif\n \t\t\tmemcpy(&(*data->mac)[data->mac_n++],\n-\t\t\t       RTA_DATA(attribute), ETHER_ADDR_LEN);\n+\t\t\t       RTA_DATA(attribute), RTE_ETHER_ADDR_LEN);\n \t\t}\n \t}\n \treturn 0;\n@@ -433,7 +433,7 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct rte_ether_addr *mac,\n \t\tstruct nlmsghdr hdr;\n \t\tstruct ndmsg ndm;\n \t\tstruct rtattr rta;\n-\t\tuint8_t buffer[ETHER_ADDR_LEN];\n+\t\tuint8_t buffer[RTE_ETHER_ADDR_LEN];\n \t} req = {\n \t\t.hdr = {\n \t\t\t.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),\n@@ -449,7 +449,7 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct rte_ether_addr *mac,\n \t\t},\n \t\t.rta = {\n \t\t\t.rta_type = 
NDA_LLADDR,\n-\t\t\t.rta_len = RTA_LENGTH(ETHER_ADDR_LEN),\n+\t\t\t.rta_len = RTA_LENGTH(RTE_ETHER_ADDR_LEN),\n \t\t},\n \t};\n \tint fd;\n@@ -459,7 +459,7 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct rte_ether_addr *mac,\n \tif (priv->nl_socket_route == -1)\n \t\treturn 0;\n \tfd = priv->nl_socket_route;\n-\tmemcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN);\n+\tmemcpy(RTA_DATA(&req.rta), mac, RTE_ETHER_ADDR_LEN);\n \treq.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +\n \t\tRTA_ALIGN(req.rta.rta_len);\n \tret = mlx5_nl_send(fd, &req.hdr, sn);\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 3da3f62fa..7174ffc91 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -632,7 +632,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\tif (buf->ol_flags & PKT_TX_VLAN_PKT) {\n \t\t\tuint32_t vlan = rte_cpu_to_be_32(0x81000000 |\n \t\t\t\t\t\t\t buf->vlan_tci);\n-\t\t\tunsigned int len = 2 * ETHER_ADDR_LEN - 2;\n+\t\t\tunsigned int len = 2 * RTE_ETHER_ADDR_LEN - 2;\n \n \t\t\taddr += 2;\n \t\t\tlength -= 2;\n@@ -2058,7 +2058,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\t\t\t\t\t\tmcqe->rx_hash_result);\n \t\t\trxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);\n \t\t\tif (rxq->crc_present)\n-\t\t\t\tlen -= ETHER_CRC_LEN;\n+\t\t\t\tlen -= RTE_ETHER_CRC_LEN;\n \t\t\tPKT_LEN(pkt) = len;\n \t\t}\n \t\tDATA_LEN(rep) = DATA_LEN(seg);\n@@ -2264,7 +2264,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\tlen = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;\n \t\tassert((int)len >= (rxq->crc_present << 2));\n \t\tif (rxq->crc_present)\n-\t\t\tlen -= ETHER_CRC_LEN;\n+\t\t\tlen -= RTE_ETHER_CRC_LEN;\n \t\toffset = strd_idx * strd_sz + strd_shift;\n \t\taddr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);\n \t\t/* Initialize the offload flag. */\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\nindex 38e915c5c..b2cc71088 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n@@ -379,7 +379,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,\n \t};\n \t/* Restore the compressed count. Must be 16 bits. 
*/\n \tconst uint16_t mcqe_n = t_pkt->data_len +\n-\t\t\t\t(rxq->crc_present * ETHER_CRC_LEN);\n+\t\t\t\t(rxq->crc_present * RTE_ETHER_CRC_LEN);\n \tconst uint64x2_t rearm =\n \t\tvld1q_u64((void *)&t_pkt->rearm_data);\n \tconst uint32x4_t rxdf_mask = {\n@@ -393,8 +393,8 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,\n \t\t\t vreinterpretq_u8_u32(rxdf_mask));\n \tconst uint16x8_t crc_adj = {\n \t\t0, 0,\n-\t\trxq->crc_present * ETHER_CRC_LEN, 0,\n-\t\trxq->crc_present * ETHER_CRC_LEN, 0,\n+\t\trxq->crc_present * RTE_ETHER_CRC_LEN, 0,\n+\t\trxq->crc_present * RTE_ETHER_CRC_LEN, 0,\n \t\t0, 0\n \t};\n \tconst uint32_t flow_tag = t_pkt->hash.fdir.hi;\n@@ -717,7 +717,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,\n \t\t12, 13, 14, -1  /* 1st CQE */\n \t};\n \tconst uint16x8_t crc_adj = {\n-\t\t0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0\n+\t\t0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0\n \t};\n \tconst uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\nindex fb384efde..dce3ee4b4 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n@@ -374,16 +374,16 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,\n \t\t\t    -1, -1, -1, -1  /* skip packet_type */);\n \t/* Restore the compressed count. Must be 16 bits. */\n \tconst uint16_t mcqe_n = t_pkt->data_len +\n-\t\t\t\t(rxq->crc_present * ETHER_CRC_LEN);\n+\t\t\t\t(rxq->crc_present * RTE_ETHER_CRC_LEN);\n \tconst __m128i rearm =\n \t\t_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);\n \tconst __m128i rxdf =\n \t\t_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);\n \tconst __m128i crc_adj =\n \t\t_mm_set_epi16(0, 0, 0,\n-\t\t\t      rxq->crc_present * ETHER_CRC_LEN,\n+\t\t\t      rxq->crc_present * RTE_ETHER_CRC_LEN,\n \t\t\t      0,\n-\t\t\t      rxq->crc_present * ETHER_CRC_LEN,\n+\t\t\t      rxq->crc_present * RTE_ETHER_CRC_LEN,\n \t\t\t      0, 0);\n \tconst uint32_t flow_tag = t_pkt->hash.fdir.hi;\n #ifdef MLX5_PMD_SOFT_COUNTERS\n@@ -699,9 +699,9 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,\n \tconst __m128i ones = _mm_cmpeq_epi32(zero, zero);\n \tconst __m128i crc_adj =\n \t\t_mm_set_epi16(0, 0, 0, 0, 0,\n-\t\t\t      rxq->crc_present * ETHER_CRC_LEN,\n+\t\t\t      rxq->crc_present * RTE_ETHER_CRC_LEN,\n \t\t\t      0,\n-\t\t\t      rxq->crc_present * ETHER_CRC_LEN);\n+\t\t\t      rxq->crc_present * RTE_ETHER_CRC_LEN);\n \tconst __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);\n \n \tassert(rxq->sges_n == 0);\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 69681e296..d5ce74d8c 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -337,7 +337,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)\n \t\t\tcontinue;\n \t\tmemcpy(&unicast.dst.addr_bytes,\n \t\t       mac->addr_bytes,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \t\tfor (j = 0; j != vlan_filter_n; ++j) {\n \t\t\tuint16_t vlan = priv->vlan_filter[j];\n \ndiff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c\nindex 9ec71c3c1..965744406 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.c\n+++ b/drivers/net/mvneta/mvneta_ethdev.c\n@@ -261,7 +261,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\t\tmbuf_data_size, mtu, mru);\n \t}\n 
\n-\tif (mtu < ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) {\n+\tif (mtu < RTE_ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) {\n \t\tMVNETA_LOG(ERR, \"Invalid MTU [%u] or MRU [%u]\", mtu, mru);\n \t\treturn -EINVAL;\n \t}\n@@ -586,7 +586,7 @@ static void\n mvneta_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)\n {\n \tstruct mvneta_priv *priv = dev->data->dev_private;\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \tint ret;\n \n \tif (!priv->ppio)\n@@ -621,7 +621,7 @@ mvneta_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n \t\t  uint32_t index, uint32_t vmdq __rte_unused)\n {\n \tstruct mvneta_priv *priv = dev->data->dev_private;\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \tint ret;\n \n \tif (index == 0)\n@@ -660,7 +660,7 @@ mvneta_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)\n \n \tret = neta_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);\n \tif (ret) {\n-\t\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\t\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \t\trte_ether_format_addr(buf, sizeof(buf), mac_addr);\n \t\tMVNETA_LOG(ERR, \"Failed to set mac to %s\", buf);\n \t}\n@@ -794,7 +794,7 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)\n \n \teth_dev->data->mac_addrs =\n \t\trte_zmalloc(\"mac_addrs\",\n-\t\t\t    ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0);\n+\t\t\t    RTE_ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0);\n \tif (!eth_dev->data->mac_addrs) {\n \t\tMVNETA_LOG(ERR, \"Failed to allocate space for eth addrs\");\n \t\tret = -ENOMEM;\n@@ -808,7 +808,7 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)\n \t\tgoto out_free;\n \n \tmemcpy(eth_dev->data->mac_addrs[0].addr_bytes,\n-\t       req.ifr_addr.sa_data, ETHER_ADDR_LEN);\n+\t       req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);\n \n \teth_dev->data->kdrv = RTE_KDRV_NONE;\n \teth_dev->device = &vdev->device;\ndiff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h\nindex 101b0a817..d892d6c67 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.h\n+++ b/drivers/net/mvneta/mvneta_ethdev.h\n@@ -45,9 +45,9 @@\n /** Rx queue descriptors alignment in B */\n #define MRVL_NETA_RXD_ALIGN 32\n \n-#define MRVL_NETA_VLAN_TAG_LEN\t\t4\n-#define MRVL_NETA_ETH_HDRS_LEN\t\t(ETHER_HDR_LEN + ETHER_CRC_LEN + \\\n-\t\t\t\t\tMRVL_NETA_VLAN_TAG_LEN)\n+#define MRVL_NETA_VLAN_TAG_LEN\t4\n+#define MRVL_NETA_ETH_HDRS_LEN\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \\\n+\t\t\t\tMRVL_NETA_VLAN_TAG_LEN)\n \n #define MRVL_NETA_HDRS_LEN\t\t(MV_MH_SIZE + MRVL_NETA_ETH_HDRS_LEN)\n #define MRVL_NETA_MTU_TO_MRU(mtu)\t((mtu) + MRVL_NETA_HDRS_LEN)\ndiff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c\nindex 4c6edb41d..d1d0d4186 100644\n--- a/drivers/net/mvpp2/mrvl_ethdev.c\n+++ b/drivers/net/mvpp2/mrvl_ethdev.c\n@@ -448,7 +448,7 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t\t\tmbuf_data_size, mtu, mru);\n \t}\n \n-\tif (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {\n+\tif (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {\n \t\tMRVL_LOG(ERR, \"Invalid MTU [%u] or MRU [%u]\", mtu, mru);\n \t\treturn -EINVAL;\n \t}\n@@ -1068,7 +1068,7 @@ static void\n mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)\n {\n \tstruct mrvl_priv *priv = dev->data->dev_private;\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \tint ret;\n \n \tif (!priv->ppio)\n@@ -1106,7 +1106,7 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct 
rte_ether_addr *mac_addr,\n \t\t  uint32_t index, uint32_t vmdq __rte_unused)\n {\n \tstruct mrvl_priv *priv = dev->data->dev_private;\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \tint ret;\n \n \tif (priv->isolated)\n@@ -1167,7 +1167,7 @@ mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)\n \n \tret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);\n \tif (ret) {\n-\t\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\t\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \t\trte_ether_format_addr(buf, sizeof(buf), mac_addr);\n \t\tMRVL_LOG(ERR, \"Failed to set mac to %s\", buf);\n \t}\n@@ -2787,7 +2787,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)\n \n \teth_dev->data->mac_addrs =\n \t\trte_zmalloc(\"mac_addrs\",\n-\t\t\t    ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);\n+\t\t\t    RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);\n \tif (!eth_dev->data->mac_addrs) {\n \t\tMRVL_LOG(ERR, \"Failed to allocate space for eth addrs\");\n \t\tret = -ENOMEM;\n@@ -2801,7 +2801,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)\n \t\tgoto out_free;\n \n \tmemcpy(eth_dev->data->mac_addrs[0].addr_bytes,\n-\t       req.ifr_addr.sa_data, ETHER_ADDR_LEN);\n+\t       req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);\n \n \teth_dev->data->kdrv = RTE_KDRV_NONE;\n \teth_dev->device = &vdev->device;\ndiff --git a/drivers/net/mvpp2/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h\nindex 0120b9e8a..db6632f5b 100644\n--- a/drivers/net/mvpp2/mrvl_ethdev.h\n+++ b/drivers/net/mvpp2/mrvl_ethdev.h\n@@ -72,9 +72,9 @@\n /** Minimum number of sent buffers to release from shadow queue to BM */\n #define MRVL_PP2_BUF_RELEASE_BURST_SIZE\t64\n \n-#define MRVL_PP2_VLAN_TAG_LEN\t\t4\n-#define MRVL_PP2_ETH_HDRS_LEN\t\t(ETHER_HDR_LEN + ETHER_CRC_LEN + \\\n-\t\t\t\t\t(2 * MRVL_PP2_VLAN_TAG_LEN))\n+#define MRVL_PP2_VLAN_TAG_LEN\t4\n+#define MRVL_PP2_ETH_HDRS_LEN\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \\\n+\t\t\t\t(2 * MRVL_PP2_VLAN_TAG_LEN))\n #define MRVL_PP2_HDRS_LEN\t\t(MV_MH_SIZE + MRVL_PP2_ETH_HDRS_LEN)\n #define MRVL_PP2_MTU_TO_MRU(mtu)\t((mtu) + MRVL_PP2_HDRS_LEN)\n #define MRVL_PP2_MRU_TO_MTU(mru)\t((mru) - MRVL_PP2_HDRS_LEN)\ndiff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c\nindex 35503df32..9e7cf2df5 100644\n--- a/drivers/net/netvsc/hn_ethdev.c\n+++ b/drivers/net/netvsc/hn_ethdev.c\n@@ -766,7 +766,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)\n \tif (!hv->primary)\n \t\treturn -ENOMEM;\n \n-\terr = hn_attach(hv, ETHER_MTU);\n+\terr = hn_attach(hv, RTE_ETHER_MTU);\n \tif  (err)\n \t\tgoto failed;\n \ndiff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c\nindex d58770e04..6b518685a 100644\n--- a/drivers/net/netvsc/hn_nvs.c\n+++ b/drivers/net/netvsc/hn_nvs.c\n@@ -323,7 +323,7 @@ hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)\n \n \tmemset(&conf, 0, sizeof(conf));\n \tconf.type = NVS_TYPE_NDIS_CONF;\n-\tconf.mtu = mtu + ETHER_HDR_LEN;\n+\tconf.mtu = mtu + RTE_ETHER_HDR_LEN;\n \tconf.caps = NVS_NDIS_CONF_VLAN;\n \n \t/* enable SRIOV */\ndiff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c\nindex 0134ecb67..4a1d49ffc 100644\n--- a/drivers/net/netvsc/hn_rndis.c\n+++ b/drivers/net/netvsc/hn_rndis.c\n@@ -1093,7 +1093,7 @@ hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr)\n \tuint32_t eaddr_len;\n \tint error;\n \n-\teaddr_len = ETHER_ADDR_LEN;\n+\teaddr_len = RTE_ETHER_ADDR_LEN;\n \terror = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0,\n \t\t\t       eaddr, 
eaddr_len);\n \tif (error)\ndiff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c\nindex 61924209c..7212780c1 100644\n--- a/drivers/net/netvsc/hn_rxtx.c\n+++ b/drivers/net/netvsc/hn_rxtx.c\n@@ -100,7 +100,7 @@ struct hn_txdesc {\n \n /* Minimum space required for a packet */\n #define HN_PKTSIZE_MIN(align) \\\n-\tRTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)\n+\tRTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)\n \n #define DEFAULT_TX_FREE_THRESH 32U\n \n@@ -606,7 +606,7 @@ static void hn_rndis_rx_data(struct hn_rx_queue *rxq,\n \tif (unlikely(data_off + data_len > pkt->len))\n \t\tgoto error;\n \n-\tif (unlikely(data_len < ETHER_HDR_LEN))\n+\tif (unlikely(data_len < RTE_ETHER_HDR_LEN))\n \t\tgoto error;\n \n \thn_rxpkt(rxq, rxb, data, data_off, data_len, &info);\ndiff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c\nindex ffdc2bf46..fdd0e701b 100644\n--- a/drivers/net/nfb/nfb_ethdev.c\n+++ b/drivers/net/nfb/nfb_ethdev.c\n@@ -363,7 +363,7 @@ nfb_eth_mac_addr_set(struct rte_eth_dev *dev,\n \tif (!rte_is_valid_assigned_ether_addr(mac_addr))\n \t\treturn -EINVAL;\n \n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++) {\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {\n \t\tmac <<= 8;\n \t\tmac |= mac_addr->addr_bytes[i] & 0xFF;\n \t}\ndiff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c\nindex 853b7800b..1a7aa17ee 100644\n--- a/drivers/net/nfp/nfp_net.c\n+++ b/drivers/net/nfp/nfp_net.c\n@@ -1213,7 +1213,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \n \tdev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;\n \tdev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;\n-\tdev_info->min_rx_bufsize = ETHER_MIN_MTU;\n+\tdev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;\n \tdev_info->max_rx_pktlen = hw->max_mtu;\n \t/* Next should change when PF support is implemented */\n \tdev_info->max_mac_addrs = 1;\n@@ -1486,7 +1486,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \thw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \t/* check that mtu is within the allowed range */\n-\tif ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))\n+\tif (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)\n \t\treturn -EINVAL;\n \n \t/* mtu setting is forbidden if port is started */\n@@ -1497,7 +1497,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n \t}\n \n \t/* switch to jumbo mode if needed */\n-\tif ((uint32_t)mtu > ETHER_MAX_LEN)\n+\tif ((uint32_t)mtu > RTE_ETHER_MAX_LEN)\n \t\tdev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n \t\tdev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;\n@@ -2905,7 +2905,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)\n \thw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);\n \thw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);\n \thw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);\n-\thw->mtu = ETHER_MTU;\n+\thw->mtu = RTE_ETHER_MTU;\n \n \t/* VLAN insertion is incompatible with LSOv2 */\n \tif (hw->cap & NFP_NET_CFG_CTRL_LSO2)\n@@ -2948,7 +2948,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)\n \trte_spinlock_init(&hw->reconfig_lock);\n \n \t/* Allocating memory for mac addr */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"mac_addr\", ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"mac_addr\",\n+\t\t\t\t\t       RTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to space for MAC address\");\n \t\terr = -ENOMEM;\ndiff --git 
a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h\nindex 61a6eba6d..bc288bed6 100644\n--- a/drivers/net/nfp/nfp_net_pmd.h\n+++ b/drivers/net/nfp/nfp_net_pmd.h\n@@ -436,7 +436,7 @@ struct nfp_net_hw {\n #endif\n #endif\n \n-\tuint8_t mac_addr[ETHER_ADDR_LEN];\n+\tuint8_t mac_addr[RTE_ETHER_ADDR_LEN];\n \n \t/* Records starting point for counters */\n \tstruct rte_eth_stats eth_stats_base;\ndiff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c\nindex 643479254..1b853399d 100644\n--- a/drivers/net/octeontx/octeontx_ethdev.c\n+++ b/drivers/net/octeontx/octeontx_ethdev.c\n@@ -142,7 +142,8 @@ octeontx_port_open(struct octeontx_nic *nic)\n \tnic->mcast_mode = bgx_port_conf.mcast_mode;\n \tnic->speed\t= bgx_port_conf.mode;\n \n-\tmemcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);\n+\tmemcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0],\n+\t\tRTE_ETHER_ADDR_LEN);\n \n \tocteontx_log_dbg(\"port opened %d\", nic->port_id);\n \treturn res;\n@@ -1064,7 +1065,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,\n \tdata->all_multicast = 0;\n \tdata->scattered_rx = 0;\n \n-\tdata->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,\n+\tdata->mac_addrs = rte_zmalloc_socket(octtx_name, RTE_ETHER_ADDR_LEN, 0,\n \t\t\t\t\t\t\tsocket_id);\n \tif (data->mac_addrs == NULL) {\n \t\tocteontx_log_err(\"failed to allocate memory for mac_addrs\");\n@@ -1085,7 +1086,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,\n \t}\n \n \t/* Update port_id mac to eth_dev */\n-\tmemcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);\n+\tmemcpy(data->mac_addrs, nic->mac_addr, RTE_ETHER_ADDR_LEN);\n \n \tPMD_INIT_LOG(DEBUG, \"ethdev info: \");\n \tPMD_INIT_LOG(DEBUG, \"port %d, port_ena %d ochan %d num_ochan %d tx_q %d\",\ndiff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h\nindex 2a4a08afc..fd2e99edf 100644\n--- a/drivers/net/octeontx/octeontx_ethdev.h\n+++ b/drivers/net/octeontx/octeontx_ethdev.h\n@@ -62,7 +62,7 @@ struct octeontx_nic {\n \tuint8_t\tduplex;\n \tuint8_t speed;\n \tuint16_t mtu;\n-\tuint8_t mac_addr[ETHER_ADDR_LEN];\n+\tuint8_t mac_addr[RTE_ETHER_ADDR_LEN];\n \t/* Rx port parameters */\n \tstruct {\n \t\tbool classifier_enable;\ndiff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c\nindex 7655b3a7a..10277b9b6 100644\n--- a/drivers/net/pcap/rte_eth_pcap.c\n+++ b/drivers/net/pcap/rte_eth_pcap.c\n@@ -28,7 +28,7 @@\n #include <rte_string_fns.h>\n \n #define RTE_ETH_PCAP_SNAPSHOT_LEN 65535\n-#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN\n+#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN\n #define RTE_ETH_PCAP_PROMISC 1\n #define RTE_ETH_PCAP_TIMEOUT -1\n \n@@ -287,7 +287,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\tpcap_dump((u_char *)dumper, &header,\n \t\t\t\t  rte_pktmbuf_mtod(mbuf, void*));\n \t\t} else {\n-\t\t\tif (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {\n+\t\t\tif (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {\n \t\t\t\teth_pcap_gather_data(tx_pcap_data, mbuf);\n \t\t\t\tpcap_dump((u_char *)dumper, &header,\n \t\t\t\t\t  tx_pcap_data);\n@@ -295,7 +295,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\t\tPMD_LOG(ERR,\n \t\t\t\t\t\"Dropping PCAP packet. 
Size (%d) > max jumbo size (%d).\",\n \t\t\t\t\tmbuf->pkt_len,\n-\t\t\t\t\tETHER_MAX_JUMBO_FRAME_LEN);\n+\t\t\t\t\tRTE_ETHER_MAX_JUMBO_FRAME_LEN);\n \n \t\t\t\trte_pktmbuf_free(mbuf);\n \t\t\t\tbreak;\n@@ -349,7 +349,7 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\t\t\trte_pktmbuf_mtod(mbuf, u_char *),\n \t\t\t\t\tmbuf->pkt_len);\n \t\t} else {\n-\t\t\tif (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {\n+\t\t\tif (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {\n \t\t\t\teth_pcap_gather_data(tx_pcap_data, mbuf);\n \t\t\t\tret = pcap_sendpacket(pcap,\n \t\t\t\t\t\ttx_pcap_data, mbuf->pkt_len);\n@@ -357,7 +357,7 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\t\tPMD_LOG(ERR,\n \t\t\t\t\t\"Dropping PCAP packet. Size (%d) > max jumbo size (%d).\",\n \t\t\t\t\tmbuf->pkt_len,\n-\t\t\t\t\tETHER_MAX_JUMBO_FRAME_LEN);\n+\t\t\t\t\tRTE_ETHER_MAX_JUMBO_FRAME_LEN);\n \n \t\t\t\trte_pktmbuf_free(mbuf);\n \t\t\t\tbreak;\n@@ -993,7 +993,7 @@ eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,\n \t\treturn -1;\n \t}\n \n-\tmac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);\n+\tmac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);\n \tif (!mac_addrs) {\n \t\tclose(if_fd);\n \t\treturn -1;\n@@ -1002,7 +1002,7 @@ eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,\n \tPMD_LOG(INFO, \"Setting phy MAC for %s\", if_name);\n \teth_dev->data->mac_addrs = mac_addrs;\n \trte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,\n-\t\t\tifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);\n+\t\t\tifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);\n \n \tclose(if_fd);\n \n@@ -1040,7 +1040,7 @@ eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,\n \tifm = (struct if_msghdr *)buf;\n \tsdl = (struct sockaddr_dl *)(ifm + 1);\n \n-\tmac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);\n+\tmac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);\n \tif (!mac_addrs) {\n \t\trte_free(buf);\n \t\treturn -1;\n@@ -1049,7 +1049,7 @@ eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,\n \tPMD_LOG(INFO, \"Setting phy MAC for %s\", if_name);\n \teth_dev->data->mac_addrs = mac_addrs;\n \trte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,\n-\t\t\tLLADDR(sdl), ETHER_ADDR_LEN);\n+\t\t\tLLADDR(sdl), RTE_ETHER_ADDR_LEN);\n \n \trte_free(buf);\n \ndiff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h\nindex 1abf44fa7..51edc4151 100644\n--- a/drivers/net/qede/base/bcm_osal.h\n+++ b/drivers/net/qede/base/bcm_osal.h\n@@ -332,7 +332,9 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);\n \tqede_find_first_zero_bit(bitmap, length)\n \n #define OSAL_BUILD_BUG_ON(cond)\t\tnothing\n-#define ETH_ALEN\t\t\tETHER_ADDR_LEN\n+#define ETH_ALEN\t\t\tRTE_ETHER_ADDR_LEN\n+#define ETHER_TYPE_VLAN\t\t\tRTE_ETHER_TYPE_VLAN\n+#define ETHER_TYPE_QINQ\t\t\tRTE_ETHER_TYPE_QINQ\n \n #define OSAL_BITMAP_WEIGHT(bitmap, count) 0\n \ndiff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nindex c7fadf27f..84d898421 100644\n--- a/drivers/net/qede/qede_ethdev.c\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -565,7 +565,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,\n \tif (add) {\n \t\tSLIST_FOREACH(tmp, &qdev->uc_list_head, list) {\n \t\t\tif ((memcmp(mac_addr, &tmp->mac,\n-\t\t\t\t    ETHER_ADDR_LEN) == 0) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) == 0) &&\n \t\t\t     ucast->vni == tmp->vni &&\n \t\t\t     ucast->vlan 
== tmp->vlan) {\n \t\t\t\tDP_INFO(edev, \"Unicast MAC is already added\"\n@@ -588,7 +588,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,\n \t} else {\n \t\tSLIST_FOREACH(tmp, &qdev->uc_list_head, list) {\n \t\t\tif ((memcmp(mac_addr, &tmp->mac,\n-\t\t\t\t    ETHER_ADDR_LEN) == 0) &&\n+\t\t\t\t    RTE_ETHER_ADDR_LEN) == 0) &&\n \t\t\t    ucast->vlan == tmp->vlan\t  &&\n \t\t\t    ucast->vni == tmp->vni)\n \t\t\tbreak;\n@@ -1216,7 +1216,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)\n \tif (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)\n \t\teth_dev->data->mtu =\n \t\t\teth_dev->data->dev_conf.rxmode.max_rx_pkt_len -\n-\t\t\tETHER_HDR_LEN - QEDE_ETH_OVERHEAD;\n+\t\t\tRTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;\n \n \tif (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)\n \t\teth_dev->data->scattered_rx = 1;\n@@ -2232,9 +2232,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \tqede_dev_info_get(dev, &dev_info);\n \tmax_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;\n \tframe_size = max_rx_pkt_len;\n-\tif ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {\n \t\tDP_ERR(edev, \"MTU %u out of range, %u is maximum allowable\\n\",\n-\t\t       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -\n+\t\t       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -\n \t\t       QEDE_ETH_OVERHEAD);\n \t\treturn -EINVAL;\n \t}\n@@ -2274,7 +2274,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \t\t\tfp->rxq->rx_buf_size = rc;\n \t\t}\n \t}\n-\tif (max_rx_pkt_len > ETHER_MAX_LEN)\n+\tif (max_rx_pkt_len > RTE_ETHER_MAX_LEN)\n \t\tdev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n \t\tdev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;\n@@ -2408,7 +2408,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n \tstruct qed_slowpath_params params;\n \tstatic bool do_once = true;\n \tuint8_t bulletin_change;\n-\tuint8_t vf_mac[ETHER_ADDR_LEN];\n+\tuint8_t vf_mac[RTE_ETHER_ADDR_LEN];\n \tuint8_t is_mac_forced;\n \tbool is_mac_exist;\n \t/* Fix up ecore debug level */\n@@ -2538,7 +2538,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n \n \t/* Allocate memory for storing MAC addr */\n \teth_dev->data->mac_addrs = rte_zmalloc(edev->name,\n-\t\t\t\t\t(ETHER_ADDR_LEN *\n+\t\t\t\t\t(RTE_ETHER_ADDR_LEN *\n \t\t\t\t\tadapter->dev_info.num_mac_filters),\n \t\t\t\t\tRTE_CACHE_LINE_SIZE);\n \n@@ -2596,7 +2596,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n \tSLIST_INIT(&adapter->vlan_list_head);\n \tSLIST_INIT(&adapter->uc_list_head);\n \tSLIST_INIT(&adapter->mc_list_head);\n-\tadapter->mtu = ETHER_MTU;\n+\tadapter->mtu = RTE_ETHER_MTU;\n \tadapter->vport_started = false;\n \n \t/* VF tunnel offloads is enabled by default in PF driver */\ndiff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c\nindex 792b9efb4..c2f39ceb9 100644\n--- a/drivers/net/qede/qede_filter.c\n+++ b/drivers/net/qede/qede_filter.c\n@@ -221,7 +221,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,\n \tcase RTE_ETH_FLOW_NONFRAG_IPV4_TCP:\n \tcase RTE_ETH_FLOW_NONFRAG_IPV4_UDP:\n \t\t/* fill the common ip header */\n-\t\tarfs->tuple.eth_proto = ETHER_TYPE_IPv4;\n+\t\tarfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv4;\n \t\tarfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;\n \t\tarfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;\n 
\t\tarfs->tuple.ip_proto = next_proto[input->flow_type];\n@@ -237,7 +237,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,\n \t\tbreak;\n \tcase RTE_ETH_FLOW_NONFRAG_IPV6_TCP:\n \tcase RTE_ETH_FLOW_NONFRAG_IPV6_UDP:\n-\t\tarfs->tuple.eth_proto = ETHER_TYPE_IPv6;\n+\t\tarfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv6;\n \t\tarfs->tuple.ip_proto = next_proto[input->flow_type];\n \t\trte_memcpy(arfs->tuple.dst_ipv6,\n \t\t\t   &input->flow.ipv6_flow.dst_ip,\n@@ -473,7 +473,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,\n \n \t*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);\n \tswitch (arfs->tuple.eth_proto) {\n-\tcase ETHER_TYPE_IPv4:\n+\tcase RTE_ETHER_TYPE_IPv4:\n \t\tip = (struct ipv4_hdr *)raw_pkt;\n \t\tip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;\n \t\tip->total_length = sizeof(struct ipv4_hdr);\n@@ -506,7 +506,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,\n \t\t\tparams->tcp = true;\n \t\t}\n \t\tbreak;\n-\tcase ETHER_TYPE_IPv6:\n+\tcase RTE_ETHER_TYPE_IPv6:\n \t\tip6 = (struct ipv6_hdr *)raw_pkt;\n \t\tip6->proto = arfs->tuple.ip_proto;\n \t\tip6->vtc_flow =\n@@ -992,25 +992,25 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,\n \tbreak;\n \tcase ECORE_FILTER_MAC:\n \t\tmemcpy(ucast->mac, conf->outer_mac.addr_bytes,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \tbreak;\n \tcase ECORE_FILTER_INNER_MAC:\n \t\tmemcpy(ucast->mac, conf->inner_mac.addr_bytes,\n-\t\t       ETHER_ADDR_LEN);\n+\t\t       RTE_ETHER_ADDR_LEN);\n \tbreak;\n \tcase ECORE_FILTER_MAC_VNI_PAIR:\n \t\tmemcpy(ucast->mac, conf->outer_mac.addr_bytes,\n-\t\t\tETHER_ADDR_LEN);\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t\tucast->vni = conf->tenant_id;\n \tbreak;\n \tcase ECORE_FILTER_INNER_MAC_VNI_PAIR:\n \t\tmemcpy(ucast->mac, conf->inner_mac.addr_bytes,\n-\t\t\tETHER_ADDR_LEN);\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t\tucast->vni = conf->tenant_id;\n \tbreak;\n \tcase ECORE_FILTER_INNER_PAIR:\n \t\tmemcpy(ucast->mac, conf->inner_mac.addr_bytes,\n-\t\t\tETHER_ADDR_LEN);\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t\tucast->vlan = conf->inner_vlan;\n \tbreak;\n \tdefault:\n@@ -1266,7 +1266,8 @@ qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,\n \t\t\t\tspec = pattern->spec;\n \t\t\t\tflow->entry.tuple.src_ipv4 = spec->hdr.src_addr;\n \t\t\t\tflow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;\n-\t\t\t\tflow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;\n+\t\t\t\tflow->entry.tuple.eth_proto =\n+\t\t\t\t\tRTE_ETHER_TYPE_IPv4;\n \t\t\t}\n \t\t\tbreak;\n \n@@ -1283,7 +1284,8 @@ qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,\n \t\t\t\trte_memcpy(flow->entry.tuple.dst_ipv6,\n \t\t\t\t\t   spec->hdr.dst_addr,\n \t\t\t\t\t   IPV6_ADDR_LEN);\n-\t\t\t\tflow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;\n+\t\t\t\tflow->entry.tuple.eth_proto =\n+\t\t\t\t\tRTE_ETHER_TYPE_IPv6;\n \t\t\t}\n \t\t\tbreak;\n \ndiff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h\nindex b840c743c..02feaba16 100644\n--- a/drivers/net/qede/qede_if.h\n+++ b/drivers/net/qede/qede_if.h\n@@ -17,7 +17,7 @@ enum ecore_int_mode;\n \n struct qed_dev_info {\n \tuint8_t num_hwfns;\n-\tuint8_t hw_mac[ETHER_ADDR_LEN];\n+\tuint8_t hw_mac[RTE_ETHER_ADDR_LEN];\n \tbool is_mf_default;\n \n \t/* FW version */\ndiff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c\nindex ec6190b11..8a108f99c 100644\n--- a/drivers/net/qede/qede_main.c\n+++ b/drivers/net/qede/qede_main.c\n@@ -369,7 +369,7 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info 
*dev_info)\n \tdev_info->dev_type = edev->type;\n \n \trte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n-\t       ETHER_ADDR_LEN);\n+\t       RTE_ETHER_ADDR_LEN);\n \n \tdev_info->fw_major = FW_MAJOR_VERSION;\n \tdev_info->fw_minor = FW_MINOR_VERSION;\n@@ -434,7 +434,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)\n \t\t\t\t\t max_vf_vlan_filters;\n \n \t\trte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n-\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t   RTE_ETHER_ADDR_LEN);\n \t} else {\n \t\tecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),\n \t\t\t\t      &info->num_queues);\n@@ -455,7 +455,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)\n \tqed_fill_dev_info(edev, &info->common);\n \n \tif (IS_VF(edev))\n-\t\tmemset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);\n+\t\tmemset(&info->common.hw_mac, 0, RTE_ETHER_ADDR_LEN);\n \n \treturn 0;\n }\ndiff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c\nindex 0e8a3675b..235f78ae3 100644\n--- a/drivers/net/qede/qede_rxtx.c\n+++ b/drivers/net/qede/qede_rxtx.c\n@@ -963,21 +963,21 @@ static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)\n \tethertype = rte_cpu_to_be_16(eth_hdr->ether_type);\n \n \t /* Note: Valid only if VLAN stripping is disabled */\n-\tif (ethertype == ETHER_TYPE_VLAN) {\n+\tif (ethertype == RTE_ETHER_TYPE_VLAN) {\n \t\tvlan_tagged = 1;\n \t\tvlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);\n \t\tlen += sizeof(struct rte_vlan_hdr);\n \t\tethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);\n \t}\n \n-\tif (ethertype == ETHER_TYPE_IPv4) {\n+\tif (ethertype == RTE_ETHER_TYPE_IPv4) {\n \t\tpacket_type |= RTE_PTYPE_L3_IPV4;\n \t\tipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);\n \t\tif (ipv4_hdr->next_proto_id == IPPROTO_TCP)\n \t\t\tpacket_type |= RTE_PTYPE_L4_TCP;\n \t\telse if (ipv4_hdr->next_proto_id == IPPROTO_UDP)\n \t\t\tpacket_type |= RTE_PTYPE_L4_UDP;\n-\t} else if (ethertype == ETHER_TYPE_IPv6) {\n+\t} else if (ethertype == RTE_ETHER_TYPE_IPv6) {\n \t\tpacket_type |= RTE_PTYPE_L3_IPV6;\n \t\tipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);\n \t\tif (ipv6_hdr->proto == IPPROTO_TCP)\ndiff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h\nindex 5b249cbb2..41a5f0f5c 100644\n--- a/drivers/net/qede/qede_rxtx.h\n+++ b/drivers/net/qede/qede_rxtx.h\n@@ -70,7 +70,7 @@\n #define QEDE_ETH_OVERHEAD\t(((2 * QEDE_VLAN_TAG_SIZE)) \\\n \t\t\t\t + (QEDE_LLC_SNAP_HDR_LEN) + 2)\n \n-#define QEDE_MAX_ETHER_HDR_LEN\t(ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)\n+#define QEDE_MAX_ETHER_HDR_LEN\t(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)\n \n #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4\t\t\t|\\\n \t\t\t\t ETH_RSS_NONFRAG_IPV4_TCP\t|\\\ndiff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c\nindex dd9897d70..661432e11 100644\n--- a/drivers/net/sfc/sfc_ethdev.c\n+++ b/drivers/net/sfc/sfc_ethdev.c\n@@ -93,7 +93,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \n \tsfc_log_init(sa, \"entry\");\n \n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \tdev_info->max_mtu = EFX_MAC_SDU_MAX;\n \n \tdev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;\n@@ -941,7 +941,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \t * The driver does not use it, but other PMDs update jumbo frame\n \t * flag and max_rx_pkt_len when MTU is set.\n \t */\n-\tif (mtu > ETHER_MAX_LEN) {\n+\tif (mtu > RTE_ETHER_MAX_LEN) {\n \t\tstruct 
rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;\n \t\trxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \t}\n@@ -2095,7 +2095,7 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)\n \n \tsfc_log_init(sa, \"entry\");\n \n-\tdev->data->mac_addrs = rte_zmalloc(\"sfc\", ETHER_ADDR_LEN, 0);\n+\tdev->data->mac_addrs = rte_zmalloc(\"sfc\", RTE_ETHER_ADDR_LEN, 0);\n \tif (dev->data->mac_addrs == NULL) {\n \t\trc = ENOMEM;\n \t\tgoto fail_mac_addrs;\ndiff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c\nindex e1d981b4e..e4a9ba0ff 100644\n--- a/drivers/net/sfc/sfc_flow.c\n+++ b/drivers/net/sfc/sfc_flow.c\n@@ -951,7 +951,7 @@ sfc_flow_parse_geneve(const struct rte_flow_item *item,\n \t\treturn 0;\n \n \tif (mask->protocol == supp_mask.protocol) {\n-\t\tif (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {\n+\t\tif (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {\n \t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, item,\n \t\t\t\t\"GENEVE encap. protocol must be Ethernet \"\ndiff --git a/drivers/net/softnic/parser.c b/drivers/net/softnic/parser.c\nindex 7ea6eb4fa..dc15ec8aa 100644\n--- a/drivers/net/softnic/parser.c\n+++ b/drivers/net/softnic/parser.c\n@@ -533,7 +533,7 @@ my_ether_aton(const char *a)\n {\n \tint i;\n \tchar *end;\n-\tunsigned long o[ETHER_ADDR_LEN];\n+\tunsigned long o[RTE_ETHER_ADDR_LEN];\n \tstatic struct rte_ether_addr ether_addr;\n \n \ti = 0;\n@@ -550,14 +550,14 @@ my_ether_aton(const char *a)\n \t\treturn NULL;\n \n \t/* Support the format XX:XX:XX:XX:XX:XX */\n-\tif (i == ETHER_ADDR_LEN) {\n+\tif (i == RTE_ETHER_ADDR_LEN) {\n \t\twhile (i-- != 0) {\n \t\t\tif (o[i] > UINT8_MAX)\n \t\t\t\treturn NULL;\n \t\t\tether_addr.addr_bytes[i] = (uint8_t)o[i];\n \t\t}\n \t/* Support the format XXXX:XXXX:XXXX */\n-\t} else if (i == ETHER_ADDR_LEN / 2) {\n+\t} else if (i == RTE_ETHER_ADDR_LEN / 2) {\n \t\twhile (i-- != 0) {\n \t\t\tif (o[i] > UINT16_MAX)\n \t\t\t\treturn NULL;\ndiff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c\nindex 949b72d0c..4a42bbeb1 100644\n--- a/drivers/net/tap/rte_eth_tap.c\n+++ b/drivers/net/tap/rte_eth_tap.c\n@@ -565,9 +565,9 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,\n \t\t\tchar *buff_data = rte_pktmbuf_mtod(seg, void *);\n \t\t\tproto = (*buff_data & 0xf0);\n \t\t\tpi.proto = (proto == 0x40) ?\n-\t\t\t\trte_cpu_to_be_16(ETHER_TYPE_IPv4) :\n+\t\t\t\trte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) :\n \t\t\t\t((proto == 0x60) ?\n-\t\t\t\t\trte_cpu_to_be_16(ETHER_TYPE_IPv6) :\n+\t\t\t\t\trte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) :\n \t\t\t\t\t0x00);\n \t\t}\n \n@@ -657,7 +657,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\treturn 0;\n \n \tstruct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];\n-\tmax_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);\n+\tmax_size = *txq->mtu + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4);\n \tfor (i = 0; i < nb_pkts; i++) {\n \t\tstruct rte_mbuf *mbuf_in = bufs[num_tx];\n \t\tstruct rte_mbuf **mbuf;\n@@ -677,7 +677,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\t/* TCP segmentation implies TCP checksum offload */\n \t\t\tmbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;\n \n-\t\t\t/* gso size is calculated without ETHER_CRC_LEN */\n+\t\t\t/* gso size is calculated without RTE_ETHER_CRC_LEN */\n \t\t\thdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +\n \t\t\t\t\tmbuf_in->l4_len;\n \t\t\ttso_segsz = mbuf_in->tso_segsz + hdrs_len;\n@@ -924,7 +924,7 @@ tap_dev_info(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)\n \n \tdev_info->if_index = internals->if_index;\n \tdev_info->max_mac_addrs = 1;\n-\tdev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;\n+\tdev_info->max_rx_pktlen = (uint32_t)RTE_ETHER_MAX_VLAN_FRAME_LEN;\n \tdev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;\n \tdev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;\n \tdev_info->min_rx_bufsize = 0;\n@@ -1185,11 +1185,11 @@ tap_mac_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)\n \t\t\tmac_addr))\n \t\tmode = LOCAL_AND_REMOTE;\n \tifr.ifr_hwaddr.sa_family = AF_LOCAL;\n-\trte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);\n+\trte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN);\n \tret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);\n \tif (ret < 0)\n \t\treturn ret;\n-\trte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);\n+\trte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN);\n \tif (pmd->remote_if_index && !pmd->flow_isolate) {\n \t\t/* Replace MAC redirection rule after a MAC change */\n \t\tret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);\n@@ -1782,7 +1782,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,\n \t\tmemset(&ifr, 0, sizeof(struct ifreq));\n \t\tifr.ifr_hwaddr.sa_family = AF_LOCAL;\n \t\trte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,\n-\t\t\t\tETHER_ADDR_LEN);\n+\t\t\t\tRTE_ETHER_ADDR_LEN);\n \t\tif (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)\n \t\t\tgoto error_exit;\n \t}\n@@ -1837,7 +1837,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,\n \t\t\tgoto error_remote;\n \t\t}\n \t\trte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,\n-\t\t\t   ETHER_ADDR_LEN);\n+\t\t\t   RTE_ETHER_ADDR_LEN);\n \t\t/* The desired MAC is already in ifreq after SIOCGIFHWADDR. 
*/\n \t\tif (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {\n \t\t\tTAP_LOG(ERR, \"%s: failed to get %s MAC address.\",\n@@ -1996,8 +1996,10 @@ set_mac_type(const char *key __rte_unused,\n \t\tstatic int iface_idx;\n \n \t\t/* fixed mac = 00:64:74:61:70:<iface_idx> */\n-\t\tmemcpy((char *)user_mac->addr_bytes, \"\\0dtap\", ETHER_ADDR_LEN);\n-\t\tuser_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';\n+\t\tmemcpy((char *)user_mac->addr_bytes, \"\\0dtap\",\n+\t\t\tRTE_ETHER_ADDR_LEN);\n+\t\tuser_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =\n+\t\t\tiface_idx++ + '0';\n \t\tgoto success;\n \t}\n \ndiff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c\nindex 20963090c..9d90361d9 100644\n--- a/drivers/net/tap/tap_flow.c\n+++ b/drivers/net/tap/tap_flow.c\n@@ -538,17 +538,19 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)\n \t\treturn 0;\n \tmsg = &flow->msg;\n \tif (!rte_is_zero_ether_addr(&mask->dst)) {\n-\t\ttap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,\n+\t\ttap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,\n+\t\t\tRTE_ETHER_ADDR_LEN,\n \t\t\t   &spec->dst.addr_bytes);\n \t\ttap_nlattr_add(&msg->nh,\n-\t\t\t   TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,\n+\t\t\t   TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,\n \t\t\t   &mask->dst.addr_bytes);\n \t}\n \tif (!rte_is_zero_ether_addr(&mask->src)) {\n-\t\ttap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,\n-\t\t\t   &spec->src.addr_bytes);\n+\t\ttap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,\n+\t\t\tRTE_ETHER_ADDR_LEN,\n+\t\t\t&spec->src.addr_bytes);\n \t\ttap_nlattr_add(&msg->nh,\n-\t\t\t   TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,\n+\t\t\t   TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,\n \t\t\t   &mask->src.addr_bytes);\n \t}\n \treturn 0;\ndiff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h\nindex 3f0519593..dd054627e 100644\n--- a/drivers/net/thunderx/base/nicvf_plat.h\n+++ b/drivers/net/thunderx/base/nicvf_plat.h\n@@ -44,10 +44,10 @@\n \n /* Constants */\n #include <rte_ether.h>\n-#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN\n+#define NICVF_MAC_ADDR_SIZE RTE_ETHER_ADDR_LEN\n \n /* Ethernet */\n-#define ether_addr_copy(x, y) memcpy(y, x, ETHER_ADDR_LEN)\n+#define ether_addr_copy(x, y) memcpy(y, x, RTE_ETHER_ADDR_LEN)\n \n #include <rte_io.h>\n #define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr))\ndiff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c\nindex 482968b7a..eb2c11da5 100644\n--- a/drivers/net/thunderx/nicvf_ethdev.c\n+++ b/drivers/net/thunderx/nicvf_ethdev.c\n@@ -191,7 +191,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \t\t(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))\n \t\treturn -EINVAL;\n \n-\tif (frame_size > ETHER_MAX_LEN)\n+\tif (frame_size > RTE_ETHER_MAX_LEN)\n \t\trxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n \t\trxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;\n@@ -200,7 +200,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \t\treturn -EINVAL;\n \n \t/* Update max_rx_pkt_len */\n-\trxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;\n+\trxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;\n \tnic->mtu = mtu;\n \n \tfor (i = 0; i < nic->sqs_count; i++)\n@@ -1408,8 +1408,8 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tif (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)\n \t\tdev_info->speed_capa |= ETH_LINK_SPEED_40G;\n 
\n-\tdev_info->min_rx_bufsize = ETHER_MIN_MTU;\n-\tdev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;\n+\tdev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;\n+\tdev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;\n \tdev_info->max_rx_queues =\n \t\t\t(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);\n \tdev_info->max_tx_queues =\n@@ -1736,7 +1736,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)\n \t/* Setup MTU based on max_rx_pkt_len or default */\n \tmtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?\n \t\tdev->data->dev_conf.rxmode.max_rx_pkt_len\n-\t\t\t-  ETHER_HDR_LEN : ETHER_MTU;\n+\t\t\t-  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;\n \n \tif (nicvf_dev_set_mtu(dev, mtu)) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to set default mtu size\");\n@@ -2173,7 +2173,8 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)\n \t\treturn ENOTSUP;\n \t}\n \n-\teth_dev->data->mac_addrs = rte_zmalloc(\"mac_addr\", ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"mac_addr\",\n+\t\t\t\t\tRTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for mac addr\");\n \t\tret = -ENOMEM;\ndiff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h\nindex dd52f38e5..5d1379803 100644\n--- a/drivers/net/thunderx/nicvf_struct.h\n+++ b/drivers/net/thunderx/nicvf_struct.h\n@@ -105,7 +105,7 @@ struct nicvf {\n \tuint16_t mtu;\n \tint skip_bytes;\n \tbool vlan_filter_en;\n-\tuint8_t mac_addr[ETHER_ADDR_LEN];\n+\tuint8_t mac_addr[RTE_ETHER_ADDR_LEN];\n \t/* secondary queue set support */\n \tuint8_t sqs_id;\n \tuint8_t sqs_count;\ndiff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex 2762dcf97..c4570bbf8 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -832,16 +832,16 @@ static int\n virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n {\n \tstruct virtio_hw *hw = dev->data->dev_private;\n-\tuint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +\n+\tuint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +\n \t\t\t\t hw->vtnet_hdr_size;\n \tuint32_t frame_size = mtu + ether_hdr_len;\n \tuint32_t max_frame_size = hw->max_mtu + ether_hdr_len;\n \n \tmax_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);\n \n-\tif (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {\n+\tif (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {\n \t\tPMD_INIT_LOG(ERR, \"MTU should be between %d and %d\",\n-\t\t\tETHER_MIN_MTU, max_frame_size - ether_hdr_len);\n+\t\t\tRTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);\n \t\treturn -EINVAL;\n \t}\n \treturn 0;\n@@ -1097,7 +1097,7 @@ virtio_set_hwaddr(struct virtio_hw *hw)\n {\n \tvtpci_write_dev_config(hw,\n \t\t\toffsetof(struct virtio_net_config, mac),\n-\t\t\t&hw->mac_addr, ETHER_ADDR_LEN);\n+\t\t\t&hw->mac_addr, RTE_ETHER_ADDR_LEN);\n }\n \n static void\n@@ -1106,7 +1106,7 @@ virtio_get_hwaddr(struct virtio_hw *hw)\n \tif (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {\n \t\tvtpci_read_dev_config(hw,\n \t\t\toffsetof(struct virtio_net_config, mac),\n-\t\t\t&hw->mac_addr, ETHER_ADDR_LEN);\n+\t\t\t&hw->mac_addr, RTE_ETHER_ADDR_LEN);\n \t} else {\n \t\trte_eth_random_addr(&hw->mac_addr[0]);\n \t\tvirtio_set_hwaddr(hw);\n@@ -1129,10 +1129,10 @@ virtio_mac_table_set(struct virtio_hw *hw,\n \tctrl.hdr.class = VIRTIO_NET_CTRL_MAC;\n \tctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;\n \n-\tlen[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);\n+\tlen[0] = 
uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);\n \tmemcpy(ctrl.data, uc, len[0]);\n \n-\tlen[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);\n+\tlen[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);\n \tmemcpy(ctrl.data + len[0], mc, len[1]);\n \n \terr = virtio_send_command(hw->cvq, &ctrl, len, 2);\n@@ -1155,9 +1155,11 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n \t\treturn -EINVAL;\n \t}\n \n-\tuc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));\n+\tuc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +\n+\t\tsizeof(uc->entries));\n \tuc->entries = 0;\n-\tmc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));\n+\tmc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +\n+\t\tsizeof(mc->entries));\n \tmc->entries = 0;\n \n \tfor (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {\n@@ -1166,7 +1168,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n \t\tstruct virtio_net_ctrl_mac *tbl\n \t\t\t= rte_is_multicast_ether_addr(addr) ? mc : uc;\n \n-\t\tmemcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);\n+\t\tmemcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);\n \t}\n \n \treturn virtio_mac_table_set(hw, uc, mc);\n@@ -1185,9 +1187,11 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)\n \t\treturn;\n \t}\n \n-\tuc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));\n+\tuc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +\n+\t\tsizeof(uc->entries));\n \tuc->entries = 0;\n-\tmc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));\n+\tmc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +\n+\t\tsizeof(mc->entries));\n \tmc->entries = 0;\n \n \tfor (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {\n@@ -1197,7 +1201,8 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)\n \t\t\tcontinue;\n \n \t\ttbl = rte_is_multicast_ether_addr(addrs + i) ? 
mc : uc;\n-\t\tmemcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);\n+\t\tmemcpy(&tbl->macs[tbl->entries++], addrs + i,\n+\t\t\tRTE_ETHER_ADDR_LEN);\n \t}\n \n \tvirtio_mac_table_set(hw, uc, mc);\n@@ -1208,17 +1213,17 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)\n {\n \tstruct virtio_hw *hw = dev->data->dev_private;\n \n-\tmemcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);\n+\tmemcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);\n \n \t/* Use atomic update if available */\n \tif (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {\n \t\tstruct virtio_pmd_ctrl ctrl;\n-\t\tint len = ETHER_ADDR_LEN;\n+\t\tint len = RTE_ETHER_ADDR_LEN;\n \n \t\tctrl.hdr.class = VIRTIO_NET_CTRL_MAC;\n \t\tctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;\n \n-\t\tmemcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);\n+\t\tmemcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);\n \t\treturn virtio_send_command(hw->cvq, &ctrl, &len, 1);\n \t}\n \n@@ -1297,7 +1302,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)\n \t\t\toffsetof(struct virtio_net_config, mtu),\n \t\t\t&config.mtu, sizeof(config.mtu));\n \n-\t\tif (config.mtu < ETHER_MIN_MTU)\n+\t\tif (config.mtu < RTE_ETHER_MIN_MTU)\n \t\t\treq_features &= ~(1ULL << VIRTIO_NET_F_MTU);\n \t}\n \n@@ -1710,7 +1715,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)\n \t\t\t * time, but check again in case it has changed since\n \t\t\t * then, which should not happen.\n \t\t\t */\n-\t\t\tif (config->mtu < ETHER_MIN_MTU) {\n+\t\t\tif (config->mtu < RTE_ETHER_MIN_MTU) {\n \t\t\t\tPMD_INIT_LOG(ERR, \"invalid max MTU value (%u)\",\n \t\t\t\t\t\tconfig->mtu);\n \t\t\t\treturn -1;\n@@ -1721,7 +1726,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)\n \t\t\teth_dev->data->mtu = config->mtu;\n \n \t\t} else {\n-\t\t\thw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -\n+\t\t\thw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -\n \t\t\t\tVLAN_TAG_LEN - hw->vtnet_hdr_size;\n \t\t}\n \n@@ -1736,7 +1741,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)\n \t} else {\n \t\tPMD_INIT_LOG(DEBUG, \"config->max_virtqueue_pairs=1\");\n \t\thw->max_queue_pairs = 1;\n-\t\thw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -\n+\t\thw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -\n \t\t\tVLAN_TAG_LEN - hw->vtnet_hdr_size;\n \t}\n \n@@ -1835,11 +1840,12 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"virtio\", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"virtio\",\n+\t\t\t\tVIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t\"Failed to allocate %d bytes needed to store MAC addresses\",\n-\t\t\tVIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);\n+\t\t\tVIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1990,7 +1996,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)\n \tconst struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;\n \tconst struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;\n \tstruct virtio_hw *hw = dev->data->dev_private;\n-\tuint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +\n+\tuint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +\n \t\thw->vtnet_hdr_size;\n \tuint64_t rx_offloads = rxmode->offloads;\n \tuint64_t tx_offloads = txmode->offloads;\ndiff --git 
a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h\nindex 38a0261da..a38cb45ad 100644\n--- a/drivers/net/virtio/virtio_pci.h\n+++ b/drivers/net/virtio/virtio_pci.h\n@@ -251,7 +251,7 @@ struct virtio_hw {\n \tbool        has_tx_offload;\n \tbool        has_rx_offload;\n \tuint16_t    port_id;\n-\tuint8_t     mac_addr[ETHER_ADDR_LEN];\n+\tuint8_t     mac_addr[RTE_ETHER_ADDR_LEN];\n \tuint32_t    notify_off_multiplier;\n \tuint8_t     *isr;\n \tuint16_t    *notify_base;\n@@ -294,7 +294,7 @@ extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];\n  */\n struct virtio_net_config {\n \t/* The config defining mac address (if VIRTIO_NET_F_MAC) */\n-\tuint8_t    mac[ETHER_ADDR_LEN];\n+\tuint8_t    mac[RTE_ETHER_ADDR_LEN];\n \t/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */\n \tuint16_t   status;\n \tuint16_t   max_virtqueue_pairs;\ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex 3de467768..3805e217a 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -1244,7 +1244,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \n \t\tPMD_RX_LOG(DEBUG, \"packet len:%d\", len[i]);\n \n-\t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n+\t\tif (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n \t\t\tvirtio_discard_rxbuf(vq, rxm);\n@@ -1347,7 +1347,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,\n \n \t\tPMD_RX_LOG(DEBUG, \"packet len:%d\", len[i]);\n \n-\t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n+\t\tif (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n \t\t\tvirtio_discard_rxbuf(vq, rxm);\n@@ -1461,7 +1461,7 @@ virtio_recv_pkts_inorder(void *rx_queue,\n \n \t\trxm = rcv_pkts[i];\n \n-\t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n+\t\tif (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n \t\t\tvirtio_discard_rxbuf_inorder(vq, rxm);\n@@ -1653,7 +1653,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \n \t\trxm = rcv_pkts[i];\n \n-\t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n+\t\tif (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n \t\t\tvirtio_discard_rxbuf(vq, rxm);\n@@ -1832,7 +1832,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,\n \n \t\trxm = rcv_pkts[i];\n \n-\t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n+\t\tif (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n \t\t\tvirtio_discard_rxbuf(vq, rxm);\ndiff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c\nindex fbd9e979d..76bf75423 100644\n--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c\n+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c\n@@ -135,7 +135,7 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,\n \n \tmemset(&ifr, 0, sizeof(ifr));\n \tifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;\n-\tmemcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);\n+\tmemcpy(ifr.ifr_hwaddr.sa_data, mac, RTE_ETHER_ADDR_LEN);\n \tif (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {\n \t\tPMD_DRV_LOG(ERR, \"SIOCSIFHWADDR failed: %s\", strerror(errno));\n \t\tgoto error;\ndiff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c 
b/drivers/net/virtio/virtio_user/virtio_user_dev.c\nindex 8e420bcbc..e743695e4 100644\n--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c\n+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c\n@@ -226,15 +226,15 @@ static inline void\n parse_mac(struct virtio_user_dev *dev, const char *mac)\n {\n \tint i, r;\n-\tuint32_t tmp[ETHER_ADDR_LEN];\n+\tuint32_t tmp[RTE_ETHER_ADDR_LEN];\n \n \tif (!mac)\n \t\treturn;\n \n \tr = sscanf(mac, \"%x:%x:%x:%x:%x:%x\", &tmp[0],\n \t\t\t&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);\n-\tif (r == ETHER_ADDR_LEN) {\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; ++i)\n+\tif (r == RTE_ETHER_ADDR_LEN) {\n+\t\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)\n \t\t\tdev->mac_addr[i] = (uint8_t)tmp[i];\n \t\tdev->mac_specified = 1;\n \t} else {\ndiff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h\nindex 829ad4140..db7dc607a 100644\n--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h\n+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h\n@@ -43,7 +43,7 @@ struct virtio_user_dev {\n \tuint64_t\tunsupported_features; /* unsupported features mask */\n \tuint8_t\t\tstatus;\n \tuint16_t\tport_id;\n-\tuint8_t\t\tmac_addr[ETHER_ADDR_LEN];\n+\tuint8_t\t\tmac_addr[RTE_ETHER_ADDR_LEN];\n \tchar\t\tpath[PATH_MAX];\n \tunion {\n \t\tstruct vring\t\tvrings[VIRTIO_MAX_VIRTQUEUES];\ndiff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c\nindex 129c2b9ef..893f48a5d 100644\n--- a/drivers/net/virtio/virtio_user_ethdev.c\n+++ b/drivers/net/virtio/virtio_user_ethdev.c\n@@ -118,8 +118,8 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,\n \tstruct virtio_user_dev *dev = virtio_user_get_dev(hw);\n \n \tif (offset == offsetof(struct virtio_net_config, mac) &&\n-\t    length == ETHER_ADDR_LEN) {\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; ++i)\n+\t    length == RTE_ETHER_ADDR_LEN) {\n+\t\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)\n \t\t\t((uint8_t *)dst)[i] = dev->mac_addr[i];\n \t\treturn;\n \t}\n@@ -179,8 +179,8 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,\n \tstruct virtio_user_dev *dev = virtio_user_get_dev(hw);\n \n \tif ((offset == offsetof(struct virtio_net_config, mac)) &&\n-\t    (length == ETHER_ADDR_LEN))\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; ++i)\n+\t    (length == RTE_ETHER_ADDR_LEN))\n+\t\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)\n \t\t\tdev->mac_addr[i] = ((const uint8_t *)src)[i];\n \telse\n \t\tPMD_DRV_LOG(ERR, \"not supported offset=%zu, len=%d\",\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex e9dedc5da..c6dd4a347 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -134,7 +134,7 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };\n  */\n struct virtio_net_ctrl_mac {\n \tuint32_t entries;\n-\tuint8_t macs[][ETHER_ADDR_LEN];\n+\tuint8_t macs[][RTE_ETHER_ADDR_LEN];\n } __attribute__((__packed__));\n \n #define VIRTIO_NET_CTRL_MAC    1\ndiff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c\nindex f718b8b65..79ec42e37 100644\n--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c\n+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c\n@@ -307,12 +307,12 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)\n \tmemcpy(hw->perm_addr + 4, &mac_hi, 2);\n \n \t/* Allocate memory for storing MAC addresses */\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"vmxnet3\", ETHER_ADDR_LEN *\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"vmxnet3\", RTE_ETHER_ADDR_LEN *\n \t\t\t\t\t   
    VMXNET3_MAX_MAC_ADDRS, 0);\n \tif (eth_dev->data->mac_addrs == NULL) {\n \t\tPMD_INIT_LOG(ERR,\n \t\t\t     \"Failed to allocate %d bytes needed to store MAC addresses\",\n-\t\t\t     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);\n+\t\t\t     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);\n \t\treturn -ENOMEM;\n \t}\n \t/* Copy the permanent MAC address */\ndiff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h\nindex 319d73926..8c2b6f877 100644\n--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h\n+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h\n@@ -71,7 +71,7 @@ struct vmxnet3_hw {\n \tuint16_t subsystem_vendor_id;\n \tbool adapter_stopped;\n \n-\tuint8_t perm_addr[ETHER_ADDR_LEN];\n+\tuint8_t perm_addr[RTE_ETHER_ADDR_LEN];\n \tuint8_t num_tx_queues;\n \tuint8_t num_rx_queues;\n \tuint8_t bufs_per_pkt;\ndiff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c\nindex 2675a16f0..9acf666dc 100644\n--- a/examples/bbdev_app/main.c\n+++ b/examples/bbdev_app/main.c\n@@ -62,7 +62,7 @@\n static const struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_NONE,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\ndiff --git a/examples/bond/main.c b/examples/bond/main.c\nindex f053e5bd8..2e81a9e7e 100644\n--- a/examples/bond/main.c\n+++ b/examples/bond/main.c\n@@ -120,7 +120,7 @@ static struct rte_mempool *mbuf_pool;\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_NONE,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.rx_adv_conf = {\n@@ -304,14 +304,14 @@ get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)\n {\n \tsize_t vlan_offset = 0;\n \n-\tif (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {\n+\tif (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {\n \t\tstruct rte_vlan_hdr *vlan_hdr =\n \t\t\t(struct rte_vlan_hdr *)(eth_hdr + 1);\n \n \t\tvlan_offset = sizeof(struct rte_vlan_hdr);\n \t\t*proto = vlan_hdr->eth_proto;\n \n-\t\tif (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {\n+\t\tif (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {\n \t\t\tvlan_hdr = vlan_hdr + 1;\n \n \t\t\t*proto = vlan_hdr->eth_proto;\n@@ -374,12 +374,12 @@ static int lcore_main(__attribute__((unused)) void *arg1)\n \t\t\teth_hdr = rte_pktmbuf_mtod(pkts[i],\n \t\t\t\t\t\tstruct rte_ether_hdr *);\n \t\t\tether_type = eth_hdr->ether_type;\n-\t\t\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))\n+\t\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))\n \t\t\t\tprintf(\"VLAN taged frame, offset:\");\n \t\t\toffset = get_vlan_offset(eth_hdr, &ether_type);\n \t\t\tif (offset > 0)\n \t\t\t\tprintf(\"%d\\n\", offset);\n-\t\t\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {\n+\t\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {\n \t\t\t\tif (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1)     {\n \t\t\t\t\tglobal_flag_stru_p->port_packets[1]++;\n \t\t\t\t\trte_spinlock_unlock(&global_flag_stru_p->lock);\n@@ -404,7 +404,7 @@ static int lcore_main(__attribute__((unused)) void *arg1)\n \t\t\t\t\t\trte_eth_tx_burst(BOND_PORT, 0, NULL, 0);\n \t\t\t\t\t}\n \t\t\t\t}\n-\t\t\t} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {\n+\t\t\t} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {\n \t\t\t\tif (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1)     {\n \t\t\t\t\tglobal_flag_stru_p->port_packets[2]++;\n 
\t\t\t\t\trte_spinlock_unlock(&global_flag_stru_p->lock);\n@@ -479,20 +479,20 @@ static void cmd_obj_send_parsed(void *parsed_result,\n \n \teth_hdr = rte_pktmbuf_mtod(created_pkt, struct rte_ether_hdr *);\n \trte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);\n-\tmemset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);\n-\teth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);\n+\tmemset(&eth_hdr->d_addr, 0xFF, RTE_ETHER_ADDR_LEN);\n+\teth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);\n \n \tarp_hdr = (struct rte_arp_hdr *)(\n \t\t(char *)eth_hdr + sizeof(struct rte_ether_hdr));\n \tarp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);\n-\tarp_hdr->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n-\tarp_hdr->arp_hlen = ETHER_ADDR_LEN;\n+\tarp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n+\tarp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;\n \tarp_hdr->arp_plen = sizeof(uint32_t);\n \tarp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REQUEST);\n \n \trte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);\n \tarp_hdr->arp_data.arp_sip = bond_ip;\n-\tmemset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);\n+\tmemset(&arp_hdr->arp_data.arp_tha, 0, RTE_ETHER_ADDR_LEN);\n \tarp_hdr->arp_data.arp_tip =\n \t\t\t  ((unsigned char *)&res->ip.addr.ipv4)[0]        |\n \t\t\t (((unsigned char *)&res->ip.addr.ipv4)[1] << 8)  |\ndiff --git a/examples/distributor/main.c b/examples/distributor/main.c\nindex e4c8c3c18..81d7ca61d 100644\n--- a/examples/distributor/main.c\n+++ b/examples/distributor/main.c\n@@ -81,7 +81,7 @@ struct app_stats prev_app_stats;\n static const struct rte_eth_conf port_conf_default = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n \t.txmode = {\n \t\t.mq_mode = ETH_MQ_TX_NONE,\ndiff --git a/examples/ethtool/ethtool-app/ethapp.c b/examples/ethtool/ethtool-app/ethapp.c\nindex ce7f715f8..b6b967118 100644\n--- a/examples/ethtool/ethtool-app/ethapp.c\n+++ b/examples/ethtool/ethtool-app/ethapp.c\n@@ -530,8 +530,8 @@ pcmd_mtu_callback(void *ptr_params,\n \tnew_mtu = atoi(params->opt);\n \tnew_mtu = strtoul(params->opt, &ptr_parse_end, 10);\n \tif (*ptr_parse_end != '\\0' ||\n-\t\t\tnew_mtu < ETHER_MIN_MTU ||\n-\t\t\tnew_mtu > ETHER_MAX_JUMBO_FRAME_LEN) {\n+\t\t\tnew_mtu < RTE_ETHER_MIN_MTU ||\n+\t\t\tnew_mtu > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {\n \t\tprintf(\"Port %i: Invalid MTU value\\n\", params->port);\n \t\treturn;\n \t}\ndiff --git a/examples/eventdev_pipeline/main.c b/examples/eventdev_pipeline/main.c\nindex 63227f0d5..f4e57f541 100644\n--- a/examples/eventdev_pipeline/main.c\n+++ b/examples/eventdev_pipeline/main.c\n@@ -253,7 +253,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)\n \tstatic const struct rte_eth_conf port_conf_default = {\n \t\t.rxmode = {\n \t\t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t},\n \t\t.rx_adv_conf = {\n \t\t\t.rss_conf = {\ndiff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c\nindex 5ae81fab1..dfb7db1c9 100644\n--- a/examples/flow_classify/flow_classify.c\n+++ b/examples/flow_classify/flow_classify.c\n@@ -61,7 +61,7 @@ const char cb_port_delim[] = \":\";\n \n static const struct rte_eth_conf port_conf_default = {\n \t.rxmode = {\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n };\n \ndiff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c\nindex 
cf9421874..09dbbcea9 100644\n--- a/examples/flow_filtering/main.c\n+++ b/examples/flow_filtering/main.c\n@@ -47,8 +47,8 @@ struct rte_flow *flow;\n static inline void\n print_ether_addr(const char *what, struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", what, buf);\n }\n \ndiff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c\nindex 8d82a6411..d071dae9c 100644\n--- a/examples/ip_fragmentation/main.c\n+++ b/examples/ip_fragmentation/main.c\n@@ -52,15 +52,16 @@\n  * Default byte size for the IPv6 Maximum Transfer Unit (MTU).\n  * This value includes the size of IPv6 header.\n  */\n-#define\tIPV4_MTU_DEFAULT\tETHER_MTU\n-#define\tIPV6_MTU_DEFAULT\tETHER_MTU\n+#define\tIPV4_MTU_DEFAULT\tRTE_ETHER_MTU\n+#define\tIPV6_MTU_DEFAULT\tRTE_ETHER_MTU\n \n /*\n  * The overhead from max frame size to MTU.\n  * We have to consider the max possible overhead.\n  */\n #define MTU_OVERHEAD\t\\\n-\t(ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * sizeof(struct rte_vlan_hdr))\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \\\n+\t\t2 * sizeof(struct rte_vlan_hdr))\n \n /*\n  * Default payload in bytes for the IPv6 packet.\n@@ -357,9 +358,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,\n \t\trte_ether_addr_copy(&ports_eth_addr[port_out],\n \t\t\t\t&eth_hdr->s_addr);\n \t\tif (ipv6)\n-\t\t\teth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);\n+\t\t\teth_hdr->ether_type =\n+\t\t\t\trte_be_to_cpu_16(RTE_ETHER_TYPE_IPv6);\n \t\telse\n-\t\t\teth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);\n+\t\t\teth_hdr->ether_type =\n+\t\t\t\trte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);\n \t}\n \n \tlen += len2;\n@@ -572,8 +575,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \n@@ -677,9 +680,9 @@ parse_ptype(struct rte_mbuf *m)\n \n \teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \tether_type = eth_hdr->ether_type;\n-\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;\n-\telse if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))\n+\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;\n \n \tm->packet_type = packet_type;\ndiff --git a/examples/ip_pipeline/kni.c b/examples/ip_pipeline/kni.c\nindex e3d0b3758..e9262e079 100644\n--- a/examples/ip_pipeline/kni.c\n+++ b/examples/ip_pipeline/kni.c\n@@ -86,7 +86,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu)\n \tif (!rte_eth_dev_is_valid_port(port_id))\n \t\treturn -EINVAL;\n \n-\tif (new_mtu > ETHER_MAX_LEN)\n+\tif (new_mtu > RTE_ETHER_MAX_LEN)\n \t\treturn -EINVAL;\n \n \t/* Set new MTU */\ndiff --git a/examples/ip_pipeline/parser.c b/examples/ip_pipeline/parser.c\nindex 4777f9ffd..3fffeb586 100644\n--- a/examples/ip_pipeline/parser.c\n+++ b/examples/ip_pipeline/parser.c\n@@ -518,7 +518,7 @@ my_ether_aton(const char *a)\n {\n \tint i;\n \tchar *end;\n-\tunsigned long o[ETHER_ADDR_LEN];\n+\tunsigned long 
o[RTE_ETHER_ADDR_LEN];\n \tstatic struct rte_ether_addr ether_addr;\n \n \ti = 0;\n@@ -535,14 +535,14 @@ my_ether_aton(const char *a)\n \t\treturn NULL;\n \n \t/* Support the format XX:XX:XX:XX:XX:XX */\n-\tif (i == ETHER_ADDR_LEN) {\n+\tif (i == RTE_ETHER_ADDR_LEN) {\n \t\twhile (i-- != 0) {\n \t\t\tif (o[i] > UINT8_MAX)\n \t\t\t\treturn NULL;\n \t\t\tether_addr.addr_bytes[i] = (uint8_t)o[i];\n \t\t}\n \t/* Support the format XXXX:XXXX:XXXX */\n-\t} else if (i == ETHER_ADDR_LEN / 2) {\n+\t} else if (i == RTE_ETHER_ADDR_LEN / 2) {\n \t\twhile (i-- != 0) {\n \t\t\tif (o[i] > UINT16_MAX)\n \t\t\t\treturn NULL;\ndiff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c\nindex fe92fcdd2..7ce52150d 100644\n--- a/examples/ip_reassembly/main.c\n+++ b/examples/ip_reassembly/main.c\n@@ -362,7 +362,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,\n \t\t\tdst_port = next_hop;\n \t\t}\n \n-\t\teth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);\n+\t\teth_hdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);\n \t} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {\n \t\t/* if packet is IPv6 */\n \t\tstruct ipv6_extension_fragment *frag_hdr;\n@@ -401,7 +401,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,\n \t\t\tdst_port = next_hop;\n \t\t}\n \n-\t\teth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);\n+\t\teth_hdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv6);\n \t}\n \t/* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */\n \n@@ -694,8 +694,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c\nindex 9a2c6e3ba..d47cdd55c 100644\n--- a/examples/ipsec-secgw/ipsec-secgw.c\n+++ b/examples/ipsec-secgw/ipsec-secgw.c\n@@ -211,7 +211,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode\t= ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\n@@ -236,8 +236,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)\n \tstruct rte_ether_hdr *eth;\n \n \teth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n-\tif (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {\n-\t\tnlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);\n+\tif (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {\n+\t\tnlp = (uint8_t *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);\n \t\tnlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));\n \t\tif (*nlp == IPPROTO_ESP)\n \t\t\tt->ipsec.pkts[(t->ipsec.num)++] = pkt;\n@@ -247,8 +247,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)\n \t\t}\n \t\tpkt->l2_len = 0;\n \t\tpkt->l3_len = sizeof(struct ip);\n-\t} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {\n-\t\tnlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);\n+\t} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {\n+\t\tnlp = (uint8_t *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);\n \t\tnlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));\n \t\tif (*nlp == IPPROTO_ESP)\n 
\t\t\tt->ipsec.pkts[(t->ipsec.num)++] = pkt;\n@@ -330,12 +330,12 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,\n \tip = rte_pktmbuf_mtod(pkt, struct ip *);\n \n \tethhdr = (struct rte_ether_hdr *)\n-\t\trte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);\n+\t\trte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);\n \n \tif (ip->ip_v == IPVERSION) {\n \t\tpkt->ol_flags |= qconf->outbound.ipv4_offloads;\n \t\tpkt->l3_len = sizeof(struct ip);\n-\t\tpkt->l2_len = ETHER_HDR_LEN;\n+\t\tpkt->l2_len = RTE_ETHER_HDR_LEN;\n \n \t\tip->ip_sum = 0;\n \n@@ -343,13 +343,13 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,\n \t\tif ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)\n \t\t\tip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);\n \n-\t\tethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\tethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t} else {\n \t\tpkt->ol_flags |= qconf->outbound.ipv6_offloads;\n \t\tpkt->l3_len = sizeof(struct ip6_hdr);\n-\t\tpkt->l2_len = ETHER_HDR_LEN;\n+\t\tpkt->l2_len = RTE_ETHER_HDR_LEN;\n \n-\t\tethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\tethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t}\n \n \tmemcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,\n@@ -1427,8 +1427,8 @@ parse_args(int32_t argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c\nindex 938375230..adab3217a 100644\n--- a/examples/ipv4_multicast/main.c\n+++ b/examples/ipv4_multicast/main.c\n@@ -280,7 +280,7 @@ mcast_send_pkt(struct rte_mbuf *pkt, struct rte_ether_addr *dest_addr,\n \n \trte_ether_addr_copy(dest_addr, &ethdr->d_addr);\n \trte_ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);\n-\tethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);\n+\tethdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);\n \n \t/* Put new packet into the output queue */\n \tlen = qconf->tx_mbufs[port].len;\n@@ -539,8 +539,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/examples/kni/main.c b/examples/kni/main.c\nindex a6d6fc2d2..a4d598bb7 100644\n--- a/examples/kni/main.c\n+++ b/examples/kni/main.c\n@@ -760,7 +760,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu)\n \n \tmemcpy(&conf, &port_conf, sizeof(conf));\n \t/* Set new MTU */\n-\tif (new_mtu > ETHER_MAX_LEN)\n+\tif (new_mtu > RTE_ETHER_MAX_LEN)\n \t\tconf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n \telse\n \t\tconf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;\n@@ -834,8 +834,8 @@ kni_config_network_interface(uint16_t port_id, uint8_t if_up)\n static void\n print_ethaddr(const char *name, struct rte_ether_addr *mac_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, mac_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);\n \tRTE_LOG(INFO, APP, \"\\t%s%s\\n\", name, buf);\n }\n \ndiff --git 
a/examples/l2fwd-cat/l2fwd-cat.c b/examples/l2fwd-cat/l2fwd-cat.c\nindex 1a8af28e2..b34b40a00 100644\n--- a/examples/l2fwd-cat/l2fwd-cat.c\n+++ b/examples/l2fwd-cat/l2fwd-cat.c\n@@ -20,7 +20,7 @@\n #define BURST_SIZE 32\n \n static const struct rte_eth_conf port_conf_default = {\n-\t.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }\n+\t.rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }\n };\n \n /* l2fwd-cat.c: CAT enabled, basic DPDK skeleton forwarding example. */\ndiff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c\nindex a77b000a9..973a371bf 100644\n--- a/examples/l2fwd-crypto/main.c\n+++ b/examples/l2fwd-crypto/main.c\n@@ -212,7 +212,7 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_NONE,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\n@@ -396,7 +396,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,\n \n \teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \n-\tif (eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\tif (eth_hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\treturn -1;\n \n \tipdata_offset = sizeof(struct rte_ether_hdr);\ndiff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c\nindex 4b15853e3..f38b3ae6f 100644\n--- a/examples/l3fwd-acl/main.c\n+++ b/examples/l3fwd-acl/main.c\n@@ -125,7 +125,7 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode\t= ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\n@@ -1701,7 +1701,7 @@ parse_args(int argc, char **argv)\n \n \t\t\t\t/*\n \t\t\t\t * if no max-pkt-len set, then use the\n-\t\t\t\t * default value ETHER_MAX_LEN\n+\t\t\t\t * default value RTE_ETHER_MAX_LEN\n \t\t\t\t */\n \t\t\t\tif (0 == getopt_long(argc, argvopt, \"\",\n \t\t\t\t\t\t&lenopts, &option_index)) {\n@@ -1756,8 +1756,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c\nindex 7f7be22c6..8a991f5f7 100644\n--- a/examples/l3fwd-power/main.c\n+++ b/examples/l3fwd-power/main.c\n@@ -200,7 +200,7 @@ uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode        = ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\n@@ -624,9 +624,9 @@ parse_ptype_one(struct rte_mbuf *m)\n \n \teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \tether_type = eth_hdr->ether_type;\n-\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;\n-\telse if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))\n+\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;\n \n 
\tm->packet_type = packet_type;\n@@ -1536,7 +1536,7 @@ parse_args(int argc, char **argv)\n \n \t\t\t\t/**\n \t\t\t\t * if no max-pkt-len set, use the default value\n-\t\t\t\t * ETHER_MAX_LEN\n+\t\t\t\t * RTE_ETHER_MAX_LEN\n \t\t\t\t */\n \t\t\t\tif (0 == getopt_long(argc, argvopt, \"\",\n \t\t\t\t\t\t&lenopts, &option_index)) {\n@@ -1581,8 +1581,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c\nindex 56a55ac28..d9b636806 100644\n--- a/examples/l3fwd-vf/main.c\n+++ b/examples/l3fwd-vf/main.c\n@@ -159,7 +159,7 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode\t= ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\n@@ -790,8 +790,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \ndiff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c\nindex 64e79b946..b56b08646 100644\n--- a/examples/l3fwd/l3fwd_em.c\n+++ b/examples/l3fwd/l3fwd_em.c\n@@ -572,7 +572,7 @@ em_parse_ptype(struct rte_mbuf *m)\n \teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \tether_type = eth_hdr->ether_type;\n \tl3 = (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr);\n-\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {\n \t\tipv4_hdr = (struct ipv4_hdr *)l3;\n \t\thdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *\n \t\t\t  IPV4_IHL_MULTIPLIER;\n@@ -584,7 +584,7 @@ em_parse_ptype(struct rte_mbuf *m)\n \t\t\t\tpacket_type |= RTE_PTYPE_L4_UDP;\n \t\t} else\n \t\t\tpacket_type |= RTE_PTYPE_L3_IPV4_EXT;\n-\t} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {\n+\t} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {\n \t\tipv6_hdr = (struct ipv6_hdr *)l3;\n \t\tif (ipv6_hdr->proto == IPPROTO_TCP)\n \t\t\tpacket_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;\ndiff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c\nindex b373047f6..eaa47aaa3 100644\n--- a/examples/l3fwd/l3fwd_lpm.c\n+++ b/examples/l3fwd/l3fwd_lpm.c\n@@ -386,9 +386,9 @@ lpm_parse_ptype(struct rte_mbuf *m)\n \n \teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \tether_type = eth_hdr->ether_type;\n-\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;\n-\telse if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))\n+\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;\n \n \tm->packet_type = packet_type;\ndiff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c\nindex 3f4625abd..570693b32 100644\n--- a/examples/l3fwd/main.c\n+++ 
b/examples/l3fwd/main.c\n@@ -120,7 +120,7 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\n@@ -570,7 +570,7 @@ parse_args(int argc, char **argv)\n \n \t\t\t/*\n \t\t\t * if no max-pkt-len set, use the default\n-\t\t\t * value ETHER_MAX_LEN.\n+\t\t\t * value RTE_ETHER_MAX_LEN.\n \t\t\t */\n \t\t\tif (getopt_long(argc, argvopt, \"\",\n \t\t\t\t\t&lenopts, &option_index) == 0) {\n@@ -649,8 +649,8 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \n@@ -827,7 +827,7 @@ main(int argc, char **argv)\n \t/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */\n \tfor (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {\n \t\tdest_eth_addr[portid] =\n-\t\t\tETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);\n+\t\t\tRTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);\n \t\t*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];\n \t}\n \ndiff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c\nindex 8e28c16e1..46275e79a 100644\n--- a/examples/performance-thread/l3fwd-thread/main.c\n+++ b/examples/performance-thread/l3fwd-thread/main.c\n@@ -98,9 +98,9 @@ parse_ptype(struct rte_mbuf *m)\n \n \teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \tether_type = eth_hdr->ether_type;\n-\tif (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;\n-\telse if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))\n+\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))\n \t\tpacket_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;\n \n \tm->packet_type = packet_type;\n@@ -302,7 +302,7 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode = ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\n@@ -2977,7 +2977,9 @@ parse_args(int argc, char **argv)\n \t\t\t\tport_conf.txmode.offloads |=\n \t\t\t\t\t\tDEV_TX_OFFLOAD_MULTI_SEGS;\n \n-\t\t\t\t/* if no max-pkt-len set, use the default value ETHER_MAX_LEN */\n+\t\t\t\t/* if no max-pkt-len set, use the default value\n+\t\t\t\t * RTE_ETHER_MAX_LEN\n+\t\t\t\t */\n \t\t\t\tif (0 == getopt_long(argc, argvopt, \"\", &lenopts,\n \t\t\t\t\t\t&option_index)) {\n \n@@ -3024,9 +3026,9 @@ parse_args(int argc, char **argv)\n static void\n print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)\n {\n-\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tchar buf[RTE_ETHER_ADDR_FMT_SIZE];\n \n-\trte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\trte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);\n \tprintf(\"%s%s\", name, buf);\n }\n \n@@ -3490,7 +3492,7 @@ main(int argc, char **argv)\n \n \t/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */\n \tfor (portid = 0; portid < 
RTE_MAX_ETHPORTS; portid++) {\n-\t\tdest_eth_addr[portid] = ETHER_LOCAL_ADMIN_ADDR +\n+\t\tdest_eth_addr[portid] = RTE_ETHER_LOCAL_ADMIN_ADDR +\n \t\t\t\t((uint64_t)portid << 40);\n \t\t*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];\n \t}\ndiff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c\nindex 1266f521c..95b0c176e 100644\n--- a/examples/ptpclient/ptpclient.c\n+++ b/examples/ptpclient/ptpclient.c\n@@ -49,7 +49,7 @@ static uint8_t ptp_enabled_ports[RTE_MAX_ETHPORTS];\n \n static const struct rte_eth_conf port_conf_default = {\n \t.rxmode = {\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n };\n \ndiff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c\nindex 431e84f4e..ce1f63dc7 100644\n--- a/examples/qos_meter/main.c\n+++ b/examples/qos_meter/main.c\n@@ -54,7 +54,7 @@ static struct rte_mempool *pool = NULL;\n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n \t\t.mq_mode\t= ETH_MQ_RX_RSS,\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.offloads = DEV_RX_OFFLOAD_CHECKSUM,\n \t},\ndiff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c\nindex 37c2b95fd..1209bd7ce 100644\n--- a/examples/qos_sched/init.c\n+++ b/examples/qos_sched/init.c\n@@ -57,7 +57,7 @@ struct flow_conf qos_conf[MAX_DATA_STREAMS];\n \n static struct rte_eth_conf port_conf = {\n \t.rxmode = {\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t},\n \t.txmode = {\ndiff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c\nindex a277519f5..640a4152d 100644\n--- a/examples/rxtx_callbacks/main.c\n+++ b/examples/rxtx_callbacks/main.c\n@@ -19,7 +19,7 @@\n \n static const struct rte_eth_conf port_conf_default = {\n \t.rxmode = {\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n };\n \ndiff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c\nindex 7e3cd8715..a8a8e98f0 100644\n--- a/examples/skeleton/basicfwd.c\n+++ b/examples/skeleton/basicfwd.c\n@@ -19,7 +19,7 @@\n \n static const struct rte_eth_conf port_conf_default = {\n \t.rxmode = {\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n };\n \ndiff --git a/examples/tep_termination/vxlan.c b/examples/tep_termination/vxlan.c\nindex 6545b8ae4..cc171fbbc 100644\n--- a/examples/tep_termination/vxlan.c\n+++ b/examples/tep_termination/vxlan.c\n@@ -16,9 +16,9 @@\n static uint16_t\n get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)\n {\n-\tif (ethertype == ETHER_TYPE_IPv4)\n+\tif (ethertype == RTE_ETHER_TYPE_IPv4)\n \t\treturn rte_ipv4_phdr_cksum(l3_hdr, ol_flags);\n-\telse /* assume ethertype == ETHER_TYPE_IPv6 */\n+\telse /* assume ethertype == RTE_ETHER_TYPE_IPv6 */\n \t\treturn rte_ipv6_phdr_cksum(l3_hdr, ol_flags);\n }\n \n@@ -38,7 +38,7 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *info,\n \tinfo->outer_l2_len = sizeof(struct rte_ether_hdr);\n \tethertype = rte_be_to_cpu_16(eth_hdr->ether_type);\n \n-\tif (ethertype == ETHER_TYPE_VLAN) {\n+\tif (ethertype == RTE_ETHER_TYPE_VLAN) {\n \t\tstruct rte_vlan_hdr *vlan_hdr =\n \t\t\t(struct rte_vlan_hdr *)(eth_hdr + 1);\n \t\tinfo->outer_l2_len  += sizeof(struct rte_vlan_hdr);\n@@ -46,13 +46,13 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *info,\n \t}\n \n \tswitch (ethertype) {\n-\tcase ETHER_TYPE_IPv4:\n+\tcase 
RTE_ETHER_TYPE_IPv4:\n \t\tipv4_hdr = (struct ipv4_hdr *)\n \t\t\t((char *)eth_hdr + info->outer_l2_len);\n \t\tinfo->outer_l3_len = sizeof(struct ipv4_hdr);\n \t\t*l4_proto = ipv4_hdr->next_proto_id;\n \t\tbreak;\n-\tcase ETHER_TYPE_IPv6:\n+\tcase RTE_ETHER_TYPE_IPv6:\n \t\tipv6_hdr = (struct ipv6_hdr *)\n \t\t\t((char *)eth_hdr + info->outer_l2_len);\n \t\tinfo->outer_l3_len = sizeof(struct ipv6_hdr);\n@@ -85,7 +85,7 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr,\n \tinfo->l2_len = sizeof(struct rte_ether_hdr);\n \tethertype = rte_be_to_cpu_16(eth_hdr->ether_type);\n \n-\tif (ethertype == ETHER_TYPE_VLAN) {\n+\tif (ethertype == RTE_ETHER_TYPE_VLAN) {\n \t\tstruct rte_vlan_hdr *vlan_hdr =\n \t\t\t(struct rte_vlan_hdr *)(eth_hdr + 1);\n \t\tinfo->l2_len  += sizeof(struct rte_vlan_hdr);\n@@ -94,14 +94,14 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr,\n \n \tl3_hdr = (char *)eth_hdr + info->l2_len;\n \n-\tif (ethertype == ETHER_TYPE_IPv4) {\n+\tif (ethertype == RTE_ETHER_TYPE_IPv4) {\n \t\tipv4_hdr = (struct ipv4_hdr *)l3_hdr;\n \t\tipv4_hdr->hdr_checksum = 0;\n \t\tol_flags |= PKT_TX_IPV4;\n \t\tol_flags |= PKT_TX_IP_CKSUM;\n \t\tinfo->l3_len = sizeof(struct ipv4_hdr);\n \t\tl4_proto = ipv4_hdr->next_proto_id;\n-\t} else if (ethertype == ETHER_TYPE_IPv6) {\n+\t} else if (ethertype == RTE_ETHER_TYPE_IPv6) {\n \t\tipv6_hdr = (struct ipv6_hdr *)l3_hdr;\n \t\tinfo->l3_len = sizeof(struct ipv6_hdr);\n \t\tl4_proto = ipv6_hdr->proto;\n@@ -212,7 +212,7 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)\n \t\tm->l2_len = tx_offload.l2_len;\n \t\tm->l3_len = tx_offload.l3_len;\n \t\tm->l4_len = tx_offload.l4_len;\n-\t\tm->l2_len += ETHER_VXLAN_HLEN;\n+\t\tm->l2_len += RTE_ETHER_VXLAN_HLEN;\n \t}\n \n \tm->outer_l2_len = sizeof(struct rte_ether_hdr);\n@@ -234,7 +234,7 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)\n \t\t\t\t+ sizeof(struct rte_vxlan_hdr));\n \n \tudp->dst_port = rte_cpu_to_be_16(vxdev.dst_port);\n-\thash = rte_hash_crc(phdr, 2 * ETHER_ADDR_LEN, phdr->ether_type);\n+\thash = rte_hash_crc(phdr, 2 * RTE_ETHER_ADDR_LEN, phdr->ether_type);\n \tudp->src_port = rte_cpu_to_be_16((((uint64_t) hash * PORT_RANGE) >> 32)\n \t\t\t\t\t+ PORT_MIN);\n \ndiff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c\nindex 3316df13a..2237c7559 100644\n--- a/examples/tep_termination/vxlan_setup.c\n+++ b/examples/tep_termination/vxlan_setup.c\n@@ -251,7 +251,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)\n \t\treturn -1;\n \t}\n \n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++) {\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {\n \t\tvdev->mac_address.addr_bytes[i] =\n \t\t\tvxdev.port[portid].vport_mac.addr_bytes[i] =\n \t\t\tpkt_hdr->s_addr.addr_bytes[i];\n@@ -313,7 +313,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)\n \t\t\t&app_l2_hdr[portid].d_addr);\n \trte_ether_addr_copy(&ports_eth_addr[0],\n \t\t\t&app_l2_hdr[portid].s_addr);\n-\tapp_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\tapp_l2_hdr[portid].ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \n \tip = &app_ip_hdr[portid];\n \tip->version_ihl = IP_VHL_DEF;\n@@ -373,7 +373,7 @@ vxlan_unlink(struct vhost_dev *vdev)\n \t\t\t\tvdev->rx_q);\n \t\t\treturn;\n \t\t}\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n+\t\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n \t\t\tvdev->mac_address.addr_bytes[i] = 0;\n \n \t\t/* Clear out the receive buffers */\ndiff --git a/examples/vhost/main.c b/examples/vhost/main.c\nindex f4f0fc862..d7b34b3d4 100644\n--- 
a/examples/vhost/main.c\n+++ b/examples/vhost/main.c\n@@ -693,7 +693,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)\n \t\treturn -1;\n \t}\n \n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n \t\tvdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];\n \n \t/* vlan_tag currently uses the device_id. */\n@@ -858,7 +858,7 @@ get_psd_sum(void *l3_hdr, uint64_t ol_flags)\n {\n \tif (ol_flags & PKT_TX_IPV4)\n \t\treturn rte_ipv4_phdr_cksum(l3_hdr, ol_flags);\n-\telse /* assume ethertype == ETHER_TYPE_IPv6 */\n+\telse /* assume ethertype == RTE_ETHER_TYPE_IPv6 */\n \t\treturn rte_ipv6_phdr_cksum(l3_hdr, ol_flags);\n }\n \n@@ -949,7 +949,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)\n \ttx_q = &lcore_tx_queue[lcore_id];\n \n \tnh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n-\tif (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {\n+\tif (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {\n \t\t/* Guest has inserted the vlan tag. */\n \t\tstruct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);\n \t\tuint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);\ndiff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c\nindex 1194f62f8..aab19ba57 100644\n--- a/examples/vm_power_manager/channel_monitor.c\n+++ b/examples/vm_power_manager/channel_monitor.c\n@@ -65,7 +65,7 @@ str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr)\n {\n \tint i;\n \tchar *end;\n-\tunsigned long o[ETHER_ADDR_LEN];\n+\tunsigned long o[RTE_ETHER_ADDR_LEN];\n \n \ti = 0;\n \tdo {\n@@ -81,14 +81,14 @@ str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr)\n \t\treturn -1;\n \n \t/* Support the format XX:XX:XX:XX:XX:XX */\n-\tif (i == ETHER_ADDR_LEN) {\n+\tif (i == RTE_ETHER_ADDR_LEN) {\n \t\twhile (i-- != 0) {\n \t\t\tif (o[i] > UINT8_MAX)\n \t\t\t\treturn -1;\n \t\t\tether_addr->addr_bytes[i] = (uint8_t)o[i];\n \t\t}\n \t/* Support the format XXXX:XXXX:XXXX */\n-\t} else if (i == ETHER_ADDR_LEN / 2) {\n+\t} else if (i == RTE_ETHER_ADDR_LEN / 2) {\n \t\twhile (i-- != 0) {\n \t\t\tif (o[i] > UINT16_MAX)\n \t\t\t\treturn -1;\ndiff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c\nindex f8cab1162..bc15cb64e 100644\n--- a/examples/vm_power_manager/main.c\n+++ b/examples/vm_power_manager/main.c\n@@ -54,7 +54,7 @@ static volatile bool force_quit;\n /****************/\n static const struct rte_eth_conf port_conf_default = {\n \t.rxmode = {\n-\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.max_rx_pkt_len = RTE_ETHER_MAX_LEN,\n \t},\n };\n \ndiff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c\nindex 1af36dead..72bb33881 100644\n--- a/lib/librte_ethdev/rte_ethdev.c\n+++ b/lib/librte_ethdev/rte_ethdev.c\n@@ -500,7 +500,7 @@ rte_eth_dev_allocate(const char *name)\n \teth_dev = eth_dev_get(port_id);\n \tstrlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));\n \teth_dev->data->port_id = port_id;\n-\teth_dev->data->mtu = ETHER_MTU;\n+\teth_dev->data->mtu = RTE_ETHER_MTU;\n \n unlock:\n \trte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);\n@@ -1224,20 +1224,20 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,\n \t\t\t\tdev_info.max_rx_pktlen);\n \t\t\tret = -EINVAL;\n \t\t\tgoto rollback;\n-\t\t} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {\n+\t\t} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {\n 
\t\t\tRTE_ETHDEV_LOG(ERR,\n \t\t\t\t\"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\\n\",\n \t\t\t\tport_id, dev_conf->rxmode.max_rx_pkt_len,\n-\t\t\t\t(unsigned)ETHER_MIN_LEN);\n+\t\t\t\t(unsigned int)RTE_ETHER_MIN_LEN);\n \t\t\tret = -EINVAL;\n \t\t\tgoto rollback;\n \t\t}\n \t} else {\n-\t\tif (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||\n-\t\t\tdev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)\n+\t\tif (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||\n+\t\t\tdev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)\n \t\t\t/* Use default value */\n \t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len =\n-\t\t\t\t\t\t\tETHER_MAX_LEN;\n+\t\t\t\t\t\t\tRTE_ETHER_MAX_LEN;\n \t}\n \n \t/* Any requested offloading must be within its device capabilities */\n@@ -2552,7 +2552,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)\n \tdev_info->rx_desc_lim = lim;\n \tdev_info->tx_desc_lim = lim;\n \tdev_info->device = dev->device;\n-\tdev_info->min_mtu = ETHER_MIN_MTU;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \tdev_info->max_mtu = UINT16_MAX;\n \n \tRTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);\n@@ -3088,7 +3088,8 @@ get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)\n \trte_eth_dev_info_get(port_id, &dev_info);\n \n \tfor (i = 0; i < dev_info.max_mac_addrs; i++)\n-\t\tif (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)\n+\t\tif (memcmp(addr, &dev->data->mac_addrs[i],\n+\t\t\t\tRTE_ETHER_ADDR_LEN) == 0)\n \t\t\treturn i;\n \n \treturn -1;\n@@ -3222,7 +3223,7 @@ get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)\n \n \tfor (i = 0; i < dev_info.max_hash_mac_addrs; i++)\n \t\tif (memcmp(addr, &dev->data->hash_mac_addrs[i],\n-\t\t\tETHER_ADDR_LEN) == 0)\n+\t\t\tRTE_ETHER_ADDR_LEN) == 0)\n \t\t\treturn i;\n \n \treturn -1;\ndiff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h\nindex bcc1bb71d..bd20745dd 100644\n--- a/lib/librte_ethdev/rte_ethdev.h\n+++ b/lib/librte_ethdev/rte_ethdev.h\n@@ -2336,7 +2336,7 @@ void rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);\n  *  };\n  *\n  * device = dev->device\n- * min_mtu = ETHER_MIN_MTU\n+ * min_mtu = RTE_ETHER_MIN_MTU\n  * max_mtu = UINT16_MAX\n  *\n  * The following fields will be populated if support for dev_infos_get()\ndiff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h\nindex 050af52f8..e87a70613 100644\n--- a/lib/librte_ethdev/rte_flow.h\n+++ b/lib/librte_ethdev/rte_flow.h\n@@ -604,8 +604,8 @@ static const struct rte_flow_item_eth rte_flow_item_eth_mask = {\n  * Matches an 802.1Q/ad VLAN tag.\n  *\n  * The corresponding standard outer EtherType (TPID) values are\n- * ETHER_TYPE_VLAN or ETHER_TYPE_QINQ. It can be overridden by the preceding\n- * pattern item.\n+ * RTE_ETHER_TYPE_VLAN or RTE_ETHER_TYPE_QINQ. It can be overridden by\n+ * the preceding pattern item.\n  */\n struct rte_flow_item_vlan {\n \trte_be16_t tci; /**< Tag control information. */\n@@ -768,7 +768,7 @@ static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {\n  * Matches a E-tag header.\n  *\n  * The corresponding standard outer EtherType (TPID) value is\n- * ETHER_TYPE_ETAG. It can be overridden by the preceding pattern item.\n+ * RTE_ETHER_TYPE_ETAG. 
It can be overridden by the preceding pattern item.\n  */\n struct rte_flow_item_e_tag {\n \t/**\n@@ -2128,7 +2128,7 @@ struct rte_flow_action_set_ttl {\n  * Set MAC address from the matched flow\n  */\n struct rte_flow_action_set_mac {\n-\tuint8_t mac_addr[ETHER_ADDR_LEN];\n+\tuint8_t mac_addr[RTE_ETHER_ADDR_LEN];\n };\n \n /*\ndiff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\nindex b6ff57b42..e01d61b24 100644\n--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n@@ -622,21 +622,21 @@ rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,\n \t*ipv6_hdr = NULL;\n \n \tswitch (eth_hdr->ether_type) {\n-\tcase RTE_BE16(ETHER_TYPE_IPv4):\n+\tcase RTE_BE16(RTE_ETHER_TYPE_IPv4):\n \t\t*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);\n \t\tbreak;\n \n-\tcase RTE_BE16(ETHER_TYPE_IPv6):\n+\tcase RTE_BE16(RTE_ETHER_TYPE_IPv6):\n \t\t*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);\n \t\tbreak;\n \n-\tcase RTE_BE16(ETHER_TYPE_VLAN):\n+\tcase RTE_BE16(RTE_ETHER_TYPE_VLAN):\n \t\tvlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);\n \t\tswitch (vlan_hdr->eth_proto) {\n-\t\tcase RTE_BE16(ETHER_TYPE_IPv4):\n+\t\tcase RTE_BE16(RTE_ETHER_TYPE_IPv4):\n \t\t\t*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);\n \t\t\tbreak;\n-\t\tcase RTE_BE16(ETHER_TYPE_IPv6):\n+\t\tcase RTE_BE16(RTE_ETHER_TYPE_IPv6):\n \t\t\t*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);\n \t\t\tbreak;\n \t\tdefault:\ndiff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c\nindex 7a41788e2..e6507b07d 100644\n--- a/lib/librte_kni/rte_kni.c\n+++ b/lib/librte_kni/rte_kni.c\n@@ -253,7 +253,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n \tdev_info.mbuf_size = conf->mbuf_size;\n \tdev_info.mtu = conf->mtu;\n \n-\tmemcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);\n+\tmemcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);\n \n \tstrlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);\n \ndiff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h\nindex c93d3a985..45073af10 100644\n--- a/lib/librte_kni/rte_kni.h\n+++ b/lib/librte_kni/rte_kni.h\n@@ -68,7 +68,7 @@ struct rte_kni_conf {\n \n \t__extension__\n \tuint8_t force_bind : 1; /* Flag to bind kernel thread */\n-\tuint8_t mac_addr[ETHER_ADDR_LEN]; /* MAC address assigned to KNI */\n+\tuint8_t mac_addr[RTE_ETHER_ADDR_LEN]; /* MAC address assigned to KNI */\n \tuint16_t mtu;\n };\n \ndiff --git a/lib/librte_net/rte_arp.c b/lib/librte_net/rte_arp.c\nindex 0b204a573..35a743c13 100644\n--- a/lib/librte_net/rte_arp.c\n+++ b/lib/librte_net/rte_arp.c\n@@ -30,15 +30,15 @@ rte_net_make_rarp_packet(struct rte_mempool *mpool,\n \t}\n \n \t/* Ethernet header. */\n-\tmemset(eth_hdr->d_addr.addr_bytes, 0xff, ETHER_ADDR_LEN);\n+\tmemset(eth_hdr->d_addr.addr_bytes, 0xff, RTE_ETHER_ADDR_LEN);\n \trte_ether_addr_copy(mac, &eth_hdr->s_addr);\n-\teth_hdr->ether_type = htons(ETHER_TYPE_RARP);\n+\teth_hdr->ether_type = htons(RTE_ETHER_TYPE_RARP);\n \n \t/* RARP header. 
*/\n \trarp = (struct rte_arp_hdr *)(eth_hdr + 1);\n \trarp->arp_hardware = htons(RTE_ARP_HRD_ETHER);\n-\trarp->arp_protocol = htons(ETHER_TYPE_IPv4);\n-\trarp->arp_hlen = ETHER_ADDR_LEN;\n+\trarp->arp_protocol = htons(RTE_ETHER_TYPE_IPv4);\n+\trarp->arp_hlen = RTE_ETHER_ADDR_LEN;\n \trarp->arp_plen = 4;\n \trarp->arp_opcode  = htons(RTE_ARP_OP_REVREQUEST);\n \ndiff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h\nindex cc31fbd9b..60a7dbe3a 100644\n--- a/lib/librte_net/rte_ether.h\n+++ b/lib/librte_net/rte_ether.h\n@@ -23,25 +23,28 @@ extern \"C\" {\n #include <rte_mbuf.h>\n #include <rte_byteorder.h>\n \n-#define ETHER_ADDR_LEN  6 /**< Length of Ethernet address. */\n-#define ETHER_TYPE_LEN  2 /**< Length of Ethernet type field. */\n-#define ETHER_CRC_LEN   4 /**< Length of Ethernet CRC. */\n-#define ETHER_HDR_LEN   \\\n-\t(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) /**< Length of Ethernet header. */\n-#define ETHER_MIN_LEN   64    /**< Minimum frame len, including CRC. */\n-#define ETHER_MAX_LEN   1518  /**< Maximum frame len, including CRC. */\n-#define ETHER_MTU       \\\n-\t(ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN) /**< Ethernet MTU. */\n-\n-#define ETHER_MAX_VLAN_FRAME_LEN \\\n-\t(ETHER_MAX_LEN + 4) /**< Maximum VLAN frame length, including CRC. */\n-\n-#define ETHER_MAX_JUMBO_FRAME_LEN \\\n+#define RTE_ETHER_ADDR_LEN  6 /**< Length of Ethernet address. */\n+#define RTE_ETHER_TYPE_LEN  2 /**< Length of Ethernet type field. */\n+#define RTE_ETHER_CRC_LEN   4 /**< Length of Ethernet CRC. */\n+#define RTE_ETHER_HDR_LEN   \\\n+\t(RTE_ETHER_ADDR_LEN * 2 + \\\n+\t\tRTE_ETHER_TYPE_LEN) /**< Length of Ethernet header. */\n+#define RTE_ETHER_MIN_LEN   64    /**< Minimum frame len, including CRC. */\n+#define RTE_ETHER_MAX_LEN   1518  /**< Maximum frame len, including CRC. */\n+#define RTE_ETHER_MTU       \\\n+\t(RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN - \\\n+\t\tRTE_ETHER_CRC_LEN) /**< Ethernet MTU. */\n+\n+#define RTE_ETHER_MAX_VLAN_FRAME_LEN \\\n+\t(RTE_ETHER_MAX_LEN + 4)\n+\t/**< Maximum VLAN frame length, including CRC. */\n+\n+#define RTE_ETHER_MAX_JUMBO_FRAME_LEN \\\n \t0x3F00 /**< Maximum Jumbo frame length, including CRC. */\n \n-#define ETHER_MAX_VLAN_ID  4095 /**< Maximum VLAN ID. */\n+#define RTE_ETHER_MAX_VLAN_ID  4095 /**< Maximum VLAN ID. */\n \n-#define ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */\n+#define RTE_ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */\n \n /**\n  * Ethernet address:\n@@ -55,11 +58,11 @@ extern \"C\" {\n  * See http://standards.ieee.org/regauth/groupmac/tutorial.html\n  */\n struct rte_ether_addr {\n-\tuint8_t addr_bytes[ETHER_ADDR_LEN]; /**< Addr bytes in tx order */\n+\tuint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; /**< Addr bytes in tx order */\n } __attribute__((__packed__));\n \n-#define ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */\n-#define ETHER_GROUP_ADDR       0x01 /**< Multicast or broadcast Eth. address. */\n+#define RTE_ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */\n+#define RTE_ETHER_GROUP_ADDR  0x01 /**< Multicast or broadcast Eth. address. 
*/\n \n /**\n  * Check if two Ethernet addresses are the same.\n@@ -79,7 +82,7 @@ static inline int rte_is_same_ether_addr(const struct rte_ether_addr *ea1,\n \t\t\t\t     const struct rte_ether_addr *ea2)\n {\n \tint i;\n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n \t\tif (ea1->addr_bytes[i] != ea2->addr_bytes[i])\n \t\t\treturn 0;\n \treturn 1;\n@@ -98,7 +101,7 @@ static inline int rte_is_same_ether_addr(const struct rte_ether_addr *ea1,\n static inline int rte_is_zero_ether_addr(const struct rte_ether_addr *ea)\n {\n \tint i;\n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n \t\tif (ea->addr_bytes[i] != 0x00)\n \t\t\treturn 0;\n \treturn 1;\n@@ -116,7 +119,7 @@ static inline int rte_is_zero_ether_addr(const struct rte_ether_addr *ea)\n  */\n static inline int rte_is_unicast_ether_addr(const struct rte_ether_addr *ea)\n {\n-\treturn (ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0;\n+\treturn (ea->addr_bytes[0] & RTE_ETHER_GROUP_ADDR) == 0;\n }\n \n /**\n@@ -131,7 +134,7 @@ static inline int rte_is_unicast_ether_addr(const struct rte_ether_addr *ea)\n  */\n static inline int rte_is_multicast_ether_addr(const struct rte_ether_addr *ea)\n {\n-\treturn ea->addr_bytes[0] & ETHER_GROUP_ADDR;\n+\treturn ea->addr_bytes[0] & RTE_ETHER_GROUP_ADDR;\n }\n \n /**\n@@ -164,7 +167,7 @@ static inline int rte_is_broadcast_ether_addr(const struct rte_ether_addr *ea)\n  */\n static inline int rte_is_universal_ether_addr(const struct rte_ether_addr *ea)\n {\n-\treturn (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0;\n+\treturn (ea->addr_bytes[0] & RTE_ETHER_LOCAL_ADMIN_ADDR) == 0;\n }\n \n /**\n@@ -179,7 +182,7 @@ static inline int rte_is_universal_ether_addr(const struct rte_ether_addr *ea)\n  */\n static inline int rte_is_local_admin_ether_addr(const struct rte_ether_addr *ea)\n {\n-\treturn (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0;\n+\treturn (ea->addr_bytes[0] & RTE_ETHER_LOCAL_ADMIN_ADDR) != 0;\n }\n \n /**\n@@ -209,9 +212,9 @@ static inline void rte_eth_random_addr(uint8_t *addr)\n \tuint64_t rand = rte_rand();\n \tuint8_t *p = (uint8_t *)&rand;\n \n-\trte_memcpy(addr, p, ETHER_ADDR_LEN);\n-\taddr[0] &= (uint8_t)~ETHER_GROUP_ADDR;       /* clear multicast bit */\n-\taddr[0] |= ETHER_LOCAL_ADMIN_ADDR;  /* set local assignment bit */\n+\trte_memcpy(addr, p, RTE_ETHER_ADDR_LEN);\n+\taddr[0] &= (uint8_t)~RTE_ETHER_GROUP_ADDR;  /* clear multicast bit */\n+\taddr[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;  /* set local assignment bit */\n }\n \n /**\n@@ -240,7 +243,7 @@ static inline void rte_ether_addr_copy(const struct rte_ether_addr *ea_from,\n #endif\n }\n \n-#define ETHER_ADDR_FMT_SIZE         18\n+#define RTE_ETHER_ADDR_FMT_SIZE         18\n /**\n  * Format 48bits Ethernet address in pattern xx:xx:xx:xx:xx:xx.\n  *\n@@ -295,24 +298,26 @@ struct rte_vxlan_hdr {\n } __attribute__((__packed__));\n \n /* Ethernet frame types */\n-#define ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */\n-#define ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */\n-#define ETHER_TYPE_ARP  0x0806 /**< Arp Protocol. */\n-#define ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */\n-#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */\n-#define ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */\n+#define RTE_ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */\n+#define RTE_ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */\n+#define RTE_ETHER_TYPE_ARP  0x0806 /**< Arp Protocol. */\n+#define RTE_ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. 
*/\n+#define RTE_ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */\n+#define RTE_ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */\n #define ETHER_TYPE_PPPOE_DISCOVERY 0x8863 /**< PPPoE Discovery Stage. */\n #define ETHER_TYPE_PPPOE_SESSION 0x8864 /**< PPPoE Session Stage. */\n-#define ETHER_TYPE_ETAG 0x893F /**< IEEE 802.1BR E-Tag. */\n-#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */\n-#define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */\n-#define ETHER_TYPE_TEB  0x6558 /**< Transparent Ethernet Bridging. */\n-#define ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */\n-#define ETHER_TYPE_MPLS 0x8847 /**< MPLS ethertype. */\n-#define ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */\n-\n-#define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr))\n-/**< VXLAN tunnel header length. */\n+#define RTE_ETHER_TYPE_ETAG 0x893F /**< IEEE 802.1BR E-Tag. */\n+#define RTE_ETHER_TYPE_1588 0x88F7\n+\t/**< IEEE 802.1AS 1588 Precise Time Protocol. */\n+#define RTE_ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */\n+#define RTE_ETHER_TYPE_TEB  0x6558 /**< Transparent Ethernet Bridging. */\n+#define RTE_ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */\n+#define RTE_ETHER_TYPE_MPLS 0x8847 /**< MPLS ethertype. */\n+#define RTE_ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */\n+\n+#define RTE_ETHER_VXLAN_HLEN \\\n+\t(sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr))\n+\t/**< VXLAN tunnel header length. */\n \n /**\n  * VXLAN-GPE protocol header (draft-ietf-nvo3-vxlan-gpe-05).\n@@ -327,15 +332,15 @@ struct rte_vxlan_gpe_hdr {\n } __attribute__((__packed__));\n \n /* VXLAN-GPE next protocol types */\n-#define VXLAN_GPE_TYPE_IPV4 1 /**< IPv4 Protocol. */\n-#define VXLAN_GPE_TYPE_IPV6 2 /**< IPv6 Protocol. */\n-#define VXLAN_GPE_TYPE_ETH  3 /**< Ethernet Protocol. */\n-#define VXLAN_GPE_TYPE_NSH  4 /**< NSH Protocol. */\n-#define VXLAN_GPE_TYPE_MPLS 5 /**< MPLS Protocol. */\n-#define VXLAN_GPE_TYPE_GBP  6 /**< GBP Protocol. */\n-#define VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */\n-\n-#define ETHER_VXLAN_GPE_HLEN (sizeof(struct udp_hdr) + \\\n+#define RTE_VXLAN_GPE_TYPE_IPV4 1 /**< IPv4 Protocol. */\n+#define RTE_VXLAN_GPE_TYPE_IPV6 2 /**< IPv6 Protocol. */\n+#define RTE_VXLAN_GPE_TYPE_ETH  3 /**< Ethernet Protocol. */\n+#define RTE_VXLAN_GPE_TYPE_NSH  4 /**< NSH Protocol. */\n+#define RTE_VXLAN_GPE_TYPE_MPLS 5 /**< MPLS Protocol. */\n+#define RTE_VXLAN_GPE_TYPE_GBP  6 /**< GBP Protocol. */\n+#define RTE_VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */\n+\n+#define RTE_ETHER_VXLAN_GPE_HLEN (sizeof(struct udp_hdr) + \\\n \t\t\t      sizeof(struct rte_vxlan_gpe_hdr))\n /**< VXLAN-GPE tunnel header length. 
*/\n \n@@ -356,7 +361,7 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)\n \t\t = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \tstruct rte_vlan_hdr *vh;\n \n-\tif (eh->ether_type != rte_cpu_to_be_16(ETHER_TYPE_VLAN))\n+\tif (eh->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))\n \t\treturn -1;\n \n \tvh = (struct rte_vlan_hdr *)(eh + 1);\n@@ -365,7 +370,7 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)\n \n \t/* Copy ether header over rather than moving whole packet */\n \tmemmove(rte_pktmbuf_adj(m, sizeof(struct rte_vlan_hdr)),\n-\t\teh, 2 * ETHER_ADDR_LEN);\n+\t\teh, 2 * RTE_ETHER_ADDR_LEN);\n \n \treturn 0;\n }\n@@ -404,8 +409,8 @@ static inline int rte_vlan_insert(struct rte_mbuf **m)\n \tif (nh == NULL)\n \t\treturn -ENOSPC;\n \n-\tmemmove(nh, oh, 2 * ETHER_ADDR_LEN);\n-\tnh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);\n+\tmemmove(nh, oh, 2 * RTE_ETHER_ADDR_LEN);\n+\tnh->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);\n \n \tvh = (struct rte_vlan_hdr *) (nh + 1);\n \tvh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);\ndiff --git a/lib/librte_net/rte_net.c b/lib/librte_net/rte_net.c\nindex 315c37c55..5551cce17 100644\n--- a/lib/librte_net/rte_net.c\n+++ b/lib/librte_net/rte_net.c\n@@ -155,16 +155,16 @@ ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,\n \n \t\t*off += opt_len[flags];\n \t\t*proto = gh->proto;\n-\t\tif (*proto == rte_cpu_to_be_16(ETHER_TYPE_TEB))\n+\t\tif (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))\n \t\t\treturn RTE_PTYPE_TUNNEL_NVGRE;\n \t\telse\n \t\t\treturn RTE_PTYPE_TUNNEL_GRE;\n \t}\n \tcase IPPROTO_IPIP:\n-\t\t*proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);\n+\t\t*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);\n \t\treturn RTE_PTYPE_TUNNEL_IP;\n \tcase IPPROTO_IPV6:\n-\t\t*proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);\n+\t\t*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);\n \t\treturn RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */\n \tdefault:\n \t\treturn 0;\n@@ -249,10 +249,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \tif ((layers & RTE_PTYPE_L2_MASK) == 0)\n \t\treturn 0;\n \n-\tif (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4))\n+\tif (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))\n \t\tgoto l3; /* fast path if packet is IPv4 */\n \n-\tif (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {\n+\tif (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {\n \t\tconst struct rte_vlan_hdr *vh;\n \t\tstruct rte_vlan_hdr vh_copy;\n \n@@ -263,7 +263,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\toff += sizeof(*vh);\n \t\thdr_lens->l2_len += sizeof(*vh);\n \t\tproto = vh->eth_proto;\n-\t} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {\n+\t} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {\n \t\tconst struct rte_vlan_hdr *vh;\n \t\tstruct rte_vlan_hdr vh_copy;\n \n@@ -275,8 +275,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\toff += 2 * sizeof(*vh);\n \t\thdr_lens->l2_len += 2 * sizeof(*vh);\n \t\tproto = vh->eth_proto;\n-\t} else if ((proto == rte_cpu_to_be_16(ETHER_TYPE_MPLS)) ||\n-\t\t(proto == rte_cpu_to_be_16(ETHER_TYPE_MPLSM))) {\n+\t} else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||\n+\t\t(proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {\n \t\tunsigned int i;\n \t\tconst struct mpls_hdr *mh;\n \t\tstruct mpls_hdr mh_copy;\n@@ -299,7 +299,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \tif ((layers & RTE_PTYPE_L3_MASK) == 0)\n \t\treturn pkt_type;\n \n-\tif (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {\n+\tif (proto == 
rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {\n \t\tconst struct ipv4_hdr *ip4h;\n \t\tstruct ipv4_hdr ip4h_copy;\n \n@@ -322,7 +322,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\t}\n \t\tproto = ip4h->next_proto_id;\n \t\tpkt_type |= ptype_l4(proto);\n-\t} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {\n+\t} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {\n \t\tconst struct ipv6_hdr *ip6h;\n \t\tstruct ipv6_hdr ip6h_copy;\n \t\tint frag = 0;\n@@ -391,7 +391,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\treturn pkt_type;\n \n \thdr_lens->inner_l2_len = 0;\n-\tif (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {\n+\tif (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {\n \t\teh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);\n \t\tif (unlikely(eh == NULL))\n \t\t\treturn pkt_type;\n@@ -401,7 +401,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\thdr_lens->inner_l2_len = sizeof(*eh);\n \t}\n \n-\tif (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {\n+\tif (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {\n \t\tconst struct rte_vlan_hdr *vh;\n \t\tstruct rte_vlan_hdr vh_copy;\n \n@@ -413,7 +413,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\toff += sizeof(*vh);\n \t\thdr_lens->inner_l2_len += sizeof(*vh);\n \t\tproto = vh->eth_proto;\n-\t} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {\n+\t} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {\n \t\tconst struct rte_vlan_hdr *vh;\n \t\tstruct rte_vlan_hdr vh_copy;\n \n@@ -431,7 +431,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \tif ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)\n \t\treturn pkt_type;\n \n-\tif (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {\n+\tif (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {\n \t\tconst struct ipv4_hdr *ip4h;\n \t\tstruct ipv4_hdr ip4h_copy;\n \n@@ -454,7 +454,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,\n \t\t}\n \t\tproto = ip4h->next_proto_id;\n \t\tpkt_type |= ptype_inner_l4(proto);\n-\t} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {\n+\t} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {\n \t\tconst struct ipv6_hdr *ip6h;\n \t\tstruct ipv6_hdr ip6h_copy;\n \t\tint frag = 0;\ndiff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c\nindex a474148c7..ea966cb94 100644\n--- a/lib/librte_pipeline/rte_table_action.c\n+++ b/lib/librte_pipeline/rte_table_action.c\n@@ -611,8 +611,8 @@ encap_ether_apply(void *data,\n {\n \tstruct encap_ether_data *d = data;\n \tuint16_t ethertype = (common_cfg->ip_version) ?\n-\t\tETHER_TYPE_IPv4 :\n-\t\tETHER_TYPE_IPv6;\n+\t\tRTE_ETHER_TYPE_IPv4 :\n+\t\tRTE_ETHER_TYPE_IPv6;\n \n \t/* Ethernet */\n \trte_ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);\n@@ -629,13 +629,13 @@ encap_vlan_apply(void *data,\n {\n \tstruct encap_vlan_data *d = data;\n \tuint16_t ethertype = (common_cfg->ip_version) ?\n-\t\tETHER_TYPE_IPv4 :\n-\t\tETHER_TYPE_IPv6;\n+\t\tRTE_ETHER_TYPE_IPv4 :\n+\t\tRTE_ETHER_TYPE_IPv6;\n \n \t/* Ethernet */\n \trte_ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);\n \trte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);\n-\td->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);\n+\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);\n \n \t/* VLAN */\n \td->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,\n@@ -653,19 +653,19 @@ encap_qinq_apply(void *data,\n {\n \tstruct encap_qinq_data *d = data;\n \tuint16_t ethertype = (common_cfg->ip_version) ?\n-\t\tETHER_TYPE_IPv4 
:\n-\t\tETHER_TYPE_IPv6;\n+\t\tRTE_ETHER_TYPE_IPv4 :\n+\t\tRTE_ETHER_TYPE_IPv6;\n \n \t/* Ethernet */\n \trte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);\n \trte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);\n-\td->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);\n+\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);\n \n \t/* SVLAN */\n \td->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,\n \t\tp->qinq.svlan.dei,\n \t\tp->qinq.svlan.vid));\n-\td->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);\n+\td->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);\n \n \t/* CVLAN */\n \td->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,\n@@ -685,13 +685,13 @@ encap_qinq_pppoe_apply(void *data,\n \t/* Ethernet */\n \trte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);\n \trte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);\n-\td->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);\n+\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);\n \n \t/* SVLAN */\n \td->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,\n \t\tp->qinq.svlan.dei,\n \t\tp->qinq.svlan.vid));\n-\td->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);\n+\td->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);\n \n \t/* CVLAN */\n \td->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,\n@@ -780,13 +780,13 @@ encap_vxlan_apply(void *data,\n \t\t\t\t\t&d->ether.d_addr);\n \t\t\trte_ether_addr_copy(&p->vxlan.ether.sa,\n \t\t\t\t\t&d->ether.s_addr);\n-\t\t\td->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);\n+\t\t\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);\n \n \t\t\t/* VLAN */\n \t\t\td->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,\n \t\t\t\tp->vxlan.vlan.dei,\n \t\t\t\tp->vxlan.vlan.vid));\n-\t\t\td->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);\n+\t\t\td->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPv4);\n \n \t\t\t/* IPv4*/\n \t\t\td->ipv4.version_ihl = 0x45;\n@@ -821,7 +821,7 @@ encap_vxlan_apply(void *data,\n \t\t\t\t\t&d->ether.d_addr);\n \t\t\trte_ether_addr_copy(&p->vxlan.ether.sa,\n \t\t\t\t\t&d->ether.s_addr);\n-\t\t\td->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);\n+\t\t\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPv4);\n \n \t\t\t/* IPv4*/\n \t\t\td->ipv4.version_ihl = 0x45;\n@@ -858,13 +858,13 @@ encap_vxlan_apply(void *data,\n \t\t\t\t\t&d->ether.d_addr);\n \t\t\trte_ether_addr_copy(&p->vxlan.ether.sa,\n \t\t\t\t\t&d->ether.s_addr);\n-\t\t\td->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);\n+\t\t\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);\n \n \t\t\t/* VLAN */\n \t\t\td->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,\n \t\t\t\tp->vxlan.vlan.dei,\n \t\t\t\tp->vxlan.vlan.vid));\n-\t\t\td->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);\n+\t\t\td->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPv6);\n \n \t\t\t/* IPv6*/\n \t\t\td->ipv6.vtc_flow = rte_htonl((6 << 28) |\n@@ -899,7 +899,7 @@ encap_vxlan_apply(void *data,\n \t\t\t\t\t&d->ether.d_addr);\n \t\t\trte_ether_addr_copy(&p->vxlan.ether.sa,\n \t\t\t\t\t&d->ether.s_addr);\n-\t\t\td->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);\n+\t\t\td->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPv6);\n \n \t\t\t/* IPv6*/\n \t\t\td->ipv6.vtc_flow = rte_htonl((6 << 28) |\ndiff --git a/lib/librte_port/rte_port_source_sink.c b/lib/librte_port/rte_port_source_sink.c\nindex 54045f952..74b7385a2 100644\n--- a/lib/librte_port/rte_port_source_sink.c\n+++ b/lib/librte_port/rte_port_source_sink.c\n@@ -368,7 +368,7 @@ pcap_sink_write_pkt(struct rte_port_sink *port, struct rte_mbuf *mbuf)\n {\n \tuint8_t *pcap_dumper = (port->dumper);\n \tstruct 
pcap_pkthdr pcap_hdr;\n-\tuint8_t jumbo_pkt_buf[ETHER_MAX_JUMBO_FRAME_LEN];\n+\tuint8_t jumbo_pkt_buf[RTE_ETHER_MAX_JUMBO_FRAME_LEN];\n \tuint8_t *pkt;\n \n \t/* Maximum num packets already reached */\n@@ -385,10 +385,10 @@ pcap_sink_write_pkt(struct rte_port_sink *port, struct rte_mbuf *mbuf)\n \t\tstruct rte_mbuf *jumbo_mbuf;\n \t\tuint32_t pkt_index = 0;\n \n-\t\t/* if packet size longer than ETHER_MAX_JUMBO_FRAME_LEN,\n+\t\t/* if packet size longer than RTE_ETHER_MAX_JUMBO_FRAME_LEN,\n \t\t * ignore it.\n \t\t */\n-\t\tif (mbuf->pkt_len > ETHER_MAX_JUMBO_FRAME_LEN)\n+\t\tif (mbuf->pkt_len > RTE_ETHER_MAX_JUMBO_FRAME_LEN)\n \t\t\treturn;\n \n \t\tfor (jumbo_mbuf = mbuf; jumbo_mbuf != NULL;\ndiff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c\nindex f97ec76f9..2394178f0 100644\n--- a/lib/librte_vhost/virtio_net.c\n+++ b/lib/librte_vhost/virtio_net.c\n@@ -977,7 +977,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)\n \tm->l2_len = sizeof(struct rte_ether_hdr);\n \tethertype = rte_be_to_cpu_16(eth_hdr->ether_type);\n \n-\tif (ethertype == ETHER_TYPE_VLAN) {\n+\tif (ethertype == RTE_ETHER_TYPE_VLAN) {\n \t\tstruct rte_vlan_hdr *vlan_hdr =\n \t\t\t(struct rte_vlan_hdr *)(eth_hdr + 1);\n \n@@ -988,14 +988,14 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)\n \tl3_hdr = (char *)eth_hdr + m->l2_len;\n \n \tswitch (ethertype) {\n-\tcase ETHER_TYPE_IPv4:\n+\tcase RTE_ETHER_TYPE_IPv4:\n \t\tipv4_hdr = l3_hdr;\n \t\t*l4_proto = ipv4_hdr->next_proto_id;\n \t\tm->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;\n \t\t*l4_hdr = (char *)l3_hdr + m->l3_len;\n \t\tm->ol_flags |= PKT_TX_IPV4;\n \t\tbreak;\n-\tcase ETHER_TYPE_IPv6:\n+\tcase RTE_ETHER_TYPE_IPv6:\n \t\tipv6_hdr = l3_hdr;\n \t\t*l4_proto = ipv6_hdr->proto;\n \t\tm->l3_len = sizeof(struct ipv6_hdr);\n",
    "prefixes": [
        "05/15"
    ]
}
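
Note (not part of the API payload or the patch above): a minimal sketch of what caller code looks like after this rename, using only identifiers that appear in the diff (struct rte_ether_hdr, struct rte_ether_addr, rte_ether_addr_copy(), RTE_ETHER_TYPE_IPv4, rte_cpu_to_be_16()). The wrapper function name fill_eth_hdr() is hypothetical and shown purely for illustration.

/* Illustrative only: fill an Ethernet header using the RTE_-prefixed
 * macros introduced by this patch (RTE_ETHER_* instead of ETHER_*).
 */
#include <rte_ether.h>
#include <rte_byteorder.h>

static void
fill_eth_hdr(struct rte_ether_hdr *eth,
	     const struct rte_ether_addr *src,
	     const struct rte_ether_addr *dst)
{
	/* rte_ether_addr_copy() and RTE_ETHER_TYPE_IPv4 (0x0800) come
	 * from rte_ether.h, as shown in the diff above; field names
	 * d_addr/s_addr match the rte_ether_hdr layout of this era. */
	rte_ether_addr_copy(dst, &eth->d_addr);
	rte_ether_addr_copy(src, &eth->s_addr);
	eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
}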