[RFC] remove unused functions
diff mbox series

Message ID 20201119035238.3653702-1-ferruh.yigit@intel.com
State Deferred
Delegated to: Thomas Monjalon
Headers show
Series
  • [RFC] remove unused functions
Related show

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch warning coding style issues

Commit Message

Ferruh Yigit Nov. 19, 2020, 3:52 a.m. UTC
Removing unused functions, reported by cppcheck.

Easy way to remove clutter, since the code is already in the git repo,
they can be added back when needed.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test-eventdev/parser.c                    |   88 -
 app/test-eventdev/parser.h                    |    6 -
 app/test/test_table_pipeline.c                |   36 -
 drivers/bus/dpaa/base/fman/fman_hw.c          |  182 -
 drivers/bus/dpaa/base/fman/netcfg_layer.c     |   11 -
 drivers/bus/dpaa/base/qbman/bman.c            |   34 -
 drivers/bus/dpaa/base/qbman/bman_driver.c     |   16 -
 drivers/bus/dpaa/base/qbman/process.c         |   94 -
 drivers/bus/dpaa/base/qbman/qman.c            |  778 ----
 drivers/bus/dpaa/base/qbman/qman_priv.h       |    9 -
 drivers/bus/dpaa/dpaa_bus.c                   |   20 -
 drivers/bus/dpaa/include/fsl_bman.h           |   15 -
 drivers/bus/dpaa/include/fsl_fman.h           |   28 -
 drivers/bus/dpaa/include/fsl_qman.h           |  307 --
 drivers/bus/dpaa/include/fsl_usd.h            |   11 -
 drivers/bus/dpaa/include/netcfg.h             |    6 -
 drivers/bus/dpaa/rte_dpaa_bus.h               |   13 -
 drivers/bus/dpaa/version.map                  |   10 -
 drivers/bus/fslmc/fslmc_bus.c                 |   19 -
 drivers/bus/fslmc/mc/dpbp.c                   |  141 -
 drivers/bus/fslmc/mc/dpci.c                   |  320 --
 drivers/bus/fslmc/mc/dpcon.c                  |  241 --
 drivers/bus/fslmc/mc/dpdmai.c                 |  144 -
 drivers/bus/fslmc/mc/dpio.c                   |  191 -
 drivers/bus/fslmc/mc/fsl_dpbp.h               |   20 -
 drivers/bus/fslmc/mc/fsl_dpci.h               |   49 -
 drivers/bus/fslmc/mc/fsl_dpcon.h              |   37 -
 drivers/bus/fslmc/mc/fsl_dpdmai.h             |   20 -
 drivers/bus/fslmc/mc/fsl_dpio.h               |   26 -
 drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c      |    7 -
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |    3 -
 .../bus/fslmc/qbman/include/fsl_qbman_debug.h |    2 -
 .../fslmc/qbman/include/fsl_qbman_portal.h    |  463 ---
 drivers/bus/fslmc/qbman/qbman_debug.c         |    5 -
 drivers/bus/fslmc/qbman/qbman_portal.c        |  437 ---
 drivers/bus/fslmc/rte_fslmc.h                 |   10 -
 drivers/bus/fslmc/version.map                 |    6 -
 drivers/bus/ifpga/ifpga_common.c              |   23 -
 drivers/bus/ifpga/ifpga_common.h              |    3 -
 drivers/common/dpaax/dpaa_of.c                |   27 -
 drivers/common/dpaax/dpaa_of.h                |    5 -
 drivers/common/dpaax/dpaax_iova_table.c       |   39 -
 drivers/common/dpaax/dpaax_iova_table.h       |    2 -
 drivers/common/dpaax/version.map              |    1 -
 drivers/common/iavf/iavf_common.c             |  425 ---
 drivers/common/iavf/iavf_prototype.h          |   17 -
 drivers/common/octeontx2/otx2_mbox.c          |   13 -
 drivers/common/octeontx2/otx2_mbox.h          |    1 -
 drivers/crypto/bcmfs/bcmfs_sym_pmd.c          |   19 -
 drivers/crypto/bcmfs/bcmfs_sym_pmd.h          |    3 -
 drivers/crypto/bcmfs/bcmfs_vfio.c             |   24 -
 drivers/crypto/bcmfs/bcmfs_vfio.h             |    4 -
 drivers/crypto/caam_jr/caam_jr_pvt.h          |    1 -
 drivers/crypto/caam_jr/caam_jr_uio.c          |   28 -
 drivers/crypto/ccp/ccp_dev.c                  |   65 -
 drivers/crypto/ccp/ccp_dev.h                  |    8 -
 drivers/crypto/dpaa2_sec/mc/dpseci.c          |  401 --
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h      |   52 -
 drivers/crypto/virtio/virtio_pci.c            |   13 -
 drivers/crypto/virtio/virtio_pci.h            |    5 -
 drivers/event/dlb/dlb_priv.h                  |    2 -
 drivers/event/dlb/dlb_xstats.c                |    7 -
 drivers/event/dlb2/dlb2_priv.h                |    2 -
 drivers/event/dlb2/dlb2_xstats.c              |    7 -
 drivers/event/opdl/opdl_ring.c                |  210 --
 drivers/event/opdl/opdl_ring.h                |  236 --
 drivers/net/ark/ark_ddm.c                     |   13 -
 drivers/net/ark/ark_ddm.h                     |    1 -
 drivers/net/ark/ark_pktchkr.c                 |   52 -
 drivers/net/ark/ark_pktchkr.h                 |    3 -
 drivers/net/ark/ark_pktdir.c                  |   22 -
 drivers/net/ark/ark_pktdir.h                  |    3 -
 drivers/net/ark/ark_pktgen.c                  |   27 -
 drivers/net/ark/ark_pktgen.h                  |    2 -
 drivers/net/ark/ark_udm.c                     |   15 -
 drivers/net/ark/ark_udm.h                     |    2 -
 drivers/net/atlantic/hw_atl/hw_atl_b0.c       |   14 -
 drivers/net/atlantic/hw_atl/hw_atl_b0.h       |    2 -
 drivers/net/atlantic/hw_atl/hw_atl_llh.c      |  318 --
 drivers/net/atlantic/hw_atl/hw_atl_llh.h      |  153 -
 drivers/net/atlantic/hw_atl/hw_atl_utils.c    |   36 -
 drivers/net/atlantic/hw_atl/hw_atl_utils.h    |    4 -
 drivers/net/bnx2x/ecore_sp.c                  |   17 -
 drivers/net/bnx2x/ecore_sp.h                  |    2 -
 drivers/net/bnx2x/elink.c                     | 1367 -------
 drivers/net/bnx2x/elink.h                     |   57 -
 drivers/net/bnxt/tf_core/bitalloc.c           |  156 -
 drivers/net/bnxt/tf_core/bitalloc.h           |   26 -
 drivers/net/bnxt/tf_core/stack.c              |   25 -
 drivers/net/bnxt/tf_core/stack.h              |   12 -
 drivers/net/bnxt/tf_core/tf_core.c            |  241 --
 drivers/net/bnxt/tf_core/tf_core.h            |   81 -
 drivers/net/bnxt/tf_core/tf_msg.c             |   40 -
 drivers/net/bnxt/tf_core/tf_msg.h             |   31 -
 drivers/net/bnxt/tf_core/tf_session.c         |   33 -
 drivers/net/bnxt/tf_core/tf_session.h         |   16 -
 drivers/net/bnxt/tf_core/tf_shadow_tbl.c      |   53 -
 drivers/net/bnxt/tf_core/tf_shadow_tbl.h      |   14 -
 drivers/net/bnxt/tf_core/tf_tcam.c            |    7 -
 drivers/net/bnxt/tf_core/tf_tcam.h            |   17 -
 drivers/net/bnxt/tf_core/tfp.c                |   27 -
 drivers/net/bnxt/tf_core/tfp.h                |    4 -
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c          |   78 -
 drivers/net/bnxt/tf_ulp/ulp_port_db.c         |   31 -
 drivers/net/bnxt/tf_ulp/ulp_port_db.h         |   14 -
 drivers/net/bnxt/tf_ulp/ulp_utils.c           |   11 -
 drivers/net/bnxt/tf_ulp/ulp_utils.h           |    3 -
 drivers/net/bonding/eth_bond_private.h        |    4 -
 drivers/net/bonding/rte_eth_bond.h            |   38 -
 drivers/net/bonding/rte_eth_bond_api.c        |   39 -
 drivers/net/bonding/rte_eth_bond_pmd.c        |   22 -
 drivers/net/cxgbe/base/common.h               |    5 -
 drivers/net/cxgbe/base/t4_hw.c                |   41 -
 drivers/net/dpaa/fmlib/fm_vsp.c               |   19 -
 drivers/net/dpaa/fmlib/fm_vsp_ext.h           |    3 -
 drivers/net/dpaa2/mc/dpdmux.c                 |  725 ----
 drivers/net/dpaa2/mc/dpni.c                   |  818 +----
 drivers/net/dpaa2/mc/dprtc.c                  |  365 --
 drivers/net/dpaa2/mc/fsl_dpdmux.h             |  108 -
 drivers/net/dpaa2/mc/fsl_dpni.h               |  134 -
 drivers/net/dpaa2/mc/fsl_dprtc.h              |   57 -
 drivers/net/e1000/base/e1000_82542.c          |   97 -
 drivers/net/e1000/base/e1000_82543.c          |   78 -
 drivers/net/e1000/base/e1000_82543.h          |    4 -
 drivers/net/e1000/base/e1000_82571.c          |   35 -
 drivers/net/e1000/base/e1000_82571.h          |    1 -
 drivers/net/e1000/base/e1000_82575.c          |  298 --
 drivers/net/e1000/base/e1000_82575.h          |    8 -
 drivers/net/e1000/base/e1000_api.c            |  530 ---
 drivers/net/e1000/base/e1000_api.h            |   40 -
 drivers/net/e1000/base/e1000_base.c           |   78 -
 drivers/net/e1000/base/e1000_base.h           |    1 -
 drivers/net/e1000/base/e1000_ich8lan.c        |  266 --
 drivers/net/e1000/base/e1000_ich8lan.h        |    3 -
 drivers/net/e1000/base/e1000_mac.c            |   14 -
 drivers/net/e1000/base/e1000_mac.h            |    1 -
 drivers/net/e1000/base/e1000_manage.c         |  192 -
 drivers/net/e1000/base/e1000_manage.h         |    2 -
 drivers/net/e1000/base/e1000_nvm.c            |  129 -
 drivers/net/e1000/base/e1000_nvm.h            |    5 -
 drivers/net/e1000/base/e1000_phy.c            |  201 -
 drivers/net/e1000/base/e1000_phy.h            |    4 -
 drivers/net/e1000/base/e1000_vf.c             |   19 -
 drivers/net/e1000/base/e1000_vf.h             |    1 -
 drivers/net/ena/base/ena_com.c                |  222 --
 drivers/net/ena/base/ena_com.h                |  144 -
 drivers/net/ena/base/ena_eth_com.c            |   11 -
 drivers/net/ena/base/ena_eth_com.h            |    2 -
 drivers/net/fm10k/base/fm10k_api.c            |  104 -
 drivers/net/fm10k/base/fm10k_api.h            |   11 -
 drivers/net/fm10k/base/fm10k_tlv.c            |  183 -
 drivers/net/fm10k/base/fm10k_tlv.h            |    1 -
 drivers/net/i40e/base/i40e_common.c           | 2989 ++-------------
 drivers/net/i40e/base/i40e_dcb.c              |   43 -
 drivers/net/i40e/base/i40e_dcb.h              |    3 -
 drivers/net/i40e/base/i40e_diag.c             |  146 -
 drivers/net/i40e/base/i40e_diag.h             |   30 -
 drivers/net/i40e/base/i40e_lan_hmc.c          |  264 --
 drivers/net/i40e/base/i40e_lan_hmc.h          |    6 -
 drivers/net/i40e/base/i40e_nvm.c              |  988 -----
 drivers/net/i40e/base/i40e_prototype.h        |  202 -
 drivers/net/i40e/base/meson.build             |    1 -
 drivers/net/iavf/iavf.h                       |    2 -
 drivers/net/iavf/iavf_vchnl.c                 |   72 -
 drivers/net/ice/base/ice_acl.c                |  108 -
 drivers/net/ice/base/ice_acl.h                |   13 -
 drivers/net/ice/base/ice_common.c             | 2084 ++---------
 drivers/net/ice/base/ice_common.h             |   70 -
 drivers/net/ice/base/ice_dcb.c                |  161 -
 drivers/net/ice/base/ice_dcb.h                |   11 -
 drivers/net/ice/base/ice_fdir.c               |  262 --
 drivers/net/ice/base/ice_fdir.h               |   16 -
 drivers/net/ice/base/ice_flex_pipe.c          |  103 -
 drivers/net/ice/base/ice_flex_pipe.h          |    4 -
 drivers/net/ice/base/ice_flow.c               |  207 --
 drivers/net/ice/base/ice_flow.h               |   15 -
 drivers/net/ice/base/ice_nvm.c                |  200 -
 drivers/net/ice/base/ice_nvm.h                |    8 -
 drivers/net/ice/base/ice_sched.c              | 1440 +-------
 drivers/net/ice/base/ice_sched.h              |   78 -
 drivers/net/ice/base/ice_switch.c             | 1646 +--------
 drivers/net/ice/base/ice_switch.h             |   62 -
 drivers/net/igc/base/igc_api.c                |  598 ---
 drivers/net/igc/base/igc_api.h                |   41 -
 drivers/net/igc/base/igc_base.c               |   78 -
 drivers/net/igc/base/igc_base.h               |    1 -
 drivers/net/igc/base/igc_hw.h                 |    3 -
 drivers/net/igc/base/igc_i225.c               |  159 -
 drivers/net/igc/base/igc_i225.h               |    4 -
 drivers/net/igc/base/igc_mac.c                |  853 -----
 drivers/net/igc/base/igc_mac.h                |   22 -
 drivers/net/igc/base/igc_manage.c             |  262 --
 drivers/net/igc/base/igc_manage.h             |    4 -
 drivers/net/igc/base/igc_nvm.c                |  679 ----
 drivers/net/igc/base/igc_nvm.h                |   16 -
 drivers/net/igc/base/igc_osdep.c              |   25 -
 drivers/net/igc/base/igc_phy.c                | 3256 +----------------
 drivers/net/igc/base/igc_phy.h                |   49 -
 drivers/net/ionic/ionic.h                     |    2 -
 drivers/net/ionic/ionic_dev.c                 |   39 -
 drivers/net/ionic/ionic_dev.h                 |    4 -
 drivers/net/ionic/ionic_lif.c                 |   11 -
 drivers/net/ionic/ionic_lif.h                 |    1 -
 drivers/net/ionic/ionic_main.c                |   33 -
 drivers/net/ionic/ionic_rx_filter.c           |   14 -
 drivers/net/ionic/ionic_rx_filter.h           |    1 -
 drivers/net/mlx5/mlx5.h                       |    1 -
 drivers/net/mlx5/mlx5_utils.c                 |   21 -
 drivers/net/mlx5/mlx5_utils.h                 |   25 -
 drivers/net/mvneta/mvneta_ethdev.c            |   18 -
 drivers/net/netvsc/hn_rndis.c                 |   31 -
 drivers/net/netvsc/hn_rndis.h                 |    1 -
 drivers/net/netvsc/hn_var.h                   |    3 -
 drivers/net/netvsc/hn_vf.c                    |   25 -
 drivers/net/nfp/nfpcore/nfp_cpp.h             |  213 --
 drivers/net/nfp/nfpcore/nfp_cppcore.c         |  218 --
 drivers/net/nfp/nfpcore/nfp_mip.c             |    6 -
 drivers/net/nfp/nfpcore/nfp_mip.h             |    1 -
 drivers/net/nfp/nfpcore/nfp_mutex.c           |   93 -
 drivers/net/nfp/nfpcore/nfp_nsp.c             |   41 -
 drivers/net/nfp/nfpcore/nfp_nsp.h             |   16 -
 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c        |   79 -
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         |  206 --
 drivers/net/nfp/nfpcore/nfp_resource.c        |   12 -
 drivers/net/nfp/nfpcore/nfp_resource.h        |    7 -
 drivers/net/nfp/nfpcore/nfp_rtsym.c           |   34 -
 drivers/net/nfp/nfpcore/nfp_rtsym.h           |    4 -
 drivers/net/octeontx/base/octeontx_bgx.c      |   54 -
 drivers/net/octeontx/base/octeontx_bgx.h      |    2 -
 drivers/net/octeontx/base/octeontx_pkivf.c    |   22 -
 drivers/net/octeontx/base/octeontx_pkivf.h    |    1 -
 drivers/net/octeontx2/otx2_ethdev.c           |   26 -
 drivers/net/octeontx2/otx2_ethdev.h           |    3 -
 drivers/net/octeontx2/otx2_ethdev_debug.c     |   55 -
 drivers/net/octeontx2/otx2_flow.h             |    2 -
 drivers/net/octeontx2/otx2_flow_utils.c       |   18 -
 drivers/net/pfe/base/pfe.h                    |   12 -
 drivers/net/pfe/pfe_hal.c                     |  144 -
 drivers/net/pfe/pfe_hif_lib.c                 |   20 -
 drivers/net/pfe/pfe_hif_lib.h                 |    1 -
 drivers/net/qede/base/ecore.h                 |    3 -
 drivers/net/qede/base/ecore_cxt.c             |  229 --
 drivers/net/qede/base/ecore_cxt.h             |   27 -
 drivers/net/qede/base/ecore_dcbx.c            |  266 --
 drivers/net/qede/base/ecore_dcbx_api.h        |   27 -
 drivers/net/qede/base/ecore_dev.c             |  306 --
 drivers/net/qede/base/ecore_dev_api.h         |  127 -
 drivers/net/qede/base/ecore_hw.c              |   16 -
 drivers/net/qede/base/ecore_hw.h              |   10 -
 drivers/net/qede/base/ecore_init_fw_funcs.c   |  616 ----
 drivers/net/qede/base/ecore_init_fw_funcs.h   |  227 --
 drivers/net/qede/base/ecore_int.c             |  193 -
 drivers/net/qede/base/ecore_int.h             |   13 -
 drivers/net/qede/base/ecore_int_api.h         |   60 -
 drivers/net/qede/base/ecore_iov_api.h         |  469 ---
 drivers/net/qede/base/ecore_l2.c              |  103 -
 drivers/net/qede/base/ecore_l2_api.h          |   24 -
 drivers/net/qede/base/ecore_mcp.c             | 1121 +-----
 drivers/net/qede/base/ecore_mcp.h             |   37 -
 drivers/net/qede/base/ecore_mcp_api.h         |  449 ---
 drivers/net/qede/base/ecore_sp_commands.c     |   89 -
 drivers/net/qede/base/ecore_sp_commands.h     |   21 -
 drivers/net/qede/base/ecore_sriov.c           |  767 ----
 drivers/net/qede/base/ecore_vf.c              |   48 -
 drivers/net/qede/base/ecore_vf_api.h          |   40 -
 drivers/net/qede/qede_debug.c                 |  532 ---
 drivers/net/qede/qede_debug.h                 |   97 -
 drivers/net/sfc/sfc_kvargs.c                  |   37 -
 drivers/net/sfc/sfc_kvargs.h                  |    2 -
 drivers/net/softnic/parser.c                  |  218 --
 drivers/net/softnic/parser.h                  |   10 -
 .../net/softnic/rte_eth_softnic_cryptodev.c   |   15 -
 .../net/softnic/rte_eth_softnic_internals.h   |   28 -
 drivers/net/softnic/rte_eth_softnic_thread.c  |  183 -
 drivers/net/txgbe/base/txgbe_eeprom.c         |   72 -
 drivers/net/txgbe/base/txgbe_eeprom.h         |    2 -
 drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
 drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
 drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
 drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
 drivers/raw/ifpga/base/opae_i2c.c             |   12 -
 drivers/raw/ifpga/base/opae_i2c.h             |    4 -
 drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
 drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
 drivers/regex/mlx5/mlx5_regex.h               |    2 -
 drivers/regex/mlx5/mlx5_regex_fastpath.c      |   25 -
 drivers/regex/mlx5/mlx5_rxp.c                 |   45 -
 .../regex/octeontx2/otx2_regexdev_hw_access.c |   58 -
 .../regex/octeontx2/otx2_regexdev_hw_access.h |    2 -
 drivers/regex/octeontx2/otx2_regexdev_mbox.c  |   28 -
 drivers/regex/octeontx2/otx2_regexdev_mbox.h  |    3 -
 examples/ip_pipeline/cryptodev.c              |    8 -
 examples/ip_pipeline/cryptodev.h              |    3 -
 examples/ip_pipeline/link.c                   |   21 -
 examples/ip_pipeline/link.h                   |    3 -
 examples/ip_pipeline/parser.c                 |  202 -
 examples/ip_pipeline/parser.h                 |    7 -
 examples/pipeline/obj.c                       |   21 -
 examples/pipeline/obj.h                       |    3 -
 lib/librte_eal/linux/eal_memory.c             |    8 -
 lib/librte_vhost/fd_man.c                     |   15 -
 lib/librte_vhost/fd_man.h                     |    2 -
 302 files changed, 833 insertions(+), 38856 deletions(-)
 delete mode 100644 drivers/net/i40e/base/i40e_diag.c
 delete mode 100644 drivers/net/i40e/base/i40e_diag.h

Comments

Xu, Rosen Nov. 19, 2020, 7:22 a.m. UTC | #1
Hi,

> -----Original Message-----
> From: Yigit, Ferruh <ferruh.yigit@intel.com>
> Sent: Thursday, November 19, 2020 11:53
> To: Jerin Jacob <jerinj@marvell.com>; Dumitrescu, Cristian
> <cristian.dumitrescu@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> Ray Kinsella <mdr@ashroe.eu>; Neil Horman <nhorman@tuxdriver.com>; Xu,
> Rosen <rosen.xu@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>; Nithin Dabilpuram
> <ndabilpuram@marvell.com>; Ajit Khaparde
> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
> John Miller <john.miller@atomicrules.com>; Igor Russkikh
> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
> <shshaikh@marvell.com>; Somnath Kotur
> <somnath.kotur@broadcom.com>; Chas Williams <chas3@att.com>; Min Hu
> (Connor) <humin29@huawei.com>; Rahul Lakkireddy
> <rahul.lakkireddy@chelsio.com>; Guo, Jia <jia.guo@intel.com>; Wang,
> Haiyue <haiyue.wang@intel.com>; Marcin Wojtas <mw@semihalf.com>;
> Michal Krawczyk <mk@semihalf.com>; Guy Tzalik <gtzalik@amazon.com>;
> Evgeny Schemeilin <evgenys@amazon.com>; Igor Chauskin
> <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Xiao W
> <xiao.w.wang@intel.com>; Yang, Qiming <qiming.yang@intel.com>; Alfredo
> Cardigliano <cardigliano@ntop.org>; Matan Azrad <matan@nvidia.com>;
> Shahaf Shuler <shahafs@nvidia.com>; Viacheslav Ovsiienko
> <viacheslavo@nvidia.com>; Zyta Szpak <zr@semihalf.com>; Liron Himi
> <lironh@marvell.com>; Stephen Hemminger <sthemmin@microsoft.com>; K.
> Y. Srinivasan <kys@microsoft.com>; Haiyang Zhang
> <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich Kuhn
> <heinrich.kuhn@netronome.com>; Harman Kalra <hkalra@marvell.com>;
> Kiran Kumar K <kirankumark@marvell.com>; Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
> Wang <jianwang@trustnetic.com>; Zhang, Tianfei
> <tianfei.zhang@intel.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Subject: [RFC] remove unused functions
> 
> Removing unused functions, reported by cppcheck.
> 
> Easy way to remove clutter, since the code is already in the git repo,
> they can be added back when needed.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
>  drivers/bus/ifpga/ifpga_common.c              |   23 -
>  drivers/bus/ifpga/ifpga_common.h              |    3 -
> 

> diff --git a/drivers/bus/ifpga/ifpga_common.c
> b/drivers/bus/ifpga/ifpga_common.c
> index 78e2eaee4e..7281b169d0 100644
> --- a/drivers/bus/ifpga/ifpga_common.c
> +++ b/drivers/bus/ifpga/ifpga_common.c
> @@ -52,29 +52,6 @@ int rte_ifpga_get_integer32_arg(const char *key
> __rte_unused,
> 
>  	return 0;
>  }
> -int ifpga_get_integer64_arg(const char *key __rte_unused,
> -	const char *value, void *extra_args)
> -{
> -	if (!value || !extra_args)
> -		return -EINVAL;
> -
> -	*(uint64_t *)extra_args = strtoull(value, NULL, 0);
> -
> -	return 0;
> -}
> -int ifpga_get_unsigned_long(const char *str, int base)
> -{
> -	unsigned long num;
> -	char *end = NULL;
> -
> -	errno = 0;
> -
> -	num = strtoul(str, &end, base);
> -	if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
> -		return -1;
> -
> -	return num;
> -}
> 
>  int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
>  	const struct rte_afu_id *afu_id1)
> diff --git a/drivers/bus/ifpga/ifpga_common.h
> b/drivers/bus/ifpga/ifpga_common.h
> index f9254b9d5d..44381eb78d 100644
> --- a/drivers/bus/ifpga/ifpga_common.h
> +++ b/drivers/bus/ifpga/ifpga_common.h
> @@ -9,9 +9,6 @@ int rte_ifpga_get_string_arg(const char *key
> __rte_unused,
>  	const char *value, void *extra_args);
>  int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
>  	const char *value, void *extra_args);
> -int ifpga_get_integer64_arg(const char *key __rte_unused,
> -	const char *value, void *extra_args);
> -int ifpga_get_unsigned_long(const char *str, int base);
>  int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
>  	const struct rte_afu_id *afu_id1);
> 
> 2.26.2

Reviewed-by: Rosen Xu <rosen.xu@intel.com>
Stephen Hemminger Nov. 19, 2020, 7:01 p.m. UTC | #2
On Thu, 19 Nov 2020 03:52:36 +0000
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

>  drivers/net/netvsc/hn_rndis.c                 |   31 -
>  drivers/net/netvsc/hn_rndis.h                 |    1 -
>  drivers/net/netvsc/hn_var.h                   |    3 -
>  drivers/net/netvsc/hn_vf.c                    |   25 -


Thanks this found some real bugs.
  1. The get_supported_ptypes implementation is wrong; the driver should simply not support it.
  2. VF stats need to be reset.
  3. VF tx_queue release call needs to be added.

Sent tentative patches off for testing.
Zhang, Tianfei Nov. 23, 2020, 2:55 a.m. UTC | #3
>  drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
>  drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
>  drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
>  drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
>  drivers/raw/ifpga/base/opae_i2c.c             |   12 -
>  drivers/raw/ifpga/base/opae_i2c.h             |    4 -
>  drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
>  drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -

The ifpga base code is intended to provide fully functional low-level hardware support for Intel FPGA cards like the N3000. Even though some APIs are not used by the DPDK framework now, they will be useful for end-users or other customers developing their own products, so my opinion is to keep those APIs of the ifpga base code in DPDK.
Ferruh Yigit Nov. 23, 2020, 9:56 a.m. UTC | #4
On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
>>   drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
>>   drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
>>   drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
>>   drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
>>   drivers/raw/ifpga/base/opae_i2c.c             |   12 -
>>   drivers/raw/ifpga/base/opae_i2c.h             |    4 -
>>   drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
>>   drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
> 
> The ifpga base code is want to provide fully functionality low level hardware support for Intel FPGA card like N3000, even though some APIs are not used by DPDK framework now,but it will useful for end-users or other customers developing their own productions , my opinion is keeping those APIs of ifpga base code in DPDK.
> 

The code is hidden behind the driver code; it is not directly accessible to the user.

If the idea is that the code can be used later, it can be added back when needed.

If the idea is that the code may be needed in the field by users, can you please give
an example of the use case?
Stephen Hemminger Nov. 23, 2020, 4:07 p.m. UTC | #5
On Mon, 23 Nov 2020 09:56:51 +0000
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

> On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
> >>   drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
> >>   drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
> >>   drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
> >>   drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
> >>   drivers/raw/ifpga/base/opae_i2c.c             |   12 -
> >>   drivers/raw/ifpga/base/opae_i2c.h             |    4 -
> >>   drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
> >>   drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -  
> > 
> > The ifpga base code is want to provide fully functionality low level hardware support for Intel FPGA card like N3000, even though some APIs are not used by DPDK framework now,but it will useful for end-users or other customers developing their own productions , my opinion is keeping those APIs of ifpga base code in DPDK.
> >   
> 
> The code is hidden behind the driver code, it is not accessible to user directly.
> 
> If the idea is the code can be used later, it can be added when needed.
> 
> If the idea is the code can be needed in the field by users, can you please give 
> example on the use case?

Dead code is buggy code. Please remove it.
Maintaining dead code increases the overall technical debt.
Dead code is guaranteed not to be tested.

If someone needs it in future they can resurrect it from earlier versions
which will always be there in git.

If Intel wants to support it in DPDK then it should provide API's and tests for it.
Maxime Coquelin Nov. 23, 2020, 4:20 p.m. UTC | #6
On 11/19/20 4:52 AM, Ferruh Yigit wrote:
> diff --git a/lib/librte_vhost/fd_man.c b/lib/librte_vhost/fd_man.c
> index 55d4856f9e..942c5f145b 100644
> --- a/lib/librte_vhost/fd_man.c
> +++ b/lib/librte_vhost/fd_man.c
> @@ -100,21 +100,6 @@ fdset_add_fd(struct fdset *pfdset, int idx, int fd,
>  	pfd->revents = 0;
>  }
>  
> -void
> -fdset_init(struct fdset *pfdset)
> -{
> -	int i;
> -
> -	if (pfdset == NULL)
> -		return;
> -
> -	for (i = 0; i < MAX_FDS; i++) {
> -		pfdset->fd[i].fd = -1;
> -		pfdset->fd[i].dat = NULL;
> -	}
> -	pfdset->num = 0;
> -}
> -
>  /**
>   * Register the fd in the fdset with read/write handler and context.
>   */
> diff --git a/lib/librte_vhost/fd_man.h b/lib/librte_vhost/fd_man.h
> index 3ab5cfdd60..f0157eeeed 100644
> --- a/lib/librte_vhost/fd_man.h
> +++ b/lib/librte_vhost/fd_man.h
> @@ -39,8 +39,6 @@ struct fdset {
>  };
>  
>  
> -void fdset_init(struct fdset *pfdset);
> -
>  int fdset_add(struct fdset *pfdset, int fd,
>  	fd_cb rcb, fd_cb wcb, void *dat);
>  

For Vhost changes:
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

For the patch in general, I think it would better be split per driver/lib.

Thanks,
Maxime
Jiawen Wu Nov. 24, 2020, 3:32 a.m. UTC | #7
On Thursday, November 19, 2020 11:53 AM, Ferruh Yigit wrote:
> Removing unused functions, reported by cppcheck.
> 
> Easy way to remove clutter, since the code is already in the git repo,
> they can be added back when needed.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
>  drivers/net/txgbe/base/txgbe_eeprom.c         |   72 -
>  drivers/net/txgbe/base/txgbe_eeprom.h         |    2 -
>
> diff --git a/drivers/net/txgbe/base/txgbe_eeprom.c
> b/drivers/net/txgbe/base/txgbe_eeprom.c
> index 72cd3ff307..fedaecf26d 100644
> --- a/drivers/net/txgbe/base/txgbe_eeprom.c
> +++ b/drivers/net/txgbe/base/txgbe_eeprom.c
> @@ -274,42 +274,6 @@ s32 txgbe_ee_read32(struct txgbe_hw *hw, u32
> addr, u32 *data)
>  	return err;
>  }
> 
> -/**
> - *  txgbe_ee_read_buffer - Read EEPROM byte(s) using hostif
> - *  @hw: pointer to hardware structure
> - *  @addr: offset of bytes in the EEPROM to read
> - *  @len: number of bytes
> - *  @data: byte(s) read from the EEPROM
> - *
> - *  Reads a 8 bit byte(s) from the EEPROM using the hostif.
> - **/
> -s32 txgbe_ee_read_buffer(struct txgbe_hw *hw,
> -				     u32 addr, u32 len, void *data)
> -{
> -	const u32 mask = TXGBE_MNGSEM_SWMBX |
> TXGBE_MNGSEM_SWFLASH;
> -	u8 *buf = (u8 *)data;
> -	int err;
> -
> -	err = hw->mac.acquire_swfw_sync(hw, mask);
> -	if (err)
> -		return err;
> -
> -	while (len) {
> -		u32 seg = (len <= TXGBE_PMMBX_DATA_SIZE
> -				? len : TXGBE_PMMBX_DATA_SIZE);
> -
> -		err = txgbe_hic_sr_read(hw, addr, buf, seg);
> -		if (err)
> -			break;
> -
> -		len -= seg;
> -		buf += seg;
> -	}
> -
> -	hw->mac.release_swfw_sync(hw, mask);
> -	return err;
> -}
> -
>  /**
>   *  txgbe_ee_write - Write EEPROM word using hostif
>   *  @hw: pointer to hardware structure
> @@ -420,42 +384,6 @@ s32 txgbe_ee_write32(struct txgbe_hw *hw, u32
> addr, u32 data)
>  	return err;
>  }
> 
> -/**
> - *  txgbe_ee_write_buffer - Write EEPROM byte(s) using hostif
> - *  @hw: pointer to hardware structure
> - *  @addr: offset of bytes in the EEPROM to write
> - *  @len: number of bytes
> - *  @data: word(s) write to the EEPROM
> - *
> - *  Write a 8 bit byte(s) to the EEPROM using the hostif.
> - **/
> -s32 txgbe_ee_write_buffer(struct txgbe_hw *hw,
> -				      u32 addr, u32 len, void *data)
> -{
> -	const u32 mask = TXGBE_MNGSEM_SWMBX |
> TXGBE_MNGSEM_SWFLASH;
> -	u8 *buf = (u8 *)data;
> -	int err;
> -
> -	err = hw->mac.acquire_swfw_sync(hw, mask);
> -	if (err)
> -		return err;
> -
> -	while (len) {
> -		u32 seg = (len <= TXGBE_PMMBX_DATA_SIZE
> -				? len : TXGBE_PMMBX_DATA_SIZE);
> -
> -		err = txgbe_hic_sr_write(hw, addr, buf, seg);
> -		if (err)
> -			break;
> -
> -		len -= seg;
> -		buf += seg;
> -	}
> -
> -	hw->mac.release_swfw_sync(hw, mask);
> -	return err;
> -}
> -
>  /**
>   *  txgbe_calc_eeprom_checksum - Calculates and returns the checksum
>   *  @hw: pointer to hardware structure
> diff --git a/drivers/net/txgbe/base/txgbe_eeprom.h
> b/drivers/net/txgbe/base/txgbe_eeprom.h
> index d0e142dba5..78b8af978b 100644
> --- a/drivers/net/txgbe/base/txgbe_eeprom.h
> +++ b/drivers/net/txgbe/base/txgbe_eeprom.h
> @@ -51,14 +51,12 @@ s32 txgbe_ee_readw_sw(struct txgbe_hw *hw, u32
> offset, u16 *data);
>  s32 txgbe_ee_readw_buffer(struct txgbe_hw *hw, u32 offset, u32 words,
>  				void *data);
>  s32 txgbe_ee_read32(struct txgbe_hw *hw, u32 addr, u32 *data);
> -s32 txgbe_ee_read_buffer(struct txgbe_hw *hw, u32 addr, u32 len, void
> *data);
> 
>  s32 txgbe_ee_write16(struct txgbe_hw *hw, u32 offset, u16 data);
>  s32 txgbe_ee_writew_sw(struct txgbe_hw *hw, u32 offset, u16 data);
>  s32 txgbe_ee_writew_buffer(struct txgbe_hw *hw, u32 offset, u32 words,
>  				void *data);
>  s32 txgbe_ee_write32(struct txgbe_hw *hw, u32 addr, u32 data);
> -s32 txgbe_ee_write_buffer(struct txgbe_hw *hw, u32 addr, u32 len, void
> *data);
> 
> 
>  #endif /* _TXGBE_EEPROM_H_ */

Reviewed-by: Jiawen Wu <jiawenwu@trustnetic.com>

Thanks.
Zhang, Tianfei Nov. 24, 2020, 10 a.m. UTC | #8
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: 2020年11月23日 17:57
> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> <jerinj@marvell.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>; Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Nithin
> Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
> John Miller <john.miller@atomicrules.com>; Igor Russkikh
> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
> <shshaikh@marvell.com>; Somnath Kotur <somnath.kotur@broadcom.com>;
> Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
> Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>; Guo, Jia
> <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Marcin
> Wojtas <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy
> Tzalik <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>;
> Igor Chauskin <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Alfredo Cardigliano <cardigliano@ntop.org>;
> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Stephen Hemminger
> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>; Haiyang
> Zhang <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich
> Kuhn <heinrich.kuhn@netronome.com>; Harman Kalra
> <hkalra@marvell.com>; Kiran Kumar K <kirankumark@marvell.com>; Andrew
> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
> Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [RFC] remove unused functions
> 
> On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
> >>   drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
> >>   drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
> >>   drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
> >>   drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
> >>   drivers/raw/ifpga/base/opae_i2c.c             |   12 -
> >>   drivers/raw/ifpga/base/opae_i2c.h             |    4 -
> >>   drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
> >>   drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
> >
> > The ifpga base code aims to provide full low-level hardware functionality
> support for Intel FPGA cards like the N3000. Even though some APIs are not
> used by the DPDK framework now, they will be useful for end users or other
> customers developing their own products, so my opinion is to keep those APIs
> of the ifpga base code in DPDK.
> >
> 
> The code is hidden behind the driver code, it is not accessible to user directly.
> 
> If the idea is the code can be used later, it can be added when needed.

Sounds good!
> 
> If the idea is the code can be needed in the field by users, can you please give
> example on the use case?

Do you mean writing some test cases in the examples folder?
Zhang, Tianfei Nov. 24, 2020, 10:01 a.m. UTC | #9
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: 2020年11月24日 0:07
> To: Yigit, Ferruh <ferruh.yigit@intel.com>
> Cc: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> <jerinj@marvell.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>; Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Nithin
> Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
> John Miller <john.miller@atomicrules.com>; Igor Russkikh
> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
> <shshaikh@marvell.com>; Somnath Kotur <somnath.kotur@broadcom.com>;
> Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
> Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>; Guo, Jia
> <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Marcin
> Wojtas <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy
> Tzalik <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>;
> Igor Chauskin <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Alfredo Cardigliano <cardigliano@ntop.org>;
> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Stephen Hemminger
> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>; Haiyang
> Zhang <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich
> Kuhn <heinrich.kuhn@netronome.com>; Harman Kalra
> <hkalra@marvell.com>; Kiran Kumar K <kirankumark@marvell.com>; Andrew
> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
> Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
> <chenbo.xia@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [RFC] remove unused functions
> 
> On Mon, 23 Nov 2020 09:56:51 +0000
> Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> 
> > On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
> > >>   drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
> > >>   drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
> > >>   drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
> > >>   drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
> > >>   drivers/raw/ifpga/base/opae_i2c.c             |   12 -
> > >>   drivers/raw/ifpga/base/opae_i2c.h             |    4 -
> > >>   drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
> > >>   drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
> > >
> > > The ifpga base code is want to provide fully functionality low level
> hardware support for Intel FPGA card like N3000, even though some APIs are
> not used by DPDK framework now,but it will useful for end-users or other
> customers developing their own productions , my opinion is keeping those
> APIs of ifpga base code in DPDK.
> > >
> >
> > The code is hidden behind the driver code, it is not accessible to user
> directly.
> >
> > If the idea is the code can be used later, it can be added when needed.
> >
> > If the idea is the code can be needed in the field by users, can you
> > please give example on the use case?
> 
> Dead code is buggy code. Please remove it.
> Maintaining dead code increases the overall technical debt.
> Dead code is guaranteed not to be tested.
> 
> If someone needs it in future they can resurrect it from earlier versions which
> will always be there in git.
> 
> If Intel wants to support it in DPDK then it should provide API's and tests for it.

Yes, I agree with you. I want to write some test cases for the ifpga base code.
Ferruh Yigit Nov. 24, 2020, 10:26 a.m. UTC | #10
On 11/24/2020 10:00 AM, Zhang, Tianfei wrote:
> 
> 
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>> Sent: 2020年11月23日 17:57
>> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
>> <jerinj@marvell.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
>> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
>> <sachin.saxena@oss.nxp.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
>> <nhorman@tuxdriver.com>; Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Nithin
>> Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
>> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
>> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
>> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
>> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
>> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
>> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
>> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
>> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
>> John Miller <john.miller@atomicrules.com>; Igor Russkikh
>> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
>> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
>> <shshaikh@marvell.com>; Somnath Kotur <somnath.kotur@broadcom.com>;
>> Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
>> Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>; Guo, Jia
>> <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Marcin
>> Wojtas <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy
>> Tzalik <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>;
>> Igor Chauskin <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
>> Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
>> <qiming.yang@intel.com>; Alfredo Cardigliano <cardigliano@ntop.org>;
>> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
>> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
>> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Stephen Hemminger
>> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>; Haiyang
>> Zhang <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich
>> Kuhn <heinrich.kuhn@netronome.com>; Harman Kalra
>> <hkalra@marvell.com>; Kiran Kumar K <kirankumark@marvell.com>; Andrew
>> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
>> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
>> Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
>> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
>> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
>> <chenbo.xia@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [RFC] remove unused functions
>>
>> On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
>>>>    drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
>>>>    drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
>>>>    drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
>>>>    drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
>>>>    drivers/raw/ifpga/base/opae_i2c.c             |   12 -
>>>>    drivers/raw/ifpga/base/opae_i2c.h             |    4 -
>>>>    drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
>>>>    drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
>>>
>>> The ifpga base code is want to provide fully functionality low level hardware
>> support for Intel FPGA card like N3000, even though some APIs are not used
>> by DPDK framework now,but it will useful for end-users or other customers
>> developing their own productions , my opinion is keeping those APIs of ifpga
>> base code in DPDK.
>>>
>>
>> The code is hidden behind the driver code, it is not accessible to user directly.
>>
>> If the idea is the code can be used later, it can be added when needed.
> 
> Sound good!
>>
>> If the idea is the code can be needed in the field by users, can you please give
>> example on the use case?
> 
> Do you mean write some test cases in examples folder?
> 

You mentioned that end users may need some of the removed functions; can you give an 
example of how an end user might need them?
Zhang, Tianfei Nov. 24, 2020, 11:01 a.m. UTC | #11
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: 2020年11月24日 18:26
> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> <jerinj@marvell.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>; Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Nithin
> Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
> John Miller <john.miller@atomicrules.com>; Igor Russkikh
> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
> <shshaikh@marvell.com>; Somnath Kotur <somnath.kotur@broadcom.com>;
> Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
> Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>; Guo, Jia
> <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Marcin
> Wojtas <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy
> Tzalik <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>;
> Igor Chauskin <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Alfredo Cardigliano <cardigliano@ntop.org>;
> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Stephen Hemminger
> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>; Haiyang
> Zhang <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich
> Kuhn <heinrich.kuhn@netronome.com>; Harman Kalra
> <hkalra@marvell.com>; Kiran Kumar K <kirankumark@marvell.com>; Andrew
> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
> Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [RFC] remove unused functions
> 
> On 11/24/2020 10:00 AM, Zhang, Tianfei wrote:
> >
> >
> >> -----Original Message-----
> >> From: Ferruh Yigit <ferruh.yigit@intel.com>
> >> Sent: 2020年11月23日 17:57
> >> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> >> <jerinj@marvell.com>; Dumitrescu, Cristian
> >> <cristian.dumitrescu@intel.com>; Hemant Agrawal
> >> <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>;
> >> Ray Kinsella <mdr@ashroe.eu>; Neil Horman <nhorman@tuxdriver.com>;
> >> Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> >> Xing, Beilei <beilei.xing@intel.com>; Nithin Dabilpuram
> >> <ndabilpuram@marvell.com>; Ajit Khaparde
> >> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> >> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> >> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> >> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> >> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> >> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J
> >> <liang.j.ma@intel.com>; Mccarthy, Peter <peter.mccarthy@intel.com>;
> >> Shepard Siegel <shepard.siegel@atomicrules.com>; Ed Czeck
> >> <ed.czeck@atomicrules.com>; John Miller
> >> <john.miller@atomicrules.com>; Igor Russkikh
> >> <igor.russkikh@aquantia.com>; Pavel Belous
> >> <pavel.belous@aquantia.com>; Rasesh Mody <rmody@marvell.com>;
> Shahed
> >> Shaikh <shshaikh@marvell.com>; Somnath Kotur
> >> <somnath.kotur@broadcom.com>; Chas Williams <chas3@att.com>; Min
> Hu
> >> (Connor) <humin29@huawei.com>; Rahul Lakkireddy
> >> <rahul.lakkireddy@chelsio.com>; Guo, Jia <jia.guo@intel.com>; Wang,
> >> Haiyue <haiyue.wang@intel.com>; Marcin Wojtas <mw@semihalf.com>;
> >> Michal Krawczyk <mk@semihalf.com>; Guy Tzalik <gtzalik@amazon.com>;
> >> Evgeny Schemeilin <evgenys@amazon.com>; Igor Chauskin
> >> <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Xiao
> W
> >> <xiao.w.wang@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
> >> Alfredo Cardigliano <cardigliano@ntop.org>; Matan Azrad
> >> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>; Viacheslav
> >> Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak <zr@semihalf.com>;
> >> Liron Himi <lironh@marvell.com>; Stephen Hemminger
> >> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>;
> >> Haiyang Zhang <haiyangz@microsoft.com>; Long Li
> >> <longli@microsoft.com>; Heinrich Kuhn
> <heinrich.kuhn@netronome.com>;
> >> Harman Kalra <hkalra@marvell.com>; Kiran Kumar K
> >> <kirankumark@marvell.com>; Andrew Rybchenko
> >> <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> >> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>;
> >> Jian Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy
> >> Kaneti <guyk@marvell.com>; Burakov, Anatoly
> >> <anatoly.burakov@intel.com>; Maxime Coquelin
> >> <maxime.coquelin@redhat.com>; Xia, Chenbo <chenbo.xia@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [RFC] remove unused functions
> >>
> >> On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
> >>>>    drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
> >>>>    drivers/raw/ifpga/base/opae_eth_group.h       |    1 -

The unused functions in opae_eth_group.c are release functions, like eth_group_release(); they should be called by fme_eth_group_uinit().
I will send a patch to fix it.

> >>>>    drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
> >>>>    drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
> >>>>    drivers/raw/ifpga/base/opae_i2c.c             |   12 -
> >>>>    drivers/raw/ifpga/base/opae_i2c.h             |    4 -
> >>>>    drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
> >>>>    drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
> >>>
> >>> The ifpga base code is want to provide fully functionality low level
> >>> hardware
> >> support for Intel FPGA card like N3000, even though some APIs are not
> >> used by DPDK framework now,but it will useful for end-users or other
> >> customers developing their own productions , my opinion is keeping
> >> those APIs of ifpga base code in DPDK.
> >>>
> >>
> >> The code is hidden behind the driver code, it is not accessible to user
> directly.
> >>
> >> If the idea is the code can be used later, it can be added when needed.
> >
> > Sound good!
> >>
> >> If the idea is the code can be needed in the field by users, can you
> >> please give example on the use case?
> >
> > Do you mean write some test cases in examples folder?
> >
> 
> You mentioned end user may need some of the removed functions, can you
> give an example how an end user may need them?

Currently, there is only one DPDK PMD driver using the ifpga base code; it is called ipn3ke, and it targets vBNG applications.
The ipn3ke driver does not use all of the APIs provided by the ifpga base code. I agree to remove the unused functions first. I plan to write
a test application to exercise all of the APIs (including those unused functions) and add those functions back in the future, and I think the test application should be a good example for end users of how to use those APIs.
Ferruh Yigit Nov. 24, 2020, 11:31 a.m. UTC | #12
On 11/24/2020 11:01 AM, Zhang, Tianfei wrote:
> 
> 
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>> Sent: 2020年11月24日 18:26
>> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
>> <jerinj@marvell.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
>> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
>> <sachin.saxena@oss.nxp.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
>> <nhorman@tuxdriver.com>; Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Nithin
>> Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
>> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
>> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
>> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
>> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
>> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
>> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
>> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
>> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
>> John Miller <john.miller@atomicrules.com>; Igor Russkikh
>> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
>> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
>> <shshaikh@marvell.com>; Somnath Kotur <somnath.kotur@broadcom.com>;
>> Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
>> Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>; Guo, Jia
>> <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Marcin
>> Wojtas <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy
>> Tzalik <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>;
>> Igor Chauskin <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
>> Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
>> <qiming.yang@intel.com>; Alfredo Cardigliano <cardigliano@ntop.org>;
>> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
>> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
>> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Stephen Hemminger
>> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>; Haiyang
>> Zhang <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich
>> Kuhn <heinrich.kuhn@netronome.com>; Harman Kalra
>> <hkalra@marvell.com>; Kiran Kumar K <kirankumark@marvell.com>; Andrew
>> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
>> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
>> Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
>> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
>> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
>> <chenbo.xia@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [RFC] remove unused functions
>>
>> On 11/24/2020 10:00 AM, Zhang, Tianfei wrote:
>>>
>>>
>>>> -----Original Message-----
>>>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>>>> Sent: 2020年11月23日 17:57
>>>> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
>>>> <jerinj@marvell.com>; Dumitrescu, Cristian
>>>> <cristian.dumitrescu@intel.com>; Hemant Agrawal
>>>> <hemant.agrawal@nxp.com>; Sachin Saxena
>> <sachin.saxena@oss.nxp.com>;
>>>> Ray Kinsella <mdr@ashroe.eu>; Neil Horman <nhorman@tuxdriver.com>;
>>>> Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
>>>> Xing, Beilei <beilei.xing@intel.com>; Nithin Dabilpuram
>>>> <ndabilpuram@marvell.com>; Ajit Khaparde
>>>> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
>>>> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
>>>> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
>>>> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
>>>> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
>>>> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J
>>>> <liang.j.ma@intel.com>; Mccarthy, Peter <peter.mccarthy@intel.com>;
>>>> Shepard Siegel <shepard.siegel@atomicrules.com>; Ed Czeck
>>>> <ed.czeck@atomicrules.com>; John Miller
>>>> <john.miller@atomicrules.com>; Igor Russkikh
>>>> <igor.russkikh@aquantia.com>; Pavel Belous
>>>> <pavel.belous@aquantia.com>; Rasesh Mody <rmody@marvell.com>;
>> Shahed
>>>> Shaikh <shshaikh@marvell.com>; Somnath Kotur
>>>> <somnath.kotur@broadcom.com>; Chas Williams <chas3@att.com>; Min
>> Hu
>>>> (Connor) <humin29@huawei.com>; Rahul Lakkireddy
>>>> <rahul.lakkireddy@chelsio.com>; Guo, Jia <jia.guo@intel.com>; Wang,
>>>> Haiyue <haiyue.wang@intel.com>; Marcin Wojtas <mw@semihalf.com>;
>>>> Michal Krawczyk <mk@semihalf.com>; Guy Tzalik <gtzalik@amazon.com>;
>>>> Evgeny Schemeilin <evgenys@amazon.com>; Igor Chauskin
>>>> <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Xiao
>> W
>>>> <xiao.w.wang@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
>>>> Alfredo Cardigliano <cardigliano@ntop.org>; Matan Azrad
>>>> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>; Viacheslav
>>>> Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak <zr@semihalf.com>;
>>>> Liron Himi <lironh@marvell.com>; Stephen Hemminger
>>>> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>;
>>>> Haiyang Zhang <haiyangz@microsoft.com>; Long Li
>>>> <longli@microsoft.com>; Heinrich Kuhn
>> <heinrich.kuhn@netronome.com>;
>>>> Harman Kalra <hkalra@marvell.com>; Kiran Kumar K
>>>> <kirankumark@marvell.com>; Andrew Rybchenko
>>>> <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
>>>> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>;
>>>> Jian Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy
>>>> Kaneti <guyk@marvell.com>; Burakov, Anatoly
>>>> <anatoly.burakov@intel.com>; Maxime Coquelin
>>>> <maxime.coquelin@redhat.com>; Xia, Chenbo <chenbo.xia@intel.com>
>>>> Cc: dev@dpdk.org
>>>> Subject: Re: [RFC] remove unused functions
>>>>
>>>> On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
>>>>>>     drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
>>>>>>     drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
> 
> For those unused functions on opae_eth_group.c are release functions, like eth_group_release(), it should be called by fme_eth_group_uinit(),
> I will send a patch to fix it.
> 
>>>>>>     drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
>>>>>>     drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
>>>>>>     drivers/raw/ifpga/base/opae_i2c.c             |   12 -
>>>>>>     drivers/raw/ifpga/base/opae_i2c.h             |    4 -
>>>>>>     drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
>>>>>>     drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
>>>>>
>>>>> The ifpga base code is want to provide fully functionality low level
>>>>> hardware
>>>> support for Intel FPGA card like N3000, even though some APIs are not
>>>> used by DPDK framework now,but it will useful for end-users or other
>>>> customers developing their own productions , my opinion is keeping
>>>> those APIs of ifpga base code in DPDK.
>>>>>
>>>>
>>>> The code is hidden behind the driver code, it is not accessible to user
>> directly.
>>>>
>>>> If the idea is the code can be used later, it can be added when needed.
>>>
>>> Sound good!
>>>>
>>>> If the idea is the code can be needed in the field by users, can you
>>>> please give example on the use case?
>>>
>>> Do you mean write some test cases in examples folder?
>>>
>>
>> You mentioned end user may need some of the removed functions, can you
>> give an example how an end user may need them?
> 
> Currently, there is only one DPDK PMD driver using the ifpga base code, it is called ipn3ke, which is target for VBNG applications.
> The ipn3ke is not using all of our APIs which provided by ifpga base code. I am agree remove the unused functions firstly. And I plan to write
> an test application to test all of APIs ( including those unused functions ) and add those functions back in future, and I think the test applications should be a good example for the end-user how to use those APIs.
> 

I am not clear on how a test application can help test driver base code functions.

The driver should call them to configure the HW; if there is no DPDK API to request 
that configuration, or the current device doesn't support it, what can an application do?
Zhang, Tianfei Nov. 24, 2020, 12:17 p.m. UTC | #13
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: 2020年11月24日 19:31
> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> <jerinj@marvell.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>; Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Nithin
> Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J <liang.j.ma@intel.com>;
> Mccarthy, Peter <peter.mccarthy@intel.com>; Shepard Siegel
> <shepard.siegel@atomicrules.com>; Ed Czeck <ed.czeck@atomicrules.com>;
> John Miller <john.miller@atomicrules.com>; Igor Russkikh
> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
> Rasesh Mody <rmody@marvell.com>; Shahed Shaikh
> <shshaikh@marvell.com>; Somnath Kotur <somnath.kotur@broadcom.com>;
> Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
> Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>; Guo, Jia
> <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Marcin
> Wojtas <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy
> Tzalik <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>;
> Igor Chauskin <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Alfredo Cardigliano <cardigliano@ntop.org>;
> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Stephen Hemminger
> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>; Haiyang
> Zhang <haiyangz@microsoft.com>; Long Li <longli@microsoft.com>; Heinrich
> Kuhn <heinrich.kuhn@netronome.com>; Harman Kalra
> <hkalra@marvell.com>; Kiran Kumar K <kirankumark@marvell.com>; Andrew
> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian
> Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy Kaneti
> <guyk@marvell.com>; Burakov, Anatoly <anatoly.burakov@intel.com>;
> Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [RFC] remove unused functions
> 
> On 11/24/2020 11:01 AM, Zhang, Tianfei wrote:
> >
> >
> >> -----Original Message-----
> >> From: Ferruh Yigit <ferruh.yigit@intel.com>
> >> Sent: 2020年11月24日 18:26
> >> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> >> <jerinj@marvell.com>; Dumitrescu, Cristian
> >> <cristian.dumitrescu@intel.com>; Hemant Agrawal
> >> <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>;
> >> Ray Kinsella <mdr@ashroe.eu>; Neil Horman <nhorman@tuxdriver.com>;
> >> Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> >> Xing, Beilei <beilei.xing@intel.com>; Nithin Dabilpuram
> >> <ndabilpuram@marvell.com>; Ajit Khaparde
> >> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> >> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> >> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> >> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> >> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>; McDaniel,
> >> Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J
> >> <liang.j.ma@intel.com>; Mccarthy, Peter <peter.mccarthy@intel.com>;
> >> Shepard Siegel <shepard.siegel@atomicrules.com>; Ed Czeck
> >> <ed.czeck@atomicrules.com>; John Miller
> >> <john.miller@atomicrules.com>; Igor Russkikh
> >> <igor.russkikh@aquantia.com>; Pavel Belous
> >> <pavel.belous@aquantia.com>; Rasesh Mody <rmody@marvell.com>;
> Shahed
> >> Shaikh <shshaikh@marvell.com>; Somnath Kotur
> >> <somnath.kotur@broadcom.com>; Chas Williams <chas3@att.com>; Min
> Hu
> >> (Connor) <humin29@huawei.com>; Rahul Lakkireddy
> >> <rahul.lakkireddy@chelsio.com>; Guo, Jia <jia.guo@intel.com>; Wang,
> >> Haiyue <haiyue.wang@intel.com>; Marcin Wojtas <mw@semihalf.com>;
> >> Michal Krawczyk <mk@semihalf.com>; Guy Tzalik <gtzalik@amazon.com>;
> >> Evgeny Schemeilin <evgenys@amazon.com>; Igor Chauskin
> >> <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Xiao
> W
> >> <xiao.w.wang@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
> >> Alfredo Cardigliano <cardigliano@ntop.org>; Matan Azrad
> >> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>; Viacheslav
> >> Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak <zr@semihalf.com>;
> >> Liron Himi <lironh@marvell.com>; Stephen Hemminger
> >> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>;
> >> Haiyang Zhang <haiyangz@microsoft.com>; Long Li
> >> <longli@microsoft.com>; Heinrich Kuhn
> <heinrich.kuhn@netronome.com>;
> >> Harman Kalra <hkalra@marvell.com>; Kiran Kumar K
> >> <kirankumark@marvell.com>; Andrew Rybchenko
> >> <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> >> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>;
> >> Jian Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>; Guy
> >> Kaneti <guyk@marvell.com>; Burakov, Anatoly
> >> <anatoly.burakov@intel.com>; Maxime Coquelin
> >> <maxime.coquelin@redhat.com>; Xia, Chenbo <chenbo.xia@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [RFC] remove unused functions
> >>
> >> On 11/24/2020 10:00 AM, Zhang, Tianfei wrote:
> >>>
> >>>
> >>>> -----Original Message-----
> >>>> From: Ferruh Yigit <ferruh.yigit@intel.com>
> >>>> Sent: 2020年11月23日 17:57
> >>>> To: Zhang, Tianfei <tianfei.zhang@intel.com>; Jerin Jacob
> >>>> <jerinj@marvell.com>; Dumitrescu, Cristian
> >>>> <cristian.dumitrescu@intel.com>; Hemant Agrawal
> >>>> <hemant.agrawal@nxp.com>; Sachin Saxena
> >> <sachin.saxena@oss.nxp.com>;
> >>>> Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>;
> >>>> Xu, Rosen <rosen.xu@intel.com>; Wu, Jingjing
> >>>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> >>>> Nithin Dabilpuram <ndabilpuram@marvell.com>; Ajit Khaparde
> >>>> <ajit.khaparde@broadcom.com>; Raveendra Padasalagi
> >>>> <raveendra.padasalagi@broadcom.com>; Vikas Gupta
> >>>> <vikas.gupta@broadcom.com>; Gagandeep Singh <g.singh@nxp.com>;
> >>>> Somalapuram Amaranath <asomalap@amd.com>; Akhil Goyal
> >>>> <akhil.goyal@nxp.com>; Jay Zhou <jianjay.zhou@huawei.com>;
> >>>> McDaniel, Timothy <timothy.mcdaniel@intel.com>; Ma, Liang J
> >>>> <liang.j.ma@intel.com>; Mccarthy, Peter <peter.mccarthy@intel.com>;
> >>>> Shepard Siegel <shepard.siegel@atomicrules.com>; Ed Czeck
> >>>> <ed.czeck@atomicrules.com>; John Miller
> >>>> <john.miller@atomicrules.com>; Igor Russkikh
> >>>> <igor.russkikh@aquantia.com>; Pavel Belous
> >>>> <pavel.belous@aquantia.com>; Rasesh Mody <rmody@marvell.com>;
> >> Shahed
> >>>> Shaikh <shshaikh@marvell.com>; Somnath Kotur
> >>>> <somnath.kotur@broadcom.com>; Chas Williams <chas3@att.com>;
> Min
> >> Hu
> >>>> (Connor) <humin29@huawei.com>; Rahul Lakkireddy
> >>>> <rahul.lakkireddy@chelsio.com>; Guo, Jia <jia.guo@intel.com>; Wang,
> >>>> Haiyue <haiyue.wang@intel.com>; Marcin Wojtas
> <mw@semihalf.com>;
> >>>> Michal Krawczyk <mk@semihalf.com>; Guy Tzalik
> <gtzalik@amazon.com>;
> >>>> Evgeny Schemeilin <evgenys@amazon.com>; Igor Chauskin
> >>>> <igorch@amazon.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang,
> Xiao
> >> W
> >>>> <xiao.w.wang@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
> >>>> Alfredo Cardigliano <cardigliano@ntop.org>; Matan Azrad
> >>>> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>; Viacheslav
> >>>> Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak <zr@semihalf.com>;
> >>>> Liron Himi <lironh@marvell.com>; Stephen Hemminger
> >>>> <sthemmin@microsoft.com>; K. Y. Srinivasan <kys@microsoft.com>;
> >>>> Haiyang Zhang <haiyangz@microsoft.com>; Long Li
> >>>> <longli@microsoft.com>; Heinrich Kuhn
> >> <heinrich.kuhn@netronome.com>;
> >>>> Harman Kalra <hkalra@marvell.com>; Kiran Kumar K
> >>>> <kirankumark@marvell.com>; Andrew Rybchenko
> >>>> <andrew.rybchenko@oktetlabs.ru>; Singh, Jasvinder
> >>>> <jasvinder.singh@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>;
> >>>> Jian Wang <jianwang@trustnetic.com>; Ori Kam <orika@nvidia.com>;
> >>>> Guy Kaneti <guyk@marvell.com>; Burakov, Anatoly
> >>>> <anatoly.burakov@intel.com>; Maxime Coquelin
> >>>> <maxime.coquelin@redhat.com>; Xia, Chenbo <chenbo.xia@intel.com>
> >>>> Cc: dev@dpdk.org
> >>>> Subject: Re: [RFC] remove unused functions
> >>>>
> >>>> On 11/23/2020 2:55 AM, Zhang, Tianfei wrote:
> >>>>>>     drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
> >>>>>>     drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
> >
> > Some of the unused functions in opae_eth_group.c are release functions;
> > for example, eth_group_release() should be called by fme_eth_group_uinit(). I will
> send a patch to fix it.
> >
> >>>>>>     drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
> >>>>>>     drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
> >>>>>>     drivers/raw/ifpga/base/opae_i2c.c             |   12 -
> >>>>>>     drivers/raw/ifpga/base/opae_i2c.h             |    4 -
> >>>>>>     drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
> >>>>>>     drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
> >>>>>
> >>>>> The ifpga base code is want to provide fully functionality low
> >>>>> level hardware
> >>>> support for Intel FPGA card like N3000, even though some APIs are
> >>>> not used by DPDK framework now,but it will useful for end-users or
> >>>> other customers developing their own productions , my opinion is
> >>>> keeping those APIs of ifpga base code in DPDK.
> >>>>>
> >>>>
> >>>> The code is hidden behind the driver code, it is not accessible to
> >>>> user
> >> directly.
> >>>>
> >>>> If the idea is the code can be used later, it can be added when needed.
> >>>
> >>> Sound good!
> >>>>
> >>>> If the idea is the code can be needed in the field by users, can
> >>>> you please give example on the use case?
> >>>
> >>> Do you mean write some test cases in examples folder?
> >>>
> >>
> >> You mentioned end user may need some of the removed functions, can
> >> you give an example how an end user may need them?
> >
> > Currently, there is only one DPDK PMD driver using the ifpga base code; it is
> called ipn3ke, which targets vBNG applications.
> > The ipn3ke driver does not use all of the APIs provided by the ifpga base
> > code. I agree with removing the unused functions first. And I plan to write a
> test application to test all of the APIs (including those unused functions) and add
> those functions back in the future; I think the test application should be a
> good example for end-users of how to use those APIs.
> >
> 
> I am not clear how a test application can help to test driver base code
> functions.
> 
> The driver should call them to configure the HW; if there is no DPDK API to request
> that configuration, or the current device doesn't support it, what can the
> application do?

My thought is to write a test rawdev PMD driver and a corresponding application that exist only for testing the ifpga APIs — I am not sure whether that is workable?

Patch
diff mbox series

diff --git a/app/test-eventdev/parser.c b/app/test-eventdev/parser.c
index 24f1855e9a..131f7383d9 100644
--- a/app/test-eventdev/parser.c
+++ b/app/test-eventdev/parser.c
@@ -37,44 +37,6 @@  get_hex_val(char c)
 	}
 }
 
-int
-parser_read_arg_bool(const char *p)
-{
-	p = skip_white_spaces(p);
-	int result = -EINVAL;
-
-	if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
-		((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
-		p += 3;
-		result = 1;
-	}
-
-	if (((p[0] == 'o') && (p[1] == 'n')) ||
-		((p[0] == 'O') && (p[1] == 'N'))) {
-		p += 2;
-		result = 1;
-	}
-
-	if (((p[0] == 'n') && (p[1] == 'o')) ||
-		((p[0] == 'N') && (p[1] == 'O'))) {
-		p += 2;
-		result = 0;
-	}
-
-	if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
-		((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
-		p += 3;
-		result = 0;
-	}
-
-	p = skip_white_spaces(p);
-
-	if (p[0] != '\0')
-		return -EINVAL;
-
-	return result;
-}
-
 int
 parser_read_uint64(uint64_t *value, const char *p)
 {
@@ -115,24 +77,6 @@  parser_read_uint64(uint64_t *value, const char *p)
 	return 0;
 }
 
-int
-parser_read_int32(int32_t *value, const char *p)
-{
-	char *next;
-	int32_t val;
-
-	p = skip_white_spaces(p);
-	if (!isdigit(*p))
-		return -EINVAL;
-
-	val = strtol(p, &next, 10);
-	if (p == next)
-		return -EINVAL;
-
-	*value = val;
-	return 0;
-}
-
 int
 parser_read_uint64_hex(uint64_t *value, const char *p)
 {
@@ -169,22 +113,6 @@  parser_read_uint32(uint32_t *value, const char *p)
 	return 0;
 }
 
-int
-parser_read_uint32_hex(uint32_t *value, const char *p)
-{
-	uint64_t val = 0;
-	int ret = parser_read_uint64_hex(&val, p);
-
-	if (ret < 0)
-		return ret;
-
-	if (val > UINT32_MAX)
-		return -ERANGE;
-
-	*value = val;
-	return 0;
-}
-
 int
 parser_read_uint16(uint16_t *value, const char *p)
 {
@@ -201,22 +129,6 @@  parser_read_uint16(uint16_t *value, const char *p)
 	return 0;
 }
 
-int
-parser_read_uint16_hex(uint16_t *value, const char *p)
-{
-	uint64_t val = 0;
-	int ret = parser_read_uint64_hex(&val, p);
-
-	if (ret < 0)
-		return ret;
-
-	if (val > UINT16_MAX)
-		return -ERANGE;
-
-	*value = val;
-	return 0;
-}
-
 int
 parser_read_uint8(uint8_t *value, const char *p)
 {
diff --git a/app/test-eventdev/parser.h b/app/test-eventdev/parser.h
index 673ff22d78..94856e66e3 100644
--- a/app/test-eventdev/parser.h
+++ b/app/test-eventdev/parser.h
@@ -28,20 +28,14 @@  skip_digits(const char *src)
 	return i;
 }
 
-int parser_read_arg_bool(const char *p);
-
 int parser_read_uint64(uint64_t *value, const char *p);
 int parser_read_uint32(uint32_t *value, const char *p);
 int parser_read_uint16(uint16_t *value, const char *p);
 int parser_read_uint8(uint8_t *value, const char *p);
 
 int parser_read_uint64_hex(uint64_t *value, const char *p);
-int parser_read_uint32_hex(uint32_t *value, const char *p);
-int parser_read_uint16_hex(uint16_t *value, const char *p);
 int parser_read_uint8_hex(uint8_t *value, const char *p);
 
-int parser_read_int32(int32_t *value, const char *p);
-
 int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
 
 int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
diff --git a/app/test/test_table_pipeline.c b/app/test/test_table_pipeline.c
index aabf4375db..4e5926a7c0 100644
--- a/app/test/test_table_pipeline.c
+++ b/app/test/test_table_pipeline.c
@@ -61,46 +61,10 @@  rte_pipeline_port_out_action_handler port_action_stub(struct rte_mbuf **pkts,
 
 #endif
 
-rte_pipeline_table_action_handler_hit
-table_action_0x00(struct rte_pipeline *p, struct rte_mbuf **pkts,
-	uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
-
-rte_pipeline_table_action_handler_hit
-table_action_stub_hit(struct rte_pipeline *p, struct rte_mbuf **pkts,
-	uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
-
 static int
 table_action_stub_miss(struct rte_pipeline *p, struct rte_mbuf **pkts,
 	uint64_t pkts_mask, struct rte_pipeline_table_entry *entry, void *arg);
 
-rte_pipeline_table_action_handler_hit
-table_action_0x00(__rte_unused struct rte_pipeline *p,
-	__rte_unused struct rte_mbuf **pkts,
-	uint64_t pkts_mask,
-	__rte_unused struct rte_pipeline_table_entry **entry,
-	__rte_unused void *arg)
-{
-	printf("Table Action, setting pkts_mask to 0x00\n");
-	pkts_mask = ~0x00;
-	rte_pipeline_ah_packet_drop(p, pkts_mask);
-	return 0;
-}
-
-rte_pipeline_table_action_handler_hit
-table_action_stub_hit(__rte_unused struct rte_pipeline *p,
-	__rte_unused struct rte_mbuf **pkts,
-	uint64_t pkts_mask,
-	__rte_unused struct rte_pipeline_table_entry **entry,
-	__rte_unused void *arg)
-{
-	printf("STUB Table Action Hit - doing nothing\n");
-	printf("STUB Table Action Hit - setting mask to 0x%"PRIx64"\n",
-		override_hit_mask);
-	pkts_mask = (~override_hit_mask) & 0x3;
-	rte_pipeline_ah_packet_drop(p, pkts_mask);
-	return 0;
-}
-
 static int
 table_action_stub_miss(struct rte_pipeline *p,
 	__rte_unused struct rte_mbuf **pkts,
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 4ab49f7853..b69b133a90 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -56,74 +56,6 @@  fman_if_reset_mcast_filter_table(struct fman_if *p)
 		out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
 }
 
-static
-uint32_t get_mac_hash_code(uint64_t eth_addr)
-{
-	uint64_t	mask1, mask2;
-	uint32_t	xorVal = 0;
-	uint8_t		i, j;
-
-	for (i = 0; i < 6; i++) {
-		mask1 = eth_addr & (uint64_t)0x01;
-		eth_addr >>= 1;
-
-		for (j = 0; j < 7; j++) {
-			mask2 = eth_addr & (uint64_t)0x01;
-			mask1 ^= mask2;
-			eth_addr >>= 1;
-		}
-
-		xorVal |= (mask1 << (5 - i));
-	}
-
-	return xorVal;
-}
-
-int
-fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
-{
-	uint64_t eth_addr;
-	void *hashtable_ctrl;
-	uint32_t hash;
-
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-
-	eth_addr = ETH_ADDR_TO_UINT64(eth);
-
-	if (!(eth_addr & GROUP_ADDRESS))
-		return -1;
-
-	hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
-	hash = hash | HASH_CTRL_MCAST_EN;
-
-	hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
-	out_be32(hashtable_ctrl, hash);
-
-	return 0;
-}
-
-int
-fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
-{
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-	void *mac_reg =
-		&((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
-	u32 val = in_be32(mac_reg);
-
-	eth[0] = (val & 0x000000ff) >> 0;
-	eth[1] = (val & 0x0000ff00) >> 8;
-	eth[2] = (val & 0x00ff0000) >> 16;
-	eth[3] = (val & 0xff000000) >> 24;
-
-	mac_reg =  &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
-	val = in_be32(mac_reg);
-
-	eth[4] = (val & 0x000000ff) >> 0;
-	eth[5] = (val & 0x0000ff00) >> 8;
-
-	return 0;
-}
-
 void
 fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
 {
@@ -180,38 +112,6 @@  fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
 	return 0;
 }
 
-void
-fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
-{
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-	u32 value = 0;
-	void *cmdcfg;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	/* Set Rx Ignore Pause Frames */
-	cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
-	if (enable)
-		value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
-	else
-		value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
-
-	out_be32(cmdcfg, value);
-}
-
-void
-fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
-{
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-	unsigned int *maxfrm;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	/* Set Max frame length */
-	maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
-	out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
-}
-
 void
 fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
 {
@@ -422,23 +322,6 @@  fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
 	return 0;
 }
 
-int
-fman_if_get_fdoff(struct fman_if *fm_if)
-{
-	u32 fmbm_rebm;
-	int fdoff;
-
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-
-	assert(fman_ccsr_map_fd != -1);
-
-	fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
-
-	fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
-
-	return fdoff;
-}
-
 void
 fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
 {
@@ -451,28 +334,6 @@  fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
 	out_be32(fmbm_refqid, err_fqid);
 }
 
-int
-fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	int val = 0;
-	int iceof_mask = 0x001f0000;
-	int icsz_mask = 0x0000001f;
-	int iciof_mask = 0x00000f00;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	unsigned int *fmbm_ricp =
-		&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
-	val = in_be32(fmbm_ricp);
-
-	icp->iceof = (val & iceof_mask) >> 12;
-	icp->iciof = (val & iciof_mask) >> 4;
-	icp->icsz = (val & icsz_mask) << 4;
-
-	return 0;
-}
-
 int
 fman_if_set_ic_params(struct fman_if *fm_if,
 			  const struct fman_if_ic_params *icp)
@@ -526,19 +387,6 @@  fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
 	out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
 }
 
-uint16_t
-fman_if_get_maxfrm(struct fman_if *fm_if)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	unsigned int *reg_maxfrm;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
-
-	return (in_be32(reg_maxfrm) | 0x0000FFFF);
-}
-
 /* MSB in fmbm_rebm register
  * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
  *     of smaller size and store the frame in scatter gather (S/G) buffers
@@ -580,36 +428,6 @@  fman_if_set_sg(struct fman_if *fm_if, int enable)
 	out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
 }
 
-void
-fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	unsigned int *fmqm_pndn;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
-
-	out_be32(fmqm_pndn, nia);
-}
-
-void
-fman_if_discard_rx_errors(struct fman_if *fm_if)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	unsigned int *fmbm_rfsdm, *fmbm_rfsem;
-
-	fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
-	out_be32(fmbm_rfsem, 0);
-
-	/* Configure the discard mask to discard the error packets which have
-	 * DMA errors, Frame size error, Header error etc. The mask 0x010EE3F0
-	 * is to configured discard all the errors which come in the FD[STATUS]
-	 */
-	fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
-	out_be32(fmbm_rfsdm, 0x010EE3F0);
-}
-
 void
 fman_if_receive_rx_errors(struct fman_if *fm_if,
 	unsigned int err_eq)
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index b7009f2299..1d6460f1d1 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -148,14 +148,3 @@  netcfg_acquire(void)
 
 	return NULL;
 }
-
-void
-netcfg_release(struct netcfg_info *cfg_ptr)
-{
-	rte_free(cfg_ptr);
-	/* Close socket for shared interfaces */
-	if (skfd >= 0) {
-		close(skfd);
-		skfd = -1;
-	}
-}
diff --git a/drivers/bus/dpaa/base/qbman/bman.c b/drivers/bus/dpaa/base/qbman/bman.c
index 8a6290734f..95215bb24e 100644
--- a/drivers/bus/dpaa/base/qbman/bman.c
+++ b/drivers/bus/dpaa/base/qbman/bman.c
@@ -321,41 +321,7 @@  int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
 	return ret;
 }
 
-int bman_query_pools(struct bm_pool_state *state)
-{
-	struct bman_portal *p = get_affine_portal();
-	struct bm_mc_result *mcr;
-
-	bm_mc_start(&p->p);
-	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
-	while (!(mcr = bm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
-		    BM_MCR_VERB_CMD_QUERY);
-	*state = mcr->query;
-	state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
-	state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
-	state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
-	state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
-	return 0;
-}
-
 u32 bman_query_free_buffers(struct bman_pool *pool)
 {
 	return bm_pool_free_buffers(pool->params.bpid);
 }
-
-int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
-{
-	u32 bpid;
-
-	bpid = bman_get_params(pool)->bpid;
-
-	return bm_pool_set(bpid, thresholds);
-}
-
-int bman_shutdown_pool(u32 bpid)
-{
-	struct bman_portal *p = get_affine_portal();
-	return bm_shutdown_pool(&p->p, bpid);
-}
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index 750b756b93..8763ac6215 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -109,11 +109,6 @@  static int fsl_bman_portal_finish(void)
 	return ret;
 }
 
-int bman_thread_fd(void)
-{
-	return bmfd;
-}
-
 int bman_thread_init(void)
 {
 	/* Convert from contiguous/virtual cpu numbering to real cpu when
@@ -127,17 +122,6 @@  int bman_thread_finish(void)
 	return fsl_bman_portal_finish();
 }
 
-void bman_thread_irq(void)
-{
-	qbman_invoke_irq(pcfg.irq);
-	/* Now we need to uninhibit interrupts. This is the only code outside
-	 * the regular portal driver that manipulates any portal register, so
-	 * rather than breaking that encapsulation I am simply hard-coding the
-	 * offset to the inhibit register here.
-	 */
-	out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
-}
-
 int bman_init_ccsr(const struct device_node *node)
 {
 	static int ccsr_map_fd;
diff --git a/drivers/bus/dpaa/base/qbman/process.c b/drivers/bus/dpaa/base/qbman/process.c
index 9bc92681cd..9ce8ac8b12 100644
--- a/drivers/bus/dpaa/base/qbman/process.c
+++ b/drivers/bus/dpaa/base/qbman/process.c
@@ -204,100 +204,6 @@  struct dpaa_ioctl_raw_portal {
 #define DPAA_IOCTL_FREE_RAW_PORTAL \
 	_IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)
 
-static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
-{
-	int ret = check_fd();
-
-	if (ret)
-		return ret;
-
-	ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
-	if (ret) {
-		perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
-		return ret;
-	}
-	return 0;
-}
-
-static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
-{
-	int ret = check_fd();
-
-	if (ret)
-		return ret;
-
-	ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
-	if (ret) {
-		perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
-		return ret;
-	}
-	return 0;
-}
-
-int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-	int ret;
-
-	input.type = dpaa_portal_qman;
-	input.index = portal->index;
-	input.enable_stash = portal->enable_stash;
-	input.cpu = portal->cpu;
-	input.cache = portal->cache;
-	input.window = portal->window;
-	input.sdest = portal->sdest;
-
-	ret =  process_portal_allocate(&input);
-	if (ret)
-		return ret;
-	portal->index = input.index;
-	portal->cinh = input.cinh;
-	portal->cena  = input.cena;
-	return 0;
-}
-
-int qman_free_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-
-	input.type = dpaa_portal_qman;
-	input.index = portal->index;
-	input.cinh = portal->cinh;
-	input.cena = portal->cena;
-
-	return process_portal_free(&input);
-}
-
-int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-	int ret;
-
-	input.type = dpaa_portal_bman;
-	input.index = portal->index;
-	input.enable_stash = 0;
-
-	ret =  process_portal_allocate(&input);
-	if (ret)
-		return ret;
-	portal->index = input.index;
-	portal->cinh = input.cinh;
-	portal->cena  = input.cena;
-	return 0;
-}
-
-int bman_free_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-
-	input.type = dpaa_portal_bman;
-	input.index = portal->index;
-	input.cinh = portal->cinh;
-	input.cena = portal->cena;
-
-	return process_portal_free(&input);
-}
-
 #define DPAA_IOCTL_ENABLE_LINK_STATUS_INTERRUPT \
 	_IOW(DPAA_IOCTL_MAGIC, 0x0E, struct usdpaa_ioctl_link_status)
 
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 447c091770..a8deecf689 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -199,14 +199,6 @@  static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
 	return -ENOMEM;
 }
 
-static void clear_fq_table_entry(u32 entry)
-{
-	spin_lock(&fq_hash_table_lock);
-	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
-	qman_fq_lookup_table[entry] = NULL;
-	spin_unlock(&fq_hash_table_lock);
-}
-
 static inline struct qman_fq *get_fq_table_entry(u32 entry)
 {
 	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
@@ -235,13 +227,6 @@  static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
 	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
 }
 
-static inline void cpu_to_hw_fd(struct qm_fd *fd)
-{
-	fd->addr = cpu_to_be40(fd->addr);
-	fd->status = cpu_to_be32(fd->status);
-	fd->opaque = cpu_to_be32(fd->opaque);
-}
-
 static inline void hw_fd_to_cpu(struct qm_fd *fd)
 {
 	fd->addr = be40_to_cpu(fd->addr);
@@ -285,15 +270,6 @@  static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
 	return IRQ_HANDLED;
 }
 
-/* This inner version is used privately by qman_create_affine_portal(), as well
- * as by the exported qman_stop_dequeues().
- */
-static inline void qman_stop_dequeues_ex(struct qman_portal *p)
-{
-	if (!(p->dqrr_disable_ref++))
-		qm_dqrr_set_maxfill(&p->p, 0);
-}
-
 static int drain_mr_fqrni(struct qm_portal *p)
 {
 	const struct qm_mr_entry *msg;
@@ -1173,17 +1149,6 @@  int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
 	return 0;
 }
 
-u16 qman_affine_channel(int cpu)
-{
-	if (cpu < 0) {
-		struct qman_portal *portal = get_affine_portal();
-
-		cpu = portal->config->cpu;
-	}
-	DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
-	return affine_channels[cpu];
-}
-
 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 				 void **bufs,
 				 struct qman_portal *p)
@@ -1247,14 +1212,6 @@  unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 	return rx_number;
 }
 
-void qman_clear_irq(void)
-{
-	struct qman_portal *p = get_affine_portal();
-	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
-		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
-	qm_isr_status_clear(&p->p, clear);
-}
-
 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
 			void **bufs)
 {
@@ -1370,51 +1327,6 @@  void qman_dqrr_consume(struct qman_fq *fq,
 	qm_dqrr_next(&p->p);
 }
 
-int qman_poll_dqrr(unsigned int limit)
-{
-	struct qman_portal *p = get_affine_portal();
-	int ret;
-
-	ret = __poll_portal_fast(p, limit);
-	return ret;
-}
-
-void qman_poll(void)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
-		if (!(p->slowpoll--)) {
-			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
-			u32 active = __poll_portal_slow(p, is);
-
-			if (active) {
-				qm_isr_status_clear(&p->p, active);
-				p->slowpoll = SLOW_POLL_BUSY;
-			} else
-				p->slowpoll = SLOW_POLL_IDLE;
-		}
-	}
-	if ((~p->irq_sources) & QM_PIRQ_DQRI)
-		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
-}
-
-void qman_stop_dequeues(void)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	qman_stop_dequeues_ex(p);
-}
-
-void qman_start_dequeues(void)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	DPAA_ASSERT(p->dqrr_disable_ref > 0);
-	if (!(--p->dqrr_disable_ref))
-		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
-}
-
 void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
 {
 	struct qman_portal *p = qp ? qp : get_affine_portal();
@@ -1424,28 +1336,6 @@  void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
 	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
 }
 
-void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
-{
-	struct qman_portal *p = qp ? qp : get_affine_portal();
-
-	pools &= p->config->pools;
-	p->sdqcr &= ~pools;
-	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
-}
-
-u32 qman_static_dequeue_get(struct qman_portal *qp)
-{
-	struct qman_portal *p = qp ? qp : get_affine_portal();
-	return p->sdqcr;
-}
-
-void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
-}
-
 void qman_dca_index(u8 index, int park_request)
 {
 	struct qman_portal *p = get_affine_portal();
@@ -1563,42 +1453,11 @@  int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
 	return -EIO;
 }
 
-void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
-{
-	/*
-	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
-	 * quiesced. Instead, run some checks.
-	 */
-	switch (fq->state) {
-	case qman_fq_state_parked:
-		DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
-		/* Fallthrough */
-	case qman_fq_state_oos:
-		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
-			qman_release_fqid(fq->fqid);
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-		clear_fq_table_entry(fq->key);
-#endif
-		return;
-	default:
-		break;
-	}
-	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
-}
-
 u32 qman_fq_fqid(struct qman_fq *fq)
 {
 	return fq->fqid;
 }
 
-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
-{
-	if (state)
-		*state = fq->state;
-	if (flags)
-		*flags = fq->flags;
-}
-
 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 {
 	struct qm_mc_command *mcc;
@@ -1695,48 +1554,6 @@  int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 	return 0;
 }
 
-int qman_schedule_fq(struct qman_fq *fq)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p;
-
-	int ret = 0;
-	u8 res;
-
-	if (fq->state != qman_fq_state_parked)
-		return -EINVAL;
-#ifdef RTE_LIBRTE_DPAA_HWDEBUG
-	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-		return -EINVAL;
-#endif
-	/* Issue a ALTERFQ_SCHED management command */
-	p = get_affine_portal();
-
-	FQLOCK(fq);
-	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-		     (fq->state != qman_fq_state_parked))) {
-		ret = -EBUSY;
-		goto out;
-	}
-	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
-	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
-	res = mcr->result;
-	if (res != QM_MCR_RESULT_OK) {
-		ret = -EIO;
-		goto out;
-	}
-	fq->state = qman_fq_state_sched;
-out:
-	FQUNLOCK(fq);
-
-	return ret;
-}
-
 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
 {
 	struct qm_mc_command *mcc;
@@ -1866,98 +1683,6 @@  int qman_oos_fq(struct qman_fq *fq)
 	return ret;
 }
 
-int qman_fq_flow_control(struct qman_fq *fq, int xon)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p;
-
-	int ret = 0;
-	u8 res;
-	u8 myverb;
-
-	if ((fq->state == qman_fq_state_oos) ||
-	    (fq->state == qman_fq_state_retired) ||
-		(fq->state == qman_fq_state_parked))
-		return -EINVAL;
-
-#ifdef RTE_LIBRTE_DPAA_HWDEBUG
-	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-		return -EINVAL;
-#endif
-	/* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
-	p = get_affine_portal();
-	FQLOCK(fq);
-	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-		     (fq->state == qman_fq_state_parked) ||
-			(fq->state == qman_fq_state_oos) ||
-			(fq->state == qman_fq_state_retired))) {
-		ret = -EBUSY;
-		goto out;
-	}
-	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
-	mcc->alterfq.count = 0;
-	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
-
-	qm_mc_commit(&p->p, myverb);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-
-	res = mcr->result;
-	if (res != QM_MCR_RESULT_OK) {
-		ret = -EIO;
-		goto out;
-	}
-out:
-	FQUNLOCK(fq);
-	return ret;
-}
-
-int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	u8 res;
-
-	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
-	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		*fqd = mcr->queryfq.fqd;
-	hw_fqd_to_cpu(fqd);
-	if (res != QM_MCR_RESULT_OK)
-		return -EIO;
-	return 0;
-}
-
-int qman_query_fq_has_pkts(struct qman_fq *fq)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	int ret = 0;
-	u8 res;
-
-	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
-	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		ret = !!mcr->queryfq_np.frm_cnt;
-	return ret;
-}
-
 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
 {
 	struct qm_mc_command *mcc;
@@ -2022,65 +1747,6 @@  int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
 	return 0;
 }
 
-int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	u8 res, myverb;
-
-	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
-				 QM_MCR_VERB_QUERYWQ;
-	mcc = qm_mc_start(&p->p);
-	mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
-	qm_mc_commit(&p->p, myverb);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK) {
-		int i, array_len;
-
-		wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
-		array_len = ARRAY_SIZE(mcr->querywq.wq_len);
-		for (i = 0; i < array_len; i++)
-			wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
-	}
-	if (res != QM_MCR_RESULT_OK) {
-		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
-		return -EIO;
-	}
-	return 0;
-}
-
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
-		       struct qm_mcr_cgrtestwrite *result)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	u8 res;
-
-	mcc = qm_mc_start(&p->p);
-	mcc->cgrtestwrite.cgid = cgr->cgrid;
-	mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
-	mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
-	qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		*result = mcr->cgrtestwrite;
-	if (res != QM_MCR_RESULT_OK) {
-		pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
-		return -EIO;
-	}
-	return 0;
-}
-
 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
 {
 	struct qm_mc_command *mcc;
@@ -2116,32 +1782,6 @@  int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
 	return 0;
 }
 
-int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
-{
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-	u8 res;
-	unsigned int i;
-
-	qm_mc_start(&p->p);
-	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			QM_MCC_VERB_QUERYCONGESTION);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		*congestion = mcr->querycongestion;
-	if (res != QM_MCR_RESULT_OK) {
-		pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
-		return -EIO;
-	}
-	for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
-		congestion->state.state[i] =
-			be32_to_cpu(congestion->state.state[i]);
-	return 0;
-}
-
 int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
 {
 	struct qman_portal *p = get_affine_portal();
@@ -2179,128 +1819,6 @@  int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
 	return ret;
 }
 
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
-			  u32 vdqcr)
-{
-	struct qman_portal *p;
-	int ret = -EBUSY;
-
-	if ((fq->state != qman_fq_state_parked) &&
-	    (fq->state != qman_fq_state_retired))
-		return -EINVAL;
-	if (vdqcr & QM_VDQCR_FQID_MASK)
-		return -EINVAL;
-	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-		return -EBUSY;
-	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
-
-	p = get_affine_portal();
-
-	if (!p->vdqcr_owned) {
-		FQLOCK(fq);
-		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-			goto escape;
-		fq_set(fq, QMAN_FQ_STATE_VDQCR);
-		FQUNLOCK(fq);
-		p->vdqcr_owned = fq;
-		ret = 0;
-	}
-escape:
-	if (ret)
-		return ret;
-
-	/* VDQCR is set */
-	qm_dqrr_vdqcr_set(&p->p, vdqcr);
-	return 0;
-}
-
-static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
-{
-	if (avail)
-		qm_eqcr_cce_prefetch(&p->p);
-	else
-		qm_eqcr_cce_update(&p->p);
-}
-
-int qman_eqcr_is_empty(void)
-{
-	struct qman_portal *p = get_affine_portal();
-	u8 avail;
-
-	update_eqcr_ci(p, 0);
-	avail = qm_eqcr_get_fill(&p->p);
-	return (avail == 0);
-}
-
-void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
-{
-	if (affine) {
-		struct qman_portal *p = get_affine_portal();
-
-		p->cb_dc_ern = handler;
-	} else
-		cb_dc_ern = handler;
-}
-
-static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
-					struct qman_fq *fq,
-					const struct qm_fd *fd,
-					u32 flags)
-{
-	struct qm_eqcr_entry *eq;
-	u8 avail;
-
-	if (p->use_eqcr_ci_stashing) {
-		/*
-		 * The stashing case is easy, only update if we need to in
-		 * order to try and liberate ring entries.
-		 */
-		eq = qm_eqcr_start_stash(&p->p);
-	} else {
-		/*
-		 * The non-stashing case is harder, need to prefetch ahead of
-		 * time.
-		 */
-		avail = qm_eqcr_get_avail(&p->p);
-		if (avail < 2)
-			update_eqcr_ci(p, avail);
-		eq = qm_eqcr_start_no_stash(&p->p);
-	}
-
-	if (unlikely(!eq))
-		return NULL;
-
-	if (flags & QMAN_ENQUEUE_FLAG_DCA)
-		eq->dca = QM_EQCR_DCA_ENABLE |
-			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
-					QM_EQCR_DCA_PARK : 0) |
-			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
-	eq->fqid = cpu_to_be32(fq->fqid);
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-	eq->tag = cpu_to_be32(fq->key);
-#else
-	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
-#endif
-	eq->fd = *fd;
-	cpu_to_hw_fd(&eq->fd);
-	return eq;
-}
-
-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
-{
-	struct qman_portal *p = get_affine_portal();
-	struct qm_eqcr_entry *eq;
-
-	eq = try_p_eq_start(p, fq, fd, flags);
-	if (!eq)
-		return -EBUSY;
-	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
-		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-	/* Factor the below out, it's used from qman_enqueue_orp() too */
-	return 0;
-}
-
 int qman_enqueue_multi(struct qman_fq *fq,
 		       const struct qm_fd *fd, u32 *flags,
 		int frames_to_send)
@@ -2442,37 +1960,6 @@  qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
 	return sent;
 }
 
-int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-		     struct qman_fq *orp, u16 orp_seqnum)
-{
-	struct qman_portal *p  = get_affine_portal();
-	struct qm_eqcr_entry *eq;
-
-	eq = try_p_eq_start(p, fq, fd, flags);
-	if (!eq)
-		return -EBUSY;
-	/* Process ORP-specifics here */
-	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
-		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
-	else {
-		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
-		if (flags & QMAN_ENQUEUE_FLAG_NESN)
-			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
-		else
-			/* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
-			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
-	}
-	eq->seqnum = cpu_to_be16(orp_seqnum);
-	eq->orp = cpu_to_be32(orp->fqid);
-	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
-		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
-				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
-		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-
-	return 0;
-}
-
 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
 		    struct qm_mcc_initcgr *opts)
 {
@@ -2581,52 +2068,6 @@  int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	return ret;
 }
 
-int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-			   struct qm_mcc_initcgr *opts)
-{
-	struct qm_mcc_initcgr local_opts;
-	struct qm_mcr_querycgr cgr_state;
-	int ret;
-
-	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
-		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
-		return -EINVAL;
-	}
-	/* We have to check that the provided CGRID is within the limits of the
-	 * data-structures, for obvious reasons. However we'll let h/w take
-	 * care of determining whether it's within the limits of what exists on
-	 * the SoC.
-	 */
-	if (cgr->cgrid >= __CGR_NUM)
-		return -EINVAL;
-
-	ret = qman_query_cgr(cgr, &cgr_state);
-	if (ret)
-		return ret;
-
-	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-	if (opts)
-		local_opts = *opts;
-
-	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-		local_opts.cgr.cscn_targ_upd_ctrl =
-				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
-				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
-	else
-		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-					TARG_DCP_MASK(dcp_portal);
-	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
-
-	/* send init if flags indicate so */
-	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
-		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
-				      &local_opts);
-	else
-		ret = qman_modify_cgr(cgr, 0, &local_opts);
-
-	return ret;
-}
-
 int qman_delete_cgr(struct qman_cgr *cgr)
 {
 	struct qm_mcr_querycgr cgr_state;
@@ -2674,222 +2115,3 @@  int qman_delete_cgr(struct qman_cgr *cgr)
 put_portal:
 	return ret;
 }
-
-int qman_shutdown_fq(u32 fqid)
-{
-	struct qman_portal *p;
-	struct qm_portal *low_p;
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	u8 state;
-	int orl_empty, fq_empty, drain = 0;
-	u32 result;
-	u32 channel, wq;
-	u16 dest_wq;
-
-	p = get_affine_portal();
-	low_p = &p->p;
-
-	/* Determine the state of the FQID */
-	mcc = qm_mc_start(low_p);
-	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
-	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
-	while (!(mcr = qm_mc_result(low_p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
-	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
-	if (state == QM_MCR_NP_STATE_OOS)
-		return 0; /* Already OOS, no need to do anymore checks */
-
-	/* Query which channel the FQ is using */
-	mcc = qm_mc_start(low_p);
-	mcc->queryfq.fqid = cpu_to_be32(fqid);
-	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
-	while (!(mcr = qm_mc_result(low_p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-
-	/* Need to store these since the MCR gets reused */
-	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
-	channel = dest_wq & 0x7;
-	wq = dest_wq >> 3;
-
-	switch (state) {
-	case QM_MCR_NP_STATE_TEN_SCHED:
-	case QM_MCR_NP_STATE_TRU_SCHED:
-	case QM_MCR_NP_STATE_ACTIVE:
-	case QM_MCR_NP_STATE_PARKED:
-		orl_empty = 0;
-		mcc = qm_mc_start(low_p);
-		mcc->alterfq.fqid = cpu_to_be32(fqid);
-		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
-		while (!(mcr = qm_mc_result(low_p)))
-			cpu_relax();
-		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			   QM_MCR_VERB_ALTER_RETIRE);
-		result = mcr->result; /* Make a copy as we reuse MCR below */
-
-		if (result == QM_MCR_RESULT_PENDING) {
-			/* Need to wait for the FQRN in the message ring, which
-			 * will only occur once the FQ has been drained.  In
-			 * order for the FQ to drain the portal needs to be set
-			 * to dequeue from the channel the FQ is scheduled on
-			 */
-			const struct qm_mr_entry *msg;
-			const struct qm_dqrr_entry *dqrr = NULL;
-			int found_fqrn = 0;
-			__maybe_unused u16 dequeue_wq = 0;
-
-			/* Flag that we need to drain FQ */
-			drain = 1;
-
-			if (channel >= qm_channel_pool1 &&
-			    channel < (u16)(qm_channel_pool1 + 15)) {
-				/* Pool channel, enable the bit in the portal */
-				dequeue_wq = (channel -
-					      qm_channel_pool1 + 1) << 4 | wq;
-			} else if (channel < qm_channel_pool1) {
-				/* Dedicated channel */
-				dequeue_wq = wq;
-			} else {
-				pr_info("Cannot recover FQ 0x%x,"
-					" it is scheduled on channel 0x%x",
-					fqid, channel);
-				return -EBUSY;
-			}
-			/* Set the sdqcr to drain this channel */
-			if (channel < qm_channel_pool1)
-				qm_dqrr_sdqcr_set(low_p,
-						  QM_SDQCR_TYPE_ACTIVE |
-					  QM_SDQCR_CHANNELS_DEDICATED);
-			else
-				qm_dqrr_sdqcr_set(low_p,
-						  QM_SDQCR_TYPE_ACTIVE |
-						  QM_SDQCR_CHANNELS_POOL_CONV
-						  (channel));
-			while (!found_fqrn) {
-				/* Keep draining DQRR while checking the MR*/
-				qm_dqrr_pvb_update(low_p);
-				dqrr = qm_dqrr_current(low_p);
-				while (dqrr) {
-					qm_dqrr_cdc_consume_1ptr(
-						low_p, dqrr, 0);
-					qm_dqrr_pvb_update(low_p);
-					qm_dqrr_next(low_p);
-					dqrr = qm_dqrr_current(low_p);
-				}
-				/* Process message ring too */
-				qm_mr_pvb_update(low_p);
-				msg = qm_mr_current(low_p);
-				while (msg) {
-					if ((msg->ern.verb &
-					     QM_MR_VERB_TYPE_MASK)
-					    == QM_MR_VERB_FQRN)
-						found_fqrn = 1;
-					qm_mr_next(low_p);
-					qm_mr_cci_consume_to_current(low_p);
-					qm_mr_pvb_update(low_p);
-					msg = qm_mr_current(low_p);
-				}
-				cpu_relax();
-			}
-		}
-		if (result != QM_MCR_RESULT_OK &&
-		    result !=  QM_MCR_RESULT_PENDING) {
-			/* error */
-			pr_err("qman_retire_fq failed on FQ 0x%x,"
-			       " result=0x%x\n", fqid, result);
-			return -1;
-		}
-		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
-			/* ORL had no entries, no need to wait until the
-			 * ERNs come in.
-			 */
-			orl_empty = 1;
-		}
-		/* Retirement succeeded, check to see if FQ needs
-		 * to be drained.
-		 */
-		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
-			/* FQ is Not Empty, drain using volatile DQ commands */
-			fq_empty = 0;
-			do {
-				const struct qm_dqrr_entry *dqrr = NULL;
-				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
-
-				qm_dqrr_vdqcr_set(low_p, vdqcr);
-
-				/* Wait for a dequeue to occur */
-				while (dqrr == NULL) {
-					qm_dqrr_pvb_update(low_p);
-					dqrr = qm_dqrr_current(low_p);
-					if (!dqrr)
-						cpu_relax();
-				}
-				/* Process the dequeues, making sure to
-				 * empty the ring completely.
-				 */
-				while (dqrr) {
-					if (dqrr->fqid == fqid &&
-					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
-						fq_empty = 1;
-					qm_dqrr_cdc_consume_1ptr(low_p,
-								 dqrr, 0);
-					qm_dqrr_pvb_update(low_p);
-					qm_dqrr_next(low_p);
-					dqrr = qm_dqrr_current(low_p);
-				}
-			} while (fq_empty == 0);
-		}
-		qm_dqrr_sdqcr_set(low_p, 0);
-
-		/* Wait for the ORL to have been completely drained */
-		while (orl_empty == 0) {
-			const struct qm_mr_entry *msg;
-
-			qm_mr_pvb_update(low_p);
-			msg = qm_mr_current(low_p);
-			while (msg) {
-				if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
-				    QM_MR_VERB_FQRL)
-					orl_empty = 1;
-				qm_mr_next(low_p);
-				qm_mr_cci_consume_to_current(low_p);
-				qm_mr_pvb_update(low_p);
-				msg = qm_mr_current(low_p);
-			}
-			cpu_relax();
-		}
-		mcc = qm_mc_start(low_p);
-		mcc->alterfq.fqid = cpu_to_be32(fqid);
-		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
-		while (!(mcr = qm_mc_result(low_p)))
-			cpu_relax();
-		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			   QM_MCR_VERB_ALTER_OOS);
-		if (mcr->result != QM_MCR_RESULT_OK) {
-			pr_err(
-			"OOS after drain Failed on FQID 0x%x, result 0x%x\n",
-			       fqid, mcr->result);
-			return -1;
-		}
-		return 0;
-
-	case QM_MCR_NP_STATE_RETIRED:
-		/* Send OOS Command */
-		mcc = qm_mc_start(low_p);
-		mcc->alterfq.fqid = cpu_to_be32(fqid);
-		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
-		while (!(mcr = qm_mc_result(low_p)))
-			cpu_relax();
-		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			   QM_MCR_VERB_ALTER_OOS);
-		if (mcr->result) {
-			pr_err("OOS Failed on FQID 0x%x\n", fqid);
-			return -1;
-		}
-		return 0;
-
-	}
-	return -1;
-}
diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h
index 8254729e66..25306804a5 100644
--- a/drivers/bus/dpaa/base/qbman/qman_priv.h
+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -165,15 +165,6 @@  struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
 void qm_put_unused_portal(struct qm_portal_config *pcfg);
 void qm_set_liodns(struct qm_portal_config *pcfg);
 
-/* This CGR feature is supported by h/w and required by unit-tests and the
- * debugfs hooks, so is implemented in the driver. However it allows an explicit
- * corruption of h/w fields by s/w that are usually incorruptible (because the
- * counters are usually maintained entirely within h/w). As such, we declare
- * this API internally.
- */
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
-		       struct qm_mcr_cgrtestwrite *result);
-
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 /* If the fq object pointer is greater than the size of context_b field,
  * than a lookup table is required.
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 3098e23093..ca1e27aeaf 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -359,11 +359,6 @@  rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
 	return 0;
 }
 
-int rte_dpaa_portal_fq_close(struct qman_fq *fq)
-{
-	return fsl_qman_fq_portal_destroy(fq->qp);
-}
-
 void
 dpaa_portal_finish(void *arg)
 {
@@ -488,21 +483,6 @@  rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
 	driver->dpaa_bus = &rte_dpaa_bus;
 }
 
-/* un-register a dpaa bus based dpaa driver */
-void
-rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
-{
-	struct rte_dpaa_bus *dpaa_bus;
-
-	BUS_INIT_FUNC_TRACE();
-
-	dpaa_bus = driver->dpaa_bus;
-
-	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
-	/* Update Bus references */
-	driver->dpaa_bus = NULL;
-}
-
 static int
 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
 		      struct rte_dpaa_device *dev)
diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h
index 82da2fcfe0..a06d29eb2d 100644
--- a/drivers/bus/dpaa/include/fsl_bman.h
+++ b/drivers/bus/dpaa/include/fsl_bman.h
@@ -252,8 +252,6 @@  static inline int bman_reserve_bpid(u32 bpid)
 
 void bman_seed_bpid_range(u32 bpid, unsigned int count);
 
-int bman_shutdown_pool(u32 bpid);
-
 /**
  * bman_new_pool - Allocates a Buffer Pool object
  * @params: parameters specifying the buffer pool ID and behaviour
@@ -310,12 +308,6 @@  __rte_internal
 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
 		 u32 flags);
 
-/**
- * bman_query_pools - Query all buffer pool states
- * @state: storage for the queried availability and depletion states
- */
-int bman_query_pools(struct bm_pool_state *state);
-
 /**
  * bman_query_free_buffers - Query how many free buffers are in buffer pool
  * @pool: the buffer pool object to query
@@ -325,13 +317,6 @@  int bman_query_pools(struct bm_pool_state *state);
 __rte_internal
 u32 bman_query_free_buffers(struct bman_pool *pool);
 
-/**
- * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
- * @pool: the buffer pool object to which the thresholds will be set
- * @thresholds: the new thresholds
- */
-int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
-
 /**
  * bm_pool_set_hw_threshold - Change the buffer pool's thresholds
  * @pool: Pool id
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index a3cf77f0e3..71f5a2f8cf 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -64,12 +64,6 @@  void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
-/* Set ignore pause option for a specific interface */
-void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
-
-/* Set max frame length */
-void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
-
 /* Enable/disable Rx promiscuous mode on specified interface */
 __rte_internal
 void fman_if_promiscuous_enable(struct fman_if *p);
@@ -114,18 +108,11 @@  int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
 __rte_internal
 void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
 
-/* Get IC transfer params */
-int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
-
 /* Set IC transfer params */
 __rte_internal
 int fman_if_set_ic_params(struct fman_if *fm_if,
 			  const struct fman_if_ic_params *icp);
 
-/* Get interface fd->offset value */
-__rte_internal
-int fman_if_get_fdoff(struct fman_if *fm_if);
-
 /* Set interface fd->offset value */
 __rte_internal
 void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
@@ -138,20 +125,10 @@  int fman_if_get_sg_enable(struct fman_if *fm_if);
 __rte_internal
 void fman_if_set_sg(struct fman_if *fm_if, int enable);
 
-/* Get interface Max Frame length (MTU) */
-uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
-
 /* Set interface  Max Frame length (MTU) */
 __rte_internal
 void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
 
-/* Set interface next invoked action for dequeue operation */
-void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
-
-/* discard error packets on rx */
-__rte_internal
-void fman_if_discard_rx_errors(struct fman_if *fm_if);
-
 __rte_internal
 void fman_if_receive_rx_errors(struct fman_if *fm_if,
 	unsigned int err_eq);
@@ -162,11 +139,6 @@  void fman_if_set_mcast_filter_table(struct fman_if *p);
 __rte_internal
 void fman_if_reset_mcast_filter_table(struct fman_if *p);
 
-int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
-
-int fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth);
-
-
 /* Enable/disable Rx on all interfaces */
 static inline void fman_if_enable_all_rx(void)
 {
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index 10212f0fd5..b24aa76409 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1379,16 +1379,6 @@  int qman_irqsource_remove(u32 bits);
 __rte_internal
 int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
 
-/**
- * qman_affine_channel - return the channel ID of an portal
- * @cpu: the cpu whose affine portal is the subject of the query
- *
- * If @cpu is -1, the affine portal for the current CPU will be used. It is a
- * bug to call this function for any value of @cpu (other than -1) that is not a
- * member of the cpu mask.
- */
-u16 qman_affine_channel(int cpu);
-
 __rte_internal
 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 				 void **bufs, struct qman_portal *q);
@@ -1428,55 +1418,6 @@  __rte_internal
 void qman_dqrr_consume(struct qman_fq *fq,
 		       struct qm_dqrr_entry *dq);
 
-/**
- * qman_poll_dqrr - process DQRR (fast-path) entries
- * @limit: the maximum number of DQRR entries to process
- *
- * Use of this function requires that DQRR processing not be interrupt-driven.
- * Ie. the value returned by qman_irqsource_get() should not include
- * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
- * this function will return -EINVAL, otherwise the return value is >=0 and
- * represents the number of DQRR entries processed.
- */
-__rte_internal
-int qman_poll_dqrr(unsigned int limit);
-
-/**
- * qman_poll
- *
- * Dispatcher logic on a cpu can use this to trigger any maintenance of the
- * affine portal. There are two classes of portal processing in question;
- * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
- * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
- * thresholds, congestion state changes, etc). This function does whatever
- * processing is not triggered by interrupts.
- *
- * Note, if DQRR and some slow-path processing are poll-driven (rather than
- * interrupt-driven) then this function uses a heuristic to determine how often
- * to run slow-path processing - as slow-path processing introduces at least a
- * minimum latency each time it is run, whereas fast-path (DQRR) processing is
- * close to zero-cost if there is no work to be done.
- */
-void qman_poll(void);
-
-/**
- * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
- *
- * Disables DQRR processing of the portal. This is reference-counted, so
- * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
- * truly re-enable dequeuing.
- */
-void qman_stop_dequeues(void);
-
-/**
- * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
- *
- * Enables DQRR processing of the portal. This is reference-counted, so
- * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
- * truly re-enable dequeuing.
- */
-void qman_start_dequeues(void);
-
 /**
  * qman_static_dequeue_add - Add pool channels to the portal SDQCR
  * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
@@ -1488,39 +1429,6 @@  void qman_start_dequeues(void);
 __rte_internal
 void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
 
-/**
- * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
- * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
- *
- * Removes a set of pool channels from the portal's static dequeue command
- * register (SDQCR). The requested pools are limited to those the portal has
- * dequeue access to.
- */
-void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
-
-/**
- * qman_static_dequeue_get - return the portal's current SDQCR
- *
- * Returns the portal's current static dequeue command register (SDQCR). The
- * entire register is returned, so if only the currently-enabled pool channels
- * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
- */
-u32 qman_static_dequeue_get(struct qman_portal *qp);
-
-/**
- * qman_dca - Perform a Discrete Consumption Acknowledgment
- * @dq: the DQRR entry to be consumed
- * @park_request: indicates whether the held-active @fq should be parked
- *
- * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
- * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
- * does not take a 'portal' argument but implies the core affine portal from the
- * cpu that is currently executing the function. For reasons of locking, this
- * function must be called from the same CPU as that which processed the DQRR
- * entry in the first place.
- */
-void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
-
 /**
  * qman_dca_index - Perform a Discrete Consumption Acknowledgment
  * @index: the DQRR index to be consumed
@@ -1536,36 +1444,6 @@  void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
 __rte_internal
 void qman_dca_index(u8 index, int park_request);
 
-/**
- * qman_eqcr_is_empty - Determine if portal's EQCR is empty
- *
- * For use in situations where a cpu-affine caller needs to determine when all
- * enqueues for the local portal have been processed by Qman but can't use the
- * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
- * The function forces tracking of EQCR consumption (which normally doesn't
- * happen until enqueue processing needs to find space to put new enqueue
- * commands), and returns zero if the ring still has unprocessed entries,
- * non-zero if it is empty.
- */
-int qman_eqcr_is_empty(void);
-
-/**
- * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
- * @handler: callback for processing DCP ERNs
- * @affine: whether this handler is specific to the locally affine portal
- *
- * If a hardware block's interface to Qman (ie. its direct-connect portal, or
- * DCP) is configured not to receive enqueue rejections, then any enqueues
- * through that DCP that are rejected will be sent to a given software portal.
- * If @affine is non-zero, then this handler will only be used for DCP ERNs
- * received on the portal affine to the current CPU. If multiple CPUs share a
- * portal and they all call this function, they will be setting the handler for
- * the same portal! If @affine is zero, then this handler will be global to all
- * portals handled by this instance of the driver. Only those portals that do
- * not have their own affine handler will use the global handler.
- */
-void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
-
 	/* FQ management */
 	/* ------------- */
 /**
@@ -1594,18 +1472,6 @@  void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
 __rte_internal
 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
 
-/**
- * qman_destroy_fq - Deallocates a FQ
- * @fq: the frame queue object to release
- * @flags: bit-mask of QMAN_FQ_FREE_*** options
- *
- * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
- * not deallocated but the caller regains ownership, to do with as desired. The
- * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
- * is specified, in which case it may also be in the 'parked' state.
- */
-void qman_destroy_fq(struct qman_fq *fq, u32 flags);
-
 /**
  * qman_fq_fqid - Queries the frame queue ID of a FQ object
  * @fq: the frame queue object to query
@@ -1613,19 +1479,6 @@  void qman_destroy_fq(struct qman_fq *fq, u32 flags);
 __rte_internal
 u32 qman_fq_fqid(struct qman_fq *fq);
 
-/**
- * qman_fq_state - Queries the state of a FQ object
- * @fq: the frame queue object to query
- * @state: pointer to state enum to return the FQ scheduling state
- * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
- *
- * Queries the state of the FQ object, without performing any h/w commands.
- * This captures the state, as seen by the driver, at the time the function
- * executes.
- */
-__rte_internal
-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
-
 /**
  * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
  * @fq: the frame queue object to modify, must be 'parked' or new.
@@ -1663,15 +1516,6 @@  void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
 __rte_internal
 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
 
-/**
- * qman_schedule_fq - Schedules a FQ
- * @fq: the frame queue object to schedule, must be 'parked'
- *
- * Schedules the frame queue, which must be Parked, which takes it to
- * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
- */
-int qman_schedule_fq(struct qman_fq *fq);
-
 /**
  * qman_retire_fq - Retires a FQ
  * @fq: the frame queue object to retire
@@ -1703,32 +1547,6 @@  int qman_retire_fq(struct qman_fq *fq, u32 *flags);
 __rte_internal
 int qman_oos_fq(struct qman_fq *fq);
 
-/**
- * qman_fq_flow_control - Set the XON/XOFF state of a FQ
- * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
- * or 'retired' or 'parked' state
- * @xon: boolean to set fq in XON or XOFF state
- *
- * The frame should be in Tentatively Scheduled state or Truly Schedule sate,
- * otherwise the IFSI interrupt will be asserted.
- */
-int qman_fq_flow_control(struct qman_fq *fq, int xon);
-
-/**
- * qman_query_fq - Queries FQD fields (via h/w query command)
- * @fq: the frame queue object to be queried
- * @fqd: storage for the queried FQD fields
- */
-int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-
-/**
- * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
- * if packets are in the frame queue. If there are no packets on frame
- * queue '0' is returned.
- * @fq: the frame queue object to be queried
- */
-int qman_query_fq_has_pkts(struct qman_fq *fq);
-
 /**
  * qman_query_fq_np - Queries non-programmable FQD fields
  * @fq: the frame queue object to be queried
@@ -1745,73 +1563,6 @@  int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
 __rte_internal
 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
 
-/**
- * qman_query_wq - Queries work queue lengths
- * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
- *		to this software portal. Otherwise, query length of WQs in a
- *		channel  specified in wq.
- * @wq: storage for the queried WQs lengths. Also specified the channel to
- *	to query if query_dedicated is zero.
- */
-int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
-
-/**
- * qman_volatile_dequeue - Issue a volatile dequeue command
- * @fq: the frame queue object to dequeue from
- * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
- * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
- *
- * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
- * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
- * the VDQCR is already in use, otherwise returns non-zero for failure. If
- * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
- * the VDQCR command has finished executing (ie. once the callback for the last
- * DQRR entry resulting from the VDQCR command has been called). If not using
- * the FINISH flag, completion can be determined either by detecting the
- * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
- * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
- * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
- * "flags" retrieved from qman_fq_state().
- */
-__rte_internal
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-
-/**
- * qman_enqueue - Enqueue a frame to a frame queue
- * @fq: the frame queue object to enqueue to
- * @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
- *
- * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
- * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
- * field is ignored. The return value is non-zero on error, such as ring full
- * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
- * specified), etc. If the ring is full and FLAG_WAIT is specified, this
- * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
- * interrupt will assert when Qman consumes the EQCR entry (subject to "status
- * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
- * perform an implied "discrete consumption acknowledgment" on the dequeue
- * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
- * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
- * this implicit DCA can delay the release of a "held active" frame queue
- * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
- * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
- * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
- * acknowledgment should "park request" the "held active" frame queue. Ie.
- * when the portal eventually releases that frame queue, it will be left in the
- * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
- * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
- * is requested, and the FQ is a member of a congestion group, then this
- * function returns -EAGAIN if the congestion group is currently congested.
- * Note, this does not eliminate ERNs, as the async interface means we can be
- * sending enqueue commands to an un-congested FQ that becomes congested before
- * the enqueue commands are processed, but it does minimise needless thrashing
- * of an already busy hardware resource by throttling many of the to-be-dropped
- * enqueues "at the source".
- */
-__rte_internal
-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
-
 __rte_internal
 int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
 		       int frames_to_send);
@@ -1846,45 +1597,6 @@  qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
 
 typedef int (*qman_cb_precommit) (void *arg);
 
-/**
- * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
- * @fq: the frame queue object to enqueue to
- * @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
- * @orp: the frame queue object used as an order restoration point.
- * @orp_seqnum: the sequence number of this frame in the order restoration path
- *
- * Similar to qman_enqueue(), but with the addition of an Order Restoration
- * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
- * enqueue operation to employ order restoration. Each frame queue object acts
- * as an Order Definition Point (ODP) by providing each frame dequeued from it
- * with an incrementing sequence number, this value is generally ignored unless
- * that sequence of dequeued frames will need order restoration later. Each
- * frame queue object also encapsulates an Order Restoration Point (ORP), which
- * is a re-assembly context for re-ordering frames relative to their sequence
- * numbers as they are enqueued. The ORP does not have to be within the frame
- * queue that receives the enqueued frame, in fact it is usually the frame
- * queue from which the frames were originally dequeued. For the purposes of
- * order restoration, multiple frames (or "fragments") can be enqueued for a
- * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
- * enqueues except the final fragment of a given sequence number. Ordering
- * between sequence numbers is guaranteed, even if fragments of different
- * sequence numbers are interlaced with one another. Fragments of the same
- * sequence number will retain the order in which they are enqueued. If no
- * enqueue is to performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
- * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
- * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
- * sequence number should become the ORP's "Next Expected Sequence Number".
- *
- * Side note: a frame queue object can be used purely as an ORP, without
- * carrying any frames at all. Care should be taken not to deallocate a frame
- * queue object that is being actively used as an ORP, as a future allocation
- * of the frame queue object may start using the internal ORP before the
- * previous use has finished.
- */
-int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-		     struct qman_fq *orp, u16 orp_seqnum);
-
 /**
  * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
  * @result: is set by the API to the base FQID of the allocated range
@@ -1922,8 +1634,6 @@  static inline void qman_release_fqid(u32 fqid)
 
 void qman_seed_fqid_range(u32 fqid, unsigned int count);
 
-int qman_shutdown_fq(u32 fqid);
-
 /**
  * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
  * @fqid: the base FQID of the range to deallocate
@@ -2001,17 +1711,6 @@  __rte_internal
 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 		    struct qm_mcc_initcgr *opts);
 
-/**
- * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
- * @cgr: the 'cgr' object, with fields filled in
- * @flags: QMAN_CGR_FLAG_* values
- * @dcp_portal: the DCP portal to which the cgr object is registered.
- * @opts: optional state of CGR settings
- *
- */
-int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-			   struct qm_mcc_initcgr *opts);
-
 /**
  * qman_delete_cgr - Deregisters a congestion group object
  * @cgr: the 'cgr' object to deregister
@@ -2048,12 +1747,6 @@  int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
  */
 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
 
-/**
- * qman_query_congestion - Queries the state of all congestion groups
- * @congestion: storage for the queried state of all congestion groups
- */
-int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
-
 /**
  * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
  * @result: is set by the API to the base CGR ID of the allocated range
diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
index dcf35e4adb..3a5df9bf7e 100644
--- a/drivers/bus/dpaa/include/fsl_usd.h
+++ b/drivers/bus/dpaa/include/fsl_usd.h
@@ -51,16 +51,9 @@  struct dpaa_raw_portal {
 	uint64_t cena;
 };
 
-int qman_allocate_raw_portal(struct dpaa_raw_portal *portal);
-int qman_free_raw_portal(struct dpaa_raw_portal *portal);
-
-int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
-int bman_free_raw_portal(struct dpaa_raw_portal *portal);
-
 /* Obtain thread-local UIO file-descriptors */
 __rte_internal
 int qman_thread_fd(void);
-int bman_thread_fd(void);
 
 /* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
  * line before notifying us, and this post-processing re-enables it once
@@ -70,12 +63,8 @@  int bman_thread_fd(void);
 __rte_internal
 void qman_thread_irq(void);
 
-__rte_internal
-void bman_thread_irq(void);
 __rte_internal
 void qman_fq_portal_thread_irq(struct qman_portal *qp);
-__rte_internal
-void qman_clear_irq(void);
 
 /* Global setup */
 int qman_global_init(void);
diff --git a/drivers/bus/dpaa/include/netcfg.h b/drivers/bus/dpaa/include/netcfg.h
index d7d1befd24..815b3ba087 100644
--- a/drivers/bus/dpaa/include/netcfg.h
+++ b/drivers/bus/dpaa/include/netcfg.h
@@ -49,12 +49,6 @@  struct netcfg_interface {
 __rte_internal
 struct netcfg_info *netcfg_acquire(void);
 
-/* cfg_ptr: configuration information pointer.
- * Frees the resources allocated by the configuration layer.
- */
-__rte_internal
-void netcfg_release(struct netcfg_info *cfg_ptr);
-
 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
 /* cfg_ptr: configuration information pointer.
  * This function dumps configuration data to stdout.
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 48d5cf4625..40d82412df 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -214,16 +214,6 @@  rte_dpaa_mem_vtop(void *vaddr)
 __rte_internal
 void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
 
-/**
- * Unregister a DPAA driver.
- *
- * @param driver
- *	A pointer to a rte_dpaa_driver structure describing the driver
- *	to be unregistered.
- */
-__rte_internal
-void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
-
 /**
  * Initialize a DPAA portal
  *
@@ -239,9 +229,6 @@  int rte_dpaa_portal_init(void *arg);
 __rte_internal
 int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
 
-__rte_internal
-int rte_dpaa_portal_fq_close(struct qman_fq *fq);
-
 /**
  * Cleanup a DPAA Portal
  */
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index fe4f9ac5aa..98f1e00582 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -7,7 +7,6 @@  INTERNAL {
 	bman_new_pool;
 	bman_query_free_buffers;
 	bman_release;
-	bman_thread_irq;
 	dpaa_get_ioctl_version_number;
 	dpaa_get_eth_port_cfg;
 	dpaa_get_qm_channel_caam;
@@ -25,11 +24,9 @@  INTERNAL {
 	fman_if_add_mac_addr;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
-	fman_if_discard_rx_errors;
 	fman_if_enable_rx;
 	fman_if_get_fc_quanta;
 	fman_if_get_fc_threshold;
-	fman_if_get_fdoff;
 	fman_if_get_sg_enable;
 	fman_if_loopback_disable;
 	fman_if_loopback_enable;
@@ -52,19 +49,16 @@  INTERNAL {
 	fman_if_receive_rx_errors;
 	fsl_qman_fq_portal_create;
 	netcfg_acquire;
-	netcfg_release;
 	per_lcore_dpaa_io;
 	qman_alloc_cgrid_range;
 	qman_alloc_fqid_range;
 	qman_alloc_pool_range;
-	qman_clear_irq;
 	qman_create_cgr;
 	qman_create_fq;
 	qman_dca_index;
 	qman_delete_cgr;
 	qman_dequeue;
 	qman_dqrr_consume;
-	qman_enqueue;
 	qman_enqueue_multi;
 	qman_enqueue_multi_fq;
 	qman_ern_poll_free;
@@ -79,7 +73,6 @@  INTERNAL {
 	qman_irqsource_remove;
 	qman_modify_cgr;
 	qman_oos_fq;
-	qman_poll_dqrr;
 	qman_portal_dequeue;
 	qman_portal_poll_rx;
 	qman_query_fq_frm_cnt;
@@ -92,10 +85,7 @@  INTERNAL {
 	qman_static_dequeue_add;
 	qman_thread_fd;
 	qman_thread_irq;
-	qman_volatile_dequeue;
 	rte_dpaa_driver_register;
-	rte_dpaa_driver_unregister;
-	rte_dpaa_portal_fq_close;
 	rte_dpaa_portal_fq_init;
 	rte_dpaa_portal_init;
 
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 58435589b2..51749764e7 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -521,25 +521,6 @@  rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
 	driver->fslmc_bus = &rte_fslmc_bus;
 }
 
-/*un-register a fslmc bus based dpaa2 driver */
-void
-rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
-{
-	struct rte_fslmc_bus *fslmc_bus;
-
-	fslmc_bus = driver->fslmc_bus;
-
-	/* Cleanup the PA->VA Translation table; From whereever this function
-	 * is called from.
-	 */
-	if (rte_eal_iova_mode() == RTE_IOVA_PA)
-		dpaax_iova_table_depopulate();
-
-	TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
-	/* Update Bus references */
-	driver->fslmc_bus = NULL;
-}
-
 /*
  * All device has iova as va
  */
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index d9103409cf..f3af33b658 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -77,78 +77,6 @@  int dpbp_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpbp_create() - Create the DPBP object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id; use in subsequent API calls
- *
- * Create the DPBP object, allocate required resources and
- * perform required initialization.
- *
- * This function accepts an authentication token of a parent
- * container that this object should be assigned to and returns
- * an object id. This object_id will be used in all subsequent calls to
- * this specific object.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpbp_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct mc_command cmd = { 0 };
-	int err;
-
-	(void)(cfg); /* unused */
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
-					  cmd_flags, dprc_token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @obj_id:	ID of DPBP object
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpbp_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t obj_id)
-{
-	struct dpbp_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
-					  cmd_flags, dprc_token);
-
-	cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
-	cmd_params->object_id = cpu_to_le32(obj_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpbp_enable() - Enable the DPBP.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -193,40 +121,6 @@  int dpbp_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpbp_is_enabled() - Check if the DPBP is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPBP object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en)
-{
-	struct dpbp_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
-	*en = rsp_params->enabled & DPBP_ENABLE;
-
-	return 0;
-}
-
 /**
  * dpbp_reset() - Reset the DPBP, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -284,41 +178,6 @@  int dpbp_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpbp_get_api_version - Get Data Path Buffer Pool API version
- * @mc_io:	Pointer to Mc portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of Buffer Pool API
- * @minor_ver:	Minor version of Buffer Pool API
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpbp_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
-					  cmd_flags, 0);
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpbp_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
 /**
  * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
  * @mc_io:  Pointer to MC portal's I/O object
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
index 7e31327afa..cd558d507c 100644
--- a/drivers/bus/fslmc/mc/dpci.c
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -53,116 +53,6 @@  int dpci_open(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpci_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_close(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpci_create() - Create the DPCI object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPCI object, allocate required resources and perform required
- * initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpci_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct dpci_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpci_cmd_create *)cmd.params;
-	cmd_params->num_of_priorities = cfg->num_of_priorities;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpci_destroy() - Destroy the DPCI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpci_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id)
-{
-	struct dpci_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpci_cmd_destroy *)cmd.params;
-	cmd_params->dpci_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -186,86 +76,6 @@  int dpci_enable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_disable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpci_is_enabled() - Check if the DPCI is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en)
-{
-	struct dpci_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpci_rsp_is_enabled *)cmd.params;
-	*en = dpci_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dpci_reset() - Reset the DPCI, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_reset(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpci_get_attributes() - Retrieve DPCI attributes.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -431,133 +241,3 @@  int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
-
-/**
- * dpci_get_api_version() - Get communication interface API version
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path communication interface API
- * @minor_ver:	Minor version of data path communication interface API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpci_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpci_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpci_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
-/**
- * dpci_set_opr() - Set Order Restoration configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- * @index:	The queue index
- * @options:	Configuration mode options
- *		can be OPR_OPT_CREATE or OPR_OPT_RETIRE
- * @cfg:	Configuration options for the OPR
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_set_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 uint8_t options,
-		 struct opr_cfg *cfg)
-{
-	struct dpci_cmd_set_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpci_cmd_set_opr *)cmd.params;
-	cmd_params->index = index;
-	cmd_params->options = options;
-	cmd_params->oloe = cfg->oloe;
-	cmd_params->oeane = cfg->oeane;
-	cmd_params->olws = cfg->olws;
-	cmd_params->oa = cfg->oa;
-	cmd_params->oprrws = cfg->oprrws;
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpci_get_opr() - Retrieve Order Restoration config and query.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- * @index:	The queue index
- * @cfg:	Returned OPR configuration
- * @qry:	Returned OPR query
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry)
-{
-	struct dpci_rsp_get_opr *rsp_params;
-	struct dpci_cmd_get_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpci_cmd_get_opr *)cmd.params;
-	cmd_params->index = index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpci_rsp_get_opr *)cmd.params;
-	cfg->oloe = rsp_params->oloe;
-	cfg->oeane = rsp_params->oeane;
-	cfg->olws = rsp_params->olws;
-	cfg->oa = rsp_params->oa;
-	cfg->oprrws = rsp_params->oprrws;
-	qry->rip = dpci_get_field(rsp_params->flags, RIP);
-	qry->enable = dpci_get_field(rsp_params->flags, OPR_ENABLE);
-	qry->nesn = le16_to_cpu(rsp_params->nesn);
-	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-	qry->tseq_nlis = dpci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
-	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-	qry->hseq_nlis = dpci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
-	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-
-	return 0;
-}
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
index 2c46638dcb..e9bf364507 100644
--- a/drivers/bus/fslmc/mc/dpcon.c
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -53,212 +53,6 @@  int dpcon_open(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpcon_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_create() - Create the DPCON object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id; use in subsequent API calls
- *
- * Create the DPCON object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * This function accepts an authentication token of a parent
- * container that this object should be assigned to and returns
- * an object id. This object_id will be used in all subsequent calls to
- * this specific object.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dpcon_cfg *cfg,
-		 uint32_t *obj_id)
-{
-	struct dpcon_cmd_create *dpcon_cmd;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	dpcon_cmd = (struct dpcon_cmd_create *)cmd.params;
-	dpcon_cmd->num_priorities = cfg->num_priorities;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpcon_destroy() - Destroy the DPCON object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @obj_id:	ID of DPCON object
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpcon_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t obj_id)
-{
-	struct dpcon_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpcon_cmd_destroy *)cmd.params;
-	cmd_params->object_id = cpu_to_le32(obj_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_enable() - Enable the DPCON
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpcon_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_disable() - Disable the DPCON
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpcon_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_is_enabled() -	Check if the DPCON is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en)
-{
-	struct dpcon_rsp_is_enabled *dpcon_rsp;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
-	*en = dpcon_rsp->enabled & DPCON_ENABLE;
-
-	return 0;
-}
-
-/**
- * dpcon_reset() - Reset the DPCON, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
-					  cmd_flags, token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpcon_get_attributes() - Retrieve DPCON attributes.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -295,38 +89,3 @@  int dpcon_get_attributes(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
-
-/**
- * dpcon_get_api_version - Get Data Path Concentrator API version
- * @mc_io:	Pointer to MC portal's DPCON object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of DPCON API
- * @minor_ver:	Minor version of DPCON API
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver)
-{
-	struct dpcon_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
-					  cmd_flags, 0);
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpcon_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c
index dcb9d516a1..30640fd353 100644
--- a/drivers/bus/fslmc/mc/dpdmai.c
+++ b/drivers/bus/fslmc/mc/dpdmai.c
@@ -76,92 +76,6 @@  int dpdmai_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpdmai_create() - Create the DPDMAI object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPDMAI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmai_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmai_cfg *cfg,
-		  uint32_t *obj_id)
-{
-	struct dpdmai_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmai_cmd_create *)cmd.params;
-	cmd_params->num_queues = cfg->num_queues;
-	cmd_params->priorities[0] = cfg->priorities[0];
-	cmd_params->priorities[1] = cfg->priorities[1];
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- *		created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpdmai_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id)
-{
-	struct dpdmai_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmai_cmd_destroy *)cmd.params;
-	cmd_params->dpdmai_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -208,64 +122,6 @@  int dpdmai_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMAI object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en)
-{
-	struct dpdmai_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params;
-	*en = dpdmai_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMAI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmai_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
  * @mc_io:	Pointer to MC portal's I/O object
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index a3382ed142..317924c856 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -76,95 +76,6 @@  int dpio_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpio_create() - Create the DPIO object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPIO object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpio_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpio_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct dpio_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpio_cmd_create *)cmd.params;
-	cmd_params->num_priorities = cfg->num_priorities;
-	dpio_set_field(cmd_params->channel_mode,
-		       CHANNEL_MODE,
-		       cfg->channel_mode);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpio_destroy() - Destroy the DPIO object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- *		created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpio_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id)
-{
-	struct dpio_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
-			cmd_flags,
-			dprc_token);
-
-	/* set object id to destroy */
-	cmd_params = (struct dpio_cmd_destroy *)cmd.params;
-	cmd_params->dpio_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpio_enable() - Enable the DPIO, allow I/O portal operations.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -211,40 +122,6 @@  int dpio_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpio_is_enabled() - Check if the DPIO is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPIO object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpio_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en)
-{
-	struct dpio_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpio_rsp_is_enabled *)cmd.params;
-	*en = dpio_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpio_reset() - Reset the DPIO, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -341,41 +218,6 @@  int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpio_get_stashing_destination() - Get the stashing destination..
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPIO object
- * @sdest:	Returns the stashing destination value
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint8_t *sdest)
-{
-	struct dpio_stashing_dest *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpio_stashing_dest *)cmd.params;
-	*sdest = rsp_params->sdest;
-
-	return 0;
-}
-
 /**
  * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
  * @mc_io:		Pointer to MC portal's I/O object
@@ -444,36 +286,3 @@  int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpio_get_api_version() - Get Data Path I/O API version
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path i/o API
- * @minor_ver:	Minor version of data path i/o API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpio_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpio_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpio_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index 8a021f55f1..f50131ba45 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -34,17 +34,6 @@  struct dpbp_cfg {
 	uint32_t options;
 };
 
-int dpbp_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpbp_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpbp_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t obj_id);
-
 __rte_internal
 int dpbp_enable(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
@@ -55,11 +44,6 @@  int dpbp_disable(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token);
 
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en);
-
 __rte_internal
 int dpbp_reset(struct fsl_mc_io *mc_io,
 	       uint32_t cmd_flags,
@@ -90,10 +74,6 @@  int dpbp_get_attributes(struct fsl_mc_io *mc_io,
  * BPSCN write will attempt to allocate into a cache (coherent write)
  */
 #define DPBP_NOTIF_OPT_COHERENT_WRITE	0x00000001
-int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
 
 __rte_internal
 int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
index 81fd3438aa..9fdc3a8ea5 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -37,10 +37,6 @@  int dpci_open(struct fsl_mc_io *mc_io,
 	      int dpci_id,
 	      uint16_t *token);
 
-int dpci_close(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token);
-
 /**
  * Enable the Order Restoration support
  */
@@ -66,34 +62,10 @@  struct dpci_cfg {
 	uint8_t num_of_priorities;
 };
 
-int dpci_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpci_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpci_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id);
-
 int dpci_enable(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
 		uint16_t token);
 
-int dpci_disable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
-int dpci_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en);
-
-int dpci_reset(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token);
-
 /**
  * struct dpci_attr - Structure representing DPCI attributes
  * @id:			DPCI object ID
@@ -224,25 +196,4 @@  int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
 		      uint8_t priority,
 		      struct dpci_tx_queue_attr *attr);
 
-int dpci_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
-
-__rte_internal
-int dpci_set_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 uint8_t options,
-		 struct opr_cfg *cfg);
-
-__rte_internal
-int dpci_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry);
-
 #endif /* __FSL_DPCI_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
index 7caa6c68a1..0b3add5d52 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -26,10 +26,6 @@  int dpcon_open(struct fsl_mc_io *mc_io,
 	       int dpcon_id,
 	       uint16_t *token);
 
-int dpcon_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
 /**
  * struct dpcon_cfg - Structure representing DPCON configuration
  * @num_priorities: Number of priorities for the DPCON channel (1-8)
@@ -38,34 +34,6 @@  struct dpcon_cfg {
 	uint8_t num_priorities;
 };
 
-int dpcon_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dpcon_cfg *cfg,
-		 uint32_t *obj_id);
-
-int dpcon_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t obj_id);
-
-int dpcon_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
-int dpcon_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
-
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en);
-
-int dpcon_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
 /**
  * struct dpcon_attr - Structure representing DPCON attributes
  * @id:			DPCON object ID
@@ -84,9 +52,4 @@  int dpcon_get_attributes(struct fsl_mc_io *mc_io,
 			 uint16_t token,
 			 struct dpcon_attr *attr);
 
-int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver);
-
 #endif /* __FSL_DPCON_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h
index 19328c00a0..eb1d3c1658 100644
--- a/drivers/bus/fslmc/mc/fsl_dpdmai.h
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -47,17 +47,6 @@  struct dpdmai_cfg {
 	uint8_t priorities[DPDMAI_PRIO_NUM];
 };
 
-int dpdmai_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmai_cfg *cfg,
-		  uint32_t *obj_id);
-
-int dpdmai_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id);
-
 __rte_internal
 int dpdmai_enable(struct fsl_mc_io *mc_io,
 		  uint32_t cmd_flags,
@@ -68,15 +57,6 @@  int dpdmai_disable(struct fsl_mc_io *mc_io,
 		   uint32_t cmd_flags,
 		   uint16_t token);
 
-int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en);
-
-int dpdmai_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
 /**
  * struct dpdmai_attr - Structure representing DPDMAI attributes
  * @id: DPDMAI object ID
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
index c2db76bdf8..0ddcdb41ec 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -50,17 +50,6 @@  struct dpio_cfg {
 };
 
 
-int dpio_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpio_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpio_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id);
-
 __rte_internal
 int dpio_enable(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
@@ -71,11 +60,6 @@  int dpio_disable(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token);
 
-int dpio_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en);
-
 __rte_internal
 int dpio_reset(struct fsl_mc_io *mc_io,
 	       uint32_t cmd_flags,
@@ -87,11 +71,6 @@  int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
 				  uint16_t token,
 				  uint8_t sdest);
 
-int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint8_t *sdest);
-
 __rte_internal
 int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
 				    uint32_t cmd_flags,
@@ -135,9 +114,4 @@  int dpio_get_attributes(struct fsl_mc_io *mc_io,
 			uint16_t token,
 			struct dpio_attr *attr);
 
-int dpio_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
-
 #endif /* __FSL_DPIO_H */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index d9619848d8..06b3e81f26 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -109,13 +109,6 @@  void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
 	}
 }
 
-int dpaa2_dpbp_supported(void)
-{
-	if (TAILQ_EMPTY(&dpbp_dev_list))
-		return -1;
-	return 0;
-}
-
 static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
 	.dev_type = DPAA2_BPOOL,
 	.create = dpaa2_create_dpbp_device,
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index ac24f01451..b72017bd32 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -454,9 +454,6 @@  struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
 __rte_internal
 void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
 
-__rte_internal
-int dpaa2_dpbp_supported(void);
-
 __rte_internal
 struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
 
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
index 54096e8774..12beb148fb 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
@@ -36,6 +36,4 @@  int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
 __rte_internal
 uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
 
-uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
-
 #endif /* !_FSL_QBMAN_DEBUG_H */
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index eb68c9cab5..b24c809fa1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -50,14 +50,6 @@  struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
  */
 int qbman_swp_update(struct qbman_swp *p, int stash_off);
 
-/**
- * qbman_swp_finish() - Create and destroy a functional object representing
- * the given QBMan portal descriptor.
- * @p: the qbman_swp object to be destroyed.
- *
- */
-void qbman_swp_finish(struct qbman_swp *p);
-
 /**
  * qbman_swp_invalidate() - Invalidate the cache enabled area of the QBMan
  * portal. This is required to be called if a portal moved to another core
@@ -67,14 +59,6 @@  void qbman_swp_finish(struct qbman_swp *p);
  */
 void qbman_swp_invalidate(struct qbman_swp *p);
 
-/**
- * qbman_swp_get_desc() - Get the descriptor of the given portal object.
- * @p: the given portal object.
- *
- * Return the descriptor for this portal.
- */
-const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
-
 	/**************/
 	/* Interrupts */
 	/**************/
@@ -92,32 +76,6 @@  const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
 /* Volatile dequeue command interrupt */
 #define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
 
-/**
- * qbman_swp_interrupt_get_vanish() - Get the data in software portal
- * interrupt status disable register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_ISDR register.
- */
-uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
-
-/**
- * qbman_swp_interrupt_set_vanish() - Set the data in software portal
- * interrupt status disable register.
- * @p: the given software portal object.
- * @mask: The value to set in SWP_IDSR register.
- */
-void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
-
-/**
- * qbman_swp_interrupt_read_status() - Get the data in software portal
- * interrupt status register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_ISR register.
- */
-uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
-
 /**
  * qbman_swp_interrupt_clear_status() - Set the data in software portal
  * interrupt status register.
@@ -127,13 +85,6 @@  uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
 __rte_internal
 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
- * DQRR interrupt threshold register.
- * @p: the given software portal object.
- */
-uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
-
 /**
  * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
  * DQRR interrupt threshold register.
@@ -142,13 +93,6 @@  uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
  */
 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_intr_timeout_read_status() - Get the data in software portal
- * Interrupt Time-Out period register.
- * @p: the given software portal object.
- */
-uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
-
 /**
  * qbman_swp_intr_timeout_write() - Set the data in software portal
  * Interrupt Time-Out period register.
@@ -157,15 +101,6 @@  uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
  */
 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_interrupt_get_trigger() - Get the data in software portal
- * interrupt enable register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_IER register.
- */
-uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
-
 /**
  * qbman_swp_interrupt_set_trigger() - Set the data in software portal
  * interrupt enable register.
@@ -174,15 +109,6 @@  uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
  */
 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
- * interrupt inhibit register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_IIR register.
- */
-int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
-
 /**
  * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
  * interrupt inhibit register.
@@ -268,21 +194,6 @@  int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
 /* Push-mode dequeuing */
 /* ------------------- */
 
-/* The user of a portal can enable and disable push-mode dequeuing of up to 16
- * channels independently. It does not specify this toggling by channel IDs, but
- * rather by specifying the index (from 0 to 15) that has been mapped to the
- * desired channel.
- */
-
-/**
- * qbman_swp_push_get() - Get the push dequeue setup.
- * @s: the software portal object.
- * @channel_idx: the channel index to query.
- * @enabled: returned boolean to show whether the push dequeue is enabled for
- * the given channel.
- */
-void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
-
 /**
  * qbman_swp_push_set() - Enable or disable push dequeue.
  * @s: the software portal object.
@@ -363,17 +274,6 @@  void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
 __rte_internal
 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
 				   uint8_t numframes);
-/**
- * qbman_pull_desc_set_token() - Set dequeue token for pull command
- * @d: the dequeue descriptor
- * @token: the token to be set
- *
- * token is the value that shows up in the dequeue response that can be used to
- * detect when the results have been published. The easiest technique is to zero
- * result "storage" before issuing a dequeue, and use any non-zero 'token' value
- */
-void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
-
 /* Exactly one of the following descriptor "actions" should be set. (Calling any
  * one of these will replace the effect of any prior call to one of these.)
  * - pull dequeue from the given frame queue (FQ)
@@ -387,30 +287,6 @@  void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
 __rte_internal
 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
 
-/**
- * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
- * @wqid: composed of channel id and wqid within the channel.
- * @dct: the dequeue command type.
- */
-void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
-			    enum qbman_pull_type_e dct);
-
-/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
- * dequeues.
- * @chid: the channel id to be dequeued.
- * @dct: the dequeue command type.
- */
-void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
-				 enum qbman_pull_type_e dct);
-
-/**
- * qbman_pull_desc_set_rad() - Decide whether reschedule the fq after dequeue
- *
- * @rad: 1 = Reschedule the FQ after dequeue.
- *	 0 = Allow the FQ to remain active after dequeue.
- */
-void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
-
 /**
  * qbman_swp_pull() - Issue the pull dequeue command
  * @s: the software portal object.
@@ -471,17 +347,6 @@  void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
 __rte_internal
 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
 
-/**
- * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
- * given portal
- * @s: the given portal.
- * @idx: the dqrr index.
- *
- * Return dqrr entry object.
- */
-__rte_internal
-struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
-
 /* ------------------------------------------------- */
 /* Polling user-provided storage for dequeue results */
 /* ------------------------------------------------- */
@@ -549,78 +414,6 @@  static inline int qbman_result_is_SCN(const struct qbman_result *dq)
 	return !qbman_result_is_DQ(dq);
 }
 
-/* Recognise different notification types, only required if the user allows for
- * these to occur, and cares about them when they do.
- */
-
-/**
- * qbman_result_is_FQDAN() - Check for FQ Data Availability
- * @dq: the qbman_result object.
- *
- * Return 1 if this is FQDAN.
- */
-int qbman_result_is_FQDAN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_CDAN() - Check for Channel Data Availability
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is CDAN.
- */
-int qbman_result_is_CDAN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_CSCN() - Check for Congestion State Change
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is CSCN.
- */
-int qbman_result_is_CSCN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is BPSCN.
- */
-int qbman_result_is_BPSCN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is CGCU.
- */
-int qbman_result_is_CGCU(const struct qbman_result *dq);
-
-/* Frame queue state change notifications; (FQDAN in theory counts too as it
- * leaves a FQ parked, but it is primarily a data availability notification)
- */
-
-/**
- * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is FQRN.
- */
-int qbman_result_is_FQRN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is FQRNI.
- */
-int qbman_result_is_FQRNI(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_FQPN() - Check for FQ Park Notification
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is FQPN.
- */
-int qbman_result_is_FQPN(const struct qbman_result *dq);
-
 /* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
  */
 /* FQ empty */
@@ -695,30 +488,6 @@  uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
 __rte_internal
 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
 
-/**
- * qbman_result_DQ_fqid() - Get the fqid in dequeue response
- * @dq: the dequeue result.
- *
- * Return fqid.
- */
-uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
-
-/**
- * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
- * @dq: the dequeue result.
- *
- * Return the byte count remaining in the FQ.
- */
-uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
-
-/**
- * qbman_result_DQ_frame_count - Get the frame count in dequeue response
- * @dq: the dequeue result.
- *
- * Return the frame count remaining in the FQ.
- */
-uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
-
 /**
  * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
  * @dq: the dequeue result.
@@ -780,66 +549,6 @@  uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
 /* Get the CGID from the CSCN */
 #define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
 
-/**
- * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
- * @scn: the state change notification.
- *
- * Return the buffer pool id.
- */
-uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_has_free_bufs() - Check whether there are free
- * buffers in the pool from BPSCN.
- * @scn: the state change notification.
- *
- * Return the number of free buffers.
- */
-int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
- * buffer pool is depleted.
- * @scn: the state change notification.
- *
- * Return the status of buffer pool depletion.
- */
-int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
- * pool is surplus or not.
- * @scn: the state change notification.
- *
- * Return the status of buffer pool surplus.
- */
-int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
- * @scn: the state change notification.
- *
- * Return the BPSCN context.
- */
-uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
-
-/* Parsing CGCU */
-/**
- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
- * @scn: the state change notification.
- *
- * Return the CGCU resource id.
- */
-uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
-
-/**
- * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
- * @scn: the state change notification.
- *
- * Return instantaneous count in the CGCU notification.
- */
-uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
-
 	/************/
 	/* Enqueues */
 	/************/
@@ -916,25 +625,6 @@  __rte_internal
 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
 			   uint16_t opr_id, uint16_t seqnum, int incomplete);
 
-/**
- * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
- * without any enqueue
- * @d: the enqueue descriptor.
- * @opr_id: the order point record id.
- * @seqnum: the order restoration sequence number.
- */
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum);
-
-/**
- * qbman_eq_desc_set_orp_nesn() -  advance NESN (Next Expected Sequence Number)
- * without any enqueue
- * @d: the enqueue descriptor.
- * @opr_id: the order point record id.
- * @seqnum: the order restoration sequence number.
- */
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum);
 /**
  * qbman_eq_desc_set_response() - Set the enqueue response info.
  * @d: the enqueue descriptor
@@ -981,27 +671,6 @@  void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
 __rte_internal
 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
 
-/**
- * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
- * @d: the enqueue descriptor
- * @qdid: the id of the queuing destination to be enqueued.
- * @qd_bin: the queuing destination bin
- * @qd_prio: the queuing destination priority.
- */
-__rte_internal
-void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-			  uint16_t qd_bin, uint8_t qd_prio);
-
-/**
- * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
- * @d: the enqueue descriptor
- * @enable: boolean to enable/disable EQDI
- *
- * Determines whether or not the portal's EQDI interrupt source should be
- * asserted after the enqueue command is completed.
- */
-void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
-
 /**
  * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
  * @d: the enqueue descriptor.
@@ -1060,19 +729,6 @@  uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
 __rte_internal
 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
 
-/**
- * qbman_swp_enqueue() - Issue an enqueue command.
- * @s: the software portal used for enqueue.
- * @d: the enqueue descriptor.
- * @fd: the frame descriptor to be enqueued.
- *
- * Please note that 'fd' should only be NULL if the "action" of the
- * descriptor is "orp_hole" or "orp_nesn".
- *
- * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
- */
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-		      const struct qbman_fd *fd);
 /**
  * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
 				  eq descriptor
@@ -1171,13 +827,6 @@  void qbman_release_desc_clear(struct qbman_release_desc *d);
 __rte_internal
 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
 
-/**
- * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
- * interrupt source should be asserted after the release command is completed.
- * @d: the qbman release descriptor.
- */
-void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-
 /**
  * qbman_swp_release() - Issue a buffer release command.
  * @s: the software portal object.
@@ -1217,116 +866,4 @@  __rte_internal
 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
 		      unsigned int num_buffers);
 
-	/*****************/
-	/* FQ management */
-	/*****************/
-/**
- * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
- * @s: the software portal object.
- * @fqid: the index of frame queue to be scheduled.
- *
- * There are a couple of different ways that a FQ can end up parked state,
- * This schedules it.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
-
-/**
- * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
- * @s: the software portal object.
- * @fqid: the index of frame queue to be forced.
- *
- * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
- * and thus be available for selection by any channel-dequeuing behaviour (push
- * or pull). If the FQ is subsequently "dequeued" from the channel and is still
- * empty at the time this happens, the resulting dq_entry will have no FD.
- * (qbman_result_DQ_fd() will return NULL.)
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
-
-/**
- * These functions change the FQ flow-control stuff between XON/XOFF. (The
- * default is XON.) This setting doesn't affect enqueues to the FQ, just
- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
- * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
- * changed to XOFF after it had already become truly-scheduled to a channel, and
- * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
- * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
- * return NULL.)
- */
-/**
- * qbman_swp_fq_xon() - XON the frame queue.
- * @s: the software portal object.
- * @fqid: the index of frame queue.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
-/**
- * qbman_swp_fq_xoff() - XOFF the frame queue.
- * @s: the software portal object.
- * @fqid: the index of frame queue.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
-
-	/**********************/
-	/* Channel management */
-	/**********************/
-
-/**
- * If the user has been allocated a channel object that is going to generate
- * CDANs to another channel, then these functions will be necessary.
- * CDAN-enabled channels only generate a single CDAN notification, after which
- * it they need to be reenabled before they'll generate another. (The idea is
- * that pull dequeuing will occur in reaction to the CDAN, followed by a
- * reenable step.) Each function generates a distinct command to hardware, so a
- * combination function is provided if the user wishes to modify the "context"
- * (which shows up in each CDAN message) each time they reenable, as a single
- * command to hardware.
- */
-
-/**
- * qbman_swp_CDAN_set_context() - Set CDAN context
- * @s: the software portal object.
- * @channelid: the channel index.
- * @ctx: the context to be set in CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-			       uint64_t ctx);
-
-/**
- * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
- * @s: the software portal object.
- * @channelid: the index of the channel to generate CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
-
-/**
- * qbman_swp_CDAN_disable() - disable CDAN for the channel.
- * @s: the software portal object.
- * @channelid: the index of the channel to generate CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
-
-/**
- * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
- * @s: the software portal object.
- * @channelid: the index of the channel to generate CDAN.
- * @ctx: the context set in CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
-				      uint64_t ctx);
 #endif /* !_FSL_QBMAN_PORTAL_H */
diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c
index 34374ae4b6..2c6a7dcd16 100644
--- a/drivers/bus/fslmc/qbman/qbman_debug.c
+++ b/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -59,8 +59,3 @@  uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
 {
 	return (r->frm_cnt & 0x00FFFFFF);
 }
-
-uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
-{
-	return r->byte_cnt;
-}
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 77c9d508c4..b8bcfb7189 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -82,10 +82,6 @@  qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
 		const struct qbman_eq_desc *d,
 		const struct qbman_fd *fd);
 static int
-qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
-		const struct qbman_eq_desc *d,
-		const struct qbman_fd *fd);
-static int
 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
 		const struct qbman_eq_desc *d,
 		const struct qbman_fd *fd);
@@ -377,80 +373,30 @@  int qbman_swp_update(struct qbman_swp *p, int stash_off)
 	return 0;
 }
 
-void qbman_swp_finish(struct qbman_swp *p)
-{
-#ifdef QBMAN_CHECKING
-	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
-#endif
-	qbman_swp_sys_finish(&p->sys);
-	portal_idx_map[p->desc.idx] = NULL;
-	free(p);
-}
-
-const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
-{
-	return &p->desc;
-}
-
 /**************/
 /* Interrupts */
 /**************/
 
-uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
-}
-
-void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
-{
-	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
-}
-
-uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
-}
-
 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
 }
 
-uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
-}
-
 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
 }
 
-uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
-}
-
 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
 }
 
-uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
-}
-
 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
 }
 
-int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
-}
-
 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
@@ -643,28 +589,6 @@  void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
 		d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
 }
 
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum)
-{
-	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
-	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
-	d->eq.orpid = opr_id;
-	d->eq.seqnum = seqnum;
-	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
-	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
-}
-
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum)
-{
-	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
-	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
-	d->eq.orpid = opr_id;
-	d->eq.seqnum = seqnum;
-	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
-	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
-}
-
 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
 				dma_addr_t storage_phys,
 				int stash)
@@ -684,23 +608,6 @@  void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
 	d->eq.tgtid = fqid;
 }
 
-void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-			  uint16_t qd_bin, uint8_t qd_prio)
-{
-	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
-	d->eq.tgtid = qdid;
-	d->eq.qdbin = qd_bin;
-	d->eq.qpri = qd_prio;
-}
-
-void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
-{
-	if (enable)
-		d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
-	else
-		d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
-}
-
 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
 			   uint8_t dqrr_idx, int park)
 {
@@ -789,13 +696,6 @@  static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
 	return 0;
 }
 
-static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
-					       const struct qbman_eq_desc *d,
-					       const struct qbman_fd *fd)
-{
-	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
-}
-
 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
 					      const struct qbman_eq_desc *d,
 					      const struct qbman_fd *fd)
@@ -873,44 +773,6 @@  static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
 	return 0;
 }
 
-static int qbman_swp_enqueue_ring_mode_cinh_direct(
-		struct qbman_swp *s,
-		const struct qbman_eq_desc *d,
-		const struct qbman_fd *fd)
-{
-	uint32_t *p;
-	const uint32_t *cl = qb_cl(d);
-	uint32_t eqcr_ci, full_mask, half_mask;
-
-	half_mask = (s->eqcr.pi_ci_mask>>1);
-	full_mask = s->eqcr.pi_ci_mask;
-	if (!s->eqcr.available) {
-		eqcr_ci = s->eqcr.ci;
-		s->eqcr.ci = qbman_cinh_read(&s->sys,
-				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
-		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
-				eqcr_ci, s->eqcr.ci);
-		if (!s->eqcr.available)
-			return -EBUSY;
-	}
-
-	p = qbman_cinh_write_start_wo_shadow(&s->sys,
-			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
-	memcpy_byte_by_byte(&p[1], &cl[1], 28);
-	memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
-	lwsync();
-
-	/* Set the verb byte, have to substitute in the valid-bit */
-	p[0] = cl[0] | s->eqcr.pi_vb;
-	s->eqcr.pi++;
-	s->eqcr.pi &= full_mask;
-	s->eqcr.available--;
-	if (!(s->eqcr.pi & half_mask))
-		s->eqcr.pi_vb ^= QB_VALID_BIT;
-
-	return 0;
-}
-
 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
 						const struct qbman_eq_desc *d,
 						const struct qbman_fd *fd)
@@ -949,25 +811,6 @@  static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
 	return 0;
 }
 
-static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
-				       const struct qbman_eq_desc *d,
-				       const struct qbman_fd *fd)
-{
-	if (!s->stash_off)
-		return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
-	else
-		return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
-}
-
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-		      const struct qbman_fd *fd)
-{
-	if (s->sys.eqcr_mode == qman_eqcr_vb_array)
-		return qbman_swp_enqueue_array_mode(s, d, fd);
-	else    /* Use ring mode by default */
-		return qbman_swp_enqueue_ring_mode(s, d, fd);
-}
-
 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
 					     const struct qbman_eq_desc *d,
 					     const struct qbman_fd *fd,
@@ -1769,14 +1612,6 @@  int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
 /* Static (push) dequeue */
 /*************************/
 
-void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
-{
-	uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
-
-	QBMAN_BUG_ON(channel_idx > 15);
-	*enabled = src | (1 << channel_idx);
-}
-
 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
 {
 	uint16_t dqsrc;
@@ -1845,11 +1680,6 @@  void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
 	d->pull.numf = numframes - 1;
 }
 
-void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
-{
-	d->pull.tok = token;
-}
-
 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
 {
 	d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
@@ -1857,34 +1687,6 @@  void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
 	d->pull.dq_src = fqid;
 }
 
-void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
-			    enum qbman_pull_type_e dct)
-{
-	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
-	d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
-	d->pull.dq_src = wqid;
-}
-
-void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
-				 enum qbman_pull_type_e dct)
-{
-	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
-	d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
-	d->pull.dq_src = chid;
-}
-
-void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
-{
-	if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
-		if (rad)
-			d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
-		else
-			d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
-	} else {
-		printf("The RAD feature is not valid when RLS = 0\n");
-	}
-}
-
 static int qbman_swp_pull_direct(struct qbman_swp *s,
 				 struct qbman_pull_desc *d)
 {
@@ -2303,47 +2105,6 @@  int qbman_result_is_DQ(const struct qbman_result *dq)
 	return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
 }
 
-int qbman_result_is_FQDAN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
-}
-
-int qbman_result_is_CDAN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
-}
-
-int qbman_result_is_CSCN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
-		__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
-}
-
-int qbman_result_is_BPSCN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
-}
-
-int qbman_result_is_CGCU(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
-}
-
-int qbman_result_is_FQRN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
-}
-
-int qbman_result_is_FQRNI(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
-}
-
-int qbman_result_is_FQPN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
-}
-
 /*********************************/
 /* Parsing frame dequeue results */
 /*********************************/
@@ -2365,21 +2126,6 @@  uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
 	return dq->dq.oprid;
 }
 
-uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
-{
-	return dq->dq.fqid;
-}
-
-uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
-{
-	return dq->dq.fq_byte_cnt;
-}
-
-uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
-{
-	return dq->dq.fq_frm_cnt;
-}
-
 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
 {
 	return dq->dq.fqd_ctx;
@@ -2408,47 +2154,6 @@  uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
 	return scn->scn.ctx;
 }
 
-/*****************/
-/* Parsing BPSCN */
-/*****************/
-uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
-{
-	return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
-}
-
-int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
-{
-	return !(int)(qbman_result_SCN_state(scn) & 0x1);
-}
-
-int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
-{
-	return (int)(qbman_result_SCN_state(scn) & 0x2);
-}
-
-int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
-{
-	return (int)(qbman_result_SCN_state(scn) & 0x4);
-}
-
-uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
-{
-	return qbman_result_SCN_ctx(scn);
-}
-
-/*****************/
-/* Parsing CGCU  */
-/*****************/
-uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
-{
-	return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
-}
-
-uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
-{
-	return qbman_result_SCN_ctx(scn);
-}
-
 /********************/
 /* Parsing EQ RESP  */
 /********************/
@@ -2492,14 +2197,6 @@  void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
 	d->br.bpid = bpid;
 }
 
-void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
-{
-	if (enable)
-		d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
-	else
-		d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
-}
-
 #define RAR_IDX(rar)     ((rar) & 0x7)
 #define RAR_VB(rar)      ((rar) & 0x80)
 #define RAR_SUCCESS(rar) ((rar) & 0x100)
@@ -2751,60 +2448,6 @@  struct qbman_alt_fq_state_rslt {
 
 #define ALT_FQ_FQID_MASK 0x00FFFFFF
 
-static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
-				  uint8_t alt_fq_verb)
-{
-	struct qbman_alt_fq_state_desc *p;
-	struct qbman_alt_fq_state_rslt *r;
-
-	/* Start the management command */
-	p = qbman_swp_mc_start(s);
-	if (!p)
-		return -EBUSY;
-
-	p->fqid = fqid & ALT_FQ_FQID_MASK;
-
-	/* Complete the management command */
-	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
-	if (!r) {
-		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
-		       alt_fq_verb);
-		return -EIO;
-	}
-
-	/* Decode the outcome */
-	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
-
-	/* Determine success or failure */
-	if (r->rslt != QBMAN_MC_RSLT_OK) {
-		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
-		       fqid, alt_fq_verb, r->rslt);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
-}
-
-int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
-}
-
-int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
-}
-
-int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
-}
-
 /**********************/
 /* Channel management */
 /**********************/
@@ -2834,87 +2477,7 @@  struct qbman_cdan_ctrl_rslt {
 #define CODE_CDAN_WE_EN    0x1
 #define CODE_CDAN_WE_CTX   0x4
 
-static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
-			      uint8_t we_mask, uint8_t cdan_en,
-			      uint64_t ctx)
-{
-	struct qbman_cdan_ctrl_desc *p;
-	struct qbman_cdan_ctrl_rslt *r;
-
-	/* Start the management command */
-	p = qbman_swp_mc_start(s);
-	if (!p)
-		return -EBUSY;
-
-	/* Encode the caller-provided attributes */
-	p->ch = channelid;
-	p->we = we_mask;
-	if (cdan_en)
-		p->ctrl = 1;
-	else
-		p->ctrl = 0;
-	p->cdan_ctx = ctx;
-
-	/* Complete the management command */
-	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
-	if (!r) {
-		pr_err("qbman: wqchan config failed, no response\n");
-		return -EIO;
-	}
-
-	/* Decode the outcome */
-	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
-		     != QBMAN_WQCHAN_CONFIGURE);
-
-	/* Determine success or failure */
-	if (r->rslt != QBMAN_MC_RSLT_OK) {
-		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
-		       channelid, r->rslt);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-			       uint64_t ctx)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_CTX,
-				  0, ctx);
-}
-
-int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_EN,
-				  1, 0);
-}
-
-int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_EN,
-				  0, 0);
-}
-
-int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
-				      uint64_t ctx)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
-				  1, ctx);
-}
-
 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
 {
 	return QBMAN_IDX_FROM_DQRR(dqrr);
 }
-
-struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
-{
-	struct qbman_result *dq;
-
-	dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
-	return dq;
-}
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 37d45dffe5..f6ded1717e 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -170,16 +170,6 @@  struct rte_fslmc_bus {
 __rte_internal
 void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
 
-/**
- * Unregister a DPAA2 driver.
- *
- * @param driver
- *   A pointer to a rte_dpaa2_driver structure describing the driver
- *   to be unregistered.
- */
-__rte_internal
-void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
-
 /** Helper for DPAA2 device registration from driver (eth, crypto) instance */
 #define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
 RTE_INIT(dpaa2initfn_ ##nm) \
diff --git a/drivers/bus/fslmc/version.map b/drivers/bus/fslmc/version.map
index f44c1a7988..a95c0faa00 100644
--- a/drivers/bus/fslmc/version.map
+++ b/drivers/bus/fslmc/version.map
@@ -11,7 +11,6 @@  INTERNAL {
 	dpaa2_affine_qbman_swp;
 	dpaa2_alloc_dpbp_dev;
 	dpaa2_alloc_dq_storage;
-	dpaa2_dpbp_supported;
 	dpaa2_dqrr_size;
 	dpaa2_eqcr_size;
 	dpaa2_free_dpbp_dev;
@@ -28,8 +27,6 @@  INTERNAL {
 	dpbp_get_num_free_bufs;
 	dpbp_open;
 	dpbp_reset;
-	dpci_get_opr;
-	dpci_set_opr;
 	dpci_set_rx_queue;
 	dpcon_get_attributes;
 	dpcon_open;
@@ -61,12 +58,10 @@  INTERNAL {
 	qbman_eq_desc_set_fq;
 	qbman_eq_desc_set_no_orp;
 	qbman_eq_desc_set_orp;
-	qbman_eq_desc_set_qd;
 	qbman_eq_desc_set_response;
 	qbman_eq_desc_set_token;
 	qbman_fq_query_state;
 	qbman_fq_state_frame_count;
-	qbman_get_dqrr_from_idx;
 	qbman_get_dqrr_idx;
 	qbman_pull_desc_clear;
 	qbman_pull_desc_set_fq;
@@ -103,7 +98,6 @@  INTERNAL {
 	rte_dpaa2_intr_disable;
 	rte_dpaa2_intr_enable;
 	rte_fslmc_driver_register;
-	rte_fslmc_driver_unregister;
 	rte_fslmc_get_device_count;
 	rte_fslmc_object_register;
 	rte_global_active_dqs_list;
diff --git a/drivers/bus/ifpga/ifpga_common.c b/drivers/bus/ifpga/ifpga_common.c
index 78e2eaee4e..7281b169d0 100644
--- a/drivers/bus/ifpga/ifpga_common.c
+++ b/drivers/bus/ifpga/ifpga_common.c
@@ -52,29 +52,6 @@  int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
 
 	return 0;
 }
-int ifpga_get_integer64_arg(const char *key __rte_unused,
-	const char *value, void *extra_args)
-{
-	if (!value || !extra_args)
-		return -EINVAL;
-
-	*(uint64_t *)extra_args = strtoull(value, NULL, 0);
-
-	return 0;
-}
-int ifpga_get_unsigned_long(const char *str, int base)
-{
-	unsigned long num;
-	char *end = NULL;
-
-	errno = 0;
-
-	num = strtoul(str, &end, base);
-	if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
-		return -1;
-
-	return num;
-}
 
 int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
 	const struct rte_afu_id *afu_id1)
diff --git a/drivers/bus/ifpga/ifpga_common.h b/drivers/bus/ifpga/ifpga_common.h
index f9254b9d5d..44381eb78d 100644
--- a/drivers/bus/ifpga/ifpga_common.h
+++ b/drivers/bus/ifpga/ifpga_common.h
@@ -9,9 +9,6 @@  int rte_ifpga_get_string_arg(const char *key __rte_unused,
 	const char *value, void *extra_args);
 int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
 	const char *value, void *extra_args);
-int ifpga_get_integer64_arg(const char *key __rte_unused,
-	const char *value, void *extra_args);
-int ifpga_get_unsigned_long(const char *str, int base);
 int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
 	const struct rte_afu_id *afu_id1);
 
diff --git a/drivers/common/dpaax/dpaa_of.c b/drivers/common/dpaax/dpaa_of.c
index bb2c8fc66b..ad96eb0b3d 100644
--- a/drivers/common/dpaax/dpaa_of.c
+++ b/drivers/common/dpaax/dpaa_of.c
@@ -242,33 +242,6 @@  of_init_path(const char *dt_path)
 	return 0;
 }
 
-static void
-destroy_dir(struct dt_dir *d)
-{
-	struct dt_file *f, *tmpf;
-	struct dt_dir *dd, *tmpd;
-
-	list_for_each_entry_safe(f, tmpf, &d->files, node.list) {
-		list_del(&f->node.list);
-		free(f);
-	}
-	list_for_each_entry_safe(dd, tmpd, &d->subdirs, node.list) {
-		destroy_dir(dd);
-		list_del(&dd->node.list);
-		free(dd);
-	}
-}
-
-void
-of_finish(void)
-{
-	DPAAX_HWWARN(!alive, "Double-finish of device-tree driver!");
-
-	destroy_dir(&root_dir);
-	INIT_LIST_HEAD(&linear);
-	alive = 0;
-}
-
 static const struct dt_dir *
 next_linear(const struct dt_dir *f)
 {
diff --git a/drivers/common/dpaax/dpaa_of.h b/drivers/common/dpaax/dpaa_of.h
index aed6bf98b0..0ba3794e9b 100644
--- a/drivers/common/dpaax/dpaa_of.h
+++ b/drivers/common/dpaax/dpaa_of.h
@@ -161,11 +161,6 @@  bool of_device_is_compatible(const struct device_node *dev_node,
 __rte_internal
 int of_init_path(const char *dt_path);
 
-/* of_finish() allows a controlled tear-down of the device-tree layer, eg. if a
- * full reload is desired without a process exit.
- */
-void of_finish(void);
-
 /* Use of this wrapper is recommended. */
 static inline int of_init(void)
 {
diff --git a/drivers/common/dpaax/dpaax_iova_table.c b/drivers/common/dpaax/dpaax_iova_table.c
index 91bee65e7b..357e62c164 100644
--- a/drivers/common/dpaax/dpaax_iova_table.c
+++ b/drivers/common/dpaax/dpaax_iova_table.c
@@ -346,45 +346,6 @@  dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length)
 	return 0;
 }
 
-/* dpaax_iova_table_dump
- * Dump the table, with its entries, on screen. Only works in Debug Mode
- * Not for weak hearted - the tables can get quite large
- */
-void
-dpaax_iova_table_dump(void)
-{
-	unsigned int i, j;
-	struct dpaax_iovat_element *entry;
-
-	/* In case DEBUG is not enabled, some 'if' conditions might misbehave
-	 * as they have nothing else in them  except a DPAAX_DEBUG() which if
-	 * tuned out would leave 'if' naked.
-	 */
-	if (rte_log_get_global_level() < RTE_LOG_DEBUG) {
-		DPAAX_ERR("Set log level to Debug for PA->Table dump!");
-		return;
-	}
-
-	DPAAX_DEBUG(" === Start of PA->VA Translation Table ===");
-	if (dpaax_iova_table_p == NULL)
-		DPAAX_DEBUG("\tNULL");
-
-	entry = dpaax_iova_table_p->entries;
-	for (i = 0; i < dpaax_iova_table_p->count; i++) {
-		DPAAX_DEBUG("\t(%16i),(%16"PRIu64"),(%16zu),(%16p)",
-			    i, entry[i].start, entry[i].len, entry[i].pages);
-		DPAAX_DEBUG("\t\t          (PA),          (VA)");
-		for (j = 0; j < (entry->len/DPAAX_MEM_SPLIT); j++) {
-			if (entry[i].pages[j] == 0)
-				continue;
-			DPAAX_DEBUG("\t\t(%16"PRIx64"),(%16"PRIx64")",
-				    (entry[i].start + (j * sizeof(uint64_t))),
-				    entry[i].pages[j]);
-		}
-	}
-	DPAAX_DEBUG(" === End of PA->VA Translation Table ===");
-}
-
 static void
 dpaax_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
 		  void *arg __rte_unused)
diff --git a/drivers/common/dpaax/dpaax_iova_table.h b/drivers/common/dpaax/dpaax_iova_table.h
index 230fba8ba0..8c3ce45f6a 100644
--- a/drivers/common/dpaax/dpaax_iova_table.h
+++ b/drivers/common/dpaax/dpaax_iova_table.h
@@ -67,8 +67,6 @@  __rte_internal
 void dpaax_iova_table_depopulate(void);
 __rte_internal
 int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length);
-__rte_internal
-void dpaax_iova_table_dump(void);
 
 static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __rte_hot;
 
diff --git a/drivers/common/dpaax/version.map b/drivers/common/dpaax/version.map
index ee1ca6801c..7390954793 100644
--- a/drivers/common/dpaax/version.map
+++ b/drivers/common/dpaax/version.map
@@ -2,7 +2,6 @@  INTERNAL {
 	global:
 
 	dpaax_iova_table_depopulate;
-	dpaax_iova_table_dump;
 	dpaax_iova_table_p;
 	dpaax_iova_table_populate;
 	dpaax_iova_table_update;
diff --git a/drivers/common/iavf/iavf_common.c b/drivers/common/iavf/iavf_common.c
index c951b7d787..025c9e9ece 100644
--- a/drivers/common/iavf/iavf_common.c
+++ b/drivers/common/iavf/iavf_common.c
@@ -43,214 +43,6 @@  enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
 	return status;
 }
 
-/**
- * iavf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
-{
-	switch (aq_err) {
-	case IAVF_AQ_RC_OK:
-		return "OK";
-	case IAVF_AQ_RC_EPERM:
-		return "IAVF_AQ_RC_EPERM";
-	case IAVF_AQ_RC_ENOENT:
-		return "IAVF_AQ_RC_ENOENT";
-	case IAVF_AQ_RC_ESRCH:
-		return "IAVF_AQ_RC_ESRCH";
-	case IAVF_AQ_RC_EINTR:
-		return "IAVF_AQ_RC_EINTR";
-	case IAVF_AQ_RC_EIO:
-		return "IAVF_AQ_RC_EIO";
-	case IAVF_AQ_RC_ENXIO:
-		return "IAVF_AQ_RC_ENXIO";
-	case IAVF_AQ_RC_E2BIG:
-		return "IAVF_AQ_RC_E2BIG";
-	case IAVF_AQ_RC_EAGAIN:
-		return "IAVF_AQ_RC_EAGAIN";
-	case IAVF_AQ_RC_ENOMEM:
-		return "IAVF_AQ_RC_ENOMEM";
-	case IAVF_AQ_RC_EACCES:
-		return "IAVF_AQ_RC_EACCES";
-	case IAVF_AQ_RC_EFAULT:
-		return "IAVF_AQ_RC_EFAULT";
-	case IAVF_AQ_RC_EBUSY:
-		return "IAVF_AQ_RC_EBUSY";
-	case IAVF_AQ_RC_EEXIST:
-		return "IAVF_AQ_RC_EEXIST";
-	case IAVF_AQ_RC_EINVAL:
-		return "IAVF_AQ_RC_EINVAL";
-	case IAVF_AQ_RC_ENOTTY:
-		return "IAVF_AQ_RC_ENOTTY";
-	case IAVF_AQ_RC_ENOSPC:
-		return "IAVF_AQ_RC_ENOSPC";
-	case IAVF_AQ_RC_ENOSYS:
-		return "IAVF_AQ_RC_ENOSYS";
-	case IAVF_AQ_RC_ERANGE:
-		return "IAVF_AQ_RC_ERANGE";
-	case IAVF_AQ_RC_EFLUSHED:
-		return "IAVF_AQ_RC_EFLUSHED";
-	case IAVF_AQ_RC_BAD_ADDR:
-		return "IAVF_AQ_RC_BAD_ADDR";
-	case IAVF_AQ_RC_EMODE:
-		return "IAVF_AQ_RC_EMODE";
-	case IAVF_AQ_RC_EFBIG:
-		return "IAVF_AQ_RC_EFBIG";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
-	return hw->err_str;
-}
-
-/**
- * iavf_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
-{
-	switch (stat_err) {
-	case IAVF_SUCCESS:
-		return "OK";
-	case IAVF_ERR_NVM:
-		return "IAVF_ERR_NVM";
-	case IAVF_ERR_NVM_CHECKSUM:
-		return "IAVF_ERR_NVM_CHECKSUM";
-	case IAVF_ERR_PHY:
-		return "IAVF_ERR_PHY";
-	case IAVF_ERR_CONFIG:
-		return "IAVF_ERR_CONFIG";
-	case IAVF_ERR_PARAM:
-		return "IAVF_ERR_PARAM";
-	case IAVF_ERR_MAC_TYPE:
-		return "IAVF_ERR_MAC_TYPE";
-	case IAVF_ERR_UNKNOWN_PHY:
-		return "IAVF_ERR_UNKNOWN_PHY";
-	case IAVF_ERR_LINK_SETUP:
-		return "IAVF_ERR_LINK_SETUP";
-	case IAVF_ERR_ADAPTER_STOPPED:
-		return "IAVF_ERR_ADAPTER_STOPPED";
-	case IAVF_ERR_INVALID_MAC_ADDR:
-		return "IAVF_ERR_INVALID_MAC_ADDR";
-	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
-		return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
-	case IAVF_ERR_MASTER_REQUESTS_PENDING:
-		return "IAVF_ERR_MASTER_REQUESTS_PENDING";
-	case IAVF_ERR_INVALID_LINK_SETTINGS:
-		return "IAVF_ERR_INVALID_LINK_SETTINGS";
-	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
-		return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
-	case IAVF_ERR_RESET_FAILED:
-		return "IAVF_ERR_RESET_FAILED";
-	case IAVF_ERR_SWFW_SYNC:
-		return "IAVF_ERR_SWFW_SYNC";
-	case IAVF_ERR_NO_AVAILABLE_VSI:
-		return "IAVF_ERR_NO_AVAILABLE_VSI";
-	case IAVF_ERR_NO_MEMORY:
-		return "IAVF_ERR_NO_MEMORY";
-	case IAVF_ERR_BAD_PTR:
-		return "IAVF_ERR_BAD_PTR";
-	case IAVF_ERR_RING_FULL:
-		return "IAVF_ERR_RING_FULL";
-	case IAVF_ERR_INVALID_PD_ID:
-		return "IAVF_ERR_INVALID_PD_ID";
-	case IAVF_ERR_INVALID_QP_ID:
-		return "IAVF_ERR_INVALID_QP_ID";
-	case IAVF_ERR_INVALID_CQ_ID:
-		return "IAVF_ERR_INVALID_CQ_ID";
-	case IAVF_ERR_INVALID_CEQ_ID:
-		return "IAVF_ERR_INVALID_CEQ_ID";
-	case IAVF_ERR_INVALID_AEQ_ID:
-		return "IAVF_ERR_INVALID_AEQ_ID";
-	case IAVF_ERR_INVALID_SIZE:
-		return "IAVF_ERR_INVALID_SIZE";
-	case IAVF_ERR_INVALID_ARP_INDEX:
-		return "IAVF_ERR_INVALID_ARP_INDEX";
-	case IAVF_ERR_INVALID_FPM_FUNC_ID:
-		return "IAVF_ERR_INVALID_FPM_FUNC_ID";
-	case IAVF_ERR_QP_INVALID_MSG_SIZE:
-		return "IAVF_ERR_QP_INVALID_MSG_SIZE";
-	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
-		return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
-	case IAVF_ERR_INVALID_FRAG_COUNT:
-		return "IAVF_ERR_INVALID_FRAG_COUNT";
-	case IAVF_ERR_QUEUE_EMPTY:
-		return "IAVF_ERR_QUEUE_EMPTY";
-	case IAVF_ERR_INVALID_ALIGNMENT:
-		return "IAVF_ERR_INVALID_ALIGNMENT";
-	case IAVF_ERR_FLUSHED_QUEUE:
-		return "IAVF_ERR_FLUSHED_QUEUE";
-	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
-		return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
-	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
-		return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
-	case IAVF_ERR_TIMEOUT:
-		return "IAVF_ERR_TIMEOUT";
-	case IAVF_ERR_OPCODE_MISMATCH:
-		return "IAVF_ERR_OPCODE_MISMATCH";
-	case IAVF_ERR_CQP_COMPL_ERROR:
-		return "IAVF_ERR_CQP_COMPL_ERROR";
-	case IAVF_ERR_INVALID_VF_ID:
-		return "IAVF_ERR_INVALID_VF_ID";
-	case IAVF_ERR_INVALID_HMCFN_ID:
-		return "IAVF_ERR_INVALID_HMCFN_ID";
-	case IAVF_ERR_BACKING_PAGE_ERROR:
-		return "IAVF_ERR_BACKING_PAGE_ERROR";
-	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
-		return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
-	case IAVF_ERR_INVALID_PBLE_INDEX:
-		return "IAVF_ERR_INVALID_PBLE_INDEX";
-	case IAVF_ERR_INVALID_SD_INDEX:
-		return "IAVF_ERR_INVALID_SD_INDEX";
-	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
-		return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
-	case IAVF_ERR_INVALID_SD_TYPE:
-		return "IAVF_ERR_INVALID_SD_TYPE";
-	case IAVF_ERR_MEMCPY_FAILED:
-		return "IAVF_ERR_MEMCPY_FAILED";
-	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
-		return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
-	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
-		return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
-	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
-		return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
-	case IAVF_ERR_SRQ_ENABLED:
-		return "IAVF_ERR_SRQ_ENABLED";
-	case IAVF_ERR_ADMIN_QUEUE_ERROR:
-		return "IAVF_ERR_ADMIN_QUEUE_ERROR";
-	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
-		return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
-	case IAVF_ERR_BUF_TOO_SHORT:
-		return "IAVF_ERR_BUF_TOO_SHORT";
-	case IAVF_ERR_ADMIN_QUEUE_FULL:
-		return "IAVF_ERR_ADMIN_QUEUE_FULL";
-	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
-		return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
-	case IAVF_ERR_BAD_IWARP_CQE:
-		return "IAVF_ERR_BAD_IWARP_CQE";
-	case IAVF_ERR_NVM_BLANK_MODE:
-		return "IAVF_ERR_NVM_BLANK_MODE";
-	case IAVF_ERR_NOT_IMPLEMENTED:
-		return "IAVF_ERR_NOT_IMPLEMENTED";
-	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
-		return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
-	case IAVF_ERR_DIAG_TEST_FAILED:
-		return "IAVF_ERR_DIAG_TEST_FAILED";
-	case IAVF_ERR_NOT_READY:
-		return "IAVF_ERR_NOT_READY";
-	case IAVF_NOT_SUPPORTED:
-		return "IAVF_NOT_SUPPORTED";
-	case IAVF_ERR_FIRMWARE_API_VERSION:
-		return "IAVF_ERR_FIRMWARE_API_VERSION";
-	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
-		return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
-	return hw->err_str;
-}
-
 /**
  * iavf_debug_aq
  * @hw: debug mask related to admin queue
@@ -362,164 +154,6 @@  enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw,
 	return status;
 }
 
-/**
- * iavf_aq_get_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- * @set: set true to set the table, false to get the table
- *
- * Internal function to get or set RSS look up table
- **/
-STATIC enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
-						u16 vsi_id, bool pf_lut,
-						u8 *lut, u16 lut_size,
-						bool set)
-{
-	enum iavf_status status;
-	struct iavf_aq_desc desc;
-	struct iavf_aqc_get_set_rss_lut *cmd_resp =
-		   (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
-
-	if (set)
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_set_rss_lut);
-	else
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_get_rss_lut);
-
-	/* Indirect command */
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
-
-	cmd_resp->vsi_id =
-			CPU_TO_LE16((u16)((vsi_id <<
-					  IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
-					  IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
-	cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
-
-	if (pf_lut)
-		cmd_resp->flags |= CPU_TO_LE16((u16)
-					((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-	else
-		cmd_resp->flags |= CPU_TO_LE16((u16)
-					((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-
-	status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
-
-	return status;
-}
-
-/**
- * iavf_aq_get_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * get the RSS lookup table, PF or VSI type
- **/
-enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
-				     bool pf_lut, u8 *lut, u16 lut_size)
-{
-	return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
-				       false);
-}
-
-/**
- * iavf_aq_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * set the RSS lookup table, PF or VSI type
- **/
-enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
-				     bool pf_lut, u8 *lut, u16 lut_size)
-{
-	return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
-}
-
-/**
- * iavf_aq_get_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- * @set: set true to set the key, false to get the key
- *
- * get the RSS key per VSI
- **/
-STATIC enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw,
-				      u16 vsi_id,
-				      struct iavf_aqc_get_set_rss_key_data *key,
-				      bool set)
-{
-	enum iavf_status status;
-	struct iavf_aq_desc desc;
-	struct iavf_aqc_get_set_rss_key *cmd_resp =
-			(struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
-	u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
-
-	if (set)
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_set_rss_key);
-	else
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_get_rss_key);
-
-	/* Indirect command */
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
-
-	cmd_resp->vsi_id =
-			CPU_TO_LE16((u16)((vsi_id <<
-					  IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
-					  IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
-	cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
-
-	status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
-
-	return status;
-}
-
-/**
- * iavf_aq_get_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- **/
-enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
-				     u16 vsi_id,
-				     struct iavf_aqc_get_set_rss_key_data *key)
-{
-	return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
-}
-
-/**
- * iavf_aq_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- * set the RSS key per VSI
- **/
-enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
-				     u16 vsi_id,
-				     struct iavf_aqc_get_set_rss_key_data *key)
-{
-	return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
-}
-
 /* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
@@ -885,30 +519,6 @@  struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
 	IAVF_PTT_UNUSED_ENTRY(255)
 };
 
-/**
- * iavf_validate_mac_addr - Validate unicast MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
-{
-	enum iavf_status status = IAVF_SUCCESS;
-
-	DEBUGFUNC("iavf_validate_mac_addr");
-
-	/* Broadcast addresses ARE multicast addresses
-	 * Make sure it is not a multicast address
-	 * Reject the zero address
-	 */
-	if (IAVF_IS_MULTICAST(mac_addr) ||
-	    (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-	      mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
-		status = IAVF_ERR_INVALID_MAC_ADDR;
-
-	return status;
-}
-
 /**
  * iavf_aq_send_msg_to_pf
  * @hw: pointer to the hardware structure
@@ -989,38 +599,3 @@  void iavf_vf_parse_hw_config(struct iavf_hw *hw,
 		vsi_res++;
 	}
 }
-
-/**
- * iavf_vf_reset
- * @hw: pointer to the hardware structure
- *
- * Send a VF_RESET message to the PF. Does not wait for response from PF
- * as none will be forthcoming. Immediately after calling this function,
- * the admin queue should be shut down and (optionally) reinitialized.
- **/
-enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
-{
-	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
-				      IAVF_SUCCESS, NULL, 0, NULL);
-}
-
-/**
-* iavf_aq_clear_all_wol_filters
-* @hw: pointer to the hw struct
-* @cmd_details: pointer to command details structure or NULL
-*
-* Get information for the reason of a Wake Up event
-**/
-enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
-			struct iavf_asq_cmd_details *cmd_details)
-{
-	struct iavf_aq_desc desc;
-	enum iavf_status status;
-
-	iavf_fill_default_direct_cmd_desc(&desc,
-					  iavf_aqc_opc_clear_all_wol_filters);
-
-	status = iavf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
diff --git a/drivers/common/iavf/iavf_prototype.h b/drivers/common/iavf/iavf_prototype.h
index f34e77db0f..5d5deacfe2 100644
--- a/drivers/common/iavf/iavf_prototype.h
+++ b/drivers/common/iavf/iavf_prototype.h
@@ -30,7 +30,6 @@  enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw);
 u16 iavf_clean_asq(struct iavf_hw *hw);
 void iavf_free_adminq_asq(struct iavf_hw *hw);
 void iavf_free_adminq_arq(struct iavf_hw *hw);
-enum iavf_status iavf_validate_mac_addr(u8 *mac_addr);
 void iavf_adminq_init_ring_data(struct iavf_hw *hw);
 __rte_internal
 enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
@@ -51,19 +50,6 @@  void iavf_idle_aq(struct iavf_hw *hw);
 bool iavf_check_asq_alive(struct iavf_hw *hw);
 enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
 
-enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
-				     bool pf_lut, u8 *lut, u16 lut_size);
-enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
-				     bool pf_lut, u8 *lut, u16 lut_size);
-enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
-				     u16 seid,
-				     struct iavf_aqc_get_set_rss_key_data *key);
-enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
-				     u16 seid,
-				     struct iavf_aqc_get_set_rss_key_data *key);
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
-const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
-
 __rte_internal
 enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
 
@@ -83,7 +69,6 @@  void iavf_destroy_spinlock(struct iavf_spinlock *sp);
 __rte_internal
 void iavf_vf_parse_hw_config(struct iavf_hw *hw,
 			     struct virtchnl_vf_resource *msg);
-enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
 __rte_internal
 enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
 				enum virtchnl_ops v_opcode,
@@ -95,6 +80,4 @@  enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
 				    void *buff, u16 *ret_buff_size,
 				    u8 *ret_next_table, u32 *ret_next_index,
 				    struct iavf_asq_cmd_details *cmd_details);
-enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
-			struct iavf_asq_cmd_details *cmd_details);
 #endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/drivers/common/octeontx2/otx2_mbox.c b/drivers/common/octeontx2/otx2_mbox.c
index 6df1e8ea63..e65fe602f7 100644
--- a/drivers/common/octeontx2/otx2_mbox.c
+++ b/drivers/common/octeontx2/otx2_mbox.c
@@ -381,19 +381,6 @@  otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
 	return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
 }
 
-int
-otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
-{
-	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
-	int avail;
-
-	rte_spinlock_lock(&mdev->mbox_lock);
-	avail = mbox->tx_size - mdev->msg_size - msgs_offset();
-	rte_spinlock_unlock(&mdev->mbox_lock);
-
-	return avail;
-}
-
 int
 otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
 {
diff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h
index f6d884c198..7d9c018597 100644
--- a/drivers/common/octeontx2/otx2_mbox.h
+++ b/drivers/common/octeontx2/otx2_mbox.h
@@ -1785,7 +1785,6 @@  int otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg);
 __rte_internal
 int otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
 			  uint32_t tmo);
-int otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid);
 __rte_internal
 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
 					    int size, int size_rsp);
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_pmd.c b/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
index aa7fad6d70..d23e58ff6d 100644
--- a/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
+++ b/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
@@ -399,25 +399,6 @@  bcmfs_sym_dev_create(struct bcmfs_device *fsdev)
 	return 0;
 }
 
-int
-bcmfs_sym_dev_destroy(struct bcmfs_device *fsdev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (fsdev == NULL)
-		return -ENODEV;
-	if (fsdev->sym_dev == NULL)
-		return 0;
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(fsdev->sym_dev->sym_dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	fsdev->sym_rte_dev.name = NULL;
-	fsdev->sym_dev = NULL;
-
-	return 0;
-}
-
 static struct cryptodev_driver bcmfs_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(bcmfs_crypto_drv,
 			       cryptodev_bcmfs_sym_driver,
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_pmd.h b/drivers/crypto/bcmfs/bcmfs_sym_pmd.h
index 65d7046090..d9ddd024ff 100644
--- a/drivers/crypto/bcmfs/bcmfs_sym_pmd.h
+++ b/drivers/crypto/bcmfs/bcmfs_sym_pmd.h
@@ -32,7 +32,4 @@  struct bcmfs_sym_dev_private {
 int
 bcmfs_sym_dev_create(struct bcmfs_device *fdev);
 
-int
-bcmfs_sym_dev_destroy(struct bcmfs_device *fdev);
-
 #endif /* _BCMFS_SYM_PMD_H_ */
diff --git a/drivers/crypto/bcmfs/bcmfs_vfio.c b/drivers/crypto/bcmfs/bcmfs_vfio.c
index dc2def580f..81994d9d56 100644
--- a/drivers/crypto/bcmfs/bcmfs_vfio.c
+++ b/drivers/crypto/bcmfs/bcmfs_vfio.c
@@ -74,34 +74,10 @@  bcmfs_attach_vfio(struct bcmfs_device *dev)
 
 	return 0;
 }
-
-void
-bcmfs_release_vfio(struct bcmfs_device *dev)
-{
-	int ret;
-
-	if (dev == NULL)
-		return;
-
-	/* unmap the addr */
-	munmap(dev->mmap_addr, dev->mmap_size);
-	/* release the device */
-	ret = rte_vfio_release_device(dev->dirname, dev->name,
-				      dev->vfio_dev_fd);
-	if (ret < 0) {
-		BCMFS_LOG(ERR, "cannot release device");
-		return;
-	}
-}
 #else
 int
 bcmfs_attach_vfio(struct bcmfs_device *dev __rte_unused)
 {
 	return -1;
 }
-
-void
-bcmfs_release_vfio(struct bcmfs_device *dev __rte_unused)
-{
-}
 #endif
diff --git a/drivers/crypto/bcmfs/bcmfs_vfio.h b/drivers/crypto/bcmfs/bcmfs_vfio.h
index d0fdf6483f..4177bc1fee 100644
--- a/drivers/crypto/bcmfs/bcmfs_vfio.h
+++ b/drivers/crypto/bcmfs/bcmfs_vfio.h
@@ -10,8 +10,4 @@ 
 int
 bcmfs_attach_vfio(struct bcmfs_device *dev);
 
-/* Release the bcmfs device from vfio */
-void
-bcmfs_release_vfio(struct bcmfs_device *dev);
-
 #endif /* _BCMFS_VFIO_H_ */
diff --git a/drivers/crypto/caam_jr/caam_jr_pvt.h b/drivers/crypto/caam_jr/caam_jr_pvt.h
index 552d6b9b1b..60cf1fa45b 100644
--- a/drivers/crypto/caam_jr/caam_jr_pvt.h
+++ b/drivers/crypto/caam_jr/caam_jr_pvt.h
@@ -222,7 +222,6 @@  struct uio_job_ring {
 	int uio_minor_number;
 };
 
-int sec_cleanup(void);
 int sec_configure(void);
 void sec_uio_job_rings_init(void);
 struct uio_job_ring *config_job_ring(void);
diff --git a/drivers/crypto/caam_jr/caam_jr_uio.c b/drivers/crypto/caam_jr/caam_jr_uio.c
index e4ee102344..60c551e4f2 100644
--- a/drivers/crypto/caam_jr/caam_jr_uio.c
+++ b/drivers/crypto/caam_jr/caam_jr_uio.c
@@ -471,34 +471,6 @@  sec_configure(void)
 	return config_jr_no;
 }
 
-int
-sec_cleanup(void)
-{
-	int i;
-	struct uio_job_ring *job_ring;
-
-	for (i = 0; i < g_uio_jr_num; i++) {
-		job_ring = &g_uio_job_ring[i];
-		/* munmap SEC's register memory */
-		if (job_ring->register_base_addr) {
-			munmap(job_ring->register_base_addr,
-				job_ring->map_size);
-			job_ring->register_base_addr = NULL;
-		}
-		/* I need to close the fd after shutdown UIO commands need to be
-		 * sent using the fd
-		 */
-		if (job_ring->uio_fd != -1) {
-			CAAM_JR_INFO(
-			"Closed device file for job ring %d , fd = %d",
-			job_ring->jr_id, job_ring->uio_fd);
-			close(job_ring->uio_fd);
-			job_ring->uio_fd = -1;
-		}
-	}
-	return 0;
-}
-
 void
 sec_uio_job_rings_init(void)
 {
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 664ddc1747..fc34b6a639 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -62,26 +62,6 @@  ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
 	return NULL;
 }
 
-int
-ccp_read_hwrng(uint32_t *value)
-{
-	struct ccp_device *dev;
-
-	TAILQ_FOREACH(dev, &ccp_list, next) {
-		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
-
-		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
-			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
-			if (*value) {
-				dev->hwrng_retries = 0;
-				return 0;
-			}
-		}
-		dev->hwrng_retries = 0;
-	}
-	return -1;
-}
-
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
@@ -180,28 +160,6 @@  ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
 	}
 }
 
-static void
-ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
-{
-	unsigned long *p = map + WORD_OFFSET(start);
-	const unsigned int size = start + len;
-	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
-	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
-
-	while (len - bits_to_clear >= 0) {
-		*p &= ~mask_to_clear;
-		len -= bits_to_clear;
-		bits_to_clear = BITS_PER_WORD;
-		mask_to_clear = ~0UL;
-		p++;
-	}
-	if (len) {
-		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
-		*p &= ~mask_to_clear;
-	}
-}
-
-
 static unsigned long
 _ccp_find_next_bit(const unsigned long *addr,
 		   unsigned long nbits,
@@ -312,29 +270,6 @@  ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
 	return 0;
 }
 
-static void __rte_unused
-ccp_lsb_free(struct ccp_queue *cmd_q,
-	     unsigned int start,
-	     unsigned int count)
-{
-	int lsbno = start / LSB_SIZE;
-
-	if (!start)
-		return;
-
-	if (cmd_q->lsb == lsbno) {
-		/* An entry from the private LSB */
-		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
-	} else {
-		/* From the shared LSBs */
-		struct ccp_device *ccp = cmd_q->dev;
-
-		rte_spinlock_lock(&ccp->lsb_lock);
-		ccp_bitmap_clear(ccp->lsbmap, start, count);
-		rte_spinlock_unlock(&ccp->lsb_lock);
-	}
-}
-
 static int
 ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
 {
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index 37e04218ce..8bfce5d9fb 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -484,12 +484,4 @@  int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
-/**
- * read hwrng value
- *
- * @param trng_value data pointer to write RNG value
- * @return 0 on success otherwise -1
- */
-int ccp_read_hwrng(uint32_t *trng_value);
-
 #endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 87e0defdc6..52bfd72f50 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -80,96 +80,6 @@  int dpseci_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpseci_create() - Create the DPSECI object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPSECI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpseci_cfg *cfg,
-		  uint32_t *obj_id)
-{
-	struct dpseci_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err, i;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpseci_cmd_create *)cmd.params;
-	for (i = 0; i < 8; i++)
-		cmd_params->priorities[i] = cfg->priorities[i];
-	for (i = 0; i < 8; i++)
-		cmd_params->priorities2[i] = cfg->priorities[8 + i];
-	cmd_params->num_tx_queues = cfg->num_tx_queues;
-	cmd_params->num_rx_queues = cfg->num_rx_queues;
-	cmd_params->options = cpu_to_le32(cfg->options);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpseci_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id)
-{
-	struct dpseci_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
-	cmd_params->dpseci_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -216,41 +126,6 @@  int dpseci_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpseci_is_enabled() - Check if the DPSECI is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en)
-{
-	struct dpseci_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
-	*en = dpseci_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -446,59 +321,6 @@  int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @attr:	Returned SEC attributes
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			struct dpseci_sec_attr *attr)
-{
-	struct dpseci_rsp_get_sec_attr *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
-	attr->ip_id = le16_to_cpu(rsp_params->ip_id);
-	attr->major_rev = rsp_params->major_rev;
-	attr->minor_rev = rsp_params->minor_rev;
-	attr->era = rsp_params->era;
-	attr->deco_num = rsp_params->deco_num;
-	attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
-	attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
-	attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
-	attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
-	attr->crc_acc_num = rsp_params->crc_acc_num;
-	attr->pk_acc_num = rsp_params->pk_acc_num;
-	attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
-	attr->rng_acc_num = rsp_params->rng_acc_num;
-	attr->md_acc_num = rsp_params->md_acc_num;
-	attr->arc4_acc_num = rsp_params->arc4_acc_num;
-	attr->des_acc_num = rsp_params->des_acc_num;
-	attr->aes_acc_num = rsp_params->aes_acc_num;
-	attr->ccha_acc_num = rsp_params->ccha_acc_num;
-	attr->ptha_acc_num = rsp_params->ptha_acc_num;
-
-	return 0;
-}
-
 /**
  * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -540,226 +362,3 @@  int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
-
-/**
- * dpseci_get_api_version() - Get Data Path SEC Interface API version
- * @mc_io:  Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path sec API
- * @minor_ver:	Minor version of data path sec API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpseci_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver)
-{
-	struct dpseci_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
-/**
- * dpseci_set_opr() - Set Order Restoration configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @index:	The queue index
- * @options:	Configuration mode options
- *			can be OPR_OPT_CREATE or OPR_OPT_RETIRE
- * @cfg:	Configuration options for the OPR
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_set_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   uint8_t options,
-		   struct opr_cfg *cfg)
-{
-	struct dpseci_cmd_set_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpseci_cmd_set_opr *)cmd.params;
-	cmd_params->index = index;
-	cmd_params->options = options;
-	cmd_params->oloe = cfg->oloe;
-	cmd_params->oeane = cfg->oeane;
-	cmd_params->olws = cfg->olws;
-	cmd_params->oa = cfg->oa;
-	cmd_params->oprrws = cfg->oprrws;
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpseci_get_opr() - Retrieve Order Restoration config and query.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @index:	The queue index
- * @cfg:	Returned OPR configuration
- * @qry:	Returned OPR query
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dpseci_get_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   struct opr_cfg *cfg,
-		   struct opr_qry *qry)
-{
-	struct dpseci_rsp_get_opr *rsp_params;
-	struct dpseci_cmd_get_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpseci_cmd_get_opr *)cmd.params;
-	cmd_params->index = index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
-	cfg->oloe = rsp_params->oloe;
-	cfg->oeane = rsp_params->oeane;
-	cfg->olws = rsp_params->olws;
-	cfg->oa = rsp_params->oa;
-	cfg->oprrws = rsp_params->oprrws;
-	qry->rip = dpseci_get_field(rsp_params->flags, RIP);
-	qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
-	qry->nesn = le16_to_cpu(rsp_params->nesn);
-	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-	qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
-	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-	qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
-	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-
-	return 0;
-}
-
-/**
- * dpseci_set_congestion_notification() - Set congestion group
- *	notification configuration
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @cfg:	congestion notification configuration
- *
- * Return:	'0' on success, error code otherwise
- */
-int dpseci_set_congestion_notification(
-			struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			const struct dpseci_congestion_notification_cfg *cfg)
-{
-	struct dpseci_cmd_set_congestion_notification *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(
-			DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
-			cmd_flags,
-			token);
-
-	cmd_params =
-		(struct dpseci_cmd_set_congestion_notification *)cmd.params;
-	cmd_params->dest_id = cfg->dest_cfg.dest_id;
-	cmd_params->dest_priority = cfg->dest_cfg.priority;
-	cmd_params->message_ctx = cfg->message_ctx;
-	cmd_params->message_iova = cfg->message_iova;
-	cmd_params->notification_mode = cfg->notification_mode;
-	cmd_params->threshold_entry = cfg->threshold_entry;
-	cmd_params->threshold_exit = cfg->threshold_exit;
-	dpseci_set_field(cmd_params->type_units,
-			 DEST_TYPE,
-			 cfg->dest_cfg.dest_type);
-	dpseci_set_field(cmd_params->type_units,
-			 CG_UNITS,
-			 cfg->units);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpseci_get_congestion_notification() - Get congestion group
- *	notification configuration
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @cfg:	congestion notification configuration
- *
- * Return:	'0' on success, error code otherwise
- */
-int dpseci_get_congestion_notification(
-				struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				struct dpseci_congestion_notification_cfg *cfg)
-{
-	struct dpseci_cmd_set_congestion_notification *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(
-			DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
-			cmd_flags,
-			token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params =
-		(struct dpseci_cmd_set_congestion_notification *)cmd.params;
-
-	cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-	cfg->dest_cfg.priority = rsp_params->dest_priority;
-	cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-	cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-	cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-	cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-	cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-	cfg->units = dpseci_get_field(rsp_params->type_units, CG_UNITS);
-	cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->type_units,
-						DEST_TYPE);
-
-	return 0;
-}
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 279e8f4d4a..fbbfd40815 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -61,17 +61,6 @@  struct dpseci_cfg {
 	uint8_t priorities[DPSECI_MAX_QUEUE_NUM];
 };
 
-int dpseci_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpseci_cfg *cfg,
-		  uint32_t *obj_id);
-
-int dpseci_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id);
-
 int dpseci_enable(struct fsl_mc_io *mc_io,
 		  uint32_t cmd_flags,
 		  uint16_t token);
@@ -80,11 +69,6 @@  int dpseci_disable(struct fsl_mc_io *mc_io,
 		   uint32_t cmd_flags,
 		   uint16_t token);
 
-int dpseci_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en);
-
 int dpseci_reset(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token);
@@ -287,11 +271,6 @@  struct dpseci_sec_attr {
 	uint8_t ptha_acc_num;
 };
 
-int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			struct dpseci_sec_attr *attr);
-
 /**
  * struct dpseci_sec_counters - Structure representing global SEC counters and
  *				not per dpseci counters
@@ -318,25 +297,6 @@  int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
 			    uint16_t token,
 			    struct dpseci_sec_counters *counters);
 
-int dpseci_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver);
-
-int dpseci_set_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   uint8_t options,
-		   struct opr_cfg *cfg);
-
-int dpseci_get_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   struct opr_cfg *cfg,
-		   struct opr_qry *qry);
-
 /**
  * enum dpseci_congestion_unit - DPSECI congestion units
  * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
@@ -405,16 +365,4 @@  struct dpseci_congestion_notification_cfg {
 	uint16_t notification_mode;
 };
 
-int dpseci_set_congestion_notification(
-			struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			const struct dpseci_congestion_notification_cfg *cfg);
-
-int dpseci_get_congestion_notification(
-			struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			struct dpseci_congestion_notification_cfg *cfg);
-
 #endif /* __FSL_DPSECI_H */
diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c
index ae069794a6..40bd748094 100644
--- a/drivers/crypto/virtio/virtio_pci.c
+++ b/drivers/crypto/virtio/virtio_pci.c
@@ -246,13 +246,6 @@  vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
 	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
 }
 
-void
-vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
-		const void *src, int length)
-{
-	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
-}
-
 uint64_t
 vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
 		uint64_t host_features)
@@ -298,12 +291,6 @@  vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
 	return VTPCI_OPS(hw)->get_status(hw);
 }
 
-uint8_t
-vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
-{
-	return VTPCI_OPS(hw)->get_isr(hw);
-}
-
 static void *
 get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
 {
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
index d9a214dfd0..3092b56952 100644
--- a/drivers/crypto/virtio/virtio_pci.h
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -242,12 +242,7 @@  void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
 uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
 	uint64_t host_features);
 
-void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
-	const void *src, int length);
-
 void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
 	void *dst, int length);
 
-uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
-
 #endif /* _VIRTIO_PCI_H_ */
diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
index 58ff4287df..deaf467090 100644
--- a/drivers/event/dlb/dlb_priv.h
+++ b/drivers/event/dlb/dlb_priv.h
@@ -470,8 +470,6 @@  void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);
 
 int dlb_xstats_init(struct dlb_eventdev *dlb);
 
-void dlb_xstats_uninit(struct dlb_eventdev *dlb);
-
 int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
 			    enum rte_event_dev_xstats_mode mode,
 			    uint8_t queue_port_id, const unsigned int ids[],
diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
index 5f4c590307..6678a8b322 100644
--- a/drivers/event/dlb/dlb_xstats.c
+++ b/drivers/event/dlb/dlb_xstats.c
@@ -578,13 +578,6 @@  dlb_xstats_init(struct dlb_eventdev *dlb)
 	return 0;
 }
 
-void
-dlb_xstats_uninit(struct dlb_eventdev *dlb)
-{
-	rte_free(dlb->xstats);
-	dlb->xstats_count = 0;
-}
-
 int
 dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index b73cf3ff14..56bd4ebe1b 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -536,8 +536,6 @@  void dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f);
 
 int dlb2_xstats_init(struct dlb2_eventdev *dlb2);
 
-void dlb2_xstats_uninit(struct dlb2_eventdev *dlb2);
-
 int dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
 		const unsigned int ids[], uint64_t values[], unsigned int n);
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index 8c3c3cda94..574fca89e8 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -634,13 +634,6 @@  dlb2_xstats_init(struct dlb2_eventdev *dlb2)
 	return 0;
 }
 
-void
-dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
-{
-	rte_free(dlb2->xstats);
-	dlb2->xstats_count = 0;
-}
-
 int
 dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index 69392b56bb..3ddfcaf67c 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -586,52 +586,6 @@  opdl_stage_claim_multithread(struct opdl_stage *s, void *entries,
 	return i;
 }
 
-/* Claim and copy slot pointers, optimised for single-thread operation */
-static __rte_always_inline uint32_t
-opdl_stage_claim_copy_singlethread(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block)
-{
-	num_entries = num_to_process(s, num_entries, block);
-	if (num_entries == 0)
-		return 0;
-	copy_entries_out(s->t, s->head, entries, num_entries);
-	if (seq != NULL)
-		*seq = s->head;
-	s->head += num_entries;
-	return num_entries;
-}
-
-/* Thread-safe version of function to claim and copy pointers to slots */
-static __rte_always_inline uint32_t
-opdl_stage_claim_copy_multithread(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block)
-{
-	uint32_t old_head;
-
-	move_head_atomically(s, &num_entries, &old_head, block, true);
-	if (num_entries == 0)
-		return 0;
-	copy_entries_out(s->t, old_head, entries, num_entries);
-	if (seq != NULL)
-		*seq = old_head;
-	return num_entries;
-}
-
-static __rte_always_inline void
-opdl_stage_disclaim_singlethread_n(struct opdl_stage *s,
-		uint32_t num_entries)
-{
-	uint32_t old_tail = s->shared.tail;
-
-	if (unlikely(num_entries > (s->head - old_tail))) {
-		PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
-				num_entries, s->head - old_tail);
-		num_entries = s->head - old_tail;
-	}
-	__atomic_store_n(&s->shared.tail, num_entries + old_tail,
-			__ATOMIC_RELEASE);
-}
-
 uint32_t
 opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
 		bool block)
@@ -644,26 +598,6 @@  opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
 				block);
 }
 
-uint32_t
-opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
-		const void *entries, uint32_t num_entries, bool block)
-{
-	uint32_t head = s->head;
-
-	num_entries = num_to_process(s, num_entries, block);
-
-	if (num_entries == 0)
-		return 0;
-
-	copy_entries_in(t, head, entries, num_entries);
-
-	s->head += num_entries;
-	__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
-
-	return num_entries;
-
-}
-
 uint32_t
 opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
 		void *entries, uint32_t num_entries, bool block)
@@ -682,25 +616,6 @@  opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
 	return num_entries;
 }
 
-uint32_t
-opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries)
-{
-	/* return (num_to_process(s, num_entries, false)); */
-
-	if (available(s) >= num_entries)
-		return num_entries;
-
-	update_available_seq(s);
-
-	uint32_t avail = available(s);
-
-	if (avail == 0) {
-		rte_pause();
-		return 0;
-	}
-	return (avail <= num_entries) ? avail : num_entries;
-}
-
 uint32_t
 opdl_stage_claim(struct opdl_stage *s, void *entries,
 		uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
@@ -713,41 +628,6 @@  opdl_stage_claim(struct opdl_stage *s, void *entries,
 				seq, block);
 }
 
-uint32_t
-opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block)
-{
-	if (s->threadsafe == false)
-		return opdl_stage_claim_copy_singlethread(s, entries,
-				num_entries, seq, block);
-	else
-		return opdl_stage_claim_copy_multithread(s, entries,
-				num_entries, seq, block);
-}
-
-void
-opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
-		bool block)
-{
-
-	if (s->threadsafe == false) {
-		opdl_stage_disclaim_singlethread_n(s, s->num_claimed);
-	} else {
-		struct claim_manager *disclaims =
-			&s->pending_disclaims[rte_lcore_id()];
-
-		if (unlikely(num_entries > s->num_slots)) {
-			PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
-					num_entries, disclaims->num_claimed);
-			num_entries = disclaims->num_claimed;
-		}
-
-		num_entries = RTE_MIN(num_entries + disclaims->num_to_disclaim,
-				disclaims->num_claimed);
-		opdl_stage_disclaim_multithread_n(s, num_entries, block);
-	}
-}
-
 int
 opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
 {
@@ -769,12 +649,6 @@  opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
 	return num_entries;
 }
 
-uint32_t
-opdl_ring_available(struct opdl_ring *t)
-{
-	return opdl_stage_available(&t->stages[0]);
-}
-
 uint32_t
 opdl_stage_available(struct opdl_stage *s)
 {
@@ -782,14 +656,6 @@  opdl_stage_available(struct opdl_stage *s)
 	return available(s);
 }
 
-void
-opdl_ring_flush(struct opdl_ring *t)
-{
-	struct opdl_stage *s = input_stage(t);
-
-	wait_for_available(s, s->num_slots);
-}
-
 /******************** Non performance sensitive functions ********************/
 
 /* Initial setup of a new stage's context */
@@ -962,12 +828,6 @@  opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
 	return NULL;
 }
 
-void *
-opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
-{
-	return get_slot(t, index);
-}
-
 bool
 opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
 		uint32_t index, bool atomic)
@@ -1046,24 +906,6 @@  opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
 	return ev_updated;
 }
 
-int
-opdl_ring_get_socket(const struct opdl_ring *t)
-{
-	return t->socket;
-}
-
-uint32_t
-opdl_ring_get_num_slots(const struct opdl_ring *t)
-{
-	return t->num_slots;
-}
-
-const char *
-opdl_ring_get_name(const struct opdl_ring *t)
-{
-	return t->name;
-}
-
 /* Check dependency list is valid for a given opdl_ring */
 static int
 check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
@@ -1146,36 +988,6 @@  opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
 	return ret;
 }
 
-struct opdl_stage *
-opdl_ring_get_input_stage(const struct opdl_ring *t)
-{
-	return input_stage(t);
-}
-
-int
-opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
-		uint32_t num_deps)
-{
-	unsigned int i;
-	int ret;
-
-	if ((num_deps == 0) || (!deps)) {
-		PMD_DRV_LOG(ERR, "cannot set NULL dependencies");
-		return -EINVAL;
-	}
-
-	ret = check_deps(s->t, deps, num_deps);
-	if (ret < 0)
-		return ret;
-
-	/* Update deps */
-	for (i = 0; i < num_deps; i++)
-		s->deps[i] = &deps[i]->shared;
-	s->num_deps = num_deps;
-
-	return 0;
-}
-
 struct opdl_ring *
 opdl_stage_get_opdl_ring(const struct opdl_stage *s)
 {
@@ -1245,25 +1057,3 @@  opdl_ring_free(struct opdl_ring *t)
 	if (rte_memzone_free(mz) != 0)
 		PMD_DRV_LOG(ERR, "Cannot free memzone for %s", t->name);
 }
-
-/* search a opdl_ring from its name */
-struct opdl_ring *
-opdl_ring_lookup(const char *name)
-{
-	const struct rte_memzone *mz;
-	char mz_name[RTE_MEMZONE_NAMESIZE];
-
-	snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
-
-	mz = rte_memzone_lookup(mz_name);
-	if (mz == NULL)
-		return NULL;
-
-	return mz->addr;
-}
-
-void
-opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe)
-{
-	s->threadsafe = threadsafe;
-}
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
index 14ababe0bb..c9e2ab6b1b 100644
--- a/drivers/event/opdl/opdl_ring.h
+++ b/drivers/event/opdl/opdl_ring.h
@@ -83,57 +83,6 @@  struct opdl_ring *
 opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
 		uint32_t max_num_stages, int socket);
 
-/**
- * Get pointer to individual slot in a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- * @param index
- *   Index of slot. If greater than the number of slots it will be masked to be
- *   within correct range.
- *
- * @return
- *   A pointer to that slot.
- */
-void *
-opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index);
-
-/**
- * Get NUMA socket used by a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   NUMA socket.
- */
-int
-opdl_ring_get_socket(const struct opdl_ring *t);
-
-/**
- * Get number of slots in a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   Number of slots.
- */
-uint32_t
-opdl_ring_get_num_slots(const struct opdl_ring *t);
-
-/**
- * Get name of a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   Name string.
- */
-const char *
-opdl_ring_get_name(const struct opdl_ring *t);
-
 /**
  * Adds a new processing stage to a specified opdl_ring instance. Adding a stage
  * while there are entries in the opdl_ring being processed will cause undefined
@@ -160,38 +109,6 @@  opdl_ring_get_name(const struct opdl_ring *t);
 struct opdl_stage *
 opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input);
 
-/**
- * Returns the input stage of a opdl_ring to be used by other API functions.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   A pointer to the input stage.
- */
-struct opdl_stage *
-opdl_ring_get_input_stage(const struct opdl_ring *t);
-
-/**
- * Sets the dependencies for a stage (clears all the previous deps!). Changing
- * dependencies while there are entries in the opdl_ring being processed will
- * cause undefined behaviour.
- *
- * @param s
- *   The stage to set the dependencies for.
- * @param deps
- *   An array of pointers to other stages that this stage will depends on. The
- *   other stages must be part of the same opdl_ring!
- * @param num_deps
- *   The size of the deps array. This must be > 0.
- *
- * @return
- *   0 on success, a negative value on error.
- */
-int
-opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
-		uint32_t num_deps);
-
 /**
  * Returns the opdl_ring that a stage belongs to.
  *
@@ -228,32 +145,6 @@  uint32_t
 opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
 		bool block);
 
-/**
- * Inputs a new batch of entries into a opdl stage. This function is only
- * threadsafe (with the same opdl parameter) if the threadsafe parameter of
- * opdl_create() was true. For performance reasons, this function does not
- * check input parameters.
- *
- * @param t
- *   The opdl ring to input entries in to.
- * @param s
- *   The stage to copy entries to.
- * @param entries
- *   An array of entries that will be copied in to the opdl ring.
- * @param num_entries
- *   The size of the entries array.
- * @param block
- *   If this is true, the function blocks until enough slots are available to
- *   input all the requested entries. If false, then the function inputs as
- *   many entries as currently possible.
- *
- * @return
- *   The number of entries successfully input.
- */
-uint32_t
-opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
-			const void *entries, uint32_t num_entries, bool block);
-
 /**
  * Copy a batch of entries from the opdl ring. This function is only
  * threadsafe (with the same opdl parameter) if the threadsafe parameter of
@@ -368,41 +259,6 @@  opdl_stage_claim_check(struct opdl_stage *s, void **entries,
 		uint32_t num_entries, uint32_t *seq, bool block,
 		opdl_ring_check_entries_t *check, void *arg);
 
-/**
- * Before processing a batch of entries, a stage must first claim them to get
- * access. This function is threadsafe using same opdl_stage parameter if
- * the stage was created with threadsafe set to true, otherwise it is only
- * threadsafe with a different opdl_stage per thread.
- *
- * The difference between this function and opdl_stage_claim() is that this
- * function copies the entries from the opdl_ring. Note that any changes made to
- * the copied entries will not be reflected back in to the entries in the
- * opdl_ring, so this function probably only makes sense if the entries are
- * pointers to other data. For performance reasons, this function does not check
- * input parameters.
- *
- * @param s
- *   The opdl_ring stage to read entries in.
- * @param entries
- *   An array of entries that will be filled in by this function.
- * @param num_entries
- *   The number of entries to attempt to claim for processing (and the size of
- *   the entries array).
- * @param seq
- *   If not NULL, this is set to the value of the internal stage sequence number
- *   associated with the first entry returned.
- * @param block
- *   If this is true, the function blocks until num_entries slots are available
- *   to process. If false, then the function claims as many entries as
- *   currently possible.
- *
- * @return
- *   The number of entries copied in to the entries array.
- */
-uint32_t
-opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block);
-
 /**
  * This function must be called when a stage has finished its processing of
  * entries, to make them available to any dependent stages. All entries that are
@@ -433,48 +289,6 @@  int
 opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries,
 		bool block);
 
-/**
- * This function can be called when a stage has finished its processing of
- * entries, to make them available to any dependent stages. The difference
- * between this function and opdl_stage_disclaim() is that here only a
- * portion of entries are disclaimed, not all of them. For performance reasons,
- * this function does not check input parameters.
- *
- * @param s
- *   The opdl_ring stage in which to disclaim entries.
- *
- * @param num_entries
- *   The number of entries to disclaim.
- *
- * @param block
- *   Entries are always made available to a stage in the same order that they
- *   were input in the stage. If a stage is multithread safe, this may mean that
- *   full disclaiming of a batch of entries can not be considered complete until
- *   all earlier threads in the stage have disclaimed. If this parameter is true
- *   then the function blocks until the specified number of entries has been
- *   disclaimed (or there are no more entries to disclaim). Otherwise it
- *   disclaims as many claims as currently possible and an attempt to disclaim
- *   them is made the next time a claim or disclaim function for this stage on
- *   this thread is called.
- *
- *   In a single threaded stage, this parameter has no effect.
- */
-void
-opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
-		bool block);
-
-/**
- * Check how many entries can be input.
- *
- * @param t
- *   The opdl_ring instance to check.
- *
- * @return
- *   The number of new entries currently allowed to be input.
- */
-uint32_t
-opdl_ring_available(struct opdl_ring *t);
-
 /**
  * Check how many entries can be processed in a stage.
  *
@@ -487,23 +301,6 @@  opdl_ring_available(struct opdl_ring *t);
 uint32_t
 opdl_stage_available(struct opdl_stage *s);
 
-/**
- * Check how many entries are available to be processed.
- *
- * NOTE : DOES NOT CHANGE ANY STATE WITHIN THE STAGE
- *
- * @param s
- *   The stage to check.
- *
- * @param num_entries
- *   The number of entries to check for availability.
- *
- * @return
- *   The number of entries currently available to be processed in this stage.
- */
-uint32_t
-opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
-
 /**
  * Create empty stage instance and return the pointer.
  *
@@ -543,15 +340,6 @@  opdl_stage_set_queue_id(struct opdl_stage *s,
 void
 opdl_ring_dump(const struct opdl_ring *t, FILE *f);
 
-/**
- * Blocks until all entries in a opdl_ring have been processed by all stages.
- *
- * @param t
- *   The opdl_ring instance to flush.
- */
-void
-opdl_ring_flush(struct opdl_ring *t);
-
 /**
  * Deallocates all resources used by a opdl_ring instance
  *
@@ -561,30 +349,6 @@  opdl_ring_flush(struct opdl_ring *t);
 void
 opdl_ring_free(struct opdl_ring *t);
 
-/**
- * Search for a opdl_ring by its name
- *
- * @param name
- *   The name of the opdl_ring.
- * @return
- *   The pointer to the opdl_ring matching the name, or NULL if not found.
- *
- */
-struct opdl_ring *
-opdl_ring_lookup(const char *name);
-
-/**
- * Set a opdl_stage to threadsafe variable.
- *
- * @param s
- *   The opdl_stage.
- * @param threadsafe
- *   Threadsafe value.
- */
-void
-opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
-
-
 /**
  * Compare the event descriptor with original version in the ring.
  * if key field event descriptor is changed by application, then
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
index 91d1179d88..2a6aa93ffe 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/net/ark/ark_ddm.c
@@ -92,19 +92,6 @@  ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
 		     );
 }
 
-void
-ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
-{
-	struct ark_ddm_stats_t *stats = &ddm->stats;
-
-	ARK_PMD_LOG(INFO, "DDM Stats: %s"
-		      ARK_SU64 ARK_SU64 ARK_SU64
-		      "\n", msg,
-		      "Bytes:", stats->tx_byte_count,
-		      "Packets:", stats->tx_pkt_count,
-		      "MBufs", stats->tx_mbuf_count);
-}
-
 int
 ark_ddm_is_stopped(struct ark_ddm_t *ddm)
 {
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
index 5456b4b5cc..5b722b6ede 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/net/ark/ark_ddm.h
@@ -141,7 +141,6 @@  void ark_ddm_reset(struct ark_ddm_t *ddm);
 void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
 void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
 		   uint32_t interval);
-void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
 void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
 int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
 uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index b8fb69497d..5a7e686f0e 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -15,7 +15,6 @@ 
 #include "ark_logs.h"
 
 static int set_arg(char *arg, char *val);
-static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
 
 #define ARK_MAX_STR_LEN 64
 union OPTV {
@@ -136,15 +135,6 @@  ark_pktchkr_stop(ark_pkt_chkr_t handle)
 	ARK_PMD_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal);
 }
 
-int
-ark_pktchkr_is_running(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-	uint32_t r = inst->sregs->pkt_start_stop;
-
-	return ((r & 1) == 1);
-}
-
 static void
 ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
 			 uint32_t gen_forever,
@@ -173,48 +163,6 @@  ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
 	inst->cregs->pkt_ctrl = r;
 }
 
-static
-int
-ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-	uint32_t r = inst->cregs->pkt_ctrl;
-
-	return (((r >> 24) & 1) == 1);
-}
-
-int
-ark_pktchkr_wait_done(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-
-	if (ark_pktchkr_is_gen_forever(handle)) {
-		ARK_PMD_LOG(NOTICE, "Pktchk wait_done will not terminate"
-			      " because gen_forever=1\n");
-		return -1;
-	}
-	int wait_cycle = 10;
-
-	while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
-		usleep(1000);
-		wait_cycle--;
-		ARK_PMD_LOG(DEBUG, "Waiting for packet checker %d's"
-			      " internal pktgen to finish sending...\n",
-			      inst->ordinal);
-		ARK_PMD_LOG(DEBUG, "Pktchk %d's pktgen done.\n",
-			      inst->ordinal);
-	}
-	return 0;
-}
-
-int
-ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-
-	return inst->cregs->pkts_sent;
-}
-
 void
 ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b)
 {
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h
index b362281776..2b0ba17d90 100644
--- a/drivers/net/ark/ark_pktchkr.h
+++ b/drivers/net/ark/ark_pktchkr.h
@@ -69,8 +69,6 @@  void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
 void ark_pktchkr_run(ark_pkt_chkr_t handle);
 int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
 void ark_pktchkr_stop(ark_pkt_chkr_t handle);
-int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
-int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
 void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
 void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
 void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
@@ -83,6 +81,5 @@  void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
 void ark_pktchkr_parse(char *args);
 void ark_pktchkr_setup(ark_pkt_chkr_t handle);
 void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
-int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
 
 #endif
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
index 25e1218310..00bf165bff 100644
--- a/drivers/net/ark/ark_pktdir.c
+++ b/drivers/net/ark/ark_pktdir.c
@@ -26,31 +26,9 @@  ark_pktdir_init(void *base)
 	return inst;
 }
 
-void
-ark_pktdir_uninit(ark_pkt_dir_t handle)
-{
-	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
-
-	rte_free(inst);
-}
-
 void
 ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v)
 {
 	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
 	inst->regs->ctrl = v;
 }
-
-uint32_t
-ark_pktdir_status(ark_pkt_dir_t handle)
-{
-	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
-	return inst->regs->ctrl;
-}
-
-uint32_t
-ark_pktdir_stall_cnt(ark_pkt_dir_t handle)
-{
-	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
-	return inst->regs->stall_cnt;
-}
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h
index 4afd128f95..e7f2026a00 100644
--- a/drivers/net/ark/ark_pktdir.h
+++ b/drivers/net/ark/ark_pktdir.h
@@ -33,9 +33,6 @@  struct ark_pkt_dir_inst {
 };
 
 ark_pkt_dir_t ark_pktdir_init(void *base);
-void ark_pktdir_uninit(ark_pkt_dir_t handle);
 void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
-uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
-uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
 
 #endif
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index 4a02662a46..9769c46b47 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -186,33 +186,6 @@  ark_pktgen_is_gen_forever(ark_pkt_gen_t handle)
 	return (((r >> 24) & 1) == 1);
 }
 
-void
-ark_pktgen_wait_done(ark_pkt_gen_t handle)
-{
-	struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
-	int wait_cycle = 10;
-
-	if (ark_pktgen_is_gen_forever(handle))
-		ARK_PMD_LOG(NOTICE, "Pktgen wait_done will not terminate"
-			    " because gen_forever=1\n");
-
-	while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) {
-		usleep(1000);
-		wait_cycle--;
-		ARK_PMD_LOG(DEBUG,
-			      "Waiting for pktgen %d to finish sending...\n",
-			      inst->ordinal);
-	}
-	ARK_PMD_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal);
-}
-
-uint32_t
-ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle)
-{
-	struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
-	return inst->regs->pkts_sent;
-}
-
 void
 ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b)
 {
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h
index c61dfee6db..cc78577d3d 100644
--- a/drivers/net/ark/ark_pktgen.h
+++ b/drivers/net/ark/ark_pktgen.h
@@ -60,8 +60,6 @@  uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
 uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
 uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
 void ark_pktgen_reset(ark_pkt_gen_t handle);
-void ark_pktgen_wait_done(ark_pkt_gen_t handle);
-uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
 void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
 void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
 void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
index a740d36d43..2132f4e972 100644
--- a/drivers/net/ark/ark_udm.c
+++ b/drivers/net/ark/ark_udm.c
@@ -135,21 +135,6 @@  ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
 		      "MBuf Count", udm->stats.rx_mbuf_count);
 }
 
-void
-ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
-{
-	ARK_PMD_LOG(INFO, "UDM Queue %3u Stats: %s"
-		      ARK_SU64 ARK_SU64
-		      ARK_SU64 ARK_SU64
-		      ARK_SU64 "\n",
-		      qid, msg,
-		      "Pkts Received", udm->qstats.q_packet_count,
-		      "Pkts Finalized", udm->qstats.q_ff_packet_count,
-		      "Pkts Dropped", udm->qstats.q_pkt_drop,
-		      "Bytes Count", udm->qstats.q_byte_count,
-		      "MBuf Count", udm->qstats.q_mbuf_count);
-}
-
 void
 ark_udm_dump(struct ark_udm_t *udm, const char *msg)
 {
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
index 5846c825b8..7f0d3c2a5e 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/net/ark/ark_udm.h
@@ -145,8 +145,6 @@  void ark_udm_configure(struct ark_udm_t *udm,
 void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
 void ark_udm_stats_reset(struct ark_udm_t *udm);
 void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
-void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
-			      uint16_t qid);
 void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
 void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
 void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
index 7d0e724019..415099e04a 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
@@ -480,20 +480,6 @@  int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
 	return aq_hw_err_from_flags(self);
 }
 
-int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
-{
-	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
-	return aq_hw_err_from_flags(self);
-}
-
-int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
-{
-	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
-	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
-
-	return aq_hw_err_from_flags(self);
-}
-
 int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 {
 	*mask = hw_atl_itr_irq_statuslsw_get(self);
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
index d1ba2aceb3..4a155d2bc7 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
@@ -35,8 +35,6 @@  int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
 int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
 				struct aq_rss_parameters *rss_params);
 
-int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
-int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
 int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
 
 #endif /* HW_ATL_B0_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/atlantic/hw_atl/hw_atl_llh.c
index 2dc5be2ff1..b29419bce3 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh.c
@@ -22,28 +22,6 @@  u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
 	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
 }
 
-void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
-			    HW_ATL_GLB_REG_RES_DIS_MSK,
-			    HW_ATL_GLB_REG_RES_DIS_SHIFT,
-			    glb_reg_res_dis);
-}
-
-void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
-			    HW_ATL_GLB_SOFT_RES_MSK,
-			    HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
-}
-
-u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
-				  HW_ATL_GLB_SOFT_RES_MSK,
-				  HW_ATL_GLB_SOFT_RES_SHIFT);
-}
-
 u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
 {
 	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
@@ -275,13 +253,6 @@  void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
 	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
 }
 
-void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
-			    HW_ATL_ITR_REG_RES_DSBL_MSK,
-			    HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
-}
-
 void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
 					u32 irq_status_clearlsw)
 {
@@ -293,18 +264,6 @@  u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
 	return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
 }
 
-u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
-				  HW_ATL_ITR_RES_SHIFT);
-}
-
-void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
-			    HW_ATL_ITR_RES_SHIFT, res_irq);
-}
-
 /* rdm */
 void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
 {
@@ -374,13 +333,6 @@  void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
 			    rx_desc_head_splitting);
 }
 
-u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
-				  HW_ATL_RDM_DESCDHD_MSK,
-				  HW_ATL_RDM_DESCDHD_SHIFT);
-}
-
 void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
 				u32 descriptor)
 {
@@ -389,15 +341,6 @@  void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
 			    rx_desc_len);
 }
 
-void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
-				u32 descriptor)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
-			    HW_ATL_RDM_DESCDRESET_MSK,
-			    HW_ATL_RDM_DESCDRESET_SHIFT,
-			    rx_desc_res);
-}
-
 void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
 					 u32 rx_desc_wr_wb_irq_en)
 {
@@ -425,15 +368,6 @@  void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
 			    rx_pld_dca_en);
 }
 
-void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 rdm_intr_moder_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
-			    HW_ATL_RDM_INT_RIM_EN_MSK,
-			    HW_ATL_RDM_INT_RIM_EN_SHIFT,
-			    rdm_intr_moder_en);
-}
-
 /* reg */
 void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
 				u32 regidx)
@@ -441,21 +375,11 @@  void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
 	aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
 }
 
-u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
-}
-
 void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
 {
 	aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
 }
 
-void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
-}
-
 void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 rx_dma_desc_base_addrlsw,
 					       u32 descriptor)
@@ -472,11 +396,6 @@  void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
 			rx_dma_desc_base_addrmsw);
 }
 
-u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
-{
-	return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
-}
-
 void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 					 u32 rx_dma_desc_tail_ptr,
 					 u32 descriptor)
@@ -506,26 +425,6 @@  void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
 			rx_flr_rss_control1);
 }
 
-void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
-				    u32 rx_filter_control2)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
-}
-
-void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 rx_intr_moderation_ctl,
-				       u32 queue)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
-			rx_intr_moderation_ctl);
-}
-
-void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
-				     u32 tx_dma_debug_ctl)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
-}
-
 void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 tx_dma_desc_base_addrlsw,
 					       u32 descriptor)
@@ -552,22 +451,7 @@  void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 			tx_dma_desc_tail_ptr);
 }
 
-void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 tx_intr_moderation_ctl,
-				       u32 queue)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
-			tx_intr_moderation_ctl);
-}
-
 /* RPB: rx packet buffer */
-void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
-			    HW_ATL_RPB_DMA_SYS_LBK_MSK,
-			    HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
-}
-
 void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
 					   u32 rx_traf_class_mode)
 {
@@ -577,13 +461,6 @@  void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
 			    rx_traf_class_mode);
 }
 
-u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
-			HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
-			HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
-}
-
 void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
 {
 	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
@@ -664,15 +541,6 @@  void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
 			    HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
 }
 
-void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
-				      u32 l2multicast_flr_en,
-				      u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
-			    HW_ATL_RPFL2MC_ENF_MSK,
-			    HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
-}
-
 void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
 					 u32 l2promiscuous_mode_en)
 {
@@ -813,15 +681,6 @@  void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
 			    HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
 }
 
-void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
-				       u32 tpo_to_rpf_sys_lbk)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
-			    HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
-			    HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
-			    tpo_to_rpf_sys_lbk);
-}
-
 void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
 {
 	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
@@ -847,24 +706,6 @@  void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
 			    vlan_prom_mode_en);
 }
 
-void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-						 u32 vlan_acc_untagged_packets)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
-			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
-			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
-			    vlan_acc_untagged_packets);
-}
-
-void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
-				      u32 vlan_untagged_act)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
-			    HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
-			    HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
-			    vlan_untagged_act);
-}
-
 void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
 				u32 filter)
 {
@@ -892,73 +733,6 @@  void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
 			    vlan_id_flr);
 }
 
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
-				u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
-			    HW_ATL_RPF_ET_ENF_MSK,
-			    HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
-}
-
-void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-					  u32 etht_user_priority_en, u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
-			    HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
-			    etht_user_priority_en);
-}
-
-void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
-				     u32 etht_rx_queue_en,
-				     u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
-			    HW_ATL_RPF_ET_RXQFEN_MSK,
-			    HW_ATL_RPF_ET_RXQFEN_SHIFT,
-			    etht_rx_queue_en);
-}
-
-void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
-				       u32 etht_user_priority,
-				       u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
-			    HW_ATL_RPF_ET_UPF_MSK,
-			    HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
-}
-
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-				  u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
-			    HW_ATL_RPF_ET_RXQF_MSK,
-			    HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
-}
-
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-				   u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
-			    HW_ATL_RPF_ET_MNG_RXQF_MSK,
-			    HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
-			    etht_mgt_queue);
-}
-
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
-				 u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
-			    HW_ATL_RPF_ET_ACTF_MSK,
-			    HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
-}
-
-void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
-			    HW_ATL_RPF_ET_VALF_MSK,
-			    HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
-}
-
 /* RPO: rx packet offload */
 void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
 					      u32 ipv4header_crc_offload_en)
@@ -1156,13 +930,6 @@  void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
 			    tx_desc_en);
 }
 
-u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
-				  HW_ATL_TDM_DESCDHD_MSK,
-				  HW_ATL_TDM_DESCDHD_SHIFT);
-}
-
 void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
 				u32 descriptor)
 {
@@ -1191,15 +958,6 @@  void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
 			    tx_desc_wr_wb_threshold);
 }
 
-void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 tdm_irq_moderation_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
-			    HW_ATL_TDM_INT_MOD_EN_MSK,
-			    HW_ATL_TDM_INT_MOD_EN_SHIFT,
-			    tdm_irq_moderation_en);
-}
-
 /* thm */
 void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
 					      u32 lso_tcp_flag_of_first_pkt)
@@ -1236,13 +994,6 @@  void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
 			    HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
 }
 
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
-			HW_ATL_TPB_TX_TC_MODE_MSK,
-			HW_ATL_TPB_TX_TC_MODE_SHIFT);
-}
-
 void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
 				   u32 tx_traf_class_mode)
 {
@@ -1272,15 +1023,6 @@  void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
 			    tx_buff_lo_threshold_per_tc);
 }
 
-void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_dma_sys_lbk_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
-			    HW_ATL_TPB_DMA_SYS_LBK_MSK,
-			    HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
-			    tx_dma_sys_lbk_en);
-}
-
 void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 tx_pkt_buff_size_per_tc,
 					    u32 buffer)
@@ -1319,15 +1061,6 @@  void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
 			    tcp_udp_crc_offload_en);
 }
 
-void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_pkt_sys_lbk_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
-			    HW_ATL_TPO_PKT_SYS_LBK_MSK,
-			    HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
-			    tx_pkt_sys_lbk_en);
-}
-
 /* TPS: tx packet scheduler */
 void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
 					      u32 tx_pkt_shed_data_arb_mode)
@@ -1422,58 +1155,7 @@  void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
 			    HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
 }
 
-/* msm */
-u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
-				  HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
-				  HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
-}
-
-void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					       u32 reg_addr_for_indirect_addr)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
-			    HW_ATL_MSM_REG_ADDR_MSK,
-			    HW_ATL_MSM_REG_ADDR_SHIFT,
-			    reg_addr_for_indirect_addr);
-}
-
-void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
-			    HW_ATL_MSM_REG_RD_STROBE_MSK,
-			    HW_ATL_MSM_REG_RD_STROBE_SHIFT,
-			    reg_rd_strobe);
-}
-
-u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
-}
-
-void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
-}
-
-void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
-			    HW_ATL_MSM_REG_WR_STROBE_MSK,
-			    HW_ATL_MSM_REG_WR_STROBE_SHIFT,
-			    reg_wr_strobe);
-}
-
 /* pci */
-void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
-			    HW_ATL_PCI_REG_RES_DSBL_MSK,
-			    HW_ATL_PCI_REG_RES_DSBL_SHIFT,
-			    pci_reg_res_dis);
-}
-
 void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
 					u32 glb_cpu_scratch_scp,
 					u32 scratch_scp)
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/atlantic/hw_atl/hw_atl_llh.h
index e30083cea5..493fd88934 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh.h
@@ -21,15 +21,6 @@  void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw,	u32 glb_cpu_sem,
 /* get global microprocessor semaphore */
 u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
 
-/* set global register reset disable */
-void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
-
-/* set soft reset */
-void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
-
-/* get soft reset */
-u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
-
 /* stats */
 
 u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
@@ -130,9 +121,6 @@  void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
 /* set interrupt mask set lsw */
 void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
 
-/* set interrupt register reset disable */
-void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
-
 /* set interrupt status clear lsw */
 void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
 					u32 irq_status_clearlsw);
@@ -140,12 +128,6 @@  void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
 /* get interrupt status lsw */
 u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
 
-/* get reset interrupt */
-u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
-
-/* set reset interrupt */
-void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
-
 /* rdm */
 
 /* set cpu id */
@@ -175,9 +157,6 @@  void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
 					   u32 rx_desc_head_splitting,
 				    u32 descriptor);
 
-/* get rx descriptor head pointer */
-u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
-
 /* set rx descriptor length */
 void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
 				u32 descriptor);
@@ -199,29 +178,15 @@  void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
 					   u32 rx_desc_head_buff_size,
 					   u32 descriptor);
 
-/* set rx descriptor reset */
-void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
-				u32 descriptor);
-
-/* Set RDM Interrupt Moderation Enable */
-void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 rdm_intr_moder_en);
-
 /* reg */
 
 /* set general interrupt mapping register */
 void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
 				u32 regidx);
 
-/* get general interrupt status register */
-u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
-
 /* set interrupt global control register */
 void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
 
-/* set interrupt throttle register */
-void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
-
 /* set rx dma descriptor base address lsw */
 void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 rx_dma_desc_base_addrlsw,
@@ -232,9 +197,6 @@  void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
 					       u32 rx_dma_desc_base_addrmsw,
 					u32 descriptor);
 
-/* get rx dma descriptor status register */
-u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
-
 /* set rx dma descriptor tail pointer register */
 void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 					 u32 rx_dma_desc_tail_ptr,
@@ -252,18 +214,6 @@  void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
 void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
 				       u32 rx_flr_rss_control1);
 
-/* Set RX Filter Control Register 2 */
-void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
-
-/* Set RX Interrupt Moderation Control Register */
-void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 rx_intr_moderation_ctl,
-				u32 queue);
-
-/* set tx dma debug control */
-void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
-				     u32 tx_dma_debug_ctl);
-
 /* set tx dma descriptor base address lsw */
 void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 tx_dma_desc_base_addrlsw,
@@ -279,11 +229,6 @@  void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 					 u32 tx_dma_desc_tail_ptr,
 					 u32 descriptor);
 
-/* Set TX Interrupt Moderation Control Register */
-void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 tx_intr_moderation_ctl,
-				       u32 queue);
-
 /* set global microprocessor scratch pad */
 void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
 					u32 glb_cpu_scratch_scp,
@@ -291,16 +236,10 @@  void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
 
 /* rpb */
 
-/* set dma system loopback */
-void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
-
 /* set rx traffic class mode */
 void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
 					   u32 rx_traf_class_mode);
 
-/* get rx traffic class mode */
-u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
-
 /* set rx buffer enable */
 void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
 
@@ -341,11 +280,6 @@  void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
 void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
 				       u32 l2broadcast_flr_act);
 
-/* set l2 multicast filter enable */
-void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
-				      u32 l2multicast_flr_en,
-				      u32 filter);
-
 /* set l2 promiscuous mode enable */
 void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
 					 u32 l2promiscuous_mode_en);
@@ -403,10 +337,6 @@  u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
 /* set rss redirection write enable */
 void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
 
-/* set tpo to rpf system loopback */
-void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
-				       u32 tpo_to_rpf_sys_lbk);
-
 /* set vlan inner ethertype */
 void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
 
@@ -417,14 +347,6 @@  void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
 void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
 				      u32 vlan_prom_mode_en);
 
-/* Set VLAN untagged action */
-void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
-				      u32 vlan_untagged_act);
-
-/* Set VLAN accept untagged packets */
-void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-						 u32 vlan_acc_untagged_packets);
-
 /* Set VLAN filter enable */
 void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
 				u32 filter);
@@ -437,40 +359,6 @@  void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
 void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
 				u32 filter);
 
-/* set ethertype filter enable */
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
-				u32 filter);
-
-/* set  ethertype user-priority enable */
-void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-					  u32 etht_user_priority_en,
-					  u32 filter);
-
-/* set  ethertype rx queue enable */
-void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
-				     u32 etht_rx_queue_en,
-				     u32 filter);
-
-/* set ethertype rx queue */
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-				  u32 filter);
-
-/* set ethertype user-priority */
-void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
-				       u32 etht_user_priority,
-				       u32 filter);
-
-/* set ethertype management queue */
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-				   u32 filter);
-
-/* set ethertype filter action */
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
-				 u32 filter);
-
-/* set ethertype filter */
-void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
-
 /* rpo */
 
 /* set ipv4 header checksum offload enable */
@@ -552,9 +440,6 @@  void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
 void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
 				   u32 dca);
 
-/* get tx descriptor head pointer */
-u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
-
 /* set tx descriptor length */
 void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
 				u32 descriptor);
@@ -568,9 +453,6 @@  void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
 					    u32 tx_desc_wr_wb_threshold,
 				     u32 descriptor);
 
-/* Set TDM Interrupt Moderation Enable */
-void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 tdm_irq_moderation_en);
 /* thm */
 
 /* set lso tcp flag of first packet */
@@ -591,9 +473,6 @@  void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
 void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
 				   u32 tx_traf_class_mode);
 
-/* get TX Traffic Class Mode */
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
-
 /* set tx buffer enable */
 void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
 
@@ -607,10 +486,6 @@  void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
 						u32 tx_buff_lo_threshold_per_tc,
 					 u32 buffer);
 
-/* set tx dma system loopback enable */
-void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_dma_sys_lbk_en);
-
 /* set tx packet buffer size (per tc) */
 void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 tx_pkt_buff_size_per_tc,
@@ -630,10 +505,6 @@  void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
 void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
 					   u32 tcp_udp_crc_offload_en);
 
-/* set tx pkt system loopback enable */
-void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_pkt_sys_lbk_en);
-
 /* tps */
 
 /* set tx packet scheduler data arbitration mode */
@@ -681,32 +552,8 @@  void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
 /* set tx register reset disable */
 void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
 
-/* msm */
-
-/* get register access status */
-u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
-
-/* set  register address for indirect address */
-void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					       u32 reg_addr_for_indirect_addr);
-
-/* set register read strobe */
-void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
-
-/* get  register read data */
-u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
-
-/* set  register write data */
-void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
-
-/* set register write strobe */
-void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
-
 /* pci */
 
-/* set pci register reset disable */
-void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
-
 /* set uP Force Interrupt */
 void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
 
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
index 84d11ab3a5..c94f5112f1 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
@@ -682,37 +682,6 @@  static int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 	return err;
 }
 
-unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
-{
-	unsigned int ret = 0U;
-
-	switch (mbps) {
-	case 100U:
-		ret = 5U;
-		break;
-
-	case 1000U:
-		ret = 4U;
-		break;
-
-	case 2500U:
-		ret = 3U;
-		break;
-
-	case 5000U:
-		ret = 1U;
-		break;
-
-	case 10000U:
-		ret = 0U;
-		break;
-
-	default:
-		break;
-	}
-	return ret;
-}
-
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
 {
 	u32 chip_features = 0U;
@@ -795,11 +764,6 @@  int hw_atl_utils_update_stats(struct aq_hw_s *self)
 	return 0;
 }
 
-struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
-{
-	return &self->curr_stats;
-}
-
 static const u32 hw_atl_utils_hw_mac_regs[] = {
 	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
 	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
index d8fab010cf..f5e2b472a9 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
@@ -617,8 +617,6 @@  void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 
 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
-unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
-
 unsigned int hw_atl_utils_hw_get_reg_length(void);
 
 int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
@@ -633,8 +631,6 @@  int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
 int hw_atl_utils_update_stats(struct aq_hw_s *self);
 
-struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
-
 int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
 				  u32 *p, u32 cnt);
 
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 61f99c6408..7ade8f42d3 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -456,23 +456,6 @@  static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
 	}
 }
 
-/**
- * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
- *
- * @sc:			device handle
- * @o:			vlan_mac object
- *
- * @details Notice if a pending execution exists, it would perform it -
- *          possibly releasing and reclaiming the execution queue lock.
- */
-void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
-				   struct ecore_vlan_mac_obj *o)
-{
-	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
-	__ecore_vlan_mac_h_write_unlock(sc, o);
-	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
-}
-
 /**
  * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
  *
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index d58072dac0..bfb55e8d01 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1871,8 +1871,6 @@  void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
 				  struct ecore_vlan_mac_obj *o);
 int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc,
 				struct ecore_vlan_mac_obj *o);
-void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
-					  struct ecore_vlan_mac_obj *o);
 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
 			   struct ecore_vlan_mac_ramrod_params *p);
 
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index b65126d718..67ebdaaa44 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -1154,931 +1154,6 @@  static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
 	return ELINK_STATUS_OK;
 }
 
-/******************************************************************/
-/*				ETS section			  */
-/******************************************************************/
-static void elink_ets_e2e3a0_disabled(struct elink_params *params)
-{
-	/* ETS disabled configuration*/
-	struct bnx2x_softc *sc = params->sc;
-
-	ELINK_DEBUG_P0(sc, "ETS E2E3 disabled configuration");
-
-	/* mapping between entry  priority to client number (0,1,2 -debug and
-	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
-	 * 3bits client num.
-	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	 * cos1-100     cos0-011     dbg1-010     dbg0-001     MCP-000
-	 */
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
-	 * COS0 entry, 4 - COS1 entry.
-	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-	 * bit4   bit3	  bit2   bit1	  bit0
-	 * MCP and debug are strict
-	 */
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
-	/* defines which entries (clients) are subjected to WFQ arbitration */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-	/* For strict priority entries defines the number of consecutive
-	 * slots for the highest priority.
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/* mapping between the CREDIT_WEIGHT registers and actual client
-	 * numbers
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
-	REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
-	/* ETS mode disable */
-	REG_WR(sc, PBF_REG_ETS_ENABLED, 0);
-	/* If ETS mode is enabled (there is no strict priority) defines a WFQ
-	 * weight for COS0/COS1.
-	 */
-	REG_WR(sc, PBF_REG_COS0_WEIGHT, 0x2710);
-	REG_WR(sc, PBF_REG_COS1_WEIGHT, 0x2710);
-	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
-	REG_WR(sc, PBF_REG_COS0_UPPER_BOUND, 0x989680);
-	REG_WR(sc, PBF_REG_COS1_UPPER_BOUND, 0x989680);
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-}
-/******************************************************************************
- * Description:
- *	Getting min_w_val will be set according to line speed .
- *.
- ******************************************************************************/
-static uint32_t elink_ets_get_min_w_val_nig(const struct elink_vars *vars)
-{
-	uint32_t min_w_val = 0;
-	/* Calculate min_w_val.*/
-	if (vars->link_up) {
-		if (vars->line_speed == ELINK_SPEED_20000)
-			min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-		else
-			min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
-	} else {
-		min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-	}
-	/* If the link isn't up (static configuration for example ) The
-	 * link will be according to 20GBPS.
-	 */
-	return min_w_val;
-}
-/******************************************************************************
- * Description:
- *	Getting credit upper bound form min_w_val.
- *.
- ******************************************************************************/
-static uint32_t elink_ets_get_credit_upper_bound(const uint32_t min_w_val)
-{
-	const uint32_t credit_upper_bound = (uint32_t)
-						ELINK_MAXVAL((150 * min_w_val),
-							ELINK_MAX_PACKET_SIZE);
-	return credit_upper_bound;
-}
-/******************************************************************************
- * Description:
- *	Set credit upper bound for NIG.
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_set_credit_upper_bound_nig(
-	const struct elink_params *params,
-	const uint32_t min_w_val)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint32_t credit_upper_bound =
-	    elink_ets_get_credit_upper_bound(min_w_val);
-
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
-		NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
-
-	if (!port) {
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
-			credit_upper_bound);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
-			credit_upper_bound);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
-			credit_upper_bound);
-	}
-}
-/******************************************************************************
- * Description:
- *	Will return the NIG ETS registers to init values.Except
- *	credit_upper_bound.
- *	That isn't used in this configuration (No WFQ is enabled) and will be
- *	configured according to spec
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_nig_disabled(const struct elink_params *params,
-					const struct elink_vars *vars)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint32_t min_w_val = elink_ets_get_min_w_val_nig(vars);
-	/* Mapping between entry  priority to client number (0,1,2 -debug and
-	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
-	 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
-	 * reset value or init tool
-	 */
-	if (port) {
-		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
-		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
-	} else {
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
-	}
-	/* For strict priority entries defines the number of consecutive
-	 * slots for the highest priority.
-	 */
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
-		   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/* Mapping between the CREDIT_WEIGHT registers and actual client
-	 * numbers
-	 */
-	if (port) {
-		/*Port 1 has 6 COS*/
-		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
-		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
-	} else {
-		/*Port 0 has 9 COS*/
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
-		       0x43210876);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
-	}
-
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
-	 * COS0 entry, 4 - COS1 entry.
-	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-	 * bit4   bit3	  bit2   bit1	  bit0
-	 * MCP and debug are strict
-	 */
-	if (port)
-		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
-	else
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
-	/* defines which entries (clients) are subjected to WFQ arbitration */
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
-		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-
-	/* Please notice the register address are note continuous and a
-	 * for here is note appropriate.In 2 port mode port0 only COS0-5
-	 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
-	 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
-	 * are never used for WFQ
-	 */
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
-	if (!port) {
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
-	}
-
-	elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
-}
-/******************************************************************************
- * Description:
- *	Set credit upper bound for PBF.
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_set_credit_upper_bound_pbf(
-	const struct elink_params *params,
-	const uint32_t min_w_val)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint32_t credit_upper_bound =
-	    elink_ets_get_credit_upper_bound(min_w_val);
-	const uint8_t port = params->port;
-	uint32_t base_upper_bound = 0;
-	uint8_t max_cos = 0;
-	uint8_t i = 0;
-	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
-	 * port mode port1 has COS0-2 that can be used for WFQ.
-	 */
-	if (!port) {
-		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-	} else {
-		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
-	}
-
-	for (i = 0; i < max_cos; i++)
-		REG_WR(sc, base_upper_bound + (i << 2), credit_upper_bound);
-}
-
-/******************************************************************************
- * Description:
- *	Will return the PBF ETS registers to init values.Except
- *	credit_upper_bound.
- *	That isn't used in this configuration (No WFQ is enabled) and will be
- *	configured according to spec
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL;
-	uint8_t i = 0;
-	uint32_t base_weight = 0;
-	uint8_t max_cos = 0;
-
-	/* Mapping between entry  priority to client number 0 - COS0
-	 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
-	 * TODO_ETS - Should be done by reset value or init tool
-	 */
-	if (port)
-		/*  0x688 (|011|0 10|00 1|000) */
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
-	else
-		/*  (10 1|100 |011|0 10|00 1|000) */
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
-
-	/* TODO_ETS - Should be done by reset value or init tool */
-	if (port)
-		/* 0x688 (|011|0 10|00 1|000)*/
-		REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
-	else
-	/* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
-	REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
-		   PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
-
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
-		   PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
-		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
-	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.
-	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
-	 */
-	if (!port) {
-		base_weight = PBF_REG_COS0_WEIGHT_P0;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-	} else {
-		base_weight = PBF_REG_COS0_WEIGHT_P1;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
-	}
-
-	for (i = 0; i < max_cos; i++)
-		REG_WR(sc, base_weight + (0x4 * i), 0);
-
-	elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
-}
-/******************************************************************************
- * Description:
- *	E3B0 disable will return basicly the values to init values.
- *.
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
-				   const struct elink_vars *vars)
-{
-	struct bnx2x_softc *sc = params->sc;
-
-	if (!CHIP_IS_E3B0(sc)) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_e3b0_disabled the chip isn't E3B0");
-		return ELINK_STATUS_ERROR;
-	}
-
-	elink_ets_e3b0_nig_disabled(params, vars);
-
-	elink_ets_e3b0_pbf_disabled(params);
-
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Disable will return basicly the values to init values.
- *
- ******************************************************************************/
-elink_status_t elink_ets_disabled(struct elink_params *params,
-		      struct elink_vars *vars)
-{
-	struct bnx2x_softc *sc = params->sc;
-	elink_status_t elink_status = ELINK_STATUS_OK;
-
-	if ((CHIP_IS_E2(sc)) || (CHIP_IS_E3A0(sc))) {
-		elink_ets_e2e3a0_disabled(params);
-	} else if (CHIP_IS_E3B0(sc)) {
-		elink_status = elink_ets_e3b0_disabled(params, vars);
-	} else {
-		ELINK_DEBUG_P0(sc, "elink_ets_disabled - chip not supported");
-		return ELINK_STATUS_ERROR;
-	}
-
-	return elink_status;
-}
-
-/******************************************************************************
- * Description
- *	Set the COS mappimg to SP and BW until this point all the COS are not
- *	set as SP or BW.
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
-		  __rte_unused const struct elink_ets_params *ets_params,
-		  const uint8_t cos_sp_bitmap,
-		  const uint8_t cos_bw_bitmap)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint8_t nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
-	const uint8_t pbf_cli_sp_bitmap = cos_sp_bitmap;
-	const uint8_t nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
-	const uint8_t pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
-
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
-	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
-	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
-
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
-	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
-	       nig_cli_subject2wfq_bitmap);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
-	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
-	       pbf_cli_subject2wfq_bitmap);
-
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	This function is needed because NIG ARB_CREDIT_WEIGHT_X are
- *	not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_set_cos_bw(struct bnx2x_softc *sc,
-				     const uint8_t cos_entry,
-				     const uint32_t min_w_val_nig,
-				     const uint32_t min_w_val_pbf,
-				     const uint16_t total_bw,
-				     const uint8_t bw,
-				     const uint8_t port)
-{
-	uint32_t nig_reg_address_crd_weight = 0;
-	uint32_t pbf_reg_address_crd_weight = 0;
-	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */
-	const uint32_t cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
-	const uint32_t cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
-
-	switch (cos_entry) {
-	case 0:
-	    nig_reg_address_crd_weight =
-		 (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
-		     NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
-	     pbf_reg_address_crd_weight = (port) ?
-		 PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
-		break;
-	case 1:
-	     nig_reg_address_crd_weight = (port) ?
-		 NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
-	     pbf_reg_address_crd_weight = (port) ?
-		 PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
-		break;
-	case 2:
-	     nig_reg_address_crd_weight = (port) ?
-		 NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
-
-		 pbf_reg_address_crd_weight = (port) ?
-		     PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
-		break;
-	case 3:
-		if (port)
-			return ELINK_STATUS_ERROR;
-		nig_reg_address_crd_weight =
-			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
-		pbf_reg_address_crd_weight =
-			PBF_REG_COS3_WEIGHT_P0;
-		break;
-	case 4:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	     nig_reg_address_crd_weight =
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
-	     pbf_reg_address_crd_weight = PBF_REG_COS4_WEIGHT_P0;
-		break;
-	case 5:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	     nig_reg_address_crd_weight =
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
-	     pbf_reg_address_crd_weight = PBF_REG_COS5_WEIGHT_P0;
-		break;
-	}
-
-	REG_WR(sc, nig_reg_address_crd_weight, cos_bw_nig);
-
-	REG_WR(sc, pbf_reg_address_crd_weight, cos_bw_pbf);
-
-	return ELINK_STATUS_OK;
-}
-/******************************************************************************
- * Description:
- *	Calculate the total BW.A value of 0 isn't legal.
- *
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_get_total_bw(
-	const struct elink_params *params,
-	struct elink_ets_params *ets_params,
-	uint16_t *total_bw)
-{
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t cos_idx = 0;
-	uint8_t is_bw_cos_exist = 0;
-
-	*total_bw = 0;
-	/* Calculate total BW requested */
-	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
-		if (ets_params->cos[cos_idx].state == elink_cos_state_bw) {
-			is_bw_cos_exist = 1;
-			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
-				ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config BW"
-						   " was set to 0");
-				/* This is to prevent a state when ramrods
-				 * can't be sent
-				 */
-				ets_params->cos[cos_idx].params.bw_params.bw
-					 = 1;
-			}
-			*total_bw +=
-				ets_params->cos[cos_idx].params.bw_params.bw;
-		}
-	}
-
-	/* Check total BW is valid */
-	if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
-		if (*total_bw == 0) {
-			ELINK_DEBUG_P0(sc,
-			   "elink_ets_E3B0_config total BW shouldn't be 0");
-			return ELINK_STATUS_ERROR;
-		}
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_E3B0_config total BW should be 100");
-		/* We can handle a case whre the BW isn't 100 this can happen
-		 * if the TC are joined.
-		 */
-	}
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Invalidate all the sp_pri_to_cos.
- *
- ******************************************************************************/
-static void elink_ets_e3b0_sp_pri_to_cos_init(uint8_t *sp_pri_to_cos)
-{
-	uint8_t pri = 0;
-	for (pri = 0; pri < ELINK_DCBX_MAX_NUM_COS; pri++)
-		sp_pri_to_cos[pri] = DCBX_INVALID_COS;
-}
-/******************************************************************************
- * Description:
- *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
- *	according to sp_pri_to_cos.
- *
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_sp_pri_to_cos_set(
-					    const struct elink_params *params,
-					    uint8_t *sp_pri_to_cos,
-					    const uint8_t pri,
-					    const uint8_t cos_entry)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint8_t max_num_of_cos = (port) ?
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-
-	if (pri >= max_num_of_cos) {
-		ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid "
-		   "parameter Illegal strict priority");
-		return ELINK_STATUS_ERROR;
-	}
-
-	if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
-		ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid "
-				   "parameter There can't be two COS's with "
-				   "the same strict pri");
-		return ELINK_STATUS_ERROR;
-	}
-
-	sp_pri_to_cos[pri] = cos_entry;
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Returns the correct value according to COS and priority in
- *	the sp_pri_cli register.
- *
- ******************************************************************************/
-static uint64_t elink_e3b0_sp_get_pri_cli_reg(const uint8_t cos,
-					 const uint8_t cos_offset,
-					 const uint8_t pri_set,
-					 const uint8_t pri_offset,
-					 const uint8_t entry_size)
-{
-	uint64_t pri_cli_nig = 0;
-	pri_cli_nig = ((uint64_t)(cos + cos_offset)) << (entry_size *
-						    (pri_set + pri_offset));
-
-	return pri_cli_nig;
-}
-/******************************************************************************
- * Description:
- *	Returns the correct value according to COS and priority in the
- *	sp_pri_cli register for NIG.
- *
- ******************************************************************************/
-static uint64_t elink_e3b0_sp_get_pri_cli_reg_nig(const uint8_t cos,
-						  const uint8_t pri_set)
-{
-	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
-	const uint8_t nig_cos_offset = 3;
-	const uint8_t nig_pri_offset = 3;
-
-	return elink_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
-		nig_pri_offset, 4);
-}
-
-/******************************************************************************
- * Description:
- *	Returns the correct value according to COS and priority in the
- *	sp_pri_cli register for PBF.
- *
- ******************************************************************************/
-static uint64_t elink_e3b0_sp_get_pri_cli_reg_pbf(const uint8_t cos,
-						  const uint8_t pri_set)
-{
-	const uint8_t pbf_cos_offset = 0;
-	const uint8_t pbf_pri_offset = 0;
-
-	return elink_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
-		pbf_pri_offset, 3);
-}
-
-/******************************************************************************
- * Description:
- *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
- *	according to sp_pri_to_cos.(which COS has higher priority)
- *
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_sp_set_pri_cli_reg(
-					     const struct elink_params *params,
-					     uint8_t *sp_pri_to_cos)
-{
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t i = 0;
-	const uint8_t port = params->port;
-	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
-	uint64_t pri_cli_nig = 0x210;
-	uint32_t pri_cli_pbf = 0x0;
-	uint8_t pri_set = 0;
-	uint8_t pri_bitmask = 0;
-	const uint8_t max_num_of_cos = (port) ?
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-
-	uint8_t cos_bit_to_set = (1 << max_num_of_cos) - 1;
-
-	/* Set all the strict priority first */
-	for (i = 0; i < max_num_of_cos; i++) {
-		if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
-			if (sp_pri_to_cos[i] >= ELINK_DCBX_MAX_NUM_COS) {
-				ELINK_DEBUG_P0(sc,
-					   "elink_ets_e3b0_sp_set_pri_cli_reg "
-					   "invalid cos entry");
-				return ELINK_STATUS_ERROR;
-			}
-
-			pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig(
-			    sp_pri_to_cos[i], pri_set);
-
-			pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf(
-			    sp_pri_to_cos[i], pri_set);
-			pri_bitmask = 1 << sp_pri_to_cos[i];
-			/* COS is used remove it from bitmap.*/
-			if (!(pri_bitmask & cos_bit_to_set)) {
-				ELINK_DEBUG_P0(sc,
-					"elink_ets_e3b0_sp_set_pri_cli_reg "
-					"invalid There can't be two COS's with"
-					" the same strict pri");
-				return ELINK_STATUS_ERROR;
-			}
-			cos_bit_to_set &= ~pri_bitmask;
-			pri_set++;
-		}
-	}
-
-	/* Set all the Non strict priority i= COS*/
-	for (i = 0; i < max_num_of_cos; i++) {
-		pri_bitmask = 1 << i;
-		/* Check if COS was already used for SP */
-		if (pri_bitmask & cos_bit_to_set) {
-			/* COS wasn't used for SP */
-			pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig(
-			    i, pri_set);
-
-			pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf(
-			    i, pri_set);
-			/* COS is used remove it from bitmap.*/
-			cos_bit_to_set &= ~pri_bitmask;
-			pri_set++;
-		}
-	}
-
-	if (pri_set != max_num_of_cos) {
-		ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_set_pri_cli_reg not all "
-				   "entries were set");
-		return ELINK_STATUS_ERROR;
-	}
-
-	if (port) {
-		/* Only 6 usable clients*/
-		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
-		       (uint32_t)pri_cli_nig);
-
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
-	} else {
-		/* Only 9 usable clients*/
-		const uint32_t pri_cli_nig_lsb = (uint32_t)(pri_cli_nig);
-		const uint32_t pri_cli_nig_msb = (uint32_t)
-						((pri_cli_nig >> 32) & 0xF);
-
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
-		       pri_cli_nig_lsb);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
-		       pri_cli_nig_msb);
-
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
-	}
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Configure the COS to ETS according to BW and SP settings.
- ******************************************************************************/
-elink_status_t elink_ets_e3b0_config(const struct elink_params *params,
-			 const struct elink_vars *vars,
-			 struct elink_ets_params *ets_params)
-{
-	struct bnx2x_softc *sc = params->sc;
-	elink_status_t elink_status = ELINK_STATUS_OK;
-	const uint8_t port = params->port;
-	uint16_t total_bw = 0;
-	const uint32_t min_w_val_nig = elink_ets_get_min_w_val_nig(vars);
-	const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL;
-	uint8_t cos_bw_bitmap = 0;
-	uint8_t cos_sp_bitmap = 0;
-	uint8_t sp_pri_to_cos[ELINK_DCBX_MAX_NUM_COS] = {0};
-	const uint8_t max_num_of_cos = (port) ?
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-	uint8_t cos_entry = 0;
-
-	if (!CHIP_IS_E3B0(sc)) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_e3b0_disabled the chip isn't E3B0");
-		return ELINK_STATUS_ERROR;
-	}
-
-	if (ets_params->num_of_cos > max_num_of_cos) {
-		ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config the number of COS "
-				   "isn't supported");
-		return ELINK_STATUS_ERROR;
-	}
-
-	/* Prepare sp strict priority parameters*/
-	elink_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
-
-	/* Prepare BW parameters*/
-	elink_status = elink_ets_e3b0_get_total_bw(params, ets_params,
-						   &total_bw);
-	if (elink_status != ELINK_STATUS_OK) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_E3B0_config get_total_bw failed");
-		return ELINK_STATUS_ERROR;
-	}
-
-	/* Upper bound is set according to current link speed (min_w_val
-	 * should be the same for upper bound and COS credit val).
-	 */
-	elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
-	elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
-
-
-	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
-		if (elink_cos_state_bw == ets_params->cos[cos_entry].state) {
-			cos_bw_bitmap |= (1 << cos_entry);
-			/* The function also sets the BW in HW(not the mappin
-			 * yet)
-			 */
-			elink_status = elink_ets_e3b0_set_cos_bw(
-				sc, cos_entry, min_w_val_nig, min_w_val_pbf,
-				total_bw,
-				ets_params->cos[cos_entry].params.bw_params.bw,
-				 port);
-		} else if (elink_cos_state_strict ==
-			ets_params->cos[cos_entry].state){
-			cos_sp_bitmap |= (1 << cos_entry);
-
-			elink_status = elink_ets_e3b0_sp_pri_to_cos_set(
-				params,
-				sp_pri_to_cos,
-				ets_params->cos[cos_entry].params.sp_params.pri,
-				cos_entry);
-
-		} else {
-			ELINK_DEBUG_P0(sc,
-			   "elink_ets_e3b0_config cos state not valid");
-			return ELINK_STATUS_ERROR;
-		}
-		if (elink_status != ELINK_STATUS_OK) {
-			ELINK_DEBUG_P0(sc,
-			   "elink_ets_e3b0_config set cos bw failed");
-			return elink_status;
-		}
-	}
-
-	/* Set SP register (which COS has higher priority) */
-	elink_status = elink_ets_e3b0_sp_set_pri_cli_reg(params,
-							 sp_pri_to_cos);
-
-	if (elink_status != ELINK_STATUS_OK) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_E3B0_config set_pri_cli_reg failed");
-		return elink_status;
-	}
-
-	/* Set client mapping of BW and strict */
-	elink_status = elink_ets_e3b0_cli_map(params, ets_params,
-					      cos_sp_bitmap,
-					      cos_bw_bitmap);
-
-	if (elink_status != ELINK_STATUS_OK) {
-		ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config SP failed");
-		return elink_status;
-	}
-	return ELINK_STATUS_OK;
-}
-static void elink_ets_bw_limit_common(const struct elink_params *params)
-{
-	/* ETS disabled configuration */
-	struct bnx2x_softc *sc = params->sc;
-	ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration");
-	/* Defines which entries (clients) are subjected to WFQ arbitration
-	 * COS0 0x8
-	 * COS1 0x10
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-	/* Mapping between the ARB_CREDIT_WEIGHT registers and actual
-	 * client numbers (WEIGHT_0 does not actually have to represent
-	 * client 0)
-	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-
-	/* ETS mode enabled*/
-	REG_WR(sc, PBF_REG_ETS_ENABLED, 1);
-
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
-	 * entry, 4 - COS1 entry.
-	 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
-	 * bit4   bit3	  bit2     bit1	   bit0
-	 * MCP and debug are strict
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
-
-	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
-	REG_WR(sc, PBF_REG_COS0_UPPER_BOUND,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-	REG_WR(sc, PBF_REG_COS1_UPPER_BOUND,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-}
-
-void elink_ets_bw_limit(const struct elink_params *params,
-			const uint32_t cos0_bw,
-			const uint32_t cos1_bw)
-{
-	/* ETS disabled configuration*/
-	struct bnx2x_softc *sc = params->sc;
-	const uint32_t total_bw = cos0_bw + cos1_bw;
-	uint32_t cos0_credit_weight = 0;
-	uint32_t cos1_credit_weight = 0;
-
-	ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration");
-
-	if ((!total_bw) ||
-	    (!cos0_bw) ||
-	    (!cos1_bw)) {
-		ELINK_DEBUG_P0(sc, "Total BW can't be zero");
-		return;
-	}
-
-	cos0_credit_weight = (cos0_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) /
-		total_bw;
-	cos1_credit_weight = (cos1_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) /
-		total_bw;
-
-	elink_ets_bw_limit_common(params);
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
-
-	REG_WR(sc, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
-	REG_WR(sc, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
-}
-
-elink_status_t elink_ets_strict(const struct elink_params *params,
-				const uint8_t strict_cos)
-{
-	/* ETS disabled configuration*/
-	struct bnx2x_softc *sc = params->sc;
-	uint32_t val	= 0;
-
-	ELINK_DEBUG_P0(sc, "ETS enabled strict configuration");
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries,
-	 * 3 - COS0 entry, 4 - COS1 entry.
-	 *  COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
-	 *  bit4   bit3	  bit2      bit1     bit0
-	 * MCP and debug are strict
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-	/* For strict priority entries defines the number of consecutive slots
-	 * for the highest priority.
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/* ETS mode disable */
-	REG_WR(sc, PBF_REG_ETS_ENABLED, 0);
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
-
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
-
-	/* Mapping between entry  priority to client number (0,1,2 -debug and
-	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
-	 * 3bits client num.
-	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
-	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
-	 */
-	val = (!strict_cos) ? 0x2318 : 0x22E0;
-	REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
-
-	return ELINK_STATUS_OK;
-}
-
 /******************************************************************/
 /*			PFC section				  */
 /******************************************************************/
@@ -2143,56 +1218,6 @@  static void elink_update_pfc_xmac(struct elink_params *params,
 	DELAY(30);
 }
 
-static void elink_emac_get_pfc_stat(struct elink_params *params,
-				    uint32_t pfc_frames_sent[2],
-				    uint32_t pfc_frames_received[2])
-{
-	/* Read pfc statistic */
-	struct bnx2x_softc *sc = params->sc;
-	uint32_t emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-	uint32_t val_xon = 0;
-	uint32_t val_xoff = 0;
-
-	ELINK_DEBUG_P0(sc, "pfc statistic read from EMAC");
-
-	/* PFC received frames */
-	val_xoff = REG_RD(sc, emac_base +
-				EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
-	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
-	val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
-	val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
-	pfc_frames_received[0] = val_xon + val_xoff;
-
-	/* PFC received sent */
-	val_xoff = REG_RD(sc, emac_base +
-				EMAC_REG_RX_PFC_STATS_XOFF_SENT);
-	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
-	val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
-	val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
-	pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
-			 uint32_t pfc_frames_sent[2],
-			 uint32_t pfc_frames_received[2])
-{
-	/* Read pfc statistic */
-	struct bnx2x_softc *sc = params->sc;
-
-	ELINK_DEBUG_P0(sc, "pfc statistic");
-
-	if (!vars->link_up)
-		return;
-
-	if (vars->mac_type == ELINK_MAC_TYPE_EMAC) {
-		ELINK_DEBUG_P0(sc, "About to read PFC stats from EMAC");
-		elink_emac_get_pfc_stat(params, pfc_frames_sent,
-					pfc_frames_received);
-	}
-}
 /******************************************************************/
 /*			MAC/PBF section				  */
 /******************************************************************/
@@ -2877,54 +1902,6 @@  static void elink_update_pfc_bmac2(struct elink_params *params,
 	REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-/******************************************************************************
- * Description:
- *  This function is needed because NIG ARB_CREDIT_WEIGHT_X are
- *  not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
- ******************************************************************************/
-static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc,
-					   uint8_t cos_entry,
-					   uint32_t priority_mask, uint8_t port)
-{
-	uint32_t nig_reg_rx_priority_mask_add = 0;
-
-	switch (cos_entry) {
-	case 0:
-	     nig_reg_rx_priority_mask_add = (port) ?
-		 NIG_REG_P1_RX_COS0_PRIORITY_MASK :
-		 NIG_REG_P0_RX_COS0_PRIORITY_MASK;
-		break;
-	case 1:
-	    nig_reg_rx_priority_mask_add = (port) ?
-		NIG_REG_P1_RX_COS1_PRIORITY_MASK :
-		NIG_REG_P0_RX_COS1_PRIORITY_MASK;
-		break;
-	case 2:
-	    nig_reg_rx_priority_mask_add = (port) ?
-		NIG_REG_P1_RX_COS2_PRIORITY_MASK :
-		NIG_REG_P0_RX_COS2_PRIORITY_MASK;
-		break;
-	case 3:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	    nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
-		break;
-	case 4:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	    nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
-		break;
-	case 5:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	    nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
-		break;
-	}
-
-	REG_WR(sc, nig_reg_rx_priority_mask_add, priority_mask);
-
-	return ELINK_STATUS_OK;
-}
 static void elink_update_mng(struct elink_params *params, uint32_t link_status)
 {
 	struct bnx2x_softc *sc = params->sc;
@@ -2934,157 +1911,6 @@  static void elink_update_mng(struct elink_params *params, uint32_t link_status)
 			port_mb[params->port].link_status), link_status);
 }
 
-static void elink_update_pfc_nig(struct elink_params *params,
-		__rte_unused struct elink_vars *vars,
-		struct elink_nig_brb_pfc_port_params *nig_params)
-{
-	uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0;
-	uint32_t llfc_out_en = 0;
-	uint32_t llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
-	uint32_t pkt_priority_to_cos = 0;
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t port = params->port;
-
-	int set_pfc = params->feature_config_flags &
-		ELINK_FEATURE_CONFIG_PFC_ENABLED;
-	ELINK_DEBUG_P0(sc, "updating pfc nig parameters");
-
-	/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
-	 * MAC control frames (that are not pause packets)
-	 * will be forwarded to the XCM.
-	 */
-	xcm_mask = REG_RD(sc, port ? NIG_REG_LLH1_XCM_MASK :
-			  NIG_REG_LLH0_XCM_MASK);
-	/* NIG params will override non PFC params, since it's possible to
-	 * do transition from PFC to SAFC
-	 */
-	if (set_pfc) {
-		pause_enable = 0;
-		llfc_out_en = 0;
-		llfc_enable = 0;
-		if (CHIP_IS_E3(sc))
-			ppp_enable = 0;
-		else
-			ppp_enable = 1;
-		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
-				     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-		xcm_out_en = 0;
-		hwpfc_enable = 1;
-	} else  {
-		if (nig_params) {
-			llfc_out_en = nig_params->llfc_out_en;
-			llfc_enable = nig_params->llfc_enable;
-			pause_enable = nig_params->pause_enable;
-		} else  /* Default non PFC mode - PAUSE */
-			pause_enable = 1;
-
-		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
-			NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-		xcm_out_en = 1;
-	}
-
-	if (CHIP_IS_E3(sc))
-		REG_WR(sc, port ? NIG_REG_BRB1_PAUSE_IN_EN :
-		       NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
-	REG_WR(sc, port ? NIG_REG_LLFC_OUT_EN_1 :
-	       NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
-	REG_WR(sc, port ? NIG_REG_LLFC_ENABLE_1 :
-	       NIG_REG_LLFC_ENABLE_0, llfc_enable);
-	REG_WR(sc, port ? NIG_REG_PAUSE_ENABLE_1 :
-	       NIG_REG_PAUSE_ENABLE_0, pause_enable);
-
-	REG_WR(sc, port ? NIG_REG_PPP_ENABLE_1 :
-	       NIG_REG_PPP_ENABLE_0, ppp_enable);
-
-	REG_WR(sc, port ? NIG_REG_LLH1_XCM_MASK :
-	       NIG_REG_LLH0_XCM_MASK, xcm_mask);
-
-	REG_WR(sc, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
-	       NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
-
-	/* Output enable for RX_XCM # IF */
-	REG_WR(sc, port ? NIG_REG_XCM1_OUT_EN :
-	       NIG_REG_XCM0_OUT_EN, xcm_out_en);
-
-	/* HW PFC TX enable */
-	REG_WR(sc, port ? NIG_REG_P1_HWPFC_ENABLE :
-	       NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
-
-	if (nig_params) {
-		uint8_t i = 0;
-		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
-
-		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
-			elink_pfc_nig_rx_priority_mask(sc, i,
-		nig_params->rx_cos_priority_mask[i], port);
-
-		REG_WR(sc, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
-		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
-		       nig_params->llfc_high_priority_classes);
-
-		REG_WR(sc, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
-		       NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
-		       nig_params->llfc_low_priority_classes);
-	}
-	REG_WR(sc, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
-	       NIG_REG_P0_PKT_PRIORITY_TO_COS,
-	       pkt_priority_to_cos);
-}
-
-elink_status_t elink_update_pfc(struct elink_params *params,
-		      struct elink_vars *vars,
-		      struct elink_nig_brb_pfc_port_params *pfc_params)
-{
-	/* The PFC and pause are orthogonal to one another, meaning when
-	 * PFC is enabled, the pause are disabled, and when PFC is
-	 * disabled, pause are set according to the pause result.
-	 */
-	uint32_t val;
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t bmac_loopback = (params->loopback_mode == ELINK_LOOPBACK_BMAC);
-
-	if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
-		vars->link_status |= LINK_STATUS_PFC_ENABLED;
-	else
-		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
-
-	elink_update_mng(params, vars->link_status);
-
-	/* Update NIG params */
-	elink_update_pfc_nig(params, vars, pfc_params);
-
-	if (!vars->link_up)
-		return ELINK_STATUS_OK;
-
-	ELINK_DEBUG_P0(sc, "About to update PFC in BMAC");
-
-	if (CHIP_IS_E3(sc)) {
-		if (vars->mac_type == ELINK_MAC_TYPE_XMAC)
-			elink_update_pfc_xmac(params, vars, 0);
-	} else {
-		val = REG_RD(sc, MISC_REG_RESET_REG_2);
-		if ((val &
-		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
-		    == 0) {
-			ELINK_DEBUG_P0(sc, "About to update PFC in EMAC");
-			elink_emac_enable(params, vars, 0);
-			return ELINK_STATUS_OK;
-		}
-		if (CHIP_IS_E2(sc))
-			elink_update_pfc_bmac2(params, vars, bmac_loopback);
-		else
-			elink_update_pfc_bmac1(params, vars);
-
-		val = 0;
-		if ((params->feature_config_flags &
-		     ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
-		    (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
-			val = 1;
-		REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port * 4, val);
-	}
-	return ELINK_STATUS_OK;
-}
-
 static elink_status_t elink_bmac1_enable(struct elink_params *params,
 			      struct elink_vars *vars,
 			      uint8_t is_lb)
@@ -4030,40 +2856,6 @@  static void elink_cl45_read_and_write(struct bnx2x_softc *sc,
 	elink_cl45_write(sc, phy, devad, reg, val & and_val);
 }
 
-elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr,
-		   uint8_t devad, uint16_t reg, uint16_t *ret_val)
-{
-	uint8_t phy_index;
-	/* Probe for the phy according to the given phy_addr, and execute
-	 * the read request on it
-	 */
-	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
-		if (params->phy[phy_index].addr == phy_addr) {
-			return elink_cl45_read(params->sc,
-					       &params->phy[phy_index], devad,
-					       reg, ret_val);
-		}
-	}
-	return ELINK_STATUS_ERROR;
-}
-
-elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr,
-		    uint8_t devad, uint16_t reg, uint16_t val)
-{
-	uint8_t phy_index;
-	/* Probe for the phy according to the given phy_addr, and execute
-	 * the write request on it
-	 */
-	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
-		if (params->phy[phy_index].addr == phy_addr) {
-			return elink_cl45_write(params->sc,
-						&params->phy[phy_index], devad,
-						reg, val);
-		}
-	}
-	return ELINK_STATUS_ERROR;
-}
-
 static uint8_t elink_get_warpcore_lane(__rte_unused struct elink_phy *phy,
 				  struct elink_params *params)
 {
@@ -7108,47 +5900,6 @@  static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
 	return ELINK_STATUS_OK;
 }
 
-elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params,
-				 uint8_t *version,
-				 uint16_t len)
-{
-	struct bnx2x_softc *sc;
-	uint32_t spirom_ver = 0;
-	elink_status_t status = ELINK_STATUS_OK;
-	uint8_t *ver_p = version;
-	uint16_t remain_len = len;
-	if (version == NULL || params == NULL)
-		return ELINK_STATUS_ERROR;
-	sc = params->sc;
-
-	/* Extract first external phy*/
-	version[0] = '\0';
-	spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY1].ver_addr);
-
-	if (params->phy[ELINK_EXT_PHY1].format_fw_ver) {
-		status |= params->phy[ELINK_EXT_PHY1].format_fw_ver(spirom_ver,
-							      ver_p,
-							      &remain_len);
-		ver_p += (len - remain_len);
-	}
-	if ((params->num_phys == ELINK_MAX_PHYS) &&
-	    (params->phy[ELINK_EXT_PHY2].ver_addr != 0)) {
-		spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY2].ver_addr);
-		if (params->phy[ELINK_EXT_PHY2].format_fw_ver) {
-			*ver_p = '/';
-			ver_p++;
-			remain_len--;
-			status |= params->phy[ELINK_EXT_PHY2].format_fw_ver(
-				spirom_ver,
-				ver_p,
-				&remain_len);
-			ver_p = version + (len - remain_len);
-		}
-	}
-	*ver_p = '\0';
-	return status;
-}
-
 static void elink_set_xgxs_loopback(struct elink_phy *phy,
 				    struct elink_params *params)
 {
@@ -7360,99 +6111,6 @@  elink_status_t elink_set_led(struct elink_params *params,
 
 }
 
-/* This function comes to reflect the actual link state read DIRECTLY from the
- * HW
- */
-elink_status_t elink_test_link(struct elink_params *params,
-			       __rte_unused struct elink_vars *vars,
-		    uint8_t is_serdes)
-{
-	struct bnx2x_softc *sc = params->sc;
-	uint16_t gp_status = 0, phy_index = 0;
-	uint8_t ext_phy_link_up = 0, serdes_phy_type;
-	struct elink_vars temp_vars;
-	struct elink_phy *int_phy = &params->phy[ELINK_INT_PHY];
-#ifdef ELINK_INCLUDE_FPGA
-	if (CHIP_REV_IS_FPGA(sc))
-		return ELINK_STATUS_OK;
-#endif
-#ifdef ELINK_INCLUDE_EMUL
-	if (CHIP_REV_IS_EMUL(sc))
-		return ELINK_STATUS_OK;
-#endif
-
-	if (CHIP_IS_E3(sc)) {
-		uint16_t link_up;
-		if (params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)]
-		    > ELINK_SPEED_10000) {
-			/* Check 20G link */
-			elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
-					1, &link_up);
-			elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
-					1, &link_up);
-			link_up &= (1 << 2);
-		} else {
-			/* Check 10G link and below*/
-			uint8_t lane = elink_get_warpcore_lane(int_phy, params);
-			elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
-					MDIO_WC_REG_GP2_STATUS_GP_2_1,
-					&gp_status);
-			gp_status = ((gp_status >> 8) & 0xf) |
-				((gp_status >> 12) & 0xf);
-			link_up = gp_status & (1 << lane);
-		}
-		if (!link_up)
-			return ELINK_STATUS_NO_LINK;
-	} else {
-		CL22_RD_OVER_CL45(sc, int_phy,
-			  MDIO_REG_BANK_GP_STATUS,
-			  MDIO_GP_STATUS_TOP_AN_STATUS1,
-			  &gp_status);
-	/* Link is up only if both local phy and external phy are up */
-	if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
-		return ELINK_STATUS_NO_LINK;
-	}
-	/* In XGXS loopback mode, do not check external PHY */
-	if (params->loopback_mode == ELINK_LOOPBACK_XGXS)
-		return ELINK_STATUS_OK;
-
-	switch (params->num_phys) {
-	case 1:
-		/* No external PHY */
-		return ELINK_STATUS_OK;
-	case 2:
-		ext_phy_link_up = params->phy[ELINK_EXT_PHY1].read_status(
-			&params->phy[ELINK_EXT_PHY1],
-			params, &temp_vars);
-		break;
-	case 3: /* Dual Media */
-		for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
-		      phy_index++) {
-			serdes_phy_type = ((params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_SFPP_10G_FIBER) ||
-					   (params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_SFP_1G_FIBER) ||
-					   (params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_XFP_FIBER) ||
-					   (params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_DA_TWINAX));
-
-			if (is_serdes != serdes_phy_type)
-				continue;
-			if (params->phy[phy_index].read_status) {
-				ext_phy_link_up |=
-					params->phy[phy_index].read_status(
-						&params->phy[phy_index],
-						params, &temp_vars);
-			}
-		}
-		break;
-	}
-	if (ext_phy_link_up)
-		return ELINK_STATUS_OK;
-	return ELINK_STATUS_NO_LINK;
-}
-
 static elink_status_t elink_link_initialize(struct elink_params *params,
 				 struct elink_vars *vars)
 {
@@ -12443,31 +11101,6 @@  static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t *str,
 	return ELINK_STATUS_OK;
 }
 
-void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy)
-{
-	uint16_t val, cnt;
-
-	elink_cl45_read(sc, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_7101_RESET, &val);
-
-	for (cnt = 0; cnt < 10; cnt++) {
-		DELAY(1000 * 50);
-		/* Writes a self-clearing reset */
-		elink_cl45_write(sc, phy,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_7101_RESET,
-				 (val | (1 << 15)));
-		/* Wait for clear */
-		elink_cl45_read(sc, phy,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_7101_RESET, &val);
-
-		if ((val & (1 << 15)) == 0)
-			break;
-	}
-}
-
 static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy,
 				struct elink_params *params) {
 	/* Low power mode is controlled by GPIO 2 */
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index dd70ac6c66..f5cdf7440b 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -515,26 +515,10 @@  elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *v
 /* elink_link_update should be called upon link interrupt */
 elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars);
 
-/* use the following phy functions to read/write from external_phy
- * In order to use it to read/write internal phy registers, use
- * ELINK_DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
- * the register
- */
-elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr,
-		   uint8_t devad, uint16_t reg, uint16_t *ret_val);
-
-elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr,
-		    uint8_t devad, uint16_t reg, uint16_t val);
-
 /* Reads the link_status from the shmem,
    and update the link vars accordingly */
 void elink_link_status_update(struct elink_params *input,
 			    struct elink_vars *output);
-/* returns string representing the fw_version of the external phy */
-elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params,
-				 uint8_t *version,
-				 uint16_t len);
-
 /* Set/Unset the led
    Basically, the CLC takes care of the led for the link, but in case one needs
    to set/unset the led unnaturally, set the "mode" to ELINK_LED_MODE_OPER to
@@ -551,14 +535,6 @@  elink_status_t elink_set_led(struct elink_params *params,
  */
 void elink_handle_module_detect_int(struct elink_params *params);
 
-/* Get the actual link status. In case it returns ELINK_STATUS_OK, link is up,
- * otherwise link is down
- */
-elink_status_t elink_test_link(struct elink_params *params,
-		    struct elink_vars *vars,
-		    uint8_t is_serdes);
-
-
 /* One-time initialization for external phy after power up */
 elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[],
 			  uint32_t shmem2_base_path[], uint32_t chip_id,
@@ -567,9 +543,6 @@  elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base
 /* Reset the external PHY using GPIO */
 void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port);
 
-/* Reset the external of SFX7101 */
-void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy);
-
 /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
 elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
 				 struct elink_params *params, uint8_t dev_addr,
@@ -650,36 +623,6 @@  struct elink_ets_params {
 	struct elink_ets_cos_params cos[ELINK_DCBX_MAX_NUM_COS];
 };
 
-/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
- * when link is already up
- */
-elink_status_t elink_update_pfc(struct elink_params *params,
-		      struct elink_vars *vars,
-		      struct elink_nig_brb_pfc_port_params *pfc_params);
-
-
-/* Used to configure the ETS to disable */
-elink_status_t elink_ets_disabled(struct elink_params *params,
-		       struct elink_vars *vars);
-
-/* Used to configure the ETS to BW limited */
-void elink_ets_bw_limit(const struct elink_params *params,
-			const uint32_t cos0_bw,
-			const uint32_t cos1_bw);
-
-/* Used to configure the ETS to strict */
-elink_status_t elink_ets_strict(const struct elink_params *params,
-				const uint8_t strict_cos);
-
-
-/*  Configure the COS to ETS according to BW and SP settings.*/
-elink_status_t elink_ets_e3b0_config(const struct elink_params *params,
-			 const struct elink_vars *vars,
-			 struct elink_ets_params *ets_params);
-/* Read pfc statistic*/
-void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
-			 uint32_t pfc_frames_sent[2],
-			 uint32_t pfc_frames_received[2]);
 void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
 			    uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base,
 			    uint8_t port);
diff --git a/drivers/net/bnxt/tf_core/bitalloc.c b/drivers/net/bnxt/tf_core/bitalloc.c
index 918cabf19c..cdb13607d5 100644
--- a/drivers/net/bnxt/tf_core/bitalloc.c
+++ b/drivers/net/bnxt/tf_core/bitalloc.c
@@ -227,62 +227,6 @@  ba_alloc_reverse(struct bitalloc *pool)
 	return ba_alloc_reverse_helper(pool, 0, 1, 32, 0, &clear);
 }
 
-static int
-ba_alloc_index_helper(struct bitalloc *pool,
-		      int              offset,
-		      int              words,
-		      unsigned int     size,
-		      int             *index,
-		      int             *clear)
-{
-	bitalloc_word_t *storage = &pool->storage[offset];
-	int       loc;
-	int       r;
-
-	if (pool->size > size)
-		r = ba_alloc_index_helper(pool,
-					  offset + words + 1,
-					  storage[words],
-					  size * 32,
-					  index,
-					  clear);
-	else
-		r = 1; /* Check if already allocated */
-
-	loc = (*index % 32);
-	*index = *index / 32;
-
-	if (r == 1) {
-		r = (storage[*index] & (1 << loc)) ? 0 : -1;
-		if (r == 0) {
-			*clear = 1;
-			pool->free_count--;
-		}
-	}
-
-	if (*clear) {
-		storage[*index] &= ~(1 << loc);
-		*clear = (storage[*index] == 0);
-	}
-
-	return r;
-}
-
-int
-ba_alloc_index(struct bitalloc *pool, int index)
-{
-	int clear = 0;
-	int index_copy = index;
-
-	if (index < 0 || index >= (int)pool->size)
-		return -1;
-
-	if (ba_alloc_index_helper(pool, 0, 1, 32, &index_copy, &clear) >= 0)
-		return index;
-	else
-		return -1;
-}
-
 static int
 ba_inuse_helper(struct bitalloc *pool,
 		int              offset,
@@ -365,107 +309,7 @@  ba_free(struct bitalloc *pool, int index)
 	return ba_free_helper(pool, 0, 1, 32, &index);
 }
 
-int
-ba_inuse_free(struct bitalloc *pool, int index)
-{
-	if (index < 0 || index >= (int)pool->size)
-		return -1;
-
-	return ba_free_helper(pool, 0, 1, 32, &index) + 1;
-}
-
-int
-ba_free_count(struct bitalloc *pool)
-{
-	return (int)pool->free_count;
-}
-
 int ba_inuse_count(struct bitalloc *pool)
 {
 	return (int)(pool->size) - (int)(pool->free_count);
 }
-
-static int
-ba_find_next_helper(struct bitalloc *pool,
-		    int              offset,
-		    int              words,
-		    unsigned int     size,
-		    int             *index,
-		    int              free)
-{
-	bitalloc_word_t *storage = &pool->storage[offset];
-	int       loc, r, bottom = 0;
-
-	if (pool->size > size)
-		r = ba_find_next_helper(pool,
-					offset + words + 1,
-					storage[words],
-					size * 32,
-					index,
-					free);
-	else
-		bottom = 1; /* Bottom of tree */
-
-	loc = (*index % 32);
-	*index = *index / 32;
-
-	if (bottom) {
-		int bit_index = *index * 32;
-
-		loc = ba_ffs(~storage[*index] & ((bitalloc_word_t)-1 << loc));
-		if (loc > 0) {
-			loc--;
-			r = (bit_index + loc);
-			if (r >= (int)pool->size)
-				r = -1;
-		} else {
-			/* Loop over array at bottom of tree */
-			r = -1;
-			bit_index += 32;
-			*index = *index + 1;
-			while ((int)pool->size > bit_index) {
-				loc = ba_ffs(~storage[*index]);
-
-				if (loc > 0) {
-					loc--;
-					r = (bit_index + loc);
-					if (r >= (int)pool->size)
-						r = -1;
-					break;
-				}
-				bit_index += 32;
-				*index = *index + 1;
-			}
-		}
-	}
-
-	if (r >= 0 && (free)) {
-		if (bottom)
-			pool->free_count++;
-		storage[*index] |= (1 << loc);
-	}
-
-	return r;
-}
-
-int
-ba_find_next_inuse(struct bitalloc *pool, int index)
-{
-	if (index < 0 ||
-	    index >= (int)pool->size ||
-	    pool->free_count == pool->size)
-		return -1;
-
-	return ba_find_next_helper(pool, 0, 1, 32, &index, 0);
-}
-
-int
-ba_find_next_inuse_free(struct bitalloc *pool, int index)
-{
-	if (index < 0 ||
-	    index >= (int)pool->size ||
-	    pool->free_count == pool->size)
-		return -1;
-
-	return ba_find_next_helper(pool, 0, 1, 32, &index, 1);
-}
diff --git a/drivers/net/bnxt/tf_core/bitalloc.h b/drivers/net/bnxt/tf_core/bitalloc.h
index 2825bb37e5..9ac6eadd81 100644
--- a/drivers/net/bnxt/tf_core/bitalloc.h
+++ b/drivers/net/bnxt/tf_core/bitalloc.h
@@ -70,7 +70,6 @@  int ba_init(struct bitalloc *pool, int size);
  * Returns -1 on failure, or index of allocated entry
  */
 int ba_alloc(struct bitalloc *pool);
-int ba_alloc_index(struct bitalloc *pool, int index);
 
 /**
  * Returns -1 on failure, or index of allocated entry
@@ -85,37 +84,12 @@  int ba_alloc_reverse(struct bitalloc *pool);
  */
 int ba_inuse(struct bitalloc *pool, int index);
 
-/**
- * Variant of ba_inuse that frees the index if it is allocated, same
- * return codes as ba_inuse
- */
-int ba_inuse_free(struct bitalloc *pool, int index);
-
-/**
- * Find next index that is in use, start checking at index 'idx'
- *
- * Returns next index that is in use on success, or
- * -1 if no in use index is found
- */
-int ba_find_next_inuse(struct bitalloc *pool, int idx);
-
-/**
- * Variant of ba_find_next_inuse that also frees the next in use index,
- * same return codes as ba_find_next_inuse
- */
-int ba_find_next_inuse_free(struct bitalloc *pool, int idx);
-
 /**
  * Multiple freeing of the same index has no negative side effects,
  * but will return -1.  returns -1 on failure, 0 on success.
  */
 int ba_free(struct bitalloc *pool, int index);
 
-/**
- * Returns the pool's free count
- */
-int ba_free_count(struct bitalloc *pool);
-
 /**
  * Returns the pool's in use count
  */
diff --git a/drivers/net/bnxt/tf_core/stack.c b/drivers/net/bnxt/tf_core/stack.c
index 954806377e..bda415e82e 100644
--- a/drivers/net/bnxt/tf_core/stack.c
+++ b/drivers/net/bnxt/tf_core/stack.c
@@ -88,28 +88,3 @@  stack_pop(struct stack *st, uint32_t *x)
 
 	return 0;
 }
-
-/* Dump the stack
- */
-void stack_dump(struct stack *st)
-{
-	int i, j;
-
-	printf("top=%d\n", st->top);
-	printf("max=%d\n", st->max);
-
-	if (st->top == -1) {
-		printf("stack is empty\n");
-		return;
-	}
-
-	for (i = 0; i < st->max + 7 / 8; i++) {
-		printf("item[%d] 0x%08x", i, st->items[i]);
-
-		for (j = 0; j < 7; j++) {
-			if (i++ < st->max - 1)
-				printf(" 0x%08x", st->items[i]);
-		}
-		printf("\n");
-	}
-}
diff --git a/drivers/net/bnxt/tf_core/stack.h b/drivers/net/bnxt/tf_core/stack.h
index 6732e03132..7e2f5dfec6 100644
--- a/drivers/net/bnxt/tf_core/stack.h
+++ b/drivers/net/bnxt/tf_core/stack.h
@@ -102,16 +102,4 @@  int stack_push(struct stack *st, uint32_t x);
  */
 int stack_pop(struct stack *st, uint32_t *x);
 
-/** Dump stack information
- *
- * Warning: Don't use for large stacks due to prints
- *
- * [in] st
- *   pointer to the stack
- *
- * return
- *    none
- */
-void stack_dump(struct stack *st);
-
 #endif /* _STACK_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 0f49a00256..a4276d1bcc 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -90,69 +90,6 @@  tf_open_session(struct tf *tfp,
 	return 0;
 }
 
-int
-tf_attach_session(struct tf *tfp,
-		  struct tf_attach_session_parms *parms)
-{
-	int rc;
-	unsigned int domain, bus, slot, device;
-	struct tf_session_attach_session_parms aparms;
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Verify control channel */
-	rc = sscanf(parms->ctrl_chan_name,
-		    "%x:%x:%x.%d",
-		    &domain,
-		    &bus,
-		    &slot,
-		    &device);
-	if (rc != 4) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to scan device ctrl_chan_name\n");
-		return -EINVAL;
-	}
-
-	/* Verify 'attach' channel */
-	rc = sscanf(parms->attach_chan_name,
-		    "%x:%x:%x.%d",
-		    &domain,
-		    &bus,
-		    &slot,
-		    &device);
-	if (rc != 4) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to scan device attach_chan_name\n");
-		return -EINVAL;
-	}
-
-	/* Prepare return value of session_id, using ctrl_chan_name
-	 * device values as it becomes the session id.
-	 */
-	parms->session_id.internal.domain = domain;
-	parms->session_id.internal.bus = bus;
-	parms->session_id.internal.device = device;
-	aparms.attach_cfg = parms;
-	rc = tf_session_attach_session(tfp,
-				       &aparms);
-	/* Logging handled by dev_bind */
-	if (rc)
-		return rc;
-
-	TFP_DRV_LOG(INFO,
-		    "Attached to session, session_id:%d\n",
-		    parms->session_id.id);
-
-	TFP_DRV_LOG(INFO,
-		    "domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
-		    parms->session_id.internal.domain,
-		    parms->session_id.internal.bus,
-		    parms->session_id.internal.device,
-		    parms->session_id.internal.fw_session_id);
-
-	return rc;
-}
-
 int
 tf_close_session(struct tf *tfp)
 {
@@ -792,14 +729,6 @@  tf_set_tcam_entry(struct tf *tfp,
 	return 0;
 }
 
-int
-tf_get_tcam_entry(struct tf *tfp __rte_unused,
-		  struct tf_get_tcam_entry_parms *parms __rte_unused)
-{
-	TF_CHECK_PARMS2(tfp, parms);
-	return -EOPNOTSUPP;
-}
-
 int
 tf_free_tcam_entry(struct tf *tfp,
 		   struct tf_free_tcam_entry_parms *parms)
@@ -1228,80 +1157,6 @@  tf_get_tbl_entry(struct tf *tfp,
 	return rc;
 }
 
-int
-tf_bulk_get_tbl_entry(struct tf *tfp,
-		 struct tf_bulk_get_tbl_entry_parms *parms)
-{
-	int rc = 0;
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	struct tf_tbl_get_bulk_parms bparms;
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Can't do static initialization due to UT enum check */
-	memset(&bparms, 0, sizeof(struct tf_tbl_get_bulk_parms));
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup session, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup device, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (parms->type == TF_TBL_TYPE_EXT) {
-		/* Not supported, yet */
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s, External table type not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-
-		return rc;
-	}
-
-	/* Internal table type processing */
-
-	if (dev->ops->tf_dev_get_bulk_tbl == NULL) {
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Operation not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return -EOPNOTSUPP;
-	}
-
-	bparms.dir = parms->dir;
-	bparms.type = parms->type;
-	bparms.starting_idx = parms->starting_idx;
-	bparms.num_entries = parms->num_entries;
-	bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;
-	bparms.physical_mem_addr = parms->physical_mem_addr;
-	rc = dev->ops->tf_dev_get_bulk_tbl(tfp, &bparms);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Table get bulk failed, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	return rc;
-}
-
 int
 tf_alloc_tbl_scope(struct tf *tfp,
 		   struct tf_alloc_tbl_scope_parms *parms)
@@ -1340,44 +1195,6 @@  tf_alloc_tbl_scope(struct tf *tfp,
 
 	return rc;
 }
-int
-tf_map_tbl_scope(struct tf *tfp,
-		   struct tf_map_tbl_scope_parms *parms)
-{
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	int rc;
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to lookup session, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to lookup device, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (dev->ops->tf_dev_map_tbl_scope != NULL) {
-		rc = dev->ops->tf_dev_map_tbl_scope(tfp, parms);
-	} else {
-		TFP_DRV_LOG(ERR,
-			    "Map table scope not supported by device\n");
-		return -EINVAL;
-	}
-
-	return rc;
-}
 
 int
 tf_free_tbl_scope(struct tf *tfp,
@@ -1475,61 +1292,3 @@  tf_set_if_tbl_entry(struct tf *tfp,
 
 	return 0;
 }
-
-int
-tf_get_if_tbl_entry(struct tf *tfp,
-		    struct tf_get_if_tbl_entry_parms *parms)
-{
-	int rc;
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	struct tf_if_tbl_get_parms gparms = { 0 };
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup session, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup device, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (dev->ops->tf_dev_get_if_tbl == NULL) {
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Operation not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	gparms.dir = parms->dir;
-	gparms.type = parms->type;
-	gparms.idx = parms->idx;
-	gparms.data_sz_in_bytes = parms->data_sz_in_bytes;
-	gparms.data = parms->data;
-
-	rc = dev->ops->tf_dev_get_if_tbl(tfp, &gparms);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: If_tbl get failed, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	return 0;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index fa8ab52af1..2d556be752 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -657,27 +657,6 @@  struct tf_attach_session_parms {
 	union tf_session_id session_id;
 };
 
-/**
- * Experimental
- *
- * Allows a 2nd application instance to attach to an existing
- * session. Used when a session is to be shared between two processes.
- *
- * Attach will increment a ref count as to manage the shared session data.
- *
- * [in] tfp
- *   Pointer to TF handle
- *
- * [in] parms
- *   Pointer to attach parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_attach_session(struct tf *tfp,
-		      struct tf_attach_session_parms *parms);
-
 /**
  * Closes an existing session client or the session it self. The
  * session client is default closed and if the session reference count
@@ -961,25 +940,6 @@  struct tf_map_tbl_scope_parms {
 int tf_alloc_tbl_scope(struct tf *tfp,
 		       struct tf_alloc_tbl_scope_parms *parms);
 
-/**
- * map a table scope (legacy device only Wh+/SR)
- *
- * Map a table scope to one or more partition interfaces (parifs).
- * The parif can be remapped in the L2 context lookup for legacy devices.  This
- * API allows a number of parifs to be mapped to the same table scope.  On
- * legacy devices a table scope identifies one of 16 sets of EEM table base
- * addresses and is associated with a PF communication channel.  The associated
- * PF must be configured for the table scope to operate.
- *
- * An L2 context TCAM lookup returns a remapped parif value used to
- * index into the set of 16 parif_to_pf registers which are used to map to one
- * of the 16 table scopes.  This API allows the user to map the parifs in the
- * mask to the previously allocated table scope (EEM table).
-
- * Returns success or failure code.
- */
-int tf_map_tbl_scope(struct tf *tfp,
-		      struct tf_map_tbl_scope_parms *parms);
 /**
  * free a table scope
  *
@@ -1256,18 +1216,6 @@  struct tf_get_tcam_entry_parms {
 	uint16_t result_sz_in_bits;
 };
 
-/**
- * get TCAM entry
- *
- * Program a TCAM table entry for a TruFlow session.
- *
- * If the entry has not been allocated, an error will be returned.
- *
- * Returns success or failure code.
- */
-int tf_get_tcam_entry(struct tf *tfp,
-		      struct tf_get_tcam_entry_parms *parms);
-
 /**
  * tf_free_tcam_entry parameter definition
  */
@@ -1638,22 +1586,6 @@  struct tf_bulk_get_tbl_entry_parms {
 	uint64_t physical_mem_addr;
 };
 
-/**
- * Bulk get index table entry
- *
- * Used to retrieve a set of index table entries.
- *
- * Entries within the range may not have been allocated using
- * tf_alloc_tbl_entry() at the time of access. But the range must
- * be within the bounds determined from tf_open_session() for the
- * given table type.  Currently, this is only used for collecting statistics.
- *
- * Returns success or failure code. Failure will be returned if the
- * provided data buffer is too small for the data type requested.
- */
-int tf_bulk_get_tbl_entry(struct tf *tfp,
-			  struct tf_bulk_get_tbl_entry_parms *parms);
-
 /**
  * @page exact_match Exact Match Table
  *
@@ -2066,17 +1998,4 @@  struct tf_get_if_tbl_entry_parms {
 	uint32_t idx;
 };
 
-/**
- * get interface table entry
- *
- * Used to retrieve an interface table entry.
- *
- * Reads the interface table entry value
- *
- * Returns success or failure code. Failure will be returned if the
- * provided data buffer is too small for the data type requested.
- */
-int tf_get_if_tbl_entry(struct tf *tfp,
-			struct tf_get_if_tbl_entry_parms *parms);
-
 #endif /* _TF_CORE_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c
index 5615eedbbe..e4fe5fe055 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.c
+++ b/drivers/net/bnxt/tf_core/tf_msg.c
@@ -148,14 +148,6 @@  tf_msg_session_open(struct tf *tfp,
 	return rc;
 }
 
-int
-tf_msg_session_attach(struct tf *tfp __rte_unused,
-		      char *ctrl_chan_name __rte_unused,
-		      uint8_t tf_fw_session_id __rte_unused)
-{
-	return -1;
-}
-
 int
 tf_msg_session_client_register(struct tf *tfp,
 			       char *ctrl_channel_name,
@@ -266,38 +258,6 @@  tf_msg_session_close(struct tf *tfp)
 	return rc;
 }
 
-int
-tf_msg_session_qcfg(struct tf *tfp)
-{
-	int rc;
-	struct hwrm_tf_session_qcfg_input req = { 0 };
-	struct hwrm_tf_session_qcfg_output resp = { 0 };
-	struct tfp_send_msg_parms parms = { 0 };
-	uint8_t fw_session_id;
-
-	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "Unable to lookup FW id, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Populate the request */
-	req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
-
-	parms.tf_type = HWRM_TF_SESSION_QCFG,
-	parms.req_data = (uint32_t *)&req;
-	parms.req_size = sizeof(req);
-	parms.resp_data = (uint32_t *)&resp;
-	parms.resp_size = sizeof(resp);
-	parms.mailbox = TF_KONG_MB;
-
-	rc = tfp_send_msg_direct(tfp,
-				 &parms);
-	return rc;
-}
-
 int
 tf_msg_session_resc_qcaps(struct tf *tfp,
 			  enum tf_dir dir,
diff --git a/drivers/net/bnxt/tf_core/tf_msg.h b/drivers/net/bnxt/tf_core/tf_msg.h
index 72bf850487..4483017ada 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.h
+++ b/drivers/net/bnxt/tf_core/tf_msg.h
@@ -38,26 +38,6 @@  int tf_msg_session_open(struct tf *tfp,
 			uint8_t *fw_session_id,
 			uint8_t *fw_session_client_id);
 
-/**
- * Sends session close request to Firmware
- *
- * [in] session
- *   Pointer to session handle
- *
- * [in] ctrl_chan_name
- *   PCI name of the control channel
- *
- * [in] fw_session_id
- *   Pointer to the fw_session_id that is assigned to the session at
- *   time of session open
- *
- * Returns:
- *   0 on Success else internal Truflow error
- */
-int tf_msg_session_attach(struct tf *tfp,
-			  char *ctrl_channel_name,
-			  uint8_t tf_fw_session_id);
-
 /**
  * Sends session client register request to Firmware
  *
@@ -105,17 +85,6 @@  int tf_msg_session_client_unregister(struct tf *tfp,
  */
 int tf_msg_session_close(struct tf *tfp);
 
-/**
- * Sends session query config request to TF Firmware
- *
- * [in] session
- *   Pointer to session handle
- *
- * Returns:
- *   0 on Success else internal Truflow error
- */
-int tf_msg_session_qcfg(struct tf *tfp);
-
 /**
  * Sends session HW resource query capability request to TF Firmware
  *
diff --git a/drivers/net/bnxt/tf_core/tf_session.c b/drivers/net/bnxt/tf_core/tf_session.c
index c95c4bdbd3..912b2837f9 100644
--- a/drivers/net/bnxt/tf_core/tf_session.c
+++ b/drivers/net/bnxt/tf_core/tf_session.c
@@ -749,36 +749,3 @@  tf_session_get_fw_session_id(struct tf *tfp,
 
 	return 0;
 }
-
-int
-tf_session_get_session_id(struct tf *tfp,
-			  union tf_session_id *session_id)
-{
-	int rc;
-	struct tf_session *tfs = NULL;
-
-	if (tfp->session == NULL) {
-		rc = -EINVAL;
-		TFP_DRV_LOG(ERR,
-			    "Session not created, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (session_id == NULL) {
-		rc = -EINVAL;
-		TFP_DRV_LOG(ERR,
-			    "Invalid Argument(s), rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Using internal version as session client may not exist yet */
-	rc = tf_session_get_session_internal(tfp, &tfs);
-	if (rc)
-		return rc;
-
-	*session_id = tfs->session_id;
-
-	return 0;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h
index 6a5c894033..37d4703cc1 100644
--- a/drivers/net/bnxt/tf_core/tf_session.h
+++ b/drivers/net/bnxt/tf_core/tf_session.h
@@ -394,20 +394,4 @@  int tf_session_get_device(struct tf_session *tfs,
 int tf_session_get_fw_session_id(struct tf *tfp,
 				 uint8_t *fw_session_id);
 
-/**
- * Looks up the Session id the requested TF handle.
- *
- * [in] tfp
- *   Pointer to TF handle
- *
- * [out] session_id
- *   Pointer to the session_id
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_session_get_session_id(struct tf *tfp,
-			      union tf_session_id *session_id);
-
 #endif /* _TF_SESSION_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index a4207eb3ab..2caf4f8747 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -637,59 +637,6 @@  tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
 	return 0;
 }
 
-int
-tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
-{
-	uint16_t idx;
-	struct tf_shadow_tbl_ctxt *ctxt;
-	struct tf_tbl_set_parms *sparms;
-	struct tf_shadow_tbl_db *shadow_db;
-	struct tf_shadow_tbl_shadow_result_entry *sr_entry;
-
-	if (!parms || !parms->sparms) {
-		TFP_DRV_LOG(ERR, "Null parms\n");
-		return -EINVAL;
-	}
-
-	sparms = parms->sparms;
-	if (!sparms->data || !sparms->data_sz_in_bytes) {
-		TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type));
-		return -EINVAL;
-	}
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
-	if (!ctxt) {
-		/* We aren't tracking this table, so return success */
-		TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
-			    tf_tbl_type_2_str(sparms->type));
-		return 0;
-	}
-
-	idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
-	if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
-		TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type),
-			    sparms->idx);
-		return -EINVAL;
-	}
-
-	/* Write the result table, the key/hash has been written already */
-	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
-
-	/*
-	 * If the handle is not valid, the bind was never called.  We aren't
-	 * tracking this entry.
-	 */
-	if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
-		return 0;
-
-	return 0;
-}
-
 int
 tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
 {
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
index 96a34309b2..bbd8cfd3a9 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
@@ -225,20 +225,6 @@  int tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms);
  */
 int tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms);
 
-/**
- * Inserts an element into the Shadow table DB. Will fail if the
- * elements ref_count is different from 0. Ref_count after insert will
- * be incremented.
- *
- * [in] parms
- *   Pointer to insert parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms);
-
 /**
  * Removes an element from the Shadow table DB. Will fail if the
  * elements ref_count is 0. Ref_count after removal will be
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index 7679d09eea..e3fec46926 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -683,10 +683,3 @@  tf_tcam_set(struct tf *tfp __rte_unused,
 
 	return 0;
 }
-
-int
-tf_tcam_get(struct tf *tfp __rte_unused,
-	    struct tf_tcam_get_parms *parms __rte_unused)
-{
-	return 0;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index 280f138dd3..9614cf52c7 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -355,21 +355,4 @@  int tf_tcam_alloc_search(struct tf *tfp,
 int tf_tcam_set(struct tf *tfp,
 		struct tf_tcam_set_parms *parms);
 
-/**
- * Retrieves the requested element by sending a firmware request to get
- * the element.
- *
- * [in] tfp
- *   Pointer to TF handle, used for HCAPI communication
- *
- * [in] parms
- *   Pointer to parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_tcam_get(struct tf *tfp,
-		struct tf_tcam_get_parms *parms);
-
 #endif /* _TF_TCAM_H */
diff --git a/drivers/net/bnxt/tf_core/tfp.c b/drivers/net/bnxt/tf_core/tfp.c
index 0f6d63cc00..49ca034241 100644
--- a/drivers/net/bnxt/tf_core/tfp.c
+++ b/drivers/net/bnxt/tf_core/tfp.c
@@ -135,33 +135,6 @@  tfp_memcpy(void *dest, void *src, size_t n)
 	rte_memcpy(dest, src, n);
 }
 
-/**
- * Used to initialize portable spin lock
- */
-void
-tfp_spinlock_init(struct tfp_spinlock_parms *parms)
-{
-	rte_spinlock_init(&parms->slock);
-}
-
-/**
- * Used to lock portable spin lock
- */
-void
-tfp_spinlock_lock(struct tfp_spinlock_parms *parms)
-{
-	rte_spinlock_lock(&parms->slock);
-}
-
-/**
- * Used to unlock portable spin lock
- */
-void
-tfp_spinlock_unlock(struct tfp_spinlock_parms *parms)
-{
-	rte_spinlock_unlock(&parms->slock);
-}
-
 int
 tfp_get_fid(struct tf *tfp, uint16_t *fw_fid)
 {
diff --git a/drivers/net/bnxt/tf_core/tfp.h b/drivers/net/bnxt/tf_core/tfp.h
index 551b9c569f..fc2409371a 100644
--- a/drivers/net/bnxt/tf_core/tfp.h
+++ b/drivers/net/bnxt/tf_core/tfp.h
@@ -202,10 +202,6 @@  int tfp_calloc(struct tfp_calloc_parms *parms);
 void tfp_memcpy(void *dest, void *src, size_t n);
 void tfp_free(void *addr);
 
-void tfp_spinlock_init(struct tfp_spinlock_parms *slock);
-void tfp_spinlock_lock(struct tfp_spinlock_parms *slock);
-void tfp_spinlock_unlock(struct tfp_spinlock_parms *slock);
-
 /**
  * Lookup of the FID in the platform specific structure.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index 45025516f4..4a6105a05e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -214,74 +214,6 @@  void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
 	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
 }
 
-/*
- * DMA-in the raw counter data from the HW and accumulate in the
- * local accumulator table using the TF-Core API
- *
- * tfp [in] The TF-Core context
- *
- * fc_info [in] The ULP Flow counter info ptr
- *
- * dir [in] The direction of the flow
- *
- * num_counters [in] The number of counters
- *
- */
-__rte_unused static int32_t
-ulp_bulk_get_flow_stats(struct tf *tfp,
-			struct bnxt_ulp_fc_info *fc_info,
-			enum tf_dir dir,
-			struct bnxt_ulp_device_params *dparms)
-/* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
-{
-	int rc = 0;
-	struct tf_tbl_get_bulk_parms parms = { 0 };
-	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
-	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
-	uint64_t *stats = NULL;
-	uint16_t i = 0;
-
-	parms.dir = dir;
-	parms.type = stype;
-	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
-	parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
-	/*
-	 * TODO:
-	 * Size of an entry needs to obtained from template
-	 */
-	parms.entry_sz_in_bytes = sizeof(uint64_t);
-	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
-	parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;
-
-	if (!stats) {
-		PMD_DRV_LOG(ERR,
-			    "BULK: Memory not initialized id:0x%x dir:%d\n",
-			    parms.starting_idx, dir);
-		return -EINVAL;
-	}
-
-	rc = tf_tbl_bulk_get(tfp, &parms);
-	if (rc) {
-		PMD_DRV_LOG(ERR,
-			    "BULK: Get failed for id:0x%x rc:%d\n",
-			    parms.starting_idx, rc);
-		return rc;
-	}
-
-	for (i = 0; i < parms.num_entries; i++) {
-		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
-		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
-		if (!sw_acc_tbl_entry->valid)
-			continue;
-		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
-							      dparms);
-		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
-								dparms);
-	}
-
-	return rc;
-}
-
 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
 				    struct tf *tfp,
 				    struct bnxt_ulp_fc_info *fc_info,
@@ -387,16 +319,6 @@  ulp_fc_mgr_alarm_cb(void *arg)
 		ulp_fc_mgr_thread_cancel(ctxt);
 		return;
 	}
-	/*
-	 * Commented for now till GET_BULK is resolved, just get the first flow
-	 * stat for now
-	 for (i = 0; i < TF_DIR_MAX; i++) {
-		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
-					     dparms->flow_count_db_entries);
-		if (rc)
-			break;
-	}
-	*/
 
 	/* reset the parent accumulation counters before accumulation if any */
 	ulp_flow_db_parent_flow_count_reset(ctxt);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 4b4eaeb126..2d1dbb7e6e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -226,37 +226,6 @@  ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
 	return 0;
 }
 
-/*
- * Api to get the function id for a given ulp ifindex.
- *
- * ulp_ctxt [in] Ptr to ulp context
- * ifindex [in] ulp ifindex
- * func_id [out] the function id of the given ifindex.
- *
- * Returns 0 on success or negative number on failure.
- */
-int32_t
-ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt,
-			    uint32_t ifindex,
-			    uint32_t fid_type,
-			    uint16_t *func_id)
-{
-	struct bnxt_ulp_port_db *port_db;
-
-	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
-	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
-		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
-		return -EINVAL;
-	}
-
-	if (fid_type == BNXT_ULP_DRV_FUNC_FID)
-		*func_id =  port_db->ulp_intf_list[ifindex].drv_func_id;
-	else
-		*func_id =  port_db->ulp_intf_list[ifindex].vf_func_id;
-
-	return 0;
-}
-
 /*
  * Api to get the svif for a given ulp ifindex.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 7b85987a0c..bd7032004f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -122,20 +122,6 @@  int32_t
 ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
 				  uint32_t port_id, uint32_t *ifindex);
 
-/*
- * Api to get the function id for a given ulp ifindex.
- *
- * ulp_ctxt [in] Ptr to ulp context
- * ifindex [in] ulp ifindex
- * func_id [out] the function id of the given ifindex.
- *
- * Returns 0 on success or negative number on failure.
- */
-int32_t
-ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt,
-			    uint32_t ifindex, uint32_t fid_type,
-			    uint16_t *func_id);
-
 /*
  * Api to get the svif for a given ulp ifindex.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.c b/drivers/net/bnxt/tf_ulp/ulp_utils.c
index a13a3bbf65..b5a4f85fcf 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_utils.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_utils.c
@@ -803,17 +803,6 @@  int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size)
 	return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1);
 }
 
-/* Function to check if bitmap is zero.Return 1 on success */
-uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size)
-{
-	while (size-- > 0) {
-		if (*bitmap != 0)
-			return 0;
-		bitmap++;
-	}
-	return 1;
-}
-
 /* Function to check if bitmap is ones. Return 1 on success */
 uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size)
 {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.h b/drivers/net/bnxt/tf_ulp/ulp_utils.h
index 749ac06d87..a45a2705da 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_utils.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_utils.h
@@ -384,9 +384,6 @@  ulp_encap_buffer_copy(uint8_t *dst,
  */
 int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size);
 
-/* Function to check if bitmap is zero.Return 1 on success */
-uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size);
-
 /* Function to check if bitmap is ones. Return 1 on success */
 uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size);
 
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index 8f198bd50e..e5645a10ab 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -224,10 +224,6 @@  int
 mac_address_set(struct rte_eth_dev *eth_dev,
 		struct rte_ether_addr *new_mac_addr);
 
-int
-mac_address_get(struct rte_eth_dev *eth_dev,
-		struct rte_ether_addr *dst_mac_addr);
-
 int
 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
 
diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h
index 874aa91a5f..23a4393f23 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -278,19 +278,6 @@  rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id);
 int
 rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms);
 
-/**
- * Get the current link monitoring frequency (in ms) for monitoring of the link
- * status of slave devices
- *
- * @param bonded_port_id	Port ID of bonded device.
- *
- * @return
- *	Monitoring interval on success, negative value otherwise.
- */
-int
-rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id);
-
-
 /**
  * Set the period in milliseconds for delaying the disabling of a bonded link
  * when the link down status has been detected
@@ -305,18 +292,6 @@  int
 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
 				       uint32_t delay_ms);
 
-/**
- * Get the period in milliseconds set for delaying the disabling of a bonded
- * link when the link down status has been detected
- *
- * @param bonded_port_id	Port ID of bonded device.
- *
- * @return
- *  Delay period on success, negative value otherwise.
- */
-int
-rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id);
-
 /**
  * Set the period in milliseconds for delaying the enabling of a bonded link
  * when the link up status has been detected
@@ -331,19 +306,6 @@  int
 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id,
 				    uint32_t delay_ms);
 
-/**
- * Get the period in milliseconds set for delaying the enabling of a bonded
- * link when the link up status has been detected
- *
- * @param bonded_port_id	Port ID of bonded device.
- *
- * @return
- *  Delay period on success, negative value otherwise.
- */
-int
-rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id);
-
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 55c8e3167c..1c09d2e4ba 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -981,19 +981,6 @@  rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
 	return 0;
 }
 
-int
-rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
-{
-	struct bond_dev_private *internals;
-
-	if (valid_bonded_port_id(bonded_port_id) != 0)
-		return -1;
-
-	internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
-	return internals->link_status_polling_interval_ms;
-}
-
 int
 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
 				       uint32_t delay_ms)
@@ -1010,19 +997,6 @@  rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
 	return 0;
 }
 
-int
-rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
-{
-	struct bond_dev_private *internals;
-
-	if (valid_bonded_port_id(bonded_port_id) != 0)
-		return -1;
-
-	internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
-	return internals->link_down_delay_ms;
-}
-
 int
 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
 
@@ -1037,16 +1011,3 @@  rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
 
 	return 0;
 }
-
-int
-rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
-{
-	struct bond_dev_private *internals;
-
-	if (valid_bonded_port_id(bonded_port_id) != 0)
-		return -1;
-
-	internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
-	return internals->link_up_delay_ms;
-}
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 057b1ada54..d9a0154de1 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1396,28 +1396,6 @@  link_properties_valid(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
-int
-mac_address_get(struct rte_eth_dev *eth_dev,
-		struct rte_ether_addr *dst_mac_addr)
-{
-	struct rte_ether_addr *mac_addr;
-
-	if (eth_dev == NULL) {
-		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
-		return -1;
-	}
-
-	if (dst_mac_addr == NULL) {
-		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
-		return -1;
-	}
-
-	mac_addr = eth_dev->data->mac_addrs;
-
-	rte_ether_addr_copy(mac_addr, dst_mac_addr);
-	return 0;
-}
-
 int
 mac_address_set(struct rte_eth_dev *eth_dev,
 		struct rte_ether_addr *new_mac_addr)
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 8fe8e2a36b..6e360bc42d 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -363,8 +363,6 @@  int t4vf_get_vfres(struct adapter *adap);
 int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
 				unsigned int cache_line_size,
 				enum chip_type chip_compat);
-int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
-			 unsigned int cache_line_size);
 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int nparams, const u32 *params,
@@ -485,9 +483,6 @@  static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
 		      unsigned int start_idx);
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
-		       unsigned int data_reg, const u32 *vals,
-		       unsigned int nregs, unsigned int start_idx);
 
 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_get_pfres(struct adapter *adapter);
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 9217956b42..d5b916ccf5 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -189,28 +189,6 @@  void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 	}
 }
 
-/**
- * t4_write_indirect - write indirectly addressed registers
- * @adap: the adapter
- * @addr_reg: register holding the indirect addresses
- * @data_reg: register holding the value for the indirect registers
- * @vals: values to write
- * @nregs: how many indirect registers to write
- * @start_idx: address of first indirect register to write
- *
- * Writes a sequential block of registers that are accessed indirectly
- * through an address/data register pair.
- */
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
-		       unsigned int data_reg, const u32 *vals,
-		       unsigned int nregs, unsigned int start_idx)
-{
-	while (nregs--) {
-		t4_write_reg(adap, addr_reg, start_idx++);
-		t4_write_reg(adap, data_reg, *vals++);
-	}
-}
-
 /**
  * t4_report_fw_error - report firmware error
  * @adap: the adapter
@@ -3860,25 +3838,6 @@  int t4_fixup_host_params_compat(struct adapter *adap,
 	return 0;
 }
 
-/**
- * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
- * @adap: the adapter
- * @page_size: the host's Base Page Size
- * @cache_line_size: the host's Cache Line Size
- *
- * Various registers in T4 contain values which are dependent on the
- * host's Base Page and Cache Line Sizes.  This function will fix all of
- * those registers with the appropriate values as passed in ...
- *
- * This routine makes changes which are compatible with T4 chips.
- */
-int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
-			 unsigned int cache_line_size)
-{
-	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
-					   T4_LAST_REV);
-}
-
 /**
  * t4_fw_initialize - ask FW to initialize the device
  * @adap: the adapter
diff --git a/drivers/net/dpaa/fmlib/fm_vsp.c b/drivers/net/dpaa/fmlib/fm_vsp.c
index 78efd93f22..0e261e3d1a 100644
--- a/drivers/net/dpaa/fmlib/fm_vsp.c
+++ b/drivers/net/dpaa/fmlib/fm_vsp.c
@@ -19,25 +19,6 @@ 
 #include "fm_vsp_ext.h"
 #include <dpaa_ethdev.h>
 
-uint32_t
-fm_port_vsp_alloc(t_handle h_fm_port,
-		  t_fm_port_vspalloc_params *p_params)
-{
-	t_device *p_dev = (t_device *)h_fm_port;
-	ioc_fm_port_vsp_alloc_params_t params;
-
-	_fml_dbg("Calling...\n");
-	memset(&params, 0, sizeof(ioc_fm_port_vsp_alloc_params_t));
-	memcpy(&params.params, p_params, sizeof(t_fm_port_vspalloc_params));
-
-	if (ioctl(p_dev->fd, FM_PORT_IOC_VSP_ALLOC, &params))
-		RETURN_ERROR(MINOR, E_INVALID_OPERATION, NO_MSG);
-
-	_fml_dbg("Called.\n");
-
-	return E_OK;
-}
-
 t_handle
 fm_vsp_config(t_fm_vsp_params *p_fm_vsp_params)
 {
diff --git a/drivers/net/dpaa/fmlib/fm_vsp_ext.h b/drivers/net/dpaa/fmlib/fm_vsp_ext.h
index b51c46162d..97590ea4c0 100644
--- a/drivers/net/dpaa/fmlib/fm_vsp_ext.h
+++ b/drivers/net/dpaa/fmlib/fm_vsp_ext.h
@@ -99,9 +99,6 @@  typedef struct ioc_fm_buffer_prefix_content_params_t {
 	ioc_fm_buffer_prefix_content_t fm_buffer_prefix_content;
 } ioc_fm_buffer_prefix_content_params_t;
 
-uint32_t fm_port_vsp_alloc(t_handle h_fm_port,
-			  t_fm_port_vspalloc_params *p_params);
-
 t_handle fm_vsp_config(t_fm_vsp_params *p_fm_vsp_params);
 
 uint32_t fm_vsp_init(t_handle h_fm_vsp);
diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
index 63f1ec7d30..dce9c55a9a 100644
--- a/drivers/net/dpaa2/mc/dpdmux.c
+++ b/drivers/net/dpaa2/mc/dpdmux.c
@@ -57,227 +57,6 @@  int dpdmux_open(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpdmux_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPDMUX object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_close(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_create() - Create the DPDMUX object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPDMUX object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmux_cfg	*cfg,
-		  uint32_t *obj_id)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_create *cmd_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmux_cmd_create *)cmd.params;
-	cmd_params->method = cfg->method;
-	cmd_params->manip = cfg->manip;
-	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-	cmd_params->adv_max_dmat_entries =
-			cpu_to_le16(cfg->adv.max_dmat_entries);
-	cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
-	cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
-	cmd_params->options = cpu_to_le64(cfg->adv.options);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpdmux_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_destroy *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
-	cmd_params->dpdmux_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_enable() - Enable DPDMUX functionality
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_enable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_disable() - Disable DPDMUX functionality
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_disable(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_rsp_is_enabled *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
-	*en = dpdmux_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpdmux_get_attributes() - Retrieve DPDMUX attributes
  * @mc_io:	Pointer to MC portal's I/O object
@@ -318,407 +97,6 @@  int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpdmux_if_enable() - Enable Interface
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface Identifier
- *
- * Return:	Completion status. '0' on Success; Error code otherwise.
- */
-int dpdmux_if_enable(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     uint16_t if_id)
-{
-	struct dpdmux_cmd_if *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_disable() - Disable Interface
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface Identifier
- *
- * Return:	Completion status. '0' on Success; Error code otherwise.
- */
-int dpdmux_if_disable(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint16_t if_id)
-{
-	struct dpdmux_cmd_if *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPDMUX object
- * @max_frame_length:	The required maximum frame length
- *
- * Update the maximum frame length on all DMUX interfaces.
- * In case of VEPA, the maximum frame length on all dmux interfaces
- * will be updated with the minimum value of the mfls of the connected
- * dpnis and the actual value of dmux mfl.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint16_t max_frame_length)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_set_max_frame_length *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
-	cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_ul_reset_counters() - Function resets the uplink counter
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_set_accepted_frames() - Set the accepted frame types
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface ID (0 for uplink, or 1-num_ifs);
- * @cfg:	Frame types configuration
- *
- * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
- * priority-tagged frames are discarded.
- * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
- * priority-tagged frames are accepted.
- * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged,
- * untagged and priority-tagged frame are accepted;
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint16_t if_id,
-				  const struct dpdmux_accepted_frames *cfg)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	dpdmux_set_field(cmd_params->frames_options,
-			 ACCEPTED_FRAMES_TYPE,
-			 cfg->type);
-	dpdmux_set_field(cmd_params->frames_options,
-			 UNACCEPTED_FRAMES_ACTION,
-			 cfg->unaccept_act);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface ID (0 for uplink, or 1-num_ifs);
- * @attr:	Interface attributes
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_if_attr *attr)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if *cmd_params;
-	struct dpdmux_rsp_if_get_attr *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
-	attr->rate = le32_to_cpu(rsp_params->rate);
-	attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
-	attr->is_default = dpdmux_get_field(rsp_params->enabled, IS_DEFAULT);
-	attr->accept_frame_type = dpdmux_get_field(
-				  rsp_params->accepted_frames_type,
-				  ACCEPTED_FRAMES_TYPE);
-
-	return 0;
-}
-
-/**
- * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Destination interface ID
- * @rule:	L2 rule
- *
- * Function removes a L2 rule from DPDMUX table
- * or adds an interface to an existing multicast address
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     const struct dpdmux_l2_rule *rule)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_l2_rule *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
-	cmd_params->mac_addr5 = rule->mac_addr[5];
-	cmd_params->mac_addr4 = rule->mac_addr[4];
-	cmd_params->mac_addr3 = rule->mac_addr[3];
-	cmd_params->mac_addr2 = rule->mac_addr[2];
-	cmd_params->mac_addr1 = rule->mac_addr[1];
-	cmd_params->mac_addr0 = rule->mac_addr[0];
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Destination interface ID
- * @rule:	L2 rule
- *
- * Function adds a L2 rule into DPDMUX table
- * or adds an interface to an existing multicast address
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  const struct dpdmux_l2_rule *rule)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_l2_rule *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
-	cmd_params->mac_addr5 = rule->mac_addr[5];
-	cmd_params->mac_addr4 = rule->mac_addr[4];
-	cmd_params->mac_addr3 = rule->mac_addr[3];
-	cmd_params->mac_addr2 = rule->mac_addr[2];
-	cmd_params->mac_addr1 = rule->mac_addr[1];
-	cmd_params->mac_addr0 = rule->mac_addr[0];
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_get_counter() - Functions obtains specific counter of an interface
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPDMUX object
- * @if_id:  Interface Id
- * @counter_type: counter type
- * @counter: Returned specific counter information
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  enum dpdmux_counter_type counter_type,
-			  uint64_t *counter)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_get_counter *cmd_params;
-	struct dpdmux_rsp_if_get_counter *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->counter_type = counter_type;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
-	*counter = le64_to_cpu(rsp_params->counter);
-
-	return 0;
-}
-
-/**
- * dpdmux_if_set_link_cfg() - set the link configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: interface id
- * @cfg: Link configuration
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   uint16_t if_id,
-			   struct dpdmux_link_cfg *cfg)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_set_link_cfg *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->rate = cpu_to_le32(cfg->rate);
-	cmd_params->options = cpu_to_le64(cfg->options);
-	cmd_params->advertising = cpu_to_le64(cfg->advertising);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_get_link_state - Return the link state
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: interface id
- * @state: link state
- *
- * @returns	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_link_state *state)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_get_link_state *cmd_params;
-	struct dpdmux_rsp_if_get_link_state *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
-	state->rate = le32_to_cpu(rsp_params->rate);
-	state->options = le64_to_cpu(rsp_params->options);
-	state->up = dpdmux_get_field(rsp_params->up, UP);
-	state->state_valid = dpdmux_get_field(rsp_params->up, STATE_VALID);
-	state->supported = le64_to_cpu(rsp_params->supported);
-	state->advertising = le64_to_cpu(rsp_params->advertising);
-
-	return 0;
-}
-
 /**
  * dpdmux_if_set_default - Set default interface
  * @mc_io:	Pointer to MC portal's I/O object
@@ -747,41 +125,6 @@  int dpdmux_if_set_default(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpdmux_if_get_default - Get default interface
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: interface id
- *
- * @returns	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_default(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		uint16_t *if_id)
-{
-	struct dpdmux_cmd_if *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_DEFAULT,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_cmd_if *)cmd.params;
-	*if_id = le16_to_cpu(rsp_params->if_id);
-
-	return 0;
-}
-
 /**
  * dpdmux_set_custom_key - Set a custom classification key.
  *
@@ -859,71 +202,3 @@  int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpdmux_remove_custom_cls_entry - Removes a custom classification entry.
- *
- * This API is only available for DPDMUX instances created with
- * DPDMUX_METHOD_CUSTOM.  The API can be used to remove classification
- * entries previously inserted using dpdmux_add_custom_cls_entry.
- *
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @rule: Classification rule to remove
- *
- * @returns	'0' on Success; Error code otherwise.
- */
-int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		struct dpdmux_rule_cfg *rule)
-{
-	struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
-	cmd_params->key_size = rule->key_size;
-	cmd_params->key_iova = cpu_to_le64(rule->key_iova);
-	cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_get_api_version() - Get Data Path Demux API version
- * @mc_io:  Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path demux API
- * @minor_ver:	Minor version of data path demux API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_rsp_get_api_version *rsp_params;
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 683d7bcc17..ad4df05dfc 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -80,99 +80,6 @@  int dpni_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_create() - Create the DPNI object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPNI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpni_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct dpni_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpni_cmd_create *)cmd.params;
-	cmd_params->options = cpu_to_le32(cfg->options);
-	cmd_params->num_queues = cfg->num_queues;
-	cmd_params->num_tcs = cfg->num_tcs;
-	cmd_params->mac_filter_entries = cfg->mac_filter_entries;
-	cmd_params->num_rx_tcs = cfg->num_rx_tcs;
-	cmd_params->vlan_filter_entries =  cfg->vlan_filter_entries;
-	cmd_params->qos_entries = cfg->qos_entries;
-	cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries);
-	cmd_params->num_cgs = cfg->num_cgs;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpni_destroy() - Destroy the DPNI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id)
-{
-	struct dpni_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	/* set object id to destroy */
-	cmd_params = (struct dpni_cmd_destroy *)cmd.params;
-	cmd_params->dpsw_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_set_pools() - Set buffer pools configuration
  * @mc_io:	Pointer to MC portal's I/O object
@@ -356,47 +263,6 @@  int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_irq_enable() - Get overall interrupt state
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @irq_index:	The interrupt index to configure
- * @en:		Returned interrupt state - enable = 1, disable = 0
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			uint8_t irq_index,
-			uint8_t *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_irq_enable *cmd_params;
-	struct dpni_rsp_get_irq_enable *rsp_params;
-
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
-	cmd_params->irq_index = irq_index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
-	*en = dpni_get_field(rsp_params->enabled, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpni_set_irq_mask() - Set interrupt mask.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -434,49 +300,6 @@  int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_irq_mask() - Get interrupt mask.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @irq_index:	The interrupt index to configure
- * @mask:	Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint8_t irq_index,
-		      uint32_t *mask)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_irq_mask *cmd_params;
-	struct dpni_rsp_get_irq_mask *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
-	cmd_params->irq_index = irq_index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
-	*mask = le32_to_cpu(rsp_params->mask);
-
-	return 0;
-}
-
 /**
  * dpni_get_irq_status() - Get the current status of any pending interrupts.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -633,57 +456,6 @@  int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @qtype:	Type of queue to retrieve configuration for
- * @layout:	Returns buffer layout attributes
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   enum dpni_queue_type qtype,
-			   struct dpni_buffer_layout *layout)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_buffer_layout *cmd_params;
-	struct dpni_rsp_get_buffer_layout *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
-	cmd_params->qtype = qtype;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
-	layout->pass_timestamp =
-				(int)dpni_get_field(rsp_params->flags, PASS_TS);
-	layout->pass_parser_result =
-				(int)dpni_get_field(rsp_params->flags, PASS_PR);
-	layout->pass_frame_status =
-				(int)dpni_get_field(rsp_params->flags, PASS_FS);
-	layout->pass_sw_opaque =
-			(int)dpni_get_field(rsp_params->flags, PASS_SWO);
-	layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
-	layout->data_align = le16_to_cpu(rsp_params->data_align);
-	layout->data_head_room = le16_to_cpu(rsp_params->head_room);
-	layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
-
-	return 0;
-}
-
 /**
  * dpni_set_buffer_layout() - Set buffer layout configuration.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -758,50 +530,6 @@  int dpni_set_offload(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_offload() - Get DPNI offload configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @type:	Type of DPNI offload
- * @config:	Offload configuration.
- *			For checksum offloads, a value of 1 indicates that the
- *			offload is enabled.
- *
- * Return:	'0' on Success; Error code otherwise.
- *
- * @warning	Allowed only when DPNI is disabled
- */
-int dpni_get_offload(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     enum dpni_offload type,
-		     uint32_t *config)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_offload *cmd_params;
-	struct dpni_rsp_get_offload *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
-	cmd_params->dpni_offload = type;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
-	*config = le32_to_cpu(rsp_params->config);
-
-	return 0;
-}
-
 /**
  * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
  *			for enqueue operations
@@ -844,41 +572,6 @@  int dpni_get_qdid(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @data_offset: Tx data offset (from start of buffer)
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token,
-			    uint16_t *data_offset)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_tx_data_offset *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
-	*data_offset = le16_to_cpu(rsp_params->data_offset);
-
-	return 0;
-}
-
 /**
  * dpni_set_link_cfg() - set the link configuration.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -978,42 +671,6 @@  int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_max_frame_length() - Get the maximum received frame length.
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPNI object
- * @max_frame_length:	Maximum received frame length (in bytes);
- *			frame is discarded if its length exceeds this value
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-			      uint32_t cmd_flags,
-			      uint16_t token,
-			      uint16_t *max_frame_length)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_max_frame_length *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
-	*max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
-
-	return 0;
-}
-
 /**
  * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1042,41 +699,6 @@  int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_multicast_promisc() - Get multicast promiscuous mode
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @en:		Returns '1' if enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-			       uint32_t cmd_flags,
-			       uint16_t token,
-			       int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_multicast_promisc *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
-	*en = dpni_get_field(rsp_params->enabled, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1096,48 +718,13 @@  int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
-	dpni_set_field(cmd_params->enable, ENABLE, en);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_get_unicast_promisc() - Get unicast promiscuous mode
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @en:		Returns '1' if enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_unicast_promisc *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
-	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+	dpni_set_field(cmd_params->enable, ENABLE, en);
 
-	return 0;
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
 }
 
 /**
@@ -1281,39 +868,6 @@  int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @unicast:	Set to '1' to clear unicast addresses
- * @multicast:	Set to '1' to clear multicast addresses
- *
- * The primary MAC address is not cleared by this operation.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int unicast,
-			   int multicast)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_clear_mac_filters *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
-	dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
-	dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
  *			port the DPNI is attached to
@@ -1453,29 +1007,6 @@  int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_vlan_filters() - Clear all VLAN filters
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1675,32 +1206,6 @@  int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_qos_table() - Clear all QoS mapping entries
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- *
- * Following this function call, all frames are directed to
- * the default traffic class (0)
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
  *			(to select a flow ID)
@@ -1779,35 +1284,6 @@  int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
- *			traffic class
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @tc_id:	Traffic class selection (0-7)
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint8_t tc_id)
-{
-	struct dpni_cmd_clear_fs_entries *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_clear_fs_entries *)cmd.params;
-	cmd_params->tc_id = tc_id;
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_set_congestion_notification() - Set traffic class congestion
  *	notification configuration
@@ -1858,94 +1334,6 @@  int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_congestion_notification() - Get traffic class congestion
- *	notification configuration
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @qtype:	Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id:	Traffic class selection (0-7)
- * @cfg:	congestion notification configuration
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
-				     uint32_t cmd_flags,
-				     uint16_t token,
-				     enum dpni_queue_type qtype,
-				     uint8_t tc_id,
-				struct dpni_congestion_notification_cfg *cfg)
-{
-	struct dpni_rsp_get_congestion_notification *rsp_params;
-	struct dpni_cmd_get_congestion_notification *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(
-					DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
-					cmd_flags,
-					token);
-	cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
-	cmd_params->qtype = qtype;
-	cmd_params->tc = tc_id;
-	cmd_params->congestion_point = cfg->cg_point;
-	cmd_params->cgid = cfg->cgid;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
-	cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
-	cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-	cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-	cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-	cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-	cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-	cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-	cfg->dest_cfg.priority = rsp_params->dest_priority;
-	cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
-						 DEST_TYPE);
-
-	return 0;
-}
-
-/**
- * dpni_get_api_version() - Get Data Path Network Interface API version
- * @mc_io:  Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path network interface API
- * @minor_ver:	Minor version of data path network interface API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpni_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpni_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
 /**
  * dpni_set_queue() - Set queue parameters
  * @mc_io:	Pointer to MC portal's I/O object
@@ -2184,67 +1572,6 @@  int dpni_set_taildrop(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_taildrop() - Get taildrop information
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @cg_point:	Congestion point
- * @q_type:	Queue type on which the taildrop is configured.
- *		Only Rx queues are supported for now
- * @tc:		Traffic class to apply this taildrop to
- * @q_index:	Index of the queue if the DPNI supports multiple queues for
- *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
- * @taildrop:	Taildrop structure
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      enum dpni_congestion_point cg_point,
-		      enum dpni_queue_type qtype,
-		      uint8_t tc,
-		      uint8_t index,
-		      struct dpni_taildrop *taildrop)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_taildrop *cmd_params;
-	struct dpni_rsp_get_taildrop *rsp_params;
-	uint8_t oal_lo, oal_hi;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
-	cmd_params->congestion_point = cg_point;
-	cmd_params->qtype = qtype;
-	cmd_params->tc = tc;
-	cmd_params->index = index;
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
-	taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE);
-	taildrop->units = rsp_params->units;
-	taildrop->threshold = le32_to_cpu(rsp_params->threshold);
-	oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO);
-	oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI);
-	taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo;
-
-	/* Fill the first 4 bits, 'oal' is a 2's complement value of 12 bits */
-	if (taildrop->oal >= 0x0800)
-		taildrop->oal |= 0xF000;
-
-	return 0;
-}
-
 /**
  * dpni_set_opr() - Set Order Restoration configuration.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -2290,69 +1617,6 @@  int dpni_set_opr(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_opr() - Retrieve Order Restoration config and query.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @tc:		Traffic class, in range 0 to NUM_TCS - 1
- * @index:	Selects the specific queue out of the set allocated
- *			for the same TC. Value must be in range 0 to
- *			NUM_QUEUES - 1
- * @cfg:	Returned OPR configuration
- * @qry:	Returned OPR query
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t tc,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry)
-{
-	struct dpni_rsp_get_opr *rsp_params;
-	struct dpni_cmd_get_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_opr *)cmd.params;
-	cmd_params->index = index;
-	cmd_params->tc_id = tc;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_opr *)cmd.params;
-	cfg->oloe = rsp_params->oloe;
-	cfg->oeane = rsp_params->oeane;
-	cfg->olws = rsp_params->olws;
-	cfg->oa = rsp_params->oa;
-	cfg->oprrws = rsp_params->oprrws;
-	qry->rip = dpni_get_field(rsp_params->flags, RIP);
-	qry->enable = dpni_get_field(rsp_params->flags, OPR_ENABLE);
-	qry->nesn = le16_to_cpu(rsp_params->nesn);
-	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-	qry->tseq_nlis = dpni_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
-	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-	qry->hseq_nlis = dpni_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
-	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-
-	return 0;
-}
-
 /**
  * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
  * @mc_io:	Pointer to MC portal's I/O object
@@ -2567,73 +1831,3 @@  int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @src:	Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova:  I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- *		to get the layout.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
-	      uint32_t cmd_flags,
-	      uint16_t token,
-		  enum dpni_soft_sequence_dest src,
-		  uint64_t ss_layout_iova)
-{
-	struct dpni_get_sw_sequence_layout *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
-					  cmd_flags,
-					  token);
-
-	cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
-	cmd_params->src = src;
-	cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout:		software sequence layout
- * @sw_sequence_layout_buf:	Zeroed 264 bytes of memory before mapping it
- *				to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
-			     const uint8_t *sw_sequence_layout_buf)
-{
-	const struct dpni_sw_sequence_layout_entry *ext_params;
-	int i;
-	uint16_t ss_size, ss_offset;
-
-	ext_params = (const struct dpni_sw_sequence_layout_entry *)
-						sw_sequence_layout_buf;
-
-	for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
-		ss_offset = le16_to_cpu(ext_params[i].ss_offset);
-		ss_size = le16_to_cpu(ext_params[i].ss_size);
-
-		if (ss_offset == 0 && ss_size == 0) {
-			layout->num_ss = i;
-			return;
-		}
-
-		layout->ss[i].ss_offset = ss_offset;
-		layout->ss[i].ss_size = ss_size;
-		layout->ss[i].param_offset = ext_params[i].param_offset;
-		layout->ss[i].param_size = ext_params[i].param_size;
-	}
-}
diff --git a/drivers/net/dpaa2/mc/dprtc.c b/drivers/net/dpaa2/mc/dprtc.c
index 42ac89150e..96e20bce81 100644
--- a/drivers/net/dpaa2/mc/dprtc.c
+++ b/drivers/net/dpaa2/mc/dprtc.c
@@ -54,213 +54,6 @@  int dprtc_open(struct fsl_mc_io *mc_io,
 	return err;
 }
 
-/**
- * dprtc_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_create() - Create the DPRTC object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPRTC object, allocate required resources and
- * perform required initialization.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dprtc_cfg *cfg,
-		 uint32_t *obj_id)
-{
-	struct mc_command cmd = { 0 };
-	int err;
-
-	(void)(cfg); /* unused */
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dprtc_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t object_id)
-{
-	struct dprtc_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
-	cmd_params->object_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_enable() - Enable the DPRTC.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_disable() - Disable the DPRTC.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_is_enabled() - Check if the DPRTC is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en)
-{
-	struct dprtc_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
-	*en = dprtc_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dprtc_reset() - Reset the DPRTC, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dprtc_get_attributes - Retrieve DPRTC attributes.
  *
@@ -299,101 +92,6 @@  int dprtc_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dprtc_set_clock_offset() - Sets the clock's offset
- * (usually relative to another clock).
- *
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- * @offset:	New clock offset (in nanoseconds).
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int64_t offset)
-{
-	struct dprtc_cmd_set_clock_offset *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
-	cmd_params->offset = cpu_to_le64(offset);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
- *
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPRTC object
- * @freq_compensation:	The new frequency compensation value to set.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint32_t freq_compensation)
-{
-	struct dprtc_get_freq_compensation *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
-	cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
- *
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPRTC object
- * @freq_compensation:	Frequency compensation value
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint32_t *freq_compensation)
-{
-	struct dprtc_get_freq_compensation *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
-	*freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
-
-	return 0;
-}
-
 /**
  * dprtc_get_time() - Returns the current RTC time.
  *
@@ -458,66 +156,3 @@  int dprtc_set_time(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dprtc_set_alarm() - Defines and sets alarm.
- *
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- * @time:	In nanoseconds, the time when the alarm
- *			should go off - must be a multiple of
- *			1 microsecond
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token, uint64_t time)
-{
-	struct dprtc_time *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dprtc_time *)cmd.params;
-	cmd_params->time = cpu_to_le64(time);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_api_version() - Get Data Path Real Time Counter API version
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path real time counter API
- * @minor_ver:	Minor version of data path real time counter API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver)
-{
-	struct dprtc_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
index accd1ef5c1..eb768fafbb 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
@@ -21,10 +21,6 @@  int dpdmux_open(struct fsl_mc_io *mc_io,
 		int  dpdmux_id,
 		uint16_t  *token);
 
-int dpdmux_close(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
 /**
  * DPDMUX general options
  */
@@ -102,34 +98,6 @@  struct dpdmux_cfg {
 	} adv;
 };
 
-int dpdmux_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmux_cfg *cfg,
-		  uint32_t *obj_id);
-
-int dpdmux_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id);
-
-int dpdmux_enable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
-
-int dpdmux_disable(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token);
-
-int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en);
-
-int dpdmux_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
 /**
  * struct dpdmux_attr - Structure representing DPDMUX attributes
  * @id: DPDMUX object ID
@@ -153,11 +121,6 @@  int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
 			  uint16_t token,
 			  struct dpdmux_attr *attr);
 
-int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint16_t max_frame_length);
-
 /**
  * enum dpdmux_counter_type - Counter types
  * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
@@ -223,12 +186,6 @@  struct dpdmux_accepted_frames {
 	enum dpdmux_action unaccept_act;
 };
 
-int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint16_t if_id,
-				  const struct dpdmux_accepted_frames *cfg);
-
 /**
  * struct dpdmux_if_attr - Structure representing frame types configuration
  * @rate: Configured interface rate (in bits per second)
@@ -242,22 +199,6 @@  struct dpdmux_if_attr {
 	enum dpdmux_accepted_frames_type accept_frame_type;
 };
 
-int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_if_attr *attr);
-
-int dpdmux_if_enable(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     uint16_t if_id);
-
-int dpdmux_if_disable(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint16_t if_id);
-
 /**
  * struct dpdmux_l2_rule - Structure representing L2 rule
  * @mac_addr: MAC address
@@ -268,29 +209,6 @@  struct dpdmux_l2_rule {
 	uint16_t vlan_id;
 };
 
-int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     const struct dpdmux_l2_rule *rule);
-
-int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  const struct dpdmux_l2_rule *rule);
-
-int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  enum dpdmux_counter_type counter_type,
-			  uint64_t *counter);
-
-int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token);
-
 /**
  * Enable auto-negotiation
  */
@@ -319,11 +237,6 @@  struct dpdmux_link_cfg {
 	uint64_t advertising;
 };
 
-int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   uint16_t if_id,
-			   struct dpdmux_link_cfg *cfg);
 /**
  * struct dpdmux_link_state - Structure representing DPDMUX link state
  * @rate: Rate
@@ -342,22 +255,11 @@  struct dpdmux_link_state {
 	uint64_t advertising;
 };
 
-int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_link_state *state);
-
 int dpdmux_if_set_default(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
 		uint16_t token,
 		uint16_t if_id);
 
-int dpdmux_if_get_default(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		uint16_t *if_id);
-
 int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
 			uint32_t cmd_flags,
 			uint16_t token,
@@ -397,14 +299,4 @@  int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
 		struct dpdmux_rule_cfg *rule,
 		struct dpdmux_cls_action *action);
 
-int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		struct dpdmux_rule_cfg *rule);
-
-int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver);
-
 #endif /* __FSL_DPDMUX_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 598911ddd1..2e2012d0bf 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -185,17 +185,6 @@  struct dpni_cfg {
 	uint8_t  num_cgs;
 };
 
-int dpni_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpni_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpni_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id);
-
 /**
  * struct dpni_pools_cfg - Structure representing buffer pools configuration
  * @num_dpbp:	Number of DPBPs
@@ -265,24 +254,12 @@  int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
 			uint8_t irq_index,
 			uint8_t en);
 
-int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			uint8_t irq_index,
-			uint8_t *en);
-
 int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
 		      uint32_t cmd_flags,
 		      uint16_t token,
 		      uint8_t irq_index,
 		      uint32_t mask);
 
-int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint8_t irq_index,
-		      uint32_t *mask);
-
 int dpni_get_irq_status(struct fsl_mc_io *mc_io,
 			uint32_t cmd_flags,
 			uint16_t token,
@@ -495,12 +472,6 @@  enum dpni_queue_type {
 	DPNI_QUEUE_RX_ERR,
 };
 
-int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   enum dpni_queue_type qtype,
-			   struct dpni_buffer_layout *layout);
-
 int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
 			   uint32_t cmd_flags,
 			   uint16_t token,
@@ -530,23 +501,12 @@  int dpni_set_offload(struct fsl_mc_io *mc_io,
 		     enum dpni_offload type,
 		     uint32_t config);
 
-int dpni_get_offload(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     enum dpni_offload type,
-		     uint32_t *config);
-
 int dpni_get_qdid(struct fsl_mc_io *mc_io,
 		  uint32_t cmd_flags,
 		  uint16_t token,
 		  enum dpni_queue_type qtype,
 		  uint16_t *qdid);
 
-int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token,
-			    uint16_t *data_offset);
-
 #define DPNI_STATISTICS_CNT		7
 
 /**
@@ -736,11 +696,6 @@  int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
 			      uint16_t token,
 			      uint16_t max_frame_length);
 
-int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-			      uint32_t cmd_flags,
-			      uint16_t token,
-			      uint16_t *max_frame_length);
-
 int dpni_set_mtu(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token,
@@ -756,21 +711,11 @@  int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
 			       uint16_t token,
 			       int en);
 
-int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-			       uint32_t cmd_flags,
-			       uint16_t token,
-			       int *en);
-
 int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
 			     uint32_t cmd_flags,
 			     uint16_t token,
 			     int en);
 
-int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     int *en);
-
 int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
 			      uint32_t cmd_flags,
 			      uint16_t token,
@@ -794,12 +739,6 @@  int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
 			 uint16_t token,
 			 const uint8_t mac_addr[6]);
 
-int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int unicast,
-			   int multicast);
-
 int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
 			   uint32_t cmd_flags,
 			   uint16_t token,
@@ -828,10 +767,6 @@  int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
 			uint16_t token,
 			uint16_t vlan_id);
 
-int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token);
-
 /**
  * enum dpni_dist_mode - DPNI distribution mode
  * @DPNI_DIST_MODE_NONE: No distribution
@@ -1042,13 +977,6 @@  int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
 				     uint8_t tc_id,
 			const struct dpni_congestion_notification_cfg *cfg);
 
-int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
-				     uint32_t cmd_flags,
-				     uint16_t token,
-				     enum dpni_queue_type qtype,
-				     uint8_t tc_id,
-				struct dpni_congestion_notification_cfg *cfg);
-
 /* DPNI FLC stash options */
 
 /**
@@ -1212,10 +1140,6 @@  int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
 			  uint16_t token,
 			  const struct dpni_rule_cfg *cfg);
 
-int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t token);
-
 /**
  * Discard matching traffic.  If set, this takes precedence over any other
  * configuration and matching traffic is always discarded.
@@ -1273,16 +1197,6 @@  int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
 			 uint8_t tc_id,
 			 const struct dpni_rule_cfg *cfg);
 
-int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint8_t tc_id);
-
-int dpni_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
-
 /**
  * Set User Context
  */
@@ -1372,15 +1286,6 @@  int dpni_set_taildrop(struct fsl_mc_io *mc_io,
 		      uint8_t q_index,
 		      struct dpni_taildrop *taildrop);
 
-int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      enum dpni_congestion_point cg_point,
-		      enum dpni_queue_type q_type,
-		      uint8_t tc,
-		      uint8_t q_index,
-		      struct dpni_taildrop *taildrop);
-
 int dpni_set_opr(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token,
@@ -1389,14 +1294,6 @@  int dpni_set_opr(struct fsl_mc_io *mc_io,
 		 uint8_t options,
 		 struct opr_cfg *cfg);
 
-int dpni_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t tc,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry);
-
 /**
  * When used for queue_idx in function dpni_set_rx_dist_default_queue will
  * signal to dpni to drop all unclassified frames
@@ -1550,35 +1447,4 @@  struct dpni_sw_sequence_layout {
 	} ss[DPNI_SW_SEQUENCE_LAYOUT_SIZE];
 };
 
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @src:	Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova:  I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- *		to get the layout
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				enum dpni_soft_sequence_dest src,
-				uint64_t ss_layout_iova);
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout:		software sequence layout
- * @sw_sequence_layout_buf:	Zeroed 264 bytes of memory before mapping it
- *				to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
-				     const uint8_t *sw_sequence_layout_buf);
-
 #endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dprtc.h b/drivers/net/dpaa2/mc/fsl_dprtc.h
index 49edb5a050..d8be107ef1 100644
--- a/drivers/net/dpaa2/mc/fsl_dprtc.h
+++ b/drivers/net/dpaa2/mc/fsl_dprtc.h
@@ -16,10 +16,6 @@  int dprtc_open(struct fsl_mc_io *mc_io,
 	       int dprtc_id,
 	       uint16_t *token);
 
-int dprtc_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
 /**
  * struct dprtc_cfg - Structure representing DPRTC configuration
  * @options:	place holder
@@ -28,49 +24,6 @@  struct dprtc_cfg {
 	uint32_t options;
 };
 
-int dprtc_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dprtc_cfg *cfg,
-		 uint32_t *obj_id);
-
-int dprtc_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t object_id);
-
-int dprtc_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
-int dprtc_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
-
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en);
-
-int dprtc_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int64_t offset);
-
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token,
-		  uint32_t freq_compensation);
-
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token,
-		  uint32_t *freq_compensation);
-
 int dprtc_get_time(struct fsl_mc_io *mc_io,
 		   uint32_t cmd_flags,
 		   uint16_t token,
@@ -81,11 +34,6 @@  int dprtc_set_time(struct fsl_mc_io *mc_io,
 		   uint16_t token,
 		   uint64_t time);
 
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    uint64_t time);
-
 /**
  * struct dprtc_attr - Structure representing DPRTC attributes
  * @id:		DPRTC object ID
@@ -101,9 +49,4 @@  int dprtc_get_attributes(struct fsl_mc_io *mc_io,
 			 uint16_t token,
 			 struct dprtc_attr *attr);
 
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver);
-
 #endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/e1000/base/e1000_82542.c b/drivers/net/e1000/base/e1000_82542.c
index fd473c1c6f..e14e9e9e58 100644
--- a/drivers/net/e1000/base/e1000_82542.c
+++ b/drivers/net/e1000/base/e1000_82542.c
@@ -406,103 +406,6 @@  STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_translate_register_82542 - Translate the proper register offset
- *  @reg: e1000 register to be read
- *
- *  Registers in 82542 are located in different offsets than other adapters
- *  even though they function in the same manner.  This function takes in
- *  the name of the register to read and returns the correct offset for
- *  82542 silicon.
- **/
-u32 e1000_translate_register_82542(u32 reg)
-{
-	/*
-	 * Some of the 82542 registers are located at different
-	 * offsets than they are in newer adapters.
-	 * Despite the difference in location, the registers
-	 * function in the same manner.
-	 */
-	switch (reg) {
-	case E1000_RA:
-		reg = 0x00040;
-		break;
-	case E1000_RDTR:
-		reg = 0x00108;
-		break;
-	case E1000_RDBAL(0):
-		reg = 0x00110;
-		break;
-	case E1000_RDBAH(0):
-		reg = 0x00114;
-		break;
-	case E1000_RDLEN(0):
-		reg = 0x00118;
-		break;
-	case E1000_RDH(0):
-		reg = 0x00120;
-		break;
-	case E1000_RDT(0):
-		reg = 0x00128;
-		break;
-	case E1000_RDBAL(1):
-		reg = 0x00138;
-		break;
-	case E1000_RDBAH(1):
-		reg = 0x0013C;
-		break;
-	case E1000_RDLEN(1):
-		reg = 0x00140;
-		break;
-	case E1000_RDH(1):
-		reg = 0x00148;
-		break;
-	case E1000_RDT(1):
-		reg = 0x00150;
-		break;
-	case E1000_FCRTH:
-		reg = 0x00160;
-		break;
-	case E1000_FCRTL:
-		reg = 0x00168;
-		break;
-	case E1000_MTA:
-		reg = 0x00200;
-		break;
-	case E1000_TDBAL(0):
-		reg = 0x00420;
-		break;
-	case E1000_TDBAH(0):
-		reg = 0x00424;
-		break;
-	case E1000_TDLEN(0):
-		reg = 0x00428;
-		break;
-	case E1000_TDH(0):
-		reg = 0x00430;
-		break;
-	case E1000_TDT(0):
-		reg = 0x00438;
-		break;
-	case E1000_TIDV:
-		reg = 0x00440;
-		break;
-	case E1000_VFTA:
-		reg = 0x00600;
-		break;
-	case E1000_TDFH:
-		reg = 0x08010;
-		break;
-	case E1000_TDFT:
-		reg = 0x08018;
-		break;
-	default:
-		break;
-	}
-
-	return reg;
-}
-
 /**
  *  e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_82543.c b/drivers/net/e1000/base/e1000_82543.c
index ca273b4368..992dffe1ff 100644
--- a/drivers/net/e1000/base/e1000_82543.c
+++ b/drivers/net/e1000/base/e1000_82543.c
@@ -364,84 +364,6 @@  STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
 	return ret_val;
 }
 
-/**
- *  e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
- *  @hw: pointer to the HW structure
- *  @stats: Struct containing statistic register values
- *  @frame_len: The length of the frame in question
- *  @mac_addr: The Ethernet destination address of the frame in question
- *  @max_frame_size: The maximum frame size
- *
- *  Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
- **/
-void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
-				  struct e1000_hw_stats *stats, u32 frame_len,
-				  u8 *mac_addr, u32 max_frame_size)
-{
-	if (!(e1000_tbi_sbp_enabled_82543(hw)))
-		goto out;
-
-	/* First adjust the frame length. */
-	frame_len--;
-	/*
-	 * We need to adjust the statistics counters, since the hardware
-	 * counters overcount this packet as a CRC error and undercount
-	 * the packet as a good packet
-	 */
-	/* This packet should not be counted as a CRC error. */
-	stats->crcerrs--;
-	/* This packet does count as a Good Packet Received. */
-	stats->gprc++;
-
-	/* Adjust the Good Octets received counters */
-	stats->gorc += frame_len;
-
-	/*
-	 * Is this a broadcast or multicast?  Check broadcast first,
-	 * since the test for a multicast frame will test positive on
-	 * a broadcast frame.
-	 */
-	if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
-		/* Broadcast packet */
-		stats->bprc++;
-	else if (*mac_addr & 0x01)
-		/* Multicast packet */
-		stats->mprc++;
-
-	/*
-	 * In this case, the hardware has over counted the number of
-	 * oversize frames.
-	 */
-	if ((frame_len == max_frame_size) && (stats->roc > 0))
-		stats->roc--;
-
-	/*
-	 * Adjust the bin counters when the extra byte put the frame in the
-	 * wrong bin. Remember that the frame_len was adjusted above.
-	 */
-	if (frame_len == 64) {
-		stats->prc64++;
-		stats->prc127--;
-	} else if (frame_len == 127) {
-		stats->prc127++;
-		stats->prc255--;
-	} else if (frame_len == 255) {
-		stats->prc255++;
-		stats->prc511--;
-	} else if (frame_len == 511) {
-		stats->prc511++;
-		stats->prc1023--;
-	} else if (frame_len == 1023) {
-		stats->prc1023++;
-		stats->prc1522--;
-	} else if (frame_len == 1522) {
-		stats->prc1522++;
-	}
-
-out:
-	return;
-}
-
 /**
  *  e1000_read_phy_reg_82543 - Read PHY register
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_82543.h b/drivers/net/e1000/base/e1000_82543.h
index cf81e4e848..8af412bc77 100644
--- a/drivers/net/e1000/base/e1000_82543.h
+++ b/drivers/net/e1000/base/e1000_82543.h
@@ -16,10 +16,6 @@ 
 /* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
 #define TBI_SBP_ENABLED		0x2
 
-void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
-				  struct e1000_hw_stats *stats,
-				  u32 frame_len, u8 *mac_addr,
-				  u32 max_frame_size);
 void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
 				       bool state);
 bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_82571.c b/drivers/net/e1000/base/e1000_82571.c
index 9dc7f6025c..9da1fbf856 100644
--- a/drivers/net/e1000/base/e1000_82571.c
+++ b/drivers/net/e1000/base/e1000_82571.c
@@ -1467,41 +1467,6 @@  STATIC s32 e1000_led_on_82574(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_check_phy_82574 - check 82574 phy hung state
- *  @hw: pointer to the HW structure
- *
- *  Returns whether phy is hung or not
- **/
-bool e1000_check_phy_82574(struct e1000_hw *hw)
-{
-	u16 status_1kbt = 0;
-	u16 receive_errors = 0;
-	s32 ret_val;
-
-	DEBUGFUNC("e1000_check_phy_82574");
-
-	/* Read PHY Receive Error counter first, if its is max - all F's then
-	 * read the Base1000T status register If both are max then PHY is hung.
-	 */
-	ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER,
-				       &receive_errors);
-	if (ret_val)
-		return false;
-	if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
-		ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS,
-					       &status_1kbt);
-		if (ret_val)
-			return false;
-		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
-		    E1000_IDLE_ERROR_COUNT_MASK)
-			return true;
-	}
-
-	return false;
-}
-
-
 /**
  *  e1000_setup_link_82571 - Setup flow control and link settings
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_82571.h b/drivers/net/e1000/base/e1000_82571.h
index 0d8412678d..3c1840d0e8 100644
--- a/drivers/net/e1000/base/e1000_82571.h
+++ b/drivers/net/e1000/base/e1000_82571.h
@@ -29,7 +29,6 @@ 
 #define E1000_IDLE_ERROR_COUNT_MASK	0xFF
 #define E1000_RECEIVE_ERROR_COUNTER	21
 #define E1000_RECEIVE_ERROR_MAX		0xFFFF
-bool e1000_check_phy_82574(struct e1000_hw *hw);
 bool e1000_get_laa_state_82571(struct e1000_hw *hw);
 void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
 
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 7c78649393..074bd34f11 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -2119,62 +2119,6 @@  void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
 	E1000_WRITE_REG(hw, reg_offset, reg_val);
 }
 
-/**
- *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *
- *  enables/disables L2 switch loopback functionality.
- **/
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
-{
-	u32 dtxswc;
-
-	switch (hw->mac.type) {
-	case e1000_82576:
-		dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
-		if (enable)
-			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		else
-			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
-		break;
-	case e1000_i350:
-	case e1000_i354:
-		dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
-		if (enable)
-			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		else
-			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
-		break;
-	default:
-		/* Currently no other hardware supports loopback */
-		break;
-	}
-
-
-}
-
-/**
- *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *
- *  enables/disables replication of packets across multiple pools.
- **/
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
-{
-	u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
-
-	if (enable)
-		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
-	else
-		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
-
-	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
-}
-
 /**
  *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
  *  @hw: pointer to the HW structure
@@ -2596,45 +2540,6 @@  STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
 	return ret_val;
 }
 
-/**
- *  __e1000_access_emi_reg - Read/write EMI register
- *  @hw: pointer to the HW structure
- *  @address: EMI address to program
- *  @data: pointer to value to read/write from/to the EMI address
- *  @read: boolean flag to indicate read or write
- **/
-STATIC s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
-				  u16 *data, bool read)
-{
-	s32 ret_val;
-
-	DEBUGFUNC("__e1000_access_emi_reg");
-
-	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
-	if (ret_val)
-		return ret_val;
-
-	if (read)
-		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
-	else
-		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
-
-	return ret_val;
-}
-
-/**
- *  e1000_read_emi_reg - Read Extended Management Interface register
- *  @hw: pointer to the HW structure
- *  @addr: EMI address to program
- *  @data: value to be read from the EMI address
- **/
-s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
-{
-	DEBUGFUNC("e1000_read_emi_reg");
-
-	return __e1000_access_emi_reg(hw, addr, data, true);
-}
-
 /**
  *  e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
  *  @hw: pointer to the HW structure
@@ -2823,179 +2728,6 @@  s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
 	return ret_val;
 }
 
-/**
- *  e1000_set_eee_i350 - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *  @adv1G: boolean flag enabling 1G EEE advertisement
- *  @adv100M: boolean flag enabling 100M EEE advertisement
- *
- *  Enable/disable EEE based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
-{
-	u32 ipcnfg, eeer;
-
-	DEBUGFUNC("e1000_set_eee_i350");
-
-	if ((hw->mac.type < e1000_i350) ||
-	    (hw->phy.media_type != e1000_media_type_copper))
-		goto out;
-	ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
-	eeer = E1000_READ_REG(hw, E1000_EEER);
-
-	/* enable or disable per user setting */
-	if (!(hw->dev_spec._82575.eee_disable)) {
-		u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
-
-		if (adv100M)
-			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
-		else
-			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
-
-		if (adv1G)
-			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
-		else
-			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
-
-		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
-			 E1000_EEER_LPI_FC);
-
-		/* This bit should not be set in normal operation. */
-		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
-			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
-	} else {
-		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
-		eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
-			  E1000_EEER_LPI_FC);
-	}
-	E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
-	E1000_WRITE_REG(hw, E1000_EEER, eeer);
-	E1000_READ_REG(hw, E1000_IPCNFG);
-	E1000_READ_REG(hw, E1000_EEER);
-out:
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_eee_i354 - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *  @adv1G: boolean flag enabling 1G EEE advertisement
- *  @adv100M: boolean flag enabling 100M EEE advertisement
- *
- *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val = E1000_SUCCESS;
-	u16 phy_data;
-
-	DEBUGFUNC("e1000_set_eee_i354");
-
-	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    ((phy->id != M88E1543_E_PHY_ID) &&
-	    (phy->id != M88E1512_E_PHY_ID)))
-		goto out;
-
-	if (!hw->dev_spec._82575.eee_disable) {
-		/* Switch to PHY page 18. */
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
-		if (ret_val)
-			goto out;
-
-		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
-					    &phy_data);
-		if (ret_val)
-			goto out;
-
-		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
-					     phy_data);
-		if (ret_val)
-			goto out;
-
-		/* Return the PHY to page 0. */
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
-		if (ret_val)
-			goto out;
-
-		/* Turn on EEE advertisement. */
-		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-					       E1000_EEE_ADV_DEV_I354,
-					       &phy_data);
-		if (ret_val)
-			goto out;
-
-		if (adv100M)
-			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
-		else
-			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
-
-		if (adv1G)
-			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
-		else
-			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
-
-		ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-						E1000_EEE_ADV_DEV_I354,
-						phy_data);
-	} else {
-		/* Turn off EEE advertisement. */
-		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-					       E1000_EEE_ADV_DEV_I354,
-					       &phy_data);
-		if (ret_val)
-			goto out;
-
-		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
-			      E1000_EEE_ADV_1000_SUPPORTED);
-		ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-						E1000_EEE_ADV_DEV_I354,
-						phy_data);
-	}
-
-out:
-	return ret_val;
-}
-
-/**
- *  e1000_get_eee_status_i354 - Get EEE status
- *  @hw: pointer to the HW structure
- *  @status: EEE status
- *
- *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
- *  been received.
- **/
-s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val = E1000_SUCCESS;
-	u16 phy_data;
-
-	DEBUGFUNC("e1000_get_eee_status_i354");
-
-	/* Check if EEE is supported on this device. */
-	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    ((phy->id != M88E1543_E_PHY_ID) &&
-	    (phy->id != M88E1512_E_PHY_ID)))
-		goto out;
-
-	ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
-				       E1000_PCS_STATUS_DEV_I354,
-				       &phy_data);
-	if (ret_val)
-		goto out;
-
-	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
-			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
-
-out:
-	return ret_val;
-}
-
 /* Due to a hw errata, if the host tries to  configure the VFTA register
  * while performing queries from the BMC or DMA, then the VFTA in some
  * cases won't be written.
@@ -3044,36 +2776,6 @@  void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
 	E1000_WRITE_FLUSH(hw);
 }
 
-
-/**
- *  e1000_set_i2c_bb - Enable I2C bit-bang
- *  @hw: pointer to the HW structure
- *
- *  Enable I2C bit-bang interface
- *
- **/
-s32 e1000_set_i2c_bb(struct e1000_hw *hw)
-{
-	s32 ret_val = E1000_SUCCESS;
-	u32 ctrl_ext, i2cparams;
-
-	DEBUGFUNC("e1000_set_i2c_bb");
-
-	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-	ctrl_ext |= E1000_CTRL_I2C_ENA;
-	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-	E1000_WRITE_FLUSH(hw);
-
-	i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
-	i2cparams |= E1000_I2CBB_EN;
-	i2cparams |= E1000_I2C_DATA_OE_N;
-	i2cparams |= E1000_I2C_CLK_OE_N;
-	E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
-	E1000_WRITE_FLUSH(hw);
-
-	return ret_val;
-}
-
 /**
  *  e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h
index 006b37ae98..03284ca946 100644
--- a/drivers/net/e1000/base/e1000_82575.h
+++ b/drivers/net/e1000/base/e1000_82575.h
@@ -361,9 +361,7 @@  s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
 
 /* Rx packet buffer size defines */
 #define E1000_RXPBS_SIZE_MASK_82576	0x0000007F
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
 
 enum e1000_promisc_type {
 	e1000_promisc_disabled = 0,   /* all promisc modes disabled */
@@ -373,15 +371,10 @@  enum e1000_promisc_type {
 	e1000_num_promisc_types
 };
 
-void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
 void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
 u16 e1000_rxpbs_adjust_82580(u32 data);
-s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
-s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
-s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
-s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
 s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
 s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
 
@@ -397,7 +390,6 @@  s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
 #define E1000_I2C_T_SU_STO	4
 #define E1000_I2C_T_BUF		5
 
-s32 e1000_set_i2c_bb(struct e1000_hw *hw);
 s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
 				u8 dev_addr, u8 *data);
 s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
diff --git a/drivers/net/e1000/base/e1000_api.c b/drivers/net/e1000/base/e1000_api.c
index 6a2376f40f..c3a8892c47 100644
--- a/drivers/net/e1000/base/e1000_api.c
+++ b/drivers/net/e1000/base/e1000_api.c
@@ -530,21 +530,6 @@  void e1000_clear_vfta(struct e1000_hw *hw)
 		hw->mac.ops.clear_vfta(hw);
 }
 
-/**
- *  e1000_write_vfta - Write value to VLAN filter table
- *  @hw: pointer to the HW structure
- *  @offset: the 32-bit offset in which to write the value to.
- *  @value: the 32-bit value to write at location offset.
- *
- *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
- *  table. This is a function pointer entry point called by drivers.
- **/
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
-{
-	if (hw->mac.ops.write_vfta)
-		hw->mac.ops.write_vfta(hw, offset, value);
-}
-
 /**
  *  e1000_update_mc_addr_list - Update Multicast addresses
  *  @hw: pointer to the HW structure
@@ -562,19 +547,6 @@  void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
 						mc_addr_count);
 }
 
-/**
- *  e1000_force_mac_fc - Force MAC flow control
- *  @hw: pointer to the HW structure
- *
- *  Force the MAC's flow control settings. Currently no func pointer exists
- *  and all implementations are handled in the generic version of this
- *  function.
- **/
-s32 e1000_force_mac_fc(struct e1000_hw *hw)
-{
-	return e1000_force_mac_fc_generic(hw);
-}
-
 /**
  *  e1000_check_for_link - Check/Store link connection
  *  @hw: pointer to the HW structure
@@ -591,34 +563,6 @@  s32 e1000_check_for_link(struct e1000_hw *hw)
 	return -E1000_ERR_CONFIG;
 }
 
-/**
- *  e1000_check_mng_mode - Check management mode
- *  @hw: pointer to the HW structure
- *
- *  This checks if the adapter has manageability enabled.
- *  This is a function pointer entry point called by drivers.
- **/
-bool e1000_check_mng_mode(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.check_mng_mode)
-		return hw->mac.ops.check_mng_mode(hw);
-
-	return false;
-}
-
-/**
- *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface
- *  @length: size of the buffer
- *
- *  Writes the DHCP information to the host interface.
- **/
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
-{
-	return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
-}
-
 /**
  *  e1000_reset_hw - Reset hardware
  *  @hw: pointer to the HW structure
@@ -665,86 +609,6 @@  s32 e1000_setup_link(struct e1000_hw *hw)
 	return -E1000_ERR_CONFIG;
 }
 
-/**
- *  e1000_get_speed_and_duplex - Returns current speed and duplex
- *  @hw: pointer to the HW structure
- *  @speed: pointer to a 16-bit value to store the speed
- *  @duplex: pointer to a 16-bit value to store the duplex.
- *
- *  This returns the speed and duplex of the adapter in the two 'out'
- *  variables passed in. This is a function pointer entry point called
- *  by drivers.
- **/
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
-{
-	if (hw->mac.ops.get_link_up_info)
-		return hw->mac.ops.get_link_up_info(hw, speed, duplex);
-
-	return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_setup_led - Configures SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This prepares the SW controllable LED for use and saves the current state
- *  of the LED so it can be later restored. This is a function pointer entry
- *  point called by drivers.
- **/
-s32 e1000_setup_led(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.setup_led)
-		return hw->mac.ops.setup_led(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_cleanup_led - Restores SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This restores the SW controllable LED to the value saved off by
- *  e1000_setup_led. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_cleanup_led(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.cleanup_led)
-		return hw->mac.ops.cleanup_led(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_blink_led - Blink SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This starts the adapter LED blinking. Request the LED to be setup first
- *  and cleaned up after. This is a function pointer entry point called by
- *  drivers.
- **/
-s32 e1000_blink_led(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.blink_led)
-		return hw->mac.ops.blink_led(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_id_led_init - store LED configurations in SW
- *  @hw: pointer to the HW structure
- *
- *  Initializes the LED config in SW. This is a function pointer entry point
- *  called by drivers.
- **/
-s32 e1000_id_led_init(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.id_led_init)
-		return hw->mac.ops.id_led_init(hw);
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_led_on - Turn on SW controllable LED
  *  @hw: pointer to the HW structure
@@ -775,43 +639,6 @@  s32 e1000_led_off(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_reset_adaptive - Reset adaptive IFS
- *  @hw: pointer to the HW structure
- *
- *  Resets the adaptive IFS. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-void e1000_reset_adaptive(struct e1000_hw *hw)
-{
-	e1000_reset_adaptive_generic(hw);
-}
-
-/**
- *  e1000_update_adaptive - Update adaptive IFS
- *  @hw: pointer to the HW structure
- *
- *  Updates adapter IFS. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-void e1000_update_adaptive(struct e1000_hw *hw)
-{
-	e1000_update_adaptive_generic(hw);
-}
-
-/**
- *  e1000_disable_pcie_master - Disable PCI-Express master access
- *  @hw: pointer to the HW structure
- *
- *  Disables PCI-Express master access and verifies there are no pending
- *  requests. Currently no func pointer exists and all implementations are
- *  handled in the generic version of this function.
- **/
-s32 e1000_disable_pcie_master(struct e1000_hw *hw)
-{
-	return e1000_disable_pcie_master_generic(hw);
-}
-
 /**
  *  e1000_config_collision_dist - Configure collision distance
  *  @hw: pointer to the HW structure
@@ -841,94 +668,6 @@  int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
- *  @hw: pointer to the HW structure
- *
- *  Ensures that the MDI/MDIX SW state is valid.
- **/
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.validate_mdi_setting)
-		return hw->mac.ops.validate_mdi_setting(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_hash_mc_addr - Determines address location in multicast table
- *  @hw: pointer to the HW structure
- *  @mc_addr: Multicast address to hash.
- *
- *  This hashes an address to determine its location in the multicast
- *  table. Currently no func pointer exists and all implementations
- *  are handled in the generic version of this function.
- **/
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
-{
-	return e1000_hash_mc_addr_generic(hw, mc_addr);
-}
-
-/**
- *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
- *  @hw: pointer to the HW structure
- *
- *  Enables packet filtering on transmit packets if manageability is enabled
- *  and host interface is enabled.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
-{
-	return e1000_enable_tx_pkt_filtering_generic(hw);
-}
-
-/**
- *  e1000_mng_host_if_write - Writes to the manageability host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface buffer
- *  @length: size of the buffer
- *  @offset: location in the buffer to write to
- *  @sum: sum of the data (not checksum)
- *
- *  This function writes the buffer content at the offset given on the host if.
- *  It also does alignment considerations to do the writes in most efficient
- *  way.  Also fills up the sum of the buffer in *buffer parameter.
- **/
-s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
-			    u16 offset, u8 *sum)
-{
-	return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
-}
-
-/**
- *  e1000_mng_write_cmd_header - Writes manageability command header
- *  @hw: pointer to the HW structure
- *  @hdr: pointer to the host interface command header
- *
- *  Writes the command header after does the checksum calculation.
- **/
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-			       struct e1000_host_mng_command_header *hdr)
-{
-	return e1000_mng_write_cmd_header_generic(hw, hdr);
-}
-
-/**
- *  e1000_mng_enable_host_if - Checks host interface is enabled
- *  @hw: pointer to the HW structure
- *
- *  Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- *  This function checks whether the HOST IF is enabled for command operation
- *  and also checks whether the previous command is completed.  It busy waits
- *  in case of previous command is not completed.
- **/
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
-{
-	return e1000_mng_enable_host_if_generic(hw);
-}
-
 /**
  *  e1000_check_reset_block - Verifies PHY can be reset
  *  @hw: pointer to the HW structure
@@ -944,126 +683,6 @@  s32 e1000_check_reset_block(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_read_phy_reg - Reads PHY register
- *  @hw: pointer to the HW structure
- *  @offset: the register to read
- *  @data: the buffer to store the 16-bit read.
- *
- *  Reads the PHY register and returns the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-	if (hw->phy.ops.read_reg)
-		return hw->phy.ops.read_reg(hw, offset, data);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_phy_reg - Writes PHY register
- *  @hw: pointer to the HW structure
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes the PHY register at offset with the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-	if (hw->phy.ops.write_reg)
-		return hw->phy.ops.write_reg(hw, offset, data);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_release_phy - Generic release PHY
- *  @hw: pointer to the HW structure
- *
- *  Return if silicon family does not require a semaphore when accessing the
- *  PHY.
- **/
-void e1000_release_phy(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.release)
-		hw->phy.ops.release(hw);
-}
-
-/**
- *  e1000_acquire_phy - Generic acquire PHY
- *  @hw: pointer to the HW structure
- *
- *  Return success if silicon family does not require a semaphore when
- *  accessing the PHY.
- **/
-s32 e1000_acquire_phy(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.acquire)
-		return hw->phy.ops.acquire(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_cfg_on_link_up - Configure PHY upon link up
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_cfg_on_link_up(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.cfg_on_link_up)
-		return hw->phy.ops.cfg_on_link_up(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_kmrn_reg - Reads register using Kumeran interface
- *  @hw: pointer to the HW structure
- *  @offset: the register to read
- *  @data: the location to store the 16-bit value read.
- *
- *  Reads a register out of the Kumeran interface. Currently no func pointer
- *  exists and all implementations are handled in the generic version of
- *  this function.
- **/
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-	return e1000_read_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- *  e1000_write_kmrn_reg - Writes register using Kumeran interface
- *  @hw: pointer to the HW structure
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes a register to the Kumeran interface. Currently no func pointer
- *  exists and all implementations are handled in the generic version of
- *  this function.
- **/
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-	return e1000_write_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- *  e1000_get_cable_length - Retrieves cable length estimation
- *  @hw: pointer to the HW structure
- *
- *  This function estimates the cable length and stores them in
- *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.get_cable_length)
-		return hw->phy.ops.get_cable_length(hw);
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_get_phy_info - Retrieves PHY information from registers
  *  @hw: pointer to the HW structure
@@ -1095,65 +714,6 @@  s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_phy_commit - Soft PHY reset
- *  @hw: pointer to the HW structure
- *
- *  Performs a soft PHY reset on those that apply. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_phy_commit(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.commit)
-		return hw->phy.ops.commit(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_d0_lplu_state - Sets low power link up state for D0
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D0
- *  and SmartSpeed is disabled when active is true, else clear lplu for D0
- *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
-	if (hw->phy.ops.set_d0_lplu_state)
-		return hw->phy.ops.set_d0_lplu_state(hw, active);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_d3_lplu_state - Sets low power link up state for D3
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D3
- *  and SmartSpeed is disabled when active is true, else clear lplu for D3
- *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
-{
-	if (hw->phy.ops.set_d3_lplu_state)
-		return hw->phy.ops.set_d3_lplu_state(hw, active);
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_read_mac_addr - Reads MAC address
  *  @hw: pointer to the HW structure
@@ -1170,52 +730,6 @@  s32 e1000_read_mac_addr(struct e1000_hw *hw)
 	return e1000_read_mac_addr_generic(hw);
 }
 
-/**
- *  e1000_read_pba_string - Read device part number string
- *  @hw: pointer to the HW structure
- *  @pba_num: pointer to device part number
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number from the EEPROM and stores
- *  the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
-	return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
-}
-
-/**
- *  e1000_read_pba_length - Read device part number string length
- *  @hw: pointer to the HW structure
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number length from the EEPROM and
- *  stores the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
-{
-	return e1000_read_pba_length_generic(hw, pba_num_size);
-}
-
-/**
- *  e1000_read_pba_num - Read device part number
- *  @hw: pointer to the HW structure
- *  @pba_num: pointer to device part number
- *
- *  Reads the product board assembly (PBA) number from the EEPROM and stores
- *  the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
-{
-	return e1000_read_pba_num_generic(hw, pba_num);
-}
-
 /**
  *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
  *  @hw: pointer to the HW structure
@@ -1231,34 +745,6 @@  s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
 	return -E1000_ERR_CONFIG;
 }
 
-/**
- *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
- *  @hw: pointer to the HW structure
- *
- *  Updates the NVM checksum. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
-{
-	if (hw->nvm.ops.update)
-		return hw->nvm.ops.update(hw);
-
-	return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_reload_nvm - Reloads EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- *  extended control register.
- **/
-void e1000_reload_nvm(struct e1000_hw *hw)
-{
-	if (hw->nvm.ops.reload)
-		hw->nvm.ops.reload(hw);
-}
-
 /**
  *  e1000_read_nvm - Reads NVM (EEPROM)
  *  @hw: pointer to the HW structure
@@ -1295,22 +781,6 @@  s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
- *  @hw: pointer to the HW structure
- *  @reg: 32bit register offset
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes the PHY register at offset with the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
-			      u8 data)
-{
-	return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
-}
-
 /**
  * e1000_power_up_phy - Restores link in case of PHY power down
  * @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_api.h b/drivers/net/e1000/base/e1000_api.h
index 6b38e2b7bb..1c240dfcdf 100644
--- a/drivers/net/e1000/base/e1000_api.h
+++ b/drivers/net/e1000/base/e1000_api.h
@@ -29,65 +29,25 @@  s32 e1000_init_phy_params(struct e1000_hw *hw);
 s32 e1000_init_mbx_params(struct e1000_hw *hw);
 s32 e1000_get_bus_info(struct e1000_hw *hw);
 void e1000_clear_vfta(struct e1000_hw *hw);
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-s32 e1000_force_mac_fc(struct e1000_hw *hw);
 s32 e1000_check_for_link(struct e1000_hw *hw);
 s32 e1000_reset_hw(struct e1000_hw *hw);
 s32 e1000_init_hw(struct e1000_hw *hw);
 s32 e1000_setup_link(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-s32 e1000_disable_pcie_master(struct e1000_hw *hw);
 void e1000_config_collision_dist(struct e1000_hw *hw);
 int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
 void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
 			       u32 mc_addr_count);
-s32 e1000_setup_led(struct e1000_hw *hw);
-s32 e1000_cleanup_led(struct e1000_hw *hw);
 s32 e1000_check_reset_block(struct e1000_hw *hw);
-s32 e1000_blink_led(struct e1000_hw *hw);
 s32 e1000_led_on(struct e1000_hw *hw);
 s32 e1000_led_off(struct e1000_hw *hw);
-s32 e1000_id_led_init(struct e1000_hw *hw);
-void e1000_reset_adaptive(struct e1000_hw *hw);
-void e1000_update_adaptive(struct e1000_hw *hw);
-s32 e1000_get_cable_length(struct e1000_hw *hw);
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
-			      u8 data);
 s32 e1000_get_phy_info(struct e1000_hw *hw);
-void e1000_release_phy(struct e1000_hw *hw);
-s32 e1000_acquire_phy(struct e1000_hw *hw);
-s32 e1000_cfg_on_link_up(struct e1000_hw *hw);
 s32 e1000_phy_hw_reset(struct e1000_hw *hw);
-s32 e1000_phy_commit(struct e1000_hw *hw);
 void e1000_power_up_phy(struct e1000_hw *hw);
 void e1000_power_down_phy(struct e1000_hw *hw);
 s32 e1000_read_mac_addr(struct e1000_hw *hw);
-s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
-void e1000_reload_nvm(struct e1000_hw *hw);
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
 s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
 s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-bool e1000_check_mng_mode(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
-			    u16 offset, u8 *sum);
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-			       struct e1000_host_mng_command_header *hdr);
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-u32  e1000_translate_register_82542(u32 reg);
-
 
 
 /*
diff --git a/drivers/net/e1000/base/e1000_base.c b/drivers/net/e1000/base/e1000_base.c
index ab73e1e59e..958aca14b2 100644
--- a/drivers/net/e1000/base/e1000_base.c
+++ b/drivers/net/e1000/base/e1000_base.c
@@ -110,81 +110,3 @@  void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
 	if (phy->ops.check_reset_block(hw))
 		e1000_power_down_phy_copper(hw);
 }
-
-/**
- *  e1000_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
- *  @hw: pointer to the HW structure
- *
- *  After Rx enable, if manageability is enabled then there is likely some
- *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
- *  function clears the FIFOs and flushes any packets that came in as Rx was
- *  being enabled.
- **/
-void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
-{
-	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
-	int i, ms_wait;
-
-	DEBUGFUNC("e1000_rx_fifo_flush_base");
-
-	/* disable IPv6 options as per hardware errata */
-	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
-	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
-	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
-
-	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
-		return;
-
-	/* Disable all Rx queues */
-	for (i = 0; i < 4; i++) {
-		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
-		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
-				rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
-	}
-	/* Poll all queues to verify they have shut down */
-	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
-		msec_delay(1);
-		rx_enabled = 0;
-		for (i = 0; i < 4; i++)
-			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
-		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
-			break;
-	}
-
-	if (ms_wait == 10)
-		DEBUGOUT("Queue disable timed out after 10ms\n");
-
-	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
-	 * incoming packets are rejected.  Set enable and wait 2ms so that
-	 * any packet that was coming in as RCTL.EN was set is flushed
-	 */
-	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
-
-	rlpml = E1000_READ_REG(hw, E1000_RLPML);
-	E1000_WRITE_REG(hw, E1000_RLPML, 0);
-
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
-	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
-	temp_rctl |= E1000_RCTL_LPE;
-
-	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
-	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
-	E1000_WRITE_FLUSH(hw);
-	msec_delay(2);
-
-	/* Enable Rx queues that were previously enabled and restore our
-	 * previous state
-	 */
-	for (i = 0; i < 4; i++)
-		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-	E1000_WRITE_FLUSH(hw);
-
-	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
-	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
-
-	/* Flush receive errors generated by workaround */
-	E1000_READ_REG(hw, E1000_ROC);
-	E1000_READ_REG(hw, E1000_RNBC);
-	E1000_READ_REG(hw, E1000_MPC);
-}
diff --git a/drivers/net/e1000/base/e1000_base.h b/drivers/net/e1000/base/e1000_base.h
index 0d6172b6d8..16d7ca98a7 100644
--- a/drivers/net/e1000/base/e1000_base.h
+++ b/drivers/net/e1000/base/e1000_base.h
@@ -8,7 +8,6 @@ 
 /* forward declaration */
 s32 e1000_init_hw_base(struct e1000_hw *hw);
 void e1000_power_down_phy_copper_base(struct e1000_hw *hw);
-extern void e1000_rx_fifo_flush_base(struct e1000_hw *hw);
 s32 e1000_acquire_phy_base(struct e1000_hw *hw);
 void e1000_release_phy_base(struct e1000_hw *hw);
 
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 14f86b7bdc..4f9a7bc3f1 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -5467,60 +5467,6 @@  void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 	return;
 }
 
-/**
- *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
- *  @hw: pointer to the HW structure
- *
- *  Workaround for 82566 power-down on D3 entry:
- *    1) disable gigabit link
- *    2) write VR power-down enable
- *    3) read it back
- *  Continue if successful, else issue LCD reset and repeat
- **/
-void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
-{
-	u32 reg;
-	u16 data;
-	u8  retry = 0;
-
-	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
-
-	if (hw->phy.type != e1000_phy_igp_3)
-		return;
-
-	/* Try the workaround twice (if needed) */
-	do {
-		/* Disable link */
-		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
-		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
-			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
-		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
-
-		/* Call gig speed drop workaround on Gig disable before
-		 * accessing any PHY registers
-		 */
-		if (hw->mac.type == e1000_ich8lan)
-			e1000_gig_downshift_workaround_ich8lan(hw);
-
-		/* Write VR power-down enable */
-		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
-		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
-		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
-				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
-
-		/* Read it back and test */
-		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
-		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
-		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
-			break;
-
-		/* Issue PHY reset and repeat at most one more time */
-		reg = E1000_READ_REG(hw, E1000_CTRL);
-		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
-		retry++;
-	} while (retry);
-}
-
 /**
  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
  *  @hw: pointer to the HW structure
@@ -5557,218 +5503,6 @@  void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
 				     reg_data);
 }
 
-/**
- *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
- *  @hw: pointer to the HW structure
- *
- *  During S0 to Sx transition, it is possible the link remains at gig
- *  instead of negotiating to a lower speed.  Before going to Sx, set
- *  'Gig Disable' to force link speed negotiation to a lower speed based on
- *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
- *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
- *  needs to be written.
- *  Parts that support (and are linked to a partner which support) EEE in
- *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
- *  than 10Mbps w/o EEE.
- **/
-void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
-{
-	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-	u32 phy_ctrl;
-	s32 ret_val;
-
-	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
-
-	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
-	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
-
-	if (hw->phy.type == e1000_phy_i217) {
-		u16 phy_reg, device_id = hw->device_id;
-
-		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
-		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
-		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
-		    (hw->mac.type >= e1000_pch_spt)) {
-			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
-
-			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
-					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
-		}
-
-		ret_val = hw->phy.ops.acquire(hw);
-		if (ret_val)
-			goto out;
-
-		if (!dev_spec->eee_disable) {
-			u16 eee_advert;
-
-			ret_val =
-			    e1000_read_emi_reg_locked(hw,
-						      I217_EEE_ADVERTISEMENT,
-						      &eee_advert);
-			if (ret_val)
-				goto release;
-
-			/* Disable LPLU if both link partners support 100BaseT
-			 * EEE and 100Full is advertised on both ends of the
-			 * link, and enable Auto Enable LPI since there will
-			 * be no driver to enable LPI while in Sx.
-			 */
-			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
-			    (dev_spec->eee_lp_ability &
-			     I82579_EEE_100_SUPPORTED) &&
-			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
-				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
-					      E1000_PHY_CTRL_NOND0A_LPLU);
-
-				/* Set Auto Enable LPI after link up */
-				hw->phy.ops.read_reg_locked(hw,
-							    I217_LPI_GPIO_CTRL,
-							    &phy_reg);
-				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
-				hw->phy.ops.write_reg_locked(hw,
-							     I217_LPI_GPIO_CTRL,
-							     phy_reg);
-			}
-		}
-
-		/* For i217 Intel Rapid Start Technology support,
-		 * when the system is going into Sx and no manageability engine
-		 * is present, the driver must configure proxy to reset only on
-		 * power good.  LPI (Low Power Idle) state must also reset only
-		 * on power good, as well as the MTA (Multicast table array).
-		 * The SMBus release must also be disabled on LCD reset.
-		 */
-		if (!(E1000_READ_REG(hw, E1000_FWSM) &
-		      E1000_ICH_FWSM_FW_VALID)) {
-			/* Enable proxy to reset only on power good. */
-			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
-						    &phy_reg);
-			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
-			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
-						     phy_reg);
-
-			/* Set bit enable LPI (EEE) to reset only on
-			 * power good.
-			*/
-			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
-			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
-			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
-
-			/* Disable the SMB release on LCD reset. */
-			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
-			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
-			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
-		}
-
-		/* Enable MTA to reset for Intel Rapid Start Technology
-		 * Support
-		 */
-		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
-		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
-		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
-
-release:
-		hw->phy.ops.release(hw);
-	}
-out:
-	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
-
-	if (hw->mac.type == e1000_ich8lan)
-		e1000_gig_downshift_workaround_ich8lan(hw);
-
-	if (hw->mac.type >= e1000_pchlan) {
-		e1000_oem_bits_config_ich8lan(hw, false);
-
-		/* Reset PHY to activate OEM bits on 82577/8 */
-		if (hw->mac.type == e1000_pchlan)
-			e1000_phy_hw_reset_generic(hw);
-
-		ret_val = hw->phy.ops.acquire(hw);
-		if (ret_val)
-			return;
-		e1000_write_smbus_addr(hw);
-		hw->phy.ops.release(hw);
-	}
-
-	return;
-}
-
-/**
- *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
- *  @hw: pointer to the HW structure
- *
- *  During Sx to S0 transitions on non-managed devices or managed devices
- *  on which PHY resets are not blocked, if the PHY registers cannot be
- *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
- *  the PHY.
- *  On i217, setup Intel Rapid Start Technology.
- **/
-u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
-{
-	s32 ret_val;
-
-	DEBUGFUNC("e1000_resume_workarounds_pchlan");
-	if (hw->mac.type < e1000_pch2lan)
-		return E1000_SUCCESS;
-
-	ret_val = e1000_init_phy_workarounds_pchlan(hw);
-	if (ret_val) {
-		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
-		return ret_val;
-	}
-
-	/* For i217 Intel Rapid Start Technology support when the system
-	 * is transitioning from Sx and no manageability engine is present
-	 * configure SMBus to restore on reset, disable proxy, and enable
-	 * the reset on MTA (Multicast table array).
-	 */
-	if (hw->phy.type == e1000_phy_i217) {
-		u16 phy_reg;
-
-		ret_val = hw->phy.ops.acquire(hw);
-		if (ret_val) {
-			DEBUGOUT("Failed to setup iRST\n");
-			return ret_val;
-		}
-
-		/* Clear Auto Enable LPI after link up */
-		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
-		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
-		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
-
-		if (!(E1000_READ_REG(hw, E1000_FWSM) &
-		    E1000_ICH_FWSM_FW_VALID)) {
-			/* Restore clear on SMB if no manageability engine
-			 * is present
-			 */
-			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
-							      &phy_reg);
-			if (ret_val)
-				goto release;
-			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
-			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
-
-			/* Disable Proxy */
-			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
-		}
-		/* Enable reset on MTA */
-		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
-						      &phy_reg);
-		if (ret_val)
-			goto release;
-		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
-		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
-release:
-		if (ret_val)
-			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
-		hw->phy.ops.release(hw);
-		return ret_val;
-	}
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_ich8lan.h b/drivers/net/e1000/base/e1000_ich8lan.h
index e456e5132e..e28ebb55ba 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.h
+++ b/drivers/net/e1000/base/e1000_ich8lan.h
@@ -281,10 +281,7 @@ 
 #define E1000_PCI_REVISION_ID_REG	0x08
 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 						 bool state);
-void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time);
 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_mac.c b/drivers/net/e1000/base/e1000_mac.c
index d3b3a6bac9..fe1516bd92 100644
--- a/drivers/net/e1000/base/e1000_mac.c
+++ b/drivers/net/e1000/base/e1000_mac.c
@@ -124,20 +124,6 @@  void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
 	return;
 }
 
-/**
- *  e1000_null_rar_set - No-op function, return 0
- *  @hw: pointer to the HW structure
- *  @h: dummy variable
- *  @a: dummy variable
- **/
-int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
-			u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
-{
-	DEBUGFUNC("e1000_null_rar_set");
-	UNREFERENCED_3PARAMETER(hw, h, a);
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_mac.h b/drivers/net/e1000/base/e1000_mac.h
index 86fcad23bb..0abaf2f452 100644
--- a/drivers/net/e1000/base/e1000_mac.h
+++ b/drivers/net/e1000/base/e1000_mac.h
@@ -13,7 +13,6 @@  s32  e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
 bool e1000_null_mng_mode(struct e1000_hw *hw);
 void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
 void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
-int  e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
 s32  e1000_blink_led_generic(struct e1000_hw *hw);
 s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
 s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_manage.c b/drivers/net/e1000/base/e1000_manage.c
index 4b81028302..266bb9ec91 100644
--- a/drivers/net/e1000/base/e1000_manage.c
+++ b/drivers/net/e1000/base/e1000_manage.c
@@ -353,195 +353,3 @@  bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
 
 	return false;
 }
-
-/**
- *  e1000_host_interface_command - Writes buffer to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: contains a command to write
- *  @length: the byte length of the buffer, must be multiple of 4 bytes
- *
- *  Writes a buffer to the Host Interface.  Upon success, returns E1000_SUCCESS
- *  else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
-	u32 hicr, i;
-
-	DEBUGFUNC("e1000_host_interface_command");
-
-	if (!(hw->mac.arc_subsystem_valid)) {
-		DEBUGOUT("Hardware doesn't support host interface command.\n");
-		return E1000_SUCCESS;
-	}
-
-	if (!hw->mac.asf_firmware_present) {
-		DEBUGOUT("Firmware is not present.\n");
-		return E1000_SUCCESS;
-	}
-
-	if (length == 0 || length & 0x3 ||
-	    length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
-		DEBUGOUT("Buffer length failure.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Check that the host interface is enabled. */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	if (!(hicr & E1000_HICR_EN)) {
-		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Calculate length in DWORDs */
-	length >>= 2;
-
-	/* The device driver writes the relevant command block
-	 * into the ram area.
-	 */
-	for (i = 0; i < length; i++)
-		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
-					    *((u32 *)buffer + i));
-
-	/* Setting this bit tells the ARC that a new command is pending. */
-	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
-	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-		hicr = E1000_READ_REG(hw, E1000_HICR);
-		if (!(hicr & E1000_HICR_C))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check command successful completion. */
-	if (i == E1000_HI_COMMAND_TIMEOUT ||
-	    (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
-		DEBUGOUT("Command has failed with no status valid.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	for (i = 0; i < length; i++)
-		*((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
-								  E1000_HOST_IF,
-								  i);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_load_firmware - Writes proxy FW code buffer to host interface
- *                        and execute.
- *  @hw: pointer to the HW structure
- *  @buffer: contains a firmware to write
- *  @length: the byte length of the buffer, must be multiple of 4 bytes
- *
- *  Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled
- *  in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
-	u32 hicr, hibba, fwsm, icr, i;
-
-	DEBUGFUNC("e1000_load_firmware");
-
-	if (hw->mac.type < e1000_i210) {
-		DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
-		return -E1000_ERR_CONFIG;
-	}
-
-	/* Check that the host interface is enabled. */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	if (!(hicr & E1000_HICR_EN)) {
-		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-		return -E1000_ERR_CONFIG;
-	}
-	if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
-		DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
-		return -E1000_ERR_CONFIG;
-	}
-
-	if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
-		DEBUGOUT("Buffer length failure.\n");
-		return -E1000_ERR_INVALID_ARGUMENT;
-	}
-
-	/* Clear notification from ROM-FW by reading ICR register */
-	icr = E1000_READ_REG(hw, E1000_ICR_V2);
-
-	/* Reset ROM-FW */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	hicr |= E1000_HICR_FW_RESET_ENABLE;
-	E1000_WRITE_REG(hw, E1000_HICR, hicr);
-	hicr |= E1000_HICR_FW_RESET;
-	E1000_WRITE_REG(hw, E1000_HICR, hicr);
-	E1000_WRITE_FLUSH(hw);
-
-	/* Wait till MAC notifies about its readiness after ROM-FW reset */
-	for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
-		icr = E1000_READ_REG(hw, E1000_ICR_V2);
-		if (icr & E1000_ICR_MNG)
-			break;
-		msec_delay(1);
-	}
-
-	/* Check for timeout */
-	if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("FW reset failed.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Wait till MAC is ready to accept new FW code */
-	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-		fwsm = E1000_READ_REG(hw, E1000_FWSM);
-		if ((fwsm & E1000_FWSM_FW_VALID) &&
-		    ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
-		    E1000_FWSM_HI_EN_ONLY_MODE))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check for timeout */
-	if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("FW reset failed.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Calculate length in DWORDs */
-	length >>= 2;
-
-	/* The device driver writes the relevant FW code block
-	 * into the ram area in DWORDs via 1kB ram addressing window.
-	 */
-	for (i = 0; i < length; i++) {
-		if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
-			/* Point to correct 1kB ram window */
-			hibba = E1000_HI_FW_BASE_ADDRESS +
-				((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
-				(i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
-
-			E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
-		}
-
-		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
-					    i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
-					    *((u32 *)buffer + i));
-	}
-
-	/* Setting this bit tells the ARC that a new FW is ready to execute. */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
-	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-		hicr = E1000_READ_REG(hw, E1000_HICR);
-		if (!(hicr & E1000_HICR_C))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check for successful FW start. */
-	if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("New FW did not start within timeout period.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	return E1000_SUCCESS;
-}
diff --git a/drivers/net/e1000/base/e1000_manage.h b/drivers/net/e1000/base/e1000_manage.h
index 268a13381d..da0246b6a9 100644
--- a/drivers/net/e1000/base/e1000_manage.h
+++ b/drivers/net/e1000/base/e1000_manage.h
@@ -16,8 +16,6 @@  s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
 				       u8 *buffer, u16 length);
 bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 u8 e1000_calculate_checksum(u8 *buffer, u32 length);
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
-s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
 
 enum e1000_mng_mode {
 	e1000_mng_mode_none = 0,
diff --git a/drivers/net/e1000/base/e1000_nvm.c b/drivers/net/e1000/base/e1000_nvm.c
index 430fecaf6d..4b3ce7d634 100644
--- a/drivers/net/e1000/base/e1000_nvm.c
+++ b/drivers/net/e1000/base/e1000_nvm.c
@@ -947,135 +947,6 @@  s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
 	return E1000_SUCCESS;
 }
 
-
-/**
- *  e1000_read_pba_raw
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @max_pba_block_size: PBA block size limit
- *  @pba: pointer to output PBA structure
- *
- *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
- *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
- *
- **/
-s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-		       u32 eeprom_buf_size, u16 max_pba_block_size,
-		       struct e1000_pba *pba)
-{
-	s32 ret_val;
-	u16 pba_block_size;
-
-	if (pba == NULL)
-		return -E1000_ERR_PARAM;
-
-	if (eeprom_buf == NULL) {
-		ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2,
-					 &pba->word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
-			pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0];
-			pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1];
-		} else {
-			return -E1000_ERR_PARAM;
-		}
-	}
-
-	if (pba->word[0] == NVM_PBA_PTR_GUARD) {
-		if (pba->pba_block == NULL)
-			return -E1000_ERR_PARAM;
-
-		ret_val = e1000_get_pba_block_size(hw, eeprom_buf,
-						   eeprom_buf_size,
-						   &pba_block_size);
-		if (ret_val)
-			return ret_val;
-
-		if (pba_block_size > max_pba_block_size)
-			return -E1000_ERR_PARAM;
-
-		if (eeprom_buf == NULL) {
-			ret_val = e1000_read_nvm(hw, pba->word[1],
-						 pba_block_size,
-						 pba->pba_block);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > (u32)(pba->word[1] +
-					      pba_block_size)) {
-				memcpy(pba->pba_block,
-				       &eeprom_buf[pba->word[1]],
-				       pba_block_size * sizeof(u16));
-			} else {
-				return -E1000_ERR_PARAM;
-			}
-		}
-	}
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_pba_raw
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @pba: pointer to PBA structure
- *
- *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
- *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
- *
- **/
-s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-			u32 eeprom_buf_size, struct e1000_pba *pba)
-{
-	s32 ret_val;
-
-	if (pba == NULL)
-		return -E1000_ERR_PARAM;
-
-	if (eeprom_buf == NULL) {
-		ret_val = e1000_write_nvm(hw, NVM_PBA_OFFSET_0, 2,
-					  &pba->word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
-			eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0];
-			eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1];
-		} else {
-			return -E1000_ERR_PARAM;
-		}
-	}
-
-	if (pba->word[0] == NVM_PBA_PTR_GUARD) {
-		if (pba->pba_block == NULL)
-			return -E1000_ERR_PARAM;
-
-		if (eeprom_buf == NULL) {
-			ret_val = e1000_write_nvm(hw, pba->word[1],
-						  pba->pba_block[0],
-						  pba->pba_block);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > (u32)(pba->word[1] +
-					      pba->pba_block[0])) {
-				memcpy(&eeprom_buf[pba->word[1]],
-				       pba->pba_block,
-				       pba->pba_block[0] * sizeof(u16));
-			} else {
-				return -E1000_ERR_PARAM;
-			}
-		}
-	}
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_get_pba_block_size
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_nvm.h b/drivers/net/e1000/base/e1000_nvm.h
index 056f823537..e48d638795 100644
--- a/drivers/net/e1000/base/e1000_nvm.h
+++ b/drivers/net/e1000/base/e1000_nvm.h
@@ -40,11 +40,6 @@  s32  e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
 s32  e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
 				   u32 pba_num_size);
 s32  e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
-s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-		       u32 eeprom_buf_size, u16 max_pba_block_size,
-		       struct e1000_pba *pba);
-s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-			u32 eeprom_buf_size, struct e1000_pba *pba);
 s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf,
 			     u32 eeprom_buf_size, u16 *pba_block_size);
 s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/e1000/base/e1000_phy.c b/drivers/net/e1000/base/e1000_phy.c
index 62d0be5080..b3be39f7bd 100644
--- a/drivers/net/e1000/base/e1000_phy.c
+++ b/drivers/net/e1000/base/e1000_phy.c
@@ -545,79 +545,6 @@  s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_write_sfp_data_byte - Writes SFP module data.
- *  @hw: pointer to the HW structure
- *  @offset: byte location offset to write to
- *  @data: data to write
- *
- *  Writes one byte to SFP module data stored
- *  in SFP resided EEPROM memory or SFP diagnostic area.
- *  Function should be called with
- *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- *  E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- *  access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
-	u32 i = 0;
-	u32 i2ccmd = 0;
-	u32 data_local = 0;
-
-	DEBUGFUNC("e1000_write_sfp_data_byte");
-
-	if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
-		DEBUGOUT("I2CCMD command address exceeds upper limit\n");
-		return -E1000_ERR_PHY;
-	}
-	/* The programming interface is 16 bits wide
-	 * so we need to read the whole word first
-	 * then update appropriate byte lane and write
-	 * the updated word back.
-	 */
-	/* Set up Op-code, EEPROM Address,in the I2CCMD
-	 * register. The MAC will take care of interfacing
-	 * with an EEPROM to write the data given.
-	 */
-	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-		  E1000_I2CCMD_OPCODE_READ);
-	/* Set a command to read single word */
-	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-		usec_delay(50);
-		/* Poll the ready bit to see if lastly
-		 * launched I2C operation completed
-		 */
-		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
-		if (i2ccmd & E1000_I2CCMD_READY) {
-			/* Check if this is READ or WRITE phase */
-			if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
-			    E1000_I2CCMD_OPCODE_READ) {
-				/* Write the selected byte
-				 * lane and update whole word
-				 */
-				data_local = i2ccmd & 0xFF00;
-				data_local |= (u32)data;
-				i2ccmd = ((offset <<
-					E1000_I2CCMD_REG_ADDR_SHIFT) |
-					E1000_I2CCMD_OPCODE_WRITE | data_local);
-				E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-			} else {
-				break;
-			}
-		}
-	}
-	if (!(i2ccmd & E1000_I2CCMD_READY)) {
-		DEBUGOUT("I2CCMD Write did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (i2ccmd & E1000_I2CCMD_ERROR) {
-		DEBUGOUT("I2CCMD Error bit set\n");
-		return -E1000_ERR_PHY;
-	}
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_read_phy_reg_m88 - Read m88 PHY register
  *  @hw: pointer to the HW structure
@@ -4083,134 +4010,6 @@  s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
 	return ret_val;
 }
 
-/**
- *  e1000_read_phy_reg_mphy - Read mPHY control register
- *  @hw: pointer to the HW structure
- *  @address: address to be read
- *  @data: pointer to the read data
- *
- *  Reads the mPHY control register in the PHY at offset and stores the
- *  information read to data.
- **/
-s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
-{
-	u32 mphy_ctrl = 0;
-	bool locked = false;
-	bool ready;
-
-	DEBUGFUNC("e1000_read_phy_reg_mphy");
-
-	/* Check if mPHY is ready to read/write operations */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* Check if mPHY access is disabled and enable it if so */
-	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
-		locked = true;
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-	}
-
-	/* Set the address that we want to read */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* We mask address, because we want to use only current lane */
-	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
-		~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
-		(address & E1000_MPHY_ADDRESS_MASK);
-	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-
-	/* Read data from the address */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-	*data = E1000_READ_REG(hw, E1000_MPHY_DATA);
-
-	/* Disable access to mPHY if it was originally disabled */
-	if (locked) {
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
-				E1000_MPHY_DIS_ACCESS);
-	}
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_phy_reg_mphy - Write mPHY control register
- *  @hw: pointer to the HW structure
- *  @address: address to write to
- *  @data: data to write to register at offset
- *  @line_override: used when we want to use different line than default one
- *
- *  Writes data to mPHY control register.
- **/
-s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
-			     bool line_override)
-{
-	u32 mphy_ctrl = 0;
-	bool locked = false;
-	bool ready;
-
-	DEBUGFUNC("e1000_write_phy_reg_mphy");
-
-	/* Check if mPHY is ready to read/write operations */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* Check if mPHY access is disabled and enable it if so */
-	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
-		locked = true;
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-	}
-
-	/* Set the address that we want to read */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* We mask address, because we want to use only current lane */
-	if (line_override)
-		mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
-	else
-		mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
-	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
-		(address & E1000_MPHY_ADDRESS_MASK);
-	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-
-	/* Read data from the address */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-	E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
-
-	/* Disable access to mPHY if it was originally disabled */
-	if (locked) {
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
-				E1000_MPHY_DIS_ACCESS);
-	}
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_is_mphy_ready - Check if mPHY control register is not busy
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h
index 81c5308589..fcd1e09f42 100644
--- a/drivers/net/e1000/base/e1000_phy.h
+++ b/drivers/net/e1000/base/e1000_phy.h
@@ -71,7 +71,6 @@  s32  e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
 s32  e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32  e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
 s32  e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
@@ -86,9 +85,6 @@  s32  e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
 s32  e1000_get_cable_length_82577(struct e1000_hw *hw);
 s32  e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
 s32  e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
-s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
-			     bool line_override);
 bool e1000_is_mphy_ready(struct e1000_hw *hw);
 
 s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
diff --git a/drivers/net/e1000/base/e1000_vf.c b/drivers/net/e1000/base/e1000_vf.c
index 44ebe07ee4..9b001f9c2e 100644
--- a/drivers/net/e1000/base/e1000_vf.c
+++ b/drivers/net/e1000/base/e1000_vf.c
@@ -411,25 +411,6 @@  void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
 	e1000_write_msg_read_ack(hw, msgbuf, E1000_VFMAILBOX_SIZE);
 }
 
-/**
- *  e1000_vfta_set_vf - Set/Unset vlan filter table address
- *  @hw: pointer to the HW structure
- *  @vid: determines the vfta register and bit to set/unset
- *  @set: if true then set bit, else clear bit
- **/
-void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
-{
-	u32 msgbuf[2];
-
-	msgbuf[0] = E1000_VF_SET_VLAN;
-	msgbuf[1] = vid;
-	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
-	if (set)
-		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
-
-	e1000_write_msg_read_ack(hw, msgbuf, 2);
-}
-
 /** e1000_rlpml_set_vf - Set the maximum receive packet length
  *  @hw: pointer to the HW structure
  *  @max_size: value to assign to max frame size
diff --git a/drivers/net/e1000/base/e1000_vf.h b/drivers/net/e1000/base/e1000_vf.h
index 4bec21c935..ff62970132 100644
--- a/drivers/net/e1000/base/e1000_vf.h
+++ b/drivers/net/e1000/base/e1000_vf.h
@@ -260,7 +260,6 @@  enum e1000_promisc_type {
 
 /* These functions must be implemented by drivers */
 s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
 #endif /* _E1000_VF_H_ */
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index aae68721fb..04fd15c998 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -1064,11 +1064,6 @@  static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 				      feature_ver);
 }
 
-int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->rss.hash_func;
-}
-
 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
@@ -1318,31 +1313,6 @@  static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
 	return 0;
 }
 
-static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
-						 u16 intr_delay_resolution)
-{
-	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
-
-	if (unlikely(!intr_delay_resolution)) {
-		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
-		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
-	}
-
-	/* update Rx */
-	ena_dev->intr_moder_rx_interval =
-		ena_dev->intr_moder_rx_interval *
-		prev_intr_delay_resolution /
-		intr_delay_resolution;
-
-	/* update Tx */
-	ena_dev->intr_moder_tx_interval =
-		ena_dev->intr_moder_tx_interval *
-		prev_intr_delay_resolution /
-		intr_delay_resolution;
-
-	ena_dev->intr_delay_resolution = intr_delay_resolution;
-}
-
 /*****************************************************************************/
 /*******************************      API       ******************************/
 /*****************************************************************************/
@@ -1703,17 +1673,6 @@  void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 	ena_dev->admin_queue.polling = polling;
 }
 
-bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->admin_queue.polling;
-}
-
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
-					 bool polling)
-{
-	ena_dev->admin_queue.auto_polling = polling;
-}
-
 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1942,12 +1901,6 @@  void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
 }
 
-int ena_com_get_link_params(struct ena_com_dev *ena_dev,
-			    struct ena_admin_get_feat_resp *resp)
-{
-	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
-}
-
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -2277,24 +2230,6 @@  int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
 	return ret;
 }
 
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
-				 struct ena_admin_feature_offload_desc *offload)
-{
-	int ret;
-	struct ena_admin_get_feat_resp resp;
-
-	ret = ena_com_get_feature(ena_dev, &resp,
-				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
-	if (unlikely(ret)) {
-		ena_trc_err("Failed to get offload capabilities %d\n", ret);
-		return ret;
-	}
-
-	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
-
-	return 0;
-}
-
 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -2416,44 +2351,6 @@  int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 	return rc;
 }
 
-int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
-			      enum ena_admin_hash_functions *func)
-{
-	struct ena_rss *rss = &ena_dev->rss;
-	struct ena_admin_get_feat_resp get_resp;
-	int rc;
-
-	if (unlikely(!func))
-		return ENA_COM_INVAL;
-
-	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
-				    ENA_ADMIN_RSS_HASH_FUNCTION,
-				    rss->hash_key_dma_addr,
-				    sizeof(*rss->hash_key), 0);
-	if (unlikely(rc))
-		return rc;
-
-	/* ENA_FFS() returns 1 in case the lsb is set */
-	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
-	if (rss->hash_func)
-		rss->hash_func--;
-
-	*func = rss->hash_func;
-
-	return 0;
-}
-
-int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
-{
-	struct ena_admin_feature_rss_flow_hash_control *hash_key =
-		ena_dev->rss.hash_key;
-
-	if (key)
-		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
-
-	return 0;
-}
-
 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
 			  enum ena_admin_flow_hash_proto proto,
 			  u16 *fields)
@@ -2582,43 +2479,6 @@  int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
 	return rc;
 }
 
-int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
-			   enum ena_admin_flow_hash_proto proto,
-			   u16 hash_fields)
-{
-	struct ena_rss *rss = &ena_dev->rss;
-	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
-	u16 supported_fields;
-	int rc;
-
-	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
-		ena_trc_err("Invalid proto num (%u)\n", proto);
-		return ENA_COM_INVAL;
-	}
-
-	/* Get the ctrl table */
-	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
-	if (unlikely(rc))
-		return rc;
-
-	/* Make sure all the fields are supported */
-	supported_fields = hash_ctrl->supported_fields[proto].fields;
-	if ((hash_fields & supported_fields) != hash_fields) {
-		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
-			    proto, hash_fields, supported_fields);
-	}
-
-	hash_ctrl->selected_fields[proto].fields = hash_fields;
-
-	rc = ena_com_set_hash_ctrl(ena_dev);
-
-	/* In case of failure, restore the old hash ctrl */
-	if (unlikely(rc))
-		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
-
-	return 0;
-}
-
 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
 				      u16 entry_idx, u16 entry_value)
 {
@@ -2874,88 +2734,6 @@  int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
 	return ret;
 }
 
-/* Interrupt moderation */
-bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
-{
-	return ena_com_check_supported_feature_id(ena_dev,
-						  ENA_ADMIN_INTERRUPT_MODERATION);
-}
-
-static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
-							  u32 intr_delay_resolution,
-							  u32 *intr_moder_interval)
-{
-	if (!intr_delay_resolution) {
-		ena_trc_err("Illegal interrupt delay granularity value\n");
-		return ENA_COM_FAULT;
-	}
-
-	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
-
-	return 0;
-}
-
-
-int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
-						      u32 tx_coalesce_usecs)
-{