zxdh: add zxdh poll mode driver

Message ID 20240603192857865CEjaTZFRXk6x3WbOud5lK@zte.com.cn (mailing list archive)
State Changes Requested
Delegated to: Ferruh Yigit
Headers
Series zxdh: add zxdh poll mode driver |

Checks

Context Check Description
ci/github-robot: build success github build: passed
ci/iol-testing warning apply patch failure

Commit Message

Junlong Wang June 3, 2024, 11:28 a.m. UTC
  From 689a5e88b7ba123852153284b33911defc0f7b92 Mon Sep 17 00:00:00 2001
From: Junlong Wang <wang.junlong1@zte.com.cn>
Date: Mon, 3 Jun 2024 17:10:36 +0800
Subject: [PATCH] zxdh: add zxdh poll mode driver

zxdh is for ZTE 25/100G Ethernet NIC.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 MAINTAINERS                        |    6 +
 doc/guides/nics/features/zxdh.ini  |   38 +
 doc/guides/nics/zxdh.rst           |   61 +
 drivers/net/meson.build            |    1 +
 drivers/net/zxdh/meson.build       |   94 +
 drivers/net/zxdh/msg_chan_pub.h    |  274 +++
 drivers/net/zxdh/version.map       |    3 +
 drivers/net/zxdh/zxdh_common.c     |  512 +++++
 drivers/net/zxdh/zxdh_common.h     |  154 ++
 drivers/net/zxdh/zxdh_ethdev.c     | 3431 ++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev.h     |  244 ++
 drivers/net/zxdh/zxdh_ethdev_ops.c | 2205 ++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev_ops.h |  159 ++
 drivers/net/zxdh/zxdh_flow.c       |  973 ++++++++
 drivers/net/zxdh/zxdh_flow.h       |  129 ++
 drivers/net/zxdh/zxdh_logs.h       |   72 +
 drivers/net/zxdh/zxdh_msg_chan.c   | 1270 ++++++++++
 drivers/net/zxdh/zxdh_msg_chan.h   |  380 +++
 drivers/net/zxdh/zxdh_mtr.c        |  916 ++++++++
 drivers/net/zxdh/zxdh_mtr.h        |   46 +
 drivers/net/zxdh/zxdh_mtr_drv.c    |  527 +++++
 drivers/net/zxdh/zxdh_mtr_drv.h    |  119 +
 drivers/net/zxdh/zxdh_pci.c        |  499 ++++
 drivers/net/zxdh/zxdh_pci.h        |  272 +++
 drivers/net/zxdh/zxdh_queue.c      |  135 ++
 drivers/net/zxdh/zxdh_queue.h      |  491 ++++
 drivers/net/zxdh/zxdh_ring.h       |  160 ++
 drivers/net/zxdh/zxdh_rxtx.c       | 1307 +++++++++++
 drivers/net/zxdh/zxdh_rxtx.h       |   59 +
 drivers/net/zxdh/zxdh_table_drv.h  |  323 +++
 drivers/net/zxdh/zxdh_tables.c     | 2193 ++++++++++++++++++
 drivers/net/zxdh/zxdh_tables.h     |  227 ++
 drivers/net/zxdh/zxdh_telemetry.c  |  581 +++++
 drivers/net/zxdh/zxdh_telemetry.h  |   30 +
 34 files changed, 17891 insertions(+)
 create mode 100644 doc/guides/nics/features/zxdh.ini
 create mode 100644 doc/guides/nics/zxdh.rst
 create mode 100644 drivers/net/zxdh/meson.build
 create mode 100644 drivers/net/zxdh/msg_chan_pub.h
 create mode 100644 drivers/net/zxdh/version.map
 create mode 100644 drivers/net/zxdh/zxdh_common.c
 create mode 100644 drivers/net/zxdh/zxdh_common.h
 create mode 100644 drivers/net/zxdh/zxdh_ethdev.c
 create mode 100644 drivers/net/zxdh/zxdh_ethdev.h
 create mode 100644 drivers/net/zxdh/zxdh_ethdev_ops.c
 create mode 100644 drivers/net/zxdh/zxdh_ethdev_ops.h
 create mode 100644 drivers/net/zxdh/zxdh_flow.c
 create mode 100644 drivers/net/zxdh/zxdh_flow.h
 create mode 100644 drivers/net/zxdh/zxdh_logs.h
 create mode 100644 drivers/net/zxdh/zxdh_msg_chan.c
 create mode 100644 drivers/net/zxdh/zxdh_msg_chan.h
 create mode 100644 drivers/net/zxdh/zxdh_mtr.c
 create mode 100644 drivers/net/zxdh/zxdh_mtr.h
 create mode 100644 drivers/net/zxdh/zxdh_mtr_drv.c
 create mode 100644 drivers/net/zxdh/zxdh_mtr_drv.h
 create mode 100644 drivers/net/zxdh/zxdh_pci.c
 create mode 100644 drivers/net/zxdh/zxdh_pci.h
 create mode 100644 drivers/net/zxdh/zxdh_queue.c
 create mode 100644 drivers/net/zxdh/zxdh_queue.h
 create mode 100644 drivers/net/zxdh/zxdh_ring.h
 create mode 100644 drivers/net/zxdh/zxdh_rxtx.c
 create mode 100644 drivers/net/zxdh/zxdh_rxtx.h
 create mode 100644 drivers/net/zxdh/zxdh_table_drv.h
 create mode 100644 drivers/net/zxdh/zxdh_tables.c
 create mode 100644 drivers/net/zxdh/zxdh_tables.h
 create mode 100644 drivers/net/zxdh/zxdh_telemetry.c
 create mode 100644 drivers/net/zxdh/zxdh_telemetry.h
  

Comments

Stephen Hemminger June 3, 2024, 2:58 p.m. UTC | #1
On Mon, 3 Jun 2024 19:28:57 +0800 (CST)
<wang.junlong1@zte.com.cn> wrote:

> +Prerequisites
> +-------------
> +
> +This PMD driver need NPSDK library for system initialization and allocation of resources.
> +Communication between PMD and kernel modules is mediated by zxdh Kernel modules.
> +The NPSDK library and zxdh Kernel modules are not part of DPDK and must be installed
> +separately:
> +
> +- Getting the latest NPSDK library and software supports using
> +  ``_.

What is the license of the NPSDK ?
  
Junlong Wang June 6, 2024, 12:02 p.m. UTC | #2
>> +Prerequisites
>> +-------------
>> +
>> +This PMD driver need NPSDK library for system initialization and allocation of resources.
>> +Communication between PMD and kernel modules is mediated by zxdh Kernel modules.
>> +The NPSDK library and zxdh Kernel modules are not part of DPDK and must be installed
>> +separately:
>> +
>> +- Getting the latest NPSDK library and software supports using
>> +  ``_.

>What is the license of the NPSDK ?


We are building a download platform and will provide it in the next submission.

By the way, we would like to push zxdh to the 22.11 LTS version. What process should we follow?

Thanks
  
Ferruh Yigit July 5, 2024, 5:31 p.m. UTC | #3
On 6/6/2024 1:02 PM, Junlong Wang wrote:
>>> +Prerequisites
>>> +-------------
>>> +
>>> +This PMD driver need NPSDK library for system initialization and allocation of resources.
>>> +Communication between PMD and kernel modules is mediated by zxdh Kernel modules.
>>> +The NPSDK library and zxdh Kernel modules are not part of DPDK and must be installed
>>> +separately:
>>> +
>>> +- Getting the latest NPSDK library and software supports using
>>> +  ``_.
> 
>>What is the license of the NPSDK ?
> 
> 
> We are building a download platform and will provide it in the next submission.
> 

ack

Dependency should be publicly available.

> By the way, we would like to push zxdh to the 22.11 LTS version. What process should we follow?
> 

It is not possible to push a new feature/driver to LTS releases; they only
backport fixes.

You may target next LTS, v24.11, release.
  
Ferruh Yigit July 5, 2024, 5:32 p.m. UTC | #4
On 6/3/2024 12:28 PM, wang.junlong1@zte.com.cn wrote:
> From 689a5e88b7ba123852153284b33911defc0f7b92 Mon Sep 17 00:00:00 2001
> From: Junlong Wang <wang.junlong1@zte.com.cn>
> Date: Mon, 3 Jun 2024 17:10:36 +0800
> Subject: [PATCH] zxdh: add zxdh poll mode driver
> 
> zxdh is for ZTE 25/100G Ethernet NIC.
> 

Hi Junlong,

Thanks for contributing, it is good to see ZTE drivers upstreamed.

During upstreaming, it helps to split the feature into multiple logical
parts. This way it is easier for people to review your code, and in the
future it becomes easier to study and understand the code.

Please check another driver in the progress of upstreaming, latest
version of it has more structured patch series, you can use that as sample:
https://patches.dpdk.org/project/dpdk/list/?series=32313&state=%2A&archive=both


Meanwhile I will add some review comments below, although it is hard to
review a driver as single patch, still you can address issues in your
next version.


> Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
> ---
>  MAINTAINERS                        |    6 +
>  doc/guides/nics/features/zxdh.ini  |   38 +
>  doc/guides/nics/zxdh.rst           |   61 +
>  drivers/net/meson.build            |    1 +
>  drivers/net/zxdh/meson.build       |   94 +
>  drivers/net/zxdh/msg_chan_pub.h    |  274 +++
>  drivers/net/zxdh/version.map       |    3 +
>  drivers/net/zxdh/zxdh_common.c     |  512 +++++
>  drivers/net/zxdh/zxdh_common.h     |  154 ++
>  drivers/net/zxdh/zxdh_ethdev.c     | 3431 ++++++++++++++++++++++++++++
>  drivers/net/zxdh/zxdh_ethdev.h     |  244 ++
>  drivers/net/zxdh/zxdh_ethdev_ops.c | 2205 ++++++++++++++++++
>  drivers/net/zxdh/zxdh_ethdev_ops.h |  159 ++
>  drivers/net/zxdh/zxdh_flow.c       |  973 ++++++++
>  drivers/net/zxdh/zxdh_flow.h       |  129 ++
>  drivers/net/zxdh/zxdh_logs.h       |   72 +
>  drivers/net/zxdh/zxdh_msg_chan.c   | 1270 ++++++++++
>  drivers/net/zxdh/zxdh_msg_chan.h   |  380 +++
>  drivers/net/zxdh/zxdh_mtr.c        |  916 ++++++++
>  drivers/net/zxdh/zxdh_mtr.h        |   46 +
>  drivers/net/zxdh/zxdh_mtr_drv.c    |  527 +++++
>  drivers/net/zxdh/zxdh_mtr_drv.h    |  119 +
>  drivers/net/zxdh/zxdh_pci.c        |  499 ++++
>  drivers/net/zxdh/zxdh_pci.h        |  272 +++
>  drivers/net/zxdh/zxdh_queue.c      |  135 ++
>  drivers/net/zxdh/zxdh_queue.h      |  491 ++++
>  drivers/net/zxdh/zxdh_ring.h       |  160 ++
>  drivers/net/zxdh/zxdh_rxtx.c       | 1307 +++++++++++
>  drivers/net/zxdh/zxdh_rxtx.h       |   59 +
>  drivers/net/zxdh/zxdh_table_drv.h  |  323 +++
>  drivers/net/zxdh/zxdh_tables.c     | 2193 ++++++++++++++++++
>  drivers/net/zxdh/zxdh_tables.h     |  227 ++
>  drivers/net/zxdh/zxdh_telemetry.c  |  581 +++++
>  drivers/net/zxdh/zxdh_telemetry.h  |   30 +
>  34 files changed, 17891 insertions(+)
>  create mode 100644 doc/guides/nics/features/zxdh.ini
>  create mode 100644 doc/guides/nics/zxdh.rst
>  create mode 100644 drivers/net/zxdh/meson.build
>  create mode 100644 drivers/net/zxdh/msg_chan_pub.h
>  create mode 100644 drivers/net/zxdh/version.map
>  create mode 100644 drivers/net/zxdh/zxdh_common.c
>  create mode 100644 drivers/net/zxdh/zxdh_common.h
>  create mode 100644 drivers/net/zxdh/zxdh_ethdev.c
>  create mode 100644 drivers/net/zxdh/zxdh_ethdev.h
>  create mode 100644 drivers/net/zxdh/zxdh_ethdev_ops.c
>  create mode 100644 drivers/net/zxdh/zxdh_ethdev_ops.h
>  create mode 100644 drivers/net/zxdh/zxdh_flow.c
>  create mode 100644 drivers/net/zxdh/zxdh_flow.h
>  create mode 100644 drivers/net/zxdh/zxdh_logs.h
>  create mode 100644 drivers/net/zxdh/zxdh_msg_chan.c
>  create mode 100644 drivers/net/zxdh/zxdh_msg_chan.h
>  create mode 100644 drivers/net/zxdh/zxdh_mtr.c
>  create mode 100644 drivers/net/zxdh/zxdh_mtr.h
>  create mode 100644 drivers/net/zxdh/zxdh_mtr_drv.c
>  create mode 100644 drivers/net/zxdh/zxdh_mtr_drv.h
>  create mode 100644 drivers/net/zxdh/zxdh_pci.c
>  create mode 100644 drivers/net/zxdh/zxdh_pci.h
>  create mode 100644 drivers/net/zxdh/zxdh_queue.c
>  create mode 100644 drivers/net/zxdh/zxdh_queue.h
>  create mode 100644 drivers/net/zxdh/zxdh_ring.h
>  create mode 100644 drivers/net/zxdh/zxdh_rxtx.c
>  create mode 100644 drivers/net/zxdh/zxdh_rxtx.h
>  create mode 100644 drivers/net/zxdh/zxdh_table_drv.h
>  create mode 100644 drivers/net/zxdh/zxdh_tables.c
>  create mode 100644 drivers/net/zxdh/zxdh_tables.h
>  create mode 100644 drivers/net/zxdh/zxdh_telemetry.c
>  create mode 100644 drivers/net/zxdh/zxdh_telemetry.h
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index c9adff9846..34f9001b93 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1063,6 +1063,12 @@ F: drivers/net/memif/
>  F: doc/guides/nics/memif.rst
>  F: doc/guides/nics/features/memif.ini
> 
> +ZTE zxdh
> +M: Junlong Wang <wang.junlong1@zte.com.cn>
> +M: Lijie Shan <shan.lijie@zte.com.cn>
> +F: drivers/net/zxdh/
> +F: doc/guides/nics/zxdh.rst
> +F: doc/guides/nics/features/zxdh.ini
> 
>  Crypto Drivers
>  --------------
> diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
> new file mode 100644
> index 0000000000..fc41426077
> --- /dev/null
> +++ b/doc/guides/nics/features/zxdh.ini
> @@ -0,0 +1,38 @@
> +;
> +; Supported features of the 'zxdh' network poll mode driver.
> +;
> +; Refer to default.ini for the full list of available PMD features.
> +;
> +[Features]
> +Speed capabilities   = Y
> +Link status          = Y
> +Link status event    = Y
> +MTU update           = Y
> +Scattered Rx         = Y
> +TSO                  = Y
> +LRO                  = Y
> +Promiscuous mode     = Y
> +Allmulticast mode    = Y
> +Unicast MAC filter   = Y
> +Multicast MAC filter = Y
> +RSS hash             = Y
> +RSS key update       = Y
> +RSS reta update      = Y
> +Inner RSS            = Y
> +SR-IOV               = Y
> +VLAN filter          = Y
> +VLAN offload         = Y
> +L3 checksum offload  = Y
> +L4 checksum offload  = Y
> +Inner L3 checksum    = Y
> +Inner L4 checksum    = Y
> +Basic stats          = Y
> +Extended stats       = Y
> +Stats per queue      = Y
> +Flow control         = Y
> +FW version           = Y
> +Multiprocess aware   = Y
> +Linux                = Y
> +x86-64               = Y
> +ARMv8                = Y
> +

When there are multiple patches, this list can be updated with each
commit that adds that feature.


> diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
> new file mode 100644
> index 0000000000..f7cbc5755b
> --- /dev/null
> +++ b/doc/guides/nics/zxdh.rst
> @@ -0,0 +1,61 @@
> +..  SPDX-License-Identifier: BSD-3-Clause
> +    Copyright(c) 2023 ZTE Corporation.
> +
> +
> +ZXDH Poll Mode Driver
> +======================
> +
> +The ZXDH PMD (**librte_net_zxdh**) provides poll mode driver support
> +for 25/100 Gbps ZXDH NX Series Ethernet Controller based on
> +the ZTE Ethernet Controller E310/E312.
> +

Can you please provide link to the product mentioned?

> +
> +Features
> +--------
> +
> +Features of the zxdh PMD are:
> +
> +- Multi arch support: x86_64, ARMv8.
> +- Multiple queues for TX and RX
> +- Receiver Side Scaling (RSS)
> +- MAC/VLAN filtering
> +- Checksum offload
> +- TSO offload
> +- VLAN/QinQ stripping and inserting
> +- Promiscuous mode
> +- Port hardware statistics
> +- Link state information
> +- Link flow control
> +- Scattered and gather for TX and RX
> +- SR-IOV VF
> +- VLAN filter and VLAN offload
> +- Allmulticast mode
> +- MTU update
> +- Jumbo frames
> +- Unicast MAC filter
> +- Multicast MAC filter
> +- Flow API
> +- Set Link down or up
> +- FW version
> +- LRO
> +

Similar to the .ini list, above list also should be constructed patch by
patch as the features introduced in each new patch.

> +Prerequisites
> +-------------
> +
> +This PMD driver need NPSDK library for system initialization and allocation of resources.
> +Communication between PMD and kernel modules is mediated by zxdh Kernel modules.
> +The NPSDK library and zxdh Kernel modules are not part of DPDK and must be installed
> +separately:
> +
> +- Getting the latest NPSDK library and software supports using
> +  ``_.
> +

You already mentioned you will include how to get npsdk in next version,
that is good.

But also can you please explain why npsdk is required, as far as I can
see driver is not a virtual driver, so where the dependency comes from?

> +Driver compilation and testing
> +------------------------------
> +
> +Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
> +for details.
> +
> +Limitations or Known issues
> +---------------------------
> +X86-32, Power8, ARMv7 and BSD are not supported yet.
> diff --git a/drivers/net/meson.build b/drivers/net/meson.build
> index bd38b533c5..3778d1b29a 100644
> --- a/drivers/net/meson.build
> +++ b/drivers/net/meson.build
> @@ -61,6 +61,7 @@ drivers = [
>          'vhost',
>          'virtio',
>          'vmxnet3',
> +        'zxdh',
>  ]
>  std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
>  std_deps += ['bus_pci']         # very many PMDs depend on PCI, so make std
> diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
> new file mode 100644
> index 0000000000..85e6eaa999
> --- /dev/null
> +++ b/drivers/net/zxdh/meson.build
> @@ -0,0 +1,94 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2023 ZTE Corporation
> +
> +sources += files('zxdh_ethdev.c',
> +	'zxdh_pci.c',
> +	'zxdh_rxtx.c',
> +	'zxdh_queue.c',
> +	'zxdh_ethdev_ops.c',
> +	'zxdh_flow.c',
> +	'zxdh_mtr.c',
> +	'zxdh_mtr_drv.c',
> +	'zxdh_common.c',
> +	'zxdh_tables.c',
> +	'zxdh_telemetry.c',
> +	'zxdh_msg_chan.c',
> +	)
> +
> +fs=import('fs')
> +project_dir = meson.source_root()
> +lib_npsdk_dir = '/usr/include/npsdk'
> +message('lib npsdk dir :  ' +lib_npsdk_dir)
> +dpp_include = lib_npsdk_dir + '/dpp/include/'
> +
> +cflags_options = [
> +		'-D DPP_FOR_PCIE',
> +		'-D MACRO_CPU64',
> +
> +]
> +foreach option:cflags_options
> +		if cc.has_argument(option)
> +				cflags += option
> +		endif
> +endforeach
> +cflags += '-fno-strict-aliasing'

Why strict-aliasing is disabled, is there an issue in the code, can't it
be resolved by updating the code?

> +
> +if arch_subdir == 'x86'
> +	lib_name = 'libdpp_x86_64_lit_64_rel'
> +else
> +	lib_name = 'libdpp_arm_aarch64_lit_64_rel'
> +endif
> +message('lib npsdk name :  ' + lib_name)
> +
> +lib = cc.find_library(lib_name , dirs : ['/usr/lib64' ], required: true)
> +
>

Build fails when dependency not found:

drivers/net/zxdh/meson.build:43:0: ERROR: C library
'libdpp_x86_64_lit_64_rel' not found

Btw, CI is not used to test the driver because of the apply failure. I
am not sure what caused this apply failure but for next version please
be sure to rebase it on top of latest 'main' branch, in case it helps.


> +
> +if not lib.found()
> +	build = false
> +	reason = 'missing dependency, lib_name'
> +else
> +	ext_deps += lib
> +	message(lib_npsdk_dir + '/sdk_comm/sdk_comm/comm/include')
> +	includes += include_directories(lib_npsdk_dir + '/sdk_comm/sdk_comm/comm/include')
> +	includes += include_directories(dpp_include)
> +	includes += include_directories(dpp_include + '/dev/module/se/')
> +	includes += include_directories(dpp_include + '/dev/chip/')
> +	includes += include_directories(dpp_include + '/api/')
> +	includes += include_directories(dpp_include + '/dev/reg/')
> +	includes += include_directories(dpp_include + '/dev/module/')
> +	includes += include_directories(dpp_include + '/qos/')
> +	includes += include_directories(dpp_include + '/agentchannel/')
> +
> +	includes += include_directories(dpp_include + '/diag/')
> +	includes += include_directories(dpp_include + '/dev/module/ppu/')
> +	includes += include_directories(dpp_include + '/dev/module/table/se/')
> +	includes += include_directories(dpp_include + '/dev/module/nppu/')
> +	includes += include_directories(dpp_include + '/dev/module/tm/')
> +	includes += include_directories(dpp_include + '/dev/module/dma/')
> +	includes += include_directories(dpp_include + '/dev/module/ddos/')
> +	includes += include_directories(dpp_include + '/dev/module/oam/')
> +	includes += include_directories(dpp_include + '/dev/module/trpg/')
> +	includes += include_directories(dpp_include + '/dev/module/dtb/')
> +endif
> +
> +deps += ['kvargs', 'bus_pci', 'timer']
> +
> +if arch_subdir == 'x86'
> +	if not machine_args.contains('-mno-avx512f')
> +		if cc.has_argument('-mavx512f') and cc.has_argument('-mavx512vl') and cc.has_argument('-mavx512bw')
> +			cflags += ['-DCC_AVX512_SUPPORT']
> +			zxdh_avx512_lib = static_library('zxdh_avx512_lib',
> +						  dependencies: [static_rte_ethdev,
> +						static_rte_kvargs, static_rte_bus_pci],
> +						  include_directories: includes,
> +						  c_args: [cflags, '-mavx512f', '-mavx512bw', '-mavx512vl'])
> +			if (toolchain == 'gcc' and cc.version().version_compare('>=8.3.0'))
> +				cflags += '-DVHOST_GCC_UNROLL_PRAGMA'
> +			elif (toolchain == 'clang' and cc.version().version_compare('>=3.7.0'))
> +				cflags += '-DVHOST_CLANG_UNROLL_PRAGMA'
> +			elif (toolchain == 'icc' and cc.version().version_compare('>=16.0.0'))
> +				cflags += '-DVHOST_ICC_UNROLL_PRAGMA'
> +			endif
> +		endif
> +	endif
> +endif
> diff --git a/drivers/net/zxdh/msg_chan_pub.h b/drivers/net/zxdh/msg_chan_pub.h
> new file mode 100644
> index 0000000000..f2413b2efa
> --- /dev/null
> +++ b/drivers/net/zxdh/msg_chan_pub.h
> @@ -0,0 +1,274 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 ZTE Corporation
> + */
> +
> +#ifndef _ZXDH_MSG_CHAN_PUB_H_
> +#define _ZXDH_MSG_CHAN_PUB_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +#include <string.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <pthread.h>
> +#include <unistd.h>
> +#include <stdint.h>
> +
> +#include <rte_ethdev.h>
> +
> +#define PCI_NAME_LENGTH     16
> +
> +enum DRIVER_TYPE {
> +	MSG_CHAN_END_MPF = 0,
> +	MSG_CHAN_END_PF,
> +	MSG_CHAN_END_VF,
> +	MSG_CHAN_END_RISC,
> +};
> +
> +enum BAR_MSG_RTN {
> +	BAR_MSG_OK = 0,
> +	BAR_MSG_ERR_MSGID,
> +	BAR_MSG_ERR_NULL,
> +	BAR_MSG_ERR_TYPE, /* Message type exception */
> +	BAR_MSG_ERR_MODULE, /* Module ID exception */
> +	BAR_MSG_ERR_BODY_NULL, /* Message body exception */
> +	BAR_MSG_ERR_LEN, /* Message length exception */
> +	BAR_MSG_ERR_TIME_OUT, /* Message sending length too long */
> +	BAR_MSG_ERR_NOT_READY, /* Abnormal message sending conditions*/
> +	BAR_MEG_ERR_NULL_FUNC, /* Empty receive processing function pointer*/
> +	BAR_MSG_ERR_REPEAT_REGISTER, /* Module duplicate registration*/
> +	BAR_MSG_ERR_UNGISTER, /* Repeated deregistration*/
> +	/**
> +	 * The sending interface parameter boundary structure pointer is empty
> +	 */
> +	BAR_MSG_ERR_NULL_PARA,
> +	BAR_MSG_ERR_REPSBUFF_LEN, /* The length of reps_buff is too short*/
> +	/**
> +	 * Unable to find the corresponding message processing function for this module
> +	 */
> +	BAR_MSG_ERR_MODULE_NOEXIST,
> +	/**
> +	 * The virtual address in the parameters passed in by the sending interface is empty
> +	 */
> +	BAR_MSG_ERR_VIRTADDR_NULL,
> +	BAR_MSG_ERR_REPLY, /* sync msg resp_error */
> +	BAR_MSG_ERR_MPF_NOT_SCANNED,
> +	BAR_MSG_ERR_KERNEL_READY,
> +	BAR_MSG_ERR_USR_RET_ERR,
> +	BAR_MSG_ERR_ERR_PCIEID,
> +	BAR_MSG_ERR_SOCKET, /* netlink sockte err */
> +};
> +
> +enum bar_module_id {
> +	BAR_MODULE_DBG = 0, /* 0:  debug */
> +	BAR_MODULE_TBL,     /* 1:  resource table */
> +	BAR_MODULE_MISX,    /* 2:  config msix */
> +	BAR_MODULE_SDA,     /* 3: */
> +	BAR_MODULE_RDMA,    /* 4: */
> +	BAR_MODULE_DEMO,    /* 5:  channel test */
> +	BAR_MODULE_SMMU,    /* 6: */
> +	BAR_MODULE_MAC,     /* 7:  mac rx/tx stats */
> +	BAR_MODULE_VDPA,    /* 8:  vdpa live migration */
> +	BAR_MODULE_VQM,     /* 9:  vqm live migration */
> +	BAR_MODULE_NP,      /* 10: vf msg callback np */
> +	BAR_MODULE_VPORT,   /* 11: get vport */
> +	BAR_MODULE_BDF,     /* 12: get bdf */
> +	BAR_MODULE_RISC_READY, /* 13: */
> +	BAR_MODULE_REVERSE,    /* 14: byte stream reverse */
> +	BAR_MDOULE_NVME,       /* 15: */
> +	BAR_MDOULE_NPSDK,      /* 16: */
> +	BAR_MODULE_NP_TODO,    /* 17: */
> +	MODULE_BAR_MSG_TO_PF,  /* 18: */
> +	MODULE_BAR_MSG_TO_VF,  /* 19: */
> +
> +	MODULE_FLASH = 32,
> +	BAR_MODULE_OFFSET_GET = 33,
> +	BAR_EVENT_OVS_WITH_VCB = 36, /* ovs<-->vcb */
> +
> +	BAR_MSG_MODULE_NUM = 100,
> +};
> +static inline const char *module_id_name(int val)
> +{
> +	switch (val) {
> +	case BAR_MODULE_DBG:        return "BAR_MODULE_DBG";
> +	case BAR_MODULE_TBL:        return "BAR_MODULE_TBL";
> +	case BAR_MODULE_MISX:       return "BAR_MODULE_MISX";
> +	case BAR_MODULE_SDA:        return "BAR_MODULE_SDA";
> +	case BAR_MODULE_RDMA:       return "BAR_MODULE_RDMA";
> +	case BAR_MODULE_DEMO:       return "BAR_MODULE_DEMO";
> +	case BAR_MODULE_SMMU:       return "BAR_MODULE_SMMU";
> +	case BAR_MODULE_MAC:        return "BAR_MODULE_MAC";
> +	case BAR_MODULE_VDPA:       return "BAR_MODULE_VDPA";
> +	case BAR_MODULE_VQM:        return "BAR_MODULE_VQM";
> +	case BAR_MODULE_NP:         return "BAR_MODULE_NP";
> +	case BAR_MODULE_VPORT:      return "BAR_MODULE_VPORT";
> +	case BAR_MODULE_BDF:        return "BAR_MODULE_BDF";
> +	case BAR_MODULE_RISC_READY: return "BAR_MODULE_RISC_READY";
> +	case BAR_MODULE_REVERSE:    return "BAR_MODULE_REVERSE";
> +	case BAR_MDOULE_NVME:       return "BAR_MDOULE_NVME";
> +	case BAR_MDOULE_NPSDK:      return "BAR_MDOULE_NPSDK";
> +	case BAR_MODULE_NP_TODO:    return "BAR_MODULE_NP_TODO";
> +	case MODULE_BAR_MSG_TO_PF:  return "MODULE_BAR_MSG_TO_PF";
> +	case MODULE_BAR_MSG_TO_VF:  return "MODULE_BAR_MSG_TO_VF";
> +	case MODULE_FLASH:          return "MODULE_FLASH";
> +	case BAR_MODULE_OFFSET_GET: return "BAR_MODULE_OFFSET_GET";
> +	case BAR_EVENT_OVS_WITH_VCB: return "BAR_EVENT_OVS_WITH_VCB";
> +	default: return "NA";
> +	}
> +}
> +
> +struct bar_msg_header {
> +	uint8_t valid : 1; /* used by __bar_chan_msg_valid_set/get */
> +	uint8_t sync  : 1;
> +	uint8_t emec  : 1; /* emergency? */
> +	uint8_t ack   : 1; /* ack msg? */
> +	uint8_t poll  : 1;
> +	uint8_t usr   : 1;
> +	uint8_t rsv;
> +	uint16_t module_id;
> +	uint16_t len;
> +	uint16_t msg_id;
> +	uint16_t src_pcieid;
> +	uint16_t dst_pcieid; /* used in PF-->VF */
> +}; /* 12B */
> +#define BAR_MSG_ADDR_CHAN_INTERVAL  (2 * 1024) /* channel size */
> +#define BAR_MSG_PLAYLOAD_OFFSET     (sizeof(struct bar_msg_header))
> +#define BAR_MSG_PAYLOAD_MAX_LEN     (BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct bar_msg_header))
> +
> +struct zxdh_pci_bar_msg {
> +	uint64_t virt_addr; /* bar addr */
> +	void    *payload_addr;
> +	uint16_t payload_len;
> +	uint16_t emec;
> +	uint16_t src; /* refer to BAR_DRIVER_TYPE */
> +	uint16_t dst; /* refer to BAR_DRIVER_TYPE */
> +	uint16_t module_id;
> +	uint16_t src_pcieid;
> +	uint16_t dst_pcieid;
> +	uint16_t usr;
> +}; /* 32B */
> +
> +struct zxdh_msg_recviver_mem {
> +	void    *recv_buffer; /* first 4B is head, followed by payload */
> +	uint64_t buffer_len;
> +}; /* 16B */
> +
> +enum pciebar_layout_type {
> +	URI_VQM      = 0,
> +	URI_SPINLOCK = 1,
> +	URI_FWCAP    = 2,
> +	URI_FWSHR    = 3,
> +	URI_DRS_SEC  = 4,
> +	URI_RSV      = 5,
> +	URI_CTRLCH   = 6,
> +	URI_1588     = 7,
> +	URI_QBV      = 8,
> +	URI_MACPCS   = 9,
> +	URI_RDMA     = 10,
> +/* DEBUG PF */
> +	URI_MNP      = 11,
> +	URI_MSPM     = 12,
> +	URI_MVQM     = 13,
> +	URI_MDPI     = 14,
> +	URI_NP       = 15,
> +/* END DEBUG PF */
> +	URI_MAX,
> +};
> +
> +struct bar_offset_params {
> +	uint64_t virt_addr;  /* Bar space control space virtual address */
> +	uint16_t pcie_id;
> +	uint16_t type;  /* Module types corresponding to PCIBAR planning */
> +};
> +struct bar_offset_res {
> +	uint32_t bar_offset;
> +	uint32_t bar_length;
> +};
> +
> +/**
> + * Get the offset value of the specified module
> + * @bar_offset_params:  input parameter
> + * @bar_offset_res: Module offset and length
> + */
> +int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res);
> +
> +typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, void *reps_buffer,
> +					uint16_t *reps_len, void *dev);
> +
> +/**
> + * Send synchronization messages through PCIE BAR space
> + * @in: Message sending information
> + * @result: Message result feedback
> + * @return: 0 successful, other failures
> + */
> +int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result);
> +
> +/**
> + * Sending asynchronous messages through PCIE BAR space
> + * @in: Message sending information
> + * @result: Message result feedback
> + * @return: 0 successful, other failures
> + */
> +int zxdh_bar_chan_async_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result);
> +
> +/**
> + * PCIE BAR spatial message method, registering message reception callback
> + * @module_id: Registration module ID
> + * @callback: Pointer to the receive processing function implemented by the module
> + * @return: 0 successful, other failures
> + * Usually called during driver initialization
> + */
> +int zxdh_bar_chan_msg_recv_register(uint8_t module_id, zxdh_bar_chan_msg_recv_callback callback);
> +
> +/**
> + * PCIE BAR spatial message method, unregistered message receiving callback
> + * @module_id: Kernel PCIE device address
> + * @return: 0 successful, other failures
> + * Called during driver uninstallation
> + */
> +int zxdh_bar_chan_msg_recv_unregister(uint8_t module_id);
> +
> +/**
> + * Provide a message receiving interface for device driver interrupt handling functions
> + * @src:  Driver type for sending interrupts
> + * @dst:  Device driver's own driver type
> + * @virt_addr: The communication bar address of the device
> + * @return: 0 successful, other failures
> + */
> +int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev);
> +
> +/**
> + * Initialize spilock and clear the hardware lock address it belongs to
> + * @pcie_id: PCIE_id of PF device
> + * @bar_base_addr: Bar0 initial base address
> + */
> +int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr);
> +
> +struct msix_para {
> +	uint16_t pcie_id;
> +	uint16_t vector_risc;
> +	uint16_t vector_pfvf;
> +	uint16_t vector_mpf;
> +	uint64_t virt_addr;
> +	uint16_t driver_type; /* refer to DRIVER_TYPE */
> +};
> +int zxdh_bar_chan_enable(struct msix_para *_msix_para, uint16_t *vport);
> +int zxdh_msg_chan_init(void);
> +int zxdh_bar_msg_chan_exit(void);
> +
> +struct zxdh_res_para {
> +	uint64_t virt_addr;
> +	uint16_t pcie_id;
> +	uint16_t src_type; /* refer to BAR_DRIVER_TYPE */
> +};
> +int zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id);
> +int zxdh_get_res_hash_id(struct zxdh_res_para *in, uint8_t *hash_id);
> +
> +int zxdh_mpf_bar0_phyaddr_get(uint64_t *pPhyaddr);
> +int zxdh_mpf_bar0_vaddr_get(uint64_t *pVaddr);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +#endif /* _ZXDH_MSG_CHAN_PUB_H_ */
> diff --git a/drivers/net/zxdh/version.map b/drivers/net/zxdh/version.map
> new file mode 100644
> index 0000000000..4a76d1d52d
> --- /dev/null
> +++ b/drivers/net/zxdh/version.map
> @@ -0,0 +1,3 @@
> +DPDK_21 {
> +	local: *;
> +};

Can drop empty (no exported symbols) .map files, please check
Commit 7dde9c844a37 ("drivers: omit symbol map when unneeded")

> diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
> new file mode 100644
> index 0000000000..ca62393a08
> --- /dev/null
> +++ b/drivers/net/zxdh/zxdh_common.c
> @@ -0,0 +1,512 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 ZTE Corporation
> + */
> +
> +#include <stdint.h>
> +#include <string.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <unistd.h>
> +
> +#include <rte_memcpy.h>
> +#include <rte_malloc.h>
> +#include <rte_common.h>
> +#include <rte_memory.h>
> +
> +#include "zxdh_logs.h"
> +#include "zxdh_common.h"
> +#include "zxdh_pci.h"
> +#include "zxdh_msg_chan.h"
> +#include "zxdh_queue.h"
> +#include "zxdh_ethdev_ops.h"
> +
> +#define ZXDH_COMMON_FIELD_PCIEID   0
> +#define ZXDH_COMMON_FIELD_DATACH   3
> +#define ZXDH_COMMON_FIELD_VPORT    4
> +#define ZXDH_COMMON_FIELD_PHYPORT  6
> +#define ZXDH_COMMON_FIELD_PANELID  5
> +#define ZXDH_COMMON_FIELD_HASHIDX  7
> +
> +#define ZXDH_MAC_STATS_OFFSET   (0x1000 + 408)
> +#define ZXDH_MAC_BYTES_OFFSET   (0xb000)
> +
> +uint64_t get_cur_time_s(uint64_t tsc)
> +{
> +	return (tsc/rte_get_tsc_hz());
> +}
> +
> +/** Nano seconds per second */
> +#define NS_PER_SEC 1E9
> +
> +uint64_t get_time_ns(uint64_t tsc)
> +{
> +	return (tsc*NS_PER_SEC/rte_get_tsc_hz());
> +}
> +/**
> + * Fun:
> + */


There are multiple instances of these empty 'Fun' comments; can you
please drop the empty ones?

> +void zxdh_hex_dump(uint8_t *buff, uint16_t buff_size)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < buff_size; i++) {
> +		if ((i % 16) == 0)
> +			printf("\n");
> +		printf("%02x ", *(buff + i));
> +	}
> +	printf("\n");
>

A driver printing to stdout without application control is not desired;
can you please convert this to a debug log that the application can control?

> +}
> +/**
> + * Fun:
> + */
> +uint32_t zxdh_read_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
> +	uint32_t val      = *((volatile uint32_t *)(baseaddr + reg));
> +	return val;
> +}
> +/**
> + * Fun:
> + */
> +void zxdh_write_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val)

Please follow dpdk coding convention, where return type is in its own
line, like:

void
zxdh_write_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, ...)

> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
> +	*((volatile uint32_t *)(baseaddr + reg)) = val;
> +}
> +/* Send a synchronous message to the RISC-V management core over the
> + * BAR0 control channel and wait for the response in 'msg_rsp'.
> + * Returns 0 on success, -1 on channel failure.
> + */
> +int32_t
> +zxdh_send_command_toriscv(struct rte_eth_dev *dev,
> +		struct zxdh_pci_bar_msg *in,
> +		enum bar_module_id module_id,
> +		struct zxdh_msg_recviver_mem *msg_rsp)

To break long lines with multiple parameters, single tab is easy to
confuse with function body, instead double tab can be better.

> +{
> +	PMD_INIT_FUNC_TRACE();
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	in->virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
> +	in->src = hw->is_pf ? MSG_CHAN_END_PF : MSG_CHAN_END_VF;
> +	in->dst = MSG_CHAN_END_RISC;
> +	in->module_id = module_id;
> +	in->src_pcieid = hw->pcie_id;
> +	if (zxdh_bar_chan_sync_msg_send(in, msg_rsp) != BAR_MSG_OK) {
> +		PMD_DRV_LOG(ERR, "Failed to send sync messages or receive response");
> +		PMD_DRV_LOG(ERR, "msg_data:");
> +		HEX_DUMP(in->payload_addr, in->payload_len);
> +		return -1;
> +	}
> +	return 0;
> +}
> +#define ZXDH_MSG_RSP_SIZE_MAX  512
> +
> +/* Fill the common BAR message header, allocate a zeroed
> + * ZXDH_MSG_RSP_SIZE_MAX-byte response buffer and send synchronously
> + * to the RISC-V core. On success the caller owns (and must
> + * rte_free()) msg_rsp->recv_buffer.
> + * Returns 0 on success, -ENOMEM or -1 on failure.
> + */
> +static int32_t
> +zxdh_send_command(struct zxdh_hw *hw,
> +		struct zxdh_pci_bar_msg *desc,
> +		enum bar_module_id module_id,
> +		struct zxdh_msg_recviver_mem *msg_rsp)
> +{
> +	PMD_INIT_FUNC_TRACE();
> +
> +	desc->virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
> +	desc->src = hw->is_pf ? MSG_CHAN_END_PF : MSG_CHAN_END_VF;
> +	desc->dst = MSG_CHAN_END_RISC;
> +	desc->module_id = module_id;
> +	desc->src_pcieid = hw->pcie_id;
> +
> +	msg_rsp->buffer_len  = ZXDH_MSG_RSP_SIZE_MAX;
> +	msg_rsp->recv_buffer = rte_zmalloc(NULL, msg_rsp->buffer_len, 0);
> +	if (unlikely(msg_rsp->recv_buffer == NULL)) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate messages response");
> +		return -ENOMEM;
> +	}
> +
> +	if (zxdh_bar_chan_sync_msg_send(desc, msg_rsp) != BAR_MSG_OK) {
> +		PMD_DRV_LOG(ERR, "Failed to send sync messages or receive response");
> +		PMD_DRV_LOG(ERR, "msg_data:");
> +		HEX_DUMP(desc->payload_addr, desc->payload_len);
> +		rte_free(msg_rsp->recv_buffer);
> +		msg_rsp->recv_buffer = NULL; /* don't hand back a dangling pointer */
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +/* Common-channel response header prepended by firmware to every
> + * reply; the payload ('payload_len' bytes) follows immediately after.
> + */
> +struct zxdh_common_rsp_hdr {
> +	uint8_t  rsp_status;
> +	uint16_t rsp_len;
> +	uint8_t  reserved;
> +	uint8_t  payload_status; /* 0xaa marks a valid payload */
> +	uint8_t  rsv;
> +	uint16_t payload_len;
> +} __rte_packed; /* 8B */

Putting an empty line between a struct and a function, or between two
functions, may make the code easier to read.

> +/* Validate a common-channel reply and copy out its payload.
> + * A reply is valid when payload_status is 0xaa and payload_len equals
> + * the expected 'len'; the payload (right after the header) is then
> + * copied into 'buff'. Returns 0 on success, -1 on a malformed reply.
> + */
> +static int32_t zxdh_common_rsp_check(struct zxdh_msg_recviver_mem *msg_rsp,
> +		void *buff, uint16_t len)
> +{
> +	struct zxdh_common_rsp_hdr *hdr = msg_rsp->recv_buffer;
> +
> +	if (hdr->payload_status != 0xaa || hdr->payload_len != len) {
> +		PMD_DRV_LOG(ERR, "Common response is invalid, status:0x%x rsp_len:%d",
> +					hdr->payload_status, hdr->payload_len);
> +		return -1;
> +	}
> +	if (len != 0)
> +		memcpy(buff, hdr + 1, len);
> +
> +	return 0;
> +}
> +/* Request header for common-table read/write messages; for writes,
> + * 'slen' bytes of table data follow this header.
> + */
> +struct zxdh_common_msg {
> +	uint8_t  type;    /* 0:read table 1:write table */
> +	uint8_t  field;   /* ZXDH_COMMON_FIELD_* selector */
> +	uint16_t pcie_id;
> +	uint16_t slen;    /* Data length for write table */
> +	uint16_t reserved;
> +} __rte_packed; /* 8B */
> +/* Allocate and fill the payload for a common-table request.
> + * On success desc->payload_addr holds a zxdh_common_msg header plus
> + * 'buff_size' bytes copied from 'buff'; the caller must rte_free() it.
> + * Returns 0 on success, -ENOMEM on allocation failure.
> + */
> +static int32_t
> +zxdh_fill_common_msg(struct zxdh_hw *hw,
> +		struct zxdh_pci_bar_msg *desc,
> +		uint8_t type,
> +		uint8_t field,
> +		void *buff,
> +		uint16_t buff_size)
> +{
> +	uint64_t msg_len = sizeof(struct zxdh_common_msg) + buff_size;
> +
> +	/* rte_zmalloc() returns zeroed memory; no extra memset needed */
> +	desc->payload_addr = rte_zmalloc(NULL, msg_len, 0);
> +	if (unlikely(desc->payload_addr == NULL)) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate msg_data");
> +		return -ENOMEM;
> +	}
> +	desc->payload_len = msg_len;
> +	struct zxdh_common_msg *msg_data = (struct zxdh_common_msg *)desc->payload_addr;
> +
> +	msg_data->type = type;
> +	msg_data->field = field;
> +	msg_data->pcie_id = hw->pcie_id;
> +	msg_data->slen = buff_size;
> +	if (buff_size != 0)
> +		memcpy(msg_data + 1, buff, buff_size);
> +
> +	return 0;
> +}
> +#define ZXDH_COMMON_TABLE_READ   0
> +#define ZXDH_COMMON_TABLE_WRITE  1
> +
> +/* Read common-table entry 'field' from firmware into buff[buff_size].
> + * Returns 0 on success, negative on failure.
> + */
> +static int32_t
> +zxdh_common_table_read(struct zxdh_hw *hw, uint8_t field,
> +		void *buff, uint16_t buff_size)
> +{
> +	PMD_INIT_FUNC_TRACE();
> +	if (!hw->msg_chan_init) {
> +		PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
> +		return -1;
> +	}
> +	struct zxdh_pci_bar_msg desc;
> +	int32_t ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_READ, field, NULL, 0);
> +
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to fill common msg");
> +		return ret;
> +	}
> +	struct zxdh_msg_recviver_mem msg_rsp;
> +
> +	ret = zxdh_send_command(hw, &desc, BAR_MODULE_TBL, &msg_rsp);
> +	if (ret != 0)
> +		goto free_msg_data;
> +
> +	ret = zxdh_common_rsp_check(&msg_rsp, buff, buff_size);
> +	/* the response buffer is owned by us on any zxdh_send_command success */
> +	rte_free(msg_rsp.recv_buffer);
> +free_msg_data:
> +	rte_free(desc.payload_addr);
> +	return ret;
> +}
> +/* Write buff[buff_size] to common-table entry 'field'.
> + * Returns 0 on success, negative on failure.
> + */
> +static int32_t
> +zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field,
> +		void *buff, uint16_t buff_size)
> +{
> +	PMD_INIT_FUNC_TRACE();
> +	if (!hw->msg_chan_init) {
> +		PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
> +		return -1;
> +	}
> +	if ((buff_size != 0) && (buff == NULL)) {
> +		PMD_DRV_LOG(ERR, "Buff is invalid");
> +		return -1;
> +	}
> +	struct zxdh_pci_bar_msg desc;
> +	int32_t ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE,
> +					field, buff, buff_size);
> +
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to fill common msg");
> +		return ret;
> +	}
> +	struct zxdh_msg_recviver_mem msg_rsp;
> +
> +	ret = zxdh_send_command(hw, &desc, BAR_MODULE_TBL, &msg_rsp);
> +	if (ret != 0)
> +		goto free_msg_data;
> +
> +	ret = zxdh_common_rsp_check(&msg_rsp, NULL, 0);
> +	/* the response buffer is owned by us on any zxdh_send_command success */
> +	rte_free(msg_rsp.recv_buffer);
> +free_msg_data:
> +	rte_free(desc.payload_addr);
> +	return ret;
> +}
> +/* Program the data-channel entry of the common table: send queue_num
> + * followed by the physical channel number of each queue
> + * (payload: queue_num(2byte) + pch1(2byte) + ... + pchn).
> + * Returns 0 on success, negative on failure.
> + */
> +int32_t
> +zxdh_datach_set(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint16_t buff_size = (hw->queue_num + 1) * 2;
> +	/* rte_zmalloc() returns zeroed memory; no extra memset needed */
> +	uint16_t *pdata = rte_zmalloc(NULL, buff_size, 0);
> +
> +	if (unlikely(pdata == NULL)) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate buff");
> +		return -ENOMEM;
> +	}
> +	pdata[0] = hw->queue_num;
> +	uint16_t i;
> +
> +	for (i = 0; i < hw->queue_num; i++)
> +		pdata[i + 1] = hw->channel_context[i].ph_chno;
> +
> +	int32_t ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH,
> +						(void *)pdata, buff_size);
> +
> +	if (ret != 0)
> +		PMD_DRV_LOG(ERR, "Failed to setup data channel of common table");
> +
> +	rte_free(pdata);
> +	return ret;
> +}
> +/* Fetch hardware statistics from the management core. 'opcode'
> + * selects VQM or MAC counters; the reply is copied into 'hw_stats'.
> + * Returns 0 on success, -1 on failure.
> + */
> +int32_t
> +zxdh_hw_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode,
> +		struct zxdh_hw_stats *hw_stats)
> +{
> +	enum bar_module_id module_id;
> +
> +	switch (opcode) {
> +	case ZXDH_VQM_DEV_STATS_GET:
> +	case ZXDH_VQM_QUEUE_STATS_GET:
> +	case ZXDH_VQM_QUEUE_STATS_RESET:
> +		module_id = BAR_MODULE_VQM;
> +		break;
> +	case ZXDH_MAC_STATS_GET:
> +	case ZXDH_MAC_STATS_RESET:
> +		module_id = BAR_MODULE_MAC;
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
> +		return -1;
> +	}
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct zxdh_msg_reply_info reply_info = {0};
> +	struct zxdh_msg_recviver_mem result = {
> +		.recv_buffer = &reply_info,
> +		.buffer_len = sizeof(struct zxdh_msg_reply_info),
> +	};

Please remove empty comments.

> +	struct zxdh_msg_info msg_info = {0};
> +
> +	ctrl_msg_build(hw, opcode, &msg_info);
> +	struct zxdh_pci_bar_msg in = {0};
> +
> +	in.payload_addr = &msg_info;
> +	in.payload_len = sizeof(msg_info);
> +	if (zxdh_send_command_toriscv(dev, &in, module_id, &result) != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to get hw stats");
> +		return -1;
> +	}
> +	struct zxdh_msg_reply_body *reply_body = &reply_info.reply_body;
> +
> +	rte_memcpy(hw_stats, &reply_body->riscv_rsp.port_hw_stats, sizeof(struct zxdh_hw_stats));
> +	return 0;
> +}
> +
> +/* Read MAC packet and byte counters directly from BAR0.
> + * Ports at <= 25G use a per-port slot (phyport % 4); faster ports use
> + * the dedicated slot 4. 352 and 32 are the per-slot strides in bytes
> + * of the stats and byte-counter blocks — NOTE(review): consider named
> + * macros and confirm against the hardware register map.
> + */
> +int32_t
> +zxdh_hw_mac_get(struct rte_eth_dev *dev, struct zxdh_hw_mac_stats *mac_stats,
> +		struct zxdh_hw_mac_bytes *mac_bytes)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MAC_OFFSET);
> +	uint64_t stats_addr = 0;
> +	uint64_t bytes_addr = 0;
> +
> +	if (hw->speed <= 25000) {
> +		stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * (hw->phyport % 4);
> +		bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * (hw->phyport % 4);
> +	} else {
> +		stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * 4;
> +		bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * 4;
> +	}
> +
> +	rte_memcpy(mac_stats, (void *)stats_addr, sizeof(struct zxdh_hw_mac_stats));
> +	rte_memcpy(mac_bytes, (void *)bytes_addr, sizeof(struct zxdh_hw_mac_bytes));
> +
> +	return 0;
> +}
> +/* Reset hardware statistics of the given type via the management
> + * core. Returns 0 on success, -1 on failure.
> + */
> +int32_t
> +zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode)
> +{
> +	enum bar_module_id module_id;
> +
> +	switch (opcode) {
> +	case ZXDH_VQM_DEV_STATS_RESET:
> +		module_id = BAR_MODULE_VQM;
> +		break;
> +	case ZXDH_MAC_STATS_RESET:
> +		module_id = BAR_MODULE_MAC;
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
> +		return -1;
> +	}
> +	struct zxdh_msg_reply_info reply_info = {0};
> +	struct zxdh_msg_recviver_mem result = {
> +		.recv_buffer = &reply_info,
> +		.buffer_len = sizeof(struct zxdh_msg_reply_info),
> +	};
> +	struct zxdh_msg_info msg_info = {0};
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	ctrl_msg_build(hw, opcode, &msg_info);
> +	struct zxdh_pci_bar_msg in = {0};
> +
> +	in.payload_addr = &msg_info;
> +	in.payload_len = sizeof(msg_info);
> +	if (zxdh_send_command_toriscv(dev, &in, module_id, &result) != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to reset hw stats");
> +		return -1;
> +	}
> +	return 0;
> +}
> +/* Fill the resource-query parameters (pcie id, control-channel
> + * address, source module) used by the panel/hash id lookups.
> + */
> +static inline void
> +zxdh_fill_res_para(struct rte_eth_dev *dev, struct zxdh_res_para *param)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	param->pcie_id   = hw->pcie_id;
> +	param->virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
> +	param->src_type  = BAR_MODULE_TBL;
> +}
> +/* Query the panel id of this port. Returns 0 on success. */
> +int32_t
> +zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid)
> +{
> +	struct zxdh_res_para param;
> +
> +	zxdh_fill_res_para(dev, &param);
> +	return zxdh_get_res_panel_id(&param, pannelid);
> +}
> +/* Query the physical port number from the common table. */
> +int32_t
> +zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	return zxdh_common_table_read(hw, ZXDH_COMMON_FIELD_PHYPORT,
> +				(void *)phyport, sizeof(*phyport));
> +}
> +/* Query the hash index of this port. Returns 0 on success. */
> +int32_t
> +zxdh_hashidx_get(struct rte_eth_dev *dev, uint8_t *hash_idx)
> +{
> +	struct zxdh_res_para param;
> +
> +	zxdh_fill_res_para(dev, &param);
> +	return zxdh_get_res_hash_id(&param, hash_idx);
> +}
> +#define DUPLEX_HALF   RTE_BIT32(0)
> +#define DUPLEX_FULL   RTE_BIT32(1)
> +
> +/* Fill 'link' with the current link status/speed/duplex.
> + * The status bit comes from the device config space (when
> + * ZXDH_NET_F_STATUS is negotiated); speed and duplex are queried from
> + * the management core when the link is up. Also caches the speed and
> + * speed modes in 'hw'. Returns 0 on success, -1 on channel failure.
> + */
> +int32_t
> +zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link)
> +{
> +	PMD_INIT_FUNC_TRACE();
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint16_t status = 0;
> +
> +	if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS))
> +		zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, status),
> +					&status, sizeof(status));
> +
> +	link->link_status = status;
> +
> +	if (status == RTE_ETH_LINK_DOWN) {
> +		/* DPDK log macros append the newline themselves */
> +		PMD_DRV_LOG(INFO, "Port is down!");
> +		link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
> +		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> +	} else {
> +		struct zxdh_msg_info msg;
> +		struct zxdh_pci_bar_msg in = {0};
> +		struct zxdh_msg_reply_info rep = {0};
> +
> +		ctrl_msg_build(hw, ZXDH_MAC_LINK_GET, &msg);
> +
> +		in.payload_addr = &msg;
> +		in.payload_len = sizeof(msg);
> +
> +		struct zxdh_msg_recviver_mem rsp_data = {
> +			.recv_buffer = (void *)&rep,
> +			.buffer_len = sizeof(rep),
> +		};
> +		/* NOTE(review): zxdh_send_command_toriscv() returns 0/-1,
> +		 * so compare against 0 rather than BAR_MSG_OK.
> +		 */
> +		if (zxdh_send_command_toriscv(dev, &in, BAR_MODULE_MAC, &rsp_data) != 0) {
> +			PMD_DRV_LOG(ERR, "Failed to get link info");
> +			return -1;
> +		}
> +		struct zxdh_msg_reply_body *ack_msg = &rep.reply_body;
> +
> +		link->link_speed = ack_msg->link_msg.speed;
> +		hw->speed_mode = ack_msg->link_msg.speed_modes;
> +		if ((ack_msg->link_msg.duplex & DUPLEX_FULL) == DUPLEX_FULL)
> +			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> +		else
> +			link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
> +
> +		PMD_DRV_LOG(INFO, "Port is up!");
> +	}
> +	hw->speed = link->link_speed;
> +	PMD_DRV_LOG(INFO, "sw : admin_status %d ", hw->admin_status);
> +	PMD_DRV_LOG(INFO, "hw : link_status: %d,  link_speed: %d, link_duplex %d",
> +				link->link_status, link->link_speed, link->link_duplex);
> +	return 0;
> +}
> diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
> new file mode 100644
> index 0000000000..2010d01e63
> --- /dev/null
> +++ b/drivers/net/zxdh/zxdh_common.h
> @@ -0,0 +1,154 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 ZTE Corporation
> + */
> +
> +#ifndef _ZXDH_COMMON_H_
> +#define _ZXDH_COMMON_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
>

Although this doesn't hurt, is it expected to include this driver
internal header file from a C++ source file?
If it is not used at all, maybe consider dropping it.

> +
> +#include <stdint.h>
> +#include <rte_ethdev.h>
> +#include <rte_common.h>
> +#include "msg_chan_pub.h"
> +#include "zxdh_logs.h"
> +
> +#define VF_IDX(pcie_id)  (pcie_id & 0xff)
> +#define PF_PCIE_ID(pcie_id)  ((pcie_id & 0xff00) | 1<<11)
> +#define VF_PCIE_ID(pcie_id, vf_idx)  ((pcie_id & 0xff00) | (1<<11) | (vf_idx&0xff))
> +
> +#define VFUNC_ACTIVE_BIT  11
> +#define VFUNC_NUM_MASK    0xff
> +#define GET_OWNER_PF_VPORT(vport)  ((vport&~(VFUNC_NUM_MASK))&(~(1<<VFUNC_ACTIVE_BIT)))
> +
> +/* riscv msg opcodes */
> +enum zxdh_agent_opc {
> +	ZXDH_MAC_STATS_GET = 10,
> +	ZXDH_MAC_STATS_RESET,
> +	ZXDH_MAC_PHYPORT_INIT,
> +	ZXDH_MAC_AUTONEG_SET,
> +	ZXDH_MAC_LINK_GET,
> +	ZXDH_MAC_LED_BLINK,
> +	ZXDH_MAC_FC_SET  = 18,
> +	ZXDH_MAC_FC_GET = 19,
> +	ZXDH_MAC_MODULE_EEPROM_READ = 20,
> +	ZXDH_VQM_DEV_STATS_GET = 21,
> +	ZXDH_VQM_DEV_STATS_RESET,
> +	/* NOTE(review): ZXDH_VQM_QUEUE_STATS_GET evaluates to 24 here,
> +	 * colliding with ZXDH_DEV_STATUS_NOTIFY below — confirm the
> +	 * intended opcode numbering against the firmware interface.
> +	 */
> +	ZXDH_FLASH_FIR_VERSION_GET = 23,
> +	ZXDH_VQM_QUEUE_STATS_GET,
> +	ZXDH_DEV_STATUS_NOTIFY = 24,
> +	ZXDH_VQM_QUEUE_STATS_RESET,
> +} __rte_packed;
> +
> +/* Per-vport VQM counters; filled from the management-core reply in
> + * zxdh_hw_stats_get().
> + */
> +struct zxdh_hw_stats {
> +	uint64_t rx_total;
> +	uint64_t tx_total;
> +	uint64_t rx_bytes;
> +	uint64_t tx_bytes;
> +	uint64_t rx_error;
> +	uint64_t tx_error;
> +	uint64_t rx_drop;
> +} __rte_packed;
> +
> +/* MAC packet counters; read verbatim from the BAR0 stats block via
> + * rte_memcpy() in zxdh_hw_mac_get(), so field order and packing must
> + * match the hardware layout.
> + */
> +struct zxdh_hw_mac_stats {
> +	uint64_t rx_total;
> +	uint64_t rx_pause;
> +	uint64_t rx_unicast;
> +	uint64_t rx_multicast;
> +	uint64_t rx_broadcast;
> +	uint64_t rx_vlan;
> +	uint64_t rx_size_64;
> +	uint64_t rx_size_65_127;
> +	uint64_t rx_size_128_255;
> +	uint64_t rx_size_256_511;
> +	uint64_t rx_size_512_1023;
> +	uint64_t rx_size_1024_1518;
> +	uint64_t rx_size_1519_mru;
> +	uint64_t rx_undersize;
> +	uint64_t rx_oversize;
> +	uint64_t rx_fragment;
> +	uint64_t rx_jabber;
> +	uint64_t rx_control;
> +	uint64_t rx_eee;
> +
> +	uint64_t tx_total;
> +	uint64_t tx_pause;
> +	uint64_t tx_unicast;
> +	uint64_t tx_multicast;
> +	uint64_t tx_broadcast;
> +	uint64_t tx_vlan;
> +	uint64_t tx_size_64;
> +	uint64_t tx_size_65_127;
> +	uint64_t tx_size_128_255;
> +	uint64_t tx_size_256_511;
> +	uint64_t tx_size_512_1023;
> +	uint64_t tx_size_1024_1518;
> +	uint64_t tx_size_1519_mtu;
> +	uint64_t tx_undersize;
> +	uint64_t tx_oversize;
> +	uint64_t tx_fragment;
> +	uint64_t tx_jabber;
> +	uint64_t tx_control;
> +	uint64_t tx_eee;
> +
> +	uint64_t rx_error;
> +	uint64_t rx_fcs_error;
> +	uint64_t rx_drop;
> +
> +	uint64_t tx_error;
> +	uint64_t tx_fcs_error;
> +	uint64_t tx_drop;
> +
> +} __rte_packed;
> +
> +/* MAC byte counters; read verbatim from BAR0 in zxdh_hw_mac_get(). */
> +struct zxdh_hw_mac_bytes {
> +	uint64_t rx_total_bytes;
> +	uint64_t rx_good_bytes;
> +	uint64_t tx_total_bytes;
> +	uint64_t tx_good_bytes;
> +} __rte_packed;
> +
> +void zxdh_hex_dump(uint8_t *buff, uint16_t buff_size);
> +
> +uint32_t zxdh_read_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);
> +void zxdh_write_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);
> +int32_t zxdh_hw_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode,
> +			struct zxdh_hw_stats *hw_stats);
> +int32_t zxdh_hw_mac_get(struct rte_eth_dev *dev, struct zxdh_hw_mac_stats *mac_stats,
> +			struct zxdh_hw_mac_bytes *mac_bytes);
> +int32_t zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode);
> +int32_t zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link);
> +int32_t zxdh_datach_set(struct rte_eth_dev *dev);
> +int32_t zxdh_vport_get(struct rte_eth_dev *dev, uint16_t *vport);
> +int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid);
> +int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport);
> +int32_t zxdh_hashidx_get(struct rte_eth_dev *dev, uint8_t *hash_idx);
> +int32_t zxdh_send_command_toriscv(struct rte_eth_dev *dev,
> +			struct zxdh_pci_bar_msg *in,
> +			enum bar_module_id module_id,
> +			struct zxdh_msg_recviver_mem *msg_rsp);
> +
> +#define HEX_DUMP(buff, buff_size)  zxdh_hex_dump((uint8_t *)buff, (uint16_t)buff_size)
> +
> +#define ZXDH_DIRECT_FLAG_BIT       (1UL << 15)
> +
> +#define ZXDH_FLAG_YES 1
> +#define ZXDH_FLAG_NO 0
> +
> +#define ZXDH_VLAN_TAG_LEN 4
> +
> +#define ZXDH_ETH_OVERHEAD  (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ZXDH_VLAN_TAG_LEN * 2)
> +#define ZXDH_MTU_TO_PKTLEN(mtu) ((mtu) + ZXDH_ETH_OVERHEAD)
> +
> +#define VLAN_TAG_LEN   4/* 802.3ac tag (not DMA'd) */
> +
> +uint64_t get_cur_time_s(uint64_t tsc);
> +uint64_t get_time_ns(uint64_t tsc);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _ZXDH_COMMON_H_ */
> diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
> new file mode 100644
> index 0000000000..222ecbd3c1
> --- /dev/null
> +++ b/drivers/net/zxdh/zxdh_ethdev.c
> @@ -0,0 +1,3431 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 ZTE Corporation
> + */
> +
> +#include <rte_memcpy.h>
> +#include <rte_malloc.h>
> +#include <rte_interrupts.h>
> +#include <eal_interrupts.h>
> +#include <ethdev_pci.h>
> +#include <rte_kvargs.h>
> +#include <rte_hexdump.h>
> +
> +#include "zxdh_ethdev.h"
> +#include "zxdh_pci.h"
> +#include "zxdh_logs.h"
> +#include "zxdh_queue.h"
> +#include "zxdh_rxtx.h"
> +#include "zxdh_msg_chan.h"
> +#include "zxdh_common.h"
> +#include "zxdh_ethdev_ops.h"
> +#include "zxdh_tables.h"
> +#include "dpp_dtb_table_api.h"
> +#include "dpp_dev.h"
> +#include "dpp_init.h"
> +#include "zxdh_ethdev.h"
> +#include "zxdh_table_drv.h"
> +#include "dpp_log_diag.h"
> +#include "dpp_dbgstat.h"
> +#include "dpp_trpg_api.h"
> +
> +#include "zxdh_telemetry.h"
> +
> +/* Maps an xstat display name to its field offset in a stats struct. */
> +struct rte_zxdh_xstats_name_off {
> +	char name[RTE_ETH_XSTATS_NAME_SIZE];
> +	unsigned int offset;
> +};
> +/* NP (network processor) counters exposed through xstats. */
> +static const struct rte_zxdh_xstats_name_off rte_zxdh_np_stat_strings[] = {
> +	{"np_rx_broadcast",    offsetof(struct zxdh_hw_np_stats, np_rx_broadcast)},
> +	{"np_tx_broadcast",    offsetof(struct zxdh_hw_np_stats, np_tx_broadcast)},
> +	{"np_rx_mtu_drop_pkts",   offsetof(struct zxdh_hw_np_stats, np_rx_mtu_drop_pkts)},
> +	{"np_tx_mtu_drop_pkts",   offsetof(struct zxdh_hw_np_stats, np_tx_mtu_drop_pkts)},
> +	{"np_tx_mtu_drop_bytes",   offsetof(struct zxdh_hw_np_stats, np_tx_mtu_drop_bytes)},
> +	{"np_rx_mtu_drop_bytes",   offsetof(struct zxdh_hw_np_stats, np_rx_mtu_drop_bytes)},
> +	{"np_rx_plcr_drop_pkts",  offsetof(struct zxdh_hw_np_stats, np_rx_mtr_drop_pkts)},
> +	{"np_rx_plcr_drop_bytes",  offsetof(struct zxdh_hw_np_stats, np_rx_mtr_drop_bytes)},
> +	{"np_tx_plcr_drop_pkts",  offsetof(struct zxdh_hw_np_stats,  np_tx_mtr_drop_pkts)},
> +	{"np_tx_plcr_drop_bytes",  offsetof(struct zxdh_hw_np_stats, np_tx_mtr_drop_bytes)},
> +};
> +/* [rt]x_qX_ is prepended to the name string here */
> +static const struct rte_zxdh_xstats_name_off rte_zxdh_rxq_stat_strings[] = {
> +	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
> +	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
> +	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
> +	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
> +	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
> +	{"truncated_err",          offsetof(struct virtnet_rx, stats.truncated_err)},
> +	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
> +	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
> +	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
> +	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
> +	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
> +	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
> +	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
> +	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
> +};
> +
> +
> +/* [rt]x_qX_ is prepended to the name string here */
> +static const struct rte_zxdh_xstats_name_off rte_zxdh_txq_stat_strings[] = {
> +	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
> +	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
> +	{"errors",                 offsetof(struct virtnet_tx, stats.errors)},
> +	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
> +	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
> +	{"truncated_err",          offsetof(struct virtnet_tx, stats.truncated_err)},
> +	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
> +	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
> +	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
> +	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
> +	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
> +	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
> +	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
> +	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
> +};
> +/* MAC packet counters (PF only); offsets into struct zxdh_hw_mac_stats. */
> +static const struct rte_zxdh_xstats_name_off rte_zxdh_mac_stat_strings[] = {
> +	{"mac_rx_total",    offsetof(struct zxdh_hw_mac_stats, rx_total)},
> +	{"mac_rx_pause",    offsetof(struct zxdh_hw_mac_stats, rx_pause)},
> +	{"mac_rx_unicast",   offsetof(struct zxdh_hw_mac_stats, rx_unicast)},
> +	{"mac_rx_multicast",   offsetof(struct zxdh_hw_mac_stats, rx_multicast)},
> +	{"mac_rx_broadcast",   offsetof(struct zxdh_hw_mac_stats, rx_broadcast)},
> +	{"mac_rx_vlan",   offsetof(struct zxdh_hw_mac_stats, rx_vlan)},
> +	{"mac_rx_size_64",  offsetof(struct zxdh_hw_mac_stats, rx_size_64)},
> +	{"mac_rx_size_65_127",  offsetof(struct zxdh_hw_mac_stats, rx_size_65_127)},
> +	{"mac_rx_size_128_255",  offsetof(struct zxdh_hw_mac_stats,  rx_size_128_255)},
> +	{"mac_rx_size_256_511",  offsetof(struct zxdh_hw_mac_stats, rx_size_256_511)},
> +	{"mac_rx_size_512_1023",    offsetof(struct zxdh_hw_mac_stats, rx_size_512_1023)},
> +	{"mac_rx_size_1024_1518",    offsetof(struct zxdh_hw_mac_stats, rx_size_1024_1518)},
> +	{"mac_rx_size_1519_mru",   offsetof(struct zxdh_hw_mac_stats, rx_size_1519_mru)},
> +	{"mac_rx_undersize",   offsetof(struct zxdh_hw_mac_stats, rx_undersize)},
> +	{"mac_rx_oversize",   offsetof(struct zxdh_hw_mac_stats, rx_oversize)},
> +	{"mac_rx_fragment",   offsetof(struct zxdh_hw_mac_stats, rx_fragment)},
> +	{"mac_rx_jabber",  offsetof(struct zxdh_hw_mac_stats, rx_jabber)},
> +	{"mac_rx_control",  offsetof(struct zxdh_hw_mac_stats, rx_control)},
> +	{"mac_rx_eee",  offsetof(struct zxdh_hw_mac_stats,  rx_eee)},
> +	{"mac_rx_error",  offsetof(struct zxdh_hw_mac_stats, rx_error)},
> +	{"mac_rx_fcs_error",    offsetof(struct zxdh_hw_mac_stats, rx_fcs_error)},
> +	{"mac_rx_drop",    offsetof(struct zxdh_hw_mac_stats, rx_drop)},
> +
> +	{"mac_tx_total",   offsetof(struct zxdh_hw_mac_stats, tx_total)},
> +	{"mac_tx_pause",   offsetof(struct zxdh_hw_mac_stats, tx_pause)},
> +	{"mac_tx_unicast",  offsetof(struct zxdh_hw_mac_stats, tx_unicast)},
> +	{"mac_tx_multicast",  offsetof(struct zxdh_hw_mac_stats, tx_multicast)},
> +	{"mac_tx_broadcast",  offsetof(struct zxdh_hw_mac_stats,  tx_broadcast)},
> +	{"mac_tx_vlan",  offsetof(struct zxdh_hw_mac_stats, tx_vlan)},
> +	{"mac_tx_size_64",   offsetof(struct zxdh_hw_mac_stats, tx_size_64)},
> +	{"mac_tx_size_65_127",   offsetof(struct zxdh_hw_mac_stats, tx_size_65_127)},
> +	{"mac_tx_size_128_255",  offsetof(struct zxdh_hw_mac_stats, tx_size_128_255)},
> +	{"mac_tx_size_256_511",  offsetof(struct zxdh_hw_mac_stats, tx_size_256_511)},
> +	{"mac_tx_size_512_1023",  offsetof(struct zxdh_hw_mac_stats,  tx_size_512_1023)},
> +	{"mac_tx_size_1024_1518",  offsetof(struct zxdh_hw_mac_stats, tx_size_1024_1518)},
> +	{"mac_tx_size_1519_mtu",   offsetof(struct zxdh_hw_mac_stats, tx_size_1519_mtu)},
> +	{"mac_tx_undersize",   offsetof(struct zxdh_hw_mac_stats, tx_undersize)},
> +	{"mac_tx_oversize",  offsetof(struct zxdh_hw_mac_stats, tx_oversize)},
> +	{"mac_tx_fragment",  offsetof(struct zxdh_hw_mac_stats, tx_fragment)},
> +	{"mac_tx_jabber",  offsetof(struct zxdh_hw_mac_stats,  tx_jabber)},
> +	{"mac_tx_control",  offsetof(struct zxdh_hw_mac_stats, tx_control)},
> +	{"mac_tx_eee",   offsetof(struct zxdh_hw_mac_stats, tx_eee)},
> +	{"mac_tx_error",   offsetof(struct zxdh_hw_mac_stats, tx_error)},
> +	{"mac_tx_fcs_error",  offsetof(struct zxdh_hw_mac_stats, tx_fcs_error)},
> +	{"mac_tx_drop",  offsetof(struct zxdh_hw_mac_stats, tx_drop)},
> +};
> +
> +/* MAC byte counters (PF only); offsets into struct zxdh_hw_mac_bytes. */
> +static const struct rte_zxdh_xstats_name_off rte_zxdh_mac_bytes_strings[] = {
> +	{"mac_rx_total_bytes",   offsetof(struct zxdh_hw_mac_bytes, rx_total_bytes)},
> +	{"mac_rx_good_bytes",   offsetof(struct zxdh_hw_mac_bytes, rx_good_bytes)},
> +	{"mac_tx_total_bytes",  offsetof(struct zxdh_hw_mac_bytes,  tx_total_bytes)},
> +	{"mac_tx_good_bytes",  offsetof(struct zxdh_hw_mac_bytes, tx_good_bytes)},
> +};
> +
> +/* Per-vport VQM counters; offsets into struct zxdh_hw_stats. */
> +static const struct rte_zxdh_xstats_name_off rte_zxdh_vqm_stat_strings[] = {
> +	{"vqm_rx_vport_packets",    offsetof(struct zxdh_hw_stats, rx_total)},
> +	{"vqm_tx_vport_packets",    offsetof(struct zxdh_hw_stats, tx_total)},
> +	{"vqm_rx_vport_bytes",   offsetof(struct zxdh_hw_stats, rx_bytes)},
> +	{"vqm_tx_vport_bytes",   offsetof(struct zxdh_hw_stats, tx_bytes)},
> +	{"vqm_rx_vport_dropped",   offsetof(struct zxdh_hw_stats, rx_drop)},
> +};
> +
> +#define EAL_INTR_EPOLL_WAIT_FOREVER			(-1)
> +/* NOTE(review): VLAN_TAG_LEN is also defined in zxdh_common.h;
> + * keep a single definition.
> + */
> +#define VLAN_TAG_LEN						4 /* 802.3ac tag (not DMA'd) */
> +
> +#define LOW3_BIT_MASK						0x7
> +#define LOW5_BIT_MASK						0x1f
> +
> +#define ZXDH_VF_LOCK_REG					0x90
> +#define ZXDH_VF_LOCK_ENABLE_MASK			0x1
> +#define ZXDH_COI_TABLE_BASE_ADDR			0x5000
> +#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX		10
> +
> +#define ZXDH_MIN_RX_BUFSIZE					64
> +
> +/* Use RTE_DIM() instead of open-coded sizeof divisions for the
> + * xstats table entry counts.
> + */
> +#define ZXDH_NB_RXQ_XSTATS	RTE_DIM(rte_zxdh_rxq_stat_strings)
> +#define ZXDH_NB_TXQ_XSTATS	RTE_DIM(rte_zxdh_txq_stat_strings)
> +#define ZXDH_NP_XSTATS		RTE_DIM(rte_zxdh_np_stat_strings)
> +#define ZXDH_MAC_XSTATS		RTE_DIM(rte_zxdh_mac_stat_strings)
> +#define ZXDH_MAC_BYTES		RTE_DIM(rte_zxdh_mac_bytes_strings)
> +#define ZXDH_VQM_XSTATS		RTE_DIM(rte_zxdh_vqm_stat_strings)
> +static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev);
> +static void zxdh_notify_peers(struct rte_eth_dev *dev);
> +static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev);
> +static void zxdh_priv_res_free(struct zxdh_hw *priv);
> +static void zxdh_queues_unbind_intr(struct rte_eth_dev *dev);
> +static int zxdh_tables_init(struct rte_eth_dev *dev);
> +static int32_t zxdh_free_queues(struct rte_eth_dev *dev);
> +static int32_t zxdh_acquire_lock(struct rte_eth_dev *dev);
> +static int32_t zxdh_release_lock(struct rte_eth_dev *dev);
> +static int32_t zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch);
> +static int32_t zxdh_release_channel(struct rte_eth_dev *dev);
> +
> +static int vf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
> +			uint16_t *reps_len, void *eth_dev __rte_unused);
> +static int pf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
> +			uint16_t *reps_len, void *eth_dev __rte_unused);
> +static void zxdh_np_destroy(struct rte_eth_dev *dev);
> +static void zxdh_intr_cb_reg(struct rte_eth_dev *dev);
> +static void zxdh_intr_cb_unreg(struct rte_eth_dev *dev);
> +static int32_t zxdh_dev_devargs_parse(struct rte_devargs *devargs, struct zxdh_hw *hw);
> +
> +/* eth_dev xstats_get_names callback.
> + * When 'xstats_names' is NULL, returns the total number of xstats
> + * (NP + optional PF-only MAC/byte counters + VQM + per-queue rx/tx).
> + * Otherwise fills the names in that same order and returns the count
> + * written. Name order must match zxdh_dev_xstats_get().
> + */
> +int32_t zxdh_dev_xstats_get_names(struct rte_eth_dev *dev,
> +			struct rte_eth_xstat_name *xstats_names,
> +			__rte_unused unsigned int limit)
> +{
> +	uint32_t i     = 0;
> +	uint32_t count = 0;
> +	uint32_t t     = 0;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	unsigned int nstats = dev->data->nb_tx_queues * ZXDH_NB_TXQ_XSTATS +
> +					dev->data->nb_rx_queues * ZXDH_NB_RXQ_XSTATS +
> +					ZXDH_NP_XSTATS + ZXDH_VQM_XSTATS;
> +
> +	/* MAC counters are only readable from the PF */
> +	if (hw->is_pf)
> +		nstats += ZXDH_MAC_XSTATS + ZXDH_MAC_BYTES;
> +
> +	if (xstats_names != NULL) {
> +		/* Note: limit checked in rte_eth_xstats_names() */
> +		for (i = 0; i < ZXDH_NP_XSTATS; i++) {
> +			snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
> +			"%s", rte_zxdh_np_stat_strings[i].name);
> +			count++;
> +		}
> +		if (hw->is_pf) {
> +			for (i = 0; i < ZXDH_MAC_XSTATS; i++) {
> +				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
> +				"%s", rte_zxdh_mac_stat_strings[i].name);
> +				count++;
> +			}
> +			for (i = 0; i < ZXDH_MAC_BYTES; i++) {
> +				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
> +				"%s", rte_zxdh_mac_bytes_strings[i].name);
> +				count++;
> +			}
> +		}
> +		for (i = 0; i < ZXDH_VQM_XSTATS; i++) {
> +			snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
> +			"%s", rte_zxdh_vqm_stat_strings[i].name);
> +			count++;
> +		}
> +		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
> +
> +			if (rxvq == NULL)
> +				continue;
> +			for (t = 0; t < ZXDH_NB_RXQ_XSTATS; t++) {
> +				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
> +				"rx_q%u_%s", i, rte_zxdh_rxq_stat_strings[t].name);
> +				count++;
> +			}
> +		}
> +
> +		for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +			struct virtnet_tx *txvq = dev->data->tx_queues[i];
> +
> +			if (txvq == NULL)
> +				continue;
> +			for (t = 0; t < ZXDH_NB_TXQ_XSTATS; t++) {
> +				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
> +				"tx_q%u_%s", i, rte_zxdh_txq_stat_strings[t].name);
> +				count++;
> +			}
> +		}
> +		PMD_DRV_LOG(INFO, "stats count  = %u", count);
> +		return count;
> +	}
> +	return nstats;
> +}
> +/*
> + * rte_eth_dev xstats_get callback.
> + * Fills xstats[] in the fixed order: NP stats, (PF only) MAC stats and
> + * MAC byte counters, VQM stats, per-RX-queue soft stats, per-TX-queue
> + * soft stats. This order must match zxdh_dev_xstats_get_names().
> + * Returns the number of entries written, or the required size if n is
> + * too small.
> + */
> +int32_t zxdh_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, uint32_t n)
> +{
> +	uint32_t i	   = 0;
> +	uint32_t count = 0;
> +	uint32_t t = 0;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct zxdh_hw_np_stats np_stats = {0};
> +	struct zxdh_hw_mac_stats mac_stats = {0};
> +	struct zxdh_hw_mac_bytes mac_bytes = {0};
> +	struct zxdh_hw_stats  vqm_stats = {0};
> +	uint32_t nstats = dev->data->nb_tx_queues * ZXDH_NB_TXQ_XSTATS +
> +			dev->data->nb_rx_queues * ZXDH_NB_RXQ_XSTATS +
> +			ZXDH_NP_XSTATS + ZXDH_VQM_XSTATS;
> +
> +	if (hw->is_pf) {
> +		nstats += ZXDH_MAC_XSTATS + ZXDH_MAC_BYTES;
> +		zxdh_hw_mac_get(dev, &mac_stats, &mac_bytes);
> +	}
> +	/* Caller's array too small: report the required size, fill nothing. */
> +	if (n < nstats)
> +		return nstats;
> +	zxdh_hw_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
> +	zxdh_hw_np_stats(dev, &np_stats);
> +	/* Each value is read through the byte offset recorded in the
> +	 * corresponding *_stat_strings[] table.
> +	 */
> +	for (i = 0; i < ZXDH_NP_XSTATS; i++) {
> +		xstats[count].value = *(uint64_t *)(((char *)&np_stats) +
> +				rte_zxdh_np_stat_strings[i].offset);
> +		xstats[count].id = count;
> +		count++;
> +	}
> +	if (hw->is_pf) {
> +		for (i = 0; i < ZXDH_MAC_XSTATS; i++) {
> +			xstats[count].value = *(uint64_t *)(((char *)&mac_stats) +
> +					rte_zxdh_mac_stat_strings[i].offset);
> +			xstats[count].id = count;
> +			count++;
> +		}
> +		for (i = 0; i < ZXDH_MAC_BYTES; i++) {
> +			xstats[count].value = *(uint64_t *)(((char *)&mac_bytes) +
> +					rte_zxdh_mac_bytes_strings[i].offset);
> +			xstats[count].id = count;
> +			count++;
> +		}
> +	}
> +	for (i = 0; i < ZXDH_VQM_XSTATS; i++) {
> +		xstats[count].value = *(uint64_t *)(((char *)&vqm_stats) +
> +				rte_zxdh_vqm_stat_strings[i].offset);
> +		xstats[count].id = count;
> +		count++;
> +	}
> +	/* Per-queue counters live inside the virtnet_rx/virtnet_tx structs;
> +	 * offsets come from the rxq/txq string tables.
> +	 */
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
> +
> +		if (rxvq == NULL)
> +			continue;
> +		for (t = 0; t < ZXDH_NB_RXQ_XSTATS; t++) {
> +			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
> +					rte_zxdh_rxq_stat_strings[t].offset);
> +			xstats[count].id = count;
> +			count++;
> +		}
> +	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		struct virtnet_tx *txvq = dev->data->tx_queues[i];
> +
> +		if (txvq == NULL)
> +			continue;
> +
> +		for (t = 0; t < ZXDH_NB_TXQ_XSTATS; t++) {
> +			xstats[count].value = *(uint64_t *)(((char *)txvq) +
> +					rte_zxdh_txq_stat_strings[t].offset);
> +			xstats[count].id = count;
> +			count++;
> +		}
> +	}
> +	PMD_DRV_LOG(INFO, "stats count  = %u", count);
> +	return count;
> +}
> +/**
> + * rte_eth_dev stats_get callback: aggregate VQM, (PF only) MAC and NP
> + * hardware counters into *stats, then copy per-queue soft counters.
> + * Per-queue values are read via the stat string tables; indexes 0/1/2/5
> + * are used as packets/bytes/errors/drops -- NOTE(review): derived from
> + * the offset tables, please confirm the ordering is guaranteed.
> + */
> +int32_t zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct zxdh_hw_stats  vqm_stats = {0};
> +	struct zxdh_hw_np_stats np_stats = {0};
> +	struct zxdh_hw_mac_stats mac_stats = {0};
> +	struct zxdh_hw_mac_bytes mac_bytes = {0};
> +	uint32_t i = 0;
> +
> +	zxdh_hw_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
> +	if (hw->is_pf)
> +		zxdh_hw_mac_get(dev, &mac_stats, &mac_bytes);
> +
> +	zxdh_hw_np_stats(dev, &np_stats);
> +
> +	stats->ipackets = vqm_stats.rx_total;
> +	stats->opackets = vqm_stats.tx_total;
> +	stats->ibytes = vqm_stats.rx_bytes;
> +	stats->obytes = vqm_stats.tx_bytes;
> +	/* mac_stats stays zero on VF, so these sums are PF-only additions. */
> +	stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
> +	stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.np_rx_mtu_drop_pkts;
> +	stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.np_tx_mtu_drop_pkts;
> +
> +	/* Count meter drops only when ingress/egress metering is enabled. */
> +	if (hw->i_mtr_en || hw->e_mtr_en)
> +		stats->imissed += np_stats.np_rx_mtr_drop_pkts;
> +
> +	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
> +	for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
> +		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
> +
> +		if (rxvq == NULL)
> +			continue;
> +		stats->q_ipackets[i] = *(uint64_t *)(((char *)rxvq) +
> +				rte_zxdh_rxq_stat_strings[0].offset);
> +		stats->q_ibytes[i] = *(uint64_t *)(((char *)rxvq) +
> +				rte_zxdh_rxq_stat_strings[1].offset);
> +		stats->q_errors[i] = *(uint64_t *)(((char *)rxvq) +
> +				rte_zxdh_rxq_stat_strings[2].offset);
> +		stats->q_errors[i] += *(uint64_t *)(((char *)rxvq) +
> +				rte_zxdh_rxq_stat_strings[5].offset);
> +	}
> +
> +	for (i = 0; (i < dev->data->nb_tx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
> +		struct virtnet_tx *txvq = dev->data->tx_queues[i];
> +
> +		if (txvq == NULL)
> +			continue;
> +		stats->q_opackets[i] = *(uint64_t *)(((char *)txvq) +
> +				rte_zxdh_txq_stat_strings[0].offset);
> +		stats->q_obytes[i] = *(uint64_t *)(((char *)txvq) +
> +				rte_zxdh_txq_stat_strings[1].offset);
> +		/* += accumulates tx errors on top of the rx errors above. */
> +		stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
> +				rte_zxdh_txq_stat_strings[2].offset);
> +		stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
> +				rte_zxdh_txq_stat_strings[5].offset);
> +	}
> +	return 0;
> +}
> +
> +/**
> + * rte_eth_dev stats_reset callback: clear the VQM counters, and on a PF
> + * additionally clear the MAC counters.
> + */
> +int32_t zxdh_dev_stats_reset(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	zxdh_hw_stats_reset(dev, ZXDH_VQM_DEV_STATS_RESET);
> +	if (hw->is_pf)
> +		zxdh_hw_stats_reset(dev, ZXDH_MAC_STATS_RESET);
> +
> +	return 0;
> +}
> +
> +

There are two blank lines between the functions above, and none between
the functions below. These are basic, non-functional style issues, so I
won't comment further on them, but please take care of these basics so
they don't grab our attention and get in the way of real issues.
Please go through the code from scratch to address the style issues,
commented-out code, empty comments, etc...


> +/* Reset a virtqueue's software bookkeeping and (re)build its packed ring. */
> +static void zxdh_init_vring(struct virtqueue *vq)
> +{
> +	uint8_t *ring_base = vq->vq_ring_virt_mem;
> +	int32_t nentries = vq->vq_nentries;
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	memset(ring_base, 0, vq->vq_ring_size);
> +
> +	vq->vq_avail_idx	 = 0;
> +	vq->vq_used_cons_idx = 0;
> +	vq->vq_desc_head_idx = 0;
> +	vq->vq_desc_tail_idx = (uint16_t)(nentries - 1);
> +	vq->vq_free_cnt = nentries;
> +	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * nentries);
> +	vring_init_packed(&vq->vq_packed.ring, ring_base, ZXDH_PCI_VRING_ALIGN, nentries);
> +	vring_desc_init_packed(vq, nentries);
> +	/* Keep device(host)-to-guest interrupts off until explicitly enabled. */
> +	virtqueue_disable_intr(vq);
> +}
> +/* Even logical queue indexes are RX queues, odd indexes are TX queues. */
> +static inline int32_t get_queue_type(uint16_t vtpci_queue_idx)
> +{
> +	return (vtpci_queue_idx & 1) ? VTNET_TQ : VTNET_RQ;
> +}
> +/**
> + * Allocate and initialize one virtqueue (logical index vtpci_logic_qidx):
> + * virtqueue struct, vring memzone, TX header memzone (TX queues) or SW
> + * ring (RX queues), then program the queue into hardware.
> + * Returns 0 on success, negative errno on failure with all resources
> + * released.
> + */
> +int32_t zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
> +{
> +	char vq_name[VIRTQUEUE_MAX_NAME_SZ] = {0};
> +	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ] = {0};
> +	const struct rte_memzone *mz = NULL;
> +	const struct rte_memzone *hdr_mz = NULL;
> +	uint32_t size = 0;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct virtnet_rx *rxvq = NULL;
> +	struct virtnet_tx *txvq = NULL;
> +	struct virtqueue *vq = NULL;
> +	size_t sz_hdr_mz = 0;
> +	void *sw_ring = NULL;
> +	int32_t queue_type = get_queue_type(vtpci_logic_qidx);
> +	int32_t numa_node = dev->device->numa_node;
> +	uint16_t vtpci_phy_qidx = 0;
> +	uint32_t vq_size = 0;
> +	int32_t ret = 0;
> +
> +	if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
> +		PMD_INIT_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
> +		return -EINVAL;
> +	}
> +	vtpci_phy_qidx = hw->channel_context[vtpci_logic_qidx].ph_chno;
> +
> +	PMD_INIT_LOG(INFO, "vtpci_logic_qidx :%d setting up physical queue: %u on NUMA node %d",
> +			vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
> +
> +	vq_size = hw->q_depth;
> +
> +	if (VTPCI_OPS(hw)->set_queue_num != NULL)
> +		VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size);
> +
> +	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, vtpci_phy_qidx);
> +
> +	size = RTE_ALIGN_CEIL(sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra),
> +				RTE_CACHE_LINE_SIZE);
> +	if (queue_type == VTNET_TQ) {
> +		/*
> +		 * For each xmit packet, allocate a zxdh_net_hdr
> +		 * and indirect ring elements
> +		 */
> +		sz_hdr_mz = vq_size * sizeof(struct zxdh_tx_region);
> +	}
> +
> +	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, numa_node);
> +	if (vq == NULL) {
> +		PMD_INIT_LOG(ERR, "can not allocate vq");
> +		return -ENOMEM;
> +	}
> +	hw->vqs[vtpci_logic_qidx] = vq;
> +
> +	vq->hw = hw;
> +	vq->vq_queue_index = vtpci_phy_qidx;
> +	vq->vq_nentries = vq_size;
> +
> +	vq->vq_packed.used_wrap_counter = 1;
> +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +	vq->vq_packed.event_flags_shadow = 0;
> +	if (queue_type == VTNET_RQ)
> +		vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> +
> +	/*
> +	 * Reserve a memzone for vring elements
> +	 */
> +	size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
> +	vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN);
> +	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
> +
> +	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
> +				numa_node, RTE_MEMZONE_IOVA_CONTIG,
> +				ZXDH_PCI_VRING_ALIGN);
> +	if (mz == NULL) {
> +		/* EEXIST means the zone survived a previous run: reuse it. */
> +		if (rte_errno == EEXIST)
> +			mz = rte_memzone_lookup(vq_name);
> +		if (mz == NULL) {
> +			ret = -ENOMEM;
> +			goto fail_q_alloc;
> +		}
> +	}
> +
> +	memset(mz->addr, 0, mz->len);
> +
> +	vq->vq_ring_mem = mz->iova;
> +	vq->vq_ring_virt_mem = mz->addr;
> +	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:	   0x%" PRIx64, (uint64_t)mz->iova);
> +	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64, (uint64_t)(uintptr_t)mz->addr);
> +
> +	zxdh_init_vring(vq);
> +
> +	if (sz_hdr_mz) {
> +		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
> +					dev->data->port_id, vtpci_phy_qidx);
> +		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
> +					numa_node, RTE_MEMZONE_IOVA_CONTIG,
> +					RTE_CACHE_LINE_SIZE);
> +		if (hdr_mz == NULL) {
> +			if (rte_errno == EEXIST)
> +				hdr_mz = rte_memzone_lookup(vq_hdr_name);
> +			if (hdr_mz == NULL) {
> +				ret = -ENOMEM;
> +				goto fail_q_alloc;
> +			}
> +		}
> +	}
> +
> +	if (queue_type == VTNET_RQ) {
> +		size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]);
> +
> +		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node);
> +		if (!sw_ring) {
> +			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
> +			ret = -ENOMEM;
> +			goto fail_q_alloc;
> +		}
> +
> +		vq->sw_ring = sw_ring;
> +		rxvq = &vq->rxq;
> +		rxvq->vq = vq;
> +		rxvq->port_id = dev->data->port_id;
> +		rxvq->mz = mz;
> +	} else {             /* queue_type == VTNET_TQ */
> +		txvq = &vq->txq;
> +		txvq->vq = vq;
> +		txvq->port_id = dev->data->port_id;
> +		txvq->mz = mz;
> +		txvq->virtio_net_hdr_mz = hdr_mz;
> +		txvq->virtio_net_hdr_mem = hdr_mz->iova;
> +	}
> +
> +	vq->offset = offsetof(struct rte_mbuf, buf_iova);
> +	if (queue_type == VTNET_TQ) {
> +		struct zxdh_tx_region *txr = hdr_mz->addr;
> +		uint32_t i;
> +
> +		memset(txr, 0, vq_size * sizeof(*txr));
> +		for (i = 0; i < vq_size; i++) {
> +			/* first indirect descriptor is always the tx header */
> +			struct vring_packed_desc *start_dp = txr[i].tx_packed_indir;
> +
> +			vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir));
> +			start_dp->addr = txvq->virtio_net_hdr_mem + i * sizeof(*txr) +
> +					offsetof(struct zxdh_tx_region, tx_hdr);
> +			/* length will be updated to actual pi hdr size when xmit pkt */
> +			start_dp->len = 0;
> +		}
> +	}
> +	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
> +		PMD_INIT_LOG(ERR, "setup_queue failed");
> +		/* Release everything instead of leaking vq/mz/hdr_mz/sw_ring. */
> +		ret = -EINVAL;
> +		goto fail_q_alloc;
> +	}
> +	return 0;
> +fail_q_alloc:
> +	rte_free(sw_ring);
> +	rte_memzone_free(hdr_mz);
> +	rte_memzone_free(mz);
> +	/* Clear the stale pointer so later teardown cannot double-free vq. */
> +	hw->vqs[vtpci_logic_qidx] = NULL;
> +	rte_free(vq);
> +	return ret;
> +}
> +
> +/*
> + * Tear down all virtqueues: clear the COI table, remove each queue from
> + * hardware, free per-queue resources (SW ring + rx memzone for RX,
> + * memzone + header memzone for TX), then free the vq array itself.
> + * Returns 0 on success, -1 if the COI table could not be cleared.
> + */
> +int32_t zxdh_free_queues(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint16_t nr_vq = hw->queue_num;
> +	struct virtqueue *vq = NULL;
> +	int32_t queue_type = 0;
> +	uint16_t i = 0;
> +
> +	if (hw->vqs == NULL)
> +		return 0;
> +
> +	/* Clear COI table */
> +	if (zxdh_release_channel(dev) < 0) {
> +		PMD_INIT_LOG(ERR, "Failed to clear coi table");
> +		return -1;
> +	}
> +
> +	for (i = 0; i < nr_vq; i++) {
> +		vq = hw->vqs[i];
> +		if (vq == NULL)
> +			continue;
> +
> +		VTPCI_OPS(hw)->del_queue(hw, vq);
> +		queue_type = get_queue_type(i);
> +		if (queue_type == VTNET_RQ) {
> +			rte_free(vq->sw_ring);
> +			rte_memzone_free(vq->rxq.mz);
> +		} else if (queue_type == VTNET_TQ) {
> +			rte_memzone_free(vq->txq.mz);
> +			rte_memzone_free(vq->txq.virtio_net_hdr_mz);
> +		}
> +
> +		rte_free(vq);
> +		hw->vqs[i] = NULL;
> +		PMD_INIT_LOG(DEBUG, "Release to queue %d success!", i);
> +	}
> +
> +	rte_free(hw->vqs);
> +	hw->vqs = NULL;
> +
> +	return 0;
> +}
> +/**
> + * Allocate the hw->vqs array and set up all nr_vq virtqueues, acquiring
> + * a hardware channel for each logical queue. On any failure everything
> + * allocated so far is released via zxdh_free_queues().
> + * Returns 0 on success, -ENOMEM or -1 on failure.
> + */
> +static int32_t zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq)
> +{
> +	uint16_t lch;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
> +	if (!hw->vqs) {
> +		PMD_INIT_LOG(ERR, "Failed to allocate vqs");
> +		return -ENOMEM;
> +	}
> +	for (lch = 0; lch < nr_vq; lch++) {
> +		if (zxdh_acquire_channel(dev, lch) < 0) {
> +			PMD_INIT_LOG(ERR, "Failed to acquire the channels");
> +			zxdh_free_queues(dev);
> +			return -1;
> +		}
> +		if (zxdh_init_queue(dev, lch) < 0) {
> +			PMD_INIT_LOG(ERR, "Failed to alloc virtio queue");
> +			zxdh_free_queues(dev);
> +			return -1;
> +		}
> +	}
> +	return 0;
> +}
> +
> +/* rte_eth_dev callback: unmask the interrupt of one RX queue. */
> +int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
> +{
> +	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	virtqueue_enable_intr(rxvq->vq);
> +	/* Make the event-flag update visible to the device before returning. */
> +	zxdh_mb(hw->weak_barriers);
> +	return 0;
> +}
> +
> +/* rte_eth_dev callback: mask the interrupt of one RX queue. */
> +int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
> +{
> +	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
> +
> +	virtqueue_disable_intr(rxvq->vq);
> +	return 0;
> +}
> +
> +
> +/* Re-arm the device interrupt and refresh the cached MSI-X state. */
> +static int32_t zxdh_intr_unmask(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	int32_t rc = rte_intr_ack(dev->intr_handle);
> +
> +	if (rc < 0)
> +		return -1;
> +
> +	hw->use_msix = zxdh_vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
> +	return 0;
> +}
> +
> +/*
> + * Register the interrupt callbacks (once) and enable the device
> + * interrupt. Returns 0 on success or the rte_intr_enable() error code.
> + */
> +static int32_t zxdh_intr_enable(struct rte_eth_dev *dev)
> +{
> +	int ret = 0;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (!hw->intr_enabled) {
> +		zxdh_intr_cb_reg(dev);
> +		ret = rte_intr_enable(dev->intr_handle);
> +		if (unlikely(ret))
> +			PMD_INIT_LOG(ERR, "Failed to enable %s intr", dev->data->name);
> +		else
> +			/* Only mark enabled when the enable actually succeeded,
> +			 * so a later retry is not silently skipped.
> +			 */
> +			hw->intr_enabled = 1;
> +	}
> +	return ret;
> +}
> +/*
> + * Unregister the interrupt callbacks and disable the device interrupt.
> + * Returns 0 if already disabled or on success, -1 on failure.
> + */
> +static int32_t zxdh_intr_disable(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (!hw->intr_enabled)
> +		return 0;
> +
> +	zxdh_intr_cb_unreg(dev);
> +	if (rte_intr_disable(dev->intr_handle) < 0)
> +		return -1;
> +
> +	hw->intr_enabled = 0;
> +	return 0;
> +}
> +/**
> + * rte_eth_dev link_update callback: query the link state from hardware,
> + * gate it with the administrative status, push the resulting state back
> + * to the port attribute table, and publish it via rte_eth_linkstatus_set.
> + */
> +static int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, __rte_unused int32_t wait_to_complete)
> +{
> +	struct rte_eth_link link;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	int32_t ret = 0;
> +
> +	memset(&link, 0, sizeof(link));
> +	link.link_duplex = hw->duplex;
> +	link.link_speed  = hw->speed;
> +	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
> +
> +	if (!hw->started) {
> +		PMD_INIT_LOG(INFO, "port not start");
> +		link.link_status = RTE_ETH_LINK_DOWN;
> +		link.link_speed  = RTE_ETH_SPEED_NUM_UNKNOWN;
> +	}
> +	PMD_DRV_LOG(INFO, "Get link status from hw");
> +	ret = zxdh_link_info_get(dev, &link);
> +	if (ret != 0) {
> +		/* PMD log macros append the newline; no trailing "\n" needed. */
> +		PMD_DRV_LOG(ERR, "Failed to get link status from hw");
> +		return ret;
> +	}
> +	/* Administratively-down overrides whatever the hardware reports. */
> +	link.link_status &= hw->admin_status;
> +	if (link.link_status == RTE_ETH_LINK_DOWN)
> +		link.link_speed  = RTE_ETH_SPEED_NUM_UNKNOWN;
> +
> +	PMD_DRV_LOG(INFO, "link.link_status %u link.link_speed %u link.link_duplex %u ",
> +			link.link_status, link.link_speed, link.link_duplex);
> +	ret = zxdh_dev_config_port_status(dev, link.link_status);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "set port attr.is_up = %u failed.", link.link_status);
> +		return ret;
> +	}
> +	return rte_eth_linkstatus_set(dev, &link);
> +}
> +/*
> + * Process  dev config changed interrupt. Call the callback
> + * if link state changed, generate gratuitous RARP packet if
> + * the status indicates an ANNOUNCE.
> + */
> +#define ZXDH_NET_S_LINK_UP   1 /* Link is up */
> +#define ZXDH_NET_S_ANNOUNCE  2 /* Announcement is needed */
> +
> +
> +#define ZXDH_PF_STATE_VF_AUTO 0
> +#define ZXDH_PF_STATE_VF_ENABLE 1
> +/* NOTE(review): "DSIABLE" is a typo for "DISABLE"; rename if not yet
> + * referenced elsewhere.
> + */
> +#define ZXDH_PF_STATE_VF_DSIABLE 2
> +static void zxdh_devconf_intr_handler(void *param)
> +{
> +	struct rte_eth_dev *dev = param;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint16_t status = 0;
> +	/* Read interrupt status which clears interrupt */
> +	uint8_t isr = zxdh_vtpci_isr(hw);
> +
> +	if (zxdh_intr_unmask(dev) < 0)
> +		PMD_DRV_LOG(ERR, "interrupt enable failed");
> +	if (isr & ZXDH_PCI_ISR_CONFIG) {
> +		/* Notify applications of a possible link state change. */
> +		if (zxdh_dev_link_update(dev, 0) == 0)
> +			rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
> +
> +		if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS)) {
> +			zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, status),
> +					&status, sizeof(status));
> +			/* Device asked for a gratuitous RARP announcement. */
> +			if (status & ZXDH_NET_S_ANNOUNCE)
> +				zxdh_notify_peers(dev);
> +		}
> +	}
> +}
> +
> +/* Interrupt handler for bar messages arriving from the RISC-V core. */
> +static void zxdh_fromriscv_intr_handler(void *param)
> +{
> +	struct rte_eth_dev *dev = param;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint64_t virt_addr =
> +		(uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
> +
> +	if (hw->is_pf) {
> +		PMD_INIT_LOG(INFO, "zxdh_risc2pf_intr_handler  PF ");
> +		zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_PF, virt_addr, dev);
> +	} else {
> +		PMD_INIT_LOG(INFO, "zxdh_riscvf_intr_handler  VF ");
> +		zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_VF, virt_addr, dev);
> +	}
> +}
> +
> +/* Interrupt handler for bar messages from the peer PF/VF. */
> +static void zxdh_frompfvf_intr_handler(void *param)
> +{
> +	struct rte_eth_dev *dev = param;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint64_t virt_addr =
> +		(uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);
> +
> +	if (hw->is_pf) {
> +		PMD_INIT_LOG(INFO, "zxdh_pf2vf_intr_handler  PF ");
> +		zxdh_bar_irq_recv(MSG_CHAN_END_VF, MSG_CHAN_END_PF, virt_addr, dev);
> +	} else {
> +		PMD_INIT_LOG(INFO, "zxdh_pf2vf_intr_handler  VF ");
> +		zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, dev);
> +	}
> +}
> +
> +/*
> + * Release all interrupt resources: detach the config vector, unbind
> + * queue interrupts, disable and unregister callbacks, then free the
> + * event fds and the risc/dtb handle copies.
> + */
> +static int32_t zxdh_intr_release(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
> +		VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR);
> +
> +	zxdh_queues_unbind_intr(dev);
> +	zxdh_intr_disable(dev);
> +
> +	rte_intr_efd_disable(dev->intr_handle);
> +	rte_intr_vec_list_free(dev->intr_handle);
> +	rte_free(hw->risc_intr);
> +	hw->risc_intr = NULL;
> +	rte_free(hw->dtb_intr);
> +	hw->dtb_intr = NULL;
> +	return 0;
> +}
> +
> +/* NOTE(review): rdtsc/hz yields whole SECONDS, not milliseconds; either
> + * the name or the computation is wrong (callers log the value as "s").
> + * Please rename or fix the unit.
> + */
> +static uint64_t get_cur_time_ms(void)
> +{
> +	return (rte_rdtsc() / rte_get_tsc_hz());
> +}
> +
> +/*
> + * Remove this VF's broadcast/unicast/multicast attribute entries from
> + * the eRAM tables. Each table holds 4 consecutive entries per vfid
> + * (vf_group_id 0..3), indexed as ((vfid - base) << 2) + group.
> + * Returns 0 on success or the first dpp_dtb delete error code.
> + */
> +static int16_t zxdh_promisc_unint(struct zxdh_hw *hw)
> +{
> +	int16_t ret = 0, vf_group_id = 0;
> +	struct zxdh_brocast_t brocast_table = {0};
> +	struct zxdh_unitcast_t uc_table = {0};
> +	struct zxdh_multicast_t mc_table = {0};
> +
> +	for (; vf_group_id < 4; vf_group_id++) {
> +		DPP_DTB_ERAM_ENTRY_INFO_T eram_brocast_entry = {
> +			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
> +			(ZXIC_UINT32 *)&brocast_table
> +		};
> +		DPP_DTB_USER_ENTRY_T eram_brocast = {
> +			.sdt_no = ZXDH_SDT_BROCAST_ATT_TABLE,
> +			.p_entry_data = (void *)&eram_brocast_entry
> +		};
> +
> +		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid, 1, &eram_brocast);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write eram-promisc failed, code:%d", ret);
> +			return ret;
> +		}
> +
> +		DPP_DTB_ERAM_ENTRY_INFO_T eram_uc_entry = {
> +			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
> +			(ZXIC_UINT32 *)&uc_table
> +		};
> +		DPP_DTB_USER_ENTRY_T entry_unicast = {
> +			.sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE,
> +			.p_entry_data = (void *)&eram_uc_entry
> +		};
> +
> +		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid, 1, &entry_unicast);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write eram-promisc failed, code:%d", ret);
> +			return ret;
> +		}
> +
> +		DPP_DTB_ERAM_ENTRY_INFO_T eram_mc_entry = {
> +			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
> +			(ZXIC_UINT32 *)&mc_table
> +		};
> +		DPP_DTB_USER_ENTRY_T entry_multicast = {
> +			.sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE,
> +			.p_entry_data = (void *)&eram_mc_entry
> +		};
> +
> +		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid,
> +					1, &entry_multicast);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write eram-promisc failed, code:%d", ret);
> +			return ret;
> +		}
> +	}
> +	return ret;
> +}
> +
> +
> +static int16_t zxdh_port_unint(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct zxdh_msg_info msg_info = {0};
> +	struct zxdh_port_att_entry port_attr = {0};
> +	int16_t ret = 0;
> +
> +	if (hw->i_mtr_en || hw->e_mtr_en)
> +		zxdh_mtr_release(dev);
> +
> +
> +	if (hw->is_pf == 1) {
> +		DPP_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (ZXIC_UINT32 *)&port_attr};
> +		DPP_DTB_USER_ENTRY_T entry = {
> +			.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
> +			.p_entry_data = (void *)&port_attr_entry
> +		};
> +		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write port_attr_eram failed, code:%d", ret);
> +			return ret;
> +		}
> +
> +		ret = zxdh_promisc_unint(hw);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write promisc_table failed, code:%d", ret);
> +			return ret;
> +		}
> +	} else {
> +		msg_head_build(hw, ZXDH_VF_PORT_UNINIT, &msg_info);
> +		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
> +		if (ret)
> +			PMD_DRV_LOG(ERR, "vf port_init failed");
> +
> +	}
> +	return ret;
> +}
> +/**
> + * rte_eth_dev close callback: stop the port, uninitialize per-port
> + * tables, release interrupts, NP/DTB state, queues and private
> + * resources. Primary process only.
> + */
> +int32_t zxdh_dev_close(struct rte_eth_dev *dev)
> +{
> +	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> +		return 0;
> +	PMD_INIT_LOG(DEBUG, "zxdh_dev_close");
> +	int ret = zxdh_dev_stop(dev);
> +
> +	if (ret != 0) {
> +		PMD_INIT_LOG(ERR, "%s :stop port %s failed ", __func__, dev->device->name);
> +		return -1;
> +	}
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	hw->started = 0;
> +	hw->admin_status = 0;
> +
> +	ret = zxdh_port_unint(dev);
> +	if (ret != 0) {
> +		PMD_INIT_LOG(ERR, "%s :unint port %s failed ", __func__, dev->device->name);
> +		return -1;
> +	}
> +	if (zxdh_shared_data != NULL)
> +		zxdh_mtr_release(dev);
> +
> +	zxdh_intr_release(dev);
> +
> +	/* get_cur_time_ms() returns uint64_t: use PRIu64, not %ld, which is
> +	 * undefined behavior on ILP32 targets.
> +	 */
> +	PMD_DRV_LOG(INFO, "zxdh_dtb_data_destroy  begin  time: %" PRIu64 " s", get_cur_time_ms());
> +	zxdh_np_destroy(dev);
> +	PMD_DRV_LOG(INFO, "zxdh_dtb_data_destroy  end  time: %" PRIu64 " s", get_cur_time_ms());
> +
> +	zxdh_vtpci_reset(hw);
> +	zxdh_dev_free_mbufs(dev);
> +	zxdh_free_queues(dev);
> +
> +	zxdh_bar_msg_chan_exit();
> +	zxdh_priv_res_free(hw);
> +
> +	if (dev->data->mac_addrs != NULL) {
> +		rte_free(dev->data->mac_addrs);
> +		dev->data->mac_addrs = NULL;
> +	}
> +	if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
> +		rte_free(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key);
> +		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
> +	}
> +	return 0;
> +}
> +/**
> + * Default host/guest feature masks and PCI device-config probing.
> + */
> +#define ZXDH_PMD_DEFAULT_HOST_FEATURES   \
> +	(1ULL << ZXDH_NET_F_MRG_RXBUF | \
> +	 1ULL << ZXDH_NET_F_STATUS    | \
> +	 1ULL << ZXDH_NET_F_MQ        | \
> +	 1ULL << ZXDH_F_ANY_LAYOUT    | \
> +	 1ULL << ZXDH_F_VERSION_1   | \
> +	 1ULL << ZXDH_F_RING_PACKED | \
> +	 1ULL << ZXDH_F_IN_ORDER    | \
> +	 1ULL << ZXDH_F_ORDER_PLATFORM | \
> +	 1ULL << ZXDH_F_NOTIFICATION_DATA |\
> +	 1ULL << ZXDH_NET_F_MAC | \
> +	 1ULL << ZXDH_NET_F_CSUM |\
> +	 1ULL << ZXDH_NET_F_GUEST_CSUM |\
> +	 1ULL << ZXDH_NET_F_GUEST_TSO4 |\
> +	 1ULL << ZXDH_NET_F_GUEST_TSO6 |\
> +	 1ULL << ZXDH_NET_F_HOST_TSO4 |\
> +	 1ULL << ZXDH_NET_F_HOST_TSO6 |\
> +	 1ULL << ZXDH_NET_F_GUEST_UFO |\
> +	 1ULL << ZXDH_NET_F_HOST_UFO)
> +
> +#define ZXDH_PMD_DEFAULT_GUEST_FEATURES   \
> +	(1ULL << ZXDH_NET_F_MRG_RXBUF | \
> +	 1ULL << ZXDH_NET_F_STATUS    | \
> +	 1ULL << ZXDH_NET_F_MQ        | \
> +	 1ULL << ZXDH_F_ANY_LAYOUT    | \
> +	 1ULL << ZXDH_F_VERSION_1     | \
> +	 1ULL << ZXDH_F_RING_PACKED   | \
> +	 1ULL << ZXDH_F_IN_ORDER      | \
> +	 1ULL << ZXDH_F_NOTIFICATION_DATA | \
> +	 1ULL << ZXDH_NET_F_MAC)
> +
> +#define ZXDH_RX_QUEUES_MAX  128U
> +#define ZXDH_TX_QUEUES_MAX  128U
> +/*
> + * Negotiate features, read (or randomize) the MAC address and determine
> + * the maximum number of queue pairs from the device config space.
> + */
> +static int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw)
> +{
> +	hw->host_features = zxdh_vtpci_get_features(hw);
> +	/* NOTE(review): the value read above is immediately overwritten with
> +	 * the default mask -- dead store; either drop the read or the
> +	 * override. Please confirm which is intended.
> +	 */
> +	hw->host_features = ZXDH_PMD_DEFAULT_HOST_FEATURES;
> +
> +	uint64_t guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;
> +	uint64_t nego_features = guest_features & hw->host_features;
> +
> +	hw->guest_features = nego_features;
> +
> +	if (hw->guest_features & (1ULL << ZXDH_NET_F_MAC)) {
> +		zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, mac),
> +				&hw->mac_addr, RTE_ETHER_ADDR_LEN);
> +		PMD_INIT_LOG(DEBUG, "get dev mac: %02X:%02X:%02X:%02X:%02X:%02X",
> +				hw->mac_addr[0], hw->mac_addr[1],
> +				hw->mac_addr[2], hw->mac_addr[3],
> +				hw->mac_addr[4], hw->mac_addr[5]);
> +	} else {
> +		rte_eth_random_addr(&hw->mac_addr[0]);
> +		PMD_INIT_LOG(DEBUG, "random dev mac: %02X:%02X:%02X:%02X:%02X:%02X",
> +				hw->mac_addr[0], hw->mac_addr[1],
> +				hw->mac_addr[2], hw->mac_addr[3],
> +				hw->mac_addr[4], hw->mac_addr[5]);
> +	}
> +	uint32_t max_queue_pairs;
> +
> +	zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs),
> +			&max_queue_pairs, sizeof(max_queue_pairs));
> +	PMD_INIT_LOG(DEBUG, "get max queue pairs %u", max_queue_pairs);
> +	if (max_queue_pairs == 0)
> +		hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;
> +	else
> +		hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs);
> +
> +	PMD_INIT_LOG(INFO, "set max queue pairs %d", hw->max_queue_pairs);
> +
> +	hw->weak_barriers = !vtpci_with_feature(hw, ZXDH_F_ORDER_PLATFORM);
> +	return 0;
> +}
> +
> +/*
> + * Pause the datapath. On success (return 0) the state_lock is HELD and
> + * must be released by a matching zxdh_dev_resume(). Returns -1 (lock
> + * released) if the device is already stopped.
> + */
> +int32_t zxdh_dev_pause(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	rte_spinlock_lock(&hw->state_lock);
> +
> +	if (hw->started == 0) {
> +		/* Device is just stopped. */
> +		rte_spinlock_unlock(&hw->state_lock);
> +		return -1;
> +	}
> +	hw->started = 0;
> +	hw->admin_status = 0;
> +	/*
> +	 * Prevent the worker threads from touching queues to avoid contention,
> +	 * 1 ms should be enough for the ongoing Tx function to finish.
> +	 */
> +	rte_delay_ms(1);
> +	return 0;
> +}
> +
> +/*
> + * Recover hw state to let the worker threads continue.
> + * Releases the state_lock taken by a successful zxdh_dev_pause().
> + */
> +void zxdh_dev_resume(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	hw->started = 1;
> +	hw->admin_status = 1;
> +	rte_spinlock_unlock(&hw->state_lock);
> +}
> +
> +/*
> + * Should be called only after device is paused.
> + * Transmits driver-generated packets (e.g. the RARP announcement from
> + * zxdh_notify_peers) on TX queue 0; hw->inject_pkts marks them for the
> + * burst function. Returns the number of packets sent.
> + */
> +int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts)
> +{
> +	struct zxdh_hw	*hw   = dev->data->dev_private;
> +	struct virtnet_tx *txvq = dev->data->tx_queues[0];
> +	int32_t ret = 0;
> +
> +	hw->inject_pkts = tx_pkts;
> +	ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
> +	hw->inject_pkts = NULL;
> +
> +	return ret;
> +}
>

Why does the driver need to inject packets?


By the way, this function seems to be called only from this file, so why
not make it 'static'?
Please make functions 'static' as much as possible.

> +
> +static void zxdh_notify_peers(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct virtnet_rx *rxvq = NULL;
> +	struct rte_mbuf *rarp_mbuf = NULL;
> +
> +	if (!dev->data->rx_queues)
> +		return;
> +
> +	rxvq = dev->data->rx_queues[0];
> +	if (!rxvq)
> +		return;
> +
> +	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool, (struct rte_ether_addr *)hw->mac_addr);
> +	if (rarp_mbuf == NULL) {
> +		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
> +		return;
> +	}
> +
> +	/* If virtio port just stopped, no need to send RARP */
> +	if (zxdh_dev_pause(dev) < 0) {
> +		rte_pktmbuf_free(rarp_mbuf);
> +		return;
> +	}
> +
> +	zxdh_inject_pkts(dev, &rarp_mbuf, 1);
> +	zxdh_dev_resume(dev);
> +}
> +/**
> + * Select the rx/tx burst handlers. Requires packed-queue and mergeable
> + * RX buffer support; returns -1 without touching the dev on failure.
> + */
> +static int32_t set_rxtx_funcs(struct rte_eth_dev *eth_dev)
> +{
> +	struct zxdh_hw *hw = eth_dev->data->dev_private;
> +
> +	if (!vtpci_packed_queue(hw)) {
> +		PMD_INIT_LOG(ERR, " port %u not support packed queue", eth_dev->data->port_id);
> +		return -1;
> +	}
> +	if (!vtpci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {
> +		PMD_INIT_LOG(ERR, " port %u not support rx mergeable", eth_dev->data->port_id);
> +		return -1;
> +	}
> +	/* Install the handlers only after the feature checks pass, so a
> +	 * failed call leaves the device untouched.
> +	 */
> +	eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;
> +	eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;
> +	eth_dev->rx_pkt_burst = &zxdh_recv_mergeable_pkts_packed;
> +	return 0;
> +}
> +/* Only support 1:1 queue/interrupt mapping so far.
> + * TODO: support n:1 queue/interrupt mapping when there are limited number of
> + * interrupt vectors (<N+1).
> + * RX queue i lives at hw->vqs[2i], its TX sibling at hw->vqs[2i+1];
> + * TX queue interrupts are always masked.
> + */
> +static int32_t zxdh_queues_bind_intr(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	int32_t i;
> +	uint16_t vec;
> +
> +	if (!dev->data->dev_conf.intr_conf.rxq) {
> +		/* RX interrupts not requested: mask every RX queue vector. */
> +		PMD_INIT_LOG(INFO, "queue/interrupt mask, nb_rx_queues %u",
> +				dev->data->nb_rx_queues);
> +		for (i = 0; i < dev->data->nb_rx_queues; ++i) {
> +			vec = VTPCI_OPS(hw)->set_queue_irq(hw,
> +					hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
> +			PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",
> +					i * 2, ZXDH_MSI_NO_VECTOR, vec);
> +		}
> +	} else {
> +		PMD_INIT_LOG(DEBUG, "queue/interrupt binding, nb_rx_queues %u",
> +				dev->data->nb_rx_queues);
> +		for (i = 0; i < dev->data->nb_rx_queues; ++i) {
> +			vec = VTPCI_OPS(hw)->set_queue_irq(hw,
> +					hw->vqs[i * 2], i + ZXDH_QUE_INTR_VEC_BASE);
> +			PMD_INIT_LOG(INFO, "vq%d irq set %d, get %d",
> +					i * 2, i + ZXDH_QUE_INTR_VEC_BASE, vec);
> +		}
> +	}
> +	/* mask all txq intr */
> +	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
> +		vec = VTPCI_OPS(hw)->set_queue_irq(hw,
> +				hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR);
> +		PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",
> +				(i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec);
> +	}
> +	return 0;
> +}
> +
> +/* Mask the interrupt vectors of every rx/tx virtqueue pair.
> + * NOTE(review): iterates nb_rx_queues for both halves of each pair --
> + * assumes nb_tx_queues == nb_rx_queues; confirm that holds.
> + */
> +static void zxdh_queues_unbind_intr(struct rte_eth_dev *dev)
> +{
> +	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	int32_t i;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
> +		VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
> +		VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);
> +	}
> +}
> +/**
> + * Build the DTB interrupt handle from the event fd already created on
> + * dev->intr_handle (vector ZXDH_MSIX_INTR_DTB_VEC). Allocates
> + * hw->dtb_intr on first use; frees it again if the fd is invalid.
> + */
> +static int32_t zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (!hw->dtb_intr) {
> +		hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0);
> +		if (hw->dtb_intr == NULL) {
> +			PMD_INIT_LOG(ERR, "Failed to allocate dtb_intr");
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) {
> +		PMD_INIT_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1);
> +		rte_free(hw->dtb_intr);
> +		hw->dtb_intr = NULL;
> +		return -1;
> +	}
> +	hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1];
> +	hw->dtb_intr->type = dev->intr_handle->type;
> +	return 0;
> +}
> +/**
> + * Build the per-vector RISC interrupt handles from the event fds
> + * already created on dev->intr_handle. Allocates hw->risc_intr on
> + * first use; frees it again if any fd is invalid.
> + */
> +static int32_t zxdh_setup_risc_interrupts(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (!hw->risc_intr) {
> +		/* Normal first-call path: log at DEBUG, not ERR, and with a
> +		 * meaningful message (was: ERR " to allocate risc_intr").
> +		 */
> +		PMD_INIT_LOG(DEBUG, "allocate risc_intr");
> +		hw->risc_intr = rte_zmalloc("risc_intr",
> +			ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0);
> +		if (hw->risc_intr == NULL) {
> +			PMD_INIT_LOG(ERR, "Failed to allocate risc_intr");
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	uint8_t i;
> +
> +	for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) {
> +		if (dev->intr_handle->efds[i] < 0) {
> +			PMD_INIT_LOG(ERR, "[%u]risc interrupt fd is invalid", i);
> +			rte_free(hw->risc_intr);
> +			hw->risc_intr = NULL;
> +			return -1;
> +		}
> +
> +		struct rte_intr_handle *intr_handle = hw->risc_intr + i;
> +
> +		intr_handle->fd = dev->intr_handle->efds[i];
> +		intr_handle->type = dev->intr_handle->type;
> +	}
> +
> +	return 0;
> +}
> +/**
> + * Fun: register the port's interrupt callbacks — the dev-config (link
> + * status) handler on the main intr handle plus the risc_v "from pf/vf"
> + * and "from riscv" message handlers on their dedicated vectors.
> + */
> +static void zxdh_intr_cb_reg(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	/* drop any stale registration first so the handler is not added twice */
> +	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
> +		rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
> +
> +	/* register callback to update dev config intr */
> +	rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev);
> +	/* Register rsic_v to pf interrupt callback */
> +	struct rte_intr_handle *tmp = hw->risc_intr +
> +			(MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
> +
> +	rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev);
> +
> +	tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
> +	rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev);
> +}
> +
> +/* Unregister every interrupt callback installed by zxdh_intr_cb_reg(). */
> +static void zxdh_intr_cb_unreg(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct rte_intr_handle *tmp;
> +
> +	/* dev config (link status) interrupt callback; the old code also
> +	 * unregistered it a second time under RTE_ETH_DEV_INTR_LSC and
> +	 * logged an empty ERR message — both removed.
> +	 */
> +	rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
> +
> +	/* risc_v to pf/vf message interrupt callbacks */
> +	tmp = hw->risc_intr + (MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
> +	rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev);
> +	tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
> +	rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev);
> +}
> +
> +/**
> + * Fun: allocate event fds and bind every interrupt source of the port
> + * (risc message vectors, dtb vector, rx queue vectors), then enable
> + * MSI-X. On any failure all interrupt resources are released again.
> + * Return: 0 on success, negative errno / -1 on failure.
> + */
> +static int32_t zxdh_configure_intr(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	int32_t ret = 0;
> +
> +	if (!rte_intr_cap_multiple(dev->intr_handle)) {
> +		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
> +		return -ENOTSUP;
> +	}
> +	/* start from a clean state in case of reconfiguration */
> +	zxdh_intr_release(dev);
> +	uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM;
> +
> +	if (dev->data->dev_conf.intr_conf.rxq)
> +		nb_efd += dev->data->nb_rx_queues;
> +
> +	if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) {
> +		PMD_INIT_LOG(ERR, "Fail to create eventfd");
> +		return -1;
> +	}
> +
> +	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
> +					hw->max_queue_pairs+ZXDH_INTR_NONQUE_NUM)) {
> +		PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
> +					hw->max_queue_pairs+ZXDH_INTR_NONQUE_NUM));
> +		return -ENOMEM;
> +	}
> +	PMD_INIT_LOG(INFO, "allocate %u rxq vectors", dev->intr_handle->vec_list_size);
> +	if (zxdh_setup_risc_interrupts(dev) != 0) {
> +		PMD_INIT_LOG(ERR, "Error setting up rsic_v interrupts!");
> +		ret = -1;
> +		goto free_intr_vec;
> +	}
> +	if (zxdh_setup_dtb_interrupts(dev) != 0) {
> +		PMD_INIT_LOG(ERR, "Error setting up dtb interrupts!");
> +		ret = -1;
> +		goto free_intr_vec;
> +	}
> +
> +	if (zxdh_queues_bind_intr(dev) < 0) {
> +		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
> +		ret = -1;
> +		goto free_intr_vec;
> +	}
> +	/** DO NOT try to remove this! This function will enable msix,
> +	 * or QEMU will encounter SIGSEGV when DRIVER_OK is sent.
> +	 * And for legacy devices, this should be done before queue/vec
> +	 * binding to change the config size from 20 to 24, or
> +	 * ZXDH_MSI_QUEUE_VECTOR (22) will be ignored.
> +	 **/
> +	if (zxdh_intr_enable(dev) < 0) {
> +		PMD_DRV_LOG(ERR, "interrupt enable failed");
> +		ret = -1;
> +		goto free_intr_vec;
> +	}
> +	return 0;
> +
> +free_intr_vec:
> +	zxdh_intr_release(dev);
> +	return ret;
> +}
> +/**
> + * Fun: reset device and renegotiate features if needed
> + */
> +struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
> +/*
> + * Read the PCI capabilities, reset the device and load the default
> + * link/MTU/MAC settings into 'hw'. LSC is advertised only when the
> + * device supports both the STATUS feature and MSI-X.
> + * Return: 0 on success, negative on failure.
> + */
> +static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)
> +{
> +	struct zxdh_hw *hw = eth_dev->data->dev_private;
> +	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
> +	int ret = zxdh_read_pci_caps(pci_dev, hw);
> +
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "port 0x%x pci caps read failed .", hw->vport.vport);
> +		goto err;
> +	}
> +	zxdh_hw_internal[hw->port_id].vtpci_ops = &zxdh_modern_ops;
> +	zxdh_vtpci_reset(hw);
> +	zxdh_get_pci_dev_config(hw);
> +	if (hw->vqs) { /* not reachable? */
> +		zxdh_dev_free_mbufs(eth_dev);
> +		ret = zxdh_free_queues(eth_dev);
> +		if (ret < 0) {
> +			PMD_INIT_LOG(ERR, "port 0x%x free queue failed.", hw->vport.vport);
> +			goto err;
> +		}
> +	}
> +	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
> +	hw->vtnet_hdr_size = ZXDH_DL_NET_HDR_SIZE;
> +	hw->otpid = RTE_ETHER_TYPE_VLAN;
> +	hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;
> +	hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
> +	/* largest MTU that fits a max-size frame after L2 + vlan + driver headers */
> +	hw->max_mtu = ZXDH_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN - VLAN_TAG_LEN - ZXDH_DL_NET_HDR_SIZE;
> +	PMD_INIT_LOG(DEBUG, "max_mtu=%u", hw->max_mtu);
> +	eth_dev->data->mtu = RTE_ETHER_MTU;
> +	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);
> +	PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
> +		eth_dev->data->mac_addrs->addr_bytes[0],
> +		eth_dev->data->mac_addrs->addr_bytes[1],
> +		eth_dev->data->mac_addrs->addr_bytes[2],
> +		eth_dev->data->mac_addrs->addr_bytes[3],
> +		eth_dev->data->mac_addrs->addr_bytes[4],
> +		eth_dev->data->mac_addrs->addr_bytes[5]);
> +	/* If host does not support both status and MSI-X then disable LSC */
> +	if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && (hw->use_msix != ZXDH_MSIX_NONE)) {
> +		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
> +		PMD_INIT_LOG(DEBUG, "LSC enable");
> +	} else {
> +		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
> +	}
> +	return 0;
> +
> +err:
> +	PMD_INIT_LOG(ERR, "port %d init device failed", eth_dev->data->port_id);
> +	return ret;
> +}
> +/**
> + * Fun: allocate per-port private resources — the VLAN filter bitmap
> + * and, on PF only, the VF info array. The old code memset() an
> + * unchecked rte_malloc() result (NULL deref on OOM); rte_zmalloc()
> + * zeroes the memory and the result is now checked. Failures are
> + * logged and the pointer left NULL for callers to detect.
> + */
> +static void zxdh_priv_res_init(struct zxdh_hw *hw)
> +{
> +	/* 64 * 64 bits covers the full 4K VLAN id space */
> +	hw->vlan_fiter = rte_zmalloc("vlan_filter", 64 * sizeof(uint64_t), 1);
> +	if (hw->vlan_fiter == NULL)
> +		PMD_DRV_LOG(ERR, "Failed to allocate vlan filter bitmap");
> +
> +	if (hw->is_pf) {
> +		hw->vfinfo = rte_zmalloc("vfinfo", ZXDH_MAX_VF * sizeof(struct vfinfo), 4);
> +		if (hw->vfinfo == NULL)
> +			PMD_DRV_LOG(ERR, "Failed to allocate vfinfo");
> +	} else {
> +		hw->vfinfo = NULL;
> +	}
> +}
> +/**
> + * Fun: derive and record each VF's pcie id from the PF's pcie id.
> + * Bails out (with an error log) on an out-of-range VF count or an
> + * uninitialized vfinfo array.
> + */
> +static void set_vfs_pcieid(struct zxdh_hw *hw)
> +{
> +	int vf_idx;
> +
> +	if (hw->pfinfo.vf_nums > ZXDH_MAX_VF) {
> +		PMD_DRV_LOG(ERR, "vf nums %u out of range", hw->pfinfo.vf_nums);
> +		return;
> +	}
> +	if (hw->vfinfo == NULL) {
> +		PMD_DRV_LOG(ERR, " vfinfo uninited");
> +		return;
> +	}
> +
> +	PMD_DRV_LOG(INFO, "vf nums %d", hw->pfinfo.vf_nums);
> +	for (vf_idx = 0; vf_idx < hw->pfinfo.vf_nums; vf_idx++)
> +		hw->vfinfo[vf_idx].pcieid = VF_PCIE_ID(hw->pcie_id, vf_idx);
> +}
> +
> +
> +/* Record the PF pcie id; on PF additionally derive the VF pcie ids. */
> +static void zxdh_sriovinfo_init(struct zxdh_hw *hw)
> +{
> +	hw->pfinfo.pcieid = PF_PCIE_ID(hw->pcie_id);
> +	if (!hw->is_pf)
> +		return;
> +	set_vfs_pcieid(hw);
> +}
> +/**
> + * Fun: PF<->VF bar-message framing.
> + */
> +#define SRIOV_MSGINFO_LEN  256
> +/* opcodes carried in PF->VF bar messages */
> +enum sriov_msg_opcode {
> +	SRIOV_SET_VF_MAC = 0,    /* pf set vf's mac */
> +	SRIOV_SET_VF_VLAN,       /* pf set vf's vlan */
> +	SRIOV_SET_VF_LINK_STATE, /* pf set vf's link state */
> +	SRIOV_VF_RESET,
> +	SET_RSS_TABLE,
> +	SRIOV_OPCODE_NUM,
> +};
> +/* message header; 'content' carries 'slen' payload bytes */
> +struct sriov_msg_payload {
> +	uint16_t pcieid;/* sender's pcie id */
> +	uint16_t vf_id;
> +	enum sriov_msg_opcode opcode;
> +	uint16_t slen;
> +	uint8_t content[]; /* C99 flexible array member (was the [0] GNU extension) */
> +} __rte_packed;
> +/*
> + * VF-side handler for bar messages sent by the PF. 'payload' holds a
> + * struct sriov_msg_payload; the reply is written to reps_buffer/reps_len.
> + * Return: 0 on success, negative on error.
> + * Fixes: 'eth_dev' was tagged __rte_unused although it is used; sprintf
> + * replaced by bounded snprintf; 'slen' validated before copying the MAC.
> + */
> +int vf_recv_bar_msg(void *payload, uint16_t len __rte_unused,
> +			void *reps_buffer, uint16_t *reps_len, void *eth_dev)
> +{
> +	int32_t ret = 0;
> +	struct zxdh_hw *hw;
> +	struct sriov_msg_payload *msg_payload = (struct sriov_msg_payload *)payload;
> +	struct zxdh_msg_reply_body *reply_body = reps_buffer;
> +	struct rte_eth_dev *dev = (struct rte_eth_dev *)eth_dev;
> +
> +	uint8_t *content = NULL;
> +	uint16_t vf_id = msg_payload->vf_id;
> +	uint16_t pcieid = msg_payload->pcieid;
> +	uint16_t opcode = msg_payload->opcode;
> +	uint16_t slen = msg_payload->slen;
> +
> +	content = msg_payload->content;
> +	if (dev == NULL) {
> +		PMD_DRV_LOG(ERR, "param invalid\n");
> +		ret = -2;
> +		return ret;
> +	}
> +	hw = dev->data->dev_private;
> +
> +	PMD_DRV_LOG(DEBUG, "%s content %p vf_id %d pcieid %x slen %d\n",
> +			__func__, content, vf_id, pcieid, slen);
> +	switch (opcode) {
> +	case SRIOV_SET_VF_MAC:
> +		PMD_DRV_LOG(DEBUG, "pf pcie id is 0x%x:\n", pcieid);
> +		PMD_DRV_LOG(DEBUG, "[VF GET MSG FROM PF]--vf mac is been set.\n");
> +		PMD_DRV_LOG(DEBUG, "VF[%d] old mac is %02X:%02X:%02X:%02X:%02X:%02X\n",
> +			vf_id,
> +			(hw->mac_addr)[0], (hw->mac_addr)[1], (hw->mac_addr)[2],
> +			(hw->mac_addr)[3], (hw->mac_addr)[4], (hw->mac_addr)[5]);
> +
> +		/* reject truncated payloads instead of copying garbage */
> +		if (slen < RTE_ETHER_ADDR_LEN) {
> +			PMD_DRV_LOG(ERR, "set mac: payload too short (%u)\n", slen);
> +			ret = -1;
> +			break;
> +		}
> +		memcpy(hw->mac_addr, content, RTE_ETHER_ADDR_LEN);
> +		reply_body->flag = ZXDH_REPS_SUCC;
> +		char str[ZXDH_MSG_REPLY_BODY_MAX_LEN];
> +
> +		/* snprintf is bounded and always NUL-terminates */
> +		snprintf(str, sizeof(str), "vf %d process msg set mac ok ", vf_id);
> +		memcpy(reply_body->reply_data, str, strlen(str) + 1);
> +		*reps_len = sizeof(*reply_body);
> +		break;
> +	case SRIOV_SET_VF_LINK_STATE:
> +		/* set vf link state(link up or link down) */
> +		PMD_DRV_LOG(DEBUG, "[VF GET MSG FROM PF]--vf link state is been set.\n");
> +		break;
> +	case SRIOV_VF_RESET:
> +		PMD_DRV_LOG(DEBUG, "[VF GET MSG FROM PF]--reset. port should be stopped\n");
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "[VF GET MSG FROM PF]--unknown msg opcode %d\n", opcode);
> +		ret = -1;
> +		break;
> +	}
> +	return ret;
> +}
> +/**
> + * Fun: dispatch a VF->PF message to its handler in proc_func[] (indexed
> + * by msg_type; the caller validates the range). A successful handler is
> + * marked in res->flag; *res_len is then grown by sizeof(res->flag) so it
> + * covers the whole reply body.
> + * Return: the handler's status, or -1 when params are invalid or no
> + * handler is registered for this type.
> + */
> +static inline int config_func_call(struct zxdh_hw *hw, struct zxdh_msg_info *msg_info,
> +			struct zxdh_msg_reply_body *res, uint16_t *res_len)
> +{
> +	int ret = -1;
> +	struct zxdh_msg_head *msghead = &(msg_info->msg_head);
> +	enum zxdh_msg_type msg_type = msghead->msg_type;
> +
> +	if (!res || !res_len) {
> +		PMD_DRV_LOG(INFO, "-%s  invalid param\n", __func__);
> +		return -1;
> +	}
> +	if (proc_func[msg_type]) {
> +		PMD_DRV_LOG(INFO, "-%s begin-msg_type:%d\n", __func__, msg_type);
> +		ret = proc_func[msg_type](hw, msghead->vport,
> +				(void *)&msg_info->data, res, res_len);
> +		if (!ret)
> +			res->flag = ZXDH_REPS_SUCC;
> +	} else {
> +		res->flag = ZXDH_REPS_FAIL;
> +	}
> +	*res_len += sizeof(res->flag);
> +	PMD_DRV_LOG(INFO, "-%s-end-msg_type:%d -res_len 0x%x\n",
> +			__func__, msg_type, *res_len);
> +	return ret;
> +}
> +/*
> + * PF-side handler for bar messages sent by VFs: dispatches to the
> + * per-type handler via config_func_call() and fills in the reply.
> + * Return: 0 on success, negative on error.
> + * Fixes: 'reply_len' used to be declared mid-function, so the
> + * 'goto msg_proc_end' paths jumped over its initializer and the error
> + * log read an indeterminate value; the error log also dumped the reply
> + * buffer before anything was written to it. 'eth_dev' was wrongly
> + * tagged __rte_unused although it is used.
> + */
> +int pf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
> +			uint16_t *reps_len, void *eth_dev)
> +{
> +	struct zxdh_msg_info *msg_info = (struct zxdh_msg_info *)pay_load;
> +	struct zxdh_msg_head *msghead = &(msg_info->msg_head);
> +	struct zxdh_msg_reply_body *reply_body = reps_buffer;
> +	uint16_t vf_id = msghead->vf_id;
> +	uint16_t pcieid = msghead->pcieid;
> +	uint16_t reply_len = 0; /* declared up front: the error path reads it */
> +	int32_t ret = 0;
> +	enum zxdh_msg_type msg_type = msghead->msg_type;
> +
> +	if (msg_type >= ZXDH_FUNC_END) {
> +		PMD_DRV_LOG(ERR, "%s vf_id %d pcieid 0x%x len %u msg_type %d unsupported\n",
> +				__func__, vf_id, pcieid, len, msg_type);
> +		ret = -2;
> +		goto msg_proc_end;
> +	}
> +	PMD_DRV_LOG(DEBUG, "%s vf_id %d pcieid 0x%x len %d msg_type %d\n",
> +			__func__, vf_id, pcieid, len, msg_type);
> +	struct rte_eth_dev *dev = (struct rte_eth_dev *)eth_dev;
> +
> +	if (dev == NULL) {
> +		PMD_DRV_LOG(ERR, "param invalid\n");
> +		ret = -2;
> +		goto msg_proc_end;
> +	}
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	ret = config_func_call(hw, msg_info, reply_body, &reply_len);
> +	*reps_len = reply_len + sizeof(struct zxdh_msg_reply_head);
> +	PMD_DRV_LOG(INFO, "len %d\n", *reps_len);
> +
> +	return ret;
> +
> +msg_proc_end:
> +	/* fill the reply before logging/dumping it */
> +	memcpy(reply_body->reply_data, &ret, sizeof(ret));
> +	reply_len = sizeof(ret);
> +	PMD_DRV_LOG(DEBUG, "[PF GET MSG FROM VF] proc result:ret 0x%x reply_len: 0x%x\n",
> +			ret, reply_len);
> +	*reps_len = sizeof(struct zxdh_msg_reply_head) + reply_len;
> +	rte_hexdump(stdout, "pf reply msg ", reply_body, reply_len);
> +	return ret;
> +}
> +/**
> + * Fun: hook the bar-message receive callback for this port; PF and VF
> + * listen on different message modules.
> + */
> +static void zxdh_msg_cb_reg(struct zxdh_hw *hw)
> +{
> +	if (hw->is_pf) {
> +		zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_PF, pf_recv_bar_msg);
> +		return;
> +	}
> +	zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_VF, vf_recv_bar_msg);
> +}
> +/* Free the per-port private resources allocated by zxdh_priv_res_init(). */
> +static void zxdh_priv_res_free(struct zxdh_hw *priv)
> +{
> +	/* rte_free(NULL) is a no-op; pointers are cleared against double free */
> +	rte_free(priv->vlan_fiter);
> +	rte_free(priv->vfinfo);
> +	rte_free(priv->reta_idx);
> +	priv->vlan_fiter = NULL;
> +	priv->vfinfo = NULL;
> +	priv->reta_idx = NULL;
> +}
> +
> +/* True when any rx offload feature was negotiated or vlan strip is on. */
> +static bool rx_offload_enabled(struct zxdh_hw *hw)
> +{
> +	if (hw->vlan_offload_cfg.vlan_strip == 1)
> +		return true;
> +	return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
> +		vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
> +		vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
> +}
> +
> +/* True when any tx offload feature was negotiated. */
> +static bool tx_offload_enabled(struct zxdh_hw *hw)
> +{
> +	if (vtpci_with_feature(hw, ZXDH_NET_F_CSUM))
> +		return true;
> +	if (vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4))
> +		return true;
> +	if (vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6))
> +		return true;
> +	return vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
> +}
> +
> +/*
> + * Negotiate device features from the requested rx/tx offloads.
> + * Return: 0 on success, -ENOTSUP when the host lacks a required feature.
> + * Fixes: the tx offload tests used RTE_ETH_RX_OFFLOAD_* flags and the
> + * final rx checksum test used RTE_ETH_TX_OFFLOAD_* flags (copy-paste);
> + * "%lx" for a uint64_t replaced with PRIx64; "featrue" typo fixed.
> + */
> +static int32_t zxdh_features_update(struct zxdh_hw *hw,
> +				const struct rte_eth_rxmode *rxmode,
> +				const struct rte_eth_txmode *txmode)
> +{
> +	uint64_t rx_offloads = rxmode->offloads;
> +	uint64_t tx_offloads = txmode->offloads;
> +	uint64_t req_features = hw->guest_features;
> +
> +	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
> +		req_features |= (1ULL << ZXDH_NET_F_GUEST_CSUM);
> +
> +	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
> +		req_features |= (1ULL << ZXDH_NET_F_GUEST_TSO4) |
> +						(1ULL << ZXDH_NET_F_GUEST_TSO6);
> +
> +	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
> +		req_features |= (1ULL << ZXDH_NET_F_CSUM);
> +
> +	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
> +		req_features |= (1ULL << ZXDH_NET_F_HOST_TSO4) |
> +						(1ULL << ZXDH_NET_F_HOST_TSO6);
> +
> +	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO)
> +		req_features |= (1ULL << ZXDH_NET_F_HOST_UFO);
> +
> +	/* only request what the host actually offers */
> +	req_features = req_features & hw->host_features;
> +	hw->guest_features = req_features;
> +
> +	VTPCI_OPS(hw)->set_features(hw, req_features);
> +
> +	PMD_INIT_LOG(INFO, "set feature %" PRIx64 "!", req_features);
> +
> +	PMD_INIT_LOG(DEBUG, "host_features	= %" PRIx64, hw->host_features);
> +	PMD_INIT_LOG(DEBUG, "guest_features = %" PRIx64, hw->guest_features);
> +
> +	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
> +		 !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
> +		PMD_DRV_LOG(ERR, "rx checksum not available on this host");
> +		return -ENOTSUP;
> +	}
> +
> +	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
> +		(!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
> +		 !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
> +		PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
> +		return -ENOTSUP;
> +	}
> +	return 0;
> +}
> +/**
> + * Fun: try to take the VF hw lock once by reading ZXDH_VF_LOCK_REG.
> + * NOTE(review): this is a read-only check; presumably the hardware
> + * side-effects the lock bit on read — confirm the lock protocol.
> + * Return: 0 when acquired, -1 when the lock is not available.
> + */
> +int32_t zxdh_acquire_lock(struct rte_eth_dev *dev)
> +{
> +	uint32_t var = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, ZXDH_VF_LOCK_REG);
> +
> +	/* the lock is ours only when the enable bit reads back set */
> +	if (var & ZXDH_VF_LOCK_ENABLE_MASK)
> +		return 0;
> +	return -1;
> +}
> +/**
> + * Fun: release the VF hw lock by clearing its enable bit.
> + * Return: 0 on success, -1 when the lock was not held.
> + */
> +int32_t zxdh_release_lock(struct rte_eth_dev *dev)
> +{
> +	uint32_t var = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, ZXDH_VF_LOCK_REG);
> +
> +	if (!(var & ZXDH_VF_LOCK_ENABLE_MASK)) {
> +		PMD_INIT_LOG(ERR, "No lock need to be release\n");
> +		return -1;
> +	}
> +
> +	zxdh_write_reg(dev, ZXDH_BAR0_INDEX, ZXDH_VF_LOCK_REG,
> +			var & ~ZXDH_VF_LOCK_ENABLE_MASK);
> +	return 0;
> +}
> +/**
> + * Fun:
> + */
> +static int32_t zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
> +{
> +	uint16_t base	 = (queue_type == VTNET_RQ) ? 0 : 1;  /* txq only polls odd bits*/
> +	uint16_t i		 = 0;
> +	uint16_t j		 = 0;
> +	uint16_t done	 = 0;
> +	uint16_t timeout = 0;
>

It seems intentions is to allign '=' but went wrong, please fix it. And
there are more instance of this, please scan all code to fix them.

> +
> +	while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
> +		rte_delay_us_block(1000);
> +		/* acquire hw lock */
> +		if (zxdh_acquire_lock(dev) < 0) {
> +			PMD_INIT_LOG(ERR, "Acquiring hw lock got failed, timeout: %d", timeout);
> +			continue;
> +		}
> +		/* Iterate COI table and find free channel */
> +		for (i = ZXDH_QUEUES_BASE/32; i < ZXDH_TOTAL_QUEUES_NUM/32; i++) {
> +			uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t));
> +			uint32_t var = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, addr);
> +
> +			for (j = base; j < 32; j += 2) {
> +				/* Got the available channel & update COI table */
> +				if ((var & (1 << j)) == 0) {
> +					var |= (1 << j);
> +					zxdh_write_reg(dev, ZXDH_BAR0_INDEX, addr, var);
> +					done = 1;
> +					break;
> +				}
> +			}
> +			if (done)
> +				break;
> +		}
> +		break;
> +	}
> +	if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
> +		PMD_INIT_LOG(ERR, "Failed to acquire channel");
> +		return -1;
> +	}
> +	zxdh_release_lock(dev);
> +	/* check for no channel condition */
> +	if (done != 1) {
> +		PMD_INIT_LOG(ERR, "NO availd queues\n");
> +		return -1;
> +	}
> +	/* reruen available channel ID */
> +	return (i * 32) + j;
> +}
> +/**
> + * Fun: map logic channel 'lch' to a free physical channel.
> + * NOTE(review): the return value is inconsistent — an already-acquired
> + * channel returns its ph_chno, a fresh acquisition returns 0, and
> + * failure returns -1. Confirm what callers expect before relying on it.
> + */
> +int32_t zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (hw->channel_context[lch].valid == 1) {
> +		PMD_INIT_LOG(DEBUG, "Logic channel:%u already acquired Physics channel:%u",
> +				lch, hw->channel_context[lch].ph_chno);
> +		return hw->channel_context[lch].ph_chno;
> +	}
> +	int32_t pch = zxdh_get_available_channel(dev, get_queue_type(lch));
> +
> +	if (pch < 0) {
> +		PMD_INIT_LOG(ERR, "Failed to acquire channel");
> +		return -1;
> +	}
> +	hw->channel_context[lch].ph_chno = (uint16_t)pch;
> +	hw->channel_context[lch].valid = 1;
> +	PMD_INIT_LOG(DEBUG, "Acquire channel success lch:%u --> pch:%d", lch, pch);
> +	return 0;
> +}
> +/**
> + * Fun: return every valid logic->physical channel mapping to the COI
> + * table; the hw lock is held while the table is updated.
> + * Return: 0 on success, -1 when the lock could not be acquired.
> + * Fix: as in zxdh_get_available_channel(), 'timeout >= MAX' misreported
> + * failure (and skipped the release) when the lock was acquired on the
> + * last retry; an explicit 'locked' flag now decides the failure path.
> + */
> +int32_t zxdh_release_channel(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint16_t nr_vq = hw->queue_num;
> +	uint32_t var  = 0;
> +	uint32_t addr = 0;
> +	uint32_t widx = 0;
> +	uint32_t bidx = 0;
> +	uint16_t pch  = 0;
> +	uint16_t lch  = 0;
> +	uint16_t timeout = 0;
> +	uint16_t locked  = 0;
> +
> +	while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
> +		if (zxdh_acquire_lock(dev) != 0) {
> +			PMD_INIT_LOG(ERR,
> +				"Could not acquire lock to release channel, timeout %d", timeout);
> +			continue;
> +		}
> +		locked = 1;
> +		break;
> +	}
> +
> +	if (!locked) {
> +		PMD_INIT_LOG(ERR, "Acquire lock timeout");
> +		return -1;
> +	}
> +
> +	for (lch = 0; lch < nr_vq; lch++) {
> +		if (hw->channel_context[lch].valid == 0) {
> +			PMD_INIT_LOG(DEBUG, "Logic channel %d does not need to release", lch);
> +			continue;
> +		}
> +
> +		/* get coi table offset and index */
> +		pch  = hw->channel_context[lch].ph_chno;
> +		widx = pch / 32;
> +		bidx = pch % 32;
> +
> +		addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));
> +		var  = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, addr);
> +		var &= ~(1 << bidx);
> +		zxdh_write_reg(dev, ZXDH_BAR0_INDEX, addr, var);
> +
> +		hw->channel_context[lch].valid = 0;
> +		hw->channel_context[lch].ph_chno = 0;
> +	}
> +
> +	zxdh_release_lock(dev);
> +
> +	return 0;
> +}
> +
> +/*
> + * Write the default broadcast / unicast-flood / multicast-flood
> + * attribute entries for all 4 vf groups of this port into the DTB
> + * eram tables. Returns 0 on success, the failing dpp_* code otherwise.
> + */
> +static int32_t zxdh_promisc_table_init(struct zxdh_hw *hw)
> +{
> +	uint32_t ret, vf_group_id = 0;
> +	struct zxdh_brocast_t brocast_table = {0};
> +	struct zxdh_unitcast_t uc_table = {0};
> +	struct zxdh_multicast_t mc_table = {0};
> +
> +	for (; vf_group_id < 4; vf_group_id++) {
> +		/* each entry is indexed by (vfid - base) * 4 + group */
> +		brocast_table.flag = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG);
> +		DPP_DTB_ERAM_ENTRY_INFO_T eram_brocast_entry = {
> +			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
> +			(ZXIC_UINT32 *)&brocast_table
> +		};
> +		DPP_DTB_USER_ENTRY_T entry_brocast = {
> +			.sdt_no = ZXDH_SDT_BROCAST_ATT_TABLE,
> +			.p_entry_data = (void *)&eram_brocast_entry
> +		};
> +
> +		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry_brocast);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write eram-brocast failed, code:%d", ret);
> +			return ret;
> +		}
> +
> +		uc_table.uc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG);
> +		DPP_DTB_ERAM_ENTRY_INFO_T eram_uc_entry = {
> +			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
> +			(ZXIC_UINT32 *)&uc_table
> +		};
> +		DPP_DTB_USER_ENTRY_T entry_unicast = {
> +			.sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE,
> +			.p_entry_data = (void *)&eram_uc_entry
> +		};
> +
> +		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry_unicast);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write eram-unicast failed, code:%d", ret);
> +			return ret;
> +		}
> +
> +		mc_table.mc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG);
> +		DPP_DTB_ERAM_ENTRY_INFO_T eram_mc_entry = {
> +			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
> +			(ZXIC_UINT32 *)&mc_table
> +		};
> +		DPP_DTB_USER_ENTRY_T entry_multicast = {
> +			.sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE,
> +			.p_entry_data = (void *)&eram_mc_entry
> +		};
> +
> +		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid,
> +					1, &entry_multicast);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Write eram-multicast failed, code:%d", ret);
> +			return ret;
> +		}
> +	}
> +
> +	PMD_DRV_LOG(DEBUG, "write promise tbl hw->hash_search_index:%d, vqm_vfid:%d",
> +			hw->hash_search_index, hw->vfid);
> +
> +	return ret;
> +}
> +
> +/*
> + * Program the port's base queue id: the PF writes the vport attribute
> + * table directly, a VF asks its PF over the bar message channel.
> + * Return: 0 on success, non-zero on failure.
> + * Fix: the dpp_dtb_entry_get() result was silently discarded, so a
> + * failed read-modify-write could clobber the other port attributes.
> + */
> +static int zxdh_config_qid(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct zxdh_port_att_entry port_attr = {0};
> +	struct zxdh_msg_info msg_info = {0};
> +	int ret = 0;
> +
> +	if (hw->is_pf) {
> +		DPP_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (ZXIC_UINT32 *)&port_attr};
> +		DPP_DTB_USER_ENTRY_T entry = {
> +			.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
> +			.p_entry_data = (void *)&port_attr_entry
> +		};
> +
> +		/* read-modify-write: keep the other attributes intact */
> +		ret = dpp_dtb_entry_get(DEVICE_NO, g_dtb_data.queueid, &entry, 1);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "PF:%d port attr get failed\n", hw->vfid);
> +			return -ret;
> +		}
> +		port_attr.port_base_qid = hw->channel_context[0].ph_chno & 0xfff;
> +
> +		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "PF:%d port_base_qid insert failed\n", hw->vfid);
> +			return -ret;
> +		}
> +	} else {
> +		struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_set_msg;
> +
> +		msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
> +		attr_msg->mode = EGR_FLAG_PORT_BASE_QID;
> +		attr_msg->value = hw->channel_context[0].ph_chno & 0xfff;
> +		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
> +					hw->vport.vport, EGR_FLAG_PORT_BASE_QID);
> +			return ret;
> +		}
> +	}
> +	return ret;
> +}
> +/*
> + * Configure virtio device
>

A 'virtio' device?
Is the host-facing interface exposed by the device a virtio-net interface?



> + * It returns 0 on success.
> + */
> +/*
> + * Configure the device (ethdev .dev_configure): validate queue counts
> + * and mq modes, negotiate features, and rebuild the virtqueues when
> + * their number changed. Returns 0 on success, negative on failure.
> + * Fixes: the rx/tx mq_mode validation block was duplicated verbatim;
> + * the zxdh_config_qid() failure path leaked the freshly allocated
> + * queues (the interrupt failure path already freed them).
> + */
> +int32_t zxdh_dev_configure(struct rte_eth_dev *dev)
> +{
> +	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
> +	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint64_t rx_offloads = rxmode->offloads;
> +	uint32_t nr_vq = 0;
> +	int32_t  ret = 0;
> +
> +	PMD_INIT_LOG(DEBUG, "configure");
> +
> +	/* rx and tx queues come in pairs and share the channel space */
> +	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
> +		PMD_INIT_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!",
> +					 dev->data->nb_rx_queues, dev->data->nb_tx_queues);
> +		return -EINVAL;
> +	}
> +	if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) {
> +		PMD_INIT_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!",
> +					 dev->data->nb_rx_queues, dev->data->nb_tx_queues,
> +					 ZXDH_QUEUES_NUM_MAX);
> +		return -EINVAL;
> +	}
> +	if ((rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) && (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE))	{
> +		PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode);
> +		return -EINVAL;
> +	}
> +
> +	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
> +		PMD_DRV_LOG(ERR, "Unsupported Tx multi queue mode %d", txmode->mq_mode);
> +		return -EINVAL;
> +	}
> +
> +	ret = zxdh_features_update(hw, rxmode, txmode);
> +	if (ret < 0)
> +		return ret;
> +
> +	/* check if lsc interrupt feature is enabled */
> +	if (dev->data->dev_conf.intr_conf.lsc) {
> +		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
> +			PMD_DRV_LOG(ERR, "link status not supported by host");
> +			return -ENOTSUP;
> +		}
> +	}
> +	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
> +		hw->vlan_offload_cfg.vlan_strip = 1;
> +
> +	hw->has_tx_offload = tx_offload_enabled(hw);
> +	hw->has_rx_offload = rx_offload_enabled(hw);
> +
> +	nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
> +	if (nr_vq == hw->queue_num) {
> +		/* no queue change: skip the device reset and queue rebuild */
> +		goto conf_end;
> +	}
> +
> +	PMD_DRV_LOG(DEBUG, "que changed need reset ");
> +	/* Reset the device although not necessary at startup */
> +	zxdh_vtpci_reset(hw);
> +
> +	/* Tell the host we've noticed this device. */
> +	zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
> +
> +	/* Tell the host we've known how to drive the device. */
> +	zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
> +	/* The queue needs to be released when reconfiguring*/
> +	if (hw->vqs != NULL) {
> +		zxdh_dev_free_mbufs(dev);
> +		zxdh_free_queues(dev);
> +	}
> +
> +	hw->queue_num = nr_vq;
> +	ret = zxdh_alloc_queues(dev, nr_vq);
> +	if (ret < 0)
> +		return ret;
> +
> +	zxdh_datach_set(dev);
> +
> +	if (zxdh_configure_intr(dev) < 0) {
> +		PMD_INIT_LOG(ERR, "Failed to configure interrupt");
> +		zxdh_free_queues(dev);
> +		return -1;
> +	}
> +	ret = zxdh_config_qid(dev);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to configure base qid!");
> +		/* release the queues allocated above, as the intr path does */
> +		zxdh_free_queues(dev);
> +		return -1;
> +	}
> +
> +	zxdh_vtpci_reinit_complete(hw);
> +
> +conf_end:
> +	ret = zxdh_rx_csum_lro_offload_configure(dev);
> +	if (ret)
> +		PMD_INIT_LOG(ERR, "Failed to configure csum offload!");
> +
> +	zxdh_dev_conf_offload(dev);
> +	PMD_INIT_LOG(DEBUG, " configure end");
> +
> +	return ret;
> +}
> +
> +/* Initialise the VLAN_GROUP_NUM vlan-filter groups of 'vfid' in the vlan table. */
> +int zxdh_vlan_filter_table_init(uint16_t vfid)
> +{
> +	struct zxdh_vlan_t vlan_table = {0};
> +	int16_t ret = 0;
> +	uint8_t group;
> +
> +	for (group = 0; group < VLAN_GROUP_NUM; group++) {
> +		uint32_t index = (group << VQM_VFID_BITS) | vfid;
> +		DPP_DTB_ERAM_ENTRY_INFO_T entry_data = {index, (ZXIC_UINT32 *)&vlan_table};
> +		DPP_DTB_USER_ENTRY_T user_entry = {ZXDH_SDT_VLAN_ATT_TABLE, &entry_data};
> +
> +		/* only group 0 carries the valid bits; the others start empty */
> +		if (group == 0) {
> +			vlan_table.vlans[0] |= (1 << FIRST_VLAN_GROUP_VALID_BITS);
> +			vlan_table.vlans[0] |= (1 << VLAN_GROUP_VALID_BITS);
> +		} else {
> +			vlan_table.vlans[0] = 0;
> +		}
> +
> +		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &user_entry);
> +		if (ret != DPP_OK)
> +			PMD_INIT_LOG(WARNING,
> +				"[vfid:%d], vlan_group:%d, init vlan filter tbl failed, ret:%d",
> +				vfid, group, ret);
> +	}
> +	return ret;
> +}
> +
> +/*
> + * Install the port's primary MAC: the PF programs it directly, a VF
> + * asks its PF over the bar message channel. Returns 0 on success.
> + * Fix: the PF path incremented the unicast counter even when the MAC
> + * add failed; uc_num now only counts successfully added addresses
> + * (matching the VF path).
> + */
> +static int zxdh_mac_config(struct rte_eth_dev *eth_dev)
> +{
> +	struct zxdh_hw *hw = eth_dev->data->dev_private;
> +	struct zxdh_msg_info msg_info = {0};
> +	int ret = 0;
> +
> +	if (hw->is_pf == 1) {
> +		PMD_INIT_LOG(INFO, "mac_config pf");
> +		ret = dev_mac_addr_add(hw->vport.vport,
> +				&eth_dev->data->mac_addrs[0], hw->hash_search_index);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to add mac: port 0x%x", hw->vport.vport);
> +			return ret;
> +		}
> +		hw->uc_num++;
> +	} else {
> +		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", hw->vport.vport);
> +		struct zxdh_mac_filter *mac_filter = &msg_info.data.zxdh_mac_filter;
> +
> +		mac_filter->filter_flag = 0xff;
> +		rte_memcpy(&mac_filter->mac, &eth_dev->data->mac_addrs[0],
> +				sizeof(eth_dev->data->mac_addrs[0]));
> +		msg_head_build(hw, ZXDH_MAC_ADD, &msg_info);
> +		ret = zxdh_vf_send_msg_to_pf(eth_dev, &msg_info, sizeof(msg_info), NULL, 0);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
> +					hw->vport.vport, ZXDH_MAC_ADD);
> +			return ret;
> +		}
> +		hw->uc_num++;
> +	}
> +	return ret;
> +}
> +
> +/*
> + * Publish the port link state: the PF updates the vport attribute
> + * table directly, a VF asks its PF over the bar message channel.
> + * Return: 0 on success, non-zero on failure.
> + * Fix: the dpp_dtb_entry_get() result was silently discarded, so a
> + * failed read-modify-write could clobber the other port attributes.
> + */
> +int32_t zxdh_dev_config_port_status(struct rte_eth_dev *dev, uint16_t link_status)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct zxdh_port_att_entry port_attr = {0};
> +	struct zxdh_msg_info msg_info = {0};
> +	int32_t ret = 0;
> +
> +	if (hw->is_pf) {
> +		DPP_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (ZXIC_UINT32 *)&port_attr};
> +		DPP_DTB_USER_ENTRY_T entry = {
> +			.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
> +			.p_entry_data = (void *)&port_attr_entry
> +		};
> +
> +		/* read-modify-write: keep the other attributes intact */
> +		ret = dpp_dtb_entry_get(DEVICE_NO, g_dtb_data.queueid, &entry, 1);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "PF:%d port attr get failed\n", hw->vfid);
> +			return -ret;
> +		}
> +		port_attr.is_up = link_status;
> +
> +		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "PF:%d port_is_up insert failed\n", hw->vfid);
> +			return -ret;
> +		}
> +	} else {
> +		struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_set_msg;
> +
> +		msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
> +		attr_msg->mode = EGR_FLAG_VPORT_IS_UP;
> +		attr_msg->value = link_status;
> +		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
> +				hw->vport.vport, EGR_FLAG_VPORT_IS_UP);
> +			return ret;
> +		}
> +	}
> +	return ret;
> +}
> +/**
> + * Fun: start the device — finish rx queue setup, enable interrupts,
> + * flush and notify all queues, then program the port MAC.
> + * Return: negative on rx setup failure, -EIO when interrupts cannot be
> + * enabled, otherwise 0 (even when MAC config fails — see NOTE below).
> + */
> +int32_t zxdh_dev_start(struct rte_eth_dev *dev)
> +{
> +	int32_t ret;
> +	uint16_t vtpci_logic_qidx;
> +	/* Finish the initialization of the queues */
> +	uint16_t i;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		vtpci_logic_qidx = 2 * i + RQ_QUEUE_IDX;
> +		ret = zxdh_dev_rx_queue_setup_finish(dev, vtpci_logic_qidx);
> +		if (ret < 0)
> +			return ret;
> +	}
> +	set_rxtx_funcs(dev);
> +	ret = zxdh_intr_enable(dev);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "interrupt enable failed");
> +		return -EIO;
> +	}
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct virtqueue *vq;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		vtpci_logic_qidx = 2 * i + RQ_QUEUE_IDX;
> +		vq = hw->vqs[vtpci_logic_qidx];
> +		/* Flush the old packets */
> +		zxdh_virtqueue_rxvq_flush(vq);
> +		virtqueue_notify(vq);
> +	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		vtpci_logic_qidx = 2 * i + TQ_QUEUE_IDX;
> +		vq = hw->vqs[vtpci_logic_qidx];
> +		virtqueue_notify(vq);
> +	}
> +	hw->started = true;
> +	ret = zxdh_mac_config(hw->eth_dev);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, " mac config failed");
> +		/* NOTE(review): calling set_link_up on the FAILURE path looks
> +		 * inverted, and the error is then swallowed (0 is returned) —
> +		 * confirm whether link-up belongs on the success path instead.
> +		 */
> +		zxdh_dev_set_link_up(dev);
> +	}
> +	return 0;
> +}
> +
> +/* Drop every mbuf still attached to any rx/tx virtqueue of the port. */
> +static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	uint16_t nr_vq = hw->queue_num;
> +	uint32_t qidx;
> +	uint32_t freed = 0;
> +
> +	if (hw->vqs == NULL)
> +		return;
> +
> +	for (qidx = 0; qidx < nr_vq; qidx++) {
> +		struct virtqueue *vq = hw->vqs[qidx];
> +		struct rte_mbuf *mbuf;
> +		const char *type __rte_unused;
> +		int32_t qtype;
> +
> +		if (vq == NULL)
> +			continue;
> +
> +		qtype = get_queue_type(qidx);
> +		if (qtype == VTNET_RQ)
> +			type = "rxq";
> +		else if (qtype == VTNET_TQ)
> +			type = "txq";
> +		else
> +			continue;
> +
> +		PMD_INIT_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, qidx);
> +		while ((mbuf = zxdh_virtqueue_detach_unused(vq)) != NULL) {
> +			rte_pktmbuf_free(mbuf);
> +			freed++;
> +		}
> +		PMD_INIT_LOG(DEBUG, "After freeing %s[%d] used and unused buf", type, qidx);
> +	}
> +
> +	PMD_INIT_LOG(DEBUG, "%d mbufs freed", freed);
> +}
> +
> +/*
> + * Stop device: disable interrupt and mark link down
> + */
> +int32_t zxdh_dev_stop(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (dev->data->dev_started == 0)
> +		return 0;
> +
> +	PMD_INIT_LOG(DEBUG, "stop");
> +
> +	rte_spinlock_lock(&hw->state_lock);
> +	if (!hw->started)
> +		goto out_unlock;
> +	hw->started = 0;
> +
> +	zxdh_intr_disable(dev);
> +	zxdh_dev_set_link_down(dev);
> +	/*que disable*/
> +
> +out_unlock:
> +	rte_spinlock_unlock(&hw->state_lock);
> +
> +	return 0;
> +}
> +/**
> + *  Fun:
> + */
> +static uint32_t zxdh_dev_speed_capa_get(uint32_t speed)
> +{
> +	switch (speed) {
> +	case RTE_ETH_SPEED_NUM_10G:  return RTE_ETH_LINK_SPEED_10G;
> +	case RTE_ETH_SPEED_NUM_20G:  return RTE_ETH_LINK_SPEED_20G;
> +	case RTE_ETH_SPEED_NUM_25G:  return RTE_ETH_LINK_SPEED_25G;
> +	case RTE_ETH_SPEED_NUM_40G:  return RTE_ETH_LINK_SPEED_40G;
> +	case RTE_ETH_SPEED_NUM_50G:  return RTE_ETH_LINK_SPEED_50G;
> +	case RTE_ETH_SPEED_NUM_56G:  return RTE_ETH_LINK_SPEED_56G;
> +	case RTE_ETH_SPEED_NUM_100G: return RTE_ETH_LINK_SPEED_100G;
> +	case RTE_ETH_SPEED_NUM_200G: return RTE_ETH_LINK_SPEED_200G;
> +	default:                     return 0;
> +	}
> +}
> +int32_t zxdh_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	dev_info->speed_capa	   = zxdh_dev_speed_capa_get(hw->speed);
> +	dev_info->max_rx_queues    = RTE_MIN(hw->max_queue_pairs, ZXDH_RX_QUEUES_MAX);
> +	dev_info->max_tx_queues    = RTE_MIN(hw->max_queue_pairs, ZXDH_TX_QUEUES_MAX);
> +	dev_info->min_rx_bufsize   = ZXDH_MIN_RX_BUFSIZE;
> +	dev_info->max_rx_pktlen    = ZXDH_MAX_RX_PKTLEN;
> +	dev_info->max_mac_addrs    = ZXDH_MAX_MAC_ADDRS;
> +	dev_info->rx_offload_capa  = (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
> +					RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
> +					RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
> +	dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
> +					RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
> +					RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
> +					RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM);
> +	dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_SCATTER);
> +	dev_info->rx_offload_capa |=  RTE_ETH_RX_OFFLOAD_TCP_LRO;
> +	dev_info->rx_offload_capa |=  RTE_ETH_RX_OFFLOAD_RSS_HASH;
> +
> +	dev_info->reta_size = ZXDH_RETA_SIZE;
> +	dev_info->hash_key_size = ZXDH_RSK_LEN;
> +	dev_info->flow_type_rss_offloads = ZXDH_RSS_HF;
> +	dev_info->max_mtu = hw->max_mtu;
> +	dev_info->min_mtu = 50;
> +
> +	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS);
> +	dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
> +					RTE_ETH_TX_OFFLOAD_UDP_TSO);
> +	dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
> +					RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
> +					RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO);
> +	dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
> +					RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
> +					RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
> +					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> +					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM);
> +
> +	return 0;
> +}
> +/**
> + * Fun:
> + */
> +static void zxdh_log_init(void)
> +{
> +#ifdef RTE_LIBRTE_ZXDH_DEBUG_TX
> +	if (zxdh_logtype_tx >= 0)
> +		rte_log_set_level(zxdh_logtype_tx, RTE_LOG_DEBUG);
> +#endif
> +#ifdef RTE_LIBRTE_ZXDH_DEBUG_RX
> +	if (zxdh_logtype_rx >= 0)
> +		rte_log_set_level(zxdh_logtype_rx, RTE_LOG_DEBUG);
> +#endif
>

If you put logging in the datapath, even when it is not printing it may
consume a few cycles, so you may prefer to use macros specific to datapath
logging, enabled/disabled with 'RTE_ETHDEV_DEBUG_RX' &
'RTE_ETHDEV_DEBUG_TX' rather than driver-specific macros.

> +#ifdef RTE_LIBRTE_ZXDH_DEBUG_MSG
> +	if (zxdh_logtype_msg >= 0)
> +		rte_log_set_level(zxdh_logtype_msg, RTE_LOG_DEBUG);
> +#endif
>

The log level is already dynamically configurable, so do we need a
compile-time macro for this? Since we are trying to remove compile-time
flags as much as possible, it would be nice to get rid of RTE_LIBRTE_ZXDH_DEBUG_MSG.

> +}
> +
> +struct zxdh_dtb_shared_data g_dtb_data = {0};
> +
> +static int zxdh_tbl_entry_destroy(struct rte_eth_dev *dev)
> +{
> +	int ret = 0;
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (!g_dtb_data.init_done)
> +		return ret;
> +
> +	if (hw->is_pf) {
> +		/*hash  &ddr*/
> +		uint32_t sdt_no;
> +
> +		sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);
> +		ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);
> +		PMD_DRV_LOG(INFO, "%s dpp_dtb_hash_online_delete sdt_no %d",
> +				dev->data->name, sdt_no);
> +		if (ret)
> +			PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed",
> +				dev->data->name, sdt_no);
> +
> +		sdt_no = MK_SDT_NO(MC, hw->hash_search_index);
> +		ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);
> +		PMD_DRV_LOG(INFO, "%s dpp_dtb_hash_online_delete sdt_no %d",
> +				dev->data->name, sdt_no);
> +		if (ret)
> +			PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed",
> +				dev->data->name, sdt_no);
> +	}
> +
> +	return ret;
> +}
> +/**
> + * Fun:
> + */
> +#define INVALID_DTBQUE  0xFFFF
/*
 * Release the global DTB resources (NP online session and all memzones)
 * if this device is the one they are bound to, then clear the shared
 * npsdk_init_done flag so a later probe re-initialises the NP SDK.
 */
static void _dtb_data_res_free(struct zxdh_hw *hw)
{
	struct rte_eth_dev *dev = hw->eth_dev;

	/* Only the device the DTB data was bound to may free it. */
	if ((g_dtb_data.init_done) && (g_dtb_data.bind_device == dev))  {
		PMD_DRV_LOG(INFO, "%s g_dtb_data free queue %d",
				dev->data->name, g_dtb_data.queueid);

		int ret = 0;

		/* Tear down the NP online session; failure is logged only,
		 * the memzone cleanup below still proceeds.
		 */
		ret = dpp_np_online_uninstall(0, dev->data->name, g_dtb_data.queueid);
		if (ret)
			PMD_DRV_LOG(ERR, "%s dpp_np_online_uninstall failed", dev->data->name);

		PMD_DRV_LOG(INFO, "%s dpp_np_online_uninstall queid %d",
				dev->data->name, g_dtb_data.queueid);
		if (g_dtb_data.dtb_table_conf_mz) {
			rte_memzone_free(g_dtb_data.dtb_table_conf_mz);
			PMD_DRV_LOG(INFO, "%s free  dtb_table_conf_mz  ", dev->data->name);
			g_dtb_data.dtb_table_conf_mz = NULL;
		}
		if (g_dtb_data.dtb_table_dump_mz) {

			PMD_DRV_LOG(INFO, "%s free  dtb_table_dump_mz  ", dev->data->name);
			rte_memzone_free(g_dtb_data.dtb_table_dump_mz);
			g_dtb_data.dtb_table_dump_mz = NULL;
		}
		int i;

		/* Bulk-dump memzones reserved in zxdh_dtb_dump_res_init(). */
		for (i = 0; i < DPU_MAX_BASE_DTB_TABLE_COUNT; i++) {
			if (g_dtb_data.dtb_table_bulk_dump_mz[i]) {
				rte_memzone_free(g_dtb_data.dtb_table_bulk_dump_mz[i]);

				PMD_DRV_LOG(INFO, "%s free dtb_table_bulk_dump_mz[%d]",
						dev->data->name, i);
				g_dtb_data.dtb_table_bulk_dump_mz[i] = NULL;
			}
		}
		g_dtb_data.init_done = 0;
		g_dtb_data.bind_device = NULL;
	}
	/* NOTE(review): clears the shared flag even when this device was
	 * not the bind_device — confirm this is intended.
	 */
	if (zxdh_shared_data != NULL)
		zxdh_shared_data->npsdk_init_done = 0;

}
> +
/* Build a zxdh_dtb_bulk_dump_info initialiser for a hash-backed table:
 * memzone name ZXDH_<table>_TABLE, the ZCAM bulk-dump size, and the SDT
 * number derived from the table's base SDT plus the hash search index.
 */
#define MK_SDT_HASHRES(table, hash_idx) \
{ \
	.mz_name = RTE_STR(ZXDH_## table ##_TABLE), \
	.mz_size = DPU_DTB_TABLE_BULK_ZCAM_DUMP_SIZE, \
	.sdt_no = ZXDH_SDT_##table##_TABLE0 + hash_idx, \
	.mz = NULL\
}
/**
 * Reserve one memzone per dumpable table (ERAM and ZCAM/hash) and
 * record each zone's address, SDT number and size in @dpp_ctrl's
 * dump_addr_info[] array. The zones are also cached in
 * g_dtb_data.dtb_table_bulk_dump_mz[] so _dtb_data_res_free() can
 * release them later.
 * @return 0 on success, -ENOMEM if a reservation fails (zones already
 *         reserved are left for the caller's error path to free).
 */
static inline int zxdh_dtb_dump_res_init(struct zxdh_hw *hw __rte_unused,
			DPP_DEV_INIT_CTRL_T *dpp_ctrl)
{
	int ret = 0;
	int i;

	struct zxdh_dtb_bulk_dump_info dtb_dump_baseres[] = {
	/* eram */
	{"zxdh_sdt_vxlan_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VXLAN_ATT_TABLE, NULL},
	{"zxdh_sdt_vport_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VPORT_ATT_TABLE, NULL},
	{"zxdh_sdt_panel_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_PANEL_ATT_TABLE, NULL},
	{"zxdh_sdt_rss_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_RSS_ATT_TABLE, NULL},
	{"zxdh_sdt_vlan_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VLAN_ATT_TABLE, NULL},
	{"zxdh_sdt_lag_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_LAG_ATT_TABLE, NULL},
	/* zcam */
	/*hash*/
	{"zxdh_sdt_l2_entry_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE0, NULL},
	{"zxdh_sdt_l2_entry_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE1, NULL},
	{"zxdh_sdt_l2_entry_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE2, NULL},
	{"zxdh_sdt_l2_entry_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE3, NULL},
	{"zxdh_sdt_l2_entry_table4", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE4, NULL},
	{"zxdh_sdt_l2_entry_table5", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE5, NULL},
	{"zxdh_sdt_mc_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE0, NULL},
	{"zxdh_sdt_mc_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE1, NULL},
	{"zxdh_sdt_mc_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE2, NULL},
	{"zxdh_sdt_mc_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE3, NULL},
	{"zxdh_sdt_mc_table4", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE4, NULL},
	{"zxdh_sdt_mc_table5", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE5, NULL},
	};
	for (i = 0; i < (int) RTE_DIM(dtb_dump_baseres); i++) {
		struct zxdh_dtb_bulk_dump_info *p = dtb_dump_baseres + i;
		const struct rte_memzone *generic_dump_mz = rte_memzone_reserve_aligned(p->mz_name,
					p->mz_size, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);

		if (generic_dump_mz == NULL) {
			PMD_DRV_LOG(ERR,
				"Cannot alloc mem for dtb tbl bulk dump, mz_name is %s, mz_size is %u",
				p->mz_name, p->mz_size);
			ret = -ENOMEM;
			return ret;
		}
		p->mz = generic_dump_mz;
		/* Hand the virtual/IOVA addresses of the zone to the NP SDK. */
		dpp_ctrl->dump_addr_info[i].vir_addr = generic_dump_mz->addr_64;
		dpp_ctrl->dump_addr_info[i].phy_addr = generic_dump_mz->iova;
		dpp_ctrl->dump_addr_info[i].sdt_no   = p->sdt_no;
		dpp_ctrl->dump_addr_info[i].size	  = p->mz_size;
		/* NOTE(review): "%llx" assumes unsigned long long fields;
		 * PRIx64 would be portable if these are 64-bit — confirm.
		 */
		PMD_INIT_LOG(DEBUG,
			"dump_addr_info[%2d] vir_addr:0x%llx phy_addr:0x%llx sdt_no:%u size:%u",
			i,
			dpp_ctrl->dump_addr_info[i].vir_addr,
			dpp_ctrl->dump_addr_info[i].phy_addr,
			dpp_ctrl->dump_addr_info[i].sdt_no,
			dpp_ctrl->dump_addr_info[i].size);

		g_dtb_data.dtb_table_bulk_dump_mz[dpp_ctrl->dump_sdt_num] = generic_dump_mz;
		dpp_ctrl->dump_sdt_num++;
	}
	return ret;
}
> +/**
> + * Fun:  last entry to clear
> + */
> +static int zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)
> +{
> +	int ret = 0;
> +
> +	if (!g_dtb_data.init_done)
> +		return ret;
> +
> +	if (hw->is_pf) {
> +		/*hash  &ddr*/
> +		uint32_t sdt_no;
> +
> +		sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);
> +		ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);
> +		PMD_DRV_LOG(INFO, "%d dpp_dtb_hash_offline_delete sdt_no %d",
> +				hw->port_id, sdt_no);
> +		if (ret)
> +			PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",
> +					hw->port_id, sdt_no);
> +
> +		sdt_no = MK_SDT_NO(MC, hw->hash_search_index);
> +		ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);
> +		PMD_DRV_LOG(INFO, "%d dpp_dtb_hash_offline_delete sdt_no %d",
> +				hw->port_id, sdt_no);
> +		if (ret)
> +			PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",
> +				hw->port_id, sdt_no);
> +
> +		/*eram  iterm by iterm*/
> +		/*etcam*/
> +	}
> +	return ret;
> +}
/**
 * One-time initialisation of the shared DTB resources: allocates the
 * configuration/dump memzones, queries the NP BAR offset and runs
 * dpp_host_np_init() to obtain the DTB queue id.
 * @return 0 on success, a positive errno-style value on failure (the
 *         error path returns -ret where ret is already negative).
 */
static inline int npsdk_dtb_res_init(struct rte_eth_dev *dev)
{
	int ret = 0;
	struct zxdh_hw *hw = dev->data->dev_private;

	if (g_dtb_data.init_done) {
		PMD_INIT_LOG(DEBUG, "DTB res already init done, dev %s no need init",
			dev->device->name);
		return 0;
	}
	/* NOTE(review): init_done is set before initialisation actually
	 * succeeds; a concurrent/second probe could see a half-built
	 * state — confirm single-threaded probe is guaranteed.
	 */
	g_dtb_data.queueid = INVALID_DTBQUE;
	g_dtb_data.bind_device = dev;
	g_dtb_data.dev_refcnt++;
	g_dtb_data.init_done = 1;
	/* Control block plus room for 256 dump-address descriptors. */
	DPP_DEV_INIT_CTRL_T *dpp_ctrl = malloc(sizeof(*dpp_ctrl) +
			sizeof(DPP_DTB_ADDR_INFO_T) * 256);

	if (dpp_ctrl == NULL) {
		PMD_INIT_LOG(ERR, "dev %s annot allocate memory for dpp_ctrl", dev->device->name);
		ret = -ENOMEM;
		goto free_res;
	}
	memset(dpp_ctrl, 0, sizeof(*dpp_ctrl) + sizeof(DPP_DTB_ADDR_INFO_T) * 256);

	dpp_ctrl->queue_id = 0xff;	/* let the SDK pick the queue */
	dpp_ctrl->vport	 = hw->vport.vport;
	dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC;
	/* NOTE(review): unbounded strcpy into port_name — confirm the
	 * device name cannot exceed the field size.
	 */
	strcpy((char *)dpp_ctrl->port_name, dev->device->name);
	dpp_ctrl->pcie_vir_addr = (ZXIC_ADDR_T)hw->bar_addr[0];

	struct bar_offset_params param = {0};
	struct bar_offset_res  res = {0};

	param.pcie_id = hw->pcie_id;
	param.virt_addr = hw->bar_addr[0]+ZXDH_CTRLCH_OFFSET;
	param.type = URI_NP;

	ret = zxdh_get_bar_offset(&param, &res);
	if (ret) {
		PMD_INIT_LOG(ERR, "dev %s get npbar offset failed", dev->device->name);
		goto free_res;
	}
	dpp_ctrl->np_bar_len = res.bar_length;
	dpp_ctrl->np_bar_offset = res.bar_offset;
	/* NOTE(review): ERR level used for an informational message. */
	PMD_INIT_LOG(ERR,
		"dpp_ctrl->pcie_vir_addr 0x%llx bar_offs  0x%x bar_len 0x%x",
		dpp_ctrl->pcie_vir_addr, dpp_ctrl->np_bar_offset, dpp_ctrl->np_bar_len);
	if (!g_dtb_data.dtb_table_conf_mz) {
		const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",
				DPU_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);

		if (conf_mz == NULL) {
			PMD_INIT_LOG(ERR,
				"dev %s annot allocate memory for dtb table conf",
				dev->device->name);
			ret = -ENOMEM;
			goto free_res;
		}
		dpp_ctrl->down_vir_addr = conf_mz->addr_64;
		dpp_ctrl->down_phy_addr = conf_mz->iova;
		g_dtb_data.dtb_table_conf_mz = conf_mz;
	}
	/* Dump buffer shared by all tables. */
	if (!g_dtb_data.dtb_table_dump_mz) {
		const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",
				DPU_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);

		if (dump_mz == NULL) {
			PMD_INIT_LOG(ERR,
				"dev %s Cannot allocate memory for dtb table dump",
				dev->device->name);
			ret = -ENOMEM;
			goto free_res;
		}
		dpp_ctrl->dump_vir_addr = dump_mz->addr_64;
		dpp_ctrl->dump_phy_addr = dump_mz->iova;
		g_dtb_data.dtb_table_dump_mz = dump_mz;
	}
	/* init bulk dump */
	/* NOTE(review): return value ignored; an -ENOMEM here is only
	 * caught later when dpp_host_np_init() fails — confirm intended.
	 */
	zxdh_dtb_dump_res_init(hw, dpp_ctrl);

	ret = dpp_host_np_init(0, dpp_ctrl);
	if (ret) {
		PMD_INIT_LOG(ERR, "dev %s dpp host np init failed .ret %d", dev->device->name, ret);
		goto free_res;
	}

	PMD_INIT_LOG(INFO, "dev %s dpp host np init ok.dtb queue %d",
		dev->device->name, dpp_ctrl->queue_id);
	g_dtb_data.queueid = dpp_ctrl->queue_id;
	free(dpp_ctrl);
	return 0;

free_res:
	_dtb_data_res_free(hw);
	free(dpp_ctrl);
	/* NOTE(review): ret is negative here, so -ret is a positive
	 * errno value — callers treat any non-zero as failure.
	 */
	return -ret;
}
> +/**
> + * Fun:
> + */
> +static uint32_t dpp_res_uni_init(ZXIC_UINT32 type)
> +{
> +	DPP_STATUS rc = DPP_OK;
> +	ZXIC_UINT32 dev_id = 0;
> +	DPP_APT_HASH_RES_INIT_T tHashResInit = {0};
> +	DPP_APT_ERAM_RES_INIT_T tEramResInit = {0};
> +	DPP_APT_ACL_RES_INIT_T tAclResInit = {0};
> +	DPP_APT_DDR_RES_INIT_T tDdrResInit = {0};
> +	DPP_APT_LPM_RES_INIT_T tLpmResInit = {0};
> +	DPP_APT_STAT_RES_INIT_T tStatResInit = {0};
> +
> +	ZXIC_COMM_MEMSET(&tHashResInit, 0x0, sizeof(DPP_APT_HASH_RES_INIT_T));
> +	ZXIC_COMM_MEMSET(&tEramResInit, 0x0, sizeof(DPP_APT_ERAM_RES_INIT_T));
> +	ZXIC_COMM_MEMSET(&tAclResInit, 0x0, sizeof(DPP_APT_ACL_RES_INIT_T));
> +	ZXIC_COMM_MEMSET(&tDdrResInit, 0x0, sizeof(DPP_APT_DDR_RES_INIT_T));
> +	ZXIC_COMM_MEMSET(&tLpmResInit, 0x0, sizeof(DPP_APT_LPM_RES_INIT_T));
> +	ZXIC_COMM_MEMSET(&tStatResInit, 0x0, sizeof(DPP_APT_STAT_RES_INIT_T));
> +
> +	/* Obtain all flow table resources */
> +	rc = dpp_apt_hash_res_get(type, &tHashResInit);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_drv_hash_res_get");
> +	rc = dpp_apt_eram_res_get(type, &tEramResInit);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_drv_eram_res_get");
> +	rc = dpp_apt_acl_res_get(type, &tAclResInit);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_drv_acl_res_get");
> +	rc = dpp_apt_ddr_res_get(type, &tDdrResInit);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_ddr_res_get");
> +	rc = dpp_apt_lpm_res_get(type, &tLpmResInit);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_lpm_res_get");
> +	rc = dpp_apt_stat_res_get(type, &tStatResInit);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_stat_res_get");
> +
> +	/* hash init */
> +	rc = dpp_apt_hash_global_res_init(dev_id);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_global_res_init");
> +
> +	rc = dpp_apt_hash_func_res_init(dev_id, tHashResInit.func_num, tHashResInit.func_res);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_func_res_init");
> +	PMD_INIT_LOG(INFO, " func_num  %d", tHashResInit.func_num);
> +
> +	rc = dpp_apt_hash_bulk_res_init(dev_id, tHashResInit.bulk_num, tHashResInit.bulk_res);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_bulk_res_init");
> +	PMD_INIT_LOG(INFO, " bulk_num  %d", tHashResInit.bulk_num);
> +
> +	/* tbl-res must be initialized after fun-res and buld-res */
> +	rc = dpp_apt_hash_tbl_res_init(dev_id, tHashResInit.tbl_num, tHashResInit.tbl_res);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_tbl_res_init");
> +	PMD_INIT_LOG(INFO, " tbl_num  %d", tHashResInit.tbl_num);
> +	/* eram init */
> +	rc = dpp_apt_eram_res_init(dev_id, tEramResInit.tbl_num, tEramResInit.eram_res);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_eram_res_init");
> +
> +	/* init acl */
> +	rc = dpp_apt_acl_res_init(dev_id, tAclResInit.tbl_num, tAclResInit.acl_res);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_acl_res_init");
> +
> +	/* init stat */
> +	rc = dpp_stat_ppu_eram_baddr_set(dev_id, tStatResInit.eram_baddr);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_baddr_set");
> +
> +	rc = dpp_stat_ppu_eram_depth_set(dev_id, tStatResInit.eram_depth); /* unit: 128bits */
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_depth_set");
> +
> +	rc = dpp_se_cmmu_smmu1_cfg_set(dev_id, tStatResInit.ddr_baddr);
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_se_cmmu_smmu1_cfg_set");
> +
> +	rc = dpp_stat_ppu_ddr_baddr_set(dev_id, tStatResInit.ppu_ddr_offset); /* unit: 128bits */
> +	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_depth_set");
> +
> +	return DPP_OK;
> +}
> +
> +static inline int npsdk_apt_res_init(struct rte_eth_dev *dev __rte_unused)
> +{
> +	uint32_t ret = 0;
> +
> +	ret = dpp_res_uni_init(SE_NIC_RES_TYPE);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "init stand dpp res failed");
> +		return -1;
> +	}
> +
> +	PMD_INIT_LOG(INFO, " end ...time: %lu s", get_cur_time_ms());
> +	return ret;
> +}
> +/**
> + * Fun:
> + */
> +static void zxdh_np_destroy(struct rte_eth_dev *dev)
> +{
> +	zxdh_tbl_entry_destroy(dev);
> +	if ((!g_dtb_data.init_done) && (!g_dtb_data.dev_refcnt))
> +		return;
> +
> +	if (--g_dtb_data.dev_refcnt == 0) {
> +		struct zxdh_hw *hw = dev->data->dev_private;
> +
> +		_dtb_data_res_free(hw);
> +	}
> +
> +	PMD_DRV_LOG(INFO, "g_dtb_data	dev_refcnt %d", g_dtb_data.dev_refcnt);
> +}
> +
> +/**
> + * Fun:
> + */
> +static int zxdh_tables_init(struct rte_eth_dev *dev)
> +{
> +	/*	port attr\pannel attr\rss\mac vlan filter flush */
> +	int ret = 0;
> +
> +	ret = zxdh_port_attr_init(dev);
> +	if (ret != 0) {
> +		PMD_INIT_LOG(ERR, " zxdh_port_attr_init failed");
> +		return ret;
> +	}
> +
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (hw->is_pf) {
> +		ret = zxdh_panel_table_init(dev);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, " panel table init failed");
> +			return ret;
> +		}
> +		ret = zxdh_vlan_filter_table_init(vport_to_vfid(hw->vport));
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, " panel table init failed");
> +			return ret;
> +		}
> +		ret = zxdh_promisc_table_init(hw);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, " promisc_table_init failed");
> +			return ret;
> +		}
> +		config_default_hash_key();
> +	}
> +	return ret;
> +}
> +/**
> + * Fun:
> + */
> +const char *MZ_ZXDH_PMD_SHARED_DATA = "zxdh_pmd_shared_data";
> +rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> +struct zxdh_shared_data *zxdh_shared_data;
> +
> +static int zxdh_init_shared_data(void)
> +{
> +	const struct rte_memzone *mz;
> +	int ret = 0;
> +
> +	rte_spinlock_lock(&zxdh_shared_data_lock);
> +	if (zxdh_shared_data == NULL) {
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +			/* Allocate shared memory. */
> +			mz = rte_memzone_reserve(MZ_ZXDH_PMD_SHARED_DATA,
> +					sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0);
> +			if (mz == NULL) {
> +				PMD_INIT_LOG(ERR, "Cannot allocate zxdh shared data");
> +				ret = -rte_errno;
> +				goto error;
> +			}
> +			zxdh_shared_data = mz->addr;
> +			memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data));
> +			rte_spinlock_init(&zxdh_shared_data->lock);
> +		} else { /* Lookup allocated shared memory. */
> +			mz = rte_memzone_lookup(MZ_ZXDH_PMD_SHARED_DATA);
> +			if (mz == NULL) {
> +				PMD_INIT_LOG(ERR, "Cannot attach zxdh shared data");
> +				ret = -rte_errno;
> +				goto error;
> +			}
> +			zxdh_shared_data = mz->addr;
> +		}
> +	}
> +
> +error:
> +	rte_spinlock_unlock(&zxdh_shared_data_lock);
> +	return ret;
> +}
> +
> +static void zxdh_free_sh_res(void)
> +{
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +		rte_spinlock_lock(&zxdh_shared_data_lock);
> +		if ((zxdh_shared_data != NULL) && zxdh_shared_data->init_done &&
> +			(--zxdh_shared_data->dev_refcnt == 0)) {
> +			rte_mempool_free(zxdh_shared_data->flow_mp);
> +			rte_mempool_free(zxdh_shared_data->mtr_mp);
> +			rte_mempool_free(zxdh_shared_data->mtr_profile_mp);
> +			rte_mempool_free(zxdh_shared_data->mtr_policy_mp);
> +		}
> +		rte_spinlock_unlock(&zxdh_shared_data_lock);
> +	}
> +}
> +
> +/**
> + * Fun:
> + */
> +static int zxdh_init_sh_res(struct zxdh_shared_data *sd)
> +{
> +	const char *MZ_ZXDH_FLOW_MP        = "zxdh_flow_mempool";
> +	const char *MZ_ZXDH_MTR_MP         = "zxdh_mtr_mempool";
> +	const char *MZ_ZXDH_MTR_PROFILE_MP = "zxdh_mtr_profile_mempool";
> +	const char *MZ_ZXDH_MTR_POLICY_MP = "zxdh_mtr_policy_mempool";
> +	struct rte_mempool *flow_mp = NULL;
> +	struct rte_mempool *mtr_mp = NULL;
> +	struct rte_mempool *mtr_profile_mp = NULL;
> +	struct rte_mempool *mtr_policy_mp = NULL;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +		flow_mp = rte_mempool_create(MZ_ZXDH_FLOW_MP, MAX_FLOW_NUM,
> +			sizeof(struct zxdh_flow),
> +			64, 0, NULL, NULL, NULL, NULL,
> +			SOCKET_ID_ANY, 0);
> +		if (flow_mp == NULL) {
> +			PMD_INIT_LOG(ERR, "Cannot allocate zxdh flow mempool");
> +			goto error;
> +		}
> +		mtr_mp = rte_mempool_create(MZ_ZXDH_MTR_MP, MAX_MTR_NUM,
> +			sizeof(struct zxdh_mtr_object),
> +			64, 0, NULL, NULL, NULL, NULL,
> +			SOCKET_ID_ANY, 0);
> +		if (mtr_mp == NULL) {
> +			PMD_INIT_LOG(ERR, "Cannot allocate zxdh mtr mempool");
> +			goto error;
> +		}
> +		mtr_profile_mp = rte_mempool_create(MZ_ZXDH_MTR_PROFILE_MP, MAX_MTR_PROFILE_NUM,
> +			sizeof(struct zxdh_meter_profile),
> +			64, 0, NULL, NULL, NULL, NULL,
> +			SOCKET_ID_ANY, 0);
> +		if (mtr_profile_mp == NULL) {
> +			PMD_INIT_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");
> +			goto error;
> +		}
> +		mtr_policy_mp = rte_mempool_create(MZ_ZXDH_MTR_POLICY_MP, ZXDH_MAX_POLICY_NUM,
> +			sizeof(struct zxdh_meter_policy),
> +			64, 0, NULL, NULL, NULL, NULL,
> +			SOCKET_ID_ANY, 0);
> +		if (mtr_policy_mp == NULL) {
> +			PMD_INIT_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");
> +			goto error;
> +		}
> +		sd->flow_mp = flow_mp;
> +		sd->mtr_mp = mtr_mp;
> +		sd->mtr_profile_mp = mtr_profile_mp;
> +		sd->mtr_policy_mp = mtr_policy_mp;
> +
> +		TAILQ_INIT(&zxdh_shared_data->flow_list);
> +		TAILQ_INIT(&zxdh_shared_data->meter_profile_list);
> +		TAILQ_INIT(&zxdh_shared_data->mtr_list);
> +		TAILQ_INIT(&zxdh_shared_data->mtr_policy_list);
> +	}
> +	return 0;
> +
> +error:
> +	rte_mempool_free(mtr_policy_mp);
> +	rte_mempool_free(mtr_profile_mp);
> +	rte_mempool_free(mtr_mp);
> +	rte_mempool_free(flow_mp);
> +	return -rte_errno;
> +}
> +
> +/**
> + * Fun:
> + */
> +struct zxdh_mtr_res g_mtr_res;
> +static void zxdh_mtr_init(void)
> +{
> +	rte_spinlock_init(&g_mtr_res.hw_plcr_res_lock);
> +	memset(&g_mtr_res, 0, sizeof(g_mtr_res));
> +}
> +
> +#define ZXDH_HASHIDX_MAX  6
> +
> +/**
> + * Fun:
> + */
> +static int zxdh_np_init(struct rte_eth_dev *eth_dev)
> +{
> +	uint32_t ret = 0;
> +	struct zxdh_hw *hw = eth_dev->data->dev_private;
> +
> +	if ((zxdh_shared_data != NULL) && zxdh_shared_data->npsdk_init_done) {
> +		g_dtb_data.dev_refcnt++;
> +		zxdh_tbl_entry_offline_destroy(hw);
> +		PMD_DRV_LOG(INFO, "no need to init dtb  dtb chanenl %d devref %d",
> +				g_dtb_data.queueid, g_dtb_data.dev_refcnt);
> +		return 0;
> +	}
> +
> +	if (hw->is_pf) {
> +		PMD_DRV_LOG(INFO, "dpp_dtb_res_init time: %ld s", get_cur_time_ms());
> +		ret = npsdk_dtb_res_init(eth_dev);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "dpp apt init failed, ret:%d ", ret);
> +			return -ret;
> +		}
> +		PMD_DRV_LOG(INFO, "dpp_dtb_res_init ok");
> +
> +		PMD_DRV_LOG(INFO, "%s time: %ld s", __func__, get_cur_time_ms());
> +		ret = npsdk_apt_res_init(eth_dev);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "dpp apt init failed, ret:%d ", ret);
> +			return -ret;
> +		}
> +
> +		PMD_DRV_LOG(INFO, "dpp_apt_res_init ok");
> +		if (!hw->switchoffload) {
> +			if (hw->hash_search_index >= ZXDH_HASHIDX_MAX) {
> +				PMD_DRV_LOG(ERR, "invalid hash idx %d", hw->hash_search_index);
> +				return -1;
> +			}
> +			zxdh_tbl_entry_offline_destroy(hw);
> +		}
> +	}
> +	if (zxdh_shared_data != NULL)
> +		zxdh_shared_data->npsdk_init_done = 1;
> +
> +	PMD_DRV_LOG(DEBUG, "np init ok ");
> +	return 0;
> +}
> +/**
> + * Fun:
> + */
> +static int zxdh_init_once(struct rte_eth_dev *eth_dev)
> +{
> +	PMD_INIT_LOG(DEBUG, "port 0x%x init...", eth_dev->data->port_id);
> +	if (zxdh_init_shared_data())
> +		return -rte_errno;
> +
> +	struct zxdh_shared_data *sd = zxdh_shared_data;
> +	int ret = 0;
> +
> +	rte_spinlock_lock(&sd->lock);
> +	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
> +		if (!sd->init_done) {
> +			++sd->secondary_cnt;
> +			sd->init_done = true;
> +		}
> +		goto out;
> +	}
> +	/* RTE_PROC_PRIMARY */
> +	if (!sd->init_done) {
> +		/*shared struct and res init */
> +		ret = zxdh_init_sh_res(sd);
> +		if (ret != 0)
> +			goto out;
> +
> +		zxdh_mtr_init();
> +		sd->init_done = true;
> +	}
> +	sd->dev_refcnt++;
> +out:
> +	rte_spinlock_unlock(&sd->lock);
> +	return ret;
> +}
/* dev_ops for virtio, bare necessities for basic operation.
 * Unimplemented callbacks are left NULL (queue release, stats mapping).
 */
static const struct eth_dev_ops zxdh_eth_dev_ops = {
	/* lifecycle */
	.dev_configure			 = zxdh_dev_configure,
	.dev_start				 = zxdh_dev_start,
	.dev_stop				 = zxdh_dev_stop,
	.dev_close				 = zxdh_dev_close,
	.dev_infos_get			 = zxdh_dev_info_get,
	/* statistics (xstats reset shares the stats reset handler) */
	.stats_get				 = zxdh_dev_stats_get,
	.xstats_get				 = zxdh_dev_xstats_get,
	.xstats_get_names		 = zxdh_dev_xstats_get_names,
	.stats_reset			 = zxdh_dev_stats_reset,
	.xstats_reset			 = zxdh_dev_stats_reset,
	.link_update			 = zxdh_dev_link_update,
	/* queue setup and introspection */
	.rx_queue_setup			 = zxdh_dev_rx_queue_setup,
	.rx_queue_intr_enable	 = zxdh_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable	 = zxdh_dev_rx_queue_intr_disable,
	.rx_queue_release		 = NULL,
	.rxq_info_get			 = zxdh_rxq_info_get,
	.txq_info_get			 = zxdh_txq_info_get,
	.tx_queue_setup			 = zxdh_dev_tx_queue_setup,
	.tx_queue_release		 = NULL,
	.queue_stats_mapping_set = NULL,

	/* MAC / link administration */
	.mac_addr_add			 = zxdh_dev_mac_addr_add,
	.mac_addr_remove		 = zxdh_dev_mac_addr_remove,
	.mac_addr_set			 = zxdh_dev_mac_addr_set,
	.mtu_set				 = zxdh_dev_mtu_set,
	.dev_set_link_up		 = zxdh_dev_set_link_up,
	.dev_set_link_down		 = zxdh_dev_set_link_down,
	.promiscuous_enable		 = zxdh_dev_promiscuous_enable,
	.promiscuous_disable	 = zxdh_dev_promiscuous_disable,
	.allmulticast_enable	 = zxdh_dev_allmulticast_enable,
	.allmulticast_disable	 = zxdh_dev_allmulticast_disable,
	/* VLAN / tunnel / RSS */
	.vlan_filter_set		 = zxdh_vlan_filter_set,
	.vlan_offload_set		 = zxdh_vlan_offload_set,
	.vlan_pvid_set			 = zxdh_vlan_pvid_set,
	.vlan_tpid_set			 = zxdh_vlan_tpid_set,
	.udp_tunnel_port_add	 = zxdh_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del	 = zxdh_dev_udp_tunnel_port_del,
	.reta_update			 = zxdh_dev_rss_reta_update,
	.reta_query				 = zxdh_dev_rss_reta_query,
	.rss_hash_update		 = zxdh_rss_hash_update,
	.rss_hash_conf_get		 = zxdh_rss_hash_conf_get,
	/* metering, flow, misc */
	.mtr_ops_get			 = zxdh_meter_ops_get,
	.flow_ops_get			 = zxdh_flow_ops_get,
	.fw_version_get			 = zxdh_dev_fw_version_get,
	.get_module_info		 = zxdh_dev_get_module_info,
	.get_module_eeprom		 = zxdh_dev_get_module_eeprom,
	.flow_ctrl_get			 = zxdh_flow_ctrl_get,
	.flow_ctrl_set			 = zxdh_flow_ctrl_set,
	.eth_dev_priv_dump		 = zxdh_dev_priv_dump,
};
> +/**
> + * Fun:
> + */
> +static int32_t zxdh_msg_chan_enable(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +	struct msix_para misx_info = {
> +		.vector_risc = MSIX_FROM_RISCV,
> +		.vector_pfvf = MSIX_FROM_PFVF,
> +		.vector_mpf  = MSIX_FROM_MPF,
> +		.pcie_id     = hw->pcie_id,
> +		.driver_type = hw->is_pf ? MSG_CHAN_END_PF : MSG_CHAN_END_VF,
> +		.virt_addr   = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET),
> +	};
> +
> +	return zxdh_bar_chan_enable(&misx_info, &hw->vport.vport);
> +}
> +
> +static int32_t zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev)
> +{
> +	struct zxdh_hw *hw = dev->data->dev_private;
> +
> +	if (!hw->is_pf)
> +		return 0;
> +	return bar_chan_pf_init_spinlock(hw->pcie_id, (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX]));
> +}
> +
/*
 * Query device identity from the agent: physical port, hash search
 * index and panel id; also derives the vfid from the vport.
 * @return 0 on success, -1 if any query fails.
 */
static int zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
{
	if (zxdh_phyport_get(eth_dev, &hw->phyport) != 0) {
		PMD_INIT_LOG(ERR, "Failed to get phyport");
		return -1;
	}
	PMD_INIT_LOG(INFO, "Get phyport success: 0x%x", hw->phyport);
	/* vfid only depends on the vport, which is valid by now. */
	hw->vfid = vport_to_vfid(hw->vport);
	if (zxdh_hashidx_get(eth_dev, &hw->hash_search_index) != 0) {
		PMD_INIT_LOG(ERR, "Failed to get hash idx");
		return -1;
	}
	PMD_INIT_LOG(DEBUG, "Get hash idx success: 0x%x", hw->hash_search_index);
	if (zxdh_pannelid_get(eth_dev, &hw->panel_id) != 0) {
		PMD_INIT_LOG(ERR, "Failed to get panel_id");
		return -1;
	}
	PMD_INIT_LOG(INFO, "Get pannel id success: 0x%x", hw->panel_id);

	return 0;
}
/**
 * Device init entry, based on the probe() flow in zxdh_pci.c:
 * sets dev_ops, allocates MAC storage, parses devargs, brings up the
 * BAR message channel, initialises the NP SDK and the hardware tables.
 * It returns 0 on success, negative errno on failure.
 */
static int32_t zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;
	/* NOTE(review): get_cur_time_ms() presumably returns milliseconds
	 * but the logs label the value "s" — confirm the unit.
	 */
	uint64_t pre_time = get_cur_time_ms();

	PMD_INIT_LOG(INFO, "dev init begin time: %lu s", pre_time);
	eth_dev->dev_ops = &zxdh_eth_dev_ops;

	/**
	 * Primary process does the whole initialization;
	 * secondary processes just select the same Rx and Tx functions as primary.
	 */
	struct zxdh_hw *hw = eth_dev->data->dev_private;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		VTPCI_OPS(hw) = &zxdh_modern_ops;
		set_rxtx_funcs(eth_dev);
		return 0;
	}
	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac",
			ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes store MAC addresses",
				ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}
	/* Start from a clean private area before parsing devargs into it. */
	memset(hw, 0, sizeof(*hw));
	ret = zxdh_dev_devargs_parse(eth_dev->device->devargs, hw);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "dev args parse failed");
		return -EINVAL;
	}

	hw->bar_addr[0] = (uint64_t)pci_dev->mem_resource[0].addr;
	if (hw->bar_addr[0] == 0) {
		PMD_INIT_LOG(ERR, "Bad mem resource.");
		return -EIO;
	}
	hw->device_id = pci_dev->id.device_id;
	hw->port_id = eth_dev->data->port_id;
	hw->eth_dev = eth_dev;
	hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;
	hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
	hw->is_pf = 0;

	hw->reta_idx = NULL;
	hw->vfinfo = NULL;
	hw->vlan_fiter = NULL;

	hw->admin_status = RTE_ETH_LINK_UP;
	rte_spinlock_init(&hw->state_lock);
	if (pci_dev->id.device_id == ZXDH_PCI_PF_DEVICEID) {
		hw->is_pf = 1;
		hw->pfinfo.vf_nums = pci_dev->max_vfs;
	}

	/* reset device and get dev config*/
	ret = zxdh_init_once(eth_dev);
	if (ret != 0)
		goto err_zxdh_init;

	ret = zxdh_init_device(eth_dev);
	if (ret < 0)
		goto err_zxdh_init;

	ret = zxdh_msg_chan_init();
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init bar msg chan");
		goto err_zxdh_init;
	}
	hw->msg_chan_init = 1;
	PMD_INIT_LOG(DEBUG, "Init bar msg chan OK");
	ret = zxdh_msg_chan_hwlock_init(eth_dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "zxdh_msg_chan_hwlock_init failed ret %d", ret);
		goto err_zxdh_init;
	}
	ret = zxdh_msg_chan_enable(eth_dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "zxdh_msg_bar_chan_enable failed ret %d", ret);
		goto err_zxdh_init;
	}
	PMD_INIT_LOG(DEBUG, "pcie_id: 0x%x, vport: 0x%x", hw->pcie_id, hw->vport.vport);

	/* Needs the message channel up to query phyport/hashidx/panel. */
	ret = zxdh_agent_comm(eth_dev, hw);
	if (ret != 0)
		goto err_zxdh_init;

	ret = zxdh_np_init(eth_dev);
	if (ret)
		goto err_zxdh_init;


	zxdh_priv_res_init(hw);
	zxdh_sriovinfo_init(hw);
	zxdh_msg_cb_reg(hw);
	zxdh_configure_intr(eth_dev);
	ret = zxdh_tables_init(eth_dev);
	if (ret != 0)
		goto err_zxdh_init;

	uint64_t time = get_cur_time_ms();

	/* NOTE(review): ERR level used for an informational timing log. */
	PMD_INIT_LOG(ERR, "dev init end time: %lu s total time %" PRIu64, time, time - pre_time);
	return 0;

err_zxdh_init:
	/* Unwind everything allocated above; helpers tolerate partial init. */
	zxdh_intr_release(eth_dev);
	zxdh_np_destroy(eth_dev);
	zxdh_bar_msg_chan_exit();
	zxdh_priv_res_free(hw);
	zxdh_free_sh_res();
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	rte_free(eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key);
	eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
	return ret;
}
> +
/*
 * Return ceil(log2(v)): the smallest n such that (1u << n) >= v.
 * Returns 0 for v <= 1.
 */
static unsigned int
log2above(unsigned int v)
{
	unsigned int bits = 0;       /* position of the highest set bit */
	unsigned int low_bits = 0;   /* 1 if any lower bit was set (not a power of two) */

	while (v >> 1) {
		low_bits |= v & 1u;
		v >>= 1;
		bits++;
	}
	return bits + low_bits;
}
> +
> +static uint16_t zxdh_queue_desc_pre_setup(uint16_t desc)
> +{
> +	uint32_t nb_desc = desc;
> +
> +	if (desc < ZXDH_MIN_QUEUE_DEPTH) {
> +		PMD_RX_LOG(WARNING,
> +			"nb_desc(%u) increased number of descriptors to the min queue depth (%u)",
> +			desc, ZXDH_MIN_QUEUE_DEPTH);
> +		return ZXDH_MIN_QUEUE_DEPTH;
> +	}
> +
> +	if (desc > ZXDH_MAX_QUEUE_DEPTH) {
> +		PMD_RX_LOG(WARNING,
> +			"nb_desc(%u) can't be greater than max_rxds (%d), turn to max queue depth",
> +			desc, ZXDH_MAX_QUEUE_DEPTH);
> +		return ZXDH_MAX_QUEUE_DEPTH;
> +	}
> +
> +	if (!rte_is_power_of_2(desc)) {
> +		nb_desc = 1 << log2above(desc);
> +		if (nb_desc > ZXDH_MAX_QUEUE_DEPTH)
> +			nb_desc = ZXDH_MAX_QUEUE_DEPTH;
> +
> +		PMD_RX_LOG(WARNING,
> +			"nb_desc(%u) increased number of descriptors to the next power of two (%d)",
> +			desc, nb_desc);
> +	}
> +
> +	return nb_desc;
> +}
> +
> +static int32_t hw_q_depth_handler(const char *key __rte_unused,
> +				const char *value, void *ret_val)
> +{
> +	uint16_t val = 0;
> +	struct zxdh_hw *hw = ret_val;
> +
> +	val = strtoul(value, NULL, 0);
> +	uint16_t q_depth = zxdh_queue_desc_pre_setup(val);
> +
> +	hw->q_depth = q_depth;
> +	return 0;
> +}
> +
> +static int32_t zxdh_dev_devargs_parse(struct rte_devargs *devargs, struct zxdh_hw *hw)
> +{
> +	struct rte_kvargs *kvlist = NULL;
> +	int32_t ret = 0;
> +
> +	if (devargs == NULL)
> +		return 0;
> +
> +	kvlist = rte_kvargs_parse(devargs->args, NULL);
> +	if (kvlist == NULL) {
> +		PMD_INIT_LOG(ERR, "error when parsing param");
> +		return 0;
> +	}
> +
> +	ret = rte_kvargs_process(kvlist, "q_depth", hw_q_depth_handler, hw);
> +	if (ret < 0) {
> +		PMD_INIT_LOG(ERR, "Failed to parse q_depth");
> +		goto exit;
> +	}
> +	if (!hw->q_depth)
> +		hw->q_depth = ZXDH_MIN_QUEUE_DEPTH;
> +
> +exit:
> +	rte_kvargs_free(kvlist);
> +	return ret;
> +}
> +
/**
 * Fun: PCI probe entry point registered with the zxdh PCI driver.
 * When built with RTE_LIBRTE_ZXDH_DEBUG, raises the driver log levels
 * to DEBUG first, then delegates per-port allocation and initialization
 * to zxdh_eth_dev_init() via the generic ethdev PCI probe helper.
 */
int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			struct rte_pci_device *pci_dev)
{
#ifdef RTE_LIBRTE_ZXDH_DEBUG
	rte_log_set_level(zxdh_logtype_init, RTE_LOG_DEBUG);
	rte_log_set_level(zxdh_logtype_driver, RTE_LOG_DEBUG);
	rte_log_set_level(RTE_LOGTYPE_PMD, RTE_LOG_DEBUG);
#endif
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct zxdh_hw), zxdh_eth_dev_init);
}
> +/**
> + * Fun:
> + */
> +static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev)
> +{
> +	PMD_INIT_FUNC_TRACE();
> +	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
> +		return 0;
> +	zxdh_dev_close(eth_dev);
> +	return 0;
> +}
> +/**
> + * Fun:
> + */
> +int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev)
> +{
> +	int32_t ret = rte_eth_dev_pci_generic_remove(pci_dev, zxdh_eth_dev_uninit);
> +
> +	if (ret == -ENODEV) { /* Port has already been released by close. */
> +		ret = 0;
> +	}
> +	return ret;
> +}
> +static const struct rte_pci_id pci_id_zxdh_map[] = {
> +	{RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_PCI_PF_DEVICEID)},
> +	{RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_PCI_VF_DEVICEID)},
> +	{.vendor_id = 0, /* sentinel */ },
> +};
> +static struct rte_pci_driver zxdh_pmd = {
> +	.driver = {.name = "net_zxdh", },
> +	.id_table = pci_id_zxdh_map,
> +	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
> +	.probe = zxdh_eth_pci_probe,
> +	.remove = zxdh_eth_pci_remove,
> +};
> +RTE_INIT(rte_zxdh_pmd_init)
> +{
> +	zxdh_log_init();
> +	rte_pci_register(&zxdh_pmd);
>

you can use 'RTE_PMD_REGISTER_PCI' instead, and call
'rte_telemetry_register_cmd()' from the probe function, so that the
telemetry commands exist only when a zxdh device exists.

I assume 'rte_telemetry_register_cmd()' doesn't need to be in RTE_INIT
but can you please double check. If it has to be, you can create a
separate RTE_INIT() for telemetry.


> +	rte_telemetry_register_cmd("/zxdh/dumppkt",
> +		handle_pkt_dump,
> +		"Returns None. Parameter: port id, mode(0:all_off;1:rx_on;2:tx_on;3:all_on), dumplen");
>

This is not formalized yet, but for driver telemetry commands, what
about using a hierarchy similar to the one logging has, like
/pmd/net/zxdh/* ?

As far as I can see the only other driver using telemetry directly is
'cnxk'; please feel free to sync with Jerin (and cc me) to agree on this
format.

> +	rte_telemetry_register_cmd("/zxdh/dumpque",
> +		handle_queue_dump,
> +		"Returns None. Parameter: port id, queid, dump_descnum, logfile(eg /home/que.log)");
> +}
> +RTE_PMD_EXPORT_NAME(net_zxdh, __COUNTER__);
>

If you use 'RTE_PMD_REGISTER_PCI', can drop above.

> +RTE_PMD_REGISTER_PCI_TABLE(net_zxdh, pci_id_zxdh_map);
> +RTE_PMD_REGISTER_KMOD_DEP(net_zxdh, "* vfio-pci");
> +RTE_LOG_REGISTER(zxdh_logtype_init, pmd.net.zxdh.init, DEBUG);
> +RTE_LOG_REGISTER(zxdh_logtype_driver, pmd.net.zxdh.driver, INFO);
> +RTE_LOG_REGISTER(zxdh_logtype_zxdh_driver, pmd.net.zxdh.zxdh_driver, DEBUG);
> +RTE_LOG_REGISTER(zxdh_logtype_tx, pmd.net.zxdh.tx, NOTICE);
> +RTE_LOG_REGISTER(zxdh_logtype_rx, pmd.net.zxdh.rx, NOTICE);
> +RTE_LOG_REGISTER(zxdh_logtype_msg, pmd.net.zxdh.msg, INFO);
>

Can use 'RTE_LOG_REGISTER_SUFFIX' instead, simpler.

<...>

> +}
> +static void DataHitolo(uint64_t *data)

Please don't use CamelCase in function naming.

<...>

> +struct fd_flow_result {
> +	uint8_t rsv:7;
> +	uint8_t hit_flag:1;
> +	uint8_t rsv0;
> +	uint8_t uplink_flag; /*0:fdid;1:4B fdir;2:8B fdif*/
> +	uint8_t action_idx; /*1:fwd 2:drop*/
> +	rte_le16_t qid;
> +	rte_le16_t vfid;
> +	rte_le32_t uplink_fdid;
> +	uint8_t rsv1[3];
> +	uint8_t fdir_offset;/*����l2 offset*/
>

Please use only readable characters.

I am stopping the review here, can continue review in next version where
driver split into patch series.

<...>
  

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index c9adff9846..34f9001b93 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1063,6 +1063,12 @@  F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini

+ZTE zxdh
+M: Junlong Wang <wang.junlong1@zte.com.cn>
+M: Lijie Shan <shan.lijie@zte.com.cn>
+F: drivers/net/zxdh/
+F: doc/guides/nics/zxdh.rst
+F: doc/guides/nics/features/zxdh.ini

 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
new file mode 100644
index 0000000000..fc41426077
--- /dev/null
+++ b/doc/guides/nics/features/zxdh.ini
@@ -0,0 +1,38 @@ 
+;
+; Supported features of the 'zxdh' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Link status event    = Y
+MTU update           = Y
+Scattered Rx         = Y
+TSO                  = Y
+LRO                  = Y
+Promiscuous mode     = Y
+Allmulticast mode    = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
+Inner RSS            = Y
+SR-IOV               = Y
+VLAN filter          = Y
+VLAN offload         = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+Stats per queue      = Y
+Flow control         = Y
+FW version           = Y
+Multiprocess aware   = Y
+Linux                = Y
+x86-64               = Y
+ARMv8                = Y
+
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
new file mode 100644
index 0000000000..f7cbc5755b
--- /dev/null
+++ b/doc/guides/nics/zxdh.rst
@@ -0,0 +1,61 @@ 
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 ZTE Corporation.
+
+
+ZXDH Poll Mode Driver
+======================
+
+The ZXDH PMD (**librte_net_zxdh**) provides poll mode driver support
+for 25/100 Gbps ZXDH NX Series Ethernet Controller based on
+the ZTE Ethernet Controller E310/E312.
+
+
+Features
+--------
+
+Features of the zxdh PMD are:
+
+- Multi arch support: x86_64, ARMv8.
+- Multiple queues for TX and RX
+- Receiver Side Scaling (RSS)
+- MAC/VLAN filtering
+- Checksum offload
+- TSO offload
+- VLAN/QinQ stripping and inserting
+- Promiscuous mode
+- Port hardware statistics
+- Link state information
+- Link flow control
+- Scattered and gather for TX and RX
+- SR-IOV VF
+- VLAN filter and VLAN offload
+- Allmulticast mode
+- MTU update
+- Jumbo frames
+- Unicast MAC filter
+- Multicast MAC filter
+- Flow API
+- Set Link down or up
+- FW version
+- LRO
+
+Prerequisites
+-------------
+
+This PMD requires the NPSDK library for system initialization and allocation of resources.
+Communication between PMD and kernel modules is mediated by zxdh Kernel modules.
+The NPSDK library and zxdh Kernel modules are not part of DPDK and must be installed
+separately:
+
+- Getting the latest NPSDK library and software support: contact your
+  ZTE representative (public download link to be added).
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+Limitations or Known issues
+---------------------------
+X86-32, Power8, ARMv7 and BSD are not supported yet.
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..3778d1b29a 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -61,6 +61,7 @@  drivers = [
         'vhost',
         'virtio',
         'vmxnet3',
+        'zxdh',
 ]
 std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
 std_deps += ['bus_pci']         # very many PMDs depend on PCI, so make std
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
new file mode 100644
index 0000000000..85e6eaa999
--- /dev/null
+++ b/drivers/net/zxdh/meson.build
@@ -0,0 +1,94 @@ 
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023 ZTE Corporation
+
+sources += files('zxdh_ethdev.c',
+	'zxdh_pci.c',
+	'zxdh_rxtx.c',
+	'zxdh_queue.c',
+	'zxdh_ethdev_ops.c',
+	'zxdh_flow.c',
+	'zxdh_mtr.c',
+	'zxdh_mtr_drv.c',
+	'zxdh_common.c',
+	'zxdh_tables.c',
+	'zxdh_telemetry.c',
+	'zxdh_msg_chan.c',
+	)
+
+fs=import('fs')
+project_dir = meson.source_root()
+lib_npsdk_dir = '/usr/include/npsdk'
+message('lib npsdk dir :  ' +lib_npsdk_dir)
+dpp_include = lib_npsdk_dir + '/dpp/include/'
+
+cflags_options = [
+		'-D DPP_FOR_PCIE',
+		'-D MACRO_CPU64',
+
+]
+foreach option:cflags_options
+		if cc.has_argument(option)
+				cflags += option
+		endif
+endforeach
+cflags += '-fno-strict-aliasing'
+
+if arch_subdir == 'x86'
+	lib_name = 'libdpp_x86_64_lit_64_rel'
+else
+	lib_name = 'libdpp_arm_aarch64_lit_64_rel'
+endif
+message('lib npsdk name :  ' + lib_name)
+
+lib = cc.find_library(lib_name , dirs : ['/usr/lib64' ], required: true)
+
+
+if not lib.found()
+	build = false
+	reason = 'missing dependency, lib_name'
+else
+	ext_deps += lib
+	message(lib_npsdk_dir + '/sdk_comm/sdk_comm/comm/include')
+	includes += include_directories(lib_npsdk_dir + '/sdk_comm/sdk_comm/comm/include')
+	includes += include_directories(dpp_include)
+	includes += include_directories(dpp_include + '/dev/module/se/')
+	includes += include_directories(dpp_include + '/dev/chip/')
+	includes += include_directories(dpp_include + '/api/')
+	includes += include_directories(dpp_include + '/dev/reg/')
+	includes += include_directories(dpp_include + '/dev/module/')
+	includes += include_directories(dpp_include + '/qos/')
+	includes += include_directories(dpp_include + '/agentchannel/')
+
+	includes += include_directories(dpp_include + '/diag/')
+	includes += include_directories(dpp_include + '/dev/module/ppu/')
+	includes += include_directories(dpp_include + '/dev/module/table/se/')
+	includes += include_directories(dpp_include + '/dev/module/nppu/')
+	includes += include_directories(dpp_include + '/dev/module/tm/')
+	includes += include_directories(dpp_include + '/dev/module/dma/')
+	includes += include_directories(dpp_include + '/dev/module/ddos/')
+	includes += include_directories(dpp_include + '/dev/module/oam/')
+	includes += include_directories(dpp_include + '/dev/module/trpg/')
+	includes += include_directories(dpp_include + '/dev/module/dtb/')
+endif
+
+deps += ['kvargs', 'bus_pci', 'timer']
+
+if arch_subdir == 'x86'
+	if not machine_args.contains('-mno-avx512f')
+		if cc.has_argument('-mavx512f') and cc.has_argument('-mavx512vl') and cc.has_argument('-mavx512bw')
+			cflags += ['-DCC_AVX512_SUPPORT']
+			zxdh_avx512_lib = static_library('zxdh_avx512_lib',
+						  dependencies: [static_rte_ethdev,
+						static_rte_kvargs, static_rte_bus_pci],
+						  include_directories: includes,
+						  c_args: [cflags, '-mavx512f', '-mavx512bw', '-mavx512vl'])
+			if (toolchain == 'gcc' and cc.version().version_compare('>=8.3.0'))
+				cflags += '-DVHOST_GCC_UNROLL_PRAGMA'
+			elif (toolchain == 'clang' and cc.version().version_compare('>=3.7.0'))
+				cflags += '-DVHOST_CLANG_UNROLL_PRAGMA'
+			elif (toolchain == 'icc' and cc.version().version_compare('>=16.0.0'))
+				cflags += '-DVHOST_ICC_UNROLL_PRAGMA'
+			endif
+		endif
+	endif
+endif
diff --git a/drivers/net/zxdh/msg_chan_pub.h b/drivers/net/zxdh/msg_chan_pub.h
new file mode 100644
index 0000000000..f2413b2efa
--- /dev/null
+++ b/drivers/net/zxdh/msg_chan_pub.h
@@ -0,0 +1,274 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_MSG_CHAN_PUB_H_
+#define _ZXDH_MSG_CHAN_PUB_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <stdint.h>
+
+#include <rte_ethdev.h>
+
+#define PCI_NAME_LENGTH     16
+
+enum DRIVER_TYPE {
+	MSG_CHAN_END_MPF = 0,
+	MSG_CHAN_END_PF,
+	MSG_CHAN_END_VF,
+	MSG_CHAN_END_RISC,
+};
+
+enum BAR_MSG_RTN {
+	BAR_MSG_OK = 0,
+	BAR_MSG_ERR_MSGID,
+	BAR_MSG_ERR_NULL,
+	BAR_MSG_ERR_TYPE, /* Message type exception */
+	BAR_MSG_ERR_MODULE, /* Module ID exception */
+	BAR_MSG_ERR_BODY_NULL, /* Message body exception */
+	BAR_MSG_ERR_LEN, /* Message length exception */
+	BAR_MSG_ERR_TIME_OUT, /* Message sending length too long */
+	BAR_MSG_ERR_NOT_READY, /* Abnormal message sending conditions*/
+	BAR_MEG_ERR_NULL_FUNC, /* Empty receive processing function pointer*/
+	BAR_MSG_ERR_REPEAT_REGISTER, /* Module duplicate registration*/
+	BAR_MSG_ERR_UNGISTER, /* Repeated deregistration*/
+	/**
+	 * The sending interface parameter boundary structure pointer is empty
+	 */
+	BAR_MSG_ERR_NULL_PARA,
+	BAR_MSG_ERR_REPSBUFF_LEN, /* The length of reps_buff is too short*/
+	/**
+	 * Unable to find the corresponding message processing function for this module
+	 */
+	BAR_MSG_ERR_MODULE_NOEXIST,
+	/**
+	 * The virtual address in the parameters passed in by the sending interface is empty
+	 */
+	BAR_MSG_ERR_VIRTADDR_NULL,
+	BAR_MSG_ERR_REPLY, /* sync msg resp_error */
+	BAR_MSG_ERR_MPF_NOT_SCANNED,
+	BAR_MSG_ERR_KERNEL_READY,
+	BAR_MSG_ERR_USR_RET_ERR,
+	BAR_MSG_ERR_ERR_PCIEID,
+	BAR_MSG_ERR_SOCKET, /* netlink sockte err */
+};
+
enum bar_module_id {
	BAR_MODULE_DBG = 0, /* 0:  debug */
	BAR_MODULE_TBL,     /* 1:  resource table */
	BAR_MODULE_MISX,    /* 2:  config msix */
	BAR_MODULE_SDA,     /* 3: */
	BAR_MODULE_RDMA,    /* 4: */
	BAR_MODULE_DEMO,    /* 5:  channel test */
	BAR_MODULE_SMMU,    /* 6: */
	BAR_MODULE_MAC,     /* 7:  mac rx/tx stats */
	BAR_MODULE_VDPA,    /* 8:  vdpa live migration */
	BAR_MODULE_VQM,     /* 9:  vqm live migration */
	BAR_MODULE_NP,      /* 10: vf msg callback np */
	BAR_MODULE_VPORT,   /* 11: get vport */
	BAR_MODULE_BDF,     /* 12: get bdf */
	BAR_MODULE_RISC_READY, /* 13: */
	BAR_MODULE_REVERSE,    /* 14: byte stream reverse */
	BAR_MDOULE_NVME,       /* 15: */
	BAR_MDOULE_NPSDK,      /* 16: */
	BAR_MODULE_NP_TODO,    /* 17: */
	MODULE_BAR_MSG_TO_PF,  /* 18: */
	MODULE_BAR_MSG_TO_VF,  /* 19: */

	MODULE_FLASH = 32,
	BAR_MODULE_OFFSET_GET = 33,
	BAR_EVENT_OVS_WITH_VCB = 36, /* ovs<-->vcb */

	BAR_MSG_MODULE_NUM = 100,
};

/* Map a bar_module_id value to its symbolic name; "NA" for unknown values. */
static inline const char *module_id_name(int val)
{
	static const char * const names[] = {
		[BAR_MODULE_DBG]        = "BAR_MODULE_DBG",
		[BAR_MODULE_TBL]        = "BAR_MODULE_TBL",
		[BAR_MODULE_MISX]       = "BAR_MODULE_MISX",
		[BAR_MODULE_SDA]        = "BAR_MODULE_SDA",
		[BAR_MODULE_RDMA]       = "BAR_MODULE_RDMA",
		[BAR_MODULE_DEMO]       = "BAR_MODULE_DEMO",
		[BAR_MODULE_SMMU]       = "BAR_MODULE_SMMU",
		[BAR_MODULE_MAC]        = "BAR_MODULE_MAC",
		[BAR_MODULE_VDPA]       = "BAR_MODULE_VDPA",
		[BAR_MODULE_VQM]        = "BAR_MODULE_VQM",
		[BAR_MODULE_NP]         = "BAR_MODULE_NP",
		[BAR_MODULE_VPORT]      = "BAR_MODULE_VPORT",
		[BAR_MODULE_BDF]        = "BAR_MODULE_BDF",
		[BAR_MODULE_RISC_READY] = "BAR_MODULE_RISC_READY",
		[BAR_MODULE_REVERSE]    = "BAR_MODULE_REVERSE",
		[BAR_MDOULE_NVME]       = "BAR_MDOULE_NVME",
		[BAR_MDOULE_NPSDK]      = "BAR_MDOULE_NPSDK",
		[BAR_MODULE_NP_TODO]    = "BAR_MODULE_NP_TODO",
		[MODULE_BAR_MSG_TO_PF]  = "MODULE_BAR_MSG_TO_PF",
		[MODULE_BAR_MSG_TO_VF]  = "MODULE_BAR_MSG_TO_VF",
		[MODULE_FLASH]          = "MODULE_FLASH",
		[BAR_MODULE_OFFSET_GET] = "BAR_MODULE_OFFSET_GET",
		[BAR_EVENT_OVS_WITH_VCB] = "BAR_EVENT_OVS_WITH_VCB",
	};

	if (val < 0 || val >= (int)(sizeof(names) / sizeof(names[0])) ||
	    names[val] == NULL)
		return "NA";
	return names[val];
}
+
+struct bar_msg_header {
+	uint8_t valid : 1; /* used by __bar_chan_msg_valid_set/get */
+	uint8_t sync  : 1;
+	uint8_t emec  : 1; /* emergency? */
+	uint8_t ack   : 1; /* ack msg? */
+	uint8_t poll  : 1;
+	uint8_t usr   : 1;
+	uint8_t rsv;
+	uint16_t module_id;
+	uint16_t len;
+	uint16_t msg_id;
+	uint16_t src_pcieid;
+	uint16_t dst_pcieid; /* used in PF-->VF */
+}; /* 12B */
+#define BAR_MSG_ADDR_CHAN_INTERVAL  (2 * 1024) /* channel size */
+#define BAR_MSG_PLAYLOAD_OFFSET     (sizeof(struct bar_msg_header))
+#define BAR_MSG_PAYLOAD_MAX_LEN     (BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct bar_msg_header))
+
+struct zxdh_pci_bar_msg {
+	uint64_t virt_addr; /* bar addr */
+	void    *payload_addr;
+	uint16_t payload_len;
+	uint16_t emec;
+	uint16_t src; /* refer to BAR_DRIVER_TYPE */
+	uint16_t dst; /* refer to BAR_DRIVER_TYPE */
+	uint16_t module_id;
+	uint16_t src_pcieid;
+	uint16_t dst_pcieid;
+	uint16_t usr;
+}; /* 32B */
+
+struct zxdh_msg_recviver_mem {
+	void    *recv_buffer; /* first 4B is head, followed by payload */
+	uint64_t buffer_len;
+}; /* 16B */
+
+enum pciebar_layout_type {
+	URI_VQM      = 0,
+	URI_SPINLOCK = 1,
+	URI_FWCAP    = 2,
+	URI_FWSHR    = 3,
+	URI_DRS_SEC  = 4,
+	URI_RSV      = 5,
+	URI_CTRLCH   = 6,
+	URI_1588     = 7,
+	URI_QBV      = 8,
+	URI_MACPCS   = 9,
+	URI_RDMA     = 10,
+/* DEBUG PF */
+	URI_MNP      = 11,
+	URI_MSPM     = 12,
+	URI_MVQM     = 13,
+	URI_MDPI     = 14,
+	URI_NP       = 15,
+/* END DEBUG PF */
+	URI_MAX,
+};
+
+struct bar_offset_params {
+	uint64_t virt_addr;  /* Bar space control space virtual address */
+	uint16_t pcie_id;
+	uint16_t type;  /* Module types corresponding to PCIBAR planning */
+};
+struct bar_offset_res {
+	uint32_t bar_offset;
+	uint32_t bar_length;
+};
+
+/**
+ * Get the offset value of the specified module
+ * @bar_offset_params:  input parameter
+ * @bar_offset_res: Module offset and length
+ */
+int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res);
+
+typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, void *reps_buffer,
+					uint16_t *reps_len, void *dev);
+
+/**
+ * Send synchronization messages through PCIE BAR space
+ * @in: Message sending information
+ * @result: Message result feedback
+ * @return: 0 successful, other failures
+ */
+int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result);
+
+/**
+ * Sending asynchronous messages through PCIE BAR space
+ * @in: Message sending information
+ * @result: Message result feedback
+ * @return: 0 successful, other failures
+ */
+int zxdh_bar_chan_async_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result);
+
+/**
+ * PCIE BAR spatial message method, registering message reception callback
+ * @module_id: Registration module ID
+ * @callback: Pointer to the receive processing function implemented by the module
+ * @return: 0 successful, other failures
+ * Usually called during driver initialization
+ */
+int zxdh_bar_chan_msg_recv_register(uint8_t module_id, zxdh_bar_chan_msg_recv_callback callback);
+
+/**
+ * PCIE BAR spatial message method, unregistered message receiving callback
+ * @module_id: Kernel PCIE device address
+ * @return: 0 successful, other failures
+ * Called during driver uninstallation
+ */
+int zxdh_bar_chan_msg_recv_unregister(uint8_t module_id);
+
+/**
+ * Provide a message receiving interface for device driver interrupt handling functions
+ * @src:  Driver type for sending interrupts
+ * @dst:  Device driver's own driver type
+ * @virt_addr: The communication bar address of the device
+ * @return: 0 successful, other failures
+ */
+int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev);
+
+/**
+ * Initialize spilock and clear the hardware lock address it belongs to
+ * @pcie_id: PCIE_id of PF device
+ * @bar_base_addr: Bar0 initial base address
+ */
+int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr);
+
+struct msix_para {
+	uint16_t pcie_id;
+	uint16_t vector_risc;
+	uint16_t vector_pfvf;
+	uint16_t vector_mpf;
+	uint64_t virt_addr;
+	uint16_t driver_type; /* refer to DRIVER_TYPE */
+};
+int zxdh_bar_chan_enable(struct msix_para *_msix_para, uint16_t *vport);
+int zxdh_msg_chan_init(void);
+int zxdh_bar_msg_chan_exit(void);
+
+struct zxdh_res_para {
+	uint64_t virt_addr;
+	uint16_t pcie_id;
+	uint16_t src_type; /* refer to BAR_DRIVER_TYPE */
+};
+int zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id);
+int zxdh_get_res_hash_id(struct zxdh_res_para *in, uint8_t *hash_id);
+
+int zxdh_mpf_bar0_phyaddr_get(uint64_t *pPhyaddr);
+int zxdh_mpf_bar0_vaddr_get(uint64_t *pVaddr);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _ZXDH_MSG_CHAN_PUB_H_ */
diff --git a/drivers/net/zxdh/version.map b/drivers/net/zxdh/version.map
new file mode 100644
index 0000000000..4a76d1d52d
--- /dev/null
+++ b/drivers/net/zxdh/version.map
@@ -0,0 +1,3 @@ 
+DPDK_21 {
+	local: *;
+};
diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
new file mode 100644
index 0000000000..ca62393a08
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -0,0 +1,512 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+
+#include "zxdh_logs.h"
+#include "zxdh_common.h"
+#include "zxdh_pci.h"
+#include "zxdh_msg_chan.h"
+#include "zxdh_queue.h"
+#include "zxdh_ethdev_ops.h"
+
+#define ZXDH_COMMON_FIELD_PCIEID   0
+#define ZXDH_COMMON_FIELD_DATACH   3
+#define ZXDH_COMMON_FIELD_VPORT    4
+#define ZXDH_COMMON_FIELD_PHYPORT  6
+#define ZXDH_COMMON_FIELD_PANELID  5
+#define ZXDH_COMMON_FIELD_HASHIDX  7
+
+#define ZXDH_MAC_STATS_OFFSET   (0x1000 + 408)
+#define ZXDH_MAC_BYTES_OFFSET   (0xb000)
+
/* Convert a TSC tick count to whole seconds. */
uint64_t get_cur_time_s(uint64_t tsc)
{
	uint64_t hz = rte_get_tsc_hz();

	return tsc / hz;
}
+
+/** Nano seconds per second */
+#define NS_PER_SEC 1E9
+
+uint64_t get_time_ns(uint64_t tsc)
+{
+	return (tsc*NS_PER_SEC/rte_get_tsc_hz());
+}
/**
 * Fun: print @buff as two-digit hex bytes, 16 per line, to stdout.
 */
void zxdh_hex_dump(uint8_t *buff, uint16_t buff_size)
{
	uint16_t idx = 0;

	while (idx < buff_size) {
		if ((idx & 0xf) == 0)
			printf("\n");
		printf("%02x ", buff[idx]);
		idx++;
	}
	printf("\n");
}
/**
 * Fun: 32-bit read from BAR @bar at byte offset @reg.
 * The volatile access keeps the compiler from caching or eliding the
 * load. NOTE(review): assumes hw->bar_addr[bar] holds a valid mapped
 * BAR base and @reg is 4-byte aligned -- confirm with callers.
 */
uint32_t zxdh_read_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg)
{
	struct zxdh_hw *hw = dev->data->dev_private;
	uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
	uint32_t val      = *((volatile uint32_t *)(baseaddr + reg));
	return val;
}
/**
 * Fun: 32-bit write of @val to BAR @bar at byte offset @reg.
 * The volatile access keeps the compiler from reordering or eliding the
 * store. NOTE(review): assumes hw->bar_addr[bar] holds a valid mapped
 * BAR base and @reg is 4-byte aligned -- confirm with callers.
 */
void zxdh_write_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val)
{
	struct zxdh_hw *hw = dev->data->dev_private;
	uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
	*((volatile uint32_t *)(baseaddr + reg)) = val;
}
+/**
+ * Fun:
+ */
+int32_t zxdh_send_command_toriscv(struct rte_eth_dev *dev,
+	struct zxdh_pci_bar_msg      *in,
+	enum bar_module_id           module_id,
+	struct zxdh_msg_recviver_mem *msg_rsp)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	in->virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+	in->src = hw->is_pf ? MSG_CHAN_END_PF : MSG_CHAN_END_VF;
+	in->dst = MSG_CHAN_END_RISC;
+	in->module_id = module_id;
+	in->src_pcieid = hw->pcie_id;
+	if (zxdh_bar_chan_sync_msg_send(in, msg_rsp) != BAR_MSG_OK) {
+		PMD_DRV_LOG(ERR, "Failed to send sync messages or receive response");
+		PMD_DRV_LOG(ERR, "msg_data:");
+		HEX_DUMP(in->payload_addr, in->payload_len);
+		return -1;
+	}
+	return 0;
+}
+/**
+ * Fun;
+ */
+#define ZXDH_MSG_RSP_SIZE_MAX  512
+static int32_t zxdh_send_command(struct zxdh_hw *hw,
+	struct zxdh_pci_bar_msg      *desc,
+	enum bar_module_id            module_id,
+	struct zxdh_msg_recviver_mem *msg_rsp)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	desc->virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+	desc->src = hw->is_pf ? MSG_CHAN_END_PF:MSG_CHAN_END_VF;
+	desc->dst = MSG_CHAN_END_RISC;
+	desc->module_id = module_id;
+	desc->src_pcieid = hw->pcie_id;
+
+	msg_rsp->buffer_len  = ZXDH_MSG_RSP_SIZE_MAX;
+	msg_rsp->recv_buffer = rte_zmalloc(NULL, msg_rsp->buffer_len, 0);
+	if (unlikely(msg_rsp->recv_buffer == NULL)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate messages response");
+		return -ENOMEM;
+	}
+
+	if (zxdh_bar_chan_sync_msg_send(desc, msg_rsp) != BAR_MSG_OK) {
+		PMD_DRV_LOG(ERR, "Failed to send sync messages or receive response");
+		PMD_DRV_LOG(ERR, "msg_data:");
+		HEX_DUMP(desc->payload_addr, desc->payload_len);
+		rte_free(msg_rsp->recv_buffer);
+		return -1;
+	}
+
+	return 0;
+}
+/**
+ * Fun:
+ */
+struct zxdh_common_rsp_hdr {
+	uint8_t  rsp_status;
+	uint16_t rsp_len;
+	uint8_t  reserved;
+	uint8_t  payload_status;
+	uint8_t  rsv;
+	uint16_t payload_len;
+} __rte_packed; /* 8B */
+static int32_t zxdh_common_rsp_check(struct zxdh_msg_recviver_mem *msg_rsp,
+		void *buff, uint16_t len)
+{
+	struct zxdh_common_rsp_hdr *rsp_hdr = (struct zxdh_common_rsp_hdr *)msg_rsp->recv_buffer;
+
+	if ((rsp_hdr->payload_status != 0xaa) || (rsp_hdr->payload_len != len)) {
+		PMD_DRV_LOG(ERR, "Common response is invalid, status:0x%x rsp_len:%d",
+					rsp_hdr->payload_status, rsp_hdr->payload_len);
+		return -1;
+	}
+	if (len != 0)
+		memcpy(buff, rsp_hdr + 1, len);
+
+	return 0;
+}
+/**
+ * Fun:
+ */
+struct zxdh_common_msg {
+	uint8_t  type;    /* 0:read table 1:write table */
+	uint8_t  field;
+	uint16_t pcie_id;
+	uint16_t slen;    /* Data length for write table */
+	uint16_t reserved;
+} __rte_packed; /* 8B */
+static int32_t zxdh_fill_common_msg(struct zxdh_hw *hw,
+	struct zxdh_pci_bar_msg *desc,
+	uint8_t        type,
+	uint8_t        field,
+	void          *buff,
+	uint16_t       buff_size)
+{
+	uint64_t msg_len = sizeof(struct zxdh_common_msg) + buff_size;
+
+	desc->payload_addr = rte_zmalloc(NULL, msg_len, 0);
+	if (unlikely(desc->payload_addr == NULL)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate msg_data");
+		return -ENOMEM;
+	}
+	memset(desc->payload_addr, 0, msg_len);
+	desc->payload_len = msg_len;
+	struct zxdh_common_msg *msg_data = (struct zxdh_common_msg *)desc->payload_addr;
+
+	msg_data->type = type;
+	msg_data->field = field;
+	msg_data->pcie_id = hw->pcie_id;
+	msg_data->slen = buff_size;
+	if (buff_size != 0)
+		memcpy(msg_data + 1, buff, buff_size);
+
+	return 0;
+}
+/**
+ * Fun:
+ */
+#define ZXDH_COMMON_TABLE_READ   0
+#define ZXDH_COMMON_TABLE_WRITE  1
+static int32_t zxdh_common_table_read(struct zxdh_hw *hw, uint8_t field,
+			void *buff, uint16_t buff_size)
+{
+	PMD_INIT_FUNC_TRACE();
+	if (!hw->msg_chan_init) {
+		PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
+		return -1;
+	}
+	struct zxdh_pci_bar_msg desc;
+	int32_t ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_READ, field, NULL, 0);
+
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to fill common msg");
+		return ret;
+	}
+	struct zxdh_msg_recviver_mem msg_rsp;
+
+	ret = zxdh_send_command(hw, &desc, BAR_MODULE_TBL, &msg_rsp);
+	if (ret != 0)
+		goto free_msg_data;
+
+	ret = zxdh_common_rsp_check(&msg_rsp, buff, buff_size);
+	if (ret != 0)
+		goto free_rsp_data;
+
+free_rsp_data:
+	rte_free(msg_rsp.recv_buffer);
+free_msg_data:
+	rte_free(desc.payload_addr);
+	return ret;
+}
+/**
+ * Fun:
+ */
+static int32_t zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field,
+			void *buff, uint16_t buff_size)
+{
+	PMD_INIT_FUNC_TRACE();
+	if (!hw->msg_chan_init) {
+		PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
+		return -1;
+	}
+	if ((buff_size != 0) && (buff == NULL)) {
+		PMD_DRV_LOG(ERR, "Buff is invalid");
+		return -1;
+	}
+	struct zxdh_pci_bar_msg desc;
+	int32_t ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE,
+					field, buff, buff_size);
+
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to fill common msg");
+		return ret;
+	}
+	struct zxdh_msg_recviver_mem msg_rsp;
+
+	ret = zxdh_send_command(hw, &desc, BAR_MODULE_TBL, &msg_rsp);
+	if (ret != 0)
+		goto free_msg_data;
+
+	ret = zxdh_common_rsp_check(&msg_rsp, NULL, 0);
+	if (ret != 0)
+		goto free_rsp_data;
+
+free_rsp_data:
+	rte_free(msg_rsp.recv_buffer);
+free_msg_data:
+	rte_free(desc.payload_addr);
+	return ret;
+}
+/**
+ * Fun: program the data channel of the common table.
+ * Payload layout: queue_num(2 bytes) followed by the physical channel
+ * number (2 bytes) of each logical queue.
+ * Returns 0 on success, -ENOMEM or a table-write error otherwise.
+ */
+int32_t zxdh_datach_set(struct rte_eth_dev *dev)
+{
+	/* payload: queue_num(2byte) + pch1(2byte) + ** + pchn */
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t buff_size = (hw->queue_num + 1) * 2;
+	/* rte_zmalloc() returns zero-initialized memory, so the former
+	 * explicit memset() was redundant and has been dropped.
+	 */
+	void *buff = rte_zmalloc(NULL, buff_size, 0);
+
+	if (unlikely(buff == NULL)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate buff");
+		return -ENOMEM;
+	}
+	uint16_t *pdata = (uint16_t *)buff;
+	*pdata++ = hw->queue_num;
+	uint16_t i;
+
+	for (i = 0; i < hw->queue_num; i++)
+		*(pdata + i) = hw->channel_context[i].ph_chno;
+
+	int32_t ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH,
+						(void *)buff, buff_size);
+
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Failed to setup data channel of common table");
+
+	rte_free(buff);
+	return ret;
+}
+/**
+ * Fun: fetch hardware statistics from the firmware (riscv) agent.
+ * The opcode selects which BAR module (VQM or MAC) services the request.
+ * Returns 0 on success, -1 on error.
+ */
+int32_t zxdh_hw_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode,
+			struct zxdh_hw_stats *hw_stats)
+{
+	enum bar_module_id module;
+
+	switch (opcode) {
+	case ZXDH_VQM_DEV_STATS_GET:
+	case ZXDH_VQM_QUEUE_STATS_GET:
+	case ZXDH_VQM_QUEUE_STATS_RESET:
+		module = BAR_MODULE_VQM;
+		break;
+	case ZXDH_MAC_STATS_GET:
+	case ZXDH_MAC_STATS_RESET:
+		module = BAR_MODULE_MAC;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+		return -1;
+	}
+	/* Build the control message for the firmware */
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+
+	ctrl_msg_build(hw, opcode, &msg_info);
+
+	/* Reply buffer handed to the message channel */
+	struct zxdh_msg_reply_info reply = {0};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = &reply,
+		.buffer_len = sizeof(struct zxdh_msg_reply_info),
+	};
+	struct zxdh_pci_bar_msg in = {
+		.payload_addr = &msg_info,
+		.payload_len = sizeof(msg_info),
+	};
+
+	if (zxdh_send_command_toriscv(dev, &in, module, &result) != 0) {
+		PMD_DRV_LOG(ERR, "Failed to get hw stats");
+		return -1;
+	}
+	/* Copy the per-port counters out of the firmware reply */
+	rte_memcpy(hw_stats, &reply.reply_body.riscv_rsp.port_hw_stats,
+			sizeof(struct zxdh_hw_stats));
+	return 0;
+}
+
+/* Read MAC counters straight out of the BAR0-mapped statistics memory.
+ * Ports at <= 25G use a per-port window selected by (phyport % 4);
+ * faster ports use the dedicated window at index 4.
+ */
+int32_t zxdh_hw_mac_get(struct rte_eth_dev *dev, struct zxdh_hw_mac_stats *mac_stats,
+			struct zxdh_hw_mac_bytes *mac_bytes)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t base = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MAC_OFFSET);
+	/* 352/32 are the per-window strides of the stats/bytes regions */
+	uint32_t win = (hw->speed <= 25000) ? (hw->phyport % 4) : 4;
+	uint64_t stats_addr = base + ZXDH_MAC_STATS_OFFSET + 352 * win;
+	uint64_t bytes_addr = base + ZXDH_MAC_BYTES_OFFSET + 32 * win;
+
+	rte_memcpy(mac_stats, (void *)stats_addr, sizeof(struct zxdh_hw_mac_stats));
+	rte_memcpy(mac_bytes, (void *)bytes_addr, sizeof(struct zxdh_hw_mac_bytes));
+
+	return 0;
+}
+/**
+ * Fun: ask the firmware (riscv) agent to reset hardware statistics.
+ * Only the device-level VQM reset and the MAC reset are routed here.
+ * NOTE(review): ZXDH_VQM_QUEUE_STATS_RESET is accepted by the switch in
+ * zxdh_hw_stats_get() but not here -- confirm whether queue-stats reset
+ * is intentionally dispatched through the GET path only.
+ * Returns 0 on success, -1 on error.
+ */
+int32_t zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode)
+{
+	enum bar_module_id module_id;
+
+	switch (opcode) {
+	case ZXDH_VQM_DEV_STATS_RESET:
+		module_id = BAR_MODULE_VQM;
+		break;
+	case ZXDH_MAC_STATS_RESET:
+		module_id = BAR_MODULE_MAC;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+		return -1;
+	}
+	/* Reply buffer handed to the message channel */
+	struct zxdh_msg_reply_info reply_info = {0};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = &reply_info,
+		.buffer_len = sizeof(struct zxdh_msg_reply_info),
+	};
+	/* Build the control message for the firmware */
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	ctrl_msg_build(hw, opcode, &msg_info);
+	struct zxdh_pci_bar_msg in = {0};
+
+	in.payload_addr = &msg_info;
+	in.payload_len = sizeof(msg_info);
+	/* Send and discard the reply body; only success/failure matters */
+	if (zxdh_send_command_toriscv(dev, &in, module_id, &result) != 0) {
+		PMD_DRV_LOG(ERR, "Failed to reset hw stats");
+		return -1;
+	}
+	return 0;
+}
+/* Populate @param with the resource-query parameters (pcie id, control
+ * channel address, requesting module) taken from the device private data.
+ */
+static inline void zxdh_fill_res_para(struct rte_eth_dev *dev, struct zxdh_res_para *param)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	param->src_type  = BAR_MODULE_TBL;
+	param->pcie_id   = hw->pcie_id;
+	param->virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
+}
+/* Query the panel id of this port through the resource channel. */
+int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid)
+{
+	struct zxdh_res_para param;
+
+	zxdh_fill_res_para(dev, &param);
+	return zxdh_get_res_panel_id(&param, pannelid);
+}
+/* Read this port's physical port number from the common table. */
+int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	return zxdh_common_table_read(hw, ZXDH_COMMON_FIELD_PHYPORT,
+					(void *)phyport, sizeof(*phyport));
+}
+/* Query the hash index of this port through the resource channel. */
+int32_t zxdh_hashidx_get(struct rte_eth_dev *dev, uint8_t *hash_idx)
+{
+	struct zxdh_res_para param;
+
+	zxdh_fill_res_para(dev, &param);
+	return zxdh_get_res_hash_id(&param, hash_idx);
+}
+#define DUPLEX_HALF   RTE_BIT32(0)
+#define DUPLEX_FULL   RTE_BIT32(1)
+
+/**
+ * Fun: retrieve link status, speed and duplex.
+ * Link status comes from the device config space when the STATUS feature
+ * was negotiated; when the link is up, speed/duplex are queried from the
+ * firmware through the MAC BAR channel.
+ * Fixes vs. original: "admain_status" typo in the log message, and
+ * dropped trailing "\n" from PMD_DRV_LOG strings (the log macro already
+ * terminates the line per DPDK convention).
+ * Returns 0 on success, -1 on message-channel failure.
+ */
+int32_t zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t status = 0;
+
+	if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS))
+		zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, status),
+					&status, sizeof(status));
+
+	link->link_status = status;
+
+	if (status == RTE_ETH_LINK_DOWN) {
+		PMD_DRV_LOG(INFO, "Port is down!");
+		link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	} else {
+		struct zxdh_msg_info msg;
+		struct zxdh_pci_bar_msg in = {0};
+		struct zxdh_msg_reply_info rep = {0};
+
+		ctrl_msg_build(hw, ZXDH_MAC_LINK_GET, &msg);
+
+		in.payload_addr = &msg;
+		in.payload_len = sizeof(msg);
+
+		struct zxdh_msg_recviver_mem rsp_data = {
+			.recv_buffer = (void *)&rep,
+			.buffer_len = sizeof(rep),
+		};
+		if (zxdh_send_command_toriscv(dev, &in, BAR_MODULE_MAC, &rsp_data) != BAR_MSG_OK) {
+			PMD_DRV_LOG(ERR, "Failed to get link info");
+			return -1;
+		}
+		struct zxdh_msg_reply_body *ack_msg =
+				&(((struct zxdh_msg_reply_info *)rsp_data.recv_buffer)->reply_body);
+
+		link->link_speed = ack_msg->link_msg.speed;
+		hw->speed_mode = ack_msg->link_msg.speed_modes;
+		if ((ack_msg->link_msg.duplex & DUPLEX_FULL) == DUPLEX_FULL)
+			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		else
+			link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+
+		PMD_DRV_LOG(INFO, "Port is up!");
+	}
+	hw->speed = link->link_speed;
+	PMD_DRV_LOG(INFO, "sw : admin_status %d ", hw->admin_status);
+	PMD_DRV_LOG(INFO, "hw : link_status: %d,  link_speed: %d, link_duplex %d",
+				link->link_status, link->link_speed, link->link_duplex);
+	return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
new file mode 100644
index 0000000000..2010d01e63
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -0,0 +1,154 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_COMMON_H_
+#define _ZXDH_COMMON_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_common.h>
+#include "msg_chan_pub.h"
+#include "zxdh_logs.h"
+
+/* The low byte of a pcie_id encodes the VF index. */
+#define VF_IDX(pcie_id)  (pcie_id & 0xff)
+/* Derive the PF pcie_id: keep the upper byte, force function-active bit 11. */
+#define PF_PCIE_ID(pcie_id)  ((pcie_id & 0xff00) | 1<<11)
+/* Derive a VF pcie_id from the owner's upper byte, bit 11 and the VF index. */
+#define VF_PCIE_ID(pcie_id, vf_idx)  ((pcie_id & 0xff00) | (1<<11) | (vf_idx&0xff))
+
+#define VFUNC_ACTIVE_BIT  11
+#define VFUNC_NUM_MASK    0xff
+/* Clear the VF-number bits and the VF-active bit to get the owner PF vport. */
+#define GET_OWNER_PF_VPORT(vport)  ((vport&~(VFUNC_NUM_MASK))&(~(1<<VFUNC_ACTIVE_BIT)))
+
+/* riscv msg opcodes: command values exchanged with the on-board agent.
+ * NOTE(review): ZXDH_VQM_QUEUE_STATS_GET evaluates to 24 and collides
+ * with the explicit ZXDH_DEV_STATUS_NOTIFY = 24 below -- confirm the
+ * intended numbering against the firmware protocol.
+ */
+enum zxdh_agent_opc {
+	ZXDH_MAC_STATS_GET = 10,
+	ZXDH_MAC_STATS_RESET,
+	ZXDH_MAC_PHYPORT_INIT,
+	ZXDH_MAC_AUTONEG_SET,
+	ZXDH_MAC_LINK_GET,
+	ZXDH_MAC_LED_BLINK,
+	ZXDH_MAC_FC_SET  = 18,
+	ZXDH_MAC_FC_GET = 19,
+	ZXDH_MAC_MODULE_EEPROM_READ = 20,
+	ZXDH_VQM_DEV_STATS_GET = 21,
+	ZXDH_VQM_DEV_STATS_RESET,
+	ZXDH_FLASH_FIR_VERSION_GET = 23,
+	ZXDH_VQM_QUEUE_STATS_GET,
+	ZXDH_DEV_STATUS_NOTIFY = 24,
+	ZXDH_VQM_QUEUE_STATS_RESET,
+} __rte_packed;
+
+/* Aggregate per-port VQM counters; copied byte-wise out of the firmware
+ * reply by zxdh_hw_stats_get(), so the layout must match the agent's.
+ */
+struct zxdh_hw_stats {
+	uint64_t rx_total;
+	uint64_t tx_total;
+	uint64_t rx_bytes;
+	uint64_t tx_bytes;
+	uint64_t rx_error;
+	uint64_t tx_error;
+	uint64_t rx_drop;
+} __rte_packed;
+
+/* MAC packet counters, laid out exactly as in the BAR0 statistics
+ * window (read byte-wise by zxdh_hw_mac_get()).
+ */
+struct zxdh_hw_mac_stats {
+	uint64_t rx_total;
+	uint64_t rx_pause;
+	uint64_t rx_unicast;
+	uint64_t rx_multicast;
+	uint64_t rx_broadcast;
+	uint64_t rx_vlan;
+	uint64_t rx_size_64;
+	uint64_t rx_size_65_127;
+	uint64_t rx_size_128_255;
+	uint64_t rx_size_256_511;
+	uint64_t rx_size_512_1023;
+	uint64_t rx_size_1024_1518;
+	uint64_t rx_size_1519_mru;
+	uint64_t rx_undersize;
+	uint64_t rx_oversize;
+	uint64_t rx_fragment;
+	uint64_t rx_jabber;
+	uint64_t rx_control;
+	uint64_t rx_eee;
+
+	uint64_t tx_total;
+	uint64_t tx_pause;
+	uint64_t tx_unicast;
+	uint64_t tx_multicast;
+	uint64_t tx_broadcast;
+	uint64_t tx_vlan;
+	uint64_t tx_size_64;
+	uint64_t tx_size_65_127;
+	uint64_t tx_size_128_255;
+	uint64_t tx_size_256_511;
+	uint64_t tx_size_512_1023;
+	uint64_t tx_size_1024_1518;
+	uint64_t tx_size_1519_mtu;
+	uint64_t tx_undersize;
+	uint64_t tx_oversize;
+	uint64_t tx_fragment;
+	uint64_t tx_jabber;
+	uint64_t tx_control;
+	uint64_t tx_eee;
+
+	uint64_t rx_error;
+	uint64_t rx_fcs_error;
+	uint64_t rx_drop;
+
+	uint64_t tx_error;
+	uint64_t tx_fcs_error;
+	uint64_t tx_drop;
+
+} __rte_packed;
+
+/* MAC byte counters, mirroring the BAR0 bytes window layout
+ * (read byte-wise by zxdh_hw_mac_get()).
+ */
+struct zxdh_hw_mac_bytes {
+	uint64_t rx_total_bytes;
+	uint64_t rx_good_bytes;
+	uint64_t tx_total_bytes;
+	uint64_t tx_good_bytes;
+} __rte_packed;
+
+/* Debug helper: hex-dump buff_size bytes of buff to the log. */
+void zxdh_hex_dump(uint8_t *buff, uint16_t buff_size);
+
+/* Raw BAR register access helpers. */
+uint32_t zxdh_read_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);
+void zxdh_write_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);
+/* Firmware-assisted stats access (see zxdh_common.c). */
+int32_t zxdh_hw_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode,
+			struct zxdh_hw_stats *hw_stats);
+int32_t zxdh_hw_mac_get(struct rte_eth_dev *dev, struct zxdh_hw_mac_stats *mac_stats,
+			struct zxdh_hw_mac_bytes *mac_bytes);
+int32_t zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_opc opcode);
+int32_t zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link);
+/* Common-table and resource-channel queries. */
+int32_t zxdh_datach_set(struct rte_eth_dev *dev);
+int32_t zxdh_vport_get(struct rte_eth_dev *dev, uint16_t *vport);
+int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid);
+int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport);
+int32_t zxdh_hashidx_get(struct rte_eth_dev *dev, uint8_t *hash_idx);
+int32_t zxdh_send_command_toriscv(struct rte_eth_dev *dev,
+			struct zxdh_pci_bar_msg *in,
+			enum bar_module_id module_id,
+			struct zxdh_msg_recviver_mem *msg_rsp);
+
+#define HEX_DUMP(buff, buff_size)  zxdh_hex_dump((uint8_t *)buff, (uint16_t)buff_size)
+
+#define ZXDH_DIRECT_FLAG_BIT       (1UL << 15)
+
+#define ZXDH_FLAG_YES 1
+#define ZXDH_FLAG_NO 0
+
+#define ZXDH_VLAN_TAG_LEN 4
+
+#define ZXDH_ETH_OVERHEAD  (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ZXDH_VLAN_TAG_LEN * 2)
+#define ZXDH_MTU_TO_PKTLEN(mtu) ((mtu) + ZXDH_ETH_OVERHEAD)
+
+/* NOTE(review): duplicates ZXDH_VLAN_TAG_LEN above (same value) and is
+ * re-defined in zxdh_ethdev.c -- consider keeping a single definition.
+ */
+#define VLAN_TAG_LEN   4/* 802.3ac tag (not DMA'd) */
+
+/* Time helpers; presumably convert a TSC reading -- defined elsewhere. */
+uint64_t get_cur_time_s(uint64_t tsc);
+uint64_t get_time_ns(uint64_t tsc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZXDH_COMMON_H_ */
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
new file mode 100644
index 0000000000..222ecbd3c1
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -0,0 +1,3431 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <ethdev_pci.h>
+#include <rte_kvargs.h>
+#include <rte_hexdump.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_pci.h"
+#include "zxdh_logs.h"
+#include "zxdh_queue.h"
+#include "zxdh_rxtx.h"
+#include "zxdh_msg_chan.h"
+#include "zxdh_common.h"
+#include "zxdh_ethdev_ops.h"
+#include "zxdh_tables.h"
+#include "dpp_dtb_table_api.h"
+#include "dpp_dev.h"
+#include "dpp_init.h"
+#include "zxdh_ethdev.h"
+#include "zxdh_table_drv.h"
+#include "dpp_log_diag.h"
+#include "dpp_dbgstat.h"
+#include "dpp_trpg_api.h"
+
+#include "zxdh_telemetry.h"
+
+/* Maps an xstat display name to the byte offset of its 64-bit counter
+ * inside the backing statistics structure.
+ */
+struct rte_zxdh_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned int offset;
+};
+/* NP (network processor) counters, offsets into struct zxdh_hw_np_stats. */
+static const struct rte_zxdh_xstats_name_off rte_zxdh_np_stat_strings[] = {
+	{"np_rx_broadcast",    offsetof(struct zxdh_hw_np_stats, np_rx_broadcast)},
+	{"np_tx_broadcast",    offsetof(struct zxdh_hw_np_stats, np_tx_broadcast)},
+	{"np_rx_mtu_drop_pkts",   offsetof(struct zxdh_hw_np_stats, np_rx_mtu_drop_pkts)},
+	{"np_tx_mtu_drop_pkts",   offsetof(struct zxdh_hw_np_stats, np_tx_mtu_drop_pkts)},
+	{"np_tx_mtu_drop_bytes",   offsetof(struct zxdh_hw_np_stats, np_tx_mtu_drop_bytes)},
+	{"np_rx_mtu_drop_bytes",   offsetof(struct zxdh_hw_np_stats, np_rx_mtu_drop_bytes)},
+	{"np_rx_plcr_drop_pkts",  offsetof(struct zxdh_hw_np_stats, np_rx_mtr_drop_pkts)},
+	{"np_rx_plcr_drop_bytes",  offsetof(struct zxdh_hw_np_stats, np_rx_mtr_drop_bytes)},
+	{"np_tx_plcr_drop_pkts",  offsetof(struct zxdh_hw_np_stats,  np_tx_mtr_drop_pkts)},
+	{"np_tx_plcr_drop_bytes",  offsetof(struct zxdh_hw_np_stats, np_tx_mtr_drop_bytes)},
+};
+/* Per-RX-queue counters, offsets into struct virtnet_rx;
+ * "rx_qX_" is prepended to each name when reported.
+ */
+static const struct rte_zxdh_xstats_name_off rte_zxdh_rxq_stat_strings[] = {
+	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
+	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
+	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
+	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
+	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
+	{"truncated_err",          offsetof(struct virtnet_rx, stats.truncated_err)},
+	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
+	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
+	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
+	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
+	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
+	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
+	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
+	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
+};
+
+
+/* Per-TX-queue counters, offsets into struct virtnet_tx;
+ * "tx_qX_" is prepended to each name when reported.
+ */
+static const struct rte_zxdh_xstats_name_off rte_zxdh_txq_stat_strings[] = {
+	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
+	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
+	{"errors",                 offsetof(struct virtnet_tx, stats.errors)},
+	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
+	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
+	{"truncated_err",          offsetof(struct virtnet_tx, stats.truncated_err)},
+	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
+	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
+	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
+	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
+	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
+	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
+	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
+	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
+};
+/* MAC packet counters, offsets into struct zxdh_hw_mac_stats
+ * (filled from the BAR0 stats window by zxdh_hw_mac_get()).
+ */
+static const struct rte_zxdh_xstats_name_off rte_zxdh_mac_stat_strings[] = {
+	{"mac_rx_total",    offsetof(struct zxdh_hw_mac_stats, rx_total)},
+	{"mac_rx_pause",    offsetof(struct zxdh_hw_mac_stats, rx_pause)},
+	{"mac_rx_unicast",   offsetof(struct zxdh_hw_mac_stats, rx_unicast)},
+	{"mac_rx_multicast",   offsetof(struct zxdh_hw_mac_stats, rx_multicast)},
+	{"mac_rx_broadcast",   offsetof(struct zxdh_hw_mac_stats, rx_broadcast)},
+	{"mac_rx_vlan",   offsetof(struct zxdh_hw_mac_stats, rx_vlan)},
+	{"mac_rx_size_64",  offsetof(struct zxdh_hw_mac_stats, rx_size_64)},
+	{"mac_rx_size_65_127",  offsetof(struct zxdh_hw_mac_stats, rx_size_65_127)},
+	{"mac_rx_size_128_255",  offsetof(struct zxdh_hw_mac_stats,  rx_size_128_255)},
+	{"mac_rx_size_256_511",  offsetof(struct zxdh_hw_mac_stats, rx_size_256_511)},
+	{"mac_rx_size_512_1023",    offsetof(struct zxdh_hw_mac_stats, rx_size_512_1023)},
+	{"mac_rx_size_1024_1518",    offsetof(struct zxdh_hw_mac_stats, rx_size_1024_1518)},
+	{"mac_rx_size_1519_mru",   offsetof(struct zxdh_hw_mac_stats, rx_size_1519_mru)},
+	{"mac_rx_undersize",   offsetof(struct zxdh_hw_mac_stats, rx_undersize)},
+	{"mac_rx_oversize",   offsetof(struct zxdh_hw_mac_stats, rx_oversize)},
+	{"mac_rx_fragment",   offsetof(struct zxdh_hw_mac_stats, rx_fragment)},
+	{"mac_rx_jabber",  offsetof(struct zxdh_hw_mac_stats, rx_jabber)},
+	{"mac_rx_control",  offsetof(struct zxdh_hw_mac_stats, rx_control)},
+	{"mac_rx_eee",  offsetof(struct zxdh_hw_mac_stats,  rx_eee)},
+	{"mac_rx_error",  offsetof(struct zxdh_hw_mac_stats, rx_error)},
+	{"mac_rx_fcs_error",    offsetof(struct zxdh_hw_mac_stats, rx_fcs_error)},
+	{"mac_rx_drop",    offsetof(struct zxdh_hw_mac_stats, rx_drop)},
+
+	{"mac_tx_total",   offsetof(struct zxdh_hw_mac_stats, tx_total)},
+	{"mac_tx_pause",   offsetof(struct zxdh_hw_mac_stats, tx_pause)},
+	{"mac_tx_unicast",  offsetof(struct zxdh_hw_mac_stats, tx_unicast)},
+	{"mac_tx_multicast",  offsetof(struct zxdh_hw_mac_stats, tx_multicast)},
+	{"mac_tx_broadcast",  offsetof(struct zxdh_hw_mac_stats,  tx_broadcast)},
+	{"mac_tx_vlan",  offsetof(struct zxdh_hw_mac_stats, tx_vlan)},
+	{"mac_tx_size_64",   offsetof(struct zxdh_hw_mac_stats, tx_size_64)},
+	{"mac_tx_size_65_127",   offsetof(struct zxdh_hw_mac_stats, tx_size_65_127)},
+	{"mac_tx_size_128_255",  offsetof(struct zxdh_hw_mac_stats, tx_size_128_255)},
+	{"mac_tx_size_256_511",  offsetof(struct zxdh_hw_mac_stats, tx_size_256_511)},
+	{"mac_tx_size_512_1023",  offsetof(struct zxdh_hw_mac_stats,  tx_size_512_1023)},
+	{"mac_tx_size_1024_1518",  offsetof(struct zxdh_hw_mac_stats, tx_size_1024_1518)},
+	{"mac_tx_size_1519_mtu",   offsetof(struct zxdh_hw_mac_stats, tx_size_1519_mtu)},
+	{"mac_tx_undersize",   offsetof(struct zxdh_hw_mac_stats, tx_undersize)},
+	{"mac_tx_oversize",  offsetof(struct zxdh_hw_mac_stats, tx_oversize)},
+	{"mac_tx_fragment",  offsetof(struct zxdh_hw_mac_stats, tx_fragment)},
+	{"mac_tx_jabber",  offsetof(struct zxdh_hw_mac_stats,  tx_jabber)},
+	{"mac_tx_control",  offsetof(struct zxdh_hw_mac_stats, tx_control)},
+	{"mac_tx_eee",   offsetof(struct zxdh_hw_mac_stats, tx_eee)},
+	{"mac_tx_error",   offsetof(struct zxdh_hw_mac_stats, tx_error)},
+	{"mac_tx_fcs_error",  offsetof(struct zxdh_hw_mac_stats, tx_fcs_error)},
+	{"mac_tx_drop",  offsetof(struct zxdh_hw_mac_stats, tx_drop)},
+};
+
+/* MAC byte counters, offsets into struct zxdh_hw_mac_bytes. */
+static const struct rte_zxdh_xstats_name_off rte_zxdh_mac_bytes_strings[] = {
+	{"mac_rx_total_bytes",   offsetof(struct zxdh_hw_mac_bytes, rx_total_bytes)},
+	{"mac_rx_good_bytes",   offsetof(struct zxdh_hw_mac_bytes, rx_good_bytes)},
+	{"mac_tx_total_bytes",  offsetof(struct zxdh_hw_mac_bytes,  tx_total_bytes)},
+	{"mac_tx_good_bytes",  offsetof(struct zxdh_hw_mac_bytes, tx_good_bytes)},
+};
+
+/* VQM per-vport counters, offsets into struct zxdh_hw_stats. */
+static const struct rte_zxdh_xstats_name_off rte_zxdh_vqm_stat_strings[] = {
+	{"vqm_rx_vport_packets",    offsetof(struct zxdh_hw_stats, rx_total)},
+	{"vqm_tx_vport_packets",    offsetof(struct zxdh_hw_stats, tx_total)},
+	{"vqm_rx_vport_bytes",   offsetof(struct zxdh_hw_stats, rx_bytes)},
+	{"vqm_tx_vport_bytes",   offsetof(struct zxdh_hw_stats, tx_bytes)},
+	{"vqm_rx_vport_dropped",   offsetof(struct zxdh_hw_stats, rx_drop)},
+};
+
+#define EAL_INTR_EPOLL_WAIT_FOREVER			(-1)
+/* NOTE(review): VLAN_TAG_LEN is also defined in zxdh_common.h with the
+ * same body; keeping a single definition would be cleaner.
+ */
+#define VLAN_TAG_LEN						4 /* 802.3ac tag (not DMA'd) */
+
+#define LOW3_BIT_MASK						0x7
+#define LOW5_BIT_MASK						0x1f
+
+
+#define ZXDH_VF_LOCK_REG					0x90
+#define ZXDH_VF_LOCK_ENABLE_MASK			0x1
+#define ZXDH_COI_TABLE_BASE_ADDR			0x5000
+#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX		10
+
+#define ZXDH_MIN_RX_BUFSIZE					64
+
+/* Element counts of the xstats name tables above. */
+#define ZXDH_NB_RXQ_XSTATS (sizeof(rte_zxdh_rxq_stat_strings) / \
+							sizeof(rte_zxdh_rxq_stat_strings[0]))
+#define ZXDH_NB_TXQ_XSTATS (sizeof(rte_zxdh_txq_stat_strings) / \
+							sizeof(rte_zxdh_txq_stat_strings[0]))
+
+#define ZXDH_NP_XSTATS (sizeof(rte_zxdh_np_stat_strings) / \
+							sizeof(rte_zxdh_np_stat_strings[0]))
+
+#define ZXDH_MAC_XSTATS (sizeof(rte_zxdh_mac_stat_strings) / \
+							sizeof(rte_zxdh_mac_stat_strings[0]))
+
+#define ZXDH_MAC_BYTES (sizeof(rte_zxdh_mac_bytes_strings) / \
+							sizeof(rte_zxdh_mac_bytes_strings[0]))
+
+#define ZXDH_VQM_XSTATS (sizeof(rte_zxdh_vqm_stat_strings) / \
+							sizeof(rte_zxdh_vqm_stat_strings[0]))
+
+/* Forward declarations for static helpers defined later in this file. */
+static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev);
+static void zxdh_notify_peers(struct rte_eth_dev *dev);
+static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev);
+static void zxdh_priv_res_free(struct zxdh_hw *priv);
+static void zxdh_queues_unbind_intr(struct rte_eth_dev *dev);
+static int zxdh_tables_init(struct rte_eth_dev *dev);
+static int32_t zxdh_free_queues(struct rte_eth_dev *dev);
+static int32_t zxdh_acquire_lock(struct rte_eth_dev *dev);
+static int32_t zxdh_release_lock(struct rte_eth_dev *dev);
+static int32_t zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch);
+static int32_t zxdh_release_channel(struct rte_eth_dev *dev);
+
+/* BAR message-channel receive callbacks for VF and PF sides. */
+static int vf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
+			uint16_t *reps_len, void *eth_dev __rte_unused);
+static int pf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
+			uint16_t *reps_len, void *eth_dev __rte_unused);
+static void zxdh_np_destroy(struct rte_eth_dev *dev);
+static void zxdh_intr_cb_reg(struct rte_eth_dev *dev);
+static void zxdh_intr_cb_unreg(struct rte_eth_dev *dev);
+static int32_t zxdh_dev_devargs_parse(struct rte_devargs *devargs, struct zxdh_hw *hw);
+
+/**
+ * Report xstats names in the fixed order also used by zxdh_dev_xstats_get():
+ * NP stats, then (PF only) MAC stats and MAC byte counters, then VQM
+ * stats, then per-RX-queue and per-TX-queue stats.
+ * When @xstats_names is NULL only the total count is returned.
+ */
+int32_t zxdh_dev_xstats_get_names(struct rte_eth_dev *dev,
+			struct rte_eth_xstat_name *xstats_names,
+			__rte_unused unsigned int limit)
+{
+	uint32_t i     = 0;
+	uint32_t count = 0;
+	uint32_t t     = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	unsigned int nstats = dev->data->nb_tx_queues * ZXDH_NB_TXQ_XSTATS +
+					dev->data->nb_rx_queues * ZXDH_NB_RXQ_XSTATS +
+					ZXDH_NP_XSTATS + ZXDH_VQM_XSTATS;
+
+	/* MAC counters are only visible on the PF */
+	if (hw->is_pf)
+		nstats += ZXDH_MAC_XSTATS + ZXDH_MAC_BYTES;
+
+	if (xstats_names != NULL) {
+		/* Note: limit checked in rte_eth_xstats_names() */
+		for (i = 0; i < ZXDH_NP_XSTATS; i++) {
+			snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
+			"%s", rte_zxdh_np_stat_strings[i].name);
+			count++;
+		}
+		if (hw->is_pf) {
+			for (i = 0; i < ZXDH_MAC_XSTATS; i++) {
+				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
+				"%s", rte_zxdh_mac_stat_strings[i].name);
+				count++;
+			}
+			for (i = 0; i < ZXDH_MAC_BYTES; i++) {
+				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
+				"%s", rte_zxdh_mac_bytes_strings[i].name);
+				count++;
+			}
+		}
+		for (i = 0; i < ZXDH_VQM_XSTATS; i++) {
+			snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
+			"%s", rte_zxdh_vqm_stat_strings[i].name);
+			count++;
+		}
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+			if (rxvq == NULL)
+				continue;
+			for (t = 0; t < ZXDH_NB_RXQ_XSTATS; t++) {
+				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
+				"rx_q%u_%s", i, rte_zxdh_rxq_stat_strings[t].name);
+				count++;
+			}
+		}
+
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			struct virtnet_tx *txvq = dev->data->tx_queues[i];
+
+			if (txvq == NULL)
+				continue;
+			for (t = 0; t < ZXDH_NB_TXQ_XSTATS; t++) {
+				snprintf(xstats_names[count].name, sizeof(xstats_names[count].name),
+				"tx_q%u_%s", i, rte_zxdh_txq_stat_strings[t].name);
+				count++;
+			}
+		}
+		PMD_DRV_LOG(INFO, "stats count  = %u", count);
+		return count;
+	}
+	return nstats;
+}
+/**
+ * Fill @xstats with counter values, in the same order as the names
+ * reported by zxdh_dev_xstats_get_names().  If @n is smaller than the
+ * number of stats, returns the required size without filling anything.
+ */
+int32_t zxdh_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, uint32_t n)
+{
+	uint32_t i	   = 0;
+	uint32_t count = 0;
+	uint32_t t = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_hw_np_stats np_stats = {0};
+	struct zxdh_hw_mac_stats mac_stats = {0};
+	struct zxdh_hw_mac_bytes mac_bytes = {0};
+	struct zxdh_hw_stats  vqm_stats = {0};
+	uint32_t nstats = dev->data->nb_tx_queues * ZXDH_NB_TXQ_XSTATS +
+			dev->data->nb_rx_queues * ZXDH_NB_RXQ_XSTATS +
+			ZXDH_NP_XSTATS + ZXDH_VQM_XSTATS;
+
+	if (hw->is_pf) {
+		nstats += ZXDH_MAC_XSTATS + ZXDH_MAC_BYTES;
+		zxdh_hw_mac_get(dev, &mac_stats, &mac_bytes);
+	}
+	if (n < nstats)
+		return nstats;
+	/* NOTE(review): return codes of the two fetches below are ignored;
+	 * on failure the zero-initialized structs are reported.
+	 */
+	zxdh_hw_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+	zxdh_hw_np_stats(dev, &np_stats);
+	for (i = 0; i < ZXDH_NP_XSTATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)&np_stats) +
+				rte_zxdh_np_stat_strings[i].offset);
+		xstats[count].id = count;
+		count++;
+	}
+	if (hw->is_pf) {
+		for (i = 0; i < ZXDH_MAC_XSTATS; i++) {
+			xstats[count].value = *(uint64_t *)(((char *)&mac_stats) +
+					rte_zxdh_mac_stat_strings[i].offset);
+			xstats[count].id = count;
+			count++;
+		}
+		for (i = 0; i < ZXDH_MAC_BYTES; i++) {
+			xstats[count].value = *(uint64_t *)(((char *)&mac_bytes) +
+					rte_zxdh_mac_bytes_strings[i].offset);
+			xstats[count].id = count;
+			count++;
+		}
+	}
+	for (i = 0; i < ZXDH_VQM_XSTATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)&vqm_stats) +
+				rte_zxdh_vqm_stat_strings[i].offset);
+		xstats[count].id = count;
+		count++;
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+		if (rxvq == NULL)
+			continue;
+		for (t = 0; t < ZXDH_NB_RXQ_XSTATS; t++) {
+			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
+					rte_zxdh_rxq_stat_strings[t].offset);
+			xstats[count].id = count;
+			count++;
+		}
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct virtnet_tx *txvq = dev->data->tx_queues[i];
+
+		if (txvq == NULL)
+			continue;
+
+		for (t = 0; t < ZXDH_NB_TXQ_XSTATS; t++) {
+			xstats[count].value = *(uint64_t *)(((char *)txvq) +
+					rte_zxdh_txq_stat_strings[t].offset);
+			xstats[count].id = count;
+			count++;
+		}
+	}
+	PMD_DRV_LOG(INFO, "stats count  = %u", count);
+	return count;
+}
+/**
+ * Fun: fill the basic rte_eth_stats from VQM, MAC (PF only) and NP
+ * counters.  Fetch failures are ignored; zeroed structs are used then.
+ */
+int32_t zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_hw_stats  vqm_stats = {0};
+	struct zxdh_hw_np_stats np_stats = {0};
+	struct zxdh_hw_mac_stats mac_stats = {0};
+	struct zxdh_hw_mac_bytes mac_bytes = {0};
+	uint32_t i = 0;
+
+	zxdh_hw_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+	if (hw->is_pf)
+		zxdh_hw_mac_get(dev, &mac_stats, &mac_bytes);
+
+	zxdh_hw_np_stats(dev, &np_stats);
+
+	stats->ipackets = vqm_stats.rx_total;
+	stats->opackets = vqm_stats.tx_total;
+	stats->ibytes = vqm_stats.rx_bytes;
+	stats->obytes = vqm_stats.tx_bytes;
+	stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
+	stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.np_rx_mtu_drop_pkts;
+	stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.np_tx_mtu_drop_pkts;
+
+	/* Count meter drops only while a meter is enabled on either direction */
+	if (hw->i_mtr_en || hw->e_mtr_en)
+		stats->imissed += np_stats.np_rx_mtr_drop_pkts;
+
+	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+	for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+		if (rxvq == NULL)
+			continue;
+		/* offsets [0]=packets [1]=bytes [2]=errors [5]=truncated_err */
+		stats->q_ipackets[i] = *(uint64_t *)(((char *)rxvq) +
+				rte_zxdh_rxq_stat_strings[0].offset);
+		stats->q_ibytes[i] = *(uint64_t *)(((char *)rxvq) +
+				rte_zxdh_rxq_stat_strings[1].offset);
+		stats->q_errors[i] = *(uint64_t *)(((char *)rxvq) +
+				rte_zxdh_rxq_stat_strings[2].offset);
+		stats->q_errors[i] += *(uint64_t *)(((char *)rxvq) +
+				rte_zxdh_rxq_stat_strings[5].offset);
+	}
+
+	for (i = 0; (i < dev->data->nb_tx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+		struct virtnet_tx *txvq = dev->data->tx_queues[i];
+
+		if (txvq == NULL)
+			continue;
+		stats->q_opackets[i] = *(uint64_t *)(((char *)txvq) +
+				rte_zxdh_txq_stat_strings[0].offset);
+		stats->q_obytes[i] = *(uint64_t *)(((char *)txvq) +
+				rte_zxdh_txq_stat_strings[1].offset);
+		/* q_errors[i] accumulates both RX and TX errors for index i */
+		stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+				rte_zxdh_txq_stat_strings[2].offset);
+		stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+				rte_zxdh_txq_stat_strings[5].offset);
+	}
+	return 0;
+}
+
+/**
+ * Fun: reset device statistics -- always clears the VQM counters and,
+ * on a PF, the MAC counters as well.  Always returns 0.
+ */
+int32_t zxdh_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	zxdh_hw_stats_reset(dev, ZXDH_VQM_DEV_STATS_RESET);
+	if (hw->is_pf)
+		zxdh_hw_stats_reset(dev, ZXDH_MAC_STATS_RESET);
+
+	return 0;
+}
+
+
+/* Reset a virtqueue's packed ring to its pristine state: clear the ring
+ * memory and descriptor bookkeeping, re-run the ring/descriptor init,
+ * and leave host->guest interrupts disabled.
+ */
+static void zxdh_init_vring(struct virtqueue *vq)
+{
+	int32_t  size	  = vq->vq_nentries;
+	uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(ring_mem, 0, vq->vq_ring_size);
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx	 = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+	vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
+	vring_desc_init_packed(vq, size);
+	/*
+	 * Disable device(host) interrupting guest
+	 */
+	virtqueue_disable_intr(vq);
+}
+/* Even logical queue indexes are RX queues, odd ones are TX queues. */
+static inline int32_t get_queue_type(uint16_t vtpci_queue_idx)
+{
+	return (vtpci_queue_idx & 1) ? VTNET_TQ : VTNET_RQ;
+}
+/**
+ * Fun:
+ */
+int32_t zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
+{
+	char vq_name[VIRTQUEUE_MAX_NAME_SZ] = {0};
+	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ] = {0};
+	const struct rte_memzone *mz = NULL;
+	const struct rte_memzone *hdr_mz = NULL;
+	uint32_t size = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq = NULL;
+	struct virtnet_tx *txvq = NULL;
+	struct virtqueue *vq = NULL;
+	size_t sz_hdr_mz = 0;
+	void *sw_ring = NULL;
+	int32_t queue_type = get_queue_type(vtpci_logic_qidx);
+	int32_t numa_node = dev->device->numa_node;
+	uint16_t vtpci_phy_qidx = 0;
+	uint32_t vq_size = 0;
+	int32_t ret = 0;
+
+	if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
+		PMD_INIT_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
+		return -EINVAL;
+	}
+	vtpci_phy_qidx = hw->channel_context[vtpci_logic_qidx].ph_chno;
+
+	PMD_INIT_LOG(INFO, "vtpci_logic_qidx :%d setting up physical queue: %u on NUMA node %d",
+			vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
+
+	vq_size = hw->q_depth;
+
+	if (VTPCI_OPS(hw)->set_queue_num != NULL)
+		VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size);
+
+	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, vtpci_phy_qidx);
+
+	size = RTE_ALIGN_CEIL(sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra),
+				RTE_CACHE_LINE_SIZE);
+	if (queue_type == VTNET_TQ) {
+		/*
+		 * For each xmit packet, allocate a zxdh_net_hdr
+		 * and indirect ring elements
+		 */
+		sz_hdr_mz = vq_size * sizeof(struct zxdh_tx_region);
+	}
+
+	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, numa_node);
+	if (vq == NULL) {
+		PMD_INIT_LOG(ERR, "can not allocate vq");
+		return -ENOMEM;
+	}
+	hw->vqs[vtpci_logic_qidx] = vq;
+
+	vq->hw = hw;
+	vq->vq_queue_index = vtpci_phy_qidx;
+	vq->vq_nentries = vq_size;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	if (queue_type == VTNET_RQ)
+		vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	/*
+	 * Reserve a memzone for vring elements
+	 */
+	size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
+	vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN);
+	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+
+	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+				numa_node, RTE_MEMZONE_IOVA_CONTIG,
+				ZXDH_PCI_VRING_ALIGN);
+	if (mz == NULL) {
+		if (rte_errno == EEXIST)
+			mz = rte_memzone_lookup(vq_name);
+		if (mz == NULL) {
+			ret = -ENOMEM;
+			goto fail_q_alloc;
+		}
+	}
+
+	memset(mz->addr, 0, mz->len);
+
+	vq->vq_ring_mem = mz->iova;
+	vq->vq_ring_virt_mem = mz->addr;
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:	   0x%" PRIx64, (uint64_t)mz->iova);
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64, (uint64_t)(uintptr_t)mz->addr);
+
+	zxdh_init_vring(vq);
+
+	if (sz_hdr_mz) {
+		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
+					dev->data->port_id, vtpci_phy_qidx);
+		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
+					numa_node, RTE_MEMZONE_IOVA_CONTIG,
+					RTE_CACHE_LINE_SIZE);
+		if (hdr_mz == NULL) {
+			if (rte_errno == EEXIST)
+				hdr_mz = rte_memzone_lookup(vq_hdr_name);
+			if (hdr_mz == NULL) {
+				ret = -ENOMEM;
+				goto fail_q_alloc;
+			}
+		}
+	}
+
+	if (queue_type == VTNET_RQ) {
+		size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]);
+
+		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node);
+		if (!sw_ring) {
+			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+			ret = -ENOMEM;
+			goto fail_q_alloc;
+		}
+
+		vq->sw_ring = sw_ring;
+		rxvq = &vq->rxq;
+		rxvq->vq = vq;
+		rxvq->port_id = dev->data->port_id;
+		rxvq->mz = mz;
+	} else {             /* queue_type == VTNET_TQ */
+		txvq = &vq->txq;
+		txvq->vq = vq;
+		txvq->port_id = dev->data->port_id;
+		txvq->mz = mz;
+		txvq->virtio_net_hdr_mz = hdr_mz;
+		txvq->virtio_net_hdr_mem = hdr_mz->iova;
+	}
+
+	vq->offset = offsetof(struct rte_mbuf, buf_iova);
+	if (queue_type == VTNET_TQ) {
+		struct zxdh_tx_region *txr = hdr_mz->addr;
+		uint32_t i;
+
+		memset(txr, 0, vq_size * sizeof(*txr));
+		for (i = 0; i < vq_size; i++) {
+			/* first indirect descriptor is always the tx header */
+			struct vring_packed_desc *start_dp = txr[i].tx_packed_indir;
+
+			vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir));
+			start_dp->addr = txvq->virtio_net_hdr_mem + i * sizeof(*txr) +
+					offsetof(struct zxdh_tx_region, tx_hdr);
+			/* length will be updated to actual pi hdr size when xmit pkt */
+			start_dp->len = 0;
+		}
+	}
+	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+		PMD_INIT_LOG(ERR, "setup_queue failed");
+		return -EINVAL;
+	}
+	return 0;
+fail_q_alloc:
+	rte_free(sw_ring);
+	rte_memzone_free(hdr_mz);
+	rte_memzone_free(mz);
+	rte_free(vq);
+	return ret;
+}
+
+/**
+ * Release every virtqueue owned by the port: clear the COI/channel table,
+ * detach each queue from the device, free per-queue resources and finally
+ * the hw->vqs array itself.
+ * Returns 0 on success (including when there is nothing to free), -1 if
+ * the COI table could not be cleared.
+ */
+int32_t zxdh_free_queues(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t nr_vq = hw->queue_num;
+	struct virtqueue *vq = NULL;
+	int32_t queue_type = 0;
+	uint16_t i = 0;
+
+	/* nothing allocated yet — nothing to do */
+	if (hw->vqs == NULL)
+		return 0;
+
+	/* Clear COI table */
+	if (zxdh_release_channel(dev) < 0) {
+		PMD_INIT_LOG(ERR, "Failed to clear coi table");
+		return -1;
+	}
+
+	for (i = 0; i < nr_vq; i++) {
+		vq = hw->vqs[i];
+		if (vq == NULL)
+			continue;
+
+		VTPCI_OPS(hw)->del_queue(hw, vq);
+		queue_type = get_queue_type(i);
+		/* rx queues own a sw_ring; tx queues own an extra header memzone */
+		if (queue_type == VTNET_RQ) {
+			rte_free(vq->sw_ring);
+			rte_memzone_free(vq->rxq.mz);
+		} else if (queue_type == VTNET_TQ) {
+			rte_memzone_free(vq->txq.mz);
+			rte_memzone_free(vq->txq.virtio_net_hdr_mz);
+		}
+
+		rte_free(vq);
+		hw->vqs[i] = NULL;
+		PMD_INIT_LOG(DEBUG, "Release to queue %d success!", i);
+	}
+
+	rte_free(hw->vqs);
+	hw->vqs = NULL;
+
+	return 0;
+}
+/**
+ * Fun:
+ */
+/**
+ * Allocate the hw->vqs pointer array and bring up one virtqueue per
+ * logical channel. On any failure all queues set up so far are torn
+ * down again via zxdh_free_queues().
+ */
+static int32_t zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t lch;
+
+	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
+	if (hw->vqs == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate vqs");
+		return -ENOMEM;
+	}
+
+	for (lch = 0; lch < nr_vq; lch++) {
+		if (zxdh_acquire_channel(dev, lch) < 0) {
+			PMD_INIT_LOG(ERR, "Failed to acquire the channels");
+			zxdh_free_queues(dev);
+			return -1;
+		}
+		if (zxdh_init_queue(dev, lch) < 0) {
+			PMD_INIT_LOG(ERR, "Failed to alloc virtio queue");
+			zxdh_free_queues(dev);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Enable the interrupt of one rx queue, with a barrier so the device
+ * observes the new event flags before we return.
+ */
+int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	virtqueue_enable_intr(rxvq->vq);
+	zxdh_mb(hw->weak_barriers);
+	return 0;
+}
+
+/* Disable the interrupt of one rx queue. */
+int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+
+	virtqueue_disable_intr(rxvq->vq);
+	return 0;
+}
+
+
+/* Ack (re-arm) the device interrupt and refresh the cached MSI-X state.
+ * Returns 0 on success, -1 if the ack failed.
+ */
+static int32_t zxdh_intr_unmask(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (rte_intr_ack(dev->intr_handle) < 0)
+		return -1;
+
+	/* re-detect in case the MSI-X configuration changed */
+	hw->use_msix = zxdh_vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+	return 0;
+}
+
+/**
+ * Register the interrupt callbacks and enable the interrupt handle.
+ * hw->intr_enabled is only set once rte_intr_enable() succeeded, so a
+ * failed attempt can be retried (the original marked it enabled even on
+ * failure, making any retry a silent no-op).
+ * Returns 0 on success or the rte_intr_enable() error code.
+ */
+static int32_t zxdh_intr_enable(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!hw->intr_enabled) {
+		zxdh_intr_cb_reg(dev);
+		ret = rte_intr_enable(dev->intr_handle);
+		if (unlikely(ret)) {
+			PMD_INIT_LOG(ERR, "Failed to enable %s intr", dev->data->name);
+			/* leave intr_enabled clear so a later call retries */
+			return ret;
+		}
+		hw->intr_enabled = 1;
+	}
+	return ret;
+}
+/**
+ * Fun:
+ */
+/**
+ * Disable the port's interrupts if currently enabled: unregister the
+ * callbacks first, then disable the handle. Returns 0 on success (or
+ * when already disabled), -1 if rte_intr_disable() fails — intr_enabled
+ * is then left set so the operation can be retried.
+ */
+static int32_t zxdh_intr_disable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!hw->intr_enabled)
+		return 0;
+
+	zxdh_intr_cb_unreg(dev);
+	if (rte_intr_disable(dev->intr_handle) < 0)
+		return -1;
+
+	hw->intr_enabled = 0;
+	return 0;
+}
+/**
+ * Fun:
+ */
+/**
+ * rte_eth link_update callback: build the current link state, push it to
+ * the port attribute table and publish it via rte_eth_linkstatus_set().
+ * When the port is stopped the link is reported DOWN without querying the
+ * hardware (the original queried anyway, overwriting the DOWN status).
+ * The administrative state always gates the hardware status.
+ */
+static int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, __rte_unused int32_t wait_to_complete)
+{
+	struct rte_eth_link link;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0;
+
+	memset(&link, 0, sizeof(link));
+	link.link_duplex = hw->duplex;
+	link.link_speed  = hw->speed;
+	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+
+	if (!hw->started) {
+		/* stopped port: report DOWN, do not touch the hardware */
+		PMD_INIT_LOG(INFO, "port not start");
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_speed  = RTE_ETH_SPEED_NUM_UNKNOWN;
+	} else {
+		PMD_DRV_LOG(INFO, "Get link status from hw");
+		ret = zxdh_link_info_get(dev, &link);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to get link status from hw");
+			return ret;
+		}
+	}
+
+	/* administrative down always wins over the hardware status */
+	link.link_status &= hw->admin_status;
+	if (link.link_status == RTE_ETH_LINK_DOWN)
+		link.link_speed  = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+	PMD_DRV_LOG(INFO, "link.link_status %u link.link_speed %u link.link_duplex %u ",
+			link.link_status, link.link_speed, link.link_duplex);
+	ret = zxdh_dev_config_port_status(dev, link.link_status);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "set port attr.is_up = %u failed.", link.link_status);
+		return ret;
+	}
+	return rte_eth_linkstatus_set(dev, &link);
+}
+/*
+ * Process  dev config changed interrupt. Call the callback
+ * if link state changed, generate gratuitous RARP packet if
+ * the status indicates an ANNOUNCE.
+ */
+#define ZXDH_NET_S_LINK_UP   1 /* Link is up */
+#define ZXDH_NET_S_ANNOUNCE  2 /* Announcement is needed */
+
+
+/* PF-controlled VF admin link state; not referenced in this chunk —
+ * TODO confirm the users elsewhere in the driver.
+ */
+#define ZXDH_PF_STATE_VF_AUTO 0
+#define ZXDH_PF_STATE_VF_ENABLE 1
+#define ZXDH_PF_STATE_VF_DSIABLE 2 /* NOTE(review): typo, presumably DISABLE */
+/* Dev-config interrupt handler: re-arms the interrupt first, then on a
+ * config-change indication updates the link state (notifying LSC
+ * subscribers) and, when the device status reports ZXDH_NET_S_ANNOUNCE,
+ * sends a gratuitous RARP via zxdh_notify_peers().
+ */
+static void zxdh_devconf_intr_handler(void *param)
+{
+	struct rte_eth_dev *dev = param;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t status = 0;
+	/* Read interrupt status which clears interrupt */
+	uint8_t isr = zxdh_vtpci_isr(hw);
+
+	if (zxdh_intr_unmask(dev) < 0)
+		PMD_DRV_LOG(ERR, "interrupt enable failed");
+	if (isr & ZXDH_PCI_ISR_CONFIG) {
+		if (zxdh_dev_link_update(dev, 0) == 0)
+			rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+		if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS)) {
+			zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, status),
+					&status, sizeof(status));
+			if (status & ZXDH_NET_S_ANNOUNCE)
+				zxdh_notify_peers(dev);
+		}
+	}
+}
+
+/* Interrupt handler triggered by NIC for handling specific interrupt. */
+/* Interrupt handler for messages arriving from the risc-v side over the
+ * BAR0 control channel; dispatches to the bar message receiver with the
+ * destination matching our role (pf or vf).
+ */
+static void zxdh_fromriscv_intr_handler(void *param)
+{
+	struct rte_eth_dev *dev = param;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t virt_addr =
+		(uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+
+	if (hw->is_pf) {
+		PMD_INIT_LOG(INFO, "zxdh_risc2pf_intr_handler  PF ");
+		zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_PF, virt_addr, dev);
+	} else {
+		PMD_INIT_LOG(INFO, "zxdh_riscvf_intr_handler  VF ");
+		zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_VF, virt_addr, dev);
+	}
+}
+
+/* Interrupt handler triggered by NIC for handling specific interrupt. */
+/* Interrupt handler for pf<->vf messages on the BAR0 shared area; the
+ * source/destination pair depends on whether we are the pf or a vf.
+ */
+static void zxdh_frompfvf_intr_handler(void *param)
+{
+	struct rte_eth_dev *dev = param;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t virt_addr =
+		(uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);
+
+	if (hw->is_pf) {
+		PMD_INIT_LOG(INFO, "zxdh_pf2vf_intr_handler  PF ");
+		zxdh_bar_irq_recv(MSG_CHAN_END_VF, MSG_CHAN_END_PF, virt_addr, dev);
+	} else {
+		PMD_INIT_LOG(INFO, "zxdh_pf2vf_intr_handler  VF ");
+		zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, dev);
+	}
+}
+
+/* Tear down all interrupt state: unmap the config irq, unbind the queue
+ * vectors, disable and unregister callbacks, then free the efd/vector
+ * lists and the risc/dtb interrupt handles. Safe to call when nothing is
+ * set up yet (zxdh_configure_intr() uses it to start from a clean state).
+ */
+static int32_t zxdh_intr_release(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+		VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR);
+
+	zxdh_queues_unbind_intr(dev);
+	zxdh_intr_disable(dev);
+
+	rte_intr_efd_disable(dev->intr_handle);
+	rte_intr_vec_list_free(dev->intr_handle);
+	rte_free(hw->risc_intr);
+	hw->risc_intr = NULL;
+	rte_free(hw->dtb_intr);
+	hw->dtb_intr = NULL;
+	return 0;
+}
+
+/* NOTE(review): despite the name this returns whole SECONDS (TSC ticks
+ * divided by ticks-per-second), not milliseconds; the callers currently
+ * log the value with an "s" unit, so renaming rather than rescaling is
+ * the right eventual fix.
+ */
+static uint64_t get_cur_time_ms(void)
+{
+	return (rte_rdtsc() / rte_get_tsc_hz());
+}
+
+/**
+ * Delete this port's broadcast/unicast/multicast attribute entries from
+ * the eram tables; each vfid owns 4 consecutive entries per table
+ * (vfid offset << 2, plus the group index).
+ * Returns 0 on success or the dpp error code of the first failing delete.
+ * The error logs name the table being deleted (the original logged
+ * "Write eram-promisc" for all three delete operations).
+ */
+static int16_t zxdh_promisc_unint(struct zxdh_hw *hw)
+{
+	int16_t ret = 0, vf_group_id = 0;
+	struct zxdh_brocast_t brocast_table = {0};
+	struct zxdh_unitcast_t uc_table = {0};
+	struct zxdh_multicast_t mc_table = {0};
+
+	for (; vf_group_id < 4; vf_group_id++) {
+		DPP_DTB_ERAM_ENTRY_INFO_T eram_brocast_entry = {
+			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
+			(ZXIC_UINT32 *)&brocast_table
+		};
+		DPP_DTB_USER_ENTRY_T eram_brocast = {
+			.sdt_no = ZXDH_SDT_BROCAST_ATT_TABLE,
+			.p_entry_data = (void *)&eram_brocast_entry
+		};
+
+		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid, 1, &eram_brocast);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Delete eram-brocast failed, code:%d", ret);
+			return ret;
+		}
+
+		DPP_DTB_ERAM_ENTRY_INFO_T eram_uc_entry = {
+			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
+			(ZXIC_UINT32 *)&uc_table
+		};
+		DPP_DTB_USER_ENTRY_T entry_unicast = {
+			.sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE,
+			.p_entry_data = (void *)&eram_uc_entry
+		};
+
+		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid, 1, &entry_unicast);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Delete eram-unicast failed, code:%d", ret);
+			return ret;
+		}
+
+		DPP_DTB_ERAM_ENTRY_INFO_T eram_mc_entry = {
+			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
+			(ZXIC_UINT32 *)&mc_table
+		};
+		DPP_DTB_USER_ENTRY_T entry_multicast = {
+			.sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE,
+			.p_entry_data = (void *)&eram_mc_entry
+		};
+
+		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid,
+					1, &entry_multicast);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Delete eram-multicast failed, code:%d", ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+
+/**
+ * Undo per-port table state on close: release meters when enabled; on a
+ * pf, delete the port-attribute and promisc entries directly; a vf asks
+ * its pf to do the uninit via the bar message channel.
+ * Returns 0 on success or the first non-zero error code.
+ */
+static int16_t zxdh_port_unint(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_port_att_entry port_attr = {0};
+	int16_t ret = 0;
+
+	if (hw->i_mtr_en || hw->e_mtr_en)
+		zxdh_mtr_release(dev);
+
+	if (hw->is_pf == 1) {
+		DPP_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (ZXIC_UINT32 *)&port_attr};
+		DPP_DTB_USER_ENTRY_T entry = {
+			.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
+			.p_entry_data = (void *)&port_attr_entry
+		};
+		ret = dpp_dtb_table_entry_delete(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Write port_attr_eram failed, code:%d", ret);
+			return ret;
+		}
+
+		ret = zxdh_promisc_unint(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Write promisc_table failed, code:%d", ret);
+			return ret;
+		}
+	} else {
+		msg_head_build(hw, ZXDH_VF_PORT_UNINIT, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret)
+			/* fixed: original logged "port_init" on the uninit path */
+			PMD_DRV_LOG(ERR, "vf port_uninit failed");
+	}
+	return ret;
+}
+/**
+ * Fun:
+ */
+/**
+ * rte_eth dev_close callback: stop the port, uninit its table state,
+ * release meters/interrupts/NP resources, reset the device and free all
+ * queues and private allocations. Only the primary process does this.
+ * Returns 0 on success, -1 when stop or uninit fails.
+ */
+int32_t zxdh_dev_close(struct rte_eth_dev *dev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+	PMD_INIT_LOG(DEBUG, "zxdh_dev_close");
+	int ret = zxdh_dev_stop(dev);
+
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "%s :stop port %s failed ", __func__, dev->device->name);
+		return -1;
+	}
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	hw->started = 0;
+	hw->admin_status = 0;
+
+	ret = zxdh_port_unint(dev);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "%s :unint port %s failed ", __func__, dev->device->name);
+		return -1;
+	}
+	/* NOTE(review): zxdh_port_unint() may already have released the
+	 * meters — confirm zxdh_mtr_release() is safe to call twice.
+	 */
+	if (zxdh_shared_data != NULL)
+		zxdh_mtr_release(dev);
+
+	zxdh_intr_release(dev);
+
+	/* PRIu64 matches the uint64_t returned by get_cur_time_ms()
+	 * (the original "%ld" is undefined behavior on ILP32 targets)
+	 */
+	PMD_DRV_LOG(INFO, "zxdh_dtb_data_destroy  begin  time: %" PRIu64 " s", get_cur_time_ms());
+	zxdh_np_destroy(dev);
+	PMD_DRV_LOG(INFO, "zxdh_dtb_data_destroy  end  time: %" PRIu64 " s", get_cur_time_ms());
+
+	zxdh_vtpci_reset(hw);
+	zxdh_dev_free_mbufs(dev);
+	zxdh_free_queues(dev);
+
+	zxdh_bar_msg_chan_exit();
+	zxdh_priv_res_free(hw);
+
+	/* rte_free(NULL) is a no-op, so no guards are needed */
+	rte_free(dev->data->mac_addrs);
+	dev->data->mac_addrs = NULL;
+	rte_free(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key);
+	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+	return 0;
+}
+/**
+ * Fun:
+ */
+/* Feature bits the driver uses as the host feature set; see
+ * zxdh_get_pci_dev_config(), which overrides the device-reported value
+ * with this constant.
+ */
+#define ZXDH_PMD_DEFAULT_HOST_FEATURES   \
+	(1ULL << ZXDH_NET_F_MRG_RXBUF | \
+	 1ULL << ZXDH_NET_F_STATUS    | \
+	 1ULL << ZXDH_NET_F_MQ        | \
+	 1ULL << ZXDH_F_ANY_LAYOUT    | \
+	 1ULL << ZXDH_F_VERSION_1   | \
+	 1ULL << ZXDH_F_RING_PACKED | \
+	 1ULL << ZXDH_F_IN_ORDER    | \
+	 1ULL << ZXDH_F_ORDER_PLATFORM | \
+	 1ULL << ZXDH_F_NOTIFICATION_DATA |\
+	 1ULL << ZXDH_NET_F_MAC | \
+	 1ULL << ZXDH_NET_F_CSUM |\
+	 1ULL << ZXDH_NET_F_GUEST_CSUM |\
+	 1ULL << ZXDH_NET_F_GUEST_TSO4 |\
+	 1ULL << ZXDH_NET_F_GUEST_TSO6 |\
+	 1ULL << ZXDH_NET_F_HOST_TSO4 |\
+	 1ULL << ZXDH_NET_F_HOST_TSO6 |\
+	 1ULL << ZXDH_NET_F_GUEST_UFO |\
+	 1ULL << ZXDH_NET_F_HOST_UFO)
+
+/* Feature bits the driver accepts as a guest; the negotiated set is the
+ * intersection with the host features above.
+ */
+#define ZXDH_PMD_DEFAULT_GUEST_FEATURES   \
+	(1ULL << ZXDH_NET_F_MRG_RXBUF | \
+	 1ULL << ZXDH_NET_F_STATUS    | \
+	 1ULL << ZXDH_NET_F_MQ        | \
+	 1ULL << ZXDH_F_ANY_LAYOUT    | \
+	 1ULL << ZXDH_F_VERSION_1     | \
+	 1ULL << ZXDH_F_RING_PACKED   | \
+	 1ULL << ZXDH_F_IN_ORDER      | \
+	 1ULL << ZXDH_F_NOTIFICATION_DATA | \
+	 1ULL << ZXDH_NET_F_MAC)
+
+/* Upper bounds on the rx/tx queue counts exposed by this PMD. */
+#define ZXDH_RX_QUEUES_MAX  128U
+#define ZXDH_TX_QUEUES_MAX  128U
+/* Read the device configuration and derive driver state: negotiate the
+ * feature set, fetch (or randomize) the mac address and clamp the
+ * queue-pair count. Always returns 0.
+ */
+static int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw)
+{
+	hw->host_features = zxdh_vtpci_get_features(hw);
+	/* NOTE(review): the features read from the device above are
+	 * immediately discarded and replaced by the driver default set —
+	 * confirm this override is intentional.
+	 */
+	hw->host_features = ZXDH_PMD_DEFAULT_HOST_FEATURES;
+
+	uint64_t guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;
+	uint64_t nego_features = guest_features & hw->host_features;
+
+	hw->guest_features = nego_features;
+
+	/* mac: take the device's address when offered, else randomize */
+	if (hw->guest_features & (1ULL << ZXDH_NET_F_MAC)) {
+		zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, mac),
+				&hw->mac_addr, RTE_ETHER_ADDR_LEN);
+		PMD_INIT_LOG(DEBUG, "get dev mac: %02X:%02X:%02X:%02X:%02X:%02X",
+				hw->mac_addr[0], hw->mac_addr[1],
+				hw->mac_addr[2], hw->mac_addr[3],
+				hw->mac_addr[4], hw->mac_addr[5]);
+	} else {
+		rte_eth_random_addr(&hw->mac_addr[0]);
+		PMD_INIT_LOG(DEBUG, "random dev mac: %02X:%02X:%02X:%02X:%02X:%02X",
+				hw->mac_addr[0], hw->mac_addr[1],
+				hw->mac_addr[2], hw->mac_addr[3],
+				hw->mac_addr[4], hw->mac_addr[5]);
+	}
+	uint32_t max_queue_pairs;
+
+	zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs),
+			&max_queue_pairs, sizeof(max_queue_pairs));
+	PMD_INIT_LOG(DEBUG, "get max queue pairs %u", max_queue_pairs);
+	/* 0 from the device means "no limit reported": use the PMD maximum */
+	if (max_queue_pairs == 0)
+		hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;
+	else
+		hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs);
+
+	PMD_INIT_LOG(INFO, "set max queue pairs %d", hw->max_queue_pairs);
+
+	hw->weak_barriers = !vtpci_with_feature(hw, ZXDH_F_ORDER_PLATFORM);
+	return 0;
+}
+
+/**
+ * Pause the device so packets can be injected on its queues.
+ * On success returns 0 WITH hw->state_lock still held — the matching
+ * zxdh_dev_resume() releases it. Returns -1 (lock released) when the
+ * device was already stopped.
+ */
+int32_t zxdh_dev_pause(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	rte_spinlock_lock(&hw->state_lock);
+
+	if (hw->started == 0) {
+		/* Device is just stopped. */
+		rte_spinlock_unlock(&hw->state_lock);
+		return -1;
+	}
+	hw->started = 0;
+	hw->admin_status = 0;
+	/*
+	 * Prevent the worker threads from touching queues to avoid contention,
+	 * 1 ms should be enough for the ongoing Tx function to finish.
+	 */
+	rte_delay_ms(1);
+	return 0;
+}
+
+/*
+ * Recover hw state to let the worker threads continue.
+ */
+void zxdh_dev_resume(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	hw->started = 1;
+	hw->admin_status = 1;
+	/* releases the lock taken by a successful zxdh_dev_pause() */
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+/*
+ * Should be called only after device is paused.
+ */
+/* Transmit caller-supplied packets on tx queue 0; only valid while the
+ * device is paused via zxdh_dev_pause(). Returns the number of packets
+ * accepted by the tx burst function.
+ */
+int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct virtnet_tx *txvq = dev->data->tx_queues[0];
+	int32_t nb_sent;
+
+	hw->inject_pkts = tx_pkts;
+	nb_sent = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+	hw->inject_pkts = NULL;
+
+	return nb_sent;
+}
+
+/* Announce our mac to peers with a gratuitous RARP: build the packet
+ * from rx queue 0's mempool, pause the device, inject it on tx queue 0
+ * and resume. Silently does nothing when no rx queue exists.
+ */
+static void zxdh_notify_peers(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq;
+	struct rte_mbuf *rarp_mbuf;
+
+	if (dev->data->rx_queues == NULL)
+		return;
+	rxvq = dev->data->rx_queues[0];
+	if (rxvq == NULL)
+		return;
+
+	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool, (struct rte_ether_addr *)hw->mac_addr);
+	if (rarp_mbuf == NULL) {
+		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
+		return;
+	}
+
+	/* If virtio port just stopped, no need to send RARP */
+	if (zxdh_dev_pause(dev) < 0) {
+		rte_pktmbuf_free(rarp_mbuf);
+		return;
+	}
+
+	zxdh_inject_pkts(dev, &rarp_mbuf, 1);
+	zxdh_dev_resume(dev);
+}
+/**
+ * Fun:
+ */
+/**
+ * Install the rx/tx burst functions. The device must support the packed
+ * ring and mergeable rx buffers; no function pointer is installed until
+ * both checks pass (the original set tx_pkt_prepare first, leaving the
+ * device half-configured on failure).
+ * Returns 0 on success, -1 when a required feature is missing.
+ */
+static int32_t set_rxtx_funcs(struct rte_eth_dev *eth_dev)
+{
+	struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+	if (!vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(ERR, " port %u not support packed queue", eth_dev->data->port_id);
+		return -1;
+	}
+	if (!vtpci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {
+		PMD_INIT_LOG(ERR, " port %u not support rx mergeable", eth_dev->data->port_id);
+		return -1;
+	}
+	eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;
+	eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;
+	eth_dev->rx_pkt_burst = &zxdh_recv_mergeable_pkts_packed;
+	return 0;
+}
+/* Only support 1:1 queue/interrupt mapping so far.
+ * TODO: support n:1 queue/interrupt mapping when there are limited number of
+ * interrupt vectors (<N+1).
+ */
+/* Bind queue interrupts to MSI-X vectors. Logical vq layout: even index
+ * = rx queue, odd index = its paired tx queue. With rxq interrupts off
+ * every rx vq is masked (ZXDH_MSI_NO_VECTOR); otherwise rx vq i gets
+ * vector i + ZXDH_QUE_INTR_VEC_BASE. Tx queues are always masked.
+ */
+static int32_t zxdh_queues_bind_intr(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t i;
+	uint16_t vec;
+
+	if (!dev->data->dev_conf.intr_conf.rxq) {
+		PMD_INIT_LOG(INFO, "queue/interrupt mask, nb_rx_queues %u",
+				dev->data->nb_rx_queues);
+		for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+			vec = VTPCI_OPS(hw)->set_queue_irq(hw,
+					hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+			PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",
+					i * 2, ZXDH_MSI_NO_VECTOR, vec);
+		}
+	} else {
+		PMD_INIT_LOG(DEBUG, "queue/interrupt binding, nb_rx_queues %u",
+				dev->data->nb_rx_queues);
+		for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+			vec = VTPCI_OPS(hw)->set_queue_irq(hw,
+					hw->vqs[i * 2], i + ZXDH_QUE_INTR_VEC_BASE);
+			PMD_INIT_LOG(INFO, "vq%d irq set %d, get %d",
+					i * 2, i + ZXDH_QUE_INTR_VEC_BASE, vec);
+		}
+	}
+	/* mask all txq intr */
+	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+		vec = VTPCI_OPS(hw)->set_queue_irq(hw,
+				hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR);
+		PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",
+				(i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec);
+	}
+	return 0;
+}
+
+/* Mask the interrupt vector of every rx/tx queue pair.
+ * NOTE(review): iterates nb_rx_queues for both members of each pair —
+ * assumes nb_tx_queues == nb_rx_queues; confirm for asymmetric setups.
+ */
+static void zxdh_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t i;
+
+	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+		/* even index: rx queue, odd index: its paired tx queue */
+		VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+		VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);
+	}
+}
+/**
+ * Fun:
+ */
+/* Create the dtb interrupt handle from the eventfd allocated for vector
+ * ZXDH_MSIX_INTR_DTB_VEC (efds are populated by rte_intr_efd_enable()
+ * in zxdh_configure_intr()). Returns 0 on success, -ENOMEM when the
+ * handle cannot be allocated, -1 when the eventfd is invalid (the
+ * handle is freed again in that case).
+ */
+static int32_t zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!hw->dtb_intr) {
+		hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0);
+		if (hw->dtb_intr == NULL) {
+			PMD_INIT_LOG(ERR, "Failed to allocate dtb_intr");
+			return -ENOMEM;
+		}
+	}
+
+	if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) {
+		PMD_INIT_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1);
+		rte_free(hw->dtb_intr);
+		hw->dtb_intr = NULL;
+		return -1;
+	}
+	hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1];
+	hw->dtb_intr->type = dev->intr_handle->type;
+	return 0;
+}
+/**
+ * Fun:
+ */
+/**
+ * Create one interrupt handle per risc message vector from the eventfds
+ * allocated by rte_intr_efd_enable(). Returns 0 on success, -ENOMEM when
+ * the handle array cannot be allocated, -1 when any eventfd is invalid
+ * (the array is freed again in that case).
+ * The allocation trace is logged at DEBUG level (the original emitted a
+ * garbled ERR-level message on this normal path).
+ */
+static int32_t zxdh_setup_risc_interrupts(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint8_t i;
+
+	if (!hw->risc_intr) {
+		PMD_INIT_LOG(DEBUG, "allocate risc_intr");
+		hw->risc_intr = rte_zmalloc("risc_intr",
+			ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0);
+		if (hw->risc_intr == NULL) {
+			PMD_INIT_LOG(ERR, "Failed to allocate risc_intr");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) {
+		if (dev->intr_handle->efds[i] < 0) {
+			PMD_INIT_LOG(ERR, "[%u]risc interrupt fd is invalid", i);
+			rte_free(hw->risc_intr);
+			hw->risc_intr = NULL;
+			return -1;
+		}
+
+		struct rte_intr_handle *intr_handle = hw->risc_intr + i;
+
+		intr_handle->fd = dev->intr_handle->efds[i];
+		intr_handle->type = dev->intr_handle->type;
+	}
+
+	return 0;
+}
+/**
+ * Fun:
+ */
+/* Register the interrupt callbacks: the dev-config handler (unregistered
+ * first under LSC so a re-registration never duplicates it) plus the two
+ * risc_v message-channel handlers (pf/vf and riscv directions).
+ */
+static void zxdh_intr_cb_reg(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+		rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+
+	/* register callback to update dev config intr */
+	rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+	/* Register rsic_v to pf interrupt callback */
+	struct rte_intr_handle *tmp = hw->risc_intr +
+			(MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+
+	rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev);
+
+	tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+	rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev);
+}
+
+/**
+ * Unregister the interrupt callbacks registered by zxdh_intr_cb_reg():
+ * the dev-config handler and the two risc_v message-channel handlers.
+ * (The original unregistered the dev-config handler twice — once under
+ * the LSC flag and once unconditionally — and logged an empty ERR
+ * message; both removed.)
+ */
+static void zxdh_intr_cb_unreg(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	/* unregister the dev-config interrupt callback */
+	rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+
+	/* unregister the risc_v message-channel callbacks */
+	struct rte_intr_handle *tmp = hw->risc_intr +
+			(MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+
+	rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev);
+	tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+	rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev);
+}
+
+/**
+ * Fun:
+ */
+/* Full interrupt bring-up: release any previous state, create eventfds
+ * for the dtb/message vectors (plus one per rx queue when rxq interrupts
+ * are requested), build the risc/dtb handles, bind queue vectors and
+ * finally enable the interrupt handle. On any failure everything is torn
+ * down again via zxdh_intr_release().
+ */
+static int32_t zxdh_configure_intr(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0;
+
+	if (!rte_intr_cap_multiple(dev->intr_handle)) {
+		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+		return -ENOTSUP;
+	}
+	/* start from a clean slate */
+	zxdh_intr_release(dev);
+	uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM;
+
+	if (dev->data->dev_conf.intr_conf.rxq)
+		nb_efd += dev->data->nb_rx_queues;
+
+	if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) {
+		PMD_INIT_LOG(ERR, "Fail to create eventfd");
+		return -1;
+	}
+
+	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
+					hw->max_queue_pairs+ZXDH_INTR_NONQUE_NUM)) {
+		PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+					hw->max_queue_pairs+ZXDH_INTR_NONQUE_NUM);
+		return -ENOMEM;
+	}
+	PMD_INIT_LOG(INFO, "allocate %u rxq vectors", dev->intr_handle->vec_list_size);
+	if (zxdh_setup_risc_interrupts(dev) != 0) {
+		PMD_INIT_LOG(ERR, "Error setting up rsic_v interrupts!");
+		ret = -1;
+		goto free_intr_vec;
+	}
+	if (zxdh_setup_dtb_interrupts(dev) != 0) {
+		PMD_INIT_LOG(ERR, "Error setting up dtb interrupts!");
+		ret = -1;
+		goto free_intr_vec;
+	}
+
+	if (zxdh_queues_bind_intr(dev) < 0) {
+		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
+		ret = -1;
+		goto free_intr_vec;
+	}
+	/** DO NOT try to remove this! This function will enable msix,
+	 * or QEMU will encounter SIGSEGV when DRIVER_OK is sent.
+	 * And for legacy devices, this should be done before queue/vec
+	 * binding to change the config size from 20 to 24, or
+	 * ZXDH_MSI_QUEUE_VECTOR (22) will be ignored.
+	 **/
+	if (zxdh_intr_enable(dev) < 0) {
+		PMD_DRV_LOG(ERR, "interrupt enable failed");
+		ret = -1;
+		goto free_intr_vec;
+	}
+	return 0;
+
+free_intr_vec:
+	zxdh_intr_release(dev);
+	return ret;
+}
+/**
+ * Fun: reset device and renegotiate features if needed
+ */
+/* Per-port vtpci ops table, indexed by port id (shared with VTPCI_OPS). */
+struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
+/* Probe-time device init: read pci capabilities, reset the device, read
+ * its config (features/mac/queues) and seed the ethdev defaults (mtu,
+ * speed, LSC flag). Returns 0 on success or a negative error code.
+ */
+static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)
+{
+	struct zxdh_hw *hw = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	int ret = zxdh_read_pci_caps(pci_dev, hw);
+
+	if (ret) {
+		PMD_INIT_LOG(ERR, "port 0x%x pci caps read failed .", hw->vport.vport);
+		goto err;
+	}
+	zxdh_hw_internal[hw->port_id].vtpci_ops = &zxdh_modern_ops;
+	zxdh_vtpci_reset(hw);
+	zxdh_get_pci_dev_config(hw);
+	if (hw->vqs) { /* not reachable? */
+		zxdh_dev_free_mbufs(eth_dev);
+		ret = zxdh_free_queues(eth_dev);
+		if (ret < 0) {
+			PMD_INIT_LOG(ERR, "port 0x%x free queue failed.", hw->vport.vport);
+			goto err;
+		}
+	}
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+	hw->vtnet_hdr_size = ZXDH_DL_NET_HDR_SIZE;
+	hw->otpid = RTE_ETHER_TYPE_VLAN;
+	hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+	hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	/* largest mtu that still fits a max-size frame with headers */
+	hw->max_mtu = ZXDH_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN - VLAN_TAG_LEN - ZXDH_DL_NET_HDR_SIZE;
+	PMD_INIT_LOG(DEBUG, "max_mtu=%u", hw->max_mtu);
+	eth_dev->data->mtu = RTE_ETHER_MTU;
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);
+	PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+		eth_dev->data->mac_addrs->addr_bytes[0],
+		eth_dev->data->mac_addrs->addr_bytes[1],
+		eth_dev->data->mac_addrs->addr_bytes[2],
+		eth_dev->data->mac_addrs->addr_bytes[3],
+		eth_dev->data->mac_addrs->addr_bytes[4],
+		eth_dev->data->mac_addrs->addr_bytes[5]);
+	/* If host does not support both status and MSI-X then disable LSC */
+	if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && (hw->use_msix != ZXDH_MSIX_NONE)) {
+		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+		PMD_INIT_LOG(DEBUG, "LSC enable");
+	} else {
+		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+	}
+	return 0;
+
+err:
+	PMD_INIT_LOG(ERR, "port %d init device failed", eth_dev->data->port_id);
+	return ret;
+}
+/**
+ * Fun:
+ */
+/**
+ * Allocate per-port private resources: the vlan filter bitmap
+ * (64 x 64 bits = 4096 vlan ids) and, on a pf, the vf info table.
+ * Allocation failures are logged and leave the member NULL; consumers
+ * already tolerate that (set_vfs_pcieid() checks hw->vfinfo).
+ * The original memset() a possibly-NULL rte_malloc() result — fixed by
+ * using rte_zmalloc() and checking the results.
+ */
+static void zxdh_priv_res_init(struct zxdh_hw *hw)
+{
+	hw->vlan_fiter = rte_zmalloc("vlan_filter", 64 * sizeof(uint64_t), 1);
+	if (hw->vlan_fiter == NULL)
+		PMD_DRV_LOG(ERR, "Failed to allocate vlan filter bitmap");
+
+	hw->vfinfo = NULL;
+	if (hw->is_pf) {
+		hw->vfinfo = rte_zmalloc("vfinfo", ZXDH_MAX_VF * sizeof(struct vfinfo), 4);
+		if (hw->vfinfo == NULL)
+			PMD_DRV_LOG(ERR, "Failed to allocate vfinfo");
+	}
+}
+/**
+ * Fun:
+ */
+/* Derive each vf's pcie id from the pf's pcie id; bails out (with an
+ * error log) when the vf count is out of range or vfinfo is missing.
+ */
+static void set_vfs_pcieid(struct zxdh_hw *hw)
+{
+	int vf_idx;
+
+	if (hw->pfinfo.vf_nums > ZXDH_MAX_VF) {
+		PMD_DRV_LOG(ERR, "vf nums %u out of range", hw->pfinfo.vf_nums);
+		return;
+	}
+	if (hw->vfinfo == NULL) {
+		PMD_DRV_LOG(ERR, " vfinfo uninited");
+		return;
+	}
+
+	PMD_DRV_LOG(INFO, "vf nums %d", hw->pfinfo.vf_nums);
+	for (vf_idx = 0; vf_idx < hw->pfinfo.vf_nums; vf_idx++)
+		hw->vfinfo[vf_idx].pcieid = VF_PCIE_ID(hw->pcie_id, vf_idx);
+}
+
+
+/* Fill in the pf pcie id and, when we are the pf, the per-vf pcie ids. */
+static void zxdh_sriovinfo_init(struct zxdh_hw *hw)
+{
+	hw->pfinfo.pcieid = PF_PCIE_ID(hw->pcie_id);
+	if (hw->is_pf)
+		set_vfs_pcieid(hw);
+}
+/**
+ * Fun:
+ */
+#define SRIOV_MSGINFO_LEN  256
+/* Opcodes carried in struct sriov_msg_payload between pf and vf. */
+enum sriov_msg_opcode {
+	SRIOV_SET_VF_MAC = 0,    /* pf set vf's mac */
+	SRIOV_SET_VF_VLAN,       /* pf set vf's vlan */
+	SRIOV_SET_VF_LINK_STATE, /* pf set vf's link state */
+	SRIOV_VF_RESET,
+	SET_RSS_TABLE,
+	SRIOV_OPCODE_NUM,        /* number of opcodes, keep last */
+};
+/* Header of a pf<->vf bar-channel message; content[] carries slen bytes
+ * of opcode-specific payload.
+ */
+struct sriov_msg_payload {
+	uint16_t pcieid;/* sender's pcie id */
+	uint16_t vf_id;
+	enum sriov_msg_opcode opcode;
+	uint16_t slen;      /* length of content[] */
+	uint8_t content[];  /* payload; C99 flexible array member (was [0]) */
+} __rte_packed;
+/**
+ * Callback run on a vf when a message arrives from the pf over the bar
+ * channel: handles the opcodes a vf cares about and fills the reply.
+ * Returns 0 on success, -1 for an unknown opcode, -2 for a bad device.
+ * Fixes vs. original: eth_dev is used, so its __rte_unused tag is
+ * dropped; the mac copy uses RTE_ETHER_ADDR_LEN instead of a magic 6;
+ * the reply string is built with a bounded snprintf().
+ */
+int vf_recv_bar_msg(void *payload, uint16_t len __rte_unused,
+			void *reps_buffer, uint16_t *reps_len, void *eth_dev)
+{
+	int32_t ret = 0;
+	struct zxdh_hw *hw;
+	struct sriov_msg_payload *msg_payload = (struct sriov_msg_payload *)payload;
+	struct zxdh_msg_reply_body *reply_body = reps_buffer;
+
+	uint8_t *content = NULL;
+	uint16_t vf_id = msg_payload->vf_id;
+	uint16_t pcieid = msg_payload->pcieid;
+	uint16_t opcode = msg_payload->opcode;
+	uint16_t slen = msg_payload->slen;
+
+	content = msg_payload->content;
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)eth_dev;
+
+	if (dev == NULL) {
+		PMD_DRV_LOG(ERR, "param invalid\n");
+		ret = -2;
+		return ret;
+	}
+	hw = dev->data->dev_private;
+
+	PMD_DRV_LOG(DEBUG, "%s content %p vf_id %d pcieid %x slen %d\n",
+			__func__, content, vf_id, pcieid, slen);
+	switch (opcode) {
+	case SRIOV_SET_VF_MAC:
+		PMD_DRV_LOG(DEBUG, "pf pcie id is 0x%x:\n", pcieid);
+		PMD_DRV_LOG(DEBUG, "[VF GET MSG FROM PF]--vf mac is been set.\n");
+		PMD_DRV_LOG(DEBUG, "VF[%d] old mac is %02X:%02X:%02X:%02X:%02X:%02X\n",
+			vf_id,
+			(hw->mac_addr)[0], (hw->mac_addr)[1], (hw->mac_addr)[2],
+			(hw->mac_addr)[3], (hw->mac_addr)[4], (hw->mac_addr)[5]);
+
+		memcpy(hw->mac_addr, content, RTE_ETHER_ADDR_LEN);
+		reply_body->flag = ZXDH_REPS_SUCC;
+		char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "test";
+
+		/* bounded formatting — the original used sprintf() */
+		snprintf(str, sizeof(str), "vf %d process msg set mac ok ", vf_id);
+		memcpy(reply_body->reply_data, str, strlen(str) + 1);
+		*reps_len = sizeof(*reply_body);
+		break;
+	case SRIOV_SET_VF_LINK_STATE:
+		/* set vf link state(link up or link down) */
+		PMD_DRV_LOG(DEBUG, "[VF GET MSG FROM PF]--vf link state is been set.\n");
+		break;
+	case SRIOV_VF_RESET:
+		PMD_DRV_LOG(DEBUG, "[VF GET MSG FROM PF]--reset. port should be stopped\n");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "[VF GET MSG FROM PF]--unknown msg opcode %d\n", opcode);
+		ret = -1;
+		break;
+	}
+	return ret;
+}
+/**
+ * Fun:
+ */
+/* Dispatch a pf-side message to its handler in proc_func[] and record
+ * success/failure in the reply flag; *res_len grows by the handler's
+ * payload plus the flag field. Callers must have validated msg_type.
+ */
+static inline int config_func_call(struct zxdh_hw *hw, struct zxdh_msg_info *msg_info,
+			struct zxdh_msg_reply_body *res, uint16_t *res_len)
+{
+	struct zxdh_msg_head *msghead = &msg_info->msg_head;
+	enum zxdh_msg_type msg_type = msghead->msg_type;
+	int ret = -1;
+
+	if (res == NULL || res_len == NULL) {
+		PMD_DRV_LOG(INFO, "-%s  invalid param\n", __func__);
+		return -1;
+	}
+	if (proc_func[msg_type] == NULL) {
+		res->flag = ZXDH_REPS_FAIL;
+	} else {
+		PMD_DRV_LOG(INFO, "-%s begin-msg_type:%d\n", __func__, msg_type);
+		ret = proc_func[msg_type](hw, msghead->vport,
+				(void *)&msg_info->data, res, res_len);
+		if (ret == 0)
+			res->flag = ZXDH_REPS_SUCC;
+	}
+	*res_len += sizeof(res->flag);
+	PMD_DRV_LOG(INFO, "-%s-end-msg_type:%d -res_len 0x%x\n",
+			__func__, msg_type, *res_len);
+	return ret;
+}
+/**
+ * Callback run on the pf when a message arrives from a vf over the bar
+ * channel: validates the message type, dispatches via config_func_call()
+ * and returns the total reply length in *reps_len.
+ * Returns the handler's result, or -2 for an invalid type/device.
+ * Fixes vs. original: reply_len is declared and initialized before the
+ * gotos (the old `goto msg_proc_end` jumped over its declaration, so the
+ * error path logged and used an indeterminate value — UB); eth_dev is
+ * used, so its __rte_unused tag is dropped.
+ */
+int pf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
+			uint16_t *reps_len, void *eth_dev)
+{
+	struct zxdh_msg_info *msg_info = (struct zxdh_msg_info *)pay_load;
+	struct zxdh_msg_head *msghead = &(msg_info->msg_head);
+	struct zxdh_msg_reply_body *reply_body = reps_buffer;
+	uint16_t vf_id = msghead->vf_id;
+	uint16_t pcieid = msghead->pcieid;
+	uint16_t reply_len = 0;
+	int32_t ret = 0;
+	enum zxdh_msg_type msg_type = msghead->msg_type;
+
+	if (msg_type >= ZXDH_FUNC_END) {
+		PMD_DRV_LOG(ERR, "%s vf_id %d pcieid 0x%x len %u msg_type %d unsupported\n",
+				__func__, vf_id, pcieid, len, msg_type);
+		ret = -2;
+		goto msg_proc_end;
+	}
+	PMD_DRV_LOG(DEBUG, "%s vf_id %d pcieid 0x%x len %d msg_type %d\n",
+			__func__, vf_id, pcieid, len, msg_type);
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)eth_dev;
+
+	if (dev == NULL) {
+		PMD_DRV_LOG(ERR, "param invalid\n");
+		ret = -2;
+		goto msg_proc_end;
+	}
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	ret = config_func_call(hw, msg_info, reply_body, &reply_len);
+	*reps_len = reply_len + sizeof(struct zxdh_msg_reply_head);
+	PMD_DRV_LOG(INFO, "len %d\n", *reps_len);
+
+	return ret;
+
+msg_proc_end:
+	/* NOTE(review): reply_body->flag/reply_data are not yet filled on
+	 * this path; the log below reports whatever the buffer contained.
+	 */
+	PMD_DRV_LOG(DEBUG, "[PF GET MSG FROM VF] ret %d proc result:ret 0x%x reslt info: %s reply_len: 0x%x\n",
+			ret, reply_body->flag, reply_body->reply_data, reply_len);
+	memcpy(reply_body->reply_data, &ret, sizeof(ret));
+	reply_len = sizeof(ret);
+	*reps_len = sizeof(struct zxdh_msg_reply_head) + reply_len;
+	rte_hexdump(stdout, "pf reply msg ", reply_body, reply_len);
+	return ret;
+}
+/**
+ * Fun:
+ */
+/* Register the bar-channel receive callback matching this function's role
+ * (PF handles VF requests, VF handles PF notifications).
+ */
+static void zxdh_msg_cb_reg(struct zxdh_hw *hw)
+{
+	if (hw->is_pf) {
+		zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_PF, pf_recv_bar_msg);
+		return;
+	}
+	zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_VF, vf_recv_bar_msg);
+}
+/* Release the per-port private tables; every pointer is NULLed afterwards so
+ * a later free or reuse cannot touch stale memory (rte_free(NULL) is a no-op).
+ */
+static void zxdh_priv_res_free(struct zxdh_hw *priv)
+{
+	rte_free(priv->reta_idx);
+	rte_free(priv->vfinfo);
+	rte_free(priv->vlan_fiter);
+	priv->reta_idx = NULL;
+	priv->vfinfo = NULL;
+	priv->vlan_fiter = NULL;
+}
+
+/* True when any receive-side offload was negotiated with the device, or when
+ * VLAN stripping has been requested through the rx offload configuration.
+ */
+static bool rx_offload_enabled(struct zxdh_hw *hw)
+{
+	if (hw->vlan_offload_cfg.vlan_strip == 1)
+		return true;
+
+	return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
+}
+
+/* True when any transmit-side offload feature was negotiated with the device. */
+static bool tx_offload_enabled(struct zxdh_hw *hw)
+{
+	static const uint64_t tx_feats[] = {
+		ZXDH_NET_F_CSUM,
+		ZXDH_NET_F_HOST_TSO4,
+		ZXDH_NET_F_HOST_TSO6,
+		ZXDH_NET_F_HOST_UFO,
+	};
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(tx_feats); i++) {
+		if (vtpci_with_feature(hw, tx_feats[i]))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * Negotiate device features against the requested rx/tx offloads: build the
+ * wanted feature set, intersect it with the host's, write it to the device
+ * and verify the host actually supports what the application asked for.
+ *
+ * @return 0 on success, -ENOTSUP when the host lacks a required feature.
+ *
+ * Fixes vs. original: the tx_offloads checksum test used
+ * RTE_ETH_RX_OFFLOAD_* flags and the rx checksum re-check used
+ * RTE_ETH_TX_OFFLOAD_* flags (the two namespaces have different bit values);
+ * both now use the namespace matching the mask they test. The feature log
+ * also used "%lx" for a uint64_t; it now uses PRIx64.
+ */
+static int32_t zxdh_features_update(struct zxdh_hw *hw,
+				const struct rte_eth_rxmode *rxmode,
+				const struct rte_eth_txmode *txmode)
+{
+	uint64_t rx_offloads = rxmode->offloads;
+	uint64_t tx_offloads = txmode->offloads;
+	uint64_t req_features = hw->guest_features;
+
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
+		req_features |= (1ULL << ZXDH_NET_F_GUEST_CSUM);
+
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+		req_features |= (1ULL << ZXDH_NET_F_GUEST_TSO4) |
+						(1ULL << ZXDH_NET_F_GUEST_TSO6);
+
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
+		req_features |= (1ULL << ZXDH_NET_F_CSUM);
+
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		req_features |= (1ULL << ZXDH_NET_F_HOST_TSO4) |
+						(1ULL << ZXDH_NET_F_HOST_TSO6);
+
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO)
+		req_features |= (1ULL << ZXDH_NET_F_HOST_UFO);
+
+	/* only request what the host actually offers */
+	req_features = req_features & hw->host_features;
+	hw->guest_features = req_features;
+
+	VTPCI_OPS(hw)->set_features(hw, req_features);
+
+	PMD_INIT_LOG(INFO, "set feature %" PRIx64 "!", req_features);
+
+	PMD_INIT_LOG(DEBUG, "host_features	= %" PRIx64, hw->host_features);
+	PMD_INIT_LOG(DEBUG, "guest_features = %" PRIx64, hw->guest_features);
+
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
+		 !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
+		PMD_DRV_LOG(ERR, "rx checksum not available on this host");
+		return -ENOTSUP;
+	}
+
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
+		(!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+		 !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
+		PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+/**
+ * Fun:
+ */
+/**
+ * Try to take the VF hardware lock exposed through BAR0.
+ * @return 0 when the lock enable bit is set, -1 otherwise.
+ * NOTE(review): the read itself appears to act as the test-and-set --
+ * confirm against the register specification.
+ */
+int32_t zxdh_acquire_lock(struct rte_eth_dev *dev)
+{
+	uint32_t lock_reg = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, ZXDH_VF_LOCK_REG);
+
+	/* check whether lock is used */
+	return (lock_reg & ZXDH_VF_LOCK_ENABLE_MASK) ? 0 : -1;
+}
+/**
+ * Fun:
+ */
+/**
+ * Drop the VF hardware lock by clearing its enable bit in BAR0.
+ * @return 0 on success, -1 when the lock was not held.
+ */
+int32_t zxdh_release_lock(struct rte_eth_dev *dev)
+{
+	uint32_t lock_reg = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, ZXDH_VF_LOCK_REG);
+
+	if (!(lock_reg & ZXDH_VF_LOCK_ENABLE_MASK)) {
+		PMD_INIT_LOG(ERR, "No lock need to be release\n");
+		return -1;
+	}
+
+	lock_reg &= ~ZXDH_VF_LOCK_ENABLE_MASK;
+	zxdh_write_reg(dev, ZXDH_BAR0_INDEX, ZXDH_VF_LOCK_REG, lock_reg);
+	return 0;
+}
+/**
+ * Fun:
+ */
+/**
+ * Scan the COI table under the hardware lock for a free physical channel of
+ * the requested queue type and claim it.
+ *
+ * @param queue_type VTNET_RQ (even bits) or VTNET_TQ (odd bits)
+ * @return claimed channel id on success, -1 on lock timeout or no free channel
+ *
+ * Fixes vs. original: bit tests/sets use an unsigned constant so bit 31 is
+ * never produced by shifting into the sign bit ("1 << 31" is undefined
+ * behavior); a successful lock acquisition on the final retry is no longer
+ * misreported as a timeout (which previously also leaked the hw lock).
+ */
+static int32_t zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
+{
+	uint16_t base	 = (queue_type == VTNET_RQ) ? 0 : 1;  /* txq only polls odd bits */
+	uint16_t i		 = 0;
+	uint16_t j		 = 0;
+	uint16_t done	 = 0;
+	uint16_t timeout = 0;
+	int locked = 0;
+
+	/* acquire hw lock, retrying up to ZXDH_ACQUIRE_CHANNEL_NUM_MAX times */
+	while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+		rte_delay_us_block(1000);
+		if (zxdh_acquire_lock(dev) < 0) {
+			PMD_INIT_LOG(ERR, "Acquiring hw lock got failed, timeout: %d", timeout);
+			continue;
+		}
+		locked = 1;
+		break;
+	}
+	if (!locked) {
+		PMD_INIT_LOG(ERR, "Failed to acquire channel");
+		return -1;
+	}
+	/* Iterate COI table and find free channel */
+	for (i = ZXDH_QUEUES_BASE/32; i < ZXDH_TOTAL_QUEUES_NUM/32; i++) {
+		uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t));
+		uint32_t var = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, addr);
+
+		for (j = base; j < 32; j += 2) {
+			/* Got the available channel & update COI table */
+			if ((var & (1u << j)) == 0) {
+				var |= (1u << j);
+				zxdh_write_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+				done = 1;
+				break;
+			}
+		}
+		if (done)
+			break;
+	}
+	zxdh_release_lock(dev);
+	/* check for no channel condition */
+	if (done != 1) {
+		PMD_INIT_LOG(ERR, "No available queues\n");
+		return -1;
+	}
+	/* return available channel ID */
+	return (i * 32) + j;
+}
+/**
+ * Fun:
+ */
+/**
+ * Ensure logic channel @lch is bound to a physical channel, allocating a free
+ * one from the COI table on first use and caching the binding in
+ * hw->channel_context[].
+ *
+ * @return -1 on allocation failure; non-negative on success.
+ * NOTE(review): on a cache hit this returns the physical channel number, but
+ * after a fresh allocation it returns 0 -- the two success values differ, so
+ * callers should only rely on "< 0 means failure"; confirm intent.
+ */
+int32_t zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	/* already bound: reuse the cached physical channel */
+	if (hw->channel_context[lch].valid == 1) {
+		PMD_INIT_LOG(DEBUG, "Logic channel:%u already acquired Physics channel:%u",
+				lch, hw->channel_context[lch].ph_chno);
+		return hw->channel_context[lch].ph_chno;
+	}
+	int32_t pch = zxdh_get_available_channel(dev, get_queue_type(lch));
+
+	if (pch < 0) {
+		PMD_INIT_LOG(ERR, "Failed to acquire channel");
+		return -1;
+	}
+	hw->channel_context[lch].ph_chno = (uint16_t)pch;
+	hw->channel_context[lch].valid = 1;
+	PMD_INIT_LOG(DEBUG, "Acquire channel success lch:%u --> pch:%d", lch, pch);
+	return 0;
+}
+/**
+ * Fun:
+ */
+/**
+ * Return every physical channel bound to this device to the COI free pool
+ * and invalidate the logic->physical bindings. Runs under the hw lock.
+ *
+ * @return 0 on success, -1 when the lock could not be taken.
+ *
+ * Fixes vs. original: the bit clear uses an unsigned constant so clearing
+ * bit 31 ("~(1 << 31)") is not undefined behavior, and a successful lock
+ * acquisition on the final retry is no longer misreported as a timeout.
+ */
+int32_t zxdh_release_channel(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t nr_vq = hw->queue_num;
+	uint32_t var  = 0;
+	uint32_t addr = 0;
+	uint32_t widx = 0;
+	uint32_t bidx = 0;
+	uint16_t pch  = 0;
+	uint16_t lch  = 0;
+	uint16_t timeout = 0;
+	int locked = 0;
+
+	while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+		if (zxdh_acquire_lock(dev) != 0) {
+			PMD_INIT_LOG(ERR,
+				"Could not acquire lock to release channel, timeout %d", timeout);
+			continue;
+		}
+		locked = 1;
+		break;
+	}
+
+	if (!locked) {
+		PMD_INIT_LOG(ERR, "Acquire lock timeout");
+		return -1;
+	}
+
+	for (lch = 0; lch < nr_vq; lch++) {
+		if (hw->channel_context[lch].valid == 0) {
+			PMD_INIT_LOG(DEBUG, "Logic channel %d does not need to release", lch);
+			continue;
+		}
+
+		/* get coi table offset and index */
+		pch  = hw->channel_context[lch].ph_chno;
+		widx = pch / 32;
+		bidx = pch % 32;
+
+		addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));
+		var  = zxdh_read_reg(dev, ZXDH_BAR0_INDEX, addr);
+		var &= ~(1U << bidx);
+		zxdh_write_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+
+		hw->channel_context[lch].valid = 0;
+		hw->channel_context[lch].ph_chno = 0;
+	}
+
+	zxdh_release_lock(dev);
+
+	return 0;
+}
+
+/**
+ * Seed the broadcast / unicast-flood / multicast-flood attribute tables for
+ * all four VF groups of this port with PF-flood-enable defaults.
+ *
+ * @return 0 on success, first failing dpp write code otherwise.
+ *
+ * Fix vs. original: "ret" was declared uninitialized; it is now initialized
+ * to 0 so the function cannot return an indeterminate value.
+ */
+static int32_t zxdh_promisc_table_init(struct zxdh_hw *hw)
+{
+	uint32_t ret = 0;
+	uint32_t vf_group_id;
+	struct zxdh_brocast_t brocast_table = {0};
+	struct zxdh_unitcast_t uc_table = {0};
+	struct zxdh_multicast_t mc_table = {0};
+
+	for (vf_group_id = 0; vf_group_id < 4; vf_group_id++) {
+		brocast_table.flag = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG);
+		DPP_DTB_ERAM_ENTRY_INFO_T eram_brocast_entry = {
+			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
+			(ZXIC_UINT32 *)&brocast_table
+		};
+		DPP_DTB_USER_ENTRY_T entry_brocast = {
+			.sdt_no = ZXDH_SDT_BROCAST_ATT_TABLE,
+			.p_entry_data = (void *)&eram_brocast_entry
+		};
+
+		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry_brocast);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Write eram-brocast failed, code:%d", ret);
+			return ret;
+		}
+
+		uc_table.uc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG);
+		DPP_DTB_ERAM_ENTRY_INFO_T eram_uc_entry = {
+			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
+			(ZXIC_UINT32 *)&uc_table
+		};
+		DPP_DTB_USER_ENTRY_T entry_unicast = {
+			.sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE,
+			.p_entry_data = (void *)&eram_uc_entry
+		};
+
+		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry_unicast);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Write eram-unicast failed, code:%d", ret);
+			return ret;
+		}
+
+		mc_table.mc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG);
+		DPP_DTB_ERAM_ENTRY_INFO_T eram_mc_entry = {
+			((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id,
+			(ZXIC_UINT32 *)&mc_table
+		};
+		DPP_DTB_USER_ENTRY_T entry_multicast = {
+			.sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE,
+			.p_entry_data = (void *)&eram_mc_entry
+		};
+
+		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid,
+					1, &entry_multicast);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Write eram-multicast failed, code:%d", ret);
+			return ret;
+		}
+	}
+
+	PMD_DRV_LOG(DEBUG, "write promise tbl hw->hash_search_index:%d, vqm_vfid:%d",
+			hw->hash_search_index, hw->vfid);
+
+	return ret;
+}
+
+/**
+ * Program the port's base queue id (physical channel of logic queue 0,
+ * masked to 12 bits). A PF writes the vport attribute table directly; a VF
+ * asks its PF through the message channel.
+ *
+ * @return 0 on success, nonzero on failure.
+ *
+ * Fix vs. original: the result of dpp_dtb_entry_get() is now checked, so a
+ * failed read no longer leads to writing back a mostly-zero attribute entry.
+ */
+static int zxdh_config_qid(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_port_att_entry port_attr = {0};
+	struct zxdh_msg_info msg_info = {0};
+	int ret = 0;
+
+	if (hw->is_pf) {
+		DPP_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (ZXIC_UINT32 *)&port_attr};
+		DPP_DTB_USER_ENTRY_T entry = {
+			.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
+			.p_entry_data = (void *)&port_attr_entry
+		};
+
+		/* read-modify-write the vport attribute entry */
+		ret = dpp_dtb_entry_get(DEVICE_NO, g_dtb_data.queueid, &entry, 1);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "PF:%d port attr read failed\n", hw->vfid);
+			return -ret;
+		}
+		port_attr.port_base_qid = hw->channel_context[0].ph_chno & 0xfff;
+
+		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "PF:%d port_base_qid insert failed\n", hw->vfid);
+			return -ret;
+		}
+	} else {
+		struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_set_msg;
+
+		msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
+		attr_msg->mode = EGR_FLAG_PORT_BASE_QID;
+		attr_msg->value = hw->channel_context[0].ph_chno & 0xfff;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+					hw->vport.vport, EGR_FLAG_PORT_BASE_QID);
+			return ret;
+		}
+	}
+	return ret;
+}
+/*
+ * Configure virtio device
+ * It returns 0 on success.
+ */
+/*
+ * Configure virtio device: validate queue counts and mq modes, negotiate
+ * features, and — only when the total queue count changed — reset the device
+ * and reallocate the virtqueues before programming offloads.
+ * It returns 0 on success.
+ *
+ * Fix vs. original: the rx/tx mq_mode validation appeared twice verbatim;
+ * the duplicate copy was removed.
+ */
+int32_t zxdh_dev_configure(struct rte_eth_dev *dev)
+{
+	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t rx_offloads = rxmode->offloads;
+	uint32_t nr_vq = 0;
+	int32_t  ret = 0;
+
+	PMD_INIT_LOG(DEBUG, "configure");
+
+	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+		PMD_INIT_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!",
+					 dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+		return -EINVAL;
+	}
+	if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) {
+		PMD_INIT_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!",
+					 dev->data->nb_rx_queues, dev->data->nb_tx_queues,
+					 ZXDH_QUEUES_NUM_MAX);
+		return -EINVAL;
+	}
+	if ((rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) && (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE))	{
+		PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode);
+		return -EINVAL;
+	}
+
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
+		PMD_DRV_LOG(ERR, "Unsupported Tx multi queue mode %d", txmode->mq_mode);
+		return -EINVAL;
+	}
+
+	ret = zxdh_features_update(hw, rxmode, txmode);
+	if (ret < 0)
+		return ret;
+
+	/* check if lsc interrupt feature is enabled */
+	if (dev->data->dev_conf.intr_conf.lsc) {
+		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+			PMD_DRV_LOG(ERR, "link status not supported by host");
+			return -ENOTSUP;
+		}
+	}
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		hw->vlan_offload_cfg.vlan_strip = 1;
+
+	hw->has_tx_offload = tx_offload_enabled(hw);
+	hw->has_rx_offload = rx_offload_enabled(hw);
+
+	nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
+	if (nr_vq == hw->queue_num) {
+		/* queue count unchanged: skip the reset/realloc path */
+		goto conf_end;
+	}
+
+	PMD_DRV_LOG(DEBUG, "que changed need reset ");
+	/* Reset the device although not necessary at startup */
+	zxdh_vtpci_reset(hw);
+
+	/* Tell the host we've noticed this device. */
+	zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
+
+	/* Tell the host we've known how to drive the device. */
+	zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
+	/* The queue needs to be released when reconfiguring */
+	if (hw->vqs != NULL) {
+		zxdh_dev_free_mbufs(dev);
+		zxdh_free_queues(dev);
+	}
+
+	hw->queue_num = nr_vq;
+	ret = zxdh_alloc_queues(dev, nr_vq);
+	if (ret < 0)
+		return ret;
+
+	zxdh_datach_set(dev);
+
+	if (zxdh_configure_intr(dev) < 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure interrupt");
+		zxdh_free_queues(dev);
+		return -1;
+	}
+	ret = zxdh_config_qid(dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure base qid!");
+		return -1;
+	}
+
+	zxdh_vtpci_reinit_complete(hw);
+
+conf_end:
+	ret = zxdh_rx_csum_lro_offload_configure(dev);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to configure csum offload!");
+
+	zxdh_dev_conf_offload(dev);
+	PMD_INIT_LOG(DEBUG, " configure end");
+
+	return ret;
+}
+
+/**
+ * Initialize the per-VF VLAN filter table: group 0 carries the two valid
+ * flag bits, the remaining groups are written as all-zero entries.
+ * Write failures are logged as warnings but do not abort the loop.
+ *
+ * @return result of the last table write (DPP_OK on full success).
+ */
+int zxdh_vlan_filter_table_init(uint16_t vfid)
+{
+	struct zxdh_vlan_t vlan_table = {0};
+	int16_t ret = 0;
+	uint8_t vlan_group;
+
+	for (vlan_group = 0; vlan_group < VLAN_GROUP_NUM; vlan_group++) {
+		/* only the first group carries the valid-flag bits */
+		vlan_table.vlans[0] = 0;
+		if (vlan_group == 0)
+			vlan_table.vlans[0] = (1 << FIRST_VLAN_GROUP_VALID_BITS) |
+					(1 << VLAN_GROUP_VALID_BITS);
+
+		uint32_t index = (vlan_group << VQM_VFID_BITS) | vfid;
+
+		DPP_DTB_ERAM_ENTRY_INFO_T entry_data = {index, (ZXIC_UINT32 *)&vlan_table};
+		DPP_DTB_USER_ENTRY_T user_entry = {ZXDH_SDT_VLAN_ATT_TABLE, &entry_data};
+
+		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &user_entry);
+		if (ret != DPP_OK)
+			PMD_INIT_LOG(WARNING,
+				"[vfid:%d], vlan_group:%d, init vlan filter tbl failed, ret:%d",
+				vfid, vlan_group, ret);
+	}
+	return ret;
+}
+
+/**
+ * Install the port's primary MAC address: a PF programs the hash table
+ * directly, a VF sends a ZXDH_MAC_ADD request to its PF. On success the
+ * unicast address counter is incremented.
+ *
+ * @return 0 on success, nonzero on failure.
+ *
+ * Fix vs. original: the PF path no longer increments hw->uc_num when
+ * dev_mac_addr_add() failed (the VF path already returned early on error).
+ */
+static int zxdh_mac_config(struct rte_eth_dev *eth_dev)
+{
+	struct zxdh_hw *hw = eth_dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+	int ret = 0;
+
+	if (hw->is_pf == 1) {
+		PMD_INIT_LOG(INFO, "mac_config pf");
+		ret = dev_mac_addr_add(hw->vport.vport,
+				&eth_dev->data->mac_addrs[0], hw->hash_search_index);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to add mac: port 0x%x", hw->vport.vport);
+			return ret;
+		}
+		hw->uc_num++;
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", hw->vport.vport);
+		struct zxdh_mac_filter *mac_filter = &msg_info.data.zxdh_mac_filter;
+
+		mac_filter->filter_flag = 0xff;
+		rte_memcpy(&mac_filter->mac, &eth_dev->data->mac_addrs[0],
+				sizeof(eth_dev->data->mac_addrs[0]));
+		msg_head_build(hw, ZXDH_MAC_ADD, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(eth_dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+					hw->vport.vport, ZXDH_MAC_ADD);
+			return ret;
+		}
+		hw->uc_num++;
+	}
+	return ret;
+}
+
+/**
+ * Set the port's is_up attribute. A PF writes the vport attribute table
+ * directly; a VF asks the PF via the message channel.
+ *
+ * @param link_status new link state to record
+ * @return 0 on success, nonzero on failure.
+ *
+ * Fix vs. original: the result of dpp_dtb_entry_get() is now checked, so a
+ * failed read no longer leads to writing back a mostly-zero attribute entry.
+ */
+int32_t zxdh_dev_config_port_status(struct rte_eth_dev *dev, uint16_t link_status)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_port_att_entry port_attr = {0};
+	struct zxdh_msg_info msg_info = {0};
+	int32_t ret = 0;
+
+	if (hw->is_pf) {
+		DPP_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (ZXIC_UINT32 *)&port_attr};
+		DPP_DTB_USER_ENTRY_T entry = {
+			.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
+			.p_entry_data = (void *)&port_attr_entry
+		};
+
+		/* read-modify-write the vport attribute entry */
+		ret = dpp_dtb_entry_get(DEVICE_NO, g_dtb_data.queueid, &entry, 1);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "PF:%d port attr read failed\n", hw->vfid);
+			return -ret;
+		}
+		port_attr.is_up = link_status;
+
+		ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "PF:%d port_is_up insert failed\n", hw->vfid);
+			return -ret;
+		}
+	} else {
+		struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_set_msg;
+
+		msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
+		attr_msg->mode = EGR_FLAG_VPORT_IS_UP;
+		attr_msg->value = link_status;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+				hw->vport.vport, EGR_FLAG_VPORT_IS_UP);
+			return ret;
+		}
+	}
+	return ret;
+}
+/**
+ * Fun:
+ */
+/**
+ * Start the device: finish rx queue setup, enable interrupts, flush stale
+ * rx packets, kick all rx/tx queues, then program the primary MAC address.
+ *
+ * @return negative on queue-setup or interrupt-enable failure; otherwise 0
+ *         (see NOTE below: a MAC config failure is not propagated).
+ */
+int32_t zxdh_dev_start(struct rte_eth_dev *dev)
+{
+	int32_t ret;
+	uint16_t vtpci_logic_qidx;
+	/* Finish the initialization of the queues */
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vtpci_logic_qidx = 2 * i + RQ_QUEUE_IDX;
+		ret = zxdh_dev_rx_queue_setup_finish(dev, vtpci_logic_qidx);
+		if (ret < 0)
+			return ret;
+	}
+	set_rxtx_funcs(dev);
+	ret = zxdh_intr_enable(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "interrupt enable failed");
+		return -EIO;
+	}
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vtpci_logic_qidx = 2 * i + RQ_QUEUE_IDX;
+		vq = hw->vqs[vtpci_logic_qidx];
+		/* Flush the old packets */
+		zxdh_virtqueue_rxvq_flush(vq);
+		virtqueue_notify(vq);
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vtpci_logic_qidx = 2 * i + TQ_QUEUE_IDX;
+		vq = hw->vqs[vtpci_logic_qidx];
+		virtqueue_notify(vq);
+	}
+	hw->started = true;
+	ret = zxdh_mac_config(hw->eth_dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, " mac config failed");
+		/* NOTE(review): zxdh_dev_set_link_up() is invoked only on the
+		 * FAILURE path here, and the error is then swallowed by the
+		 * unconditional "return 0" below -- this looks inverted;
+		 * confirm the intended behavior with the authors.
+		 */
+		zxdh_dev_set_link_up(dev);
+	}
+	return 0;
+}
+
+/* Detach and free every mbuf still attached to the device's rx/tx
+ * virtqueues; queues of other types (if any) are skipped.
+ */
+static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t nr_vq = hw->queue_num;
+	uint32_t freed = 0;
+	uint32_t i;
+
+	if (hw->vqs == NULL)
+		return;
+
+	for (i = 0; i < nr_vq; i++) {
+		struct virtqueue *vq = hw->vqs[i];
+		const char *type __rte_unused;
+		struct rte_mbuf *buf;
+		int32_t qtype;
+
+		if (vq == NULL)
+			continue;
+
+		qtype = get_queue_type(i);
+		if (qtype == VTNET_RQ)
+			type = "rxq";
+		else if (qtype == VTNET_TQ)
+			type = "txq";
+		else
+			continue;
+
+		PMD_INIT_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
+		while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL) {
+			rte_pktmbuf_free(buf);
+			freed++;
+		}
+		PMD_INIT_LOG(DEBUG, "After freeing %s[%d] used and unused buf", type, i);
+	}
+
+	PMD_INIT_LOG(DEBUG, "%d mbufs freed", freed);
+}
+
+/*
+ * Stop device: disable interrupt and mark link down
+ */
+/*
+ * Stop device: disable interrupt and mark link down. The started flag is
+ * flipped under hw->state_lock; a second concurrent stop becomes a no-op.
+ */
+int32_t zxdh_dev_stop(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!dev->data->dev_started)
+		return 0;
+
+	PMD_INIT_LOG(DEBUG, "stop");
+
+	rte_spinlock_lock(&hw->state_lock);
+	if (hw->started) {
+		hw->started = 0;
+		zxdh_intr_disable(dev);
+		zxdh_dev_set_link_down(dev);
+		/* queue disable: not performed here */
+	}
+	rte_spinlock_unlock(&hw->state_lock);
+
+	return 0;
+}
+/**
+ *  Fun:
+ */
+/* Map an RTE_ETH_SPEED_NUM_* value to its RTE_ETH_LINK_SPEED_* capability
+ * bit; unknown speeds yield 0 (no capability advertised).
+ */
+static uint32_t zxdh_dev_speed_capa_get(uint32_t speed)
+{
+	static const struct {
+		uint32_t num;
+		uint32_t capa;
+	} speed_map[] = {
+		{RTE_ETH_SPEED_NUM_10G,  RTE_ETH_LINK_SPEED_10G},
+		{RTE_ETH_SPEED_NUM_20G,  RTE_ETH_LINK_SPEED_20G},
+		{RTE_ETH_SPEED_NUM_25G,  RTE_ETH_LINK_SPEED_25G},
+		{RTE_ETH_SPEED_NUM_40G,  RTE_ETH_LINK_SPEED_40G},
+		{RTE_ETH_SPEED_NUM_50G,  RTE_ETH_LINK_SPEED_50G},
+		{RTE_ETH_SPEED_NUM_56G,  RTE_ETH_LINK_SPEED_56G},
+		{RTE_ETH_SPEED_NUM_100G, RTE_ETH_LINK_SPEED_100G},
+		{RTE_ETH_SPEED_NUM_200G, RTE_ETH_LINK_SPEED_200G},
+	};
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(speed_map); i++) {
+		if (speed_map[i].num == speed)
+			return speed_map[i].capa;
+	}
+	return 0;
+}
+/**
+ * Report device capabilities: queue limits, MTU bounds, MAC slots, RSS
+ * parameters and the supported rx/tx offload sets.
+ * @return always 0.
+ */
+int32_t zxdh_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	dev_info->speed_capa = zxdh_dev_speed_capa_get(hw->speed);
+	dev_info->max_rx_queues = RTE_MIN(hw->max_queue_pairs, ZXDH_RX_QUEUES_MAX);
+	dev_info->max_tx_queues = RTE_MIN(hw->max_queue_pairs, ZXDH_TX_QUEUES_MAX);
+	dev_info->min_rx_bufsize = ZXDH_MIN_RX_BUFSIZE;
+	dev_info->max_rx_pktlen = ZXDH_MAX_RX_PKTLEN;
+	dev_info->max_mac_addrs = ZXDH_MAX_MAC_ADDRS;
+
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_SCATTER |
+			RTE_ETH_RX_OFFLOAD_TCP_LRO |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	dev_info->reta_size = ZXDH_RETA_SIZE;
+	dev_info->hash_key_size = ZXDH_RSK_LEN;
+	dev_info->flow_type_rss_offloads = ZXDH_RSS_HF;
+	dev_info->max_mtu = hw->max_mtu;
+	/* NOTE(review): magic lower MTU bound -- confirm against hw spec */
+	dev_info->min_mtu = 50;
+
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+			RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			RTE_ETH_TX_OFFLOAD_UDP_TSO |
+			RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
+
+	return 0;
+}
+/**
+ * Fun:
+ */
+/*
+ * Raise the per-component log level to DEBUG for each of the tx/rx/msg
+ * logtypes whose corresponding RTE_LIBRTE_ZXDH_DEBUG_* option was enabled
+ * at build time; a negative logtype (unregistered) is left untouched.
+ */
+static void zxdh_log_init(void)
+{
+#ifdef RTE_LIBRTE_ZXDH_DEBUG_TX
+	if (zxdh_logtype_tx >= 0)
+		rte_log_set_level(zxdh_logtype_tx, RTE_LOG_DEBUG);
+#endif
+#ifdef RTE_LIBRTE_ZXDH_DEBUG_RX
+	if (zxdh_logtype_rx >= 0)
+		rte_log_set_level(zxdh_logtype_rx, RTE_LOG_DEBUG);
+#endif
+#ifdef RTE_LIBRTE_ZXDH_DEBUG_MSG
+	if (zxdh_logtype_msg >= 0)
+		rte_log_set_level(zxdh_logtype_msg, RTE_LOG_DEBUG);
+#endif
+}
+
+struct zxdh_dtb_shared_data g_dtb_data = {0};
+
+/**
+ * Delete this port's online hash entries (the L2_ENTRY and MC tables for
+ * its hash search index) from the DTB. No-op when the DTB is not
+ * initialized or on a VF.
+ *
+ * @return 0 when DTB is uninitialized or not PF; otherwise the result of
+ *         the last delete (a failed L2 delete is logged but does not stop
+ *         the MC delete).
+ */
+static int zxdh_tbl_entry_destroy(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!g_dtb_data.init_done)
+		return ret;
+
+	if (hw->is_pf) {
+		/*hash  &ddr*/
+		uint32_t sdt_no;
+
+		sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);
+		ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);
+		PMD_DRV_LOG(INFO, "%s dpp_dtb_hash_online_delete sdt_no %d",
+				dev->data->name, sdt_no);
+		if (ret)
+			PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed",
+				dev->data->name, sdt_no);
+
+		sdt_no = MK_SDT_NO(MC, hw->hash_search_index);
+		ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);
+		PMD_DRV_LOG(INFO, "%s dpp_dtb_hash_online_delete sdt_no %d",
+				dev->data->name, sdt_no);
+		if (ret)
+			PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed",
+				dev->data->name, sdt_no);
+	}
+
+	return ret;
+}
+/**
+ * Fun:
+ */
+#define INVALID_DTBQUE  0xFFFF
+/*
+ * Tear down the global DTB state, but only if this device is the one it was
+ * bound to: uninstall the NP online session, then free the config, dump and
+ * bulk-dump memzones. Always clears the shared npsdk_init_done flag.
+ * NOTE(review): the leading-underscore name is reserved at file scope by the
+ * C standard; consider renaming in a follow-up.
+ */
+static void _dtb_data_res_free(struct zxdh_hw *hw)
+{
+	struct rte_eth_dev *dev = hw->eth_dev;
+
+	if ((g_dtb_data.init_done) && (g_dtb_data.bind_device == dev))  {
+		PMD_DRV_LOG(INFO, "%s g_dtb_data free queue %d",
+				dev->data->name, g_dtb_data.queueid);
+
+		int ret = 0;
+
+		ret = dpp_np_online_uninstall(0, dev->data->name, g_dtb_data.queueid);
+		if (ret)
+			PMD_DRV_LOG(ERR, "%s dpp_np_online_uninstall failed", dev->data->name);
+
+		PMD_DRV_LOG(INFO, "%s dpp_np_online_uninstall queid %d",
+				dev->data->name, g_dtb_data.queueid);
+		if (g_dtb_data.dtb_table_conf_mz) {
+			rte_memzone_free(g_dtb_data.dtb_table_conf_mz);
+			PMD_DRV_LOG(INFO, "%s free  dtb_table_conf_mz  ", dev->data->name);
+			g_dtb_data.dtb_table_conf_mz = NULL;
+		}
+		if (g_dtb_data.dtb_table_dump_mz) {
+
+			PMD_DRV_LOG(INFO, "%s free  dtb_table_dump_mz  ", dev->data->name);
+			rte_memzone_free(g_dtb_data.dtb_table_dump_mz);
+			g_dtb_data.dtb_table_dump_mz = NULL;
+		}
+		int i;
+
+		/* bulk-dump zones registered by zxdh_dtb_dump_res_init() */
+		for (i = 0; i < DPU_MAX_BASE_DTB_TABLE_COUNT; i++) {
+			if (g_dtb_data.dtb_table_bulk_dump_mz[i]) {
+				rte_memzone_free(g_dtb_data.dtb_table_bulk_dump_mz[i]);
+
+				PMD_DRV_LOG(INFO, "%s free dtb_table_bulk_dump_mz[%d]",
+						dev->data->name, i);
+				g_dtb_data.dtb_table_bulk_dump_mz[i] = NULL;
+			}
+		}
+		g_dtb_data.init_done = 0;
+		g_dtb_data.bind_device = NULL;
+	}
+	if (zxdh_shared_data != NULL)
+		zxdh_shared_data->npsdk_init_done = 0;
+
+}
+
+/*
+ * Build a zxdh_dtb_bulk_dump_info initializer for a hash-table dump
+ * resource: memzone name/size plus the table's SDT number offset by the
+ * hash search index.
+ */
+#define MK_SDT_HASHRES(table, hash_idx) \
+{ \
+	.mz_name = RTE_STR(ZXDH_## table ##_TABLE), \
+	.mz_size = DPU_DTB_TABLE_BULK_ZCAM_DUMP_SIZE, \
+	.sdt_no = ZXDH_SDT_##table##_TABLE0 + hash_idx, \
+	.mz = NULL\
+}
+/**
+ * Fun:
+ */
+/**
+ * Reserve one memzone per dumpable table (eram + zcam hash tables) and
+ * register each zone's address/size/sdt_no in dpp_ctrl->dump_addr_info[],
+ * advancing dpp_ctrl->dump_sdt_num and recording the zone in g_dtb_data.
+ *
+ * @return 0 on success, -ENOMEM when a memzone reservation fails.
+ * NOTE(review): on failure, zones reserved in earlier iterations stay
+ * recorded in g_dtb_data.dtb_table_bulk_dump_mz[] -- the caller is expected
+ * to release them via _dtb_data_res_free(); confirm every caller does.
+ */
+static inline int zxdh_dtb_dump_res_init(struct zxdh_hw *hw __rte_unused,
+			DPP_DEV_INIT_CTRL_T *dpp_ctrl)
+{
+	int ret = 0;
+	int i;
+
+	struct zxdh_dtb_bulk_dump_info dtb_dump_baseres[] = {
+	/* eram */
+	{"zxdh_sdt_vxlan_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VXLAN_ATT_TABLE, NULL},
+	{"zxdh_sdt_vport_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VPORT_ATT_TABLE, NULL},
+	{"zxdh_sdt_panel_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_PANEL_ATT_TABLE, NULL},
+	{"zxdh_sdt_rss_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_RSS_ATT_TABLE, NULL},
+	{"zxdh_sdt_vlan_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VLAN_ATT_TABLE, NULL},
+	{"zxdh_sdt_lag_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_LAG_ATT_TABLE, NULL},
+	/* zcam */
+	/*hash*/
+	{"zxdh_sdt_l2_entry_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE0, NULL},
+	{"zxdh_sdt_l2_entry_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE1, NULL},
+	{"zxdh_sdt_l2_entry_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE2, NULL},
+	{"zxdh_sdt_l2_entry_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE3, NULL},
+	{"zxdh_sdt_l2_entry_table4", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE4, NULL},
+	{"zxdh_sdt_l2_entry_table5", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE5, NULL},
+	{"zxdh_sdt_mc_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE0, NULL},
+	{"zxdh_sdt_mc_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE1, NULL},
+	{"zxdh_sdt_mc_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE2, NULL},
+	{"zxdh_sdt_mc_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE3, NULL},
+	{"zxdh_sdt_mc_table4", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE4, NULL},
+	{"zxdh_sdt_mc_table5", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE5, NULL},
+	};
+	for (i = 0; i < (int) RTE_DIM(dtb_dump_baseres); i++) {
+		struct zxdh_dtb_bulk_dump_info *p = dtb_dump_baseres + i;
+		const struct rte_memzone *generic_dump_mz = rte_memzone_reserve_aligned(p->mz_name,
+					p->mz_size, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+		if (generic_dump_mz == NULL) {
+			PMD_DRV_LOG(ERR,
+				"Cannot alloc mem for dtb tbl bulk dump, mz_name is %s, mz_size is %u",
+				p->mz_name, p->mz_size);
+			ret = -ENOMEM;
+			return ret;
+		}
+		p->mz = generic_dump_mz;
+		dpp_ctrl->dump_addr_info[i].vir_addr = generic_dump_mz->addr_64;
+		dpp_ctrl->dump_addr_info[i].phy_addr = generic_dump_mz->iova;
+		dpp_ctrl->dump_addr_info[i].sdt_no   = p->sdt_no;
+		dpp_ctrl->dump_addr_info[i].size	  = p->mz_size;
+		PMD_INIT_LOG(DEBUG,
+			"dump_addr_info[%2d] vir_addr:0x%llx phy_addr:0x%llx sdt_no:%u size:%u",
+			i,
+			dpp_ctrl->dump_addr_info[i].vir_addr,
+			dpp_ctrl->dump_addr_info[i].phy_addr,
+			dpp_ctrl->dump_addr_info[i].sdt_no,
+			dpp_ctrl->dump_addr_info[i].size);
+
+		g_dtb_data.dtb_table_bulk_dump_mz[dpp_ctrl->dump_sdt_num] = generic_dump_mz;
+		dpp_ctrl->dump_sdt_num++;
+	}
+	return ret;
+}
+/**
+ * Fun:  last entry to clear
+ */
+/**
+ * Offline (last-exit) cleanup: delete this port's L2_ENTRY and MC hash
+ * entries via the offline delete path. No-op when the DTB is not
+ * initialized or on a VF.
+ *
+ * @return 0 when DTB is uninitialized or not PF; otherwise the result of
+ *         the last delete (a failed L2 delete is logged but does not stop
+ *         the MC delete).
+ */
+static int zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)
+{
+	int ret = 0;
+
+	if (!g_dtb_data.init_done)
+		return ret;
+
+	if (hw->is_pf) {
+		/*hash  &ddr*/
+		uint32_t sdt_no;
+
+		sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);
+		ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);
+		PMD_DRV_LOG(INFO, "%d dpp_dtb_hash_offline_delete sdt_no %d",
+				hw->port_id, sdt_no);
+		if (ret)
+			PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",
+					hw->port_id, sdt_no);
+
+		sdt_no = MK_SDT_NO(MC, hw->hash_search_index);
+		ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);
+		PMD_DRV_LOG(INFO, "%d dpp_dtb_hash_offline_delete sdt_no %d",
+				hw->port_id, sdt_no);
+		if (ret)
+			PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",
+				hw->port_id, sdt_no);
+
+		/*eram  iterm by iterm*/
+		/*etcam*/
+	}
+	return ret;
+}
+/**
+ * Fun:
+ */
+/**
+ * One-time initialization of the process-wide DTB channel: bind g_dtb_data
+ * to this device, allocate the config/dump memzones, register the bulk-dump
+ * buffers and open the host-NP session. Subsequent calls reuse the state.
+ *
+ * @return 0 on success, nonzero on failure (all resources released).
+ *
+ * Fixes vs. original: the return of zxdh_dtb_dump_res_init() is checked;
+ * the out-of-memory paths set ret = ENOMEM so the final "return -ret" keeps
+ * the same (negative) sign as the other failure paths; the bar-offset trace
+ * is logged at DEBUG instead of ERR; log typos fixed.
+ */
+static inline int npsdk_dtb_res_init(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (g_dtb_data.init_done) {
+		PMD_INIT_LOG(DEBUG, "DTB res already init done, dev %s no need init",
+			dev->device->name);
+		return 0;
+	}
+	g_dtb_data.queueid = INVALID_DTBQUE;
+	g_dtb_data.bind_device = dev;
+	g_dtb_data.dev_refcnt++;
+	g_dtb_data.init_done = 1;
+
+	DPP_DEV_INIT_CTRL_T *dpp_ctrl = malloc(sizeof(*dpp_ctrl) +
+			sizeof(DPP_DTB_ADDR_INFO_T) * 256);
+
+	if (dpp_ctrl == NULL) {
+		PMD_INIT_LOG(ERR, "dev %s cannot allocate memory for dpp_ctrl", dev->device->name);
+		ret = ENOMEM;
+		goto free_res;
+	}
+	memset(dpp_ctrl, 0, sizeof(*dpp_ctrl) + sizeof(DPP_DTB_ADDR_INFO_T) * 256);
+
+	dpp_ctrl->queue_id = 0xff;
+	dpp_ctrl->vport	 = hw->vport.vport;
+	dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC;
+	/* NOTE(review): unbounded copy -- verify port_name capacity vs the
+	 * maximum rte device name length.
+	 */
+	strcpy((char *)dpp_ctrl->port_name, dev->device->name);
+	dpp_ctrl->pcie_vir_addr = (ZXIC_ADDR_T)hw->bar_addr[0];
+
+	struct bar_offset_params param = {0};
+	struct bar_offset_res  res = {0};
+
+	param.pcie_id = hw->pcie_id;
+	param.virt_addr = hw->bar_addr[0]+ZXDH_CTRLCH_OFFSET;
+	param.type = URI_NP;
+
+	ret = zxdh_get_bar_offset(&param, &res);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "dev %s get npbar offset failed", dev->device->name);
+		goto free_res;
+	}
+	dpp_ctrl->np_bar_len = res.bar_length;
+	dpp_ctrl->np_bar_offset = res.bar_offset;
+	PMD_INIT_LOG(DEBUG,
+		"dpp_ctrl->pcie_vir_addr 0x%llx bar_offs  0x%x bar_len 0x%x",
+		dpp_ctrl->pcie_vir_addr, dpp_ctrl->np_bar_offset, dpp_ctrl->np_bar_len);
+	if (!g_dtb_data.dtb_table_conf_mz) {
+		const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",
+				DPU_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+		if (conf_mz == NULL) {
+			PMD_INIT_LOG(ERR,
+				"dev %s cannot allocate memory for dtb table conf",
+				dev->device->name);
+			ret = ENOMEM;
+			goto free_res;
+		}
+		dpp_ctrl->down_vir_addr = conf_mz->addr_64;
+		dpp_ctrl->down_phy_addr = conf_mz->iova;
+		g_dtb_data.dtb_table_conf_mz = conf_mz;
+	}
+	if (!g_dtb_data.dtb_table_dump_mz) {
+		const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",
+				DPU_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+		if (dump_mz == NULL) {
+			PMD_INIT_LOG(ERR,
+				"dev %s cannot allocate memory for dtb table dump",
+				dev->device->name);
+			ret = ENOMEM;
+			goto free_res;
+		}
+		dpp_ctrl->dump_vir_addr = dump_mz->addr_64;
+		dpp_ctrl->dump_phy_addr = dump_mz->iova;
+		g_dtb_data.dtb_table_dump_mz = dump_mz;
+	}
+	/* init bulk dump; abort on failure instead of ignoring it */
+	ret = zxdh_dtb_dump_res_init(hw, dpp_ctrl);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "dev %s dtb dump res init failed", dev->device->name);
+		goto free_res;
+	}
+
+	ret = dpp_host_np_init(0, dpp_ctrl);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "dev %s dpp host np init failed .ret %d", dev->device->name, ret);
+		goto free_res;
+	}
+
+	PMD_INIT_LOG(INFO, "dev %s dpp host np init ok.dtb queue %d",
+		dev->device->name, dpp_ctrl->queue_id);
+	g_dtb_data.queueid = dpp_ctrl->queue_id;
+	free(dpp_ctrl);
+	return 0;
+
+free_res:
+	_dtb_data_res_free(hw);
+	free(dpp_ctrl);	/* free(NULL) is a no-op */
+	return -ret;
+}
+/**
+ * Fun: fetch all NP flow-table resources for resource @type and initialise
+ * the hash/eram/acl/stat engines on device 0.
+ * Return: DPP_OK on success; ZXIC_COMM_CHECK_RC bails out with the failing
+ * call's status otherwise.
+ */
+static uint32_t dpp_res_uni_init(ZXIC_UINT32 type)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 dev_id = 0;
+	/* `= {0}` zero-initialises every struct already; the explicit
+	 * ZXIC_COMM_MEMSET calls that followed were redundant and are dropped.
+	 */
+	DPP_APT_HASH_RES_INIT_T tHashResInit = {0};
+	DPP_APT_ERAM_RES_INIT_T tEramResInit = {0};
+	DPP_APT_ACL_RES_INIT_T tAclResInit = {0};
+	DPP_APT_DDR_RES_INIT_T tDdrResInit = {0};
+	DPP_APT_LPM_RES_INIT_T tLpmResInit = {0};
+	DPP_APT_STAT_RES_INIT_T tStatResInit = {0};
+
+	/* Obtain all flow table resources.
+	 * NOTE(review): tDdrResInit/tLpmResInit are fetched but never consumed
+	 * below -- presumably reserved for later init stages; confirm.
+	 */
+	rc = dpp_apt_hash_res_get(type, &tHashResInit);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_drv_hash_res_get");
+	rc = dpp_apt_eram_res_get(type, &tEramResInit);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_drv_eram_res_get");
+	rc = dpp_apt_acl_res_get(type, &tAclResInit);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_drv_acl_res_get");
+	rc = dpp_apt_ddr_res_get(type, &tDdrResInit);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_ddr_res_get");
+	rc = dpp_apt_lpm_res_get(type, &tLpmResInit);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_lpm_res_get");
+	rc = dpp_apt_stat_res_get(type, &tStatResInit);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_stat_res_get");
+
+	/* hash init */
+	rc = dpp_apt_hash_global_res_init(dev_id);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_global_res_init");
+
+	rc = dpp_apt_hash_func_res_init(dev_id, tHashResInit.func_num, tHashResInit.func_res);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_func_res_init");
+	PMD_INIT_LOG(INFO, " func_num  %d", tHashResInit.func_num);
+
+	rc = dpp_apt_hash_bulk_res_init(dev_id, tHashResInit.bulk_num, tHashResInit.bulk_res);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_bulk_res_init");
+	PMD_INIT_LOG(INFO, " bulk_num  %d", tHashResInit.bulk_num);
+
+	/* tbl-res must be initialized after fun-res and buld-res */
+	rc = dpp_apt_hash_tbl_res_init(dev_id, tHashResInit.tbl_num, tHashResInit.tbl_res);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_tbl_res_init");
+	PMD_INIT_LOG(INFO, " tbl_num  %d", tHashResInit.tbl_num);
+	/* eram init */
+	rc = dpp_apt_eram_res_init(dev_id, tEramResInit.tbl_num, tEramResInit.eram_res);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_eram_res_init");
+
+	/* init acl */
+	rc = dpp_apt_acl_res_init(dev_id, tAclResInit.tbl_num, tAclResInit.acl_res);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_apt_acl_res_init");
+
+	/* init stat */
+	rc = dpp_stat_ppu_eram_baddr_set(dev_id, tStatResInit.eram_baddr);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_baddr_set");
+
+	rc = dpp_stat_ppu_eram_depth_set(dev_id, tStatResInit.eram_depth); /* unit: 128bits */
+	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_depth_set");
+
+	rc = dpp_se_cmmu_smmu1_cfg_set(dev_id, tStatResInit.ddr_baddr);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_se_cmmu_smmu1_cfg_set");
+
+	rc = dpp_stat_ppu_ddr_baddr_set(dev_id, tStatResInit.ppu_ddr_offset); /* unit: 128bits */
+	/* was a copy/paste of the eram_depth_set tag above */
+	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_ddr_baddr_set");
+
+	return DPP_OK;
+}
+
+/* Initialise the standard NP APT resources for the NIC resource type.
+ * Returns 0 on success, -1 on failure.
+ */
+static inline int npsdk_apt_res_init(struct rte_eth_dev *dev __rte_unused)
+{
+	uint32_t ret = dpp_res_uni_init(SE_NIC_RES_TYPE);
+
+	if (ret) {
+		PMD_INIT_LOG(ERR, "init stand dpp res failed");
+		return -1;
+	}
+
+	/* get_cur_time_ms() returns milliseconds; the old "%lu s" label was
+	 * both mislabelled and non-portable for a 64-bit value.
+	 */
+	PMD_INIT_LOG(INFO, " end ...time: %" PRIu64 " ms", get_cur_time_ms());
+	return 0;
+}
+/**
+ * Fun: destroy this port's NP table entries and drop the global DTB
+ * reference count; the shared DTB resources are freed when the last
+ * referencing device goes away.
+ */
+static void zxdh_np_destroy(struct rte_eth_dev *dev)
+{
+	zxdh_tbl_entry_destroy(dev);
+	/* NOTE(review): with '&&' we only bail out when BOTH init_done and
+	 * dev_refcnt are zero, so an un-initialised DTB with refcnt 0 would
+	 * still fall through and underflow dev_refcnt below -- confirm whether
+	 * '||' was intended.
+	 */
+	if ((!g_dtb_data.init_done) && (!g_dtb_data.dev_refcnt))
+		return;
+
+	/* Last device out frees the shared DTB memzones/queue. */
+	if (--g_dtb_data.dev_refcnt == 0) {
+		struct zxdh_hw *hw = dev->data->dev_private;
+
+		_dtb_data_res_free(hw);
+	}
+
+	PMD_DRV_LOG(INFO, "g_dtb_data	dev_refcnt %d", g_dtb_data.dev_refcnt);
+}
+
+/**
+ * Fun: flush/initialise the per-port NP tables: port attributes for all
+ * functions, plus (PF only) panel, vlan-filter and promisc tables and the
+ * default RSS hash key.
+ * Return: 0 on success, first failing sub-init's error code otherwise.
+ */
+static int zxdh_tables_init(struct rte_eth_dev *dev)
+{
+	/*	port attr\pannel attr\rss\mac vlan filter flush */
+	int ret = 0;
+
+	ret = zxdh_port_attr_init(dev);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, " zxdh_port_attr_init failed");
+		return ret;
+	}
+
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (hw->is_pf) {
+		ret = zxdh_panel_table_init(dev);
+		if (ret) {
+			PMD_INIT_LOG(ERR, " panel table init failed");
+			return ret;
+		}
+		ret = zxdh_vlan_filter_table_init(vport_to_vfid(hw->vport));
+		if (ret) {
+			/* was a copy/paste of the panel-table message */
+			PMD_INIT_LOG(ERR, " vlan filter table init failed");
+			return ret;
+		}
+		ret = zxdh_promisc_table_init(hw);
+		if (ret) {
+			PMD_INIT_LOG(ERR, " promisc_table_init failed");
+			return ret;
+		}
+		config_default_hash_key();
+	}
+	return ret;
+}
+/**
+ * Process-shared singleton: one zxdh_shared_data block lives in the memzone
+ * named below; zxdh_shared_data_lock serialises its creation/lookup.
+ */
+const char *MZ_ZXDH_PMD_SHARED_DATA = "zxdh_pmd_shared_data";
+rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+struct zxdh_shared_data *zxdh_shared_data;
+
+/* Create (primary) or attach to (secondary) the process-shared data
+ * memzone. Returns 0 on success, -rte_errno on reserve/lookup failure.
+ */
+static int zxdh_init_shared_data(void)
+{
+	const struct rte_memzone *mz = NULL;
+	int ret = 0;
+
+	rte_spinlock_lock(&zxdh_shared_data_lock);
+	if (zxdh_shared_data != NULL)
+		goto done;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		/* Primary owns the allocation and zeroes it. */
+		mz = rte_memzone_reserve(MZ_ZXDH_PMD_SHARED_DATA,
+				sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0);
+		if (mz == NULL) {
+			PMD_INIT_LOG(ERR, "Cannot allocate zxdh shared data");
+			ret = -rte_errno;
+			goto done;
+		}
+		zxdh_shared_data = mz->addr;
+		memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data));
+		rte_spinlock_init(&zxdh_shared_data->lock);
+	} else {
+		/* Secondary merely attaches to the primary's zone. */
+		mz = rte_memzone_lookup(MZ_ZXDH_PMD_SHARED_DATA);
+		if (mz == NULL) {
+			PMD_INIT_LOG(ERR, "Cannot attach zxdh shared data");
+			ret = -rte_errno;
+			goto done;
+		}
+		zxdh_shared_data = mz->addr;
+	}
+
+done:
+	rte_spinlock_unlock(&zxdh_shared_data_lock);
+	return ret;
+}
+
+/* Drop one reference on the shared resources; the primary process frees
+ * the flow/meter mempools when the count reaches zero.
+ */
+static void zxdh_free_sh_res(void)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
+	rte_spinlock_lock(&zxdh_shared_data_lock);
+	if ((zxdh_shared_data != NULL) && zxdh_shared_data->init_done &&
+		(--zxdh_shared_data->dev_refcnt == 0)) {
+		rte_mempool_free(zxdh_shared_data->flow_mp);
+		rte_mempool_free(zxdh_shared_data->mtr_mp);
+		rte_mempool_free(zxdh_shared_data->mtr_profile_mp);
+		rte_mempool_free(zxdh_shared_data->mtr_policy_mp);
+	}
+	rte_spinlock_unlock(&zxdh_shared_data_lock);
+}
+
+/**
+ * Fun: primary-process creation of the shared flow/meter mempools and list
+ * heads hanging off @sd.
+ * Return: 0 on success; -rte_errno with all partially-created pools freed
+ * otherwise (rte_mempool_free(NULL) is a no-op).
+ */
+static int zxdh_init_sh_res(struct zxdh_shared_data *sd)
+{
+	const char *MZ_ZXDH_FLOW_MP        = "zxdh_flow_mempool";
+	const char *MZ_ZXDH_MTR_MP         = "zxdh_mtr_mempool";
+	const char *MZ_ZXDH_MTR_PROFILE_MP = "zxdh_mtr_profile_mempool";
+	const char *MZ_ZXDH_MTR_POLICY_MP = "zxdh_mtr_policy_mempool";
+	struct rte_mempool *flow_mp = NULL;
+	struct rte_mempool *mtr_mp = NULL;
+	struct rte_mempool *mtr_profile_mp = NULL;
+	struct rte_mempool *mtr_policy_mp = NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		flow_mp = rte_mempool_create(MZ_ZXDH_FLOW_MP, MAX_FLOW_NUM,
+			sizeof(struct zxdh_flow),
+			64, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+		if (flow_mp == NULL) {
+			PMD_INIT_LOG(ERR, "Cannot allocate zxdh flow mempool");
+			goto error;
+		}
+		mtr_mp = rte_mempool_create(MZ_ZXDH_MTR_MP, MAX_MTR_NUM,
+			sizeof(struct zxdh_mtr_object),
+			64, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+		if (mtr_mp == NULL) {
+			PMD_INIT_LOG(ERR, "Cannot allocate zxdh mtr mempool");
+			goto error;
+		}
+		mtr_profile_mp = rte_mempool_create(MZ_ZXDH_MTR_PROFILE_MP, MAX_MTR_PROFILE_NUM,
+			sizeof(struct zxdh_meter_profile),
+			64, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+		if (mtr_profile_mp == NULL) {
+			PMD_INIT_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");
+			goto error;
+		}
+		mtr_policy_mp = rte_mempool_create(MZ_ZXDH_MTR_POLICY_MP, ZXDH_MAX_POLICY_NUM,
+			sizeof(struct zxdh_meter_policy),
+			64, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+		if (mtr_policy_mp == NULL) {
+			/* was a copy/paste of the profile-mempool message */
+			PMD_INIT_LOG(ERR, "Cannot allocate zxdh mtr policy mempool");
+			goto error;
+		}
+		sd->flow_mp = flow_mp;
+		sd->mtr_mp = mtr_mp;
+		sd->mtr_profile_mp = mtr_profile_mp;
+		sd->mtr_policy_mp = mtr_policy_mp;
+
+		TAILQ_INIT(&zxdh_shared_data->flow_list);
+		TAILQ_INIT(&zxdh_shared_data->meter_profile_list);
+		TAILQ_INIT(&zxdh_shared_data->mtr_list);
+		TAILQ_INIT(&zxdh_shared_data->mtr_policy_list);
+	}
+	return 0;
+
+error:
+	rte_mempool_free(mtr_policy_mp);
+	rte_mempool_free(mtr_profile_mp);
+	rte_mempool_free(mtr_mp);
+	rte_mempool_free(flow_mp);
+	return -rte_errno;
+}
+
+/**
+ * Fun: global meter (policer) resource bookkeeping shared by all ports.
+ */
+struct zxdh_mtr_res g_mtr_res;
+static void zxdh_mtr_init(void)
+{
+	/* Zero the bookkeeping FIRST, then init the lock: the previous order
+	 * memset() the freshly initialised spinlock right back to garbage.
+	 */
+	memset(&g_mtr_res, 0, sizeof(g_mtr_res));
+	rte_spinlock_init(&g_mtr_res.hw_plcr_res_lock);
+}
+
+#define ZXDH_HASHIDX_MAX  6
+
+/**
+ * Fun: bring up the NP side for this port. The first PF performs the heavy
+ * DTB/APT resource initialisation; subsequent ports only take a reference
+ * on the already-shared DTB state.
+ * Return: 0 on success, negative error otherwise.
+ */
+static int zxdh_np_init(struct rte_eth_dev *eth_dev)
+{
+	/* signed: npsdk_*_init() can return negative codes that we negate */
+	int32_t ret = 0;
+	struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+	if ((zxdh_shared_data != NULL) && zxdh_shared_data->npsdk_init_done) {
+		g_dtb_data.dev_refcnt++;
+		zxdh_tbl_entry_offline_destroy(hw);
+		PMD_DRV_LOG(INFO, "no need to init dtb  dtb channel %d devref %d",
+				g_dtb_data.queueid, g_dtb_data.dev_refcnt);
+		return 0;
+	}
+
+	if (hw->is_pf) {
+		/* get_cur_time_ms() yields ms; log portably as 64-bit */
+		PMD_DRV_LOG(INFO, "dpp_dtb_res_init time: %" PRIu64 " ms", get_cur_time_ms());
+		ret = npsdk_dtb_res_init(eth_dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "dpp apt init failed, ret:%d ", ret);
+			return -ret;
+		}
+		PMD_DRV_LOG(INFO, "dpp_dtb_res_init ok");
+
+		PMD_DRV_LOG(INFO, "%s time: %" PRIu64 " ms", __func__, get_cur_time_ms());
+		ret = npsdk_apt_res_init(eth_dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "dpp apt init failed, ret:%d ", ret);
+			return -ret;
+		}
+
+		PMD_DRV_LOG(INFO, "dpp_apt_res_init ok");
+		if (!hw->switchoffload) {
+			if (hw->hash_search_index >= ZXDH_HASHIDX_MAX) {
+				PMD_DRV_LOG(ERR, "invalid hash idx %d", hw->hash_search_index);
+				return -1;
+			}
+			zxdh_tbl_entry_offline_destroy(hw);
+		}
+	}
+	if (zxdh_shared_data != NULL)
+		zxdh_shared_data->npsdk_init_done = 1;
+
+	PMD_DRV_LOG(DEBUG, "np init ok ");
+	return 0;
+}
+/**
+ * Fun: once-per-process setup of the shared data block plus per-device
+ * reference counting; secondaries only mark themselves present.
+ */
+static int zxdh_init_once(struct rte_eth_dev *eth_dev)
+{
+	int ret = 0;
+
+	PMD_INIT_LOG(DEBUG, "port 0x%x init...", eth_dev->data->port_id);
+	if (zxdh_init_shared_data())
+		return -rte_errno;
+
+	struct zxdh_shared_data *sd = zxdh_shared_data;
+
+	rte_spinlock_lock(&sd->lock);
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		/* Secondary: register once, nothing else to do. */
+		if (!sd->init_done) {
+			sd->secondary_cnt++;
+			sd->init_done = true;
+		}
+	} else if (sd->init_done) {
+		/* Primary, already initialised: just take a reference. */
+		sd->dev_refcnt++;
+	} else {
+		/* Primary, first device: build shared resources. */
+		ret = zxdh_init_sh_res(sd);
+		if (ret == 0) {
+			zxdh_mtr_init();
+			sd->init_done = true;
+			sd->dev_refcnt++;
+		}
+	}
+	rte_spinlock_unlock(&sd->lock);
+	return ret;
+}
+/* dev_ops for zxdh, modelled on virtio: bare necessities for basic operation */
+static const struct eth_dev_ops zxdh_eth_dev_ops = {
+	/* lifecycle, info and statistics */
+	.dev_configure			 = zxdh_dev_configure,
+	.dev_start				 = zxdh_dev_start,
+	.dev_stop				 = zxdh_dev_stop,
+	.dev_close				 = zxdh_dev_close,
+	.dev_infos_get			 = zxdh_dev_info_get,
+	.stats_get				 = zxdh_dev_stats_get,
+	.xstats_get				 = zxdh_dev_xstats_get,
+	.xstats_get_names		 = zxdh_dev_xstats_get_names,
+	.stats_reset			 = zxdh_dev_stats_reset,
+	.xstats_reset			 = zxdh_dev_stats_reset,
+	.link_update			 = zxdh_dev_link_update,
+	/* queue setup; releases are handled elsewhere, hence NULL */
+	.rx_queue_setup			 = zxdh_dev_rx_queue_setup,
+	.rx_queue_intr_enable	 = zxdh_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable	 = zxdh_dev_rx_queue_intr_disable,
+	.rx_queue_release		 = NULL,
+	.rxq_info_get			 = zxdh_rxq_info_get,
+	.txq_info_get			 = zxdh_txq_info_get,
+	.tx_queue_setup			 = zxdh_dev_tx_queue_setup,
+	.tx_queue_release		 = NULL,
+	.queue_stats_mapping_set = NULL,
+
+	/* MAC/link administration */
+	.mac_addr_add			 = zxdh_dev_mac_addr_add,
+	.mac_addr_remove		 = zxdh_dev_mac_addr_remove,
+	.mac_addr_set			 = zxdh_dev_mac_addr_set,
+	.mtu_set				 = zxdh_dev_mtu_set,
+	.dev_set_link_up		 = zxdh_dev_set_link_up,
+	.dev_set_link_down		 = zxdh_dev_set_link_down,
+	.promiscuous_enable		 = zxdh_dev_promiscuous_enable,
+	.promiscuous_disable	 = zxdh_dev_promiscuous_disable,
+	.allmulticast_enable	 = zxdh_dev_allmulticast_enable,
+	.allmulticast_disable	 = zxdh_dev_allmulticast_disable,
+	/* VLAN, tunnels, RSS, metering and flow offloads */
+	.vlan_filter_set		 = zxdh_vlan_filter_set,
+	.vlan_offload_set		 = zxdh_vlan_offload_set,
+	.vlan_pvid_set			 = zxdh_vlan_pvid_set,
+	.vlan_tpid_set			 = zxdh_vlan_tpid_set,
+	.udp_tunnel_port_add	 = zxdh_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del	 = zxdh_dev_udp_tunnel_port_del,
+	.reta_update			 = zxdh_dev_rss_reta_update,
+	.reta_query				 = zxdh_dev_rss_reta_query,
+	.rss_hash_update		 = zxdh_rss_hash_update,
+	.rss_hash_conf_get		 = zxdh_rss_hash_conf_get,
+	.mtr_ops_get			 = zxdh_meter_ops_get,
+	.flow_ops_get			 = zxdh_flow_ops_get,
+	/* diagnostics */
+	.fw_version_get			 = zxdh_dev_fw_version_get,
+	.get_module_info		 = zxdh_dev_get_module_info,
+	.get_module_eeprom		 = zxdh_dev_get_module_eeprom,
+	.flow_ctrl_get			 = zxdh_flow_ctrl_get,
+	.flow_ctrl_set			 = zxdh_flow_ctrl_set,
+	.eth_dev_priv_dump		 = zxdh_dev_priv_dump,
+};
+/**
+ * Fun: enable the BAR message channel (RISC / PF-VF / MPF vectors) and
+ * retrieve the vport assigned to this function into hw->vport.
+ */
+static int32_t zxdh_msg_chan_enable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct msix_para msix_params = {0};
+
+	msix_params.vector_risc = MSIX_FROM_RISCV;
+	msix_params.vector_pfvf = MSIX_FROM_PFVF;
+	msix_params.vector_mpf  = MSIX_FROM_MPF;
+	msix_params.pcie_id     = hw->pcie_id;
+	msix_params.driver_type = hw->is_pf ? MSG_CHAN_END_PF : MSG_CHAN_END_VF;
+	msix_params.virt_addr   = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+
+	return zxdh_bar_chan_enable(&msix_params, &hw->vport.vport);
+}
+
+/* Initialise the BAR-channel hardware spinlock; only the PF owns it,
+ * VFs succeed trivially.
+ */
+static int32_t zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (hw->is_pf)
+		return bar_chan_pf_init_spinlock(hw->pcie_id,
+				(uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX]));
+	return 0;
+}
+
+/**
+ * Fun: query phyport, hash search index and panel id from the agent over
+ * the message channel and cache them in @hw; also derives hw->vfid.
+ * Return: 0 on success, -1 on the first failing query.
+ */
+static int zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
+{
+	if (zxdh_phyport_get(eth_dev, &hw->phyport) != 0) {
+		PMD_INIT_LOG(ERR, "Failed to get phyport");
+		return -1;
+	}
+	PMD_INIT_LOG(INFO, "Get phyport success: 0x%x", hw->phyport);
+	hw->vfid = vport_to_vfid(hw->vport);
+	if (zxdh_hashidx_get(eth_dev, &hw->hash_search_index) != 0) {
+		PMD_INIT_LOG(ERR, "Failed to get hash idx");
+		return -1;
+	}
+	PMD_INIT_LOG(DEBUG, "Get hash idx success: 0x%x", hw->hash_search_index);
+	if (zxdh_pannelid_get(eth_dev, &hw->panel_id) != 0) {
+		PMD_INIT_LOG(ERR, "Failed to get panel_id");
+		return -1;
+	}
+	/* fixed "pannel" typo in the success message */
+	PMD_INIT_LOG(INFO, "Get panel id success: 0x%x", hw->panel_id);
+
+	return 0;
+}
+/**
+ * Fun: is based on probe() function in zxdh_pci.c
+ * It returns 0 on success.
+ */
+static int32_t zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	int ret;
+	uint64_t pre_time = get_cur_time_ms();
+
+	/* get_cur_time_ms() is milliseconds; use a portable 64-bit format */
+	PMD_INIT_LOG(INFO, "dev init begin time: %" PRIu64 " ms", pre_time);
+	eth_dev->dev_ops = &zxdh_eth_dev_ops;
+
+	/**
+	 * Primary process does the whole initialization,
+	 * for secondary processes, we just select the same Rx and Tx function as primary.
+	 */
+	struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		VTPCI_OPS(hw) = &zxdh_modern_ops;
+		set_rxtx_funcs(eth_dev);
+		return 0;
+	}
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac",
+			ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes store MAC addresses",
+				ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
+		return -ENOMEM;
+	}
+	memset(hw, 0, sizeof(*hw));
+	ret = zxdh_dev_devargs_parse(eth_dev->device->devargs, hw);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "dev args parse failed");
+		/* early returns used to leak the MAC array allocated above */
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+		return -EINVAL;
+	}
+
+	hw->bar_addr[0] = (uint64_t)pci_dev->mem_resource[0].addr;
+	if (hw->bar_addr[0] == 0) {
+		PMD_INIT_LOG(ERR, "Bad mem resource.");
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+		return -EIO;
+	}
+	hw->device_id = pci_dev->id.device_id;
+	hw->port_id = eth_dev->data->port_id;
+	hw->eth_dev = eth_dev;
+	hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+	hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	hw->is_pf = 0;
+
+	hw->reta_idx = NULL;
+	hw->vfinfo = NULL;
+	hw->vlan_fiter = NULL;
+
+	hw->admin_status = RTE_ETH_LINK_UP;
+	rte_spinlock_init(&hw->state_lock);
+	if (pci_dev->id.device_id == ZXDH_PCI_PF_DEVICEID) {
+		hw->is_pf = 1;
+		hw->pfinfo.vf_nums = pci_dev->max_vfs;
+	}
+
+	/* reset device and get dev config*/
+	ret = zxdh_init_once(eth_dev);
+	if (ret != 0)
+		goto err_zxdh_init;
+
+	ret = zxdh_init_device(eth_dev);
+	if (ret < 0)
+		goto err_zxdh_init;
+
+	ret = zxdh_msg_chan_init();
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "Failed to init bar msg chan");
+		goto err_zxdh_init;
+	}
+	hw->msg_chan_init = 1;
+	PMD_INIT_LOG(DEBUG, "Init bar msg chan OK");
+	ret = zxdh_msg_chan_hwlock_init(eth_dev);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "zxdh_msg_chan_hwlock_init failed ret %d", ret);
+		goto err_zxdh_init;
+	}
+	ret = zxdh_msg_chan_enable(eth_dev);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "zxdh_msg_bar_chan_enable failed ret %d", ret);
+		goto err_zxdh_init;
+	}
+	PMD_INIT_LOG(DEBUG, "pcie_id: 0x%x, vport: 0x%x", hw->pcie_id, hw->vport.vport);
+
+	ret = zxdh_agent_comm(eth_dev, hw);
+	if (ret != 0)
+		goto err_zxdh_init;
+
+	ret = zxdh_np_init(eth_dev);
+	if (ret)
+		goto err_zxdh_init;
+
+	zxdh_priv_res_init(hw);
+	zxdh_sriovinfo_init(hw);
+	zxdh_msg_cb_reg(hw);
+	zxdh_configure_intr(eth_dev);
+	ret = zxdh_tables_init(eth_dev);
+	if (ret != 0)
+		goto err_zxdh_init;
+
+	uint64_t time = get_cur_time_ms();
+
+	/* success path: log at INFO, not ERR as before */
+	PMD_INIT_LOG(INFO, "dev init end time: %" PRIu64 " ms total time %" PRIu64 " ms",
+			time, time - pre_time);
+	return 0;
+
+err_zxdh_init:
+	zxdh_intr_release(eth_dev);
+	zxdh_np_destroy(eth_dev);
+	zxdh_bar_msg_chan_exit();
+	zxdh_priv_res_free(hw);
+	zxdh_free_sh_res();
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	rte_free(eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key);
+	eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+	return ret;
+}
+
+/* Ceiling of log2(v): floor(log2(v)) plus one when v is not a power of
+ * two (any discarded low bit sets the round-up flag). log2above(0) == 0.
+ */
+static unsigned int
+log2above(unsigned int v)
+{
+	unsigned int exp = 0;
+	unsigned int round_up = 0;
+
+	while (v >> 1) {
+		round_up |= v & 1;
+		v >>= 1;
+		exp++;
+	}
+	return exp + round_up;
+}
+
+/* Normalise a requested descriptor count: clamp into
+ * [ZXDH_MIN_QUEUE_DEPTH, ZXDH_MAX_QUEUE_DEPTH] and round non-powers of two
+ * up to the next power of two (warning on every adjustment).
+ */
+static uint16_t zxdh_queue_desc_pre_setup(uint16_t desc)
+{
+	uint32_t adjusted;
+
+	if (desc < ZXDH_MIN_QUEUE_DEPTH) {
+		PMD_RX_LOG(WARNING,
+			"nb_desc(%u) increased number of descriptors to the min queue depth (%u)",
+			desc, ZXDH_MIN_QUEUE_DEPTH);
+		return ZXDH_MIN_QUEUE_DEPTH;
+	}
+	if (desc > ZXDH_MAX_QUEUE_DEPTH) {
+		PMD_RX_LOG(WARNING,
+			"nb_desc(%u) can't be greater than max_rxds (%d), turn to max queue depth",
+			desc, ZXDH_MAX_QUEUE_DEPTH);
+		return ZXDH_MAX_QUEUE_DEPTH;
+	}
+	if (rte_is_power_of_2(desc))
+		return desc;
+
+	adjusted = 1 << log2above(desc);
+	if (adjusted > ZXDH_MAX_QUEUE_DEPTH)
+		adjusted = ZXDH_MAX_QUEUE_DEPTH;
+	PMD_RX_LOG(WARNING,
+		"nb_desc(%u) increased number of descriptors to the next power of two (%d)",
+		desc, adjusted);
+	return adjusted;
+}
+
+/* rte_kvargs handler for the "q_depth" devarg: parse, validate and store
+ * the normalised depth in hw->q_depth.
+ * Return: 0 on success, -EINVAL on non-numeric/out-of-range input (the old
+ * code silently turned garbage into 0 and thus the minimum depth).
+ */
+static int32_t hw_q_depth_handler(const char *key __rte_unused,
+				const char *value, void *ret_val)
+{
+	struct zxdh_hw *hw = ret_val;
+	char *end = NULL;
+	unsigned long val;
+
+	val = strtoul(value, &end, 0);
+	/* reject empty strings, trailing junk and values above uint16 range */
+	if (end == value || *end != '\0' || val > 0xFFFFUL)
+		return -EINVAL;
+
+	hw->q_depth = zxdh_queue_desc_pre_setup((uint16_t)val);
+	return 0;
+}
+
+/* Parse device arguments ("q_depth=<int>") into @hw; missing devargs or an
+ * unparsable argument string are not fatal (defaults apply).
+ */
+static int32_t zxdh_dev_devargs_parse(struct rte_devargs *devargs, struct zxdh_hw *hw)
+{
+	struct rte_kvargs *kvlist;
+	int32_t ret;
+
+	if (devargs == NULL)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL) {
+		PMD_INIT_LOG(ERR, "error when parsing param");
+		return 0;
+	}
+
+	ret = rte_kvargs_process(kvlist, "q_depth", hw_q_depth_handler, hw);
+	if (ret < 0)
+		PMD_INIT_LOG(ERR, "Failed to parse q_depth");
+	else if (hw->q_depth == 0)
+		hw->q_depth = ZXDH_MIN_QUEUE_DEPTH;
+
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
+/**
+ * Fun: PCI probe entry point. In debug builds, raise the driver log levels
+ * first; then create an ethdev backed by a zxdh_hw private area via the
+ * generic PCI probe helper, which invokes zxdh_eth_dev_init().
+ */
+int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+#ifdef RTE_LIBRTE_ZXDH_DEBUG
+	rte_log_set_level(zxdh_logtype_init, RTE_LOG_DEBUG);
+	rte_log_set_level(zxdh_logtype_driver, RTE_LOG_DEBUG);
+	rte_log_set_level(RTE_LOGTYPE_PMD, RTE_LOG_DEBUG);
+#endif
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct zxdh_hw), zxdh_eth_dev_init);
+}
+/**
+ * Fun: ethdev uninit callback; the primary process tears the port down via
+ * dev_close, secondaries have nothing to undo.
+ */
+static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_SECONDARY)
+		zxdh_dev_close(eth_dev);
+	return 0;
+}
+/**
+ * Fun: PCI remove entry point. -ENODEV means the port was already released
+ * by close() and is treated as success.
+ */
+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int32_t ret = rte_eth_dev_pci_generic_remove(pci_dev, zxdh_eth_dev_uninit);
+
+	return (ret == -ENODEV) ? 0 : ret;
+}
+/* PCI IDs claimed by this PMD: the ZTE PF and VF device IDs. */
+static const struct rte_pci_id pci_id_zxdh_map[] = {
+	{RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_PCI_PF_DEVICEID)},
+	{RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_PCI_VF_DEVICEID)},
+	{.vendor_id = 0, /* sentinel */ },
+};
+/* Driver object: needs BAR mapping and link-state-change interrupts. */
+static struct rte_pci_driver zxdh_pmd = {
+	.driver = {.name = "net_zxdh", },
+	.id_table = pci_id_zxdh_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = zxdh_eth_pci_probe,
+	.remove = zxdh_eth_pci_remove,
+};
+/* Constructor: register the driver and the two telemetry dump commands. */
+RTE_INIT(rte_zxdh_pmd_init)
+{
+	zxdh_log_init();
+	rte_pci_register(&zxdh_pmd);
+	rte_telemetry_register_cmd("/zxdh/dumppkt",
+		handle_pkt_dump,
+		"Returns None. Parameter: port id, mode(0:all_off;1:rx_on;2:tx_on;3:all_on), dumplen");
+	rte_telemetry_register_cmd("/zxdh/dumpque",
+		handle_queue_dump,
+		"Returns None. Parameter: port id, queid, dump_descnum, logfile(eg /home/que.log)");
+}
+/* Export name, PCI table, kmod dependency and per-subsystem log types. */
+RTE_PMD_EXPORT_NAME(net_zxdh, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_zxdh, pci_id_zxdh_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_zxdh, "* vfio-pci");
+RTE_LOG_REGISTER(zxdh_logtype_init, pmd.net.zxdh.init, DEBUG);
+RTE_LOG_REGISTER(zxdh_logtype_driver, pmd.net.zxdh.driver, INFO);
+RTE_LOG_REGISTER(zxdh_logtype_zxdh_driver, pmd.net.zxdh.zxdh_driver, DEBUG);
+RTE_LOG_REGISTER(zxdh_logtype_tx, pmd.net.zxdh.tx, NOTICE);
+RTE_LOG_REGISTER(zxdh_logtype_rx, pmd.net.zxdh.rx, NOTICE);
+RTE_LOG_REGISTER(zxdh_logtype_msg, pmd.net.zxdh.msg, INFO);
+/* Advertise the supported devargs for tooling. */
+RTE_PMD_REGISTER_PARAM_STRING(net_zxdh,
+	"q_depth=<int>");
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
new file mode 100644
index 0000000000..31438048df
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -0,0 +1,244 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_ETHDEV_H_
+#define _ZXDH_ETHDEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include "ethdev_pci.h"
+
+#include "msg_chan_pub.h"
+#include "zxdh_mtr.h"
+#include "zxdh_flow.h"
+#include "zxdh_table_drv.h"
+
+/* BAR definitions */
+#define ZXDH_NUM_BARS    2
+#define ZXDH_BAR0_INDEX  0
+
+/**
+ * zxdh has a total of 4096 queues,
+ * pf/vf devices support up to 256 queues
+ * (include private queues)
+ */
+#define ZXDH_QUEUES_BASE       2048
+#define ZXDH_TOTAL_QUEUES_NUM  4096
+#define ZXDH_QUEUES_NUM_MAX    256
+/* header sizes prepended/parsed on the datapath */
+#define ZXDH_TYPE_HDR_SIZE     sizeof(struct zxdh_type_hdr)
+#define ZXDH_PI_HDR_SIZE       sizeof(struct zxdh_pi_hdr)
+#define ZXDH_DL_NET_HDR_SIZE   sizeof(struct zxdh_net_hdr_dl)
+#define ZXDH_UL_NET_HDR_SIZE   sizeof(struct zxdh_net_hdr_ul)
+#define ZXDH_MBUF_MIN_SIZE     (ZXDH_DL_NET_HDR_SIZE)
+
+/* MTU / MAC-table limits */
+#define ZXDH_ETHER_MIN_MTU     68
+#define ZXDH_MAX_RX_PKTLEN     14000U
+#define ZXDH_MAX_UC_MAC_ADDRS  32
+#define ZXDH_MAX_MC_MAC_ADDRS  32
+#define ZXDH_MAX_MAC_ADDRS     (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS)
+/* NOTE(review): semantics of the two constants below are not visible in
+ * this header -- presumably the first VF id and a table-lookup hit flag;
+ * confirm against the NP table code.
+ */
+#define ZXDH_BASE_VFID         1152
+#define ZXDH_TABLE_HIT_FLAG    128
+
+extern struct zxdh_dtb_shared_data g_dtb_data;
+extern const struct eth_dev_ops zxdh_user_secondary_eth_dev_ops;
+extern const struct eth_dev_ops zxdh_bond_dev_ops;
+
+/* PF-side identity: its pcie id, VF count and cached port attributes. */
+struct pfinfo {
+	uint16_t pcieid;
+	uint16_t vf_nums;
+	struct zxdh_port_att_entry port_attr;
+};
+/* Per-VF state tracked by the PF: identity, MAC tables, cached port attr. */
+struct vfinfo {
+	uint16_t vf_idx;
+	uint16_t pcieid;
+	uint16_t vport;
+	uint8_t flag;
+	uint8_t state;
+	uint8_t rsv;
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr vf_mac[ZXDH_MAX_MAC_ADDRS];
+	struct zxdh_port_att_entry port_attr;
+};
+
+#define ZXDH_MAX_VF 256
+/* PF view with a trailing flexible array of its VFs' state. */
+struct pf {
+	uint16_t pcieid;
+	uint16_t vf_nums;
+	struct vfinfo vfinfo[];
+};
+
+/* VF view: own identity and the owning PF's pcie id / vport. */
+struct vf {
+	uint16_t pcieid;
+	uint16_t vf_idx;
+	uint16_t pf_pcieid;
+	uint16_t pf_vport;
+};
+
+/* 16-bit vport identifier; the bitfield layout below must match the
+ * firmware encoding (NOTE(review): bit order is compiler-dependent for
+ * bitfields -- confirm against the message-channel spec).
+ */
+union VPORT {
+	uint16_t vport;
+
+	__extension__
+	struct {
+		uint16_t vfid:8;
+		uint16_t pfid:3;
+		uint16_t vf_flag:1;  /* set when this function is a VF */
+		uint16_t epid:3;
+		uint16_t direct_flag:1;
+	};
+};
+
+/* Logical-queue to physical-channel mapping entry. */
+struct chnl_context {
+	uint16_t valid;    /* non-zero once the channel is bound */
+	uint16_t ph_chno;  /* physical channel number */
+}; /* 4B */
+
+/* Cached VLAN/QinQ offload switches as configured via vlan_offload_set. */
+struct zxdh_vlan_offload_cfg {
+	uint8_t vlan_strip:1;
+	uint8_t vlan_filter:1;
+	uint8_t vlan_extend:1;
+	uint8_t qinq_strip:1;
+	uint8_t resv:4;
+};
+
+/* Per-port private data (eth_dev->data->dev_private): virtio-like queue
+ * state plus zxdh-specific NP / message-channel bookkeeping.
+ */
+struct zxdh_hw {
+	/* virtio-style feature negotiation results */
+	uint64_t host_features;
+	uint64_t guest_features;
+	/* queue geometry */
+	uint32_t max_queue_pairs;
+	uint16_t max_mtu;
+	uint8_t  vtnet_hdr_size;
+	uint8_t  vlan_strip;
+	/* interrupt / lifecycle flags */
+	uint8_t  use_msix;
+	uint8_t  intr_enabled;
+	uint8_t  started;
+	uint8_t  weak_barriers;
+
+	bool has_tx_offload;
+	bool has_rx_offload;
+
+	uint8_t  mac_addr[RTE_ETHER_ADDR_LEN];
+	uint16_t port_id;
+
+	uint32_t  notify_off_multiplier;
+	uint32_t  speed;  /* link speed in MB */
+	uint32_t  speed_mode;  /* link speed in 1x 2x 3x */
+	uint8_t   duplex;
+	uint8_t  *isr;
+	uint16_t *notify_base;
+
+	struct zxdh_pci_common_cfg *common_cfg;
+	struct zxdh_net_config     *dev_cfg;
+
+	uint16_t queue_num;
+	uint16_t device_id;
+
+	uint16_t pcie_id;
+	uint8_t  phyport;
+	bool     msg_chan_init;  /* set once zxdh_msg_chan_init() succeeded */
+
+	uint8_t panel_id;
+	uint8_t rsv[1];
+
+	/**
+	 * App management thread and virtio interrupt handler
+	 * thread both can change device state,
+	 * this lock is meant to avoid such a contention.
+	 */
+	rte_spinlock_t     state_lock;
+	struct rte_mbuf  **inject_pkts;
+	struct virtqueue **vqs;
+
+	uint64_t bar_addr[ZXDH_NUM_BARS];
+	struct rte_intr_handle *risc_intr;  /* Interrupt handle of rsic_v to host */
+	struct rte_intr_handle *dtb_intr;  /* Interrupt handle of rsic_v to host */
+
+	struct chnl_context channel_context[ZXDH_QUEUES_NUM_MAX];
+	union VPORT vport;
+	struct zxdh_flow  *cur_flow;
+	struct FLOW_LIST flow_list; /* double link list */
+
+	uint8_t is_pf         : 1,
+			switchoffload : 1,
+			i_mtr_en      : 1, /* meter en. */
+			e_mtr_en      : 1; /* meter en. */
+	uint8_t  hash_search_index;
+	uint16_t vfid;
+	uint16_t reta_idx_n;  /* number of entries in reta_idx */
+	uint16_t pvid;
+	uint16_t otpid;
+	uint16_t mc_num;  /* multicast MAC count in use */
+	uint16_t uc_num;  /* unicast MAC count in use */
+	uint8_t promisc_status;
+	uint8_t allmulti_status;
+	uint8_t admin_status;
+	uint16_t *reta_idx;
+	uint64_t *vlan_fiter;  /* NOTE(review): likely "vlan_filter" typo; public field, kept as-is */
+
+	struct pfinfo pfinfo;
+	struct vfinfo *vfinfo;  /* PF only: per-VF state array */
+	struct rte_eth_dev *eth_dev;
+
+	struct zxdh_vlan_offload_cfg vlan_offload_cfg;
+	uint8_t rss_enable;
+	uint8_t rss_init;
+	uint16_t q_depth;  /* descriptor count from the q_depth devarg */
+};
+
+/* PCI bus entry points */
+int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev);
+
+/* eth_dev_ops implementations (lifecycle, info, stats) */
+int32_t zxdh_dev_close(struct rte_eth_dev *dev);
+int32_t zxdh_dev_configure(struct rte_eth_dev *dev);
+int32_t zxdh_dev_start(struct rte_eth_dev *dev);
+int32_t zxdh_dev_stop(struct rte_eth_dev *dev);
+int32_t zxdh_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+int32_t zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+int32_t zxdh_dev_stats_reset(struct rte_eth_dev *dev);
+int32_t zxdh_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, uint32_t n);
+int32_t zxdh_dev_xstats_get_names(struct rte_eth_dev *dev,
+			struct rte_eth_xstat_name *xstats_names,
+			unsigned int limit);
+/* Rx/Tx queue setup and interrupt control */
+int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+
+int32_t zxdh_dev_rx_queue_done(void *rxq, uint16_t offset);
+int32_t zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t rx_queue_id,
+			uint16_t nb_rx_desc,
+			uint32_t socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mb_pool);
+
+int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int32_t zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t tx_queue_id,
+			uint16_t nb_tx_desc,
+			uint32_t socket_id,
+			const struct rte_eth_txconf *tx_conf);
+
+/* datapath burst functions */
+uint16_t zxdh_recv_mergeable_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+			uint16_t nb_pkts);
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+/* misc helpers */
+void zxdh_interrupt_handler(void *param);
+int32_t zxdh_dev_pause(struct rte_eth_dev *dev);
+void zxdh_dev_resume(struct rte_eth_dev *dev);
+int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts);
+int32_t zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx);
+int zxdh_vlan_filter_table_init(uint16_t vfid);
+int32_t zxdh_dev_config_port_status(struct rte_eth_dev *dev, uint16_t link_status);
+int32_t zxdh_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
+int32_t zxdh_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _ZXDH_ETHDEV_H_ */
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
new file mode 100644
index 0000000000..6473143b58
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -0,0 +1,2205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <sys/mman.h>
+#include <rte_ethdev.h>
+
+#include "zxdh_pci.h"
+#include "zxdh_table_drv.h"
+#include "zxdh_common.h"
+#include "msg_chan_pub.h"
+#include "zxdh_msg_chan.h"
+#include "zxdh_ethdev_ops.h"
+#include "zxdh_ethdev.h"
+
+#include "zxdh_mtr.h"
+#include "zxdh_tables.h"
+#include "zxdh_rxtx.h"
+
+#include "dpp_dtb_table_api.h"
+
+#define ZXDH_VLAN_FILTER_BACKUP_GROUPS 64
+#define MSG_REPS_OK  0xff
+#define ZXDH_RSS_HF_MASK (~(ZXDH_RSS_HF))
+#define INVALID_LOGIC_QID 0xFFFFU
+
+int zxdh_force_read_from_hw = 1;
+
+static inline uint32_t
+zxdh_rss_hf_to_hw(uint64_t hf)
+{
+	uint32_t hw_flags = 0;
+
+	/* Translate each ethdev RSS class into its hardware flag. */
+	if (hf & ZXDH_HF_MAC_VLAN_ETH)
+		hw_flags |= ZXDH_HF_MAC_VLAN;
+	if (hf & ZXDH_HF_F3_ETH)
+		hw_flags |= ZXDH_HF_F3;
+	if (hf & ZXDH_HF_F5_ETH)
+		hw_flags |= ZXDH_HF_F5;
+
+	/* All three classes requested -> collapse into the "all" shorthand. */
+	return (hw_flags == (ZXDH_HF_MAC_VLAN | ZXDH_HF_F3 | ZXDH_HF_F5)) ?
+			ZXDH_HF_ALL : hw_flags;
+}
+
+static inline uint16_t
+zxdh_qid_ph_to_logic(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	uint16_t nb_rxq = dev->data->nb_rx_queues;
+	uint16_t lqid;
+
+	/* Rx queue i uses channel slot 2*i; scan for the matching phys channel. */
+	for (lqid = 0; lqid < nb_rxq; lqid++) {
+		if (priv->channel_context[lqid * 2].ph_chno == qid)
+			return lqid;
+	}
+	return INVALID_LOGIC_QID;
+}
+
+static inline uint64_t
+zxdh_rss_hf_to_eth(uint32_t hw_hf)
+{
+	uint64_t eth_hf = 0;
+
+	/* The hardware "all" shorthand expands to every supported class. */
+	if (hw_hf == ZXDH_HF_ALL)
+		return ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH;
+
+	eth_hf |= (hw_hf & ZXDH_HF_MAC_VLAN) ? ZXDH_HF_MAC_VLAN_ETH : 0;
+	eth_hf |= (hw_hf & ZXDH_HF_F3) ? ZXDH_HF_F3_ETH : 0;
+	eth_hf |= (hw_hf & ZXDH_HF_F5) ? ZXDH_HF_F5_ETH : 0;
+
+	return eth_hf;
+}
+
+/*
+ * Translate a logical queue id into its VQM physical channel number.
+ * Returns -1 when the id is out of range or the channel is not valid.
+ */
+int logic_qid_to_vqm_phyqid(struct rte_eth_dev *dev, int16_t qid)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint32_t lqid = qid;
+
+	if (lqid >= hw->max_queue_pairs)
+		return -1;
+	if (!hw->channel_context[lqid].valid)
+		return -1;
+	return hw->channel_context[lqid].ph_chno;
+}
+/**
+ * Fun: map a vport to its global vfid.
+ * NOTE(review): 1192 (soft-queue vfid), 256 (VFs per EP) and 1152 (PF vfid
+ * base) look like hardware ABI constants -- confirm against the hw spec.
+ */
+uint16_t vport_to_vfid(union VPORT v)
+{
+	/* epid > 4 is local soft queue. return 1192 */
+	if (v.epid > 4)
+		return 1192;
+	if (v.vf_flag)
+		return v.epid * 256 + v.vfid;
+	else
+		return (v.epid * 8 + v.pfid) + 1152;
+
+}
+/**
+ * Fun: map a vport to the vfid of its owning PF (the VF flag is ignored,
+ * unlike vport_to_vfid()).
+ */
+uint16_t vport_to_pf_vfid(union VPORT v)
+{
+	/* epid > 4 is local soft queue. return 1192 */
+	if (v.epid > 4)
+		return 1192;
+	return (v.epid * 8 + v.pfid) + 1152;
+}
+/**
+ * Fun: PF hash-table index: the PF vfid with the 1152 base removed.
+ */
+uint16_t vport_to_hash_index(union VPORT v)
+{
+	return vport_to_pf_vfid(v) - 1152;
+}
+
+/**
+ * Fun: set a new MTU.
+ * PF: read-modify-writes the panel and vport attribute tables directly.
+ * VF: asks its PF over the message channel (offload enable first, then
+ * the value). dev->data->mtu is updated only after hardware accepted it.
+ */
+#define ZXDH_VLAN_TAG_LEN   4
+#define ZXDH_ETH_OVERHEAD  (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ZXDH_VLAN_TAG_LEN * 2)
+#define ZXDH_MTU_TO_PKTLEN(mtu) ((mtu) + ZXDH_ETH_OVERHEAD)
+int zxdh_dev_mtu_set(struct rte_eth_dev *dev, uint16_t new_mtu)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_panel_port_t panel = {0};
+	struct zxdh_port_att_entry vport_att = {0};
+	uint8_t index_phy_port = hw->phyport;
+	uint16_t vfid = vport_to_pf_vfid(hw->vport);
+	int ret = 0;
+
+	PMD_DRV_LOG(INFO, "vport %d--%d--%d\n", hw->vport.epid, hw->vport.pfid, hw->vport.vfid);
+
+	if ((new_mtu < ZXDH_ETHER_MIN_MTU) || (new_mtu > hw->max_mtu)) {
+		/* Log the bound that is actually enforced (hw->max_mtu),
+		 * not ZXDH_MAX_RX_PKTLEN.
+		 */
+		PMD_DRV_LOG(ERR, "invalid mtu:%d, range[%d, %d]\n",
+				new_mtu, ZXDH_ETHER_MIN_MTU, hw->max_mtu);
+		return -EINVAL;
+	}
+
+	if (dev->data->mtu == new_mtu)
+		return 0;
+
+	if (hw->is_pf) {
+		PMD_DRV_LOG(INFO, "[vfid:%d] zxdh_dev_mtu, set ok mtu = %d---(%d)\n",
+				vfid, new_mtu, index_phy_port);
+		ret = get_panel_attr(dev, &panel);
+		if (ret != DPP_OK) {
+			/* failures are errors, not debug chatter */
+			PMD_DRV_LOG(ERR, "get_panel_attr ret:%d\n", ret);
+			return -1;
+		}
+
+		ret = get_vport_attr(vfid, &vport_att);
+		if (ret != DPP_OK) {
+			PMD_DRV_LOG(ERR,
+				"[vfid:%d] zxdh_dev_mtu, get vport dpp_ret:%d\n", vfid, ret);
+			return -1;
+		}
+
+		panel.mtu = new_mtu;
+		panel.mtu_enable = 1;
+		ret = set_panel_attr(dev, &panel);
+		if (ret != DPP_OK) {
+			PMD_DRV_LOG(ERR, "set zxdh_dev_mtu failed, ret:%d", ret);
+			return ret;
+		}
+
+		vport_att.mtu_enable = 1;
+		vport_att.mtu = new_mtu;
+		ret = set_vport_attr(vfid, &vport_att);
+		if (ret != DPP_OK) {
+			PMD_DRV_LOG(ERR,
+				"[vfid:%d] zxdh_dev_mtu, set vport dpp_ret:%d\n", vfid, ret);
+			return ret;
+		}
+	} else {
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_set_msg;
+
+		/* Enable MTU offload first, then push the value itself. */
+		msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
+		attr_msg->mode = EGR_FLAG_MTU_OFFLOAD_EN_OFF;
+		attr_msg->value = 1;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+				hw->vport.vport, EGR_FLAG_MTU_OFFLOAD_EN_OFF);
+			return ret;
+		}
+		attr_msg->mode = EGR_FLAG_MTU;
+		attr_msg->value = new_mtu;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+				hw->vport.vport, EGR_FLAG_MTU);
+			return ret;
+		}
+	}
+	dev->data->mtu = new_mtu;
+	return 0;
+}
+/**
+ * Fun: flow-control mode bits exchanged with firmware (names suggest the
+ * SPM pause-mode encoding -- confirm with the firmware interface spec).
+ */
+#define SPM_FC_NONE                     RTE_BIT32(0)
+#define SPM_FC_PAUSE_RX                 RTE_BIT32(1)
+#define SPM_FC_PAUSE_TX                 RTE_BIT32(2)
+#define SPM_FC_PAUSE_FULL               RTE_BIT32(3)
+/* Return 0 when the riscv reply carries the success flag, -1 otherwise.
+ * NOTE(review): the reply struct is passed by value (a full copy); a
+ * const pointer would be cheaper, but all callers would need updating.
+ */
+static int32_t zxdh_rsp_body_check(struct zxdh_msg_reply_info rsp_data)
+{
+	struct zxdh_msg_reply_body *ack_msg = &(rsp_data.reply_body);
+
+	if (ack_msg->flag != ZXDH_REPS_SUCC) {
+		PMD_DRV_LOG(ERR, "Reply body msg flag is not %d ", ZXDH_REPS_SUCC);
+		return -1;
+	}
+	return 0;
+}
+/* Bit position of the lowest set bit for power-of-two style values
+ * (e.g. RTE_BIT32(n) -> n); returns 0 for inputs <= 1 or odd inputs.
+ */
+static int BIT_TO_NUM(int bit)
+{
+	int pos = 0;
+
+	while (bit > 1 && (bit % 2) == 0) {
+		bit /= 2;
+		pos++;
+	}
+	return pos;
+}
+/* Set the MAC flow-control (pause) mode. PF only; other pause parameters
+ * are not supported and are cleared. The requested mode is merged with the
+ * current mode through a lookup table before being sent to the riscv core.
+ */
+int32_t zxdh_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0;
+
+	if (hw->is_pf) {
+		struct rte_eth_fc_conf cur_fc_conf = {0};
+
+		/* The current mode indexes the merge table below; a failed
+		 * read must not be silently ignored.
+		 */
+		ret = zxdh_flow_ctrl_get(dev, &cur_fc_conf);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to get fc configure.");
+			return ret;
+		}
+
+		if (fc_conf->autoneg || fc_conf->high_water ||
+			fc_conf->low_water || fc_conf->pause_time ||
+			fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
+			PMD_DRV_LOG(INFO, "Does not support pause parameter configuration, except for pause mode");
+			fc_conf->high_water = 0;
+			fc_conf->low_water = 0;
+			fc_conf->autoneg = 0;
+			fc_conf->pause_time = 0;
+			fc_conf->send_xon = 0;
+			fc_conf->mac_ctrl_frame_fwd = 0;
+		}
+		struct zxdh_msg_info msg = {0};
+		struct zxdh_pci_bar_msg in = {0};
+		struct zxdh_msg_reply_info rep = {0};
+
+		ctrl_msg_build(hw, ZXDH_MAC_FC_SET, &msg);
+
+		PMD_DRV_LOG(INFO, " fc_conf->mode : %d\n", fc_conf->mode);
+
+		/* zxdh_flow_ctrl_get() already converts the hw bit value back
+		 * to an rte_eth_fc_mode enum (0..3); applying BIT_TO_NUM()
+		 * again mangled every mode except NONE, so use it directly.
+		 */
+		int cur_mode = cur_fc_conf.mode;
+		int fc_mode = fc_conf->mode;
+		/* Row: current mode, column: requested mode -> effective mode. */
+		static enum rte_eth_fc_mode zxdh_fc_mode[4][4] = {
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+			 RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL},
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+			 RTE_ETH_FC_FULL, RTE_ETH_FC_FULL},
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_FULL,
+			 RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL},
+			{RTE_ETH_FC_NONE, RTE_ETH_FC_FULL,
+			 RTE_ETH_FC_FULL, RTE_ETH_FC_FULL},
+		};
+		PMD_DRV_LOG(INFO, "cur_mode : %d fc_mode : %d\n", cur_mode, fc_mode);
+		msg.data.zxdh_fc_param.fc_mode = RTE_BIT32(zxdh_fc_mode[cur_mode][fc_mode]);
+
+		PMD_DRV_LOG(INFO, "msg.data.zxdh_fc_param.fc_mode : %d\n",
+			msg.data.zxdh_fc_param.fc_mode);
+		in.payload_addr = &msg;
+		in.payload_len = sizeof(msg);
+
+		struct zxdh_msg_recviver_mem rsp_data = {
+			.recv_buffer = (void *)&rep,
+			.buffer_len = sizeof(rep),
+		};
+
+		if (zxdh_send_command_toriscv(dev, &in, BAR_MODULE_MAC, &rsp_data) != BAR_MSG_OK) {
+			PMD_DRV_LOG(ERR, "Failed to set fc configure.");
+			return -1;
+		}
+		ret = zxdh_rsp_body_check(rep);
+	}
+	return ret;
+}
+
+/* Read the current flow-control mode from the MAC via the riscv message
+ * channel. PF only: on a VF the function leaves fc_conf untouched and
+ * returns 0.
+ */
+int32_t zxdh_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0;
+
+	if (hw->is_pf) {
+		struct zxdh_msg_info msg = {0};
+		struct zxdh_pci_bar_msg in = {0};
+		struct zxdh_msg_reply_info rep = {0};
+
+		ctrl_msg_build(hw, ZXDH_MAC_FC_GET, &msg);
+
+		in.payload_addr = &msg;
+		in.payload_len = sizeof(msg);
+
+		struct zxdh_msg_recviver_mem rsp_data = {
+			.recv_buffer = (void *)&rep,
+			.buffer_len = sizeof(rep),
+		};
+
+		if (zxdh_send_command_toriscv(dev, &in, BAR_MODULE_MAC, &rsp_data) != BAR_MSG_OK) {
+			PMD_DRV_LOG(ERR, "Failed to get fc configure.");
+			return -1;
+		}
+		if (zxdh_rsp_body_check(rep) != 0)
+			return -1;
+
+		struct zxdh_msg_reply_body *ack_msg =
+				&(((struct zxdh_msg_reply_info *)rsp_data.recv_buffer)->reply_body);
+		int mode = ack_msg->zxdh_fc_param.fc_mode;
+
+		/* hw reports RTE_BIT32(mode); convert back to the enum value */
+		fc_conf->mode = BIT_TO_NUM(mode);
+	}
+	return ret;
+}
+
+/**
+ * Fun: apply an administrative link state.
+ * The reported state is admin AND physical: the port is UP only when both
+ * the requested state and the hardware link are up.
+ */
+static int zxdh_set_link_status(struct rte_eth_dev *dev, uint8_t link_status)
+{
+	uint16_t curr_link_status = dev->data->dev_link.link_status;
+	int32_t ret = 0;
+	struct rte_eth_link link;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	/* No transition requested: nothing to do. */
+	if (link_status == curr_link_status) {
+		PMD_DRV_LOG(INFO, "curr_link_status %u\n", curr_link_status);
+		return 0;
+	}
+
+	hw->admin_status = link_status;
+	ret = zxdh_link_info_get(dev, &link);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to get link status from hw\n");
+		return ret;
+	}
+	/* Combine admin and physical link state. */
+	dev->data->dev_link.link_status = hw->admin_status & link.link_status;
+
+	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
+		dev->data->dev_link.link_speed = link.link_speed;
+		dev->data->dev_link.link_duplex = link.link_duplex;
+	} else {
+		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+		dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	}
+	PMD_DRV_LOG(INFO, "NOW: link-status %d  link_speed %d  link_duplex %d\n",
+			dev->data->dev_link.link_status, dev->data->dev_link.link_speed,
+			dev->data->dev_link.link_duplex);
+	/* Push the effective state down to the port attribute table. */
+	return zxdh_dev_config_port_status(dev, dev->data->dev_link.link_status);
+}
+/**
+ * Fun: Set device link up.
+ */
+int zxdh_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	ret = zxdh_set_link_status(dev, RTE_ETH_LINK_UP);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Set link up failed, code:%d", ret);
+	return ret;
+}
+/**
+ * Fun: Set device link down.
+ */
+int zxdh_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	ret = zxdh_set_link_status(dev, RTE_ETH_LINK_DOWN);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Set link down failed, code:%d", ret);
+	return ret;
+}
+
+/* Replace the primary MAC address (index 0): delete the old filter entry,
+ * install the new one, then mirror the address into device config space.
+ * NOTE(review): if the ADD step fails after DEL succeeded the old address
+ * is not restored -- consider a rollback.
+ */
+int zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+	struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+	union VPORT vport = hw->vport;
+	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
+	struct zxdh_msg_info msg_info = {0};
+	/* int, not uint16_t: the handlers return int error codes and the
+	 * negations below would otherwise be mangled.
+	 */
+	int ret = 0;
+
+	if (!rte_is_valid_assigned_ether_addr(addr)) {
+		PMD_DRV_LOG(ERR, "mac address is invalid!");
+		return -EINVAL;
+	}
+
+	if (hw->is_pf) {
+		struct zxdh_mac_filter mac_filter;
+		struct zxdh_msg_reply_body reply_body;
+		uint16_t res_len = 0;
+
+		rte_memcpy(&mac_filter.mac, old_addr, sizeof(struct rte_ether_addr));
+		ret = proc_func[ZXDH_MAC_DEL](hw, vport.vport, &mac_filter, &reply_body, &res_len);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "mac del failed!");
+			return -ret;
+		}
+		hw->uc_num--;
+
+		rte_memcpy(&mac_filter.mac, addr, sizeof(struct rte_ether_addr));
+		ret = proc_func[ZXDH_MAC_ADD](hw, vport.vport, &mac_filter, &reply_body, &res_len);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "mac add failed!");
+			return ret;
+		}
+		hw->uc_num++;
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", vport.vport);
+		struct zxdh_mac_filter *mac_filter = &msg_info.data.zxdh_mac_filter;
+
+		mac_filter->filter_flag = MAC_UNFILTER;
+		mac_filter->mac_flag = true;
+		rte_memcpy(&mac_filter->mac, old_addr, sizeof(struct rte_ether_addr));
+		msg_head_build(hw, ZXDH_MAC_DEL, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+				hw->vport.vport, ZXDH_MAC_DEL);
+			return ret;
+		}
+		hw->uc_num--;
+		PMD_DRV_LOG(INFO, "Success to send msg: port 0x%x msg type %d",
+			hw->vport.vport, ZXDH_MAC_DEL);
+
+		mac_filter->filter_flag = MAC_UNFILTER;
+		rte_memcpy(&mac_filter->mac, addr, sizeof(struct rte_ether_addr));
+		msg_head_build(hw, ZXDH_MAC_ADD, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+				hw->vport.vport, ZXDH_MAC_ADD);
+			return ret;
+		}
+		hw->uc_num++;
+		PMD_DRV_LOG(INFO, "Success to send msg: port 0x%x msg type %d",
+			hw->vport.vport, ZXDH_MAC_ADD);
+	}
+	rte_ether_addr_copy(addr, (struct rte_ether_addr *)hw->mac_addr);
+	PMD_DRV_LOG(INFO, "get dev mac1: %02X:%02X:%02X:%02X:%02X:%02X",
+			hw->mac_addr[0], hw->mac_addr[1],
+			hw->mac_addr[2], hw->mac_addr[3],
+			hw->mac_addr[4], hw->mac_addr[5]);
+	/* Mirror the new address into the virtio-style config space. */
+	zxdh_vtpci_write_dev_config(hw, offsetof(struct zxdh_net_config, mac),
+								&hw->mac_addr, RTE_ETHER_ADDR_LEN);
+	return ret;
+}
+/**
+ * Fun: add a secondary MAC filter entry at @index.
+ * Unicast and multicast addresses are counted against separate limits.
+ */
+int zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+	uint32_t index, uint32_t vmdq __rte_unused)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	union VPORT vport = hw->vport;
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_msg_reply_body reply_body;
+	uint16_t i;
+	int ret;
+	uint16_t res_len = 0;
+
+	if (index >= ZXDH_MAX_MAC_ADDRS) {
+		PMD_DRV_LOG(ERR, "Add mac index (%u) is out of range", index);
+		return -EINVAL;
+	}
+
+	/* Reject duplicates anywhere in the address table. */
+	for (i = 0; (i != ZXDH_MAX_MAC_ADDRS); ++i) {
+		if (memcmp(&dev->data->mac_addrs[i], mac_addr, sizeof(*mac_addr)))
+			continue;
+
+		PMD_DRV_LOG(INFO, "MAC address already configured");
+		return -EADDRINUSE;
+	}
+
+	if (hw->is_pf) {
+		struct zxdh_mac_filter mac_filter;
+
+		rte_memcpy(&mac_filter.mac, mac_addr, sizeof(struct rte_ether_addr));
+		if (rte_is_unicast_ether_addr(mac_addr)) {
+			if (hw->uc_num < ZXDH_MAX_UC_MAC_ADDRS) {
+				ret = proc_func[ZXDH_MAC_ADD](hw, vport.vport, &mac_filter,
+							&reply_body, &res_len);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "mac_addr_add failed, code:%d", ret);
+					return -ret;
+				}
+				hw->uc_num++;
+			} else {
+				/* unicast branch: report the UC limit */
+				PMD_DRV_LOG(ERR, "UC_MAC is out of range, MAX_UC_MAC:%d",
+						ZXDH_MAX_UC_MAC_ADDRS);
+				return -EINVAL;
+			}
+		} else {
+			if (hw->mc_num < ZXDH_MAX_MC_MAC_ADDRS) {
+				ret = proc_func[ZXDH_MAC_ADD](hw, vport.vport, &mac_filter,
+							&reply_body, &res_len);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "mac_addr_add  failed, code:%d", ret);
+					return -ret;
+				}
+				hw->mc_num++;
+			} else {
+				PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d",
+						ZXDH_MAX_MC_MAC_ADDRS);
+				return -EINVAL;
+			}
+		}
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", vport.vfid);
+		struct zxdh_mac_filter *mac_filter = &msg_info.data.zxdh_mac_filter;
+
+		mac_filter->filter_flag = MAC_FILTER;
+		rte_memcpy(&mac_filter->mac, mac_addr, sizeof(struct rte_ether_addr));
+		msg_head_build(hw, ZXDH_MAC_ADD, &msg_info);
+		if (rte_is_unicast_ether_addr(mac_addr)) {
+			if (hw->uc_num < ZXDH_MAX_UC_MAC_ADDRS) {
+				ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+							sizeof(msg_info), NULL, 0);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+							hw->vport.vport, ZXDH_MAC_ADD);
+					return -ret;
+				}
+				hw->uc_num++;
+			} else {
+				/* unicast branch: report the UC limit */
+				PMD_DRV_LOG(ERR, "UC_MAC is out of range, MAX_UC_MAC:%d",
+						ZXDH_MAX_UC_MAC_ADDRS);
+				return -EINVAL;
+			}
+		} else {
+			if (hw->mc_num < ZXDH_MAX_MC_MAC_ADDRS) {
+				ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+							sizeof(msg_info), NULL, 0);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+							hw->vport.vport, ZXDH_MAC_ADD);
+					return -ret;
+				}
+				hw->mc_num++;
+			} else {
+				PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d",
+						ZXDH_MAX_MC_MAC_ADDRS);
+				return -EINVAL;
+			}
+		}
+		PMD_DRV_LOG(INFO, "Success to send msg: port 0x%x msg type %d",
+				hw->vport.vport, ZXDH_MAC_ADD);
+	}
+	dev->data->mac_addrs[index] = *mac_addr;
+	return 0;
+}
+/**
+ * Fun: remove the MAC filter entry at @index and clear the table slot.
+ * Fix: both parameters were tagged __rte_unused although they are used.
+ */
+void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	union VPORT vport = hw->vport;
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_msg_reply_body reply_body;
+	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
+	uint16_t res_len = 0;
+	int ret = 0;
+
+	if (index >= ZXDH_MAX_MAC_ADDRS)
+		return;
+
+	/* NOTE(review): the "<=" counter checks below pass even when the
+	 * counter is already 0, so the decrement can underflow -- confirm
+	 * whether "> 0" was intended.
+	 */
+	if (hw->is_pf) {
+		struct zxdh_mac_filter mac_filter;
+
+		rte_memcpy(&mac_filter.mac, mac_addr, sizeof(struct rte_ether_addr));
+		if (rte_is_unicast_ether_addr(mac_addr)) {
+			if (hw->uc_num <= ZXDH_MAX_UC_MAC_ADDRS) {
+				ret = proc_func[ZXDH_MAC_DEL](hw, vport.vport, &mac_filter,
+							&reply_body, &res_len);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "mac_addr_del  failed, code:%d", ret);
+					return;
+				}
+				hw->uc_num--;
+			} else {
+				/* unicast branch: report the UC limit */
+				PMD_DRV_LOG(ERR, "UC_MAC is out of range, MAX_UC_MAC:%d",
+						ZXDH_MAX_UC_MAC_ADDRS);
+				return;
+			}
+		} else {
+			if (hw->mc_num <= ZXDH_MAX_MC_MAC_ADDRS) {
+				ret = proc_func[ZXDH_MAC_DEL](hw, vport.vport, &mac_filter,
+							&reply_body, &res_len);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "mac_addr_del  failed, code:%d", ret);
+					return;
+				}
+				hw->mc_num--;
+			} else {
+				PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d",
+						ZXDH_MAX_MC_MAC_ADDRS);
+				return;
+			}
+		}
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", vport.vfid);
+		struct zxdh_mac_filter *mac_filter = &msg_info.data.zxdh_mac_filter;
+
+		mac_filter->filter_flag = MAC_FILTER;
+		rte_memcpy(&mac_filter->mac, mac_addr, sizeof(struct rte_ether_addr));
+		msg_head_build(hw, ZXDH_MAC_DEL, &msg_info);
+		if (rte_is_unicast_ether_addr(mac_addr)) {
+			if (hw->uc_num <= ZXDH_MAX_UC_MAC_ADDRS) {
+				ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+							sizeof(msg_info), NULL, 0);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+							hw->vport.vport, ZXDH_MAC_DEL);
+					return;
+				}
+				hw->uc_num--;
+			} else {
+				/* unicast branch: report the UC limit */
+				PMD_DRV_LOG(ERR, "UC_MAC is out of range, MAX_UC_MAC:%d",
+						ZXDH_MAX_UC_MAC_ADDRS);
+				return;
+			}
+		} else {
+			if (hw->mc_num <= ZXDH_MAX_MC_MAC_ADDRS) {
+				ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+							sizeof(msg_info), NULL, 0);
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+							hw->vport.vport, ZXDH_MAC_DEL);
+					return;
+				}
+				hw->mc_num--;
+			} else {
+				PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d",
+						ZXDH_MAX_MC_MAC_ADDRS);
+				return;
+			}
+		}
+		PMD_DRV_LOG(INFO, "Success to send msg: port 0x%x msg type %d",
+				hw->vport.vport, ZXDH_MAC_DEL);
+	}
+	memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
+}
+/* Enable promiscuous mode: open unicast flooding, and multicast flooding
+ * too unless allmulticast already enabled it. Fix: failures from
+ * zxdh_dev_unicast_set()/zxdh_dev_multicast_set() were silently
+ * overwritten and the status flag was set even on failure.
+ */
+int zxdh_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+	int16_t ret = 0;
+
+	if (hw->promisc_status != 0)
+		return 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_dev_unicast_set(hw, hw->vport.vport, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "port 0x%x unicast set failed, code:%d",
+					hw->vport.vport, ret);
+			return ret;
+		}
+		if (hw->allmulti_status == 0) {
+			ret = zxdh_dev_multicast_set(hw, hw->vport.vport, true);
+			if (ret) {
+				PMD_DRV_LOG(ERR, "port 0x%x multicast set failed, code:%d",
+						hw->vport.vport, ret);
+				return ret;
+			}
+		}
+	} else {
+		struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg;
+
+		msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info);
+		promisc_msg->mode = ZXDH_PROMISC_MODE;
+		promisc_msg->value = true;
+		if (hw->allmulti_status == 0)
+			promisc_msg->mc_follow = true;
+
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+					hw->vport.vport, ZXDH_PROMISC_MODE);
+			return ret;
+		}
+	}
+	hw->promisc_status = 1;
+	return ret;
+}
+/**
+ * Fun: disable promiscuous mode. Unicast flooding is closed; multicast
+ * flooding is closed too unless allmulticast still requires it.
+ * Fix: errors from the set helpers were silently overwritten and the
+ * status flag was cleared even on failure.
+ */
+int zxdh_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int16_t ret = 0;
+	struct zxdh_msg_info msg_info = {0};
+
+	if (hw->promisc_status != 1)
+		return 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_dev_unicast_set(hw, hw->vport.vport, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "port 0x%x unicast set failed, code:%d",
+					hw->vport.vport, ret);
+			return ret;
+		}
+		if (hw->allmulti_status == 0) {
+			ret = zxdh_dev_multicast_set(hw, hw->vport.vport, false);
+			if (ret) {
+				PMD_DRV_LOG(ERR, "port 0x%x multicast set failed, code:%d",
+						hw->vport.vport, ret);
+				return ret;
+			}
+		}
+	} else {
+		struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg;
+
+		msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info);
+		promisc_msg->mode = ZXDH_PROMISC_MODE;
+		promisc_msg->value = false;
+		if (hw->allmulti_status == 0)
+			promisc_msg->mc_follow = true;
+
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+					hw->vport.vport, ZXDH_PROMISC_MODE);
+			return ret;
+		}
+	}
+	hw->promisc_status = 0;
+	return ret;
+}
+/**
+ * Fun: enable allmulticast (multicast flooding).
+ * Fix: the status flag was set to 1 even when the hardware update failed.
+ */
+int zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int16_t ret = 0;
+	struct zxdh_msg_info msg_info = {0};
+
+	if (hw->allmulti_status != 0)
+		return 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_dev_multicast_set(hw, hw->vport.vport, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "port 0x%x multicast set failed, code:%d",
+					hw->vport.vport, ret);
+			return ret;
+		}
+	} else {
+		struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg;
+
+		msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info);
+
+		promisc_msg->mode = ZXDH_ALLMULTI_MODE;
+		promisc_msg->value = true;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+					hw->vport.vport, ZXDH_ALLMULTI_MODE);
+			return ret;
+		}
+	}
+	hw->allmulti_status = 1;
+	return ret;
+}
+/**
+ * Fun: disable allmulticast. While promiscuous mode is still active the
+ * multicast flooding must stay on in hardware, so only the local flag is
+ * cleared (the "end" path). Fix: the flag was cleared even when the
+ * hardware update failed.
+ */
+int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int16_t ret = 0;
+	struct zxdh_msg_info msg_info = {0};
+
+	if (hw->allmulti_status != 1)
+		return 0;
+
+	if (hw->is_pf) {
+		if (hw->promisc_status == 1)
+			goto end;
+		ret = zxdh_dev_multicast_set(hw, hw->vport.vport, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "port 0x%x multicast set failed, code:%d",
+					hw->vport.vport, ret);
+			return ret;
+		}
+	} else {
+		struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg;
+
+		if (hw->promisc_status == 1)
+			goto end;
+		msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info);
+		promisc_msg->mode = ZXDH_ALLMULTI_MODE;
+		promisc_msg->value = false;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d",
+					hw->vport.vport, EGR_FLAG_IFF_ALLMULTI_EN_OFF);
+			return ret;
+		}
+	}
+end:
+	hw->allmulti_status = 0;
+	return ret;
+}
+/**
+ * Fun: enable/disable unicast flooding for a vport.
+ * Read-modify-write of one unicast attribute eRAM row via the DTB queue.
+ * NOTE(review): row index = (per-PF base << 2) + vfid/64, i.e. one row
+ * covers 64 VFs -- confirm against the table layout.
+ */
+int zxdh_dev_unicast_set(struct zxdh_hw *hw, uint16_t vport_t, bool enable)
+{
+	int16_t ret = 0;
+	struct zxdh_unitcast_t uc_table = {0};
+	union VPORT vport = (union VPORT)vport_t;
+
+	DPP_DTB_ERAM_ENTRY_INFO_T uc_table_entry = {
+		((hw->vfid-ZXDH_BASE_VFID) << 2) + vport.vfid / 64,
+		(ZXIC_UINT32 *)&uc_table
+	};
+	DPP_DTB_USER_ENTRY_T entry = {
+		.sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE,
+		.p_entry_data = (void *)&uc_table_entry
+	};
+
+	/* Fetch the current row so untouched VF bits are preserved. */
+	ret = dpp_dtb_entry_get(DEVICE_NO, g_dtb_data.queueid, &entry, 1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "unicast_table_get_failed:%d\n", hw->vfid);
+		return -ret;
+	}
+
+	if (vport.vf_flag) {
+		/* VF: toggle its bit (MSB-first within each 32-bit word). */
+		if (enable)
+			uc_table.bitmap[(vport.vfid % 64) / 32] |=
+					UINT32_C(1) << (31-(vport.vfid % 64) % 32);
+		else
+			uc_table.bitmap[(vport.vfid % 64) / 32] &=
+					~(UINT32_C(1) << (31-(vport.vfid % 64) % 32));
+	} else
+		/* PF: toggle the flood-enable field of the row. */
+		uc_table.uc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG + (enable << 6));
+
+	ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "unicast_table_set_failed:%d\n",  hw->vfid);
+		return -ret;
+	}
+	return 0;
+}
+/**
+ * Fun: enable/disable multicast flooding for a vport.
+ * Mirrors zxdh_dev_unicast_set() but operates on the multicast attribute
+ * table (one eRAM row per 64 VFs, MSB-first bit order).
+ */
+int zxdh_dev_multicast_set(struct zxdh_hw *hw, uint16_t vport_t, bool enable)
+{
+	int16_t ret = 0;
+	struct zxdh_multicast_t mc_table = {0};
+	union VPORT vport = (union VPORT)vport_t;
+
+	DPP_DTB_ERAM_ENTRY_INFO_T mc_table_entry = {
+		((hw->vfid-ZXDH_BASE_VFID) << 2) + vport.vfid / 64,
+		(ZXIC_UINT32 *)&mc_table
+	};
+	DPP_DTB_USER_ENTRY_T entry = {
+		.sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE,
+		.p_entry_data = (void *)&mc_table_entry
+	};
+
+	/* Fetch the current row so untouched VF bits are preserved. */
+	ret = dpp_dtb_entry_get(DEVICE_NO, g_dtb_data.queueid, &entry, 1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "allmulti_table_get_failed:%d\n", hw->vfid);
+		return -ret;
+	}
+
+	if (vport.vf_flag) {
+		/* VF: toggle its bit (MSB-first within each 32-bit word). */
+		if (enable)
+			mc_table.bitmap[(vport.vfid % 64) / 32] |=
+					UINT32_C(1) << (31 - (vport.vfid % 64) % 32);
+		else
+			mc_table.bitmap[(vport.vfid%64)/32] &=
+					~(UINT32_C(1) << (31 - (vport.vfid % 64) % 32));
+
+	} else {
+		/* PF: toggle the flood-enable field of the row. */
+		mc_table.mc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG + (enable << 6));
+	}
+	ret = dpp_dtb_table_entry_write(DEVICE_NO, g_dtb_data.queueid, 1, &entry);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "allmulti_table_set_failed:%d\n", hw->vfid);
+		return -ret;
+	}
+	return 0;
+}
+/**
+ * Fun: rte_flow ops getter -- hand back the zxdh flow ops table.
+ */
+int zxdh_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops)
+{
+	if (dev == NULL)
+		return -EINVAL;
+
+	*ops = &zxdh_flow_ops;
+	return 0;
+}
+
+/* Map a message type to the matching payload field inside msg_info->data.
+ * Returns NULL (after logging) for types without a payload mapping.
+ */
+static void *get_msg_data(struct zxdh_msg_info *msg_info)
+{
+	switch (msg_info->msg_head.msg_type) {
+	case ZXDH_VLAN_FILTER_SET:
+		return &msg_info->data.zxdh_vlan_filter_set;
+
+	case ZXDH_VLAN_FILTER_ADD:
+		 /* fallthrough */
+	case ZXDH_VLAN_FILTER_DEL:
+		return &msg_info->data.zxdh_vlan_filter;
+
+	case ZXDH_RSS_ENABLE:
+		return &msg_info->data.rss_enable;
+
+	case ZXDH_RSS_RETA_GET:
+		/* fallthrough */
+	case ZXDH_RSS_RETA_SET:
+		return &msg_info->data.zxdh_rss_reta;
+
+	case ZXDH_RSS_KEY_GET:
+		/* fallthrough */
+	case ZXDH_RSS_KEY_SET:
+		return &msg_info->data.zxdh_rss_key;
+
+	case ZXDH_RSS_HF_GET:
+		/* fallthrough */
+	case ZXDH_RSS_HF_SET:
+		return &msg_info->data.zxdh_rss_hf;
+
+	case ZXDH_VLAN_OFFLOAD:
+		return &msg_info->data.zxdh_vlan_offload;
+
+	case ZXDH_SET_TPID:
+		return &msg_info->data.zxdh_vlan_tpid;
+
+	case ZXDH_VXLAN_OFFLOAD_DEL:
+		/* fallthrough */
+	case ZXDH_VXLAN_OFFLOAD_ADD:
+		return &msg_info->data.zxdh_vxlan_port;
+
+	default:
+		PMD_DRV_LOG(ERR, "Unknown msg type:%d", msg_info->msg_head.msg_type);
+	}
+
+	return NULL;
+}
+
+/* Copy the reply payload of GET-type messages back into @out_msg.
+ * Non-GET types fall to the default branch and are only logged.
+ */
+static void
+out_result(enum zxdh_msg_type msg_type, struct zxdh_msg_reply_body *reply,
+		struct zxdh_msg_info *out_msg)
+{
+	PMD_DRV_LOG(DEBUG, "%s msg type:[%d]", __func__, msg_type);
+	switch (msg_type) {
+	case ZXDH_RSS_KEY_GET:
+		memcpy(out_msg->data.zxdh_rss_key.rss_key,
+			reply->rss_key_msg.rss_key, ZXDH_RSK_LEN);
+		break;
+
+	case ZXDH_RSS_HF_GET:
+		out_msg->data.zxdh_rss_hf.rss_hf = reply->rss_hf_msg.rss_hf;
+		break;
+
+	case ZXDH_RSS_RETA_GET:
+		memcpy(out_msg->data.zxdh_rss_reta.reta,
+			reply->rss_reta_msg.reta, sizeof(reply->rss_reta_msg.reta));
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "Unknown msg type:[%d]", msg_type);
+		break;
+	}
+}
+
+/* Common PF/VF dispatch for table configuration messages. A PF executes
+ * the handler locally; a VF forwards the message to its PF. When @out_msg
+ * is non-NULL the reply payload is copied back into it.
+ * Fix: a NULL proc_func[] entry was logged but then still CALLED -- now
+ * it returns -ENOTSUP before the call.
+ */
+static int
+zxdh_hw_config_proc(struct rte_eth_dev *dev, struct zxdh_msg_info *msg_info,
+		struct zxdh_msg_info *out_msg)
+{
+	struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+	struct zxdh_msg_reply_info  reply = {0};
+	uint16_t res_len = 0;
+	int ret = 0;
+	uint16_t vfid = vport_to_vfid(hw->vport);
+	uint8_t msg_type = msg_info->msg_head.msg_type;
+
+	if (hw->is_pf) {
+		PMD_DRV_LOG(DEBUG, "PF hw config, msg_type:%d, vfid:%d", msg_type, vfid);
+		if (proc_func[msg_type] == NULL) {
+			PMD_DRV_LOG(ERR, "msg type:%d not found process function.", msg_type);
+			return -ENOTSUP;
+		}
+		void *p_data = get_msg_data(msg_info);
+
+		ret = proc_func[msg_type](hw, hw->vport.vport, p_data, &reply.reply_body, &res_len);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "pf msg_type:%d exec failed, ret:%d.", msg_type, ret);
+			return -ret;
+		}
+
+	} else {
+		PMD_DRV_LOG(DEBUG, "VF hw config, msg_type:%d, vfid:%d", msg_type, vfid);
+		/* Request a reply buffer only when the caller wants data back. */
+		if (out_msg)
+			ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, msg_info,
+						sizeof(struct zxdh_msg_info), &reply,
+						sizeof(struct zxdh_msg_reply_info));
+		else
+			ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, msg_info,
+						sizeof(struct zxdh_msg_info), NULL, 0);
+
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "vf config failed, port:0x%x msg type:%d, ret:%d",
+					hw->vport.vport, msg_type, ret);
+			return -ret;
+		}
+	}
+
+	if (out_msg)
+		out_result(msg_type, &reply.reply_body, out_msg);
+
+	return ret;
+}
+
+/* Add/remove one VLAN id in the port's VLAN filter. A lazily allocated
+ * shadow bitmap (hw->vlan_fiter, 64 x uint64_t = 4096 bits) avoids
+ * re-sending unchanged entries; it is only updated on success.
+ * Fixes: PF-path errors were swallowed (bitmap updated, 0 returned) and
+ * the "already added/deleted" logs printed msg_type before it was set.
+ */
+int zxdh_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+	struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+	uint8_t msg_type;
+	uint16_t idx;
+	uint16_t bit_idx;
+	int ret = 0;
+
+	PMD_DRV_LOG(DEBUG, "vlan filter set vlan_id:%d, on:%d", vlan_id, on);
+
+	vlan_id &= 0x0fff;
+	if (vlan_id == 0 || vlan_id == 4095) {
+		PMD_DRV_LOG(ERR, "VID(%d) is reserved", vlan_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started == 0) {
+		PMD_DRV_LOG(ERR, "vlan_filter dev not start");
+		return -1;
+	}
+
+	if (!hw->vlan_fiter) {
+		hw->vlan_fiter = rte_zmalloc("vlan filter bak",
+				ZXDH_VLAN_FILTER_BACKUP_GROUPS * sizeof(uint64_t), 0);
+		if (hw->vlan_fiter == NULL) {
+			PMD_DRV_LOG(ERR, "alloc fail");
+			return -1;
+		}
+	}
+	idx = vlan_id / ZXDH_VLAN_FILTER_BACKUP_GROUPS;
+	bit_idx = vlan_id % ZXDH_VLAN_FILTER_BACKUP_GROUPS;
+
+	/* Pick the msg type first so the early-exit logs report it. */
+	if (on) {
+		msg_type = ZXDH_VLAN_FILTER_ADD;
+		if (hw->vlan_fiter[idx] & (1ULL << bit_idx)) {
+			PMD_DRV_LOG(DEBUG, "msg type:%d, vlan:%d has already added.",
+					msg_type, vlan_id);
+			return 0;
+		}
+	} else {
+		msg_type = ZXDH_VLAN_FILTER_DEL;
+		if ((hw->vlan_fiter[idx] & (1ULL << bit_idx)) == 0) {
+			PMD_DRV_LOG(DEBUG, "msg type:%d, vlan:%d has already deleted.",
+					msg_type, vlan_id);
+			return 0;
+		}
+	}
+
+	if (hw->is_pf) {
+		struct zxdh_vlan_filter vlan_filter = {.vlan_id = vlan_id };
+		struct zxdh_msg_reply_body reply;
+		uint16_t res_len = 0;
+
+		if (proc_func[msg_type]) {
+			ret = proc_func[msg_type](hw, hw->vport.vport, &vlan_filter,
+						&reply, &res_len);
+		} else {
+			PMD_DRV_LOG(ERR, "msg type:%d not found in process function.", msg_type);
+			ret = -1;
+		}
+		/* Do not record success in the bitmap if hardware update failed. */
+		if (ret)
+			return ret;
+	} else {
+		struct zxdh_msg_info msg = {0};
+
+		msg_head_build(hw, msg_type, &msg);
+		msg.data.zxdh_vlan_filter.vlan_id = vlan_id;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(msg), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+					hw->vport.vport, msg_type);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "Success to send msg: port 0x%x msg type %d ",
+				hw->vport.vport, msg_type);
+	}
+
+	if (on)
+		hw->vlan_fiter[idx] |= (1ULL << bit_idx);
+	else
+		hw->vlan_fiter[idx] &= ~(1ULL << bit_idx);
+
+	return 0;
+}
+
+/* Offload one VXLAN UDP port. Fix: @dev was tagged __rte_unused although
+ * it is dereferenced for dev_private and passed to the config helper.
+ */
+static inline int
+zxdh_add_vxlan_port(struct rte_eth_dev *dev, uint16_t udp_port)
+{
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	int ret;
+
+	msg_head_build(priv, ZXDH_VXLAN_OFFLOAD_ADD, &msg);
+	msg.data.zxdh_vxlan_port.port = udp_port;
+
+	ret = zxdh_hw_config_proc(dev, &msg, NULL);
+	if (ret)
+		PMD_DRV_LOG(ERR, "tunnel_cfg_fail add err(%d)", ret);
+
+	return ret;
+}
+
+/* Remove one offloaded VXLAN UDP port. Fix: @dev was tagged __rte_unused
+ * although it is dereferenced for dev_private and passed to the helper.
+ */
+static inline int
+zxdh_del_vxlan_port(struct rte_eth_dev *dev, uint16_t udp_port)
+{
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	int ret;
+
+	msg_head_build(priv, ZXDH_VXLAN_OFFLOAD_DEL, &msg);
+	msg.data.zxdh_vxlan_port.port = udp_port;
+
+	ret = zxdh_hw_config_proc(dev, &msg, NULL);
+	if (ret)
+		PMD_DRV_LOG(ERR, "tunnel_cfg_fail del err(%d)", ret);
+
+	return ret;
+}
+
+/* rte_eth udp_tunnel_port_add callback: only VXLAN is supported. */
+int zxdh_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+				struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	PMD_DRV_LOG(DEBUG, "dev-ops[%s] called.", __func__);
+
+	if (udp_tunnel == NULL) {
+		PMD_DRV_LOG(ERR, "udp_tunnel is null (NULL_PTR)");
+		return -1;
+	}
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+		return zxdh_add_vxlan_port(dev, udp_tunnel->udp_port);
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
+		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+		return -ENOTSUP;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		return -EINVAL;
+	}
+}
+
+/* dev_ops: unregister a UDP tunnel port. Only VXLAN is supported.
+ * Returns 0 on success, -EINVAL on bad arguments, -ENOTSUP for
+ * GENEVE/TEREDO, or the zxdh_del_vxlan_port() status.
+ */
+int zxdh_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+				struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret;
+
+	PMD_DRV_LOG(DEBUG, "dev-ops[%s] called.", __func__);
+
+	if (udp_tunnel == NULL) {
+		PMD_DRV_LOG(ERR, "udp_tunnel is null (NULL_PTR)");
+		/* return a negative errno, consistent with the cases below */
+		return -EINVAL;
+	}
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+		ret = zxdh_del_vxlan_port(dev, udp_tunnel->udp_port);
+		break;
+
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
+		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+		ret = -ENOTSUP;
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/* dev_ops: set the VLAN TPID. Only the outer TPID can be written to hw,
+ * not same with others, be careful.
+ * Returns 0 on success, -ENOSYS if VLAN filtering is off, -EINVAL for a
+ * non-outer vlan_type, or the zxdh_hw_config_proc() status.
+ */
+int
+zxdh_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, uint16_t tpid)
+{
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct zxdh_msg_info msg = {0};
+	int ret;
+
+	PMD_DRV_LOG(DEBUG, "dev-ops[%s] called.", __func__);
+
+	if (!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
+		/* no trailing '\n': PMD_DRV_LOG already terminates the line */
+		PMD_DRV_LOG(ERR, "Port %u: VLAN-filtering disabled", priv->port_id);
+		return -ENOSYS;
+	}
+
+	/* hardware only supports rewriting the outer TPID */
+	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
+		PMD_DRV_LOG(ERR, "ZXDH_PARA_VALUE_ILLEGAL vlan_type (%d)", vlan_type);
+		return -EINVAL;
+	}
+
+	msg_head_build(priv, ZXDH_SET_TPID, &msg);
+	msg.data.zxdh_vlan_tpid.tpid = tpid;
+	ret = zxdh_hw_config_proc(dev, &msg, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+		return ret;
+	}
+	priv->otpid = tpid;	/* cache the outer TPID now programmed in hw */
+
+	return 0;
+}
+
+/* dev_ops: apply VLAN offload settings (filter/strip/QinQ-strip/extend)
+ * selected by 'mask'. Each section only sends a message when its flag is
+ * in the mask, and caches the new state on success.
+ * NOTE(review): ret is reused across sections, so a failure in an earlier
+ * section is overwritten by a later section's status and only the last
+ * error is reported to the caller - confirm this is intended.
+ */
+int zxdh_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	PMD_DRV_LOG(DEBUG, "vlan offload set. mask:0x%0x", mask);
+
+	int ret = 0;
+	struct zxdh_msg_info msg = {0};
+	struct rte_eth_rxmode *rxmode;
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+
+	rxmode = &dev->data->dev_conf.rxmode;
+	/* VLAN filter: only message hw when the cached state differs */
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+			msg.data.zxdh_vlan_filter_set.enable = ZXDH_FLAG_YES;
+		else
+			msg.data.zxdh_vlan_filter_set.enable = ZXDH_FLAG_NO;
+
+		if (priv->vlan_offload_cfg.vlan_filter !=
+				msg.data.zxdh_vlan_filter_set.enable) {
+			msg_head_build(priv, ZXDH_VLAN_FILTER_SET, &msg);
+			ret = zxdh_hw_config_proc(dev, &msg, NULL);
+			if (ret == 0)
+				priv->vlan_offload_cfg.vlan_filter =
+					msg.data.zxdh_vlan_filter_set.enable;
+		}
+	}
+
+	/* VLAN strip: msg is reused, so clear it before each section */
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		memset(&msg, 0, sizeof(struct zxdh_msg_info));
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			msg.data.zxdh_vlan_offload.enable = ZXDH_FLAG_YES;
+		else
+			msg.data.zxdh_vlan_offload.enable = ZXDH_FLAG_NO;
+		msg.data.zxdh_vlan_offload.type = VLAN_STRIP_MSG_TYPE;
+
+		msg_head_build(priv, ZXDH_VLAN_OFFLOAD, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (!ret)
+			priv->vlan_offload_cfg.vlan_strip = msg.data.zxdh_vlan_offload.enable;
+	}
+
+	/* QinQ strip: same opcode as strip, distinguished by 'type' */
+	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+		memset(&msg, 0, sizeof(struct zxdh_msg_info));
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+			msg.data.zxdh_vlan_offload.enable = ZXDH_FLAG_YES;
+		else
+			msg.data.zxdh_vlan_offload.enable = ZXDH_FLAG_NO;
+		msg.data.zxdh_vlan_offload.type = QINQ_STRIP_MSG_TYPE;
+
+		msg_head_build(priv, ZXDH_VLAN_OFFLOAD, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (!ret)
+			priv->vlan_offload_cfg.qinq_strip = msg.data.zxdh_vlan_offload.enable;
+	}
+
+	/* VLAN extend */
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		memset(&msg, 0, sizeof(struct zxdh_msg_info));
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+			msg.data.zxdh_vlan_offload.enable = ZXDH_FLAG_YES;
+		else
+			msg.data.zxdh_vlan_offload.enable  = ZXDH_FLAG_NO;
+
+		msg_head_build(priv, ZXDH_VLAN_EXTEND_SET, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (!ret)
+			priv->vlan_offload_cfg.vlan_extend = msg.data.zxdh_vlan_offload.enable;
+	}
+
+	return ret;
+}
+
+/* Fill the common PF/VF message header of 'msg_info' with the message
+ * type and this device's vport, vfid and pcie id.
+ */
+void msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, struct zxdh_msg_info *msg_info)
+{
+	struct zxdh_msg_head *head = &msg_info->msg_head;
+
+	head->msg_type = type;
+	head->vport = hw->vport.vport;
+	head->vf_id = hw->vport.vfid;
+	head->pcieid = hw->pcie_id;
+}
+
+/* Fill the agent (control-channel) message header of 'msg_info' with the
+ * opcode and this device's panel, phyport, vfid and pcie id.
+ */
+void ctrl_msg_build(struct zxdh_hw *hw, enum zxdh_agent_opc opcode, struct zxdh_msg_info *msg_info)
+{
+	struct agent_msg_head *head = &msg_info->agent_head;
+
+	head->op_code = opcode;
+	head->panel_id = hw->panel_id;
+	head->phyport = hw->phyport;
+	head->vf_id = hw->vfid;
+	head->pcie_id = hw->pcie_id;
+}
+
+/* dev_ops: update the RSS hash key and/or hash functions.
+ * The key is only reprogrammed when it differs from the cached copy; the
+ * hash functions are only reprogrammed when they (or the enable state)
+ * changed. Returns 0 on success, negative on failure.
+ */
+int
+zxdh_rss_hash_update(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_conf *rss_conf)
+{
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	struct rte_eth_rss_conf *old_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+	struct zxdh_msg_info msg = {0};
+	uint32_t hw_hf_new, hw_hf_old;
+	int need_update_hf = 0;
+	int need_update_key = 0;
+	int ret = 0;
+
+	/* NOTE(review): assumes ZXDH_RSS_HF_MASK covers the *unsupported*
+	 * hash-function bits - confirm against its definition.
+	 * %llx + cast: %lx is wrong for uint64_t on 32-bit targets.
+	 */
+	if (rss_conf->rss_hf & ZXDH_RSS_HF_MASK) {
+		PMD_DRV_LOG(ERR, "rss_conf->rss_hf not support(%08llx)",
+			(unsigned long long)rss_conf->rss_hf);
+		return -1;
+	}
+
+	hw_hf_new = zxdh_rss_hf_to_hw(rss_conf->rss_hf);
+	hw_hf_old = zxdh_rss_hf_to_hw(old_rss_conf->rss_hf);
+	PMD_DRV_LOG(INFO, "hf_hw_old(%u), convert_to_hw_new(%u) ", hw_hf_old, hw_hf_new);
+
+	if (hw_hf_new != hw_hf_old || priv->rss_enable != (!!rss_conf->rss_hf))
+		need_update_hf = 1;
+
+	if (rss_conf->rss_key && rss_conf->rss_key_len) {
+		if (rss_conf->rss_key_len != ZXDH_RSK_LEN) {
+			PMD_DRV_LOG(ERR, "rss_conf->rss_key_len(%u) not equal (%u)",
+				rss_conf->rss_key_len, ZXDH_RSK_LEN);
+			return -1;
+		}
+		/* new key needed when none is cached or the cached key differs */
+		if (old_rss_conf->rss_key == NULL ||
+				memcmp(rss_conf->rss_key, old_rss_conf->rss_key,
+					ZXDH_RSK_LEN) != 0)
+			need_update_key = 1;
+	}
+
+	/* update hash key */
+	if (need_update_key) {
+		PMD_DRV_LOG(DEBUG, "need update hash key.");
+		memset(&msg, 0, sizeof(struct zxdh_msg_info));
+		/* hardware expects the key in reversed byte order */
+		for (uint16_t i = 0; i < ZXDH_RSK_LEN; i++)
+			msg.data.zxdh_rss_key.rss_key[i] =
+				rss_conf->rss_key[ZXDH_RSK_LEN - 1 - i];
+
+		msg_head_build(priv, ZXDH_RSS_KEY_SET, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return ret;
+		}
+
+		if (old_rss_conf->rss_key == NULL) {
+			/* fix: rte_malloc's 3rd argument is an alignment, not
+			 * a socket id; 0 selects the default alignment.
+			 */
+			old_rss_conf->rss_key =
+				rte_malloc("rss_key", ZXDH_RSK_LEN, 0);
+			if (old_rss_conf->rss_key == NULL) {
+				PMD_DRV_LOG(ERR, "alloc fail");
+				return -1;
+			}
+		}
+
+		memcpy(old_rss_conf->rss_key, rss_conf->rss_key, ZXDH_RSK_LEN);
+		old_rss_conf->rss_key_len = ZXDH_RSK_LEN;
+	}
+
+	/* update enable state and hash factor */
+	if (need_update_hf) {
+		memset(&msg, 0, sizeof(struct zxdh_msg_info));
+		msg.data.rss_enable.enable = !!rss_conf->rss_hf;
+		msg_head_build(priv, ZXDH_RSS_ENABLE, &msg);
+
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return -1;
+		}
+		priv->rss_enable = msg.data.rss_enable.enable;
+
+		/* nothing else to program when RSS is being disabled */
+		if (rss_conf->rss_hf == 0)
+			return 0;
+
+		PMD_DRV_LOG(DEBUG, "need update hash factor");
+		msg.data.zxdh_rss_hf.rss_hf = hw_hf_new;
+		msg_head_build(priv, ZXDH_RSS_HF_SET, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return -1;
+		}
+		old_rss_conf->rss_hf = rss_conf->rss_hf;
+	}
+
+	return 0;
+}
+
+/* dev_ops: report the current RSS configuration, from the cached copy
+ * when available, otherwise (or when zxdh_force_read_from_hw is set)
+ * from the hardware. Returns 0 on success, negative on failure.
+ */
+int
+zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
+{
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	struct rte_eth_rss_conf *old_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+	int read_from_hw = 0;	/* fix: was read uninitialized on some paths (UB) */
+	uint32_t hw_hf;
+	int ret;
+
+	if (rss_conf == NULL) {
+		PMD_DRV_LOG(ERR, "rss_conf is NULL");
+		return -1;
+	}
+
+	if (priv->rss_enable == 0) {
+		rss_conf->rss_hf = 0;
+		return 0;
+	}
+
+	if (rss_conf->rss_key) {
+		if (old_rss_conf->rss_key == NULL) {
+			/* no cached key: allocate the cache and read from hw */
+			read_from_hw = 1;
+			/* fix: rte_malloc's 3rd argument is an alignment,
+			 * not a socket id; 0 selects the default alignment.
+			 */
+			old_rss_conf->rss_key =
+				rte_malloc("rss_key", ZXDH_RSK_LEN, 0);
+			if (old_rss_conf->rss_key == NULL) {
+				PMD_DRV_LOG(ERR, "alloc fail");
+				return -1;
+			}
+		} else {
+			rss_conf->rss_key_len = ZXDH_RSK_LEN;
+			memcpy(rss_conf->rss_key, old_rss_conf->rss_key, rss_conf->rss_key_len);
+		}
+	}
+	if (old_rss_conf->rss_key == NULL && old_rss_conf->rss_hf == 0)
+		read_from_hw = 1;
+
+	if (!read_from_hw) {
+		hw_hf = zxdh_rss_hf_to_hw(old_rss_conf->rss_hf);
+		rss_conf->rss_hf = zxdh_rss_hf_to_eth(hw_hf);
+	}
+
+	if (read_from_hw || zxdh_force_read_from_hw) {
+		struct zxdh_msg_info msg = {0}, out_msg = {0};
+
+		/* get hash key */
+		if (rss_conf->rss_key) {
+			msg_head_build(priv, ZXDH_RSS_KEY_GET, &msg);
+			ret = zxdh_hw_config_proc(dev, &msg, &out_msg);
+			if (ret) {
+				PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+				return -ret;
+			}
+
+			rss_conf->rss_key_len = ZXDH_RSK_LEN;
+			/* hardware returns the key in reversed byte order */
+			for (uint16_t i = 0; i < ZXDH_RSK_LEN; i++)
+				rss_conf->rss_key[i] =
+					out_msg.data.zxdh_rss_key.rss_key[ZXDH_RSK_LEN - 1 - i];
+
+			memcpy(old_rss_conf->rss_key, rss_conf->rss_key, ZXDH_RSK_LEN);
+			old_rss_conf->rss_key_len = ZXDH_RSK_LEN;
+		}
+
+		/* get hash factor */
+		memset(&msg, 0, sizeof(msg));
+		memset(&out_msg, 0, sizeof(out_msg));
+		msg_head_build(priv, ZXDH_RSS_HF_GET, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, &out_msg);
+		if (ret) {	/* fix: result was used without checking ret */
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return -ret;
+		}
+		/* fix: reuse the outer hw_hf instead of shadowing it */
+		hw_hf = out_msg.data.zxdh_rss_hf.rss_hf;
+		rss_conf->rss_hf = zxdh_rss_hf_to_eth(hw_hf);
+	}
+
+	return 0;
+}
+
+/* dev_ops: report the RSS redirection table. The cached table is always
+ * returned; when zxdh_force_read_from_hw is set the table is additionally
+ * fetched from hardware and its physical queue ids are translated back to
+ * logical ids. Returns 0 on success, negative on failure.
+ */
+int
+zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size)
+{
+	uint16_t idx;
+	uint16_t i;
+	int ret;
+	int cond;
+	uint16_t qid_logic;
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+
+	cond = (!reta_size || reta_size > priv->reta_idx_n);
+	if (cond) {
+		PMD_DRV_LOG(ERR, "request reta size(%u) not same with buffered(%u)",
+			reta_size, priv->reta_idx_n);
+		return -1;
+	}
+
+	/* Fill each entry of the table even if its bit is not set. */
+	for (idx = 0, i = 0; (i != reta_size); ++i) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = priv->reta_idx[i];
+	}
+
+	if (zxdh_force_read_from_hw) {
+		struct zxdh_msg_info msg = {0}, out_msg = {0};
+
+		msg_head_build(priv, ZXDH_RSS_RETA_GET, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, &out_msg);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return -ret;
+		}
+
+		struct zxdh_rss_reta *reta_tbl = &out_msg.data.zxdh_rss_reta;
+
+		/* translate hw physical queue ids back to logical queue ids */
+		for (idx = 0, i = 0; i < reta_size; ++i) {
+			idx = i / RTE_ETH_RETA_GROUP_SIZE;
+
+			qid_logic = zxdh_qid_ph_to_logic(dev, reta_tbl->reta[i]);
+			if (qid_logic == INVALID_LOGIC_QID) {
+				PMD_DRV_LOG(ERR, "rsp phy reta qid (%u) is illegal(%u)",
+					reta_tbl->reta[i], qid_logic);
+				return -EINVAL;
+			}
+			reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = qid_logic;
+		}
+	}
+	return 0;
+}
+
+/* dev_ops: program the RSS redirection table. Entries whose mask bit is
+ * clear are left unchanged; if the requested table equals the cached one
+ * nothing is sent to hardware. On hw failure the cached table is rolled
+ * back. Returns 0 on success, negative on failure.
+ */
+int
+zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 uint16_t reta_size)
+{
+	int ret;
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	unsigned int idx;
+	uint16_t i;
+	unsigned int pos;
+	uint16_t reta_bak[ZXDH_RETA_SIZE];
+
+	if (reta_size != ZXDH_RETA_SIZE) {
+		PMD_DRV_LOG(ERR, "reta_size is illegal(%u).reta_size should be 256", reta_size);
+		return -1;
+	}
+	priv->reta_idx_n = ZXDH_RETA_SIZE;
+	if (!priv->reta_idx) {
+		priv->reta_idx = rte_zmalloc(NULL, ZXDH_RETA_SIZE * sizeof(uint16_t), 4);
+		if (priv->reta_idx == NULL) {
+			PMD_DRV_LOG(ERR, "alloc memory fail");
+			return -1;
+		}
+	}
+	/* validate entries and detect whether anything actually changes */
+	for (idx = 0, i = 0; (i < reta_size); ++i) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
+			continue;
+		/* fix: queue id equal to nb_rx_queues is also out of range */
+		if (reta_conf[idx].reta[pos] >= dev->data->nb_rx_queues) {
+			PMD_DRV_LOG(ERR, "reta table value err(%u >= %u)",
+			reta_conf[idx].reta[pos], dev->data->nb_rx_queues);
+			return -1;
+		}
+		if (priv->reta_idx[i] != reta_conf[idx].reta[pos])
+			break;
+	}
+	if (i == reta_size) {
+		PMD_DRV_LOG(DEBUG, "reta table same with buffered table");
+		return 0;
+	}
+	memcpy(reta_bak, priv->reta_idx, sizeof(reta_bak));
+
+	/* merge masked entries into the cached table */
+	for (idx = 0, i = 0; i < reta_size; ++i) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		pos = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
+			continue;
+		priv->reta_idx[i] = reta_conf[idx].reta[pos];
+	}
+
+	struct zxdh_msg_info msg = {0};
+
+	/* hardware wants physical channel numbers, not logical queue ids */
+	msg_head_build(priv, ZXDH_RSS_RETA_SET, &msg);
+	for (i = 0; i < reta_size; i++)
+		msg.data.zxdh_rss_reta.reta[i] =
+			(priv->channel_context[priv->reta_idx[i] * 2].ph_chno);
+
+	ret = zxdh_hw_config_proc(dev, &msg, NULL);
+	if (ret != 0) {
+		/* roll back the cached table on hw failure */
+		memcpy(priv->reta_idx, reta_bak, sizeof(reta_bak));
+		PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+		return -ret;
+	}
+
+	return ret;
+}
+
+/* RSS should be enabled only in RSS mq mode with other than one Rx queue. */
+static int
+get_rss_enable_conf(struct rte_eth_dev *dev)
+{
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
+		return dev->data->nb_rx_queues != 1;
+
+	return 0;
+}
+
+/* Configure RSS at device start: validate the application key (if any),
+ * program the enable state, a default hash factor on first enable, and
+ * an evenly-spread redirection table. Returns 0 on success, negative on
+ * failure.
+ */
+int
+zxdh_rss_configure(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	int cond;
+	uint32_t hw_hf;
+	uint8_t *rss_key;
+	struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	uint32_t i;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	uint8_t use_app_rss_key =
+	!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+
+	/* an application-supplied key must be exactly ZXDH_RSK_LEN bytes */
+	cond = (use_app_rss_key &&
+			(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
+			ZXDH_RSK_LEN));
+	if (cond) {
+		PMD_DRV_LOG(ERR, "port %u RSS key len must be %u Bytes long",
+			dev->data->port_id, ZXDH_RSK_LEN);
+		return -1;
+	}
+
+	rss_key = use_app_rss_key ? dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : NULL;
+	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
+		(rss_key) ? ZXDH_RSK_LEN : 0;
+	if (dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(ERR, "port %u nb_rx_queues is 0", dev->data->port_id);
+		return -1;
+	}
+
+	/* config rss enable - only message hw when the state changes */
+	uint8_t curr_rss_enable = get_rss_enable_conf(dev);
+
+	if (priv->rss_enable != curr_rss_enable) {
+		PMD_DRV_LOG(DEBUG, "update rss enable. new(%d), old(%d)",
+			curr_rss_enable, priv->rss_enable);
+		msg.data.rss_enable.enable = curr_rss_enable;
+		msg_head_build(priv, ZXDH_RSS_ENABLE, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return -ret;
+		}
+		priv->rss_enable = curr_rss_enable;
+	}
+
+	/* program the default hash factor once, on the first enable */
+	if (curr_rss_enable && priv->rss_init == 0) {
+		/* config hash factor */
+		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = ZXDH_HF_F5_ETH;
+		hw_hf = zxdh_rss_hf_to_hw(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+		memset(&msg, 0, sizeof(msg));
+		msg.data.zxdh_rss_hf.rss_hf = hw_hf;
+		msg_head_build(priv, ZXDH_RSS_HF_SET, &msg);
+		ret = zxdh_hw_config_proc(dev, &msg, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+			return -ret;
+		}
+
+		priv->rss_init = 1;
+	}
+
+	/* config reta: spread all Rx queues round-robin over the table */
+	priv->reta_idx_n = ZXDH_RETA_SIZE;
+	if (!priv->reta_idx) {
+		priv->reta_idx = rte_zmalloc(NULL, ZXDH_RETA_SIZE * sizeof(uint16_t), 4);
+		if (priv->reta_idx == NULL) {
+			PMD_DRV_LOG(ERR, "alloc memory fail");
+			return -1;
+		}
+	}
+	for (i = 0; i < ZXDH_RETA_SIZE; i++)
+		priv->reta_idx[i] = i % dev_data->nb_rx_queues;
+
+	/* hw config reta - translated to physical channel numbers */
+	msg_head_build(priv, ZXDH_RSS_RETA_SET, &msg);
+	for (i = 0; i < ZXDH_RETA_SIZE; i++)
+		msg.data.zxdh_rss_reta.reta[i] =
+			priv->channel_context[priv->reta_idx[i] * 2].ph_chno;
+
+	ret = zxdh_hw_config_proc(dev, &msg, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "zxdh_hw_config_proc err(%d)", ret);
+		return -ret;
+	}
+
+	return 0;
+}
+/* Swap the two 32-bit halves of *data, converting each half from
+ * little-endian to CPU byte order.
+ */
+static void DataHitolo(uint64_t *data)
+{
+	uint32_t lower = (uint32_t)*data;
+	uint32_t upper = (uint32_t)(*data >> 32);
+
+	*data = ((uint64_t)rte_le_to_cpu_32(lower) << 32) |
+				rte_le_to_cpu_32(upper);
+}
+/* Read one NP statistics counter through the DTB channel. */
+static int zxdh_np_stat_read(uint32_t mode, uint32_t idx, uint32_t *buf)
+{
+	return dpp_dtb_stat_ppu_cnt_get(DEVICE_NO, g_dtb_data.queueid, mode, idx, buf);
+}
+
+/* Read a 128-bit pkts/bytes drop-counter pair at 'idx' and convert both
+ * values to CPU order. Returns the dpp status, 0 on success.
+ */
+static int zxdh_np_stat_read_drop(uint32_t idx, uint64_t *pkts, uint64_t *bytes)
+{
+	struct zxdh_hw_stats_data stats_data;
+	int ret;
+
+	memset(&stats_data, 0, sizeof(stats_data));
+	ret = zxdh_np_stat_read(STAT_128_MODE, idx, (uint32_t *)&stats_data);
+	if (ret)
+		return ret;
+
+	*pkts = stats_data.n_pkts_dropped;
+	*bytes = stats_data.n_bytes_dropped;
+	DataHitolo(pkts);
+	DataHitolo(bytes);
+	return 0;
+}
+
+/* PF path: read broadcast, MTU-drop and meter-drop counters for this
+ * port directly from the NP. Returns 0 on success, dpp status otherwise.
+ */
+static int zxdh_hw_np_stats_pf(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *mtu_stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint32_t stats_id = vport_to_pf_vfid(hw->vport);
+	int ret;
+
+	/* 64-bit broadcast packet counters */
+	ret = zxdh_np_stat_read(STAT_64_MODE, stats_id + DPP_BROAD_STATS_EGRESS_BASE,
+				(uint32_t *)&mtu_stats->np_tx_broadcast);
+	if (ret)
+		return ret;
+	DataHitolo(&mtu_stats->np_tx_broadcast);
+
+	ret = zxdh_np_stat_read(STAT_64_MODE, stats_id + DPP_BROAD_STATS_INGRESS_BASE,
+				(uint32_t *)&mtu_stats->np_rx_broadcast);
+	if (ret)
+		return ret;
+	DataHitolo(&mtu_stats->np_rx_broadcast);
+
+	/* 128-bit MTU and meter drop counters */
+	ret = zxdh_np_stat_read_drop(stats_id + DPP_MTU_STATS_EGRESS_BASE,
+				&mtu_stats->np_tx_mtu_drop_pkts,
+				&mtu_stats->np_tx_mtu_drop_bytes);
+	if (ret)
+		return ret;
+
+	ret = zxdh_np_stat_read_drop(stats_id + DPP_MTU_STATS_INGRESS_BASE,
+				&mtu_stats->np_rx_mtu_drop_pkts,
+				&mtu_stats->np_rx_mtu_drop_bytes);
+	if (ret)
+		return ret;
+
+	ret = zxdh_np_stat_read_drop(stats_id + DPP_MTR_STATS_EGRESS_BASE,
+				&mtu_stats->np_tx_mtr_drop_pkts,
+				&mtu_stats->np_tx_mtr_drop_bytes);
+	if (ret)
+		return ret;
+
+	return zxdh_np_stat_read_drop(stats_id + DPP_MTR_STATS_INGRESS_BASE,
+				&mtu_stats->np_rx_mtr_drop_pkts,
+				&mtu_stats->np_rx_mtr_drop_bytes);
+}
+
+
+/* VF path: request the NP statistics from the PF over the bar message
+ * channel and copy the reply into 'np_stats'. Returns 0 on success.
+ */
+static int zxdh_hw_np_stats_vf(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_reply_info reply = {0};
+	struct zxdh_msg_info msg = {0};
+	int ret;
+
+	msg_head_build(hw, ZXDH_GET_NP_STATS, &msg);
+	ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info),
+				&reply, sizeof(reply));
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to send msg: port 0x%x msg type ZXDH_PORT_METER_STAT_GET",
+			hw->vport.vport);
+		return -1;
+	}
+
+	memcpy(np_stats, &reply.reply_body.hw_stats, sizeof(struct zxdh_hw_np_stats));
+	return 0;
+}
+
+
+
+/* Fetch the NP statistics, via the direct PF path or the PF-proxied VF
+ * path, and trace them. Returns the path's status, 0 on success.
+ */
+int zxdh_hw_np_stats(struct rte_eth_dev *dev,  struct zxdh_hw_np_stats *np_stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int ret;
+
+	if (hw->is_pf) {
+		ret = zxdh_hw_np_stats_pf(dev, np_stats);
+		PMD_DRV_LOG(DEBUG, "zxdh_hw_stats_pf");
+	} else {
+		ret = zxdh_hw_np_stats_vf(dev, np_stats);
+		PMD_DRV_LOG(DEBUG, "zxdh_hw_stats_vf");
+	}
+	/* cast + %llx: %lx is the wrong specifier for uint64_t on 32-bit targets */
+	PMD_DRV_LOG(DEBUG, "stats np_rx_broadcast      = %08llx",
+		(unsigned long long)np_stats->np_rx_broadcast);
+	PMD_DRV_LOG(DEBUG, "stats np_tx_broadcast      = %08llx",
+		(unsigned long long)np_stats->np_tx_broadcast);
+	PMD_DRV_LOG(DEBUG, "stats np_rx_mtu_drop_pkts  = %08llx",
+		(unsigned long long)np_stats->np_rx_mtu_drop_pkts);
+	PMD_DRV_LOG(DEBUG, "stats np_tx_mtu_drop_pkts  = %08llx",
+		(unsigned long long)np_stats->np_tx_mtu_drop_pkts);
+	PMD_DRV_LOG(DEBUG, "stats np_rx_mtu_drop_bytes = %08llx",
+		(unsigned long long)np_stats->np_rx_mtu_drop_bytes);
+	PMD_DRV_LOG(DEBUG, "stats np_tx_mtu_drop_bytes = %08llx",
+		(unsigned long long)np_stats->np_tx_mtu_drop_bytes);
+	PMD_DRV_LOG(DEBUG, "stats np_rx_mtr_drop_pkts  = %08llx",
+		(unsigned long long)np_stats->np_rx_mtr_drop_pkts);
+	PMD_DRV_LOG(DEBUG, "stats np_tx_mtr_drop_pkts  = %08llx",
+		(unsigned long long)np_stats->np_tx_mtr_drop_pkts);
+	PMD_DRV_LOG(DEBUG, "stats np_rx_mtr_drop_bytes = %08llx",
+		(unsigned long long)np_stats->np_rx_mtr_drop_bytes);
+	PMD_DRV_LOG(DEBUG, "stats np_tx_mtr_drop_bytes = %08llx",
+		(unsigned long long)np_stats->np_tx_mtr_drop_bytes);
+	return ret;
+}
+
+
+/* dev_ops: read the firmware version from the RISC-V agent (PF only).
+ * Copies at most min(fw_size, ZXDH_FWVERS_LEN) bytes including the NUL.
+ * Returns 0 on success, -EOPNOTSUPP on a VF, or the channel status.
+ */
+int zxdh_dev_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	struct zxdh_msg_reply_info reps = {0};
+	struct zxdh_pci_bar_msg in = {0};
+	char fw_ver[ZXDH_FWVERS_LEN] = {0};
+	int ret;	/* fix: status held in uint32_t hid negative errors */
+
+	if (!hw->is_pf)
+		return -EOPNOTSUPP;
+
+	ctrl_msg_build(hw, ZXDH_FLASH_FIR_VERSION_GET, &msg);
+
+	in.payload_addr = &msg;
+	in.payload_len = sizeof(msg);
+
+	struct zxdh_msg_recviver_mem rsp_data = {
+			.recv_buffer = (void *)&reps,
+			.buffer_len = sizeof(struct zxdh_msg_reply_info),
+	};
+
+	ret = zxdh_send_command_toriscv(dev, &in, MODULE_FLASH, &rsp_data);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "fw_version_get failed: %d", ret);
+		return ret;
+	}
+
+	memcpy(fw_ver, reps.reply_body.flash_msg.firmware_version, ZXDH_FWVERS_LEN);
+	/* guarantee NUL-termination before treating fw_ver as a string */
+	fw_ver[ZXDH_FWVERS_LEN - 1] = '\0';
+
+	/* fix: honor the caller-supplied buffer size (fw_size was ignored) */
+	if (fw_size > ZXDH_FWVERS_LEN)
+		fw_size = ZXDH_FWVERS_LEN;
+	snprintf(fw_version, fw_size, "%s", fw_ver);
+
+	return 0;
+}
+/* dev_ops: dump private driver state (currently the firmware version)
+ * to 'file'. Returns 0 on success, -EINVAL for a NULL file.
+ */
+int zxdh_dev_priv_dump(struct rte_eth_dev *dev, FILE *file)
+{
+	/* size the buffer by ZXDH_FWVERS_LEN: a fixed char[32] could be
+	 * smaller than what zxdh_dev_fw_version_get() writes
+	 */
+	char fw_version[ZXDH_FWVERS_LEN] = {0};
+	int ret;
+
+	/* validate the output file before doing any work */
+	if (file == NULL) {
+		PMD_DRV_LOG(ERR, "fail to dump file ");
+		return -EINVAL;
+	}
+
+	/* fix: check the return value instead of the always-true
+	 * 'fw_version != NULL' test on an array
+	 */
+	ret = zxdh_dev_fw_version_get(dev, fw_version, sizeof(fw_version));
+	if (ret == 0) {
+		PMD_DRV_LOG(INFO, "fw_version:%s", fw_version);
+		fprintf(file, "fw_version:%s", fw_version);
+	}
+	return 0;
+}
+
+
+/* Read module EEPROM bytes described by 'query' via the RISC-V agent and
+ * copy them into 'data' (when non-NULL). Returns the number of bytes the
+ * agent reports as read.
+ * NOTE(review): the return type is uint32_t but the error paths return a
+ * channel status / -EINVAL, which wrap to large unsigned values; callers
+ * only compare against the requested length, so this "works", but an int
+ * return would be cleaner - confirm before changing the signature.
+ */
+static uint32_t zxdh_en_module_eeprom_read(struct rte_eth_dev *dev,
+					struct zxdh_en_module_eeprom_param *query, uint8_t *data)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	struct zxdh_msg_reply_info reps = {0};
+	struct zxdh_pci_bar_msg in = {0};
+	uint32_t ret = 0;
+
+	ctrl_msg_build(hw, ZXDH_MAC_MODULE_EEPROM_READ, &msg);
+
+	/* forward the i2c address/bank/page/offset/length window verbatim */
+	msg.data.module_eeprom_msg.i2c_addr = query->i2c_addr;
+	msg.data.module_eeprom_msg.bank = query->bank;
+	msg.data.module_eeprom_msg.page = query->page;
+	msg.data.module_eeprom_msg.offset = query->offset;
+	msg.data.module_eeprom_msg.length = query->length;
+
+	in.payload_addr = &msg;
+	in.payload_len = sizeof(msg);
+
+	struct zxdh_msg_recviver_mem rsp_data = {
+			.recv_buffer = (void *)&reps,
+			.buffer_len = sizeof(struct zxdh_msg_reply_info),
+	};
+
+	ret = zxdh_send_command_toriscv(dev, &in, MODULE_FLASH, &rsp_data);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "zxdh_send_command_to_riscv_mac failed, err: %d\n", ret);
+		return ret;
+	}
+
+	struct zxdh_msg_reply_body *ack_msg =
+				&(((struct zxdh_msg_reply_info *)rsp_data.recv_buffer)->reply_body);
+
+	if (ack_msg->flag == ZXDH_REPS_FAIL) {
+		PMD_DRV_LOG(ERR, "zxdh_send_command_to_riscv_mac reply msg failed");
+		return -EINVAL;
+	}
+
+	/* data may be NULL when the caller only probes the read length */
+	if (data)
+		memcpy(data, ack_msg->module_eeprom_msg.data, ack_msg->module_eeprom_msg.length);
+
+	return ack_msg->module_eeprom_msg.length;
+}
+
+/* dev_ops: report the optical module type and EEPROM length, derived
+ * from the SFF identifier byte (and revision byte for QSFP+/QSFP28).
+ * PF only. Returns 0 on success, -EOPNOTSUPP/-EIO/-EINVAL on failure.
+ */
+int zxdh_dev_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *modinfo)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_en_module_eeprom_param query = {0};
+	uint8_t ident[2] = {0};
+
+	if (!hw->is_pf)
+		return -EOPNOTSUPP;
+
+	/* identifier byte 0 and revision byte 1 of page 0 */
+	query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+	query.page = 0;
+	query.offset = 0;
+	query.length = 2;
+
+	if (zxdh_en_module_eeprom_read(dev, &query, ident) != query.length) {
+		PMD_DRV_LOG(ERR, "zxdh_en_module_eeprom_read failed!\n");
+		return -EIO;
+	}
+
+	switch (ident[0]) {
+	case ZXDH_MODULE_ID_SFP:
+		modinfo->type = RTE_ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+		break;
+	case ZXDH_MODULE_ID_QSFP:
+		modinfo->type = RTE_ETH_MODULE_SFF_8436;
+		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
+		break;
+	case ZXDH_MODULE_ID_QSFP_PLUS:
+	case ZXDH_MODULE_ID_QSFP28:
+		/* revision < 3 still uses the SFF-8436 layout */
+		if (ident[1] < 3) {
+			modinfo->type = RTE_ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
+		} else {
+			modinfo->type = RTE_ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
+		}
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "can not recognize module identifier 0x%x!\n", ident[0]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* dev_ops: read a window of the optical module EEPROM into info->data,
+ * splitting the request into page/address-boundary-sized chunks.
+ * Returns 0 on success, -EINVAL/-EIO on failure.
+ */
+int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info)
+{
+	struct zxdh_en_module_eeprom_param query = {0};
+	uint32_t offset = info->offset;
+	uint32_t length = info->length;
+	uint32_t offset_boundary = 0;
+	uint32_t total_read_bytes = 0;
+	uint32_t read_bytes = 0;
+	uint8_t identifier;
+	/* fix: 'data' was NULL and never pointed at the caller's buffer,
+	 * so every chunk read dereferenced a null pointer
+	 */
+	uint8_t *data = info->data;
+
+	if (!info->length)
+		return -EINVAL;
+
+	/* fix: memset(&data, 0, info->length) cleared info->length bytes of
+	 * the *stack* starting at the pointer variable; clear the buffer
+	 */
+	memset(data, 0, info->length);
+
+	/* probe the module identifier first to pick the addressing scheme */
+	query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+	query.bank = 0;
+	query.page = 0;
+	query.offset = 0;
+	query.length = 1;
+	read_bytes = zxdh_en_module_eeprom_read(dev, &query, &identifier);
+	if (read_bytes != query.length) {
+		PMD_DRV_LOG(ERR, "zxdh_en_module_eeprom_read failed!\n");
+		return -EIO;
+	}
+
+	while (total_read_bytes < info->length) {
+		if (identifier == ZXDH_MODULE_ID_SFP) {
+			/* SFF-8472: A0h covers 0-255, A2h covers 256-511 */
+			if (offset < 256) {
+				query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+				query.page = 0;
+				query.offset = offset;
+			} else {
+				query.i2c_addr = SFF_I2C_ADDRESS_HIGH;
+				query.page = 0;
+				query.offset = offset - 256;
+			}
+			offset_boundary = (query.offset < 128) ? 128 : 256;
+			query.length = ((query.offset + length) > offset_boundary) ?
+						(offset_boundary - query.offset) : length;
+		} else if (identifier == ZXDH_MODULE_ID_QSFP ||
+				identifier == ZXDH_MODULE_ID_QSFP_PLUS ||
+				identifier == ZXDH_MODULE_ID_QSFP28) {
+			/* SFF-8436/8636: flat 0-255, then 128-byte pages */
+			query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+			if (offset < 256) {
+				query.page = 0;
+				query.offset = offset;
+			} else {
+				query.page = (offset - 256) / 128 + 1;
+				query.offset = offset - 128 * query.page;
+			}
+			offset_boundary = (query.offset < 128) ? 128 : 256;
+			query.length = ((query.offset + length) > offset_boundary) ?
+						(offset_boundary - query.offset) : length;
+		} else {
+			PMD_DRV_LOG(ERR, "can not recognize module identifier 0x%x!\n", identifier);
+			return -EINVAL;
+		}
+
+		read_bytes = zxdh_en_module_eeprom_read(dev, &query, data + total_read_bytes);
+		if (read_bytes != query.length) {
+			PMD_DRV_LOG(ERR, "zxdh_en_module_eeprom_read failed!\n");
+			return -EIO;
+		}
+
+		total_read_bytes += read_bytes;
+		offset += read_bytes;
+		length -= read_bytes;
+	}
+
+	return 0;
+}
+
+
+/* Dump the driver tables of every valid ethdev port. */
+void dump_all_tables(void)
+{
+	uint16_t port;
+
+	for (port = 0; port < RTE_MAX_ETHPORTS; ++port) {
+		if (!rte_eth_dev_is_valid_port(port))
+			continue;
+
+		/* fix: dropped the dead 'dev == NULL' check - the address of
+		 * an array element is never NULL
+		 */
+		zxdh_dump_tables(&rte_eth_devices[port]);
+	}
+}
+/*
+ * Send a message from a VF to its PF over the bar channel.
+ *
+ *  *msg_req   : point to zxdh_msg_info
+ *  *reply     : optional caller buffer for the reply (reply_len bytes);
+ *               when NULL an internal zxdh_msg_reply_info is used.
+ * return
+ *  >0: msg channel failed
+ *  0 : msg channel ok and msg process ok
+ * -1 : msg reply incorrect
+ * -2 : msg reply correct, but process incorrect
+ */
+int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev,  void *msg_req,
+			uint16_t msg_req_len, void *reply, uint16_t reply_len)
+{
+	struct zxdh_hw *hw  = dev->data->dev_private;
+	int32_t retval = 0;
+	struct zxdh_pci_bar_msg in = {0};
+	struct zxdh_msg_recviver_mem result = {0};
+	struct zxdh_msg_reply_info reply_info = {0};
+
+	if (reply) {
+		/* fix: the missing 'struct' keyword made this expression fail
+		 * to compile whenever RTE_ASSERT is enabled
+		 */
+		RTE_ASSERT(reply_len < sizeof(struct zxdh_msg_reply_info));
+		result.recv_buffer  = reply;
+		result.buffer_len = reply_len;
+	} else {
+		result.recv_buffer = &reply_info;
+		result.buffer_len = sizeof(reply_info);
+	}
+
+	struct zxdh_msg_info  *tmp = (struct zxdh_msg_info *)msg_req;
+
+	PMD_MSG_LOG(INFO, "  send bar msg to pf  msg %d .\n", tmp->msg_head.msg_type);
+
+	struct zxdh_msg_reply_head *reply_head =
+				&(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_head);
+	struct zxdh_msg_reply_body *reply_body =
+				&(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_body);
+
+	in.virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);
+	in.payload_addr = msg_req;
+	in.payload_len = msg_req_len;
+	in.src = MSG_CHAN_END_VF;
+	in.dst = MSG_CHAN_END_PF;
+	in.module_id = MODULE_BAR_MSG_TO_PF;
+
+	in.src_pcieid = hw->pcie_id;
+	in.dst_pcieid = hw->pfinfo.pcieid;
+	PMD_MSG_LOG(INFO, "vf[pcie 0x%x] send bar msg to pf[pcie 0x%x] vfunc [0x%x] vport [0x%x] vfid[%d].\n",
+		in.src_pcieid, in.dst_pcieid, hw->vport.vfid,
+		hw->vport.vport, vport_to_vfid(hw->vport));
+
+	retval = zxdh_bar_chan_sync_msg_send(&in, &result);
+	if (retval != BAR_MSG_OK) {
+		PMD_MSG_LOG(ERR,
+			"vf[%d] send bar msg to pf failed.retval %d\n", hw->vport.vfid, retval);
+		return retval;
+	}
+	/* channel-level reply check, then message-processing status check */
+	if (reply_head->flag != MSG_REPS_OK) {
+		PMD_MSG_LOG(ERR, "vf[%d] get pf reply failed: reply_head flag : 0x%x(0xff is OK).replylen %d",
+				hw->vport.vfid, reply_head->flag, reply_head->reps_len);
+		return -1;
+	}
+	if (reply_body->flag != ZXDH_REPS_SUCC) {
+		PMD_MSG_LOG(ERR, "vf[%d] msg processing failed\n", hw->vfid);
+		return -2;
+	}
+
+	PMD_DRV_LOG(INFO, "vf[%d] get pf reply  OK: reply_head flag : 0x%x(0x%x is OK).replylen %d",
+			hw->vport.vfid, reply_head->flag, ZXDH_REPS_SUCC, reply_head->reps_len);
+	return 0;
+}
+/*
+ * Push a bar-channel message from the PF to the given VF and check the
+ * channel-level reply flag. Returns BAR_MSG_OK (0) on success, the
+ * channel status on send failure, or -1 on a bad reply.
+ */
+int zxdh_pf_send_msg_to_vf(struct rte_eth_dev  *dev, int vf_id,
+			uint8_t *content, uint16_t content_len)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_reply_info reply = {0};
+	struct zxdh_pci_bar_msg in = {
+		.virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] +
+				ZXDH_MSG_CHAN_PFVFSHARE_OFFSET),
+		.payload_addr = content,
+		.payload_len = content_len,
+		.src = MSG_CHAN_END_PF,
+		.dst = MSG_CHAN_END_VF,
+		.module_id = MODULE_BAR_MSG_TO_VF,
+		.src_pcieid = hw->pcie_id,
+		.dst_pcieid = hw->vfinfo[vf_id].pcieid,
+	};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = &reply,
+		.buffer_len = sizeof(struct zxdh_msg_reply_info),
+	};
+	int32_t rc;
+
+	PMD_MSG_LOG(INFO, " pf 0x%x send bar msg to vf[0x%x].\n", in.src_pcieid, in.dst_pcieid);
+
+	rc = zxdh_bar_chan_sync_msg_send(&in, &result);
+	if (rc != BAR_MSG_OK) {
+		PMD_MSG_LOG(ERR, "send bar msg to vf[%d]  failed.\n", vf_id);
+		return rc;
+	}
+
+	PMD_MSG_LOG(DEBUG, "pf send bar msg to vf[%d] reply_head flag:0x%x(0xff is OK). reply body 0x%x",
+			vf_id, reply.reply_head.flag, reply.reply_body.reply_data[0]);
+	if (reply.reply_head.flag != MSG_REPS_OK) {
+		PMD_MSG_LOG(ERR, "vf[%d] get pf recv msg failed.\n", vf_id);
+		return -1;
+	}
+	return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
new file mode 100644
index 0000000000..3e46275bd0
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -0,0 +1,159 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_ETHDEV_OPS_H_
+#define _ZXDH_ETHDEV_OPS_H_
+
+#include "zxdh_table_drv.h"
+#include "zxdh_flow.h"
+#include "zxdh_mtr.h"
+#include "rte_memzone.h"
+#include "zxdh_msg_chan.h"
+#include "zxdh_ethdev.h"
+
+#define DEVICE_NO 0
+#define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET+0x1000)
+#define INGRESS 0
+#define EGRESS 1
+
+#define SFF_I2C_ADDRESS_LOW     (0x50)
+#define SFF_I2C_ADDRESS_HIGH    (0x51)
+
+/* Transceiver module identifier codes (byte 0 of the module EEPROM);
+ * values appear to follow the SFF-8024 "Identifier" table — confirm.
+ */
+enum zxdh_module_id {
+	ZXDH_MODULE_ID_SFP              = 0x3,
+	ZXDH_MODULE_ID_QSFP             = 0xC,
+	ZXDH_MODULE_ID_QSFP_PLUS        = 0xD,
+	ZXDH_MODULE_ID_QSFP28           = 0x11,
+	ZXDH_MODULE_ID_QSFP_DD          = 0x18,
+	ZXDH_MODULE_ID_OSFP             = 0x19,
+	ZXDH_MODULE_ID_DSFP             = 0x1B,
+};
+
+/* Drop counters for traffic exceeding the configured MTU. */
+struct zxdh_mtu_stats {
+	uint64_t n_pkts_dropped;  /* packets dropped */
+	uint64_t n_bytes_dropped; /* total bytes of the dropped packets */
+};
+
+/* UDP tunnel port and VLAN offload callbacks. */
+int zxdh_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel);
+int zxdh_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel);
+int zxdh_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, uint16_t tpid);
+int zxdh_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+/* RSS configuration, hash and redirection-table callbacks. */
+int zxdh_rss_configure(struct rte_eth_dev *dev);
+int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
+int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
+int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size);
+int zxdh_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size);
+/* MAC filtering, promiscuous/allmulticast mode, MTU and link control. */
+int zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+		uint32_t index, uint32_t vmdq);
+int zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
+void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int zxdh_dev_promiscuous_enable(struct rte_eth_dev *dev);
+int zxdh_dev_promiscuous_disable(struct rte_eth_dev *dev);
+int zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev);
+int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev);
+int zxdh_dev_mtu_set(struct rte_eth_dev *dev, uint16_t new_mtu);
+int zxdh_dev_set_link_up(struct rte_eth_dev *dev);
+int zxdh_dev_set_link_down(struct rte_eth_dev *dev);
+int zxdh_vlan_filter_set(__rte_unused struct rte_eth_dev *dev, __rte_unused uint16_t vlan_id,
+		__rte_unused int32_t on);
+int zxdh_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
+
+/* Device information: firmware version, private dump, module EEPROM. */
+int  zxdh_dev_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size);
+int zxdh_dev_priv_dump(struct rte_eth_dev *dev, FILE *file);
+int zxdh_dev_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *modinfo);
+int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
+void dump_all_tables(void);
+/* vport/vfid translation helpers and PF<->VF message-channel helpers. */
+uint16_t vport_to_hash_index(union VPORT v);
+struct rte_eth_dev *get_dev_by_vfid(uint16_t vfid);
+int zxdh_pf_send_msg_to_vf(struct rte_eth_dev *dev, int vf_id,
+		uint8_t *content, uint16_t content_len);
+int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *content,
+		uint16_t content_len, void *reply, uint16_t reply_len);
+uint16_t vport_to_vfid(union VPORT v);
+uint16_t vport_to_pf_vfid(union VPORT v);
+int logic_qid_to_vqm_phyqid(struct rte_eth_dev *dev, int16_t qid);
+void msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, struct zxdh_msg_info *msg_info);
+void ctrl_msg_build(struct zxdh_hw *hw, enum zxdh_agent_opc opcode, struct zxdh_msg_info *msg_info);
+int zxdh_dev_unicast_set(struct zxdh_hw *hw, uint16_t vport_t, bool enable);
+int zxdh_dev_multicast_set(struct zxdh_hw *hw, uint16_t vport_t, bool enable);
+int zxdh_hw_np_stats(struct rte_eth_dev *dev,  struct zxdh_hw_np_stats *mtu_stats);
+/* Shared data between primary and secondary processes. */
+struct zxdh_shared_data {
+	rte_spinlock_t lock; /* Global spinlock for primary and secondary processes. */
+	int init_done;       /* Whether primary has done initialization. */
+	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+
+	int npsdk_init_done;        /* NP SDK one-time initialization flag. */
+	uint32_t  dev_refcnt;       /* Number of devices referencing this data. */
+	struct zxdh_dtb_shared_data *dtb_data; /* Shared DTB queue/memzone state. */
+	struct rte_mempool *flow_mp;           /* Pool of zxdh_flow objects. */
+	struct zxdh_flow  *cur_flow;           /* Flow staged by validate/create. */
+	struct FLOW_LIST flow_list; /* double link list */
+
+	/* Meter object pools and their bookkeeping lists. */
+	struct rte_mempool *mtr_mp;
+	struct rte_mempool *mtr_profile_mp;
+	struct rte_mempool *mtr_policy_mp;
+	struct zxdh_mtr_profile_list meter_profile_list;
+	struct zxdh_mtr_list mtr_list; /* MTR list. */
+	struct zxdh_mtr_policy_list mtr_policy_list;
+};
+
+/* Describes one module-EEPROM read window (bank/page/offset/length). */
+struct zxdh_en_module_eeprom_param {
+	uint8_t i2c_addr; /* SFF_I2C_ADDRESS_LOW or SFF_I2C_ADDRESS_HIGH */
+	uint8_t bank;
+	uint8_t page;
+	uint8_t offset;
+	uint8_t length;   /* number of bytes to transfer */
+};
+
+/* Per-process data structure, not visible to other processes. */
+struct zxdh_local_data {
+	int init_done; /* Whether a secondary has done initialization. */
+};
+
+extern struct zxdh_shared_data *zxdh_shared_data;
+
+/* Memzone sizes used for DTB table dump/config buffers. */
+#ifndef ZXDH_TBL_ERAM_DUMP_SIZE
+#define ZXDH_TBL_ERAM_DUMP_SIZE  (4*1024*1024)
+#endif
+
+/* NOTE(review): the guard tests DPU_DTB_TABLE_BULK_DDR_DUMP_SIZE but defines
+ * DTB_TBL_DDR_DUMP_SIZE — the mismatch defeats the redefinition guard; confirm
+ * which name is intended.
+ */
+#ifndef DPU_DTB_TABLE_BULK_DDR_DUMP_SIZE
+#define DTB_TBL_DDR_DUMP_SIZE  (64*1024*1024)
+#endif
+
+#ifndef ZXDH_TBL_ZCAM_DUMP_SIZE
+#define ZXDH_TBL_ZCAM_DUMP_SIZE  (5*1024*1024)
+#endif
+
+#ifndef DPU_DTB_TABLE_BULK_ETCAM_DUMP_SIZE
+#define DPU_DTB_TABLE_BULK_ETCAM_DUMP_SIZE  (4*1024*1024)
+#endif
+
+#define DPU_DTB_TABLE_CONF_SIZE  (32*(16+16*1024))
+#define DPU_DTB_TABLE_DUMP_SIZE  (32*(16+16*1024))
+#define DPU_MAX_PF_COUNT 4
+#define DPU_MAX_BASE_DTB_TABLE_COUNT 30
+
+/* One named memzone holding a bulk dump of a single SDT table. */
+struct zxdh_dtb_bulk_dump_info {
+	const char *mz_name;
+	uint32_t mz_size;
+	uint32_t sdt_no;        /** <@brief sdt no 0~255 */
+	const struct rte_memzone *mz;
+};
+
+/* DTB channel state shared across ports: queue binding, interrupt vector
+ * and the memzones backing table config/dump transfers.
+ */
+struct zxdh_dtb_shared_data {
+	int init_done;  /* one-time DTB initialization flag */
+	char name[32];
+	uint16_t queueid;
+	uint16_t vport;
+	uint32_t vector;
+	const struct rte_memzone *dtb_table_conf_mz;
+	const struct rte_memzone *dtb_table_dump_mz;
+	const struct rte_memzone *dtb_table_bulk_dump_mz[DPU_MAX_BASE_DTB_TABLE_COUNT];
+	struct rte_eth_dev *bind_device; /* port that owns the DTB queue */
+	uint32_t dev_refcnt;
+};
+#endif /* _ZXDH_ETHDEV_OPS_H_ */
diff --git a/drivers/net/zxdh/zxdh_flow.c b/drivers/net/zxdh/zxdh_flow.c
new file mode 100644
index 0000000000..73e986933b
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.c
@@ -0,0 +1,973 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_bitmap.h>
+
+
+#include "zxdh_logs.h"
+#include "zxdh_flow.h"
+#include "dpp_apt_se_api.h"
+#include "zxdh_table_drv.h"
+#include "zxdh_ethdev_ops.h"
+
+
+/* Parser limits and VLAN TCI sub-field masks. */
+#define ZXDH_IPV6_FRAG_HEADER	44
+#define ZXDH_TENANT_ARRAY_NUM	3
+#define ZXDH_VLAN_TCI_MASK	0xFFFF
+#define ZXDH_VLAN_PRI_MASK	0xE000
+#define ZXDH_VLAN_CFI_MASK	0x1000
+#define ZXDH_VLAN_VID_MASK	0x0FFF
+
+/* Forward declarations for the rte_flow driver callbacks defined below. */
+static int zxdh_flow_validate(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+static struct rte_flow *zxdh_flow_create(struct rte_eth_dev *dev,
+					 const struct rte_flow_attr *attr,
+					 const struct rte_flow_item pattern[],
+					 const struct rte_flow_action actions[],
+					 struct rte_flow_error *error);
+static int zxdh_flow_destroy(struct rte_eth_dev *dev,
+				 struct rte_flow *flow,
+				 struct rte_flow_error *error);
+static int zxdh_flow_flush(struct rte_eth_dev *dev,
+			   struct rte_flow_error *error);
+static int zxdh_flow_query(struct rte_eth_dev *dev,
+			   struct rte_flow *flow,
+			   const struct rte_flow_action *actions,
+			   void *data, struct rte_flow_error *error);
+
+struct zxdh_flow *get_dhflow(struct rte_eth_dev *dev, struct zxdh_rte_flow *flow);
+
+/* Debug dump helpers. */
+static void flow_item_dump(const struct rte_flow_item *item);
+static void offlow_key_dump(struct fd_flow_key *key, struct fd_flow_key *key_mask);
+static void offlow_result_dump(struct fd_flow_result *res);
+
+
+
+/* rte_flow driver ops table returned by zxdh_flow_ops_get(). */
+const struct rte_flow_ops zxdh_flow_ops = {
+	.validate = zxdh_flow_validate,
+	.create = zxdh_flow_create,
+	.destroy = zxdh_flow_destroy,
+	.flush = zxdh_flow_flush,
+	.query = zxdh_flow_query,
+};
+
+/* Validate the flow attributes: only ingress rules with default priority and
+ * a group id below MAX_GROUP are accepted; the group id is recorded in @flow.
+ * Returns 0 on success, a negative errno (with @error filled) otherwise.
+ */
+static int
+zxdh_flow_parse_attr(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr,
+			 struct rte_flow_error *error, struct zxdh_flow *flow)
+{
+	if (attr->egress)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				attr, "Not support egress.");
+
+	if (attr->priority)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				attr, "Not support priority.");
+
+	if (attr->group >= MAX_GROUP)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				attr, "Not support group.");
+
+	flow->group = attr->group;
+	return 0;
+}
+
+/* Append "<what><formatted MAC>" to print_buf, advancing *cur_len. */
+static inline void
+print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr,
+		 char print_buf[], int buf_size, int *cur_len)
+{
+	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+
+	rte_ether_format_addr(mac_str, sizeof(mac_str), eth_addr);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, mac_str);
+}
+
+
+
+/* Pretty-print one rte_flow pattern item (ETH/VLAN/IPV4/IPV6/L4) to the
+ * driver log.  Debug aid only; unknown item types are reported and skipped.
+ * NOTE(review): every case bails out only when BOTH spec and mask are NULL;
+ * an item with exactly one of them NULL is dereferenced below — confirm
+ * callers never pass such items.
+ */
+static void flow_item_dump(const struct rte_flow_item *item)
+{
+	char print_buf[MAX_STRING_LEN];
+	int buf_size = MAX_STRING_LEN;
+	int cur_len = 0;
+
+	if (!item)
+		return;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+	{
+		const struct rte_flow_item_eth *eth_spec =
+						(const struct rte_flow_item_eth *)item->spec;
+		const struct rte_flow_item_eth *eth_mask =
+						(const struct rte_flow_item_eth *)item->mask;
+
+		if (!eth_spec && !eth_mask) {
+			PMD_DRV_LOG(INFO, "eth spec and mask are  NULL ");
+			return;
+		}
+		print_ether_addr("spec:	src=", &eth_spec->src,
+				 print_buf, buf_size, &cur_len);
+		print_ether_addr(" - dst=", &eth_spec->dst,
+				 print_buf, buf_size, &cur_len);
+		MKDUMPSTR(print_buf, buf_size, cur_len, " - type=0x%04x\n", eth_spec->type);
+
+		print_ether_addr("mask:	src=", &eth_mask->src,
+				 print_buf, buf_size, &cur_len);
+		print_ether_addr(" - dst=", &eth_mask->dst,
+				 print_buf, buf_size, &cur_len);
+		MKDUMPSTR(print_buf, buf_size, cur_len, " - type=0x%04x", eth_mask->type);
+		PMD_DRV_LOG(INFO, "ITEM	ETH:%s", print_buf);
+		break;
+	}
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+	{
+		const struct rte_flow_item_vlan *spec =
+						(const struct rte_flow_item_vlan *)item->spec;
+		const struct rte_flow_item_vlan *mask =
+						(const struct rte_flow_item_vlan *)item->mask;
+
+		if (!spec && !mask) {
+			PMD_DRV_LOG(INFO, "IPV4 spec and mask	 are  NULL ");
+			return;
+		}
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+				"spec: tci=0x%x  inner type=0x%x more_vlan=%u\n",
+				spec->tci, spec->inner_type, spec->has_more_vlan);
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+				"mask: tci=0x%x  inner type=0x%x  more_vlan=%u ",
+				mask->tci, mask->inner_type, mask->has_more_vlan);
+
+		PMD_DRV_LOG(INFO, "ITEM	VLAN :%s", print_buf);
+		break;
+	}
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+	{
+		const struct rte_flow_item_ipv4 *spec =
+					(const struct rte_flow_item_ipv4 *)item->spec;
+		const struct rte_flow_item_ipv4 *mask =
+					(const struct rte_flow_item_ipv4 *)item->mask;
+
+		if (!spec && !mask) {
+			PMD_DRV_LOG(INFO, "IPV4 spec and mask are  NULL ");
+			return;
+		}
+
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			"spec: src ip =0x%08x ("IPV4_BYTES_FMT")",
+			spec->hdr.src_addr,
+			(uint8_t)(((spec->hdr.src_addr) >> 24) & 0xFF),
+			(uint8_t)(((spec->hdr.src_addr) >> 16) & 0xFF),
+			(uint8_t)(((spec->hdr.src_addr) >> 8) & 0xFF),
+			(uint8_t)(((spec->hdr.src_addr)) & 0xFF));
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			" -dst ip =0x%x ("IPV4_BYTES_FMT")",
+			spec->hdr.dst_addr,
+			(uint8_t)(((spec->hdr.dst_addr) >> 24) & 0xFF),
+			(uint8_t)(((spec->hdr.dst_addr) >> 16) & 0xFF),
+			(uint8_t)(((spec->hdr.dst_addr) >> 8) & 0xFF),
+			(uint8_t)(((spec->hdr.dst_addr)) & 0xFF));
+		MKDUMPSTR(print_buf, buf_size, cur_len, " -ip proto =0x%x",
+				spec->hdr.next_proto_id);
+
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			"\nmask:src ip =0x%x ("IPV4_BYTES_FMT")",
+			mask->hdr.src_addr,
+			(uint8_t)(((mask->hdr.src_addr) >> 24) & 0xFF),
+			(uint8_t)(((mask->hdr.src_addr) >> 16) & 0xFF),
+			(uint8_t)(((mask->hdr.src_addr) >> 8) & 0xFF),
+			(uint8_t)(((mask->hdr.src_addr)) & 0xFF));
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			" -dst ip =0x%x ("IPV4_BYTES_FMT")",
+				mask->hdr.dst_addr,
+			(uint8_t)(((mask->hdr.dst_addr) >> 24) & 0xFF),
+			(uint8_t)(((mask->hdr.dst_addr) >> 16) & 0xFF),
+			(uint8_t)(((mask->hdr.dst_addr) >> 8) & 0xFF),
+			(uint8_t)(((mask->hdr.dst_addr)) & 0xFF));
+		MKDUMPSTR(print_buf, buf_size, cur_len, " -ip proto =0x%x",
+				mask->hdr.next_proto_id);
+		PMD_DRV_LOG(INFO, "ITEM IPV4:\n%s", print_buf);
+		break;
+	}
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+	{
+		const struct rte_flow_item_ipv6 *spec =
+						(const struct rte_flow_item_ipv6 *)item->spec;
+		const struct rte_flow_item_ipv6 *mask =
+						(const struct rte_flow_item_ipv6 *)item->mask;
+
+		if (!spec && !mask) {
+			PMD_DRV_LOG(INFO, "IPV6 spec and mask  are  NULL ");
+			return;
+		}
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			"spec: src ip = "IPV6_BYTES_FMT"",
+			(spec->hdr.src_addr)[0], (spec->hdr.src_addr)[1],
+			(spec->hdr.src_addr)[2], (spec->hdr.src_addr)[3],
+			(spec->hdr.src_addr)[4], (spec->hdr.src_addr)[5],
+			(spec->hdr.src_addr)[6], (spec->hdr.src_addr)[7],
+			(spec->hdr.src_addr)[8], (spec->hdr.src_addr)[9],
+			(spec->hdr.src_addr)[10], (spec->hdr.src_addr)[11],
+			(spec->hdr.src_addr)[12], (spec->hdr.src_addr)[13],
+			(spec->hdr.src_addr)[14], (spec->hdr.src_addr)[15]);
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			" -dst ip = "IPV6_BYTES_FMT"",
+			(spec->hdr.dst_addr)[0], (spec->hdr.dst_addr)[1],
+			(spec->hdr.dst_addr)[2], (spec->hdr.dst_addr)[3],
+			(spec->hdr.dst_addr)[4], (spec->hdr.dst_addr)[5],
+			(spec->hdr.dst_addr)[6], (spec->hdr.dst_addr)[7],
+			(spec->hdr.dst_addr)[8], (spec->hdr.dst_addr)[9],
+			(spec->hdr.dst_addr)[10], (spec->hdr.dst_addr)[11],
+			(spec->hdr.dst_addr)[12], (spec->hdr.dst_addr)[13],
+			(spec->hdr.dst_addr)[14], (spec->hdr.dst_addr)[15]);
+		MKDUMPSTR(print_buf, buf_size, cur_len, " -ip proto =0x%x\n", spec->hdr.proto);
+
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			"\nmask:src ip = "IPV6_BYTES_FMT"",
+			(mask->hdr.src_addr)[0], (mask->hdr.src_addr)[1],
+			(mask->hdr.src_addr)[2], (mask->hdr.src_addr)[3],
+			(mask->hdr.src_addr)[4], (mask->hdr.src_addr)[5],
+			(mask->hdr.src_addr)[6], (mask->hdr.src_addr)[7],
+			(mask->hdr.src_addr)[8], (mask->hdr.src_addr)[9],
+			(mask->hdr.src_addr)[10], (mask->hdr.src_addr)[11],
+			(mask->hdr.src_addr)[12], (mask->hdr.src_addr)[13],
+			(mask->hdr.src_addr)[14], (mask->hdr.src_addr)[15]);
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			" -dst ip = "IPV6_BYTES_FMT"",
+			(mask->hdr.dst_addr)[0], (mask->hdr.dst_addr)[1],
+			(mask->hdr.dst_addr)[2], (mask->hdr.dst_addr)[3],
+			(mask->hdr.dst_addr)[4], (mask->hdr.dst_addr)[5],
+			(mask->hdr.dst_addr)[6], (mask->hdr.dst_addr)[7],
+			(mask->hdr.dst_addr)[8], (mask->hdr.dst_addr)[9],
+			(mask->hdr.dst_addr)[10], (mask->hdr.dst_addr)[11],
+			(mask->hdr.dst_addr)[12], (mask->hdr.dst_addr)[13],
+			(mask->hdr.dst_addr)[14], (mask->hdr.dst_addr)[15]);
+		MKDUMPSTR(print_buf, buf_size, cur_len, " -ip proto =0x%x", mask->hdr.proto);
+		PMD_DRV_LOG(INFO, "ITEM	IPV6:\n%s", print_buf);
+		break;
+	}
+	/* All L4 items share the same src/dst port layout at the head of the
+	 * header, so they are dumped through the TCP view.
+	 */
+	case RTE_FLOW_ITEM_TYPE_TCP:
+	case RTE_FLOW_ITEM_TYPE_UDP:
+	case RTE_FLOW_ITEM_TYPE_ICMP:
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+	case RTE_FLOW_ITEM_TYPE_ICMP6:
+	{
+		const struct rte_flow_item_tcp *spec =
+						(const struct rte_flow_item_tcp *)item->spec;
+		const struct rte_flow_item_tcp *mask =
+						(const struct rte_flow_item_tcp *)item->mask;
+
+		if (!spec && !mask) {
+			PMD_DRV_LOG(INFO, "TCP spec and mask are  NULL ");
+			return;
+		}
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			" spec: src port =0x%x dst port =0x%x\n",
+			spec->hdr.src_port, spec->hdr.dst_port);
+		MKDUMPSTR(print_buf, buf_size, cur_len,
+			"\n mask: src port =0x%x dst port =0x%x",
+			mask->hdr.src_port, mask->hdr.dst_port);
+		PMD_DRV_LOG(INFO, "ITEM	L4:\n%s", print_buf);
+		break;
+	}
+	default:
+		PMD_DRV_LOG(INFO, "unsupport type %d", item->type);
+		return;
+	}
+
+	return;
+
+}
+
+/* Append a formatted dump of one fd_flow_key (L2/L3/L4 fields) to print_buf.
+ * NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard — consider renaming.  The uint32_t casts over the ip
+ * byte arrays assume 4-byte alignment of fd_flow_key — confirm.
+ */
+static void __entry_dump(char *print_buf, int buf_size, int *cur_len, struct fd_flow_key *key)
+{
+	print_ether_addr("\nL2\t  dst=", &key->mac_dst, print_buf, buf_size, cur_len);
+	print_ether_addr(" - src=", &key->mac_src, print_buf, buf_size, cur_len);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -eth type=0x%04x", key->ether_type);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -vlan_tci=0x%04x", key->vlan_tci);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -vni=0x%08x\n", key->vni);
+
+	MKDUMPSTR(print_buf, buf_size, *cur_len,
+		"L3\t dstip=0x%08X 0x%08X 0x%08X 0x%08X ("IPV6_BYTES_FMT")\n",
+		*(uint32_t *)key->dst_ip, *((uint32_t *)key->dst_ip + 1),
+		*((uint32_t *)key->dst_ip + 2), *((uint32_t *)key->dst_ip + 3),
+		(key->dst_ip)[0], (key->dst_ip)[1],
+		(key->dst_ip)[2], (key->dst_ip)[3],
+		(key->dst_ip)[4], (key->dst_ip)[5],
+		(key->dst_ip)[6], (key->dst_ip)[7],
+		(key->dst_ip)[8], (key->dst_ip)[9],
+		(key->dst_ip)[10], (key->dst_ip)[11],
+		(key->dst_ip)[12], (key->dst_ip)[13],
+		(key->dst_ip)[14], (key->dst_ip)[15]);
+	MKDUMPSTR(print_buf, buf_size, *cur_len,
+		"\tsrcip=0x%08X 0x%08X 0x%08X 0x%08X ("IPV6_BYTES_FMT")\n",
+		*(uint32_t *)key->src_ip, *((uint32_t *)key->src_ip + 1),
+		*((uint32_t *)key->src_ip + 2), *((uint32_t *)key->src_ip + 3),
+		(key->src_ip)[0], (key->src_ip)[1],
+		(key->src_ip)[2], (key->src_ip)[3],
+		(key->src_ip)[4], (key->src_ip)[5],
+		(key->src_ip)[6], (key->src_ip)[7],
+		(key->src_ip)[8], (key->src_ip)[9],
+		(key->src_ip)[10], (key->src_ip)[11],
+		(key->src_ip)[12], (key->src_ip)[13],
+		(key->src_ip)[14], (key->src_ip)[15]);
+	MKDUMPSTR(print_buf, buf_size, *cur_len,
+				"\ttos=0x%02x -nw-proto=0x%02x\n", key->tos, key->nw_proto);
+	MKDUMPSTR(print_buf, buf_size, *cur_len,
+				"L4\t dstport=0x%04x -srcport=0x%04x", key->tp_dst, key->tp_src);
+}
+
+/* Append the fd_flow_result fields to print_buf for debug logging.
+ * NOTE(review): double-underscore names are reserved identifiers — consider
+ * renaming.
+ */
+static void __result_dump(char *print_buf, int buf_size, int *cur_len, struct fd_flow_result *res)
+{
+
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -hit_flag = 0x%04x", res->hit_flag);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -uplink_flag = 0x%02x", res->uplink_flag);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -action_idx = 0x%02x", res->action_idx);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -qid = 0x%04x", res->qid);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -vfid = 0x%04x", res->vfid);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -uplink_fdid = 0x%08x", res->uplink_fdid);
+	MKDUMPSTR(print_buf, buf_size, *cur_len, " -fdir_offset = 0x%02x", res->fdir_offset);
+}
+
+/* Hex-dump a memory region through the driver log.
+ * @base: start of the region
+ * @bytelen: region size in bytes; output is clamped to MAX_STRING_LEN bytes
+ */
+void dump_mem(void *base, int bytelen)
+{
+	int i;
+	int cur_len = 0;
+	int dumplen;
+	char print_buf[MAX_STRING_LEN];
+	int buf_size = MAX_STRING_LEN;
+	char *tmp = (char *)base;
+
+	dumplen = RTE_MIN(bytelen, MAX_STRING_LEN);
+	/* fix: bound the loop by the clamped length — it was computed but the
+	 * loop iterated over the full bytelen, defeating the clamp.
+	 */
+	for (i = 0; i < dumplen; i++) {
+		if (i % 16 == 0)
+			MKDUMPSTR(print_buf, buf_size, cur_len, "\n");
+
+		MKDUMPSTR(print_buf, buf_size, cur_len, "0x%02x ", *(uint8_t *)tmp);
+		tmp++;
+	}
+	MKDUMPSTR(print_buf, buf_size, cur_len, "\n");
+
+	PMD_DRV_LOG(DEBUG, "  dump mem %dB\n %s", dumplen, print_buf);
+}
+
+/* Log a flow key together with its mask, formatted first and then as two
+ * raw memory dumps.
+ */
+static void offlow_key_dump(struct fd_flow_key *key, struct fd_flow_key *key_mask)
+{
+	int used = 0;
+	char dump_buf[MAX_STRING_LEN];
+
+	MKDUMPSTR(dump_buf, MAX_STRING_LEN, used, "ofload key:\n\t");
+	__entry_dump(dump_buf, MAX_STRING_LEN, &used, key);
+	MKDUMPSTR(dump_buf, MAX_STRING_LEN, used, "\nofload key_mask:\n\t");
+	__entry_dump(dump_buf, MAX_STRING_LEN, &used, key_mask);
+	PMD_DRV_LOG(INFO, "%s\n", dump_buf);
+
+	PMD_DRV_LOG(INFO, "\n===key ===	");
+	dump_mem(key, sizeof(struct fd_flow_key));
+	PMD_DRV_LOG(INFO, "\n===key mask === ");
+	dump_mem(key_mask, sizeof(struct fd_flow_key));
+}
+
+/* Log a hardware flow result, formatted first and then as a raw memory dump. */
+static void offlow_result_dump(struct fd_flow_result *res)
+{
+	int used = 0;
+	char dump_buf[MAX_STRING_LEN];
+
+	MKDUMPSTR(dump_buf, MAX_STRING_LEN, used, "ofload result:\n");
+	__result_dump(dump_buf, MAX_STRING_LEN, &used, res);
+	PMD_DRV_LOG(INFO, "%s\n ", dump_buf);
+
+	PMD_DRV_LOG(INFO, "memdump : ===result ===\n ");
+	dump_mem(res, sizeof(struct fd_flow_result));
+}
+
+/* Translate an rte_flow pattern into the fd_flow key/key_mask pair.
+ * Supported items: ETH, VLAN (VID-only mask), IPV4, IPV6, TCP, UDP, VXLAN.
+ * Ranged matches (item->last) are rejected.
+ * Returns 0 on success, -rte_errno (with @error filled) on failure.
+ */
+static int
+zxdh_flow_parse_pattern(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_item *items,
+			 struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+	struct zxdh_rte_flow *flow = &dh_flow->flowentry;
+	const struct rte_flow_item *item;
+	enum rte_flow_item_type next_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec = NULL, *ipv6_mask = NULL;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	struct fd_flow_key *key, *key_mask;
+
+	key = &(flow->fd_flow.key);
+	key_mask = &(flow->fd_flow.key_mask);
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+		item = items;
+		if (items->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					items,
+					"Not support range");
+			return -rte_errno;
+		}
+
+		PMD_DRV_LOG(INFO, "ITEM	type %d", item->type);
+		flow_item_dump(item);
+
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			next_type = (item + 1)->type;
+			/* An ETH-only pattern must carry both spec and mask. */
+			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+							(!eth_spec || !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"NULL eth spec/mask.");
+				return -rte_errno;
+			}
+			if (eth_spec && eth_mask) {
+				key->mac_dst = eth_spec->dst;
+				key->mac_src  = eth_spec->src;
+				key_mask->mac_dst  = eth_mask->dst;
+				key_mask->mac_src  = eth_mask->src;
+
+				/* ether type is matched only when fully masked */
+				if (eth_mask->type == 0xffff) {
+					key->ether_type = rte_cpu_to_le_16(eth_spec->type);
+					key_mask->ether_type = rte_cpu_to_le_16(eth_mask->type);
+				}
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			if (vlan_spec && vlan_mask) {
+				if (vlan_mask->tci !=
+					rte_cpu_to_be_16(ZXDH_VLAN_VID_MASK)) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported TCI mask.just vlanid supported ");
+					/* fix: abort on an unsupported mask instead of
+					 * silently continuing and reporting success.
+					 */
+					return -rte_errno;
+				}
+				key->vlan_tci = rte_cpu_to_le_16(vlan_spec->tci);
+				key_mask->vlan_tci = rte_cpu_to_le_16(vlan_mask->tci);
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (ipv4_spec && ipv4_mask) {
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.fragment_offset ||
+					ipv4_mask->hdr.hdr_checksum ||
+					ipv4_mask->hdr.time_to_live) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+				/* Get the filter info */
+				key->nw_proto =
+						ipv4_spec->hdr.next_proto_id;
+				key->tos =
+						ipv4_spec->hdr.type_of_service;
+				key_mask->nw_proto =
+						ipv4_mask->hdr.next_proto_id;
+				key_mask->tos =
+						ipv4_mask->hdr.type_of_service;
+				/* IPv4 addresses occupy the last word of the
+				 * 16-byte IP fields.
+				 */
+				rte_memcpy((uint32_t *)key->src_ip + 3,
+					   &ipv4_spec->hdr.src_addr, 4);
+				rte_memcpy((uint32_t *)key->dst_ip + 3,
+					   &ipv4_spec->hdr.dst_addr, 4);
+				rte_memcpy((uint32_t *)key_mask->src_ip + 3,
+					   &ipv4_mask->hdr.src_addr, 4);
+				rte_memcpy((uint32_t *)key_mask->dst_ip + 3,
+					   &ipv4_mask->hdr.dst_addr, 4);
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (ipv6_spec && ipv6_mask) {
+				/* Check IPv6 mask and update input set */
+				if ((ipv6_mask->hdr.payload_len) ||
+					 (ipv6_mask->hdr.hop_limits == UINT8_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+				key->tc = (uint8_t)(rte_cpu_to_be_16(ipv6_spec->hdr.vtc_flow)
+								>> RTE_IPV6_HDR_TC_SHIFT);
+				key_mask->tc = (uint8_t)(rte_cpu_to_be_16(ipv6_mask->hdr.vtc_flow)
+								>> RTE_IPV6_HDR_TC_SHIFT);
+
+				key->nw_proto = ipv6_spec->hdr.proto;
+				key_mask->nw_proto = ipv6_mask->hdr.proto;
+
+				rte_memcpy(key->src_ip,
+					   &ipv6_spec->hdr.src_addr, 16);
+				rte_memcpy(key->dst_ip,
+					   &ipv6_spec->hdr.dst_addr, 16);
+				rte_memcpy(key_mask->src_ip,
+					   &ipv6_mask->hdr.src_addr, 16);
+				rte_memcpy(key_mask->dst_ip,
+					   &ipv6_mask->hdr.dst_addr, 16);
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				/* Only exact-match (or wildcard) ports supported */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp ||
+					(tcp_mask->hdr.src_port &&
+					 (tcp_mask->hdr.src_port != UINT16_MAX)) ||
+					(tcp_mask->hdr.dst_port &&
+					 (tcp_mask->hdr.dst_port != UINT16_MAX))) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				key->tp_src = rte_cpu_to_le_16(tcp_spec->hdr.src_port);
+				key_mask->tp_src = rte_cpu_to_le_16(tcp_mask->hdr.src_port);
+
+				key->tp_dst = rte_cpu_to_le_16(tcp_spec->hdr.dst_port);
+				key_mask->tp_dst = rte_cpu_to_le_16(tcp_mask->hdr.dst_port);
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (udp_spec && udp_mask) {
+				/* Only exact-match (or wildcard) ports supported */
+				if (udp_mask->hdr.dgram_len ||
+					udp_mask->hdr.dgram_cksum ||
+					(udp_mask->hdr.src_port &&
+					 (udp_mask->hdr.src_port != UINT16_MAX)) ||
+					(udp_mask->hdr.dst_port &&
+					 (udp_mask->hdr.dst_port != UINT16_MAX))) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+					return -rte_errno;
+				}
+				key->tp_src = rte_cpu_to_le_16(udp_spec->hdr.src_port);
+				key_mask->tp_src = rte_cpu_to_le_16(udp_mask->hdr.src_port);
+				key->tp_dst = rte_cpu_to_le_16(udp_spec->hdr.dst_port);
+				key_mask->tp_dst = rte_cpu_to_le_16(udp_mask->hdr.dst_port);
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+		{
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			static const struct rte_flow_item_vxlan   flow_item_vxlan_mask = {
+				.vni = "\xff\xff\xff",
+			};
+			/* fix: guard against NULL spec/mask before dereferencing,
+			 * matching the other item handlers above.
+			 */
+			if (vxlan_spec && vxlan_mask) {
+				if (memcmp(vxlan_mask, &flow_item_vxlan_mask,
+					sizeof(struct rte_flow_item_vxlan))) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item,
+							   "Invalid vxlan mask");
+					return -rte_errno;
+				}
+				rte_memcpy(&key->vni, &vxlan_spec->vni, 3);
+				rte_memcpy(&key_mask->vni, &vxlan_mask->vni, 3);
+			}
+			break;
+		}
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  NULL, "item not supported");
+		}
+	}
+	offlow_key_dump(key, key_mask);
+	PMD_DRV_LOG(DEBUG, "parse pattern ok");
+	return 0;
+}
+
+/* Parse flow actions into the hardware result: QUEUE maps to a forward to a
+ * physical queue (action_idx 1), DROP to action_idx 2.  Any other action is
+ * rejected.  Returns 0 on success, -rte_errno (with @error filled) otherwise.
+ */
+static int
+zxdh_flow_parse_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions,
+			 struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+	struct zxdh_rte_flow *flow = &dh_flow->flowentry;
+	uint32_t dest_num = 0;
+	struct fd_flow_result *result = &(flow->fd_flow.result);
+	int ret;
+
+	memset(result, 0, sizeof(*result));
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+		{
+			dest_num++;
+			const struct rte_flow_action_queue *act_q;
+
+			act_q = actions->conf;
+			if (act_q->index >= dev->data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION, actions,
+						"Invalid queue ID .");
+				return -rte_errno;
+			}
+			/* Translate the logical queue id to the VQM physical id. */
+			ret = logic_qid_to_vqm_phyqid(dev, act_q->index);
+			if (ret < 0) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION, actions,
+						"Invalid phy queue ID .");
+				return -rte_errno;
+			}
+			PMD_DRV_LOG(DEBUG, " QID RET 0x%x beorder 0x%x",
+				ret, rte_cpu_to_be_16((uint16_t)ret));
+			result->qid = rte_cpu_to_le_16(ret);
+			/* fix: the earlier "result->action_idx = NP_ACTION_FWD;"
+			 * was a dead store, always overwritten here.
+			 */
+			result->action_idx = 1; /* forward to queue */
+			PMD_DRV_LOG(DEBUG, " QID RET 0x%x", result->qid);
+			break;
+		}
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+			result->action_idx = 2; /* drop */
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				"Invalid action.");
+			return -rte_errno;
+		}
+		PMD_DRV_LOG(DEBUG, " action parse ok");
+	}
+	/* NOTE(review): up to two destination actions are accepted (e.g.
+	 * QUEUE followed by DROP, the last one wins); confirm whether a
+	 * single destination should be enforced instead.
+	 */
+	if (dest_num > 2) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Unsupported action combination");
+		return -rte_errno;
+	}
+	PMD_DRV_LOG(DEBUG, " action parse ok");
+	result->hit_flag = 1;
+	offlow_result_dump(result);
+
+	return 0;
+}
+/* Return the staged flow object (if any) to the shared flow mempool.
+ * fix: also clear priv->cur_flow — it was left dangling after the mempool
+ * put, so a later zxdh_flow_validate() would reuse the freed object and
+ * zxdh_flow_create()'s failure path could put it back into the pool twice.
+ */
+static void  zxdh_flow_free(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *priv =   dev->data->dev_private;
+	struct zxdh_flow *dh_flow = priv->cur_flow;
+
+	if (dh_flow) {
+		rte_mempool_put(zxdh_shared_data->flow_mp, dh_flow);
+		priv->cur_flow = NULL;
+	}
+}
+
+/* Validate attr/pattern/actions and parse them into the per-port staging
+ * flow object (priv->cur_flow), allocating it from the shared flow mempool
+ * on first use.  On parse failure the staged object is released.
+ * NOTE(review): the NULL checks on pattern/actions/attr return without
+ * releasing the just-allocated object — it stays cached in cur_flow for the
+ * next call; confirm this is intentional.
+ */
+static int
+zxdh_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item  *pattern,
+		   const struct rte_flow_action *actions,
+		   struct rte_flow_error *error)
+{
+	int ret;
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_flow *dh_flow = priv->cur_flow;
+
+	/* Allocate a staging object unless the caller pre-staged one. */
+	if (!dh_flow) {
+		ret = rte_mempool_get(zxdh_shared_data->flow_mp, (void **)&dh_flow);
+		if (ret) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+						"Failed to allocate memory");
+			return -rte_errno;
+		}
+		priv->cur_flow = dh_flow;
+	}
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	/* Parse attributes, then pattern, then actions into dh_flow. */
+	ret = zxdh_flow_parse_attr(dev, attr, error, dh_flow);
+	if (ret < 0)
+		goto err;
+
+	PMD_DRV_LOG(INFO, "--------zxdh_flow_parse_attr	ok ");
+	ret = zxdh_flow_parse_pattern(dev, pattern, error, dh_flow);
+	if (ret < 0)
+		goto err;
+
+	PMD_DRV_LOG(INFO, "--------zxdh_flow_parse_pattern  ok ");
+	ret = zxdh_flow_parse_action(dev, actions, error, dh_flow);
+	if (ret < 0)
+		goto err;
+
+	PMD_DRV_LOG(INFO, "--------zxdh_flow_parse_action ok");
+err:
+	/* ret == 0 also falls through here: nothing is freed on success. */
+	if (ret)
+		zxdh_flow_free(dev);
+
+	return ret;
+}
+
+/* Program one flow entry into the FD ACL table.
+ * Returns 0 on success, -1 on hardware/SDK failure.
+ */
+static int zxdh_hw_flow_insert(struct zxdh_flow *dh_flow)
+{
+	struct zxdh_rte_flow *flow = &dh_flow->flowentry;
+
+	if (dpp_apt_acl_entry_insert(0, ZXDH_SDT_FD_CFG_TABLE_TMP, flow) != 0)
+		return -1;
+
+	PMD_DRV_LOG(INFO, "--------%s return idx %d ", __func__, flow->hw_idx);
+	return 0;
+}
+
+/* Dump the entry being removed, then delete it from the HW ACL table. */
+static int zxdh_hw_flow_del(struct zxdh_flow *dh_flow)
+{
+	struct zxdh_rte_flow *entry = &dh_flow->flowentry;
+
+	offlow_key_dump(&entry->fd_flow.key, &entry->fd_flow.key_mask);
+	offlow_result_dump(&entry->fd_flow.result);
+
+	if (dpp_apt_acl_entry_delete(0, ZXDH_SDT_FD_CFG_TABLE_TMP, entry) != 0)
+		return -1;
+
+	return 0;
+}
+static int zxdh_hw_flow_query(struct zxdh_flow *dh_flow)
+{
+	uint32_t ret;
+	struct zxdh_rte_flow *flow = &dh_flow->flowentry;
+
+	ret = dpp_apt_acl_entry_get(0, ZXDH_SDT_FD_CFG_TABLE_TMP, flow);
+	if (ret != 0)
+		return -1;
+	return 0;
+}
+
+/* Flush all HW flow entries.
+ * TODO(review): no HW flush primitive is invoked yet — the original body
+ * tested a constant-zero ret and could never fail.  Keep returning
+ * success until the HW-wide flush hook is wired up.
+ */
+static int zxdh_hw_flow_flush(void)
+{
+	return 0;
+}
+
+/* Create a flow rule: validate/parse into a staged zxdh_flow, program it
+ * into HW and link it on the per-port flow list.  Returns the embedded
+ * zxdh_rte_flow as the opaque rte_flow handle, or NULL with @error set.
+ */
+static struct rte_flow *
+zxdh_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_flow *dh_flow = NULL;
+	int ret;
+
+	ret = rte_mempool_get(zxdh_shared_data->flow_mp, (void **)&dh_flow);
+	if (ret) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	priv->cur_flow = dh_flow;
+	/* zxdh_flow_validate() fills @error itself on failure */
+	ret = zxdh_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	dh_flow = priv->cur_flow;
+	if (dh_flow == NULL) {
+		/* validate released the staged flow; the original reached
+		 * rte_flow_error_set() with ret == 0 here, i.e. an invalid
+		 * error code of 0
+		 */
+		ret = -EINVAL;
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to create flow.");
+		goto free_flow;
+	}
+
+	ret = zxdh_hw_flow_insert(dh_flow);
+	if (ret < 0) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to insert to hw");
+		goto free_flow;
+	}
+	TAILQ_INSERT_TAIL(&priv->flow_list, &dh_flow->flowentry, node);
+	priv->cur_flow = NULL;
+	PMD_DRV_LOG(DEBUG, "flow created, entry %p", &dh_flow->flowentry);
+
+	return (struct rte_flow *)&dh_flow->flowentry;
+
+free_flow:
+	/* do not clobber the specific error already set above */
+	if (ret)
+		zxdh_flow_free(dev);
+	priv->cur_flow = NULL;
+	return NULL;
+}
+
+/* Map an rte_flow handle (the embedded flowentry) back to its owning
+ * zxdh_flow by matching the flow key against the per-port list.
+ * Returns NULL when no entry matches.
+ */
+struct zxdh_flow *get_dhflow(struct rte_eth_dev *dev, struct zxdh_rte_flow *flow)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_rte_flow *entry;
+
+	TAILQ_FOREACH(entry, &priv->flow_list, node) {
+		if (!memcmp(&entry->fd_flow.key, &flow->fd_flow.key,
+				sizeof(struct fd_flow_key))) {
+			/* recover the container: the original hard-coded an
+			 * offset of 4, which silently breaks if the layout
+			 * of struct zxdh_flow ever changes
+			 */
+			return RTE_PTR_SUB(entry, offsetof(struct zxdh_flow, flowentry));
+		}
+	}
+	return NULL;
+}
+
+/* Destroy a flow rule: remove it from HW, unlink it from the per-port
+ * list and return the containing object to the flow mempool.
+ */
+static int
+zxdh_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_flow *dh_flow;
+	int ret;
+
+	dh_flow = get_dhflow(dev, (struct zxdh_rte_flow *)flow);
+	if (dh_flow == NULL) {
+		/* the original dereferenced a NULL lookup result here */
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Flow not found.");
+		return -rte_errno;
+	}
+
+	ret = zxdh_hw_flow_del(dh_flow);
+	if (!ret) {
+		TAILQ_REMOVE(&priv->flow_list, &dh_flow->flowentry, node);
+		/* give the object back; the original leaked it */
+		rte_mempool_put(zxdh_shared_data->flow_mp, dh_flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+	return ret;
+}
+
+
+/* Flush all flows: ask HW to drop everything, then empty the per-port
+ * list, returning each containing zxdh_flow to the mempool.
+ */
+static int
+zxdh_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_rte_flow *entry, *temp;
+	int ret;
+
+	ret = zxdh_hw_flow_flush();
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush FDIR flows.");
+		return -rte_errno;
+	}
+	/* Delete FDIR flows in flow list. */
+	RTE_TAILQ_FOREACH_SAFE(entry, &priv->flow_list, node, temp) {
+		TAILQ_REMOVE(&priv->flow_list, entry, node);
+		/* the original unlinked entries but never returned the
+		 * containing zxdh_flow to the mempool (leak)
+		 */
+		rte_mempool_put(zxdh_shared_data->flow_mp,
+				RTE_PTR_SUB(entry, offsetof(struct zxdh_flow, flowentry)));
+	}
+	return ret;
+}
+
+/* Query a flow rule.  Only RTE_FLOW_ACTION_TYPE_COUNT is supported and
+ * currently reports a fixed placeholder value.
+ */
+static int
+zxdh_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data, struct rte_flow_error *error)
+{
+	struct zxdh_flow *dh_flow;
+	int ret;
+	struct zxdh_rte_flow *zxdh_rte_flow = (struct zxdh_rte_flow *)flow;
+
+	offlow_key_dump(&zxdh_rte_flow->fd_flow.key, &zxdh_rte_flow->fd_flow.key_mask);
+	offlow_result_dump(&zxdh_rte_flow->fd_flow.result);
+
+	dh_flow = get_dhflow(dev, zxdh_rte_flow);
+	if (dh_flow == NULL) {
+		/* the original passed a NULL lookup result to the HW query */
+		return rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid rule");
+	}
+	ret = zxdh_hw_flow_query(dh_flow);
+	if (ret) {
+		return rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid rule");
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			/* TODO(review): placeholder count, not read from HW */
+			*(int *)data = 10;
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"action not supported");
+		}
+	}
+	return 0;
+}
+
diff --git a/drivers/net/zxdh/zxdh_flow.h b/drivers/net/zxdh/zxdh_flow.h
new file mode 100644
index 0000000000..d3418ca9f5
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.h
@@ -0,0 +1,129 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2023 ZTE Corporation, Ltd
+ */
+
+#ifndef ZXDH_FLOW_H_
+#define ZXDH_FLOW_H_
+
+/**
+ * @file
+ * RTE generic flow API
+ *
+ * This interface provides the ability to program packet matching and
+ * associated actions in hardware through flow rules.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <rte_arp.h>
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_icmp.h>
+#include <rte_ip.h>
+#include <rte_sctp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_byteorder.h>
+#include <rte_flow_driver.h>
+
+#include <sys/queue.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_GROUP  1
+#define MAX_FLOW_NUM 2048
+
+#define  NP_ACTION_FWD   1
+#define  NP_ACTION_DROP 2
+
+
+#define REPORT_ID  0
+#define	REPORT_ID_FLEX_4 1  /* Report FD ID and 4 flex bytes. */
+#define	REPORT_FLEX_8 2    /* Report 8 flex bytes. */
+
+/* Match key of one flow-director entry (paired with an identical-layout
+ * mask in fd_flow_entry).
+ */
+struct fd_flow_key {
+	struct rte_ether_addr mac_src; /**< source MAC (field name taken as authoritative; original comment said "Destination" — verify) */
+	struct rte_ether_addr mac_dst; /**< destination MAC */
+	rte_le16_t ether_type; /**< EtherType  */
+	rte_le16_t vlan_tci; /**< vlanid 0xfff  is  valid */
+	uint8_t  src_ip[16];  /**< IP source; presumably IPv4 is mapped into the 16-byte field — confirm layout */
+	uint8_t  dst_ip[16];  /**< IP destination */
+	uint16_t rsv; /**< reserved / padding */
+	union {
+		uint8_t  tos; /**< IPv4 type of service */
+		uint8_t  tc;  /**< IPv6 traffic class */
+	};
+	uint8_t  nw_proto; /**< L4 protocol number */
+	rte_le16_t  tp_src; /**< L4 source port */
+	rte_le16_t  tp_dst; /**< L4 destination port */
+	uint32_t spi; /**< IPsec SPI */
+	uint32_t vni; /**< VXLAN network identifier */
+};
+
+
+/* Lookup result / action part of a flow-director entry. */
+struct fd_flow_result {
+	uint8_t rsv:7; /**< reserved bits */
+	uint8_t hit_flag:1; /**< set when the entry matched */
+	uint8_t rsv0; /**< reserved */
+	uint8_t uplink_flag; /*0:fdid;1:4B fdir;2:8B fdif*/
+	uint8_t action_idx; /*1:fwd 2:drop*/
+	rte_le16_t qid; /**< destination queue id */
+	rte_le16_t vfid; /**< destination VF id */
+	rte_le32_t uplink_fdid; /**< FD id reported uplink */
+	uint8_t rsv1[3]; /**< reserved */
+	uint8_t fdir_offset; /**< original comment was mojibake; presumably "offset relative to L2 header" — verify */
+
+};
+/* Key, mask and result that together describe one HW flow entry. */
+struct fd_flow_entry {
+	struct fd_flow_key key;
+	struct fd_flow_key key_mask;
+	struct fd_flow_result result;
+};
+
+/* Which table family a rule belongs to. */
+enum flow_type {
+	 FLOW_TYPE_FLOW = 0,
+	 FLOW_TYPE_FD,
+	 FLOW_TYPE_ACL,
+};
+/* List-linked flow rule; a pointer to this struct is handed back to
+ * applications as the opaque rte_flow handle.
+ */
+struct zxdh_rte_flow {
+	TAILQ_ENTRY(zxdh_rte_flow) node; /* linkage on the per-port flow list */
+	enum flow_type flowtype;
+	uint16_t hw_idx; /* index assigned by HW on insert */
+	struct fd_flow_entry fd_flow;
+};
+
+/* Staged rule under construction: parse metadata plus the embedded
+ * flowentry that is eventually linked on the flow list.
+ * NOTE(review): code elsewhere recovers this struct from &flowentry via
+ * a fixed byte offset, so the four leading uint8_t fields must keep
+ * exactly this layout.
+ */
+struct zxdh_flow {
+	uint8_t direct; /* 0 in 1 out */
+	uint8_t group;  /* rule group id */
+	uint8_t pri; /* priority */
+	uint8_t rsv; /* reserved */
+	struct zxdh_rte_flow  flowentry;
+};
+
+
+TAILQ_HEAD(FLOW_LIST, zxdh_rte_flow);
+
+
+void dump_mem(void *base, int bytelen);
+
+
+extern const struct rte_flow_ops zxdh_flow_ops;
+
+#endif /*ZXDH_FLOW_H_*/
+
+
+
+
+
+
diff --git a/drivers/net/zxdh/zxdh_logs.h b/drivers/net/zxdh/zxdh_logs.h
new file mode 100644
index 0000000000..eca4c4a798
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_logs.h
@@ -0,0 +1,72 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_LOGS_H_
+#define _ZXDH_LOGS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_log.h>
+
+/* Trace entry into an init-path function. */
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+/* Probe/initialisation messages. */
+extern int32_t zxdh_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zxdh_logtype_init, \
+	"offload_zxdh %s(): " fmt "\n", __func__, ##args)
+
+/* General driver/control-path messages. */
+extern int32_t zxdh_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zxdh_logtype_driver, \
+	"offload_zxdh %s(): " fmt "\n", __func__, ## args)
+
+/* Datapath RX messages. */
+extern int zxdh_logtype_rx;
+#define PMD_RX_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zxdh_logtype_rx, \
+	"offload_zxdh %s(): " fmt "\n", __func__, ## args)
+
+/* Datapath TX messages. */
+extern int zxdh_logtype_tx;
+#define PMD_TX_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zxdh_logtype_tx, \
+	"offload_zxdh %s(): " fmt "\n", __func__, ## args)
+
+/* BAR message-channel messages.  NOTE: every PMD_*_LOG macro appends
+ * "\n" itself, so callers should not pass a trailing newline.
+ */
+extern int32_t zxdh_logtype_msg;
+#define PMD_MSG_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zxdh_logtype_msg, \
+	"offload_zxdh %s(): " fmt "\n", __func__, ## args)
+
+/* printf format helper for dotted IPv4 quads */
+#ifndef IPV4_BYTES
+#define IPV4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
+#endif
+
+/* printf format helper for full (uncompressed) IPv6 addresses */
+#ifndef IPV6_BYTES
+#define IPV6_BYTES_FMT \
+	"%02x%02x:%02x%02x:%02x%02x:%02x%02x:" \
+	"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#endif
+
+#define IPV6_ADDR_LEN 16
+
+#ifndef ETHMAC_BYTES
+#define ETHMAC_BYTES_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#endif
+
+/* Append formatted text to buf at cur_len, silently stopping once the
+ * buffer is full; cur_len is advanced by the snprintf return value.
+ */
+#ifndef MKDUMPSTR
+#define MAX_STRING_LEN 8192
+#define MKDUMPSTR(buf, buf_size, cur_len, ...) \
+	do { \
+		typeof(cur_len) len = (cur_len);\
+		if ((len) >= (buf_size)) { \
+			break; } \
+		cur_len += snprintf((buf) + (len), (buf_size) - (len), __VA_ARGS__); \
+	} while (0)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZXDH_LOGS_H_ */
diff --git a/drivers/net/zxdh/zxdh_msg_chan.c b/drivers/net/zxdh/zxdh_msg_chan.c
new file mode 100644
index 0000000000..1fbb772499
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_msg_chan.c
@@ -0,0 +1,1270 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <stdbool.h>
+#include <inttypes.h>
+#include <rte_memcpy.h>
+#include <rte_branch_prediction.h>
+#include "msg_chan_pub.h"
+#include "rte_common.h"
+#include "zxdh_logs.h"
+
+#define REPS_INFO_FLAG_USABLE  0x00
+#define REPS_INFO_FLAG_USED    0xa0
+
+#define BDF_ECAM(bus, devid, func)   (((bus & 0xff) << 8) | (func & 0x07) | ((devid & 0x1f) << 3))
+
+/**************************************************************************
+ * common.ko will work in 5 scenarios
+ * 1: SCENE_HOST_IN_DPU  : host in DPU card
+ * 2: SCENE_ZF_IN_DPU    : zf   in DPU card
+ * 3: SCENE_NIC_WITH_DDR : inic with DDR
+ * 4: SCENE_NIC_NO_DDR   : inic without DDR
+ * 5: SCENE_STD_NIC      : std card
+ **************************************************************************/
+#ifdef SCENE_HOST_IN_DPU
+#define BAR_PF_NUM             31
+#define BAR_VF_NUM             1024
+#define BAR_INDEX_PF_TO_VF     1
+#define BAR_INDEX_MPF_TO_MPF   1
+#define BAR_INDEX_MPF_TO_PFVF  0xff
+#define BAR_INDEX_PFVF_TO_MPF  0xff
+#endif
+
+#ifdef SCENE_ZF_IN_DPU
+#define BAR_PF_NUM             7
+#define BAR_VF_NUM             128
+#define BAR_INDEX_PF_TO_VF     0xff
+#define BAR_INDEX_MPF_TO_MPF   1
+#define BAR_INDEX_MPF_TO_PFVF  0xff
+#define BAR_INDEX_PFVF_TO_MPF  0xff
+#endif
+
+#ifdef SCENE_NIC_WITH_DDR
+#define BAR_PF_NUM             31
+#define BAR_VF_NUM             1024
+#define BAR_INDEX_PF_TO_VF     1
+#define BAR_INDEX_MPF_TO_MPF   0xff
+#define BAR_INDEX_MPF_TO_PFVF  0xff
+#define BAR_INDEX_PFVF_TO_MPF  0xff
+#endif
+
+#ifdef SCENE_NIC_NO_DDR
+#define BAR_PF_NUM             31
+#define BAR_VF_NUM             1024
+#define BAR_INDEX_PF_TO_VF     1
+#define BAR_INDEX_MPF_TO_MPF   0xff
+#define BAR_INDEX_MPF_TO_PFVF  1
+#define BAR_INDEX_PFVF_TO_MPF  2
+#endif
+
+#ifdef SCENE_STD_NIC
+#define BAR_PF_NUM             7
+#define BAR_VF_NUM             256
+#define BAR_INDEX_PF_TO_VF     1
+#define BAR_INDEX_MPF_TO_MPF   0xff
+#define BAR_INDEX_MPF_TO_PFVF  1
+#define BAR_INDEX_PFVF_TO_MPF  2
+#endif
+
+#define SCENE_TEST
+#ifdef SCENE_TEST
+#define BAR_PF_NUM             7
+#define BAR_VF_NUM             256
+#define BAR_INDEX_PF_TO_VF     0
+#define BAR_INDEX_MPF_TO_MPF   0xff
+#define BAR_INDEX_MPF_TO_PFVF  0
+#define BAR_INDEX_PFVF_TO_MPF  0
+#endif
+
+/**
+ * 0: left 2K,    1: right 2K
+ * src/dst: TO_RISC, TO_PFVF, TO_MPF
+ * MPF:       0         0       0
+ * PF:        0         0       1
+ * VF:        0         1       1
+ **/
+#define BAR_MSG_SRC_NUM   3
+#define BAR_MSG_SRC_MPF   0
+#define BAR_MSG_SRC_PF    1
+#define BAR_MSG_SRC_VF    2
+#define BAR_MSG_SRC_ERR   0xff
+
+#define BAR_MSG_DST_NUM   3
+#define BAR_MSG_DST_RISC  0
+#define BAR_MSG_DST_MPF   2
+#define BAR_MSG_DST_PFVF  1
+#define BAR_MSG_DST_ERR   0xff
+
+#define BAR_SUBCHAN_INDEX_SEND  0
+#define BAR_SUBCHAN_INDEX_RECV  1
+/* [src][dst] -> which 2K half of the channel this endpoint transmits
+ * on; see the left/right-2K layout table in the comment above.
+ */
+uint8_t subchan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {
+	{BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND},
+	{BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV},
+	{BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV, BAR_SUBCHAN_INDEX_RECV}
+};
+
+#define BAR_INDEX_TO_RISC  0
+/* [src][dst] -> BAR channel index; 0xff entries mark channels that do
+ * not exist in the compiled-in scene.
+ */
+uint8_t chan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {
+	{BAR_INDEX_TO_RISC, BAR_INDEX_MPF_TO_PFVF, BAR_INDEX_MPF_TO_MPF},
+	{BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF,    BAR_INDEX_PFVF_TO_MPF},
+	{BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF,    BAR_INDEX_PFVF_TO_MPF}
+};
+
+/* Translate an MSG_CHAN_END_* source endpoint to its row in the channel
+ * tables; BAR_MSG_SRC_ERR when the endpoint cannot originate messages.
+ */
+static uint8_t __bar_msg_src_index_trans(uint8_t src)
+{
+	switch (src) {
+	case MSG_CHAN_END_MPF:
+		return BAR_MSG_SRC_MPF;
+	case MSG_CHAN_END_PF:
+		return BAR_MSG_SRC_PF;
+	case MSG_CHAN_END_VF:
+		return BAR_MSG_SRC_VF;
+	default:
+		return BAR_MSG_SRC_ERR;
+	}
+}
+/* Translate an MSG_CHAN_END_* destination endpoint to its column in the
+ * channel tables; BAR_MSG_DST_ERR for unknown endpoints.
+ */
+static uint8_t __bar_msg_dst_index_trans(uint8_t dst)
+{
+	uint8_t dst_index = 0;
+
+	switch (dst) {
+	case MSG_CHAN_END_MPF:
+		dst_index = BAR_MSG_DST_MPF;
+		break;
+	case MSG_CHAN_END_PF:
+	case MSG_CHAN_END_VF:
+		dst_index = BAR_MSG_DST_PFVF;
+		break;
+	case MSG_CHAN_END_RISC:
+		dst_index = BAR_MSG_DST_RISC;
+		break;
+	default:
+		/* the original returned BAR_MSG_SRC_ERR here — same value
+		 * (0xff) but the DST constant is the correct one
+		 */
+		dst_index = BAR_MSG_DST_ERR;
+		break;
+	}
+	return dst_index;
+}
+
+/* One pending-reply slot: where the reply should be copied and how big
+ * the caller's buffer is.
+ */
+struct seqid_item {
+	void *reps_addr; /* caller's reply buffer */
+	uint16_t id;
+	uint16_t buffer_len; /* capacity of reps_addr */
+	uint16_t flag; /* REPS_INFO_FLAG_USABLE / REPS_INFO_FLAG_USED */
+}; /* 16B */
+#define BAR_SEQID_NUM_MAX  256
+/* Ring of reply slots indexed by message sequence id, shared by all
+ * channels in this process and guarded by its own spinlock.
+ */
+struct seqid_ring {
+	uint16_t cur_id; /* last allocated id; the next search starts after it */
+	pthread_spinlock_t lock;
+	struct seqid_item reps_info_tbl[BAR_SEQID_NUM_MAX];
+};
+struct seqid_ring g_seqid_ring = {0};
+/* Reserve a free sequence id (reply slot) under the ring lock.
+ * Returns BAR_MSG_OK with the id written to @msgid, or -1 when every
+ * slot of the BAR_SEQID_NUM_MAX ring is in use.
+ */
+static int __bar_chan_msgid_allocate(uint16_t *msgid)
+{
+	struct seqid_item *seqid_reps_info = NULL;
+
+	pthread_spin_lock(&g_seqid_ring.lock);
+	uint16_t g_id = g_seqid_ring.cur_id;
+	uint16_t count = 0;
+
+	do {
+		count++;
+		++g_id;
+		g_id %= BAR_SEQID_NUM_MAX;
+		seqid_reps_info = &g_seqid_ring.reps_info_tbl[g_id];
+	} while ((seqid_reps_info->flag != REPS_INFO_FLAG_USABLE) && (count < BAR_SEQID_NUM_MAX));
+	int rc;
+
+	/* decide on the slot state, not the probe count: the original
+	 * checked (count >= BAR_SEQID_NUM_MAX) and thereby rejected a free
+	 * slot that was found on the very last probe
+	 */
+	if (seqid_reps_info->flag != REPS_INFO_FLAG_USABLE) {
+		rc = -1;
+		goto out;
+	}
+	seqid_reps_info->flag = REPS_INFO_FLAG_USED;
+	g_seqid_ring.cur_id = g_id;
+	*msgid = g_id;
+	rc = BAR_MSG_OK;
+
+out:
+	pthread_spin_unlock(&g_seqid_ring.lock);
+	return rc;
+}
+/* Allocate a msg id and record where the reply payload should land. */
+static uint16_t __bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result, uint16_t *msg_id)
+{
+	struct seqid_item *reps_info;
+
+	if (__bar_chan_msgid_allocate(msg_id) != BAR_MSG_OK)
+		return BAR_MSG_ERR_MSGID;
+
+	PMD_MSG_LOG(DEBUG, "allocate msg_id: %u", *msg_id);
+	reps_info = &g_seqid_ring.reps_info_tbl[*msg_id];
+	reps_info->reps_addr = result->recv_buffer;
+	reps_info->buffer_len = result->buffer_len;
+	return BAR_MSG_OK;
+}
+/* Mark a sequence id reusable again (under the ring lock). */
+static void __bar_chan_msgid_free(uint16_t msg_id)
+{
+	pthread_spin_lock(&g_seqid_ring.lock);
+	g_seqid_ring.reps_info_tbl[msg_id].flag = REPS_INFO_FLAG_USABLE;
+	PMD_MSG_LOG(DEBUG, "free msg_id: %u", msg_id);
+	pthread_spin_unlock(&g_seqid_ring.lock);
+}
+/**************************************************************************/
+/* Each channel owns two subchannels laid out back to back in BAR space. */
+static uint64_t subchan_addr_cal(uint64_t virt_addr, uint8_t chan_id, uint8_t subchan_id)
+{
+	uint64_t slot = (uint64_t)2 * chan_id + subchan_id;
+
+	return virt_addr + slot * BAR_MSG_ADDR_CHAN_INTERVAL;
+}
+/* Resolve the TX subchannel address for the src/dst pair in @in. */
+static uint16_t __bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in, uint64_t *subchan_addr)
+{
+	uint8_t row = __bar_msg_src_index_trans(in->src);
+	uint8_t col = __bar_msg_dst_index_trans(in->dst);
+
+	*subchan_addr = subchan_addr_cal(in->virt_addr,
+			chan_id_tbl[row][col], subchan_id_tbl[row][col]);
+	return BAR_MSG_OK;
+}
+
+#define BAR_ALIGN_WORD_MASK  0xfffffffc
+/* Write one word-aligned 32-bit value into a subchannel window.
+ * Returns -1 (without writing) when the offset falls outside the window.
+ */
+static int __bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data)
+{
+	uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK);
+
+	if (unlikely(algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL)) {
+		/* %lx is wrong for uint64_t on 32-bit targets; use PRIx64 */
+		PMD_MSG_LOG(ERR, "write addr: 0x%" PRIx64 " + 0x%x", subchan_addr, algin_offset);
+		return -1;
+	}
+	/* volatile: this is a BAR (MMIO) access and must not be elided */
+	*(volatile uint32_t *)(subchan_addr + algin_offset) = data;
+	return 0;
+}
+/* Read one word-aligned 32-bit value from a subchannel window.
+ * Returns -1 (leaving *pdata untouched) when the offset is out of range.
+ */
+static int __bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset, uint32_t *pdata)
+{
+	uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK);
+
+	if (unlikely(algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL)) {
+		/* %lx is wrong for uint64_t on 32-bit targets; use PRIx64 */
+		PMD_MSG_LOG(ERR, "read addr: 0x%" PRIx64 " + 0x%x", subchan_addr, algin_offset);
+		return -1;
+	}
+	/* volatile: this is a BAR (MMIO) access and must not be elided */
+	*pdata = *(volatile uint32_t *)(subchan_addr + algin_offset);
+	return 0;
+}
+
+/* Serialise the message header into the first words of the subchannel. */
+static uint16_t __bar_chan_msg_header_set(uint64_t subchan_addr, struct bar_msg_header *msg_header)
+{
+	uint32_t *words = (uint32_t *)msg_header;
+	const uint16_t nwords = BAR_MSG_PLAYLOAD_OFFSET >> 2;
+	uint16_t ix;
+
+	for (ix = 0; ix < nwords; ix++)
+		__bar_chan_reg_write(subchan_addr, ix * 4, words[ix]);
+
+	return BAR_MSG_OK;
+}
+
+/* Deserialise the message header from the first words of the subchannel. */
+static uint16_t __bar_chan_msg_header_get(uint64_t subchan_addr, struct bar_msg_header *msg_header)
+{
+	uint32_t *words = (uint32_t *)msg_header;
+	const uint16_t nwords = BAR_MSG_PLAYLOAD_OFFSET >> 2;
+	uint16_t ix;
+
+	for (ix = 0; ix < nwords; ix++)
+		__bar_chan_reg_read(subchan_addr, ix * 4, &words[ix]);
+
+	return BAR_MSG_OK;
+}
+
+/* Copy @len payload bytes into the subchannel word by word; any trailing
+ * 1-3 bytes are packed lowest-byte-first into one final register write.
+ */
+static uint16_t __bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg, uint16_t len)
+{
+	uint32_t *data = (uint32_t *)msg;
+	uint32_t count = (len >> 2); /* 4B unit */
+	uint32_t ix;
+
+	for (ix = 0; ix < count; ix++)
+		__bar_chan_reg_write(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, *(data + ix));
+
+	/* not 4B align part */
+	uint32_t remain = (len & 0x3);
+
+	if (remain) {
+		uint32_t remain_data = 0;
+
+		/* assemble the leftover bytes little-endian into one word */
+		for (ix = 0; ix < remain; ix++)
+			remain_data |= *((uint8_t *)(msg + len - remain + ix)) << (8 * ix);
+
+		__bar_chan_reg_write(subchan_addr, 4 * count +
+				BAR_MSG_PLAYLOAD_OFFSET, remain_data);
+	}
+	return BAR_MSG_OK;
+}
+
+/* Copy @len payload bytes out of the subchannel; the trailing 1-3 bytes
+ * (if any) are unpacked lowest-byte-first from one final register read —
+ * the mirror of __bar_chan_msg_payload_set().
+ */
+static uint16_t __bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg, uint16_t len)
+{
+	uint32_t *data = (uint32_t *)msg;
+	uint32_t count = (len >> 2); /* 4B unit */
+	uint32_t ix;
+
+	for (ix = 0; ix < count; ix++)
+		__bar_chan_reg_read(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, (data + ix));
+
+	/* not 4B align part */
+	uint32_t remain = (len & 0x3);
+
+	if (remain) {
+		uint32_t remain_data = 0;
+
+		__bar_chan_reg_read(subchan_addr, 4 * count +
+				BAR_MSG_PLAYLOAD_OFFSET, &remain_data);
+		/* scatter the packed word back into the tail bytes */
+		for (ix = 0; ix < remain; ix++)
+			*((uint8_t *)(msg + (len - remain + ix))) = remain_data >> (8 * ix);
+
+	}
+	return BAR_MSG_OK;
+}
+
+#define BAR_MSG_VALID_MASK    1
+#define BAR_MSG_VALID_OFFSET  0
+/* Set or clear the channel-busy valid bit in word 0 of the subchannel. */
+static uint16_t __bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label)
+{
+	uint32_t word;
+
+	__bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &word);
+	word = (word & ~(uint32_t)BAR_MSG_VALID_MASK) | (uint32_t)valid_label;
+	__bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, word);
+	return BAR_MSG_OK;
+}
+
+#define BAR_MSG_CHAN_USABLE  0
+#define BAR_MSG_CHAN_USED    1
+/* Report whether the peer has consumed the message (valid bit clear). */
+static uint16_t __bar_msg_valid_stat_get(uint64_t subchan_addr)
+{
+	uint32_t word;
+
+	__bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &word);
+	return ((word & BAR_MSG_VALID_MASK) == BAR_MSG_CHAN_USABLE) ?
+			BAR_MSG_CHAN_USABLE : BAR_MSG_CHAN_USED;
+}
+
+#define READ_CHECK  1
+#if READ_CHECK
+/* scratch buffer used only by the read-back checks below */
+static uint8_t temp_msg[BAR_MSG_ADDR_CHAN_INTERVAL];
+#endif
+/* Write header then payload into the subchannel and finally raise the
+ * valid bit to hand the message to the peer.  With READ_CHECK enabled
+ * each write is immediately read back; the results are not compared —
+ * presumably the reads force the posted BAR writes to complete (confirm
+ * intent before removing).
+ */
+static uint16_t __bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr,
+					uint16_t payload_len, struct bar_msg_header *msg_header)
+{
+	__bar_chan_msg_header_set(subchan_addr, msg_header);
+#if READ_CHECK
+	__bar_chan_msg_header_get(subchan_addr, (struct bar_msg_header *)temp_msg);
+#endif
+	__bar_chan_msg_payload_set(subchan_addr, (uint8_t *)(payload_addr), payload_len);
+#if READ_CHECK
+	__bar_chan_msg_payload_get(subchan_addr, temp_msg, payload_len);
+#endif
+	/* publish last: the peer polls this bit */
+	__bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USED);
+	return BAR_MSG_OK;
+}
+#define BAR_MSG_POL_MASK    (0x10)
+#define BAR_MSG_POL_OFFSET  (4)
+/* Update the polling-tag bit in the subchannel control word. */
+static uint16_t __bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label)
+{
+	uint32_t word;
+
+	__bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &word);
+	word = (word & ~(uint32_t)BAR_MSG_POL_MASK) |
+			((uint32_t)label << BAR_MSG_POL_OFFSET);
+	__bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, word);
+	return BAR_MSG_OK;
+}
+/**
+ * Fun:
+ */
+#define REPS_HEADER_LEN_OFFSET      1
+#define REPS_HEADER_PAYLOAD_OFFSET  4
+#define REPS_HEADER_REPLYED         0xff
+/* Copy the peer's reply out of the subchannel into the caller's buffer:
+ * byte 0 receives the 0xff "replied" tag, bytes 1-2 the payload length,
+ * and the payload itself starts at byte 4.
+ */
+static uint16_t __bar_chan_sync_msg_reps_get(uint64_t subchan_addr,
+					uint64_t recv_buffer, uint16_t buffer_len)
+{
+	struct bar_msg_header msg_header;
+
+	__bar_chan_msg_header_get(subchan_addr, &msg_header);
+	uint16_t msg_id = msg_header.msg_id;
+	struct seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_id];
+
+	if (reps_info->flag != REPS_INFO_FLAG_USED) {
+		PMD_MSG_LOG(ERR, "msg_id %u unused", msg_id);
+		return BAR_MSG_ERR_REPLY;
+	}
+	uint16_t msg_len = msg_header.len;
+
+	/* both uint16_t operands promote to int, so buffer_len < 4 yields a
+	 * negative right-hand side and correctly fails this check (no
+	 * unsigned wraparound)
+	 */
+	if (msg_len > buffer_len - 4) {
+		PMD_MSG_LOG(ERR, "recv buffer len is: %u, but reply msg len is: %u",
+				buffer_len, msg_len + 4);
+		return BAR_MSG_ERR_REPSBUFF_LEN;
+	}
+	uint8_t *recv_msg = (uint8_t *)recv_buffer;
+
+	__bar_chan_msg_payload_get(subchan_addr, recv_msg + REPS_HEADER_PAYLOAD_OFFSET, msg_len);
+	*(uint16_t *)(recv_msg + REPS_HEADER_LEN_OFFSET) = msg_len;
+	*recv_msg = REPS_HEADER_REPLYED; /* set reps's valid */
+	return BAR_MSG_OK;
+}
+/* Validate every input of a synchronous send: endpoints, module id,
+ * payload and the reply buffer.  A too-small reply buffer is only warned
+ * about here; __bar_chan_sync_msg_reps_get() rejects it when the reply
+ * actually arrives.
+ */
+static int __bar_chan_send_para_check(struct zxdh_pci_bar_msg *in,
+					struct zxdh_msg_recviver_mem *result)
+{
+	if (in == NULL || result == NULL) {
+		PMD_MSG_LOG(ERR, "send para ERR: null para.");
+		return BAR_MSG_ERR_NULL_PARA;
+	}
+	uint8_t src_index = __bar_msg_src_index_trans(in->src);
+	uint8_t dst_index = __bar_msg_dst_index_trans(in->dst);
+
+	if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {
+		PMD_MSG_LOG(ERR, "send para ERR: chan doesn't exist.");
+		return BAR_MSG_ERR_TYPE;
+	}
+	if (in->module_id >= BAR_MSG_MODULE_NUM) {
+		PMD_MSG_LOG(ERR, "send para ERR: invalid module_id: %d.", in->module_id);
+		return BAR_MSG_ERR_MODULE;
+	}
+	if (in->payload_addr == NULL) {
+		PMD_MSG_LOG(ERR, "send para ERR: null message.");
+		return BAR_MSG_ERR_BODY_NULL;
+	}
+	if (in->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) {
+		PMD_MSG_LOG(ERR, "send para ERR: len %d is too long.", in->payload_len);
+		return BAR_MSG_ERR_LEN;
+	}
+	if (in->virt_addr == 0 || result->recv_buffer == NULL) {
+		PMD_MSG_LOG(ERR, "send para ERR: virt_addr or recv_buffer is NULL.");
+		return BAR_MSG_ERR_VIRTADDR_NULL;
+	}
+	if (result->buffer_len < REPS_HEADER_PAYLOAD_OFFSET)
+		/* PMD_MSG_LOG appends '\n' itself (original doubled it);
+		 * also fixes the "mininal" typo in the message
+		 */
+		PMD_MSG_LOG(ERR,
+			"recv buffer's len: %lu is shorter than minimal 4 bytes",
+			result->buffer_len);
+
+	return BAR_MSG_OK;
+}
+
+#define LOCK_TYPE_HARD  (1)
+#define LOCK_TYPE_SOFT  (0)
+/* [src][dst] -> whether the channel is guarded by the process-local
+ * soft spinlock or by the BAR hard spinlock shared with other agents.
+ */
+uint8_t lock_type_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {
+	{LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD},
+	{LOCK_TYPE_SOFT, LOCK_TYPE_SOFT, LOCK_TYPE_HARD},
+	{LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD}
+};
+
+#define PCIEID_IS_PF_MASK   (0x0800)
+#define PCIEID_PF_IDX_MASK  (0x0700)
+#define PCIEID_VF_IDX_MASK  (0x00ff)
+#define PCIEID_EP_IDX_MASK  (0x7000)
+/* PCIEID bit field offset */
+#define PCIEID_PF_IDX_OFFSET  (8)
+#define PCIEID_EP_IDX_OFFSET  (12)
+
+#define MAX_EP_NUM     (4)
+#define PF_NUM_PER_EP  (8)
+#define VF_NUM_PER_PF  (32)
+
+#define MULTIPLY_BY_8(x)    ((x) << 3)
+#define MULTIPLY_BY_32(x)   ((x) << 5)
+#define MULTIPLY_BY_256(x)  ((x) << 8)
+
+#define MAX_HARD_SPINLOCK_NUM        (511)
+#define MAX_HARD_SPINLOCK_ASK_TIMES  (1000)
+#define SPINLOCK_POLLING_SPAN_US     (100)
+
+/* Map (pcie_id, destination) to a hard-spinlock index.  RISC-facing
+ * channels use lock ids [8*ep + pf]; PF/VF-facing channels use the next
+ * bank offset by 8*(1 + MAX_EP_NUM).  Out-of-range results fall back to
+ * lock 0.
+ */
+static uint16_t pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst)
+{
+	uint16_t lock_id = 0;
+	uint16_t pf_idx = (src_pcieid & PCIEID_PF_IDX_MASK) >> PCIEID_PF_IDX_OFFSET;
+	uint16_t ep_idx = (src_pcieid & PCIEID_EP_IDX_MASK) >> PCIEID_EP_IDX_OFFSET;
+
+	switch (dst) {
+	/* msg to risc */
+	case MSG_CHAN_END_RISC:
+		lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx;
+		break;
+	/* msg to pf/vf */
+	case MSG_CHAN_END_VF:
+	case MSG_CHAN_END_PF:
+		lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + MULTIPLY_BY_8(1 + MAX_EP_NUM);
+		break;
+	default:
+		lock_id = 0;
+		break;
+	}
+	if (lock_id >= MAX_HARD_SPINLOCK_NUM)
+		lock_id = 0;
+
+	return lock_id;
+}
+
+/* Read the hard-spinlock byte for @lock_id (nonzero means held).
+ * NOTE(review): "spinklock" is a typo, kept because sibling code calls
+ * this name.
+ */
+static uint8_t spinklock_read(uint64_t virt_lock_addr, uint32_t lock_id)
+{
+	return *(volatile uint8_t *)((uint64_t)virt_lock_addr + (uint64_t)lock_id);
+}
+
+/* Write the hard-spinlock byte (0 releases the lock). */
+static void spinlock_write(uint64_t virt_lock_addr, uint32_t lock_id, uint8_t data)
+{
+	*(volatile uint8_t *)((uint64_t)virt_lock_addr + (uint64_t)lock_id) = data;
+}
+
+/* Record the 16-bit owner id for @lock_id in the label area. */
+static void label_write(uint64_t label_lock_addr, uint32_t lock_id, uint16_t value)
+{
+	*(volatile uint16_t *)(label_lock_addr + lock_id * 2) = value;
+}
+
+/* Acquire a HW spinlock.  The read itself takes the lock when it returns
+ * 0 (read-to-lock semantics, per the inline comment); the owner id is
+ * then recorded in the label area.  Polls up to
+ * MAX_HARD_SPINLOCK_ASK_TIMES times, SPINLOCK_POLLING_SPAN_US apart,
+ * before giving up with -1.
+ */
+static int32_t zxdh_spinlock_lock(uint32_t virt_lock_id, uint64_t virt_addr,
+					uint64_t label_addr, uint16_t master_id)
+{
+	uint32_t lock_rd_cnt = 0;
+
+	do {
+		/* read to lock */
+		uint8_t spl_val = spinklock_read(virt_addr, virt_lock_id);
+
+		if (spl_val == 0) {
+			label_write((uint64_t)label_addr, virt_lock_id, master_id);
+			break;
+		}
+		rte_delay_us_block(SPINLOCK_POLLING_SPAN_US);
+		lock_rd_cnt++;
+	} while (lock_rd_cnt < MAX_HARD_SPINLOCK_ASK_TIMES);
+	if (lock_rd_cnt >= MAX_HARD_SPINLOCK_ASK_TIMES)
+		return -1;
+
+	return 0;
+}
+
+/* Release a HW spinlock: clear the owner label, then the lock byte. */
+static int32_t zxdh_spinlock_unlock(uint32_t virt_lock_id, uint64_t virt_addr, uint64_t label_addr)
+{
+	label_write(label_addr, virt_lock_id, 0);
+	spinlock_write(virt_addr, virt_lock_id, 0);
+	return 0;
+}
+
+#define LOCK_MASTER_ID_MASK                (0x8000)
+/* bar offset */
+#define BAR0_CHAN_RISC_OFFSET              (0x2000)
+#define BAR0_CHAN_PFVF_OFFSET              (0x3000)
+#define BAR0_SPINLOCK_OFFSET               (0x4000)
+#define FW_SHRD_OFFSET                     (0x5000)
+#define FW_SHRD_INNER_HW_LABEL_PAT         (0x800)
+#define HW_LABEL_OFFSET                    (FW_SHRD_OFFSET + FW_SHRD_INNER_HW_LABEL_PAT)
+
+#define CHAN_RISC_SPINLOCK_OFFSET          (BAR0_SPINLOCK_OFFSET - BAR0_CHAN_RISC_OFFSET)
+#define CHAN_PFVF_SPINLOCK_OFFSET          (BAR0_SPINLOCK_OFFSET - BAR0_CHAN_PFVF_OFFSET)
+#define CHAN_RISC_LABEL_OFFSET             (HW_LABEL_OFFSET - BAR0_CHAN_RISC_OFFSET)
+#define CHAN_PFVF_LABEL_OFFSET             (HW_LABEL_OFFSET - BAR0_CHAN_PFVF_OFFSET)
+
+/* Take the inter-agent hard spinlock guarding the channel towards @dst. */
+static int bar_hard_lock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr)
+{
+	uint16_t lockid = pcie_id_to_hard_lock(src_pcieid, dst);
+	uint16_t owner = src_pcieid | LOCK_MASTER_ID_MASK;
+	uint64_t lock_base;
+	uint64_t label_base;
+
+	PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x lock, get hardlockid: %u\n", src_pcieid, lockid);
+	if (dst == MSG_CHAN_END_RISC) {
+		lock_base = virt_addr + CHAN_RISC_SPINLOCK_OFFSET;
+		label_base = virt_addr + CHAN_RISC_LABEL_OFFSET;
+	} else {
+		lock_base = virt_addr + CHAN_PFVF_SPINLOCK_OFFSET;
+		label_base = virt_addr + CHAN_PFVF_LABEL_OFFSET;
+	}
+	return zxdh_spinlock_lock(lockid, lock_base, label_base, owner);
+}
+
+/* Release the inter-agent hard spinlock taken by bar_hard_lock(). */
+static void bar_hard_unlock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr)
+{
+	uint16_t lockid = pcie_id_to_hard_lock(src_pcieid, dst);
+	uint64_t lock_base;
+	uint64_t label_base;
+
+	PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x unlock, get hardlockid: %u\n", src_pcieid, lockid);
+	if (dst == MSG_CHAN_END_RISC) {
+		lock_base = virt_addr + CHAN_RISC_SPINLOCK_OFFSET;
+		label_base = virt_addr + CHAN_RISC_LABEL_OFFSET;
+	} else {
+		lock_base = virt_addr + CHAN_PFVF_SPINLOCK_OFFSET;
+		label_base = virt_addr + CHAN_PFVF_LABEL_OFFSET;
+	}
+	zxdh_spinlock_unlock(lockid, lock_base, label_base);
+}
+/**
+ * Fun: PF init hard_spinlock addr
+ * @pcie_id: pf's pcie_id
+ * @bar_base_addr:
+ *
+ * Force-release both hard locks (RISC-facing and VF-facing) mapped to
+ * this PF's pcie_id, in case a previous owner died while holding them.
+ */
+int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr)
+{
+	uint64_t lock_base = bar_base_addr + BAR0_SPINLOCK_OFFSET;
+	uint64_t label_base = bar_base_addr + HW_LABEL_OFFSET;
+
+	zxdh_spinlock_unlock(pcie_id_to_hard_lock(pcie_id, MSG_CHAN_END_RISC),
+			lock_base, label_base);
+	zxdh_spinlock_unlock(pcie_id_to_hard_lock(pcie_id, MSG_CHAN_END_VF),
+			lock_base, label_base);
+	return 0;
+}
+
+/**
+ * Fun: lock the channel
+ */
+/* Soft lock shared by all channels that use LOCK_TYPE_SOFT.
+ * NOTE(review): no pthread_spin_init() call is visible in this file —
+ * confirm the lock is initialised elsewhere before first use.
+ */
+pthread_spinlock_t chan_lock;
+/* Serialise access to the src->dst channel: process-local spinlock for
+ * soft channels, BAR hard spinlock for channels shared across agents.
+ */
+static int bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid, uint64_t virt_addr)
+{
+	int ret = 0;
+	uint8_t src_index = __bar_msg_src_index_trans(src);
+	uint8_t dst_index = __bar_msg_dst_index_trans(dst);
+
+	if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {
+		PMD_MSG_LOG(ERR, "lock ERR: chan doesn't exist.\n");
+		return BAR_MSG_ERR_TYPE;
+	}
+	uint16_t idx = lock_type_tbl[src_index][dst_index];
+
+	if (idx == LOCK_TYPE_SOFT)
+		pthread_spin_lock(&chan_lock);
+	else
+		ret = bar_hard_lock(src_pcieid, dst, virt_addr);
+
+	if (ret != 0)
+		PMD_MSG_LOG(ERR, "dev: 0x%x failed to lock.\n", src_pcieid);
+
+	return ret;
+}
+/* Counterpart of bar_chan_lock(); hard-lock release reports no errors. */
+static int bar_chan_unlock(uint8_t src, uint8_t dst, uint16_t src_pcieid, uint64_t virt_addr)
+{
+	uint8_t row = __bar_msg_src_index_trans(src);
+	uint8_t col = __bar_msg_dst_index_trans(dst);
+
+	if (row == BAR_MSG_SRC_ERR || col == BAR_MSG_DST_ERR) {
+		PMD_MSG_LOG(ERR, "unlock ERR: chan doesn't exist.\n");
+		return BAR_MSG_ERR_TYPE;
+	}
+	if (lock_type_tbl[row][col] == LOCK_TYPE_SOFT)
+		pthread_spin_unlock(&chan_lock);
+	else
+		bar_hard_unlock(src_pcieid, dst, virt_addr);
+
+	return BAR_MSG_OK;
+}
+
+/* Debug dump of the message header sitting at @addr. */
+static void __bar_chan_msg_header_pr(uint64_t addr)
+{
+	const struct bar_msg_header *hdr = (const struct bar_msg_header *)addr;
+
+	PMD_MSG_LOG(DEBUG,
+		"valid:%u, msg_id:%u, mod_id:%u, ack:%u s_pcie:0x%x, d_pcie:0x%x.\n",
+		hdr->valid, hdr->msg_id, hdr->module_id, hdr->ack,
+		hdr->src_pcieid, hdr->dst_pcieid);
+}
+
+/**
+ * Fun:
+ */
+#define BAR_MSG_POLLING_SPAN     100 /* sleep us */
+#define BAR_MSG_POLL_CNT_PER_MS  (1 * 1000 / BAR_MSG_POLLING_SPAN)
+#define BAR_MSG_POLL_CNT_PER_S   (1 * 1000 * 1000 / BAR_MSG_POLLING_SPAN)
+#define BAR_MSG_TIMEOUT_TH       (10 * 1000 * 1000 / BAR_MSG_POLLING_SPAN) /* 10s */
+
+#define BAR_CHAN_MSG_SYNC     0
+#define BAR_CHAN_MSG_ASYNC    1
+#define BAR_CHAN_MSG_NO_EMEC  0
+#define BAR_CHAN_MSG_EMEC     1
+#define BAR_CHAN_MSG_NO_ACK   0
+#define BAR_CHAN_MSG_ACK      1
+/* Synchronous request/response over a BAR channel:
+ * check parameters, reserve a reply slot, lock the channel, write the
+ * message, poll (up to ~10 s) for the peer to clear the valid bit, then
+ * copy the reply into result->recv_buffer.  Returns BAR_MSG_OK or a
+ * BAR_MSG_ERR_* code.  The reply slot and channel lock are always
+ * released on the way out.
+ */
+int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result)
+{
+	uint16_t ret = __bar_chan_send_para_check(in, result);
+
+	if (ret != BAR_MSG_OK)
+		goto exit;
+
+	uint16_t seq_id;
+
+	/* reserve a reply slot; its id travels in the message header */
+	ret = __bar_chan_save_recv_info(result, &seq_id);
+	if (ret != BAR_MSG_OK)
+		goto exit;
+
+	uint64_t subchan_addr;
+
+	__bar_chan_subchan_addr_get(in, &subchan_addr);
+	/* fill */
+	struct bar_msg_header msg_header = {0};
+
+	msg_header.sync = BAR_CHAN_MSG_SYNC;
+	msg_header.emec = in->emec; /* 0x4 when set */
+	msg_header.usr  = 0;
+	msg_header.rsv  = 0;
+	msg_header.module_id  = in->module_id;
+	msg_header.len        = in->payload_len;
+	msg_header.msg_id     = seq_id;
+	msg_header.src_pcieid = in->src_pcieid;
+	msg_header.dst_pcieid = in->dst_pcieid;
+	/* serialise channel users before touching the subchannel */
+	ret = bar_chan_lock(in->src, in->dst, in->src_pcieid, in->virt_addr);
+	if (ret != BAR_MSG_OK) {
+		__bar_chan_msgid_free(seq_id);
+		goto exit;
+	}
+	__bar_chan_msg_send(subchan_addr, in->payload_addr, in->payload_len, &msg_header);
+	/* wait unset valid */
+	uint32_t time_out_cnt = 0;
+	uint16_t valid;
+
+	do {
+		rte_delay_us_block(BAR_MSG_POLLING_SPAN);
+		valid = __bar_msg_valid_stat_get(subchan_addr);
+		++time_out_cnt;
+		if ((time_out_cnt%BAR_MSG_POLL_CNT_PER_S) == 0) /* 1s/per_line */
+			PMD_MSG_LOG(INFO, "waiting %u ms", time_out_cnt/BAR_MSG_POLL_CNT_PER_MS);
+
+	} while ((time_out_cnt < BAR_MSG_TIMEOUT_TH) && (valid == BAR_MSG_CHAN_USED));
+	if (time_out_cnt/BAR_MSG_POLL_CNT_PER_MS > 10) /* 10ms */
+		PMD_MSG_LOG(INFO, "module_id: %s(%u) total waiting %u ms",
+			module_id_name(msg_header.module_id), msg_header.module_id,
+				time_out_cnt / BAR_MSG_POLL_CNT_PER_MS)
+	else
+		PMD_MSG_LOG(DEBUG, "module_id: %s(%u) total waiting %u ms",
+			module_id_name(msg_header.module_id), msg_header.module_id,
+				time_out_cnt / BAR_MSG_POLL_CNT_PER_MS);
+
+	if ((time_out_cnt == BAR_MSG_TIMEOUT_TH) && (valid != BAR_MSG_CHAN_USABLE)) {
+		/* timed out: forcibly reset the channel so it stays usable */
+		__bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE);
+		__bar_chan_msg_poltag_set(subchan_addr, 0);
+		PMD_MSG_LOG(ERR, "BAR MSG ERR: chan type time out.");
+		__bar_chan_msg_header_pr(subchan_addr);
+		ret = BAR_MSG_ERR_TIME_OUT;
+	} else {
+		ret = __bar_chan_sync_msg_reps_get(subchan_addr,
+					(uint64_t)result->recv_buffer, result->buffer_len);
+	}
+	__bar_chan_msgid_free(seq_id);
+	bar_chan_unlock(in->src, in->dst, in->src_pcieid, in->virt_addr);
+
+exit:
+	return ret;
+}
+
+/* Compute the mapped sub-channel address on which messages are received.
+ * NOTE(review): src is translated with the *dst* table and vice versa, and
+ * the sub-channel id is mirrored with '1 - subchan_id' - this looks
+ * intentional (the receive direction swaps the sender's perspective), but
+ * confirm against the channel layout tables.
+ * Returns 0 when either endpoint type is invalid.
+ */
+static uint64_t recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)
+{
+	uint8_t src = __bar_msg_dst_index_trans(src_type);
+	uint8_t dst = __bar_msg_src_index_trans(dst_type);
+
+	if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR)
+		return 0;
+
+	uint8_t chan_id = chan_id_tbl[dst][src];
+	uint8_t subchan_id = 1 - subchan_id_tbl[dst][src];
+
+	return subchan_addr_cal(virt_addr, chan_id, subchan_id);
+}
+
+/* Compute the sub-channel address a reply must be written to.
+ * For a sync message the reply goes back on the receive sub-channel; for an
+ * async message it goes on the opposite sub-channel ('1 - subchan_id' here
+ * undoes the mirror applied below, yielding the sender's own channel).
+ * Returns 0 when either endpoint type is invalid.
+ */
+static uint64_t reply_addr_get(uint8_t sync, uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)
+{
+	uint8_t src = __bar_msg_dst_index_trans(src_type);
+	uint8_t dst = __bar_msg_src_index_trans(dst_type);
+
+	if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR)
+		return 0;
+
+	uint8_t chan_id = chan_id_tbl[dst][src];
+	uint8_t subchan_id = 1 - subchan_id_tbl[dst][src];
+	uint64_t recv_rep_addr;
+
+	if (sync == BAR_CHAN_MSG_SYNC)
+		recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, subchan_id);
+	else
+		recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id);
+
+	return recv_rep_addr;
+}
+
+/* Per-module receive callbacks, indexed by bar module id. */
+zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[BAR_MSG_MODULE_NUM];
+/* Validate a received message header: valid flag set, module id in range,
+ * payload length within the channel limit, and a callback registered for
+ * the module. Returns BAR_MSG_OK or a BAR_MSG_ERR_* code.
+ */
+static uint16_t __bar_chan_msg_header_check(struct bar_msg_header *msg_header)
+{
+	if (msg_header->valid != BAR_MSG_CHAN_USED) {
+		PMD_MSG_LOG(ERR, "recv header ERR: valid label is not used.");
+		return BAR_MSG_ERR_MODULE;
+	}
+	uint8_t module_id = msg_header->module_id;
+
+	if (module_id >= (uint8_t)BAR_MSG_MODULE_NUM) {
+		PMD_MSG_LOG(ERR, "recv header ERR: invalid module_id: %u.", module_id);
+		return BAR_MSG_ERR_MODULE;
+	}
+	uint16_t len = msg_header->len;
+
+	if (len > BAR_MSG_PAYLOAD_MAX_LEN) {
+		PMD_MSG_LOG(ERR, "recv header ERR: invalid mesg len: %u.", len);
+		return BAR_MSG_ERR_LEN;
+	}
+	if (msg_recv_func_tbl[msg_header->module_id] == NULL) {
+		PMD_MSG_LOG(ERR, "recv header ERR: module:%s(%u) doesn't register",
+				module_id_name(module_id), module_id);
+		return BAR_MSG_ERR_MODULE_NOEXIST;
+	}
+	return BAR_MSG_OK;
+}
+
+/* Handle a received synchronous request: invoke the registered module
+ * callback and write its reply (header + payload) back to @reply_addr,
+ * then mark the reply channel usable so the sender's poll completes.
+ */
+static void __bar_msg_sync_msg_proc(uint64_t reply_addr, struct bar_msg_header *msg_header,
+					uint8_t *reciver_buff, void *dev)
+{
+	uint8_t *reps_buffer = malloc(BAR_MSG_PAYLOAD_MAX_LEN);
+
+	if (reps_buffer == NULL) {
+		/* Fix: failure was silent, leaving the sender to hit its
+		 * 10 s poll timeout with no hint why; log the error.
+		 */
+		PMD_MSG_LOG(ERR, "malloc reply buffer failed, module_id: %u.",
+				msg_header->module_id);
+		return;
+	}
+
+	zxdh_bar_chan_msg_recv_callback recv_func = msg_recv_func_tbl[msg_header->module_id];
+	uint16_t reps_len = 0;
+
+	recv_func(reciver_buff, msg_header->len, reps_buffer, &reps_len, dev);
+	msg_header->ack = BAR_CHAN_MSG_ACK;
+	msg_header->len = reps_len;
+	__bar_chan_msg_header_set(reply_addr, msg_header);
+	__bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len);
+	__bar_chan_msg_valid_set(reply_addr, BAR_MSG_CHAN_USABLE);
+	free(reps_buffer);
+}
+
+/* Complete an async message carrying an ack: copy the reply payload into
+ * the waiter's buffer recorded at send time, then release the seq id.
+ * Reply buffer layout: byte 0 = REPS_HEADER_REPLYED flag, bytes 1-2 =
+ * payload length, payload starting at byte 4 (presumably
+ * REPS_HEADER_PAYLOAD_OFFSET - confirm against the sync receive path).
+ */
+static void __bar_msg_ack_async_msg_proc(struct bar_msg_header *msg_header, uint8_t *reciver_buff)
+{
+	struct seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_header->msg_id];
+
+	if (reps_info->flag != REPS_INFO_FLAG_USED) {
+		PMD_MSG_LOG(ERR, "msg_id: %u is released", msg_header->msg_id);
+		return;
+	}
+	/* Fix: 'buffer_len - 4' underflowed when buffer_len < 4, making the
+	 * bounds check pass and overflowing the reply buffer; compare with
+	 * the addition on the (promoted) left-hand side instead.
+	 */
+	if ((uint32_t)msg_header->len + 4 > reps_info->buffer_len) {
+		PMD_MSG_LOG(ERR, "reps_buf_len is %u, but reps_msg_len is %u",
+				reps_info->buffer_len, msg_header->len + 4);
+		goto free_id;
+	}
+	uint8_t *reps_buffer = (uint8_t *)reps_info->reps_addr;
+
+	memcpy(reps_buffer + 4, reciver_buff, msg_header->len);
+	/* NOTE(review): 16-bit store at odd offset 1 is unaligned - verify
+	 * this matches the reader's expected layout on all targets.
+	 */
+	*(uint16_t *)(reps_buffer + 1) = msg_header->len;
+	*(uint8_t *)(reps_info->reps_addr) = REPS_HEADER_REPLYED;
+
+free_id:
+	__bar_chan_msgid_free(msg_header->msg_id);
+}
+
+/**
+ * Interrupt-context receive entry: fetch one message from the BAR channel
+ * and dispatch it to the registered module callback.
+ * @src/@dst: channel endpoint types (MSG_CHAN_END_*).
+ * @virt_addr: mapped BAR base address.
+ * @dev: opaque device pointer forwarded to the module callback.
+ * @return BAR_MSG_OK on success, -1 on failure.
+ */
+int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)
+{
+	uint64_t recv_addr = recv_addr_get(src, dst, virt_addr);
+
+	if (recv_addr == 0) {
+		PMD_MSG_LOG(ERR, "invalid driver type(src:%u, dst:%u).", src, dst);
+		return -1;
+	}
+	/* */
+	struct bar_msg_header msg_header;
+
+	__bar_chan_msg_header_get(recv_addr, &msg_header);
+	uint16_t ret = __bar_chan_msg_header_check(&msg_header);
+
+	if (ret != BAR_MSG_OK) {
+		PMD_MSG_LOG(ERR, "recv msg_head err, ret: %u.", ret);
+		return -1;
+	}
+	/* NOTE(review): msg_header.len may be 0 here; malloc(0) is
+	 * implementation-defined and may return NULL - confirm zero-length
+	 * payloads cannot occur, or handle them explicitly.
+	 */
+	uint8_t *recved_msg = malloc(msg_header.len);
+
+	if (recved_msg == NULL) {
+		PMD_MSG_LOG(ERR, "malloc temp buff failed.");
+		return -1;
+	}
+	__bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len);
+	/* */
+	uint64_t reps_addr = reply_addr_get(msg_header.sync, src, dst, virt_addr);
+
+	if (msg_header.sync == BAR_CHAN_MSG_SYNC) {
+		__bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev);
+		goto exit;
+	}
+	/* async path: release the channel first, then complete an ack if any */
+	__bar_chan_msg_valid_set(recv_addr, BAR_MSG_CHAN_USABLE);
+	if (msg_header.ack == BAR_CHAN_MSG_ACK) {
+		__bar_msg_ack_async_msg_proc(&msg_header, recved_msg);
+		goto exit;
+	} else {
+		/* TODO: async && not_ack msg process */
+	}
+
+exit:
+	free(recved_msg);
+	return BAR_MSG_OK;
+}
+
+/* Async send is not implemented yet; this stub always reports success.
+ * NOTE(review): callers cannot distinguish "sent" from "not implemented".
+ */
+int zxdh_bar_chan_async_msg_send(__rte_unused struct zxdh_pci_bar_msg *in,
+		__rte_unused struct zxdh_msg_recviver_mem *result)
+{
+	return BAR_MSG_OK;
+}
+
+/**
+ * Register @callback as the receive handler for @module_id.
+ * @return BAR_MSG_OK on success; BAR_MSG_ERR_MODULE, BAR_MEG_ERR_NULL_FUNC
+ * or BAR_MSG_ERR_REPEAT_REGISTER on invalid or duplicate registration.
+ */
+int zxdh_bar_chan_msg_recv_register(uint8_t module_id, zxdh_bar_chan_msg_recv_callback callback)
+{
+	zxdh_bar_chan_msg_recv_callback *slot = NULL;
+
+	if (module_id >= (uint16_t)BAR_MSG_MODULE_NUM) {
+		PMD_MSG_LOG(ERR, "register ERR: invalid module_id: %u.", module_id);
+		return BAR_MSG_ERR_MODULE;
+	}
+	if (callback == NULL) {
+		PMD_MSG_LOG(ERR, "register %s(%u) error: null callback.",
+			module_id_name(module_id), module_id);
+		return BAR_MEG_ERR_NULL_FUNC;
+	}
+	slot = &msg_recv_func_tbl[module_id];
+	if (*slot != NULL) {
+		PMD_MSG_LOG(INFO, "register warning, event:%s(%u) already be registered.",
+			module_id_name(module_id), module_id);
+		return BAR_MSG_ERR_REPEAT_REGISTER;
+	}
+	*slot = callback;
+	PMD_MSG_LOG(INFO, "register module: %s(%u) success.", module_id_name(module_id), module_id);
+	return BAR_MSG_OK;
+}
+
+/**
+ * Remove the receive handler registered for @module_id.
+ * @return BAR_MSG_OK on success; BAR_MSG_ERR_MODULE or BAR_MSG_ERR_UNGISTER
+ * when the id is out of range or no handler is registered.
+ */
+int zxdh_bar_chan_msg_recv_unregister(uint8_t module_id)
+{
+	if (module_id >= (uint16_t)BAR_MSG_MODULE_NUM) {
+		PMD_MSG_LOG(ERR, "unregister ERR: invalid module_id :%u.", module_id);
+		return BAR_MSG_ERR_MODULE;
+	}
+	if (msg_recv_func_tbl[module_id] == NULL) {
+		/* Fix: log-message typos ("wanning", "has already be"). */
+		PMD_MSG_LOG(INFO, "unregister warning, event: %s(%d) has already been unregistered.",
+			module_id_name(module_id), module_id);
+		return BAR_MSG_ERR_UNGISTER;
+	}
+	msg_recv_func_tbl[module_id] = NULL;
+	PMD_MSG_LOG(INFO, "unregister module %s(%d) success.",
+		module_id_name(module_id), module_id);
+	return BAR_MSG_OK;
+}
+/* Operation selector carried in tbl_msg_header.type. */
+enum TBL_MSG_TYPE {
+	TBL_TYPE_READ,
+	TBL_TYPE_WRITE,
+	TBL_TYPE_NON,
+};
+/**
+ * Fun: helpers for querying the resource table kept on the RISC side.
+ */
+/* Sum @len bytes starting at @ptr and return the low 16 bits; used as a
+ * lightweight checksum token to validate channel replies.
+ */
+static int bar_get_sum(uint8_t *ptr, uint8_t len)
+{
+	uint64_t total = 0;
+	const uint8_t *end = ptr + len;
+
+	while (ptr < end)
+		total += *ptr++;
+
+	return (uint16_t)total;
+}
+
+#define RSC_TBL_CONTENT_LEN_MAX  (257 * 2)
+/* Request header for resource-table read/write messages (8 bytes). */
+struct tbl_msg_header {
+	uint8_t  type;  /* r/w: enum TBL_MSG_TYPE */
+	uint8_t  field; /* which table? enum RES_TBL_FILED */
+	uint16_t pcieid;
+	uint16_t slen;
+	uint16_t rsv;
+}; /* 8B */
+/* Reply header preceding the table payload (4 bytes). */
+struct tbl_msg_reps_header {
+	uint8_t  check; /* TBL_MSG_PRO_SUCCESS on success */
+	uint8_t  rsv;
+	uint16_t len;   /* payload length following this header */
+}; /* 4B */
+#define TBL_MSG_PRO_SUCCESS  0xaa
+/**
+ * Read one resource-table field from the RISC side over the BAR channel.
+ * @dev:   channel parameters (pcie id, BAR virtual address, source type).
+ * @field: table field selector (enum RES_TBL_FILED value).
+ * @res:   output buffer for the field payload.
+ * @len:   output, payload length reported in the reply.
+ * @return BAR_MSG_OK on success, a BAR_MSG_ERR_* code otherwise.
+ */
+static int zxdh_get_res_info(struct zxdh_res_para *dev, uint8_t field, uint8_t *res, uint16_t *len)
+{
+	if (!res || !dev)
+		return BAR_MSG_ERR_NULL;
+
+	struct tbl_msg_header tbl_msg = {
+		.type = TBL_TYPE_READ,
+		.field = field,
+		.pcieid = dev->pcie_id,
+		.slen = 0,
+		.rsv = 0,
+	};
+
+	struct zxdh_pci_bar_msg in = {0};
+
+	in.virt_addr = dev->virt_addr;
+	in.payload_addr = &tbl_msg;
+	in.payload_len = sizeof(tbl_msg);
+	in.src = dev->src_type;
+	in.dst = MSG_CHAN_END_RISC;
+	in.module_id = BAR_MODULE_TBL;
+	in.src_pcieid = dev->pcie_id;
+
+	uint8_t recv_buf[RSC_TBL_CONTENT_LEN_MAX + 8] = {0};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = recv_buf,
+		.buffer_len = sizeof(recv_buf),
+	};
+	int ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+
+	if (ret != BAR_MSG_OK) {
+		PMD_MSG_LOG(ERR,
+			"send sync_msg failed. pcieid: 0x%x, ret: %d.\n", dev->pcie_id, ret);
+		return ret;
+	}
+	struct tbl_msg_reps_header *tbl_reps =
+		(struct tbl_msg_reps_header *)(recv_buf + REPS_HEADER_PAYLOAD_OFFSET);
+
+	if (tbl_reps->check != TBL_MSG_PRO_SUCCESS) {
+		/* Fix: 'ret' is BAR_MSG_OK (0) here, so returning it made a
+		 * failed lookup look successful to every caller; report an
+		 * explicit reply error and log the actual check value.
+		 */
+		PMD_MSG_LOG(ERR,
+			"get resource_field failed. pcieid: 0x%x, check: 0x%x.\n",
+			dev->pcie_id, tbl_reps->check);
+		return BAR_MSG_ERR_REPLY;
+	}
+	*len = tbl_reps->len;
+	memcpy(res,
+		(recv_buf + REPS_HEADER_PAYLOAD_OFFSET + sizeof(struct tbl_msg_reps_header)), *len);
+	return ret;
+}
+/* Resource-table field selectors understood by the RISC side.
+ * NOTE(review): tag spelling "FILED" looks like a typo for "FIELD"; left
+ * unchanged since the tag may be referenced elsewhere in the series.
+ */
+enum RES_TBL_FILED {
+	TBL_FIELD_PCIEID     = 0,
+	TBL_FIELD_BDF        = 1,
+	TBL_FIELD_MSGCH      = 2,
+	TBL_FIELD_DATACH     = 3,
+	TBL_FIELD_VPORT      = 4,
+	TBL_FIELD_PNLID      = 5,
+	TBL_FIELD_PHYPORT    = 6,
+	TBL_FIELD_SERDES_NUM = 7,
+	TBL_FIELD_NP_PORT    = 8,
+	TBL_FIELD_SPEED      = 9,
+	TBL_FIELD_HASHID     = 10,
+	TBL_FIELD_NON,
+};
+/* Query the panel id assigned to this function; BAR_MSG_OK or -1. */
+int zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id)
+{
+	uint8_t value = 0;
+	uint16_t value_len = 0;
+	int ret = zxdh_get_res_info(in, TBL_FIELD_PNLID, &value, &value_len);
+
+	if (ret != BAR_MSG_OK)
+		return -1;
+
+	*panel_id = value;
+	return BAR_MSG_OK;
+}
+/* Query the hash-search index assigned to this function; BAR_MSG_OK or -1. */
+int zxdh_get_res_hash_id(struct zxdh_res_para *in, uint8_t *hash_id)
+{
+	uint8_t value = 0;
+	uint16_t value_len = 0;
+	int ret = zxdh_get_res_info(in, TBL_FIELD_HASHID, &value, &value_len);
+
+	if (ret != BAR_MSG_OK)
+		return -1;
+
+	*hash_id = value;
+	return BAR_MSG_OK;
+}
+/**
+ * Fun: MSI-X vector binding handshake with the RISC side (BAR_MODULE_MISX).
+ */
+/* Request body for the MSI-X binding message: the vectors this function
+ * wants the peer to use for each interrupt source.
+ */
+struct msix_msg {
+	uint16_t pcie_id;
+	uint16_t vector_risc;
+	uint16_t vector_pfvf;
+	uint16_t vector_mpf;
+};
+/* private reps struct */
+struct bar_msix_reps {
+	uint16_t pcie_id;
+	uint16_t check; /* checksum token: byte sum of the request (see bar_get_sum) */
+	uint16_t vport;
+	uint16_t rsv;
+} __rte_packed; /* 8B */
+
+struct bar_offset_reps {
+	uint16_t check; /* checksum token: byte sum of the request */
+	uint16_t rsv;
+	uint32_t offset;
+	uint32_t length;
+} __rte_packed; /* 12B */
+
+/* Generic reply frame: 4-byte status followed by a module-specific body. */
+struct bar_recv_msg {
+	/* fix 4B */
+	uint8_t  reps_ok;
+	uint16_t reps_len;
+	uint8_t  rsv;
+	/* */
+	union {
+		struct bar_msix_reps   msix_reps;   /* 8B */
+		struct bar_offset_reps offset_reps; /* 12B */
+	} __rte_packed;
+} __rte_packed;
+/**
+ * Bind this function's MSI-X vectors with the RISC side and retrieve the
+ * vport assigned to the pcie function.
+ * @_msix_para: vector/pcie parameters; must not be NULL.
+ * @vport: output, vport id returned by the peer.
+ * @return BAR_MSG_OK on success, BAR_MSG_ERR_* (or negated send error).
+ */
+int zxdh_bar_chan_enable(struct msix_para *_msix_para, uint16_t *vport)
+{
+	/* Fix: sizeof yields size_t, so %zu is required (%lu is wrong on
+	 * ILP32 targets); these development dumps are demoted to DEBUG and
+	 * the discarded recv_addr_get() call (dead code) is removed.
+	 */
+	PMD_MSG_LOG(DEBUG, "sizeof(struct bar_msg_header) :%zu", sizeof(struct bar_msg_header));
+	PMD_MSG_LOG(DEBUG, "sizeof(struct bar_msix_reps)  :%zu", sizeof(struct bar_msix_reps));
+	PMD_MSG_LOG(DEBUG, "sizeof(struct bar_offset_reps):%zu", sizeof(struct bar_offset_reps));
+	PMD_MSG_LOG(DEBUG, "sizeof(struct bar_recv_msg)   :%zu", sizeof(struct bar_recv_msg));
+	PMD_MSG_LOG(DEBUG, "MSG_CHAN_END_RISC --> MSG_CHAN_END_PF");
+	PMD_MSG_LOG(DEBUG, "BAR_MSG_TIMEOUT:%d s", BAR_MSG_TIMEOUT_TH/BAR_MSG_POLL_CNT_PER_S);
+
+	if (!_msix_para)
+		return BAR_MSG_ERR_NULL;
+
+	/* build the vector-binding request */
+	struct msix_msg msix_msg = {
+		.pcie_id = _msix_para->pcie_id,
+		.vector_risc = _msix_para->vector_risc,
+		.vector_pfvf = _msix_para->vector_pfvf,
+		.vector_mpf = _msix_para->vector_mpf,
+	};
+	struct zxdh_pci_bar_msg in = {
+		.virt_addr = _msix_para->virt_addr,
+		.payload_addr = &msix_msg,
+		.payload_len = sizeof(msix_msg),
+		.emec = 0,
+		.src = _msix_para->driver_type,
+		.dst = MSG_CHAN_END_RISC,
+		.module_id = BAR_MODULE_MISX,
+		.src_pcieid = _msix_para->pcie_id,
+		.dst_pcieid = 0,
+		.usr = 0,
+	};
+	/* reply buffer */
+	struct bar_recv_msg recv_msg = {0};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = &recv_msg,
+		.buffer_len = sizeof(recv_msg),
+	};
+	/* send and wait synchronously */
+	int ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+
+	if (ret != BAR_MSG_OK)
+		return -ret;
+
+	/* the peer echoes a byte-sum of the request as a sanity token */
+	uint16_t check_token = recv_msg.msix_reps.check;
+	uint16_t sum_res = bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg));
+
+	if (check_token != sum_res) {
+		PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x.\n", sum_res, check_token);
+		return BAR_MSG_ERR_REPLY;
+	}
+	*vport = recv_msg.msix_reps.vport;
+	PMD_MSG_LOG(INFO, "vport of pcieid: 0x%x get success.\n", _msix_para->pcie_id);
+	return BAR_MSG_OK;
+}
+/**
+ * Fun: BAR offset/length query (BAR_MODULE_OFFSET_GET).
+ */
+/* Request body for BAR_MODULE_OFFSET_GET queries. */
+struct offset_get_msg {
+	uint16_t pcie_id;
+	uint16_t type;
+}; /* 4B */
+/**
+ * Query the BAR offset and length of a resource region from the RISC side.
+ * @paras: query parameters (pcie id, type, BAR virtual address).
+ * @res:   output, offset/length pair returned by the peer.
+ * @return BAR_MSG_OK on success, BAR_MSG_ERR_* (or negated send error).
+ */
+int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res)
+{
+	/* Fix: 'res' was dereferenced below without a NULL check while
+	 * 'paras' was checked; validate both output and input pointers.
+	 */
+	if (!paras || !res)
+		return BAR_MSG_ERR_NULL;
+
+	struct offset_get_msg send_msg = {
+		.pcie_id = paras->pcie_id,
+		.type = paras->type,
+	};
+	struct zxdh_pci_bar_msg in = {0};
+
+	in.payload_addr = &send_msg;
+	in.payload_len = sizeof(send_msg);
+	in.virt_addr = paras->virt_addr;
+	in.src = MSG_CHAN_END_PF;
+	in.dst = MSG_CHAN_END_RISC;
+	in.module_id = BAR_MODULE_OFFSET_GET;
+	in.src_pcieid = paras->pcie_id;
+	/* reply buffer */
+	struct bar_recv_msg recv_msg = {0};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = &recv_msg,
+		.buffer_len = sizeof(recv_msg),
+	};
+	/* send and wait synchronously */
+	int ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+
+	if (ret != BAR_MSG_OK)
+		return -ret;
+
+	/* the peer echoes a byte-sum of the request as a sanity token */
+	uint16_t check_token = recv_msg.offset_reps.check;
+	uint16_t sum_res = bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg));
+
+	if (check_token != sum_res) {
+		PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x.\n", sum_res, check_token);
+		return BAR_MSG_ERR_REPLY;
+	}
+	res->bar_offset = recv_msg.offset_reps.offset;
+	res->bar_length = recv_msg.offset_reps.length;
+	return BAR_MSG_OK;
+}
+/**
+ * Fun: module-level init/exit bookkeeping for the BAR message channel.
+ */
+/* Module-wide probe state: reference count plus one-shot resource init. */
+struct dev_stat {
+	bool is_mpf_scanned; /* not use */
+	bool is_res_init;    /* channel lock and seq-id ring initialized */
+	int16_t dev_cnt; /* probe cnt */
+};
+struct dev_stat g_dev_stat = {0};
+/**
+ * Per-probe channel init: the first caller initializes the channel spinlock
+ * and the sequence-id ring; later callers only bump the device refcount.
+ * NOTE(review): the refcount/init-flag updates are not protected by any
+ * lock - assumed to run only from the single-threaded probe path; confirm.
+ * @return BAR_MSG_OK always.
+ */
+int zxdh_msg_chan_init(void)
+{
+	g_dev_stat.dev_cnt++;
+	if (g_dev_stat.is_res_init)
+		return BAR_MSG_OK;
+
+	pthread_spin_init(&chan_lock, 0);
+	/* mark every sequence-id slot free */
+	g_seqid_ring.cur_id = 0;
+	pthread_spin_init(&g_seqid_ring.lock, 0);
+	uint16_t seq_id;
+
+	for (seq_id = 0; seq_id < BAR_SEQID_NUM_MAX; seq_id++) {
+		struct seqid_item *reps_info = &(g_seqid_ring.reps_info_tbl[seq_id]);
+
+		reps_info->id = seq_id;
+		reps_info->flag = REPS_INFO_FLAG_USABLE;
+	}
+	g_dev_stat.is_res_init = true;
+	return BAR_MSG_OK;
+}
+/**
+ * Fun: channel teardown, paired with zxdh_msg_chan_init().
+ */
+/**
+ * Per-remove channel teardown: drops the refcount and marks resources
+ * uninitialized once the last device is gone.
+ * NOTE(review): the spinlocks created in zxdh_msg_chan_init() are not
+ * destroyed here - harmless for pthread spinlocks on Linux, but verify.
+ * @return BAR_MSG_OK always.
+ */
+int zxdh_bar_msg_chan_exit(void)
+{
+	if (!g_dev_stat.is_res_init || (--g_dev_stat.dev_cnt > 0))
+		return BAR_MSG_OK;
+
+	g_dev_stat.is_res_init = false;
+	PMD_MSG_LOG(INFO, "success!");
+	return BAR_MSG_OK;
+}
diff --git a/drivers/net/zxdh/zxdh_msg_chan.h b/drivers/net/zxdh/zxdh_msg_chan.h
new file mode 100644
index 0000000000..2729188ec5
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_msg_chan.h
@@ -0,0 +1,380 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_MSG_CHAN_H_
+#define _ZXDH_MSG_CHAN_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_ethdev.h>
+#include "msg_chan_pub.h"
+#include "zxdh_tables.h"
+#include "zxdh_ethdev.h"
+#include "zxdh_common.h"
+#include "zxdh_mtr.h"
+/*
+ * The interface for communication among HOST, RISC-V and ZF drivers
+ * is as follows...
+ *
+ * COMMUNICATION THROUGH PKT CHANNEL
+ *
+ * Make sure you have allocated private queues and MSI-X interrupt for them.
+ * Then set callback of the vector with zxdh_pkt_chan_msg_recv().
+ * Choose the proper parameters and fill them in the zxdh_pkt_chan_msg_send().
+ * Enjoying communicating with others whenever you want.
+ */
+
+/* vec0  : dev  interrupt
+ * vec1~3: risc interrupt
+ * vec4  : dtb  interrupt
+ */
+#define ZXDH_DEV_INTR_VEC            0
+#define ZXDH_MSIX_INTR_MSG_VEC_BASE  1
+#define ZXDH_MSIX_INTR_MSG_VEC_NUM   3
+#define ZXDH_INTR_NONQUE_NUM   (ZXDH_MSIX_INTR_MSG_VEC_NUM+ZXDH_MSIX_INTR_DTB_VEC_NUM+1)
+/* MSI-X vector indices for the non-queue interrupt sources.
+ * NOTE(review): 'MSG_VEC' below declares a *variable* of anonymous enum
+ * type at file scope in a header; with -fno-common (GCC >= 10 default),
+ * including this header from more than one TU can fail to link - confirm
+ * whether the variable is used anywhere, otherwise drop the declarator.
+ */
+enum {
+	MSIX_FROM_PFVF = ZXDH_MSIX_INTR_MSG_VEC_BASE, /* 1 */
+	MSIX_FROM_MPF,   /* 2 */
+	MSIX_FROM_RISCV, /* 3 */
+	MSG_VEC_NUM      /* 4 */
+} MSG_VEC;
+
+#define ZXDH_MSIX_INTR_DTB_VEC      (ZXDH_MSIX_INTR_MSG_VEC_BASE+ZXDH_MSIX_INTR_MSG_VEC_NUM) /* 4 */
+#define ZXDH_MSIX_INTR_DTB_VEC_NUM  1
+#define ZXDH_QUE_INTR_VEC_BASE      (ZXDH_MSIX_INTR_DTB_VEC+ZXDH_MSIX_INTR_DTB_VEC_NUM) /* 5 */
+#define ZXDH_QUE_INTR_VEC_NUM       256
+
+#define ZXDH_PROMISC_MODE 1
+#define ZXDH_ALLMULTI_MODE 2
+#define ZXDH_FWVERS_LEN 32
+
+/* Destination selector for packet-channel messages. */
+enum MSG_TYPE {
+	/* loopback test type */
+	TYPE_DEBUG = 0,
+	DST_RISCV,
+	DST_MPF,
+	DST_PF_OR_VF,
+	DST_ZF,
+	MSG_TYPE_NUM,
+};
+
+/* Routing/descriptor header for a packet-channel message. */
+struct msg_header {
+	bool is_async;                    /* true: do not wait for a reply */
+	enum MSG_TYPE msg_type;           /* destination kind */
+	enum bar_module_id msg_module_id; /* receiving module */
+	uint8_t msg_priority;
+	uint16_t vport_dst;
+	uint16_t qid_dst;
+};
+
+#define MSG_CHAN_RET_ERR_RECV_FAIL              (-11)
+#define ZXDH_INDIR_RQT_SIZE 256
+#define MODULE_EEPROM_DATA_LEN 128
+/**
+ * Recv msg through msg_chan_pkt
+ * @dev: rte_eth_dev
+ * @mp: rte_mempool used to alloc pkts_tx for response msg
+ * @return zero for success, negative for failure
+ */
+int16_t zxdh_pkt_chan_msg_recv(const struct rte_eth_dev *dev, struct rte_mempool *mp);
+
+struct msg_chan_pkt_statics {
+	/* num of pkts sent in this module, include the num of dropped */
+	uint64_t num_tx;
+	/* num of pkts received in this module, include the num of dropped */
+	uint64_t num_rx;
+	/* num of pkts sent but dropped in this module */
+	uint64_t tx_drop;
+	/* num of pkts received but dropped in this module */
+	uint64_t rx_drop;
+};
+
+/**
+ * Acquire msg_chan_pkt statics
+ * @p_statics: msg_chan_pkt_statics
+ * @return zero for success, negative for failure
+ */
+int16_t zxdh_pkt_chan_statics_acquire(struct msg_chan_pkt_statics *p_statics);
+
+/**
+ * Init msg_chan_pkt in probe()
+ * @return zero for success, negative for failure
+ */
+int16_t zxdh_msg_chan_pkt_init(void);
+void zxdh_msg_chan_pkt_remove(void); /* Remove msg_chan_pkt in probe() */
+
+/* PF <-> VF message opcodes carried in zxdh_msg_head.msg_type.
+ * Values are part of the on-channel ABI: the explicit numbers are fixed;
+ * unnumbered entries continue from the previous value.
+ */
+enum zxdh_msg_type {
+	ZXDH_NULL = 0,
+	ZXDH_VF_PORT_INIT = 1,
+	ZXDH_VF_PORT_UNINIT = 2,
+	ZXDH_MAC_ADD = 3,
+	ZXDH_MAC_DEL = 4,
+	ZXDH_MAC_GET = 5,
+
+	ZXDH_RSS_ENABLE = 7,
+	ZXDH_RSS_RETA_SET = 8,
+	ZXDH_RSS_RETA_GET = 9,
+	ZXDH_RSS_RETA_DEL = 10,
+	ZXDH_RSS_KEY_SET = 11,
+	ZXDH_RSS_KEY_GET = 12,
+	ZXDH_RSS_FUNC_SET = 13,
+	ZXDH_RSS_FUNC_GET = 14,
+	ZXDH_RSS_HF_SET = 15,
+	ZXDH_RSS_HF_GET = 16,
+	ZXDH_VLAN_FILTER_SET = 17,
+	ZXDH_VLAN_FILTER_ADD,
+	ZXDH_VLAN_FILTER_DEL,
+	ZXDH_VLAN_FILTER_UNINIT,
+	ZXDH_VLAN_OFFLOAD = 21,
+
+	ZXDH_SET_TPID = 23,
+	ZXDH_VXLAN_OFFLOAD_ADD = 24,
+	ZXDH_PORT_ATTRS_SET = 25,
+	ZXDH_PORT_PROMISC_SET = 26,
+
+	ZXDH_GET_NP_STATS = 31,
+
+	ZXDH_PLCR_CAR_PROFILE_ID_ADD = 36,
+	ZXDH_PLCR_CAR_PROFILE_ID_DELETE = 37,
+	ZXDH_PLCR_CAR_PROFILE_CFG_SET,
+	ZXDH_PLCR_CAR_PROFILE_CFG_GET,
+	ZXDH_PLCR_CAR_QUEUE_CFG_SET,
+	ZXDH_PORT_METER_STAT_CLR,
+	ZXDH_PORT_METER_STAT_GET,
+
+	ZXDH_VXLAN_OFFLOAD_DEL,
+	ZXDH_VLAN_EXTEND_SET,
+
+	ZXDH_FUNC_END,
+} __rte_packed;
+
+struct zxdh_msg_head {
+	enum zxdh_msg_type msg_type;
+	uint16_t  vport;
+	uint16_t  vf_id;
+	uint16_t pcieid;
+} __rte_packed;
+
+struct zxdh_vf_init_msg {
+	uint8_t link_up;
+	uint16_t base_qid;
+	struct rte_ether_addr mac_addr;
+	uint32_t speed;
+	uint32_t autoneg_enable;
+	uint32_t sup_link_modes;
+	uint32_t adv_link_modes;
+	uint8_t hash_search_idx;
+	uint8_t duplex;
+	uint8_t phy_port;
+	uint8_t rss_enable;
+} __rte_packed;
+
+struct zxdh_rxfh_set_msg {
+	uint32_t queue_map[ZXDH_INDIR_RQT_SIZE];
+} __rte_packed;
+
+struct zxdh_plcr_profile_free {
+	uint8_t car_type;
+	uint8_t rsv;
+	uint16_t profile_id;
+} __rte_packed;
+
+struct zxdh_plcr_profile_cfg {
+	uint8_t car_type;/* 0 :carA ; 1:carB ;2 carC*/
+	uint8_t packet_mode;  /*0 bps  1 pps */
+	uint16_t hw_profile_id;
+	union zxdh_offload_profile_cfg plcr_param;
+} __rte_packed;
+
+struct zxdh_plcr_profile_add {
+	uint8_t car_type;/* 0 :carA ; 1:carB ;2 carC*/
+} __rte_packed;
+
+struct zxdh_pcie_msix_msg {
+	uint16_t num; /* the num of vf which will trigger intr */
+	uint16_t func_no[ZXDH_MAX_VF]; /* vfIdx (pf:bit0~3, vf:bit7~15) */
+} __rte_packed;
+
+struct agent_msg_head {
+	enum zxdh_agent_opc op_code;
+	uint8_t panel_id;
+	uint8_t phyport;
+	uint8_t rsv;
+	uint16_t vf_id;
+	uint16_t pcie_id;
+} __rte_packed;
+
+
+struct zxdh_fc_param {
+	uint8_t   fc_mode;     /* eg     1<<SPM_FC_PAUSE_RX    only rx enable   */
+} __rte_packed;
+
+
+struct link_info_msg_body {
+	uint8_t autoneg;
+	uint8_t link_state;
+	uint8_t blink_enable;
+	uint8_t duplex;
+	uint32_t speed_modes;
+	uint32_t speed;
+} __rte_packed;
+
+struct zxdh_plcr_flow_cfg {
+	uint8_t car_type;  /* 0:carA; 1:carB; 2:carC */
+	uint8_t drop_flag; /* default */
+	uint8_t plcr_en;   /* 1:bind, 0:unbind */
+	uint8_t rsv;
+	uint16_t flow_id;
+	uint16_t profile_id;
+} __rte_packed;
+
+struct zxdh_mtr_stats_query {
+	uint8_t direction;
+	uint8_t is_clr;
+};
+
+struct agent_mac_module_eeprom_msg {
+	uint8_t i2c_addr;
+	uint8_t bank;
+	uint8_t page;
+	uint8_t offset;
+	uint8_t length;
+	uint8_t data[MODULE_EEPROM_DATA_LEN];
+} __rte_packed;
+
+#define ZXDH_MSG_HEAD_LEN 8
+#define ZXDH_MSG_REQ_BODY_MAX_LEN	(BAR_MSG_PAYLOAD_MAX_LEN - ZXDH_MSG_HEAD_LEN)
+/* Request frame sent over the channel: an 8-byte head (driver or agent
+ * form) followed by a module-specific body; the byte arrays pad each union
+ * to its fixed wire size.
+ */
+struct zxdh_msg_info {
+	union {
+		uint8_t head_len[ZXDH_MSG_HEAD_LEN];
+		struct zxdh_msg_head  msg_head;
+		struct agent_msg_head agent_head;
+	};
+	union {
+		uint8_t  datainfo[ZXDH_MSG_REQ_BODY_MAX_LEN];
+		struct zxdh_mac_filter zxdh_mac_filter;
+		struct zxdh_port_attr_set_msg port_attr_set_msg;
+		struct zxdh_port_promisc_msg port_promisc_msg;
+		struct zxdh_rxfh_set_msg rxfh_set_msg;
+		struct zxdh_vf_init_msg vf_init_msg;
+		struct zxdh_rss_enable rss_enable;
+		struct zxdh_rss_key zxdh_rss_key;
+		struct zxdh_rss_hf zxdh_rss_hf;
+		struct zxdh_rss_reta zxdh_rss_reta;
+		struct zxdh_vlan_filter zxdh_vlan_filter;
+		struct zxdh_vlan_filter_set zxdh_vlan_filter_set;
+		struct zxdh_vlan_offload zxdh_vlan_offload;
+		struct zxdh_vlan_pvid zxdh_vlan_pvid;
+		struct zxdh_vlan_tpid zxdh_vlan_tpid;
+		struct zxdh_vxlan_port zxdh_vxlan_port;
+		struct zxdh_port_attr zxdh_port_attr;
+		struct zxdh_plcr_profile_add zxdh_plcr_profile_add;
+		struct zxdh_plcr_profile_free zxdh_plcr_profile_free;
+		struct zxdh_plcr_profile_cfg zxdh_plcr_profile_cfg;
+		struct zxdh_plcr_flow_cfg  zxdh_plcr_flow_cfg;
+		struct zxdh_mtr_stats_query  zxdh_mtr_stats_query;
+		struct link_info_msg_body link_msg_body;
+		struct zxdh_pcie_msix_msg pcie_msix_msg;
+		struct agent_mac_module_eeprom_msg module_eeprom_msg;
+		struct zxdh_fc_param zxdh_fc_param;
+	} __rte_packed data;
+} __rte_packed;
+#define ZXDH_MSG_REPLY_BODY_MAX_LEN     (BAR_MSG_PAYLOAD_MAX_LEN-sizeof(struct zxdh_msg_reply_head))
+/* Fixed 4-byte header preceding zxdh_msg_reply_body on the channel. */
+struct zxdh_msg_reply_head {
+	uint8_t flag;      /* completion status */
+	uint16_t reps_len; /* length of the body that follows */
+	uint8_t resvd;
+} __rte_packed;
+
+struct zxdh_mac_reply_msg {
+	struct rte_ether_addr mac_addr;
+};
+
+struct zxdh_mtr_profile_info {
+	uint16_t profile_id;
+};
+enum zxdh_reps_flag {
+	ZXDH_REPS_FAIL,
+	ZXDH_REPS_SUCC = 0xaa,
+} __rte_packed;
+
+struct port_link_info {
+	uint32_t link_speed;
+	uint8_t  link_info;
+} __rte_packed;
+
+enum agent_resp {
+	AGENT_RESP_FAIL,
+	AGENT_RESP_SUCC = 0xaa,
+} __rte_packed;
+
+struct zxdh_riscv_rsp {
+	union {
+		struct zxdh_hw_stats port_hw_stats;
+		struct port_link_info  port_link_info;
+	} __rte_packed;
+} __rte_packed;
+
+struct zxdh_hw_stats_data {
+	uint64_t n_pkts_dropped;
+	uint64_t n_bytes_dropped;
+};
+
+struct zxdh_hw_np_stats {
+	uint64_t np_rx_broadcast;
+	uint64_t np_tx_broadcast;
+	uint64_t np_rx_mtu_drop_pkts;
+	uint64_t np_tx_mtu_drop_pkts;
+	uint64_t np_rx_mtu_drop_bytes;
+	uint64_t np_tx_mtu_drop_bytes;
+	uint64_t np_rx_mtr_drop_pkts;
+	uint64_t np_tx_mtr_drop_pkts;
+	uint64_t np_rx_mtr_drop_bytes;
+	uint64_t np_tx_mtr_drop_bytes;
+};
+
+struct agent_flash_msg {
+	uint8_t firmware_version[ZXDH_FWVERS_LEN];
+} __rte_packed;
+
+
+#define ZXDH_MSG_REPLYBODY_HEAD  sizeof(enum zxdh_reps_flag)
+#define BAR_MOUDLE_MAC_MSG_HEADER_SIZE 4
+struct zxdh_msg_reply_body {
+	enum zxdh_reps_flag flag;
+	union {
+		uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)];
+		struct zxdh_mac_reply_msg mac_reply_msg;
+		struct zxdh_rss_key rss_key_msg;
+		struct zxdh_rss_hf rss_hf_msg;
+		struct zxdh_rss_reta rss_reta_msg;
+		struct zxdh_riscv_rsp riscv_rsp;
+		struct zxdh_hw_mtr_stats hw_mtr_stats;
+		struct zxdh_hw_np_stats hw_stats;
+		struct zxdh_mtr_profile_info  mtr_profile_info;
+		struct link_info_msg_body link_msg;
+		struct zxdh_pcie_msix_msg msix_msg;
+		struct agent_flash_msg flash_msg;
+		struct agent_mac_module_eeprom_msg module_eeprom_msg;
+		struct zxdh_fc_param zxdh_fc_param;
+	} __rte_packed;
+} __rte_packed;
+
+/* Full reply frame: fixed head followed by the module-specific body. */
+struct zxdh_msg_reply_info {
+	struct zxdh_msg_reply_head reply_head;
+	struct zxdh_msg_reply_body reply_body;
+} __rte_packed;
+
+/* Handler signature for PF-side processing of VF requests; the table
+ * proc_func[] is indexed by enum zxdh_msg_type.
+ */
+typedef int (*process_func)(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+	struct zxdh_msg_reply_body *res_info, uint16_t *res_len);
+
+extern process_func proc_func[];
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZXDH_MSG_CHAN_H_  */
diff --git a/drivers/net/zxdh/zxdh_mtr.c b/drivers/net/zxdh/zxdh_mtr.c
new file mode 100644
index 0000000000..65f52e8c6a
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_mtr.c
@@ -0,0 +1,916 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_bitops.h>
+#include <rte_eal_memconfig.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_rwlock.h>
+#include <rte_bus_pci.h>
+#include <rte_mtr_driver.h>
+#include <rte_tailq.h>
+
+#include "zxdh_ethdev_ops.h"
+#include "zxdh_msg_chan.h"
+#include "zxdh_ethdev.h"
+#include "zxdh_mtr.h"
+
+/* Hardware flow and profile resources are allocated per EP. Allocating
+ * them per port, as done here, is not strictly correct and is for test
+ * only: we assume a single PF per EP and a single process, so the PF
+ * performs the init and the resources are then handed out per port.
+ */
+/* Allocate a hardware policer profile id.
+ * A PF allocates directly from the hardware pool; a VF requests one
+ * from its PF over the message channel and reads the id from the reply.
+ * On success *hw_profile_id holds a valid id (< HW_PROFILE_MAX).
+ * Returns 0 on success, negative errno on failure ('error' populated).
+ */
+static int zxdh_hw_profile_alloc(struct rte_eth_dev *dev, uint16_t *hw_profile_id,
+				struct rte_mtr_error *error)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int ret = 0;
+
+	if (hw->is_pf)
+		ret = zxdh_hw_profile_alloc_direct(hw->vport.vport, CAR_A, hw_profile_id, error);
+	else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", hw->vport.vport);
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_msg_reply_info reply_info = {0};
+		struct zxdh_plcr_profile_add *zxdh_plcr_profile_add =
+				&msg_info.data.zxdh_plcr_profile_add;
+
+		zxdh_plcr_profile_add->car_type = CAR_A;
+		msg_head_build(hw, ZXDH_PLCR_CAR_PROFILE_ID_ADD, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+				ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_profile_add),
+				&reply_info, sizeof(struct zxdh_msg_reply_info));
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to send msg: port 0x%x msg type ZXDH_PLCR_CAR_PROFILE_ID_ADD",
+				hw->vport.vport);
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+					"Meter offload alloc profile  id msg failed ");
+		}
+		*hw_profile_id = reply_info.reply_body.mtr_profile_info.profile_id;
+		/* The PF reports allocation failure with the HW_PROFILE_MAX sentinel. */
+		if (*hw_profile_id == HW_PROFILE_MAX) {
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+					"Meter offload alloc profile  id invalid  ");
+		}
+	}
+	PMD_DRV_LOG(INFO, " alloc profile id %d ret %d\n", *hw_profile_id, ret);
+	return ret;
+}
+
+/* Push the policer parameters of 'mp' into hardware profile
+ * 'hw_profile_id' (directly on a PF, via the message channel on a VF).
+ * Returns 0 on success, negative errno on failure ('error' populated).
+ */
+static int  zxdh_hw_profile_config(struct rte_eth_dev *dev, uint16_t hw_profile_id,
+				struct zxdh_meter_profile *mp, struct rte_mtr_error *error)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int ret = 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_hw_profile_config_direct(CAR_A, hw_profile_id, mp, error);
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", hw->vport.vport);
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_msg_reply_info reply_info = {0};
+		struct zxdh_plcr_profile_cfg *zxdh_plcr_profile_cfg =
+				&msg_info.data.zxdh_plcr_profile_cfg;
+
+		zxdh_plcr_profile_cfg->car_type = CAR_A;
+		zxdh_plcr_profile_cfg->packet_mode = mp->profile.packet_mode;
+		zxdh_plcr_profile_cfg->hw_profile_id = hw_profile_id;
+		rte_memcpy(&zxdh_plcr_profile_cfg->plcr_param, &mp->plcr_param,
+			sizeof(zxdh_plcr_profile_cfg->plcr_param));
+		msg_head_build(hw, ZXDH_PLCR_CAR_PROFILE_CFG_SET, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+				ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_profile_cfg),
+				&reply_info, sizeof(struct zxdh_msg_reply_info));
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed msg: port 0x%x msg type ZXDH_PLCR_CAR_PROFILE_CFG_SET",
+				hw->vport.vport);
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					"Meter offload cfg profile failed ");
+		}
+	}
+	PMD_DRV_LOG(INFO, " config  profile id %d  ret %d\n", hw_profile_id, ret);
+	return ret;
+}
+
+/* Release a hardware policer profile id.
+ * A PF frees it directly; a VF forwards the request to its PF over the
+ * message channel.
+ * Returns 0 on success, negative errno on failure ('error' populated).
+ * Fix: the function was declared uint16_t, which silently truncated the
+ * negative error code produced by -rte_mtr_error_set(); return int so
+ * failures keep their sign.  The only caller ignores the return value,
+ * so the change is backward-compatible.
+ */
+static int zxdh_hw_profile_free(struct rte_eth_dev *dev, uint8_t car_type,
+					uint16_t hw_profile_id, struct rte_mtr_error *error)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int ret = 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_hw_profile_free_direct(hw->vport.vport,
+					car_type, (uint64_t)hw_profile_id, error);
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", hw->vport.vport);
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_msg_reply_info reply_info = {0};
+		struct zxdh_plcr_profile_free *zxdh_plcr_profile_free =
+					&msg_info.data.zxdh_plcr_profile_free;
+
+		zxdh_plcr_profile_free->profile_id = hw_profile_id;
+		zxdh_plcr_profile_free->car_type = car_type;
+		msg_head_build(hw, ZXDH_PLCR_CAR_PROFILE_ID_DELETE, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+				ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_profile_free),
+				&reply_info, sizeof(struct zxdh_msg_reply_info));
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed msg: port 0x%x msg type ZXDH_PLCR_CAR_PROFILE_ID_DELETE",
+				hw->vport.vport);
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+					"Meter free  profile failed ");
+		}
+	}
+	PMD_DRV_LOG(INFO, " free  hw_profile_id %d ret %d\n", hw_profile_id, ret);
+	return ret;
+}
+
+/* Take a reference on a hardware policer profile.
+ * Returns 0 on success, -1 when the id is out of range.
+ * Fix: log the refcount snapshot taken while the lock is held; the old
+ * code re-read the counter after unlocking, which could log a value
+ * changed by a concurrent ref/unref.
+ */
+int zxdh_hw_profile_ref(uint16_t hw_profile_id)
+{
+	int refcnt;
+
+	if (hw_profile_id >= HW_PROFILE_MAX)
+		return  -1;
+
+	rte_spinlock_lock(&g_mtr_res.hw_plcr_res_lock);
+	refcnt = ++g_mtr_res.hw_profile_refcnt[hw_profile_id];
+	rte_spinlock_unlock(&g_mtr_res.hw_plcr_res_lock);
+
+	PMD_DRV_LOG(INFO, "inc ref hw_profile_id  %d ref %d\n",
+		hw_profile_id, refcnt);
+	return 0;
+}
+
+/* Drop a reference on a hardware policer profile and free the hardware
+ * resource when the count reaches zero.
+ * Returns 0 on success, -1 on invalid id or refcount underflow.
+ * Fix: the original returned from the underflow branch with
+ * hw_plcr_res_lock still held, dead-locking every later profile
+ * ref/unref; release the lock on all paths.
+ */
+int zxdh_hw_profile_unref(struct rte_eth_dev *dev, uint8_t car_type,
+		uint16_t hw_profile_id, struct rte_mtr_error *error)
+{
+	if (hw_profile_id >= HW_PROFILE_MAX)
+		return  -1;
+
+	rte_spinlock_lock(&g_mtr_res.hw_plcr_res_lock);
+	PMD_DRV_LOG(INFO, "to del hw profile id %d  curref %d",
+		hw_profile_id, g_mtr_res.hw_profile_refcnt[hw_profile_id]);
+	if (g_mtr_res.hw_profile_refcnt[hw_profile_id] == 0) {
+		PMD_DRV_LOG(ERR, "del hw profile id %d  but ref 0", hw_profile_id);
+		rte_spinlock_unlock(&g_mtr_res.hw_plcr_res_lock);
+		return -1;
+	}
+	if (--g_mtr_res.hw_profile_refcnt[hw_profile_id] == 0) {
+		PMD_DRV_LOG(INFO, "del hw profile id %d ", hw_profile_id);
+		zxdh_hw_profile_free(dev, car_type, hw_profile_id, error);
+	}
+	rte_spinlock_unlock(&g_mtr_res.hw_plcr_res_lock);
+	return 0;
+}
+
+/* Bind or unbind (per mtr->enable) policer flow 'hw_flow_id' to the
+ * meter's hardware profile.  A PF programs hardware directly; a VF
+ * forwards the request to its PF.
+ * Returns 0 on success, negative errno on failure ('error' populated).
+ */
+static int zxdh_hw_plcrflow_config(struct rte_eth_dev *dev, uint16_t hw_flow_id,
+				struct zxdh_mtr_object *mtr, struct rte_mtr_error *error)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int ret = 0;
+
+	if (hw->is_pf) {
+		uint64_t hw_profile_id = (uint64_t)mtr->profile->hw_profile_id;
+
+		ret = dpp_stat_car_queue_cfg_set(0, CAR_A, hw_flow_id,
+					1, mtr->enable, hw_profile_id);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "dpp_stat_car_queue_cfg_set failed flowid %d  profile id %d",
+							hw_flow_id, mtr->profile->hw_profile_id);
+			/* Stray ';' after this return removed. */
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+					NULL, "Failed to  bind  plcr flow.");
+		}
+	} else {
+		PMD_DRV_LOG(INFO, "port 0x%x cfg  hw_flow_id %d hw_profile_id %d\n",
+			hw->vport.vport, hw_flow_id, mtr->profile->hw_profile_id);
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_msg_reply_info reply_info = {0};
+		struct zxdh_plcr_flow_cfg *zxdh_plcr_flow_cfg = &msg_info.data.zxdh_plcr_flow_cfg;
+
+		zxdh_plcr_flow_cfg->car_type = CAR_A;
+		zxdh_plcr_flow_cfg->flow_id = hw_flow_id;
+		zxdh_plcr_flow_cfg->drop_flag = 1;
+		zxdh_plcr_flow_cfg->plcr_en = mtr->enable;
+		zxdh_plcr_flow_cfg->profile_id = mtr->profile->hw_profile_id;
+		msg_head_build(hw, ZXDH_PLCR_CAR_QUEUE_CFG_SET, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+				ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_plcr_flow_cfg),
+				&reply_info, sizeof(struct zxdh_msg_reply_info));
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed msg: port 0x%x msg type ZXDH_PLCR_CAR_QUEUE_CFG_SET",
+				hw->vport.vport);
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+					NULL, "Failed to  bind  plcr flow.");
+		}
+	}
+	PMD_DRV_LOG(INFO, "  %s plcr flow %d to profile %d  ok\n",
+		mtr->enable ? "bind":"unbind", hw_flow_id, mtr->profile->hw_profile_id);
+	return ret;
+}
+/*
+ * Meter statistics helpers.
+ */
+/* Read, and optionally clear, the meter drop counters for one direction
+ * of this port.  A PF reads hardware directly; a VF queries its PF over
+ * the message channel and merges the 32-bit halves of the reply.
+ * Returns 0 on success, negative errno on failure ('error' populated).
+ */
+static int zxdh_mtr_hw_counter_query(struct rte_eth_dev *dev, bool clear, bool dir,
+				struct zxdh_mtr_stats *mtr_stats, struct rte_mtr_error *error)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int ret = 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_mtr_stats_get(hw->vport.vport, dir, mtr_stats);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"ZXDH_PORT_METER_STAT_GET port %u dir %d failed",
+				hw->vport.vport, dir);
+			return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_STATS,
+					NULL, "Failed to bind plcr flow.");
+		}
+	} else { /* send msg to pf */
+		PMD_DRV_LOG(INFO, "port 0x%x Send to pf\n", hw->vport.vport);
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_msg_reply_info reply_info = {0};
+		struct zxdh_mtr_stats_query *zxdh_mtr_stats_query =
+					&msg_info.data.zxdh_mtr_stats_query;
+
+		zxdh_mtr_stats_query->direction = dir;
+		zxdh_mtr_stats_query->is_clr = !!clear;
+		msg_head_build(hw, ZXDH_PORT_METER_STAT_GET, &msg_info);
+		/* The request length must cover the query payload in addition to
+		 * the header, as every other VF->PF message in this file does;
+		 * sending only the header would truncate direction/is_clr.
+		 */
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+					ZXDH_MSG_HEAD_LEN + sizeof(struct zxdh_mtr_stats_query),
+					&reply_info, sizeof(struct zxdh_msg_reply_info));
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to send msg: port 0x%x msg type ZXDH_PORT_METER_STAT_GET",
+				hw->vport.vport);
+			return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_STATS,
+					NULL, "Meter offload alloc profile failed");
+		}
+		struct zxdh_hw_mtr_stats *hw_mtr_stats = &reply_info.reply_body.hw_mtr_stats;
+
+		/* Counters arrive as little-endian 32-bit halves; merge to 64-bit. */
+		mtr_stats->n_bytes_dropped =
+			(uint64_t)(rte_le_to_cpu_32(hw_mtr_stats->n_bytes_dropped_hi)) << 32 |
+			rte_le_to_cpu_32(hw_mtr_stats->n_bytes_dropped_lo);
+		mtr_stats->n_pkts_dropped =
+			(uint64_t)(rte_le_to_cpu_32(hw_mtr_stats->n_pkts_dropped_hi)) << 32 |
+			rte_le_to_cpu_32(hw_mtr_stats->n_pkts_dropped_lo);
+	}
+	/* %lx is wrong for uint64_t on 32-bit targets; use PRIx64. */
+	PMD_DRV_LOG(INFO, "get mtr stats ok, droppkt 0x%" PRIx64 "  dropbyte 0x%" PRIx64 "\n",
+			mtr_stats->n_pkts_dropped, mtr_stats->n_bytes_dropped);
+	return ret;
+}
+/*
+ * Profile offload helpers.
+ */
+/* Attach the profile's parameters to a hardware profile: reuse an
+ * existing hw profile with identical params owned by the same PF,
+ * otherwise allocate and configure a new one; then take a reference.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int zxdh_mtr_profile_offload(struct rte_eth_dev *dev, struct zxdh_meter_profile *mp,
+				struct rte_mtr_meter_profile *profile,
+	struct rte_mtr_error *error)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t hw_profile_owner_vport = GET_OWNER_PF_VPORT(hw->vport.vport);
+	int ret;
+
+	mp->hw_profile_owner_vport = hw_profile_owner_vport;
+	uint16_t hw_profile_id = check_hw_profile_exist(&zxdh_shared_data->meter_profile_list,
+								profile, hw_profile_owner_vport);
+
+	if (hw_profile_id == HW_PROFILE_MAX) {
+		PMD_DRV_LOG(INFO, "to alloc hw_profile_id\n");
+		/* Fix: 'ret' was uint32_t, mangling negative error codes. */
+		ret = zxdh_hw_profile_alloc(dev, &hw_profile_id, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "hw_profile alloc fail\n");
+			return ret;
+		}
+
+		plcr_param_build(profile, &mp->plcr_param, hw_profile_id);
+		ret = zxdh_hw_profile_config(dev, hw_profile_id, mp, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "zxdh_hw_profile_config fail\n");
+			/* Fix: release the freshly allocated hw profile instead of
+			 * leaking it when configuration fails.
+			 */
+			zxdh_hw_profile_free(dev, CAR_A, hw_profile_id, error);
+			return ret;
+		}
+	}
+	zxdh_hw_profile_ref(hw_profile_id);
+	mp->hw_profile_id = hw_profile_id;
+	PMD_DRV_LOG(INFO, "use hw_profile_id %u mp %p  hw_profile_owner_vport %x\n",
+			hw_profile_id, mp, hw_profile_owner_vport);
+	return 0;
+}
+
+/* Release one reference on a software meter profile.
+ * ref_cnt == 0: profile was never committed; just return it to the pool.
+ * ref_cnt == 1: last user - drop the hw profile reference, unlink from
+ *               the shared list and return the object to the mempool.
+ * ref_cnt  > 1: still used by meters; only log.
+ */
+static void zxdh_mtr_profile_res_free(struct rte_eth_dev *dev,
+				struct rte_mempool *mtr_profile_mp,
+				struct zxdh_meter_profile *meter_profile,
+				struct rte_mtr_error *error)
+{
+	PMD_DRV_LOG(INFO, "to free profile %d ref %d ",
+		meter_profile->meter_profile_id, meter_profile->ref_cnt);
+	if (meter_profile->ref_cnt == 0) {
+		PMD_DRV_LOG(INFO, "free profile mp %p", meter_profile);
+		MP_FREE_OBJ_FUNC(mtr_profile_mp, meter_profile);
+		return;
+	}
+	if (meter_profile->ref_cnt == 1) {
+		meter_profile->ref_cnt--;
+		zxdh_hw_profile_unref(dev, CAR_A, meter_profile->hw_profile_id, error);
+		/* Remove from list. */
+		PMD_DRV_LOG(INFO, "free profile  id %d  rm mp %p",
+			meter_profile->meter_profile_id, meter_profile);
+		TAILQ_REMOVE(&zxdh_shared_data->meter_profile_list, meter_profile, next);
+		PMD_DRV_LOG(INFO, "free profile mp %p", meter_profile);
+		MP_FREE_OBJ_FUNC(mtr_profile_mp, meter_profile);
+	} else {
+		PMD_DRV_LOG(INFO, "profile %d  ref  %d is busy",
+			meter_profile->meter_profile_id, meter_profile->ref_cnt);
+	}
+}
+/* Pop one meter object from the shared mempool; NULL when exhausted. */
+static struct zxdh_mtr_object *zxdh_mtr_obj_alloc(struct rte_mempool *mtr_mp)
+{
+	struct zxdh_mtr_object *obj = NULL;
+
+	if (MP_ALLOC_OBJ_FUNC(mtr_mp, obj) == 0)
+		return obj;
+
+	return NULL;
+}
+
+/* Drop a reference on a meter object; when the count reaches zero,
+ * unlink it from the shared meter list and return it to the mempool.
+ * Also drops one reference on the attached policy/profile, if any.
+ * Fix: the original logged profile->ref_cnt unconditionally after
+ * checking 'profile' for NULL (potential NULL dereference), and
+ * NULL-checked 'mtr_obj' only after it had already been dereferenced.
+ */
+static void zxdh_mtr_obj_free(struct rte_eth_dev *dev, struct zxdh_mtr_object *mtr_obj)
+{
+	struct zxdh_mtr_list *mtr_list = &zxdh_shared_data->mtr_list;
+	struct rte_mempool *mtr_mp = zxdh_shared_data->mtr_mp;
+
+	PMD_DRV_LOG(INFO, "free port %d dir %d meter %d  mtr refcnt:%d ....",
+		dev->data->port_id, mtr_obj->direction, mtr_obj->meter_id, mtr_obj->mtr_ref_cnt);
+
+	if (mtr_obj->policy)
+		mtr_obj->policy->ref_cnt--;
+
+	if (mtr_obj->profile) {
+		mtr_obj->profile->ref_cnt--;
+		PMD_DRV_LOG(INFO, "free port %d dir %d meter %d  profile refcnt:%d ",
+			dev->data->port_id, mtr_obj->direction,
+			mtr_obj->meter_id, mtr_obj->profile->ref_cnt);
+	}
+
+	if (--mtr_obj->mtr_ref_cnt == 0) {
+		PMD_DRV_LOG(INFO, "rm  mtr %p refcnt:%d ....", mtr_obj, mtr_obj->mtr_ref_cnt);
+		/* Remove from the  meter list. */
+		TAILQ_REMOVE(mtr_list, mtr_obj, next);
+		MP_FREE_OBJ_FUNC(mtr_mp, mtr_obj);
+	}
+}
+
+/* Program (bind or unbind, per mtr->enable) the policer flow that
+ * corresponds to this meter's vfid and direction.
+ */
+static int zxdh_mtr_flow_offlad(struct rte_eth_dev *dev,
+				struct zxdh_mtr_object *mtr,
+				struct rte_mtr_error *error)
+{
+	uint16_t flow_id = zxdh_hw_flow_id_get(mtr->vfid, mtr->direction);
+
+	return zxdh_hw_plcrflow_config(dev, flow_id, mtr, error);
+}
+
+/* Look up a meter object by meter id and owning DPDK port; NULL if absent. */
+static struct zxdh_mtr_object *
+zxdh_mtr_find(uint32_t meter_id, uint16_t dpdk_portid)
+{
+	struct zxdh_mtr_list *list = &zxdh_shared_data->mtr_list;
+	struct zxdh_mtr_object *cur;
+
+	TAILQ_FOREACH(cur, list, next) {
+		PMD_DRV_LOG(INFO, "mtrlist head %p  mtr %p mtr->meterid %d to find mtrid %d",
+			TAILQ_FIRST(list), cur, cur->meter_id, meter_id);
+		if (cur->meter_id == meter_id && cur->port_id == dpdk_portid)
+			return cur;
+	}
+
+	return NULL;
+}
+
+/**
+ * Callback to add MTR profile.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] meter_profile_id
+ *   Meter profile id.
+ * @param[in] profile
+ *   Pointer to meter profile detail.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+/* rte_mtr meter_profile_add callback: validate, allocate and fill a
+ * software profile, offload it to a hardware profile and publish it on
+ * the shared profile list.
+ * Fix: the duplicate-id error message read "Meter profile  is exists.".
+ */
+static int
+zxdh_meter_profile_add(struct rte_eth_dev *dev,
+				uint32_t meter_profile_id,
+				struct rte_mtr_meter_profile *profile,
+				struct rte_mtr_error *error)
+{
+	struct zxdh_meter_profile *mp;
+	int ret;
+
+	/* Check input params. */
+	ret = zxdh_mtr_profile_validate(meter_profile_id, profile, error);
+	if (ret)
+		return ret;
+
+	/* Reject duplicate profile ids on the same port. */
+	mp = zxdh_mtr_profile_find_by_id(&zxdh_shared_data->meter_profile_list,
+			meter_profile_id, dev->data->port_id);
+	if (mp) {
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Meter profile already exists.");
+	}
+
+	/* Meter profile memory allocation. */
+	mp = zxdh_mtr_profile_res_alloc(zxdh_shared_data->mtr_profile_mp);
+	if (mp == NULL) {
+		return -rte_mtr_error_set(error, ENOMEM,
+					RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					NULL, "Meter profile res memory alloc  failed.");
+	}
+	memset(mp, 0, sizeof(struct zxdh_meter_profile));
+	/* Fill profile info. */
+	mp->meter_profile_id = meter_profile_id;
+	mp->dpdk_port_id = dev->data->port_id;
+	mp->hw_profile_id = UINT16_MAX; /* not yet bound to a hw profile */
+	rte_memcpy(&mp->profile, profile, sizeof(struct rte_mtr_meter_profile));
+
+	/* Bind to (or allocate) the backing hardware profile. */
+	ret = zxdh_mtr_profile_offload(dev, mp, profile, error);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "port %d profile id %d offload failed",
+			dev->data->port_id, meter_profile_id);
+		goto error;
+	}
+	/* Add to list. */
+	TAILQ_INSERT_TAIL(&zxdh_shared_data->meter_profile_list, mp, next);
+	PMD_DRV_LOG(INFO, "add profile id %d mp %p  mp->ref_cnt %d",
+		meter_profile_id, mp, mp->ref_cnt);
+	mp->ref_cnt++;
+
+	return 0;
+error:
+	zxdh_mtr_profile_res_free(dev, zxdh_shared_data->mtr_profile_mp, mp, error);
+	return ret;
+}
+
+/**
+ * Callback to delete MTR profile.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] meter_profile_id
+ *   Meter profile id.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+zxdh_meter_profile_delete(struct rte_eth_dev *dev,
+				uint32_t meter_profile_id,
+				struct rte_mtr_error *error)
+{
+	/* Profile must exist on this port before it can be deleted. */
+	struct zxdh_meter_profile *profile =
+		zxdh_mtr_profile_find_by_id(&zxdh_shared_data->meter_profile_list,
+				meter_profile_id, dev->data->port_id);
+
+	if (profile == NULL) {
+		PMD_DRV_LOG(INFO, "del profile id %d  unfind ", meter_profile_id);
+		return -rte_mtr_error_set(error, ENOENT,
+						RTE_MTR_ERROR_TYPE_METER_PROFILE,
+						&meter_profile_id,
+						 "Meter profile id is not exists.");
+	}
+
+	PMD_DRV_LOG(INFO, "del profile id %d   mp %p mp->ref_cnt %d",
+		meter_profile_id, profile, profile->ref_cnt);
+	/* Drops one reference; frees sw/hw resources when unused. */
+	zxdh_mtr_profile_res_free(dev, zxdh_shared_data->mtr_profile_mp, profile, error);
+	return 0;
+}
+
+/* rte_mtr meter_policy_add callback: only the default (drop-on-red)
+ * action set is accepted; the policy is stored on the shared list for
+ * later binding to a meter.
+ * Fix: "allic"/"exists." log and error message typos.
+ */
+static int zxdh_meter_policy_add(struct rte_eth_dev *dev,
+		uint32_t policy_id,
+		struct rte_mtr_meter_policy_params *policy,
+		struct rte_mtr_error *error)
+{
+	int ret = 0;
+	struct zxdh_meter_policy *mtr_policy = NULL;
+
+	if (policy_id >= ZXDH_MAX_POLICY_NUM)
+		return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					NULL, "policy ID is invalid. ");
+	mtr_policy = zxdh_mtr_policy_find_by_id(&zxdh_shared_data->mtr_policy_list,
+					policy_id, dev->data->port_id);
+	if (mtr_policy)
+		return -rte_mtr_error_set(error, EEXIST,
+					RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					NULL, "policy ID already exists. ");
+	ret = zxdh_policy_validate_actions(policy->actions, error);
+	if (ret) {
+		return -rte_mtr_error_set(error, ENOTSUP,
+				RTE_MTR_ERROR_TYPE_METER_POLICY,
+				NULL, "  only supports def action.");
+	}
+
+	mtr_policy = zxdh_mtr_policy_res_alloc(zxdh_shared_data->mtr_policy_mp);
+	if (mtr_policy == NULL) {
+		return -rte_mtr_error_set(error, ENOMEM,
+					RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					NULL, "Meter policy res memory alloc  failed.");
+	}
+	/* Fill policy info. */
+	memset(mtr_policy, 0, sizeof(struct zxdh_meter_policy));
+	mtr_policy->policy_id = policy_id;
+	mtr_policy->dpdk_port_id = dev->data->port_id;
+	rte_memcpy(&mtr_policy->policy, policy, sizeof(struct rte_mtr_meter_policy_params));
+	/* Add to list. */
+	TAILQ_INSERT_TAIL(&zxdh_shared_data->mtr_policy_list, mtr_policy, next);
+	mtr_policy->ref_cnt++;
+	PMD_DRV_LOG(INFO, "alloc policy id %d ok %p ", mtr_policy->policy_id, mtr_policy);
+	return 0;
+}
+
+/**
+ * Delete a meter policy.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] policy_id
+ *   Policy id to delete.
+ * @param[out] error
+ *   Pointer to rte meter error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int zxdh_meter_policy_delete(struct rte_eth_dev *dev,
+				uint32_t policy_id, struct rte_mtr_error *error)
+{
+	struct zxdh_meter_policy *mtr_policy = NULL;
+
+	if (policy_id >= ZXDH_MAX_POLICY_NUM)
+		return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					NULL, "policy ID is invalid. ");
+	mtr_policy = zxdh_mtr_policy_find_by_id(&zxdh_shared_data->mtr_policy_list,
+					policy_id, dev->data->port_id);
+
+	/* ref_cnt == 1 means only the list's own reference remains. */
+	if (mtr_policy && (mtr_policy->ref_cnt == 1)) {
+		PMD_DRV_LOG(INFO, "free policy id %d %p ", mtr_policy->policy_id, mtr_policy);
+		TAILQ_REMOVE(&zxdh_shared_data->mtr_policy_list, mtr_policy, next);
+		MP_FREE_OBJ_FUNC(zxdh_shared_data->mtr_policy_mp, mtr_policy);
+	} else {
+		if (mtr_policy) {
+			/* NOTE(review): deleting a policy still referenced by a meter
+			 * is silently skipped yet reports success - confirm this is
+			 * intended rather than returning -EBUSY.
+			 */
+			PMD_DRV_LOG(INFO, " policy id %d ref %d is busy",
+				mtr_policy->policy_id, mtr_policy->ref_cnt);
+		} else {
+			PMD_DRV_LOG(ERR, " policy id %d  is not exist ", policy_id);
+			return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					NULL, "policy ID is  not exist. ");
+		}
+	}
+	return 0;
+}
+
+/**
+ * Check meter validation.
+ *
+ * @param[in] meter_id    Meter id.
+ * @param[in] params       Pointer to rte meter parameters.
+ * @param[out] error     Pointer to rte meter error structure.
+ ** @return   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+/* Sanity-check the rte_mtr_params handed to meter create. */
+static int
+zxdh_meter_validate(uint32_t meter_id,
+			struct rte_mtr_params *params,
+			struct rte_mtr_error *error)
+{
+	if (!params)
+		return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+					NULL, "Meter object params null.");
+
+	/* Chaining from a previous meter's color is not supported. */
+	if (params->use_prev_mtr_color)
+		return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+					NULL,
+					"Previous meter color not supported.");
+
+	/* Only the lower half of the meter id space is usable. */
+	if (meter_id > MAX_MTR_NUM/2)
+		return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+				NULL,
+				" meter id exceed 1024 unsupport ");
+
+	return 0;
+}
+
+/* Map traffic direction (0 = ingress, 1 = egress) to the port-attr
+ * mode used to toggle metering via ZXDH_PORT_ATTRS_SET.
+ */
+static uint32_t dir_to_mtr_mode[] = {
+	EGR_FLAG_INGRESS_METER_EN_OFF,
+	EGR_FLAG_EGRESS_METER_EN_OFF,
+};
+
+/* Enable/disable metering for one direction of the port by updating the
+ * port attribute (directly via the handler table on a PF, over the
+ * message channel on a VF), then mirror the state into the priv flags.
+ * Returns 0 on success, negative errno on failure ('error' populated).
+ */
+static int set_mtr_enable(struct rte_eth_dev *dev, uint8_t dir,
+				bool enable, struct rte_mtr_error *error)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	int ret = 0;
+
+	if (priv->is_pf) {
+		/* Fix: zero-init so the handler never sees stale stack data in
+		 * the fields other than mode/value.
+		 */
+		struct zxdh_port_attr_set_msg attr_msg = {0};
+
+		attr_msg.mode = dir_to_mtr_mode[dir];
+		attr_msg.value  = enable;
+		ret = proc_func[ZXDH_PORT_ATTRS_SET](priv, priv->vport.vport,
+				(void *)&attr_msg, NULL, 0);
+	} else {
+		struct zxdh_msg_info msg_info = {0};
+		struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_set_msg;
+
+		attr_msg->mode  = dir_to_mtr_mode[dir];
+		attr_msg->value = enable;
+		msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info,
+			sizeof(struct zxdh_msg_head) + sizeof(struct zxdh_port_attr_set_msg),
+			NULL, 0);
+	}
+	if (ret) {
+		PMD_DRV_LOG(ERR, " port %d  mtr enable failed", priv->port_id);
+		return -rte_mtr_error_set(error, EEXIST,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					"Meter  enable failed.");
+	}
+	/* Cache the per-direction enable state for later queries. */
+	if (dir == INGRESS)
+		priv->i_mtr_en = !!enable;
+	else
+		priv->e_mtr_en = !!enable;
+
+	return ret;
+}
+/* Translate rte_mtr params into the driver meter action: keep the
+ * requested stats mask and drop RED-colored packets.
+ */
+static void build_actions(struct zxdh_meter_action  *mtr_action,
+								struct rte_mtr_params *params)
+{
+	mtr_action->action[RTE_COLOR_RED] = MTR_POLICER_ACTION_DROP;
+	mtr_action->stats_mask = params->stats_mask;
+}
+
+
+/* Return 1 when this port already has a meter bound in 'dir', else 0. */
+static int check_port_mtr_binded(struct rte_eth_dev *dev, uint32_t dir)
+{
+	struct zxdh_mtr_object *cur;
+
+	TAILQ_FOREACH(cur, &zxdh_shared_data->mtr_list, next) {
+		if (cur->direction == dir && cur->port_id == dev->data->port_id) {
+			PMD_DRV_LOG(INFO, " port %d dir %d already bind meter %d .",
+				dev->data->port_id, dir, cur->meter_id);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Create meter rules.
+ *
+ * @param[in] dev    Pointer to Ethernet device.
+ * @param[in] meter_id    Meter id.
+ * @param[in] params    Pointer to rte meter parameters.
+ * @param[in] shared    Meter shared with other flow or not.
+ * @param[out] error    Pointer to rte meter error structure.
+ * @return    0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+zxdh_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
+				struct rte_mtr_params *params, int shared,
+				struct rte_mtr_error *error)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_mtr_list *mtr_list = &zxdh_shared_data->mtr_list;
+	struct zxdh_mtr_object *mtr;
+	struct zxdh_meter_profile *mtr_profile;
+	struct zxdh_meter_policy *mtr_policy;
+	uint8_t dir = 0; /* params->direction - 1; dir:  0 --ingress; 1--egress*/
+	int ret;
+
+	/* Shared meters are not supported by this PMD. */
+	if (shared) {
+		return -rte_mtr_error_set(error, ENOTSUP,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					"Meter share is not supported");
+	}
+
+	/* Validate the parameters. */
+	ret = zxdh_meter_validate(meter_id, params, error);
+	if (ret)
+		return ret;
+
+	/* At most one meter per port and direction. */
+	if (check_port_mtr_binded(dev, dir)) {
+		return -rte_mtr_error_set(error, EEXIST,
+				RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+				"Meter object already bind to dev.");
+
+	}
+
+	mtr_profile = zxdh_mtr_profile_find_by_id(&zxdh_shared_data->meter_profile_list,
+						params->meter_profile_id, dev->data->port_id);
+	/* Validate meter profile  id.*/
+	if (mtr_profile == NULL)
+		return -rte_mtr_error_set(error, EEXIST,
+				RTE_MTR_ERROR_TYPE_METER_PROFILE, &params->meter_profile_id,
+				"Meter profile object is not exists.");
+	/* Pin the profile while this meter exists. */
+	mtr_profile->ref_cnt++;
+	PMD_DRV_LOG(INFO, "find profile %d %p ref %d ok",
+		params->meter_profile_id, mtr_profile, mtr_profile->ref_cnt);
+
+	mtr_policy = zxdh_mtr_policy_find_by_id(&zxdh_shared_data->mtr_policy_list,
+					params->meter_policy_id, dev->data->port_id);
+	/* Validate meter policy  id.*/
+	if (mtr_policy == NULL) {
+		ret = -rte_mtr_error_set(error, EEXIST,
+					RTE_MTR_ERROR_TYPE_METER_PROFILE, &params->meter_policy_id,
+					"Meter policy object is not exists.");
+		/* Undo the profile pin taken above. */
+		mtr_profile->ref_cnt--;
+		return ret;
+	}
+	mtr_policy->ref_cnt++;
+	PMD_DRV_LOG(INFO, "find policy %d mtr_policy->ref_cnt %d ok",
+		params->meter_policy_id, mtr_policy->ref_cnt);
+
+	/* Allocate the flow meter memory. */
+	mtr = zxdh_mtr_obj_alloc(zxdh_shared_data->mtr_mp);
+	if (mtr == NULL) {
+		ret = -rte_mtr_error_set(error, ENOMEM,
+					RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					"Memory alloc failed for meter.");
+		/* Undo both pins taken above. */
+		mtr_policy->ref_cnt--;
+		mtr_profile->ref_cnt--;
+		return ret;
+	}
+	memset(mtr, 0, sizeof(struct zxdh_mtr_object));
+	/* Fill the flow meter parameters. */
+	mtr->meter_id = meter_id;
+	mtr->profile = mtr_profile;
+
+	build_actions(&mtr->mtr_action, params);
+	TAILQ_INSERT_TAIL(mtr_list, mtr, next);
+	mtr->enable = !!params->meter_enable;
+	mtr->shared = !!shared;
+	mtr->mtr_ref_cnt++;
+	mtr->vfid = priv->vfid;
+	mtr->port_id = dev->data->port_id;
+	mtr->policy = mtr_policy;
+	mtr->direction = !!dir;
+	/* Program the hardware policer flow, then flip the port attribute.
+	 * On any failure zxdh_mtr_obj_free() unwinds the refcounts and the
+	 * list insertion done above.
+	 */
+	if (params->meter_enable) {
+		ret = zxdh_mtr_flow_offlad(dev, mtr, error);
+		if (ret)
+			goto error;
+	}
+	ret = set_mtr_enable(dev, mtr->direction, 1, error);
+	if (ret)
+		goto error;
+
+	return ret;
+error:
+	zxdh_mtr_obj_free(dev, mtr);
+	return ret;
+}
+
+/**
+ * Destroy meter rules.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] meter_id
+ *   Meter id.
+ * @param[out] error
+ *   Pointer to rte meter error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+/* rte_mtr destroy callback: disable, unbind the policer flow and drop
+ * the meter object reference.
+ * Fixes: report ENOENT (not EEXIST) for a missing meter id, and
+ * propagate the errno from the flow unbind instead of a bare -1
+ * (the callee has already populated 'error').
+ */
+static int
+zxdh_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
+			struct rte_mtr_error *error)
+{
+	struct zxdh_mtr_object *mtr;
+	int ret;
+
+	/* Meter object must exist. */
+	mtr = zxdh_mtr_find(meter_id, dev->data->port_id);
+	if (mtr == NULL)
+		return -rte_mtr_error_set(error, ENOENT,
+					  RTE_MTR_ERROR_TYPE_MTR_ID,
+					  NULL, "Meter object id not valid.");
+	mtr->enable = 0;
+	set_mtr_enable(dev, mtr->direction, 0, error);
+
+	ret = zxdh_mtr_flow_offlad(dev, mtr, error);
+	if (ret)
+		return ret;
+
+	zxdh_mtr_obj_free(dev, mtr);
+	return 0;
+}
+
+/* rte_mtr stats_read callback: only dropped packet/byte counters are
+ * implemented, as advertised through *stats_mask.
+ * Fix: the old error path called rte_mtr_error_set() with the negative
+ * 'ret' as the errno code, corrupting rte_errno and flipping the sign
+ * of the return value; the query helper already populated 'error', so
+ * simply propagate its result.
+ */
+static int zxdh_mtr_stats_read(struct rte_eth_dev *dev,
+								uint32_t mtr_id,
+								struct rte_mtr_stats *stats,
+								uint64_t *stats_mask,
+								int clear,
+								struct rte_mtr_error *error)
+{
+	struct zxdh_mtr_stats mtr_stat = {0};
+	struct zxdh_mtr_object *mtr = NULL;
+	int ret = 0;
+
+	/* Meter object must exist. */
+	mtr = zxdh_mtr_find(mtr_id, dev->data->port_id);
+	if (mtr == NULL)
+		return -rte_mtr_error_set(error, ENOENT,
+					RTE_MTR_ERROR_TYPE_MTR_ID,
+					NULL, "Meter object id not valid.");
+	*stats_mask = RTE_MTR_STATS_N_BYTES_DROPPED | RTE_MTR_STATS_N_PKTS_DROPPED;
+	ret = zxdh_mtr_hw_counter_query(dev, clear, mtr->direction, &mtr_stat, error);
+	if (ret)
+		return ret;
+	stats->n_bytes_dropped = mtr_stat.n_bytes_dropped;
+	stats->n_pkts_dropped = mtr_stat.n_pkts_dropped;
+
+	return 0;
+}
+/* rte_mtr ops exposed by the PMD; callbacks left NULL are reported as
+ * unsupported by the rte_mtr layer.
+ */
+static const struct rte_mtr_ops zxdh_mtr_ops = {
+	.capabilities_get = zxdh_meter_cap_get,
+	.meter_profile_add = zxdh_meter_profile_add,
+	.meter_profile_delete = zxdh_meter_profile_delete,
+	.create = zxdh_meter_create,
+	.destroy = zxdh_meter_destroy,
+	.meter_enable = NULL,
+	.meter_disable = NULL,
+	.meter_profile_update = NULL,
+	.meter_dscp_table_update = NULL,
+	.stats_update = NULL,
+	.stats_read = zxdh_mtr_stats_read,
+	.meter_policy_add = zxdh_meter_policy_add,
+	.meter_policy_delete = zxdh_meter_policy_delete,
+};
+
+/**
+ * Get meter operations.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param arg
+ *   Pointer to set the mtr operations.
+ *
+ * @return
+ *   Always 0.
+ */
+int
+zxdh_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+	const struct rte_mtr_ops **ops = arg;
+
+	*ops = &zxdh_mtr_ops;
+	return 0;
+}
+
+/* Release every meter object, profile and policy owned by this port.
+ * Fixes: 'dev' was tagged __rte_unused although it is used, and the
+ * plain TAILQ_FOREACH iterated lists whose current node may be
+ * TAILQ_REMOVE'd and freed by the callee - a use-after-free on the
+ * next step; the _SAFE iterators cache the successor first.
+ */
+void zxdh_mtr_release(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *priv = dev->data->dev_private;
+	struct zxdh_meter_profile *profile, *tmp_profile;
+	struct rte_mtr_error error = {0};
+	struct zxdh_mtr_object *mtr_obj, *tmp_obj;
+
+	RTE_TAILQ_FOREACH_SAFE(mtr_obj, &zxdh_shared_data->mtr_list, next, tmp_obj) {
+		if (mtr_obj->port_id == priv->port_id)
+			zxdh_mtr_obj_free(dev, mtr_obj);
+	}
+
+	RTE_TAILQ_FOREACH_SAFE(profile, &zxdh_shared_data->meter_profile_list, next,
+			tmp_profile) {
+		if (profile->dpdk_port_id == priv->port_id)
+			zxdh_mtr_profile_res_free(dev, zxdh_shared_data->mtr_profile_mp,
+				profile, &error);
+	}
+
+	struct zxdh_meter_policy *policy, *tmp_policy;
+
+	RTE_TAILQ_FOREACH_SAFE(policy, &zxdh_shared_data->mtr_policy_list, next, tmp_policy) {
+		if (policy->dpdk_port_id == priv->port_id)
+			zxdh_mtr_policy_res_free(zxdh_shared_data->mtr_policy_mp, policy);
+	}
+}
diff --git a/drivers/net/zxdh/zxdh_mtr.h b/drivers/net/zxdh/zxdh_mtr.h
new file mode 100644
index 0000000000..883991e25b
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_mtr.h
@@ -0,0 +1,46 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_MTR_H_
+#define _ZXDH_MTR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_ethdev.h>
+#include <rte_bitmap.h>
+#include <rte_mtr.h>
+#include <zxdh_mtr_drv.h>
+
+/* srTCM rate granularity and minimum rate; the 1<<16 step presumably
+ * corresponds to the hardware's 64 Kbps unit - TODO confirm vs hw spec.
+ */
+#define ZXDH_SRTCM_RATE_GRANULARITY  (1ULL<<16)
+#define ZXDH_SRTCM_RATE_MIN (1ULL<<16)
+
+/* Per-color policer action plus the stats mask requested at create. */
+struct zxdh_meter_action {
+	enum rte_mtr_policer_action action[RTE_COLORS];
+	uint64_t stats_mask;
+};
+
+/* Driver-side meter object: ties a meter id on a port to its profile,
+ * policy and hardware policer flow.
+ */
+struct zxdh_mtr_object {
+	TAILQ_ENTRY(zxdh_mtr_object) next;
+	uint8_t direction:1, /* 0:ingress, 1:egress */
+			shared:1, /* shared flag passed at create (unsupported) */
+			enable:1, /* policer flow currently enabled */
+			rsv:5;
+	uint8_t rsv8;
+	uint16_t port_id; /* owning DPDK port id */
+	uint16_t vfid;
+	uint16_t meter_id; /* user-visible meter id */
+	uint16_t mtr_ref_cnt; /* references held on this object */
+	uint16_t rsv16;
+	struct zxdh_meter_profile *profile;
+	struct zxdh_meter_policy *policy;
+	struct zxdh_meter_action  mtr_action;
+};
+/* MTR list. */
+TAILQ_HEAD(zxdh_mtr_list, zxdh_mtr_object);
+
+void zxdh_mtr_release(struct rte_eth_dev *dev);
+
+#endif /* _ZXDH_MTR_H_ */
diff --git a/drivers/net/zxdh/zxdh_mtr_drv.c b/drivers/net/zxdh/zxdh_mtr_drv.c
new file mode 100644
index 0000000000..e1b6dede49
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_mtr_drv.c
@@ -0,0 +1,527 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_mtr_driver.h>
+
+#include "zxdh_ethdev_ops.h"
+#include "zxdh_ethdev.h"
+#include "zxdh_logs.h"
+#include "zxdh_mtr_drv.h"
+#include "dpp_drv_qos.h"
+#include "dpp_dtb_table_api.h"
+
+/**
+ * Check whether a software meter profile id is already in use on a port.
+ * @param[in] mpl
+ *   Meter profile list to search.
+ * @param[in] profile_id
+ *   Software meter profile id to look for.
+ * @param[in] dpdk_port_id
+ *   DPDK port expected to own the profile.
+ * @return
+ *   1 if a profile with this id exists on the port, 0 otherwise.
+ */
+int check_profile_exist(struct zxdh_mtr_profile_list *mpl,
+		uint32_t profile_id, uint16_t dpdk_port_id)
+{
+	struct zxdh_meter_profile *mp;
+
+	TAILQ_FOREACH(mp, mpl, next) {
+		if ((profile_id == mp->meter_profile_id) && (dpdk_port_id == mp->dpdk_port_id))
+			return 1;
+	}
+	return 0;
+}
+/**
+ * Validate an MTR profile against the srTCM/trTCM hardware limits.
+ * @param[in] meter_profile_id
+ *   Software meter profile id, must be below MAX_MTR_PROFILE_NUM.
+ * @param[in] profile
+ *   Meter profile parameters to validate.
+ * @param[out] error
+ *   Pointer to the error structure, filled on failure.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+/*  Maximum value of srTCM metering parameters, unit_step: 64kb
+ *  61K~400000000(400G) bps, uint 64Kbps CBS/EBS/PBS max bucket depth 128MB
+ *  PPS: 1pps~600Mpps
+ */
+#define ZXDH_SRTCM_CIR_MIN_BPS  (61*(1ULL<<10))
+#define ZXDH_SRTCM_CIR_MAX_BPS  (400*(1ULL<<30))
+#define ZXDH_SRTCM_EBS_MAX_B    (128*(1ULL<<20))
+#define ZXDH_SRTCM_CBS_MAX_B    (128*(1ULL<<20))
+#define ZXDH_TRTCM_PBS_MAX_B    (128*(1ULL<<20))
+#define ZXDH_TRTCM_PIR_MAX_BPS  (400*(1ULL<<30))
+#define ZXDH_TRTCM_PIR_MIN_BPS  (61*(1ULL<<10))
+
+#define ZXDH_SRTCM_CIR_MIN_PPS  (1)
+#define ZXDH_SRTCM_CIR_MAX_PPS  (200*(1ULL<<20))
+#define ZXDH_SRTCM_CBS_MAX_P    (8192)
+#define ZXDH_SRTCM_EBS_MAX_P    (8192)
+#define ZXDH_TRTCM_PBS_MAX_P    (8192)
+#define ZXDH_TRTCM_PIR_MIN_PPS  (1)
+#define ZXDH_TRTCM_PIR_MAX_PPS  (200*(1ULL<<20))
+int zxdh_mtr_profile_validate(uint32_t meter_profile_id, struct rte_mtr_meter_profile *profile,
+	struct rte_mtr_error *error)
+{
+	if ((profile == NULL) || (meter_profile_id >= MAX_MTR_PROFILE_NUM)) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+			"Meter profile param id invalid or null.");
+	}
+	uint64_t cir_min, cir_max, cbs_max, ebs_max, pir_min, pir_max, pbs_max;
+
+	if (profile->packet_mode == 0) { /* byte mode: rate limits in bytes/s */
+		cir_min = ZXDH_SRTCM_CIR_MIN_BPS/8;
+		cir_max = ZXDH_SRTCM_CIR_MAX_BPS/8;
+		cbs_max = ZXDH_SRTCM_CBS_MAX_B;
+		ebs_max = ZXDH_SRTCM_EBS_MAX_B;
+		pir_min = ZXDH_TRTCM_PIR_MIN_BPS/8;
+		pir_max = ZXDH_TRTCM_PIR_MAX_BPS/8;
+		pbs_max = ZXDH_TRTCM_PBS_MAX_B;
+	} else { /* packet mode: rate limits in pps */
+		cir_min = ZXDH_SRTCM_CIR_MIN_PPS;
+		cir_max = ZXDH_SRTCM_CIR_MAX_PPS;
+		cbs_max = ZXDH_SRTCM_CBS_MAX_P;
+		ebs_max = ZXDH_SRTCM_EBS_MAX_P;
+		pir_min = ZXDH_TRTCM_PIR_MIN_PPS;
+		pir_max = ZXDH_TRTCM_PIR_MAX_PPS;
+		pbs_max = ZXDH_TRTCM_PBS_MAX_P;
+	}
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		/* PRIx64: uint64_t is not "long" on 32-bit targets, so %lx is wrong there */
+		PMD_DRV_LOG(INFO,
+			"profile.cir 0x%" PRIx64 " cbs %" PRIx64 " mode %d cir min %" PRIx64 " max %" PRIx64 " cbs max %" PRIx64,
+			profile->srtcm_rfc2697.cir, profile->srtcm_rfc2697.cbs,
+			profile->packet_mode, cir_min, cir_max, cbs_max);
+		/* Verify support for flow meter parameters. */
+		if (profile->srtcm_rfc2697.cir >= cir_min &&
+			profile->srtcm_rfc2697.cir < cir_max &&
+			profile->srtcm_rfc2697.cbs < cbs_max &&
+			profile->srtcm_rfc2697.cbs > 0 &&
+			profile->srtcm_rfc2697.ebs > 0 &&
+			profile->srtcm_rfc2697.ebs < ebs_max)
+			return 0;
+		return -rte_mtr_error_set(error, ENOTSUP,
+				RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+				"Invalid metering parameters.");
+	}
+	if (profile->alg == RTE_MTR_TRTCM_RFC2698) {
+		PMD_DRV_LOG(INFO,
+			"profile.cir 0x%" PRIx64 " pir %" PRIx64 " cbs %" PRIx64 " mode %d cir min %" PRIx64 " max %" PRIx64 " pir_min %" PRIx64 " max %" PRIx64 " cbs max %" PRIx64,
+			profile->trtcm_rfc2698.cir, profile->trtcm_rfc2698.pir,
+			profile->trtcm_rfc2698.cbs, profile->packet_mode, cir_min,
+			cir_max, pir_min, pir_max, cbs_max);
+		/* Verify support for flow meter parameters; PIR must exceed CIR. */
+		if (profile->trtcm_rfc2698.cir >= cir_min &&
+			profile->trtcm_rfc2698.cir < cir_max &&
+			profile->trtcm_rfc2698.cbs < cbs_max &&
+			profile->trtcm_rfc2698.cbs > 0 &&
+			profile->trtcm_rfc2698.pir >= pir_min &&
+			profile->trtcm_rfc2698.pir < pir_max &&
+			profile->trtcm_rfc2698.cir < profile->trtcm_rfc2698.pir &&
+			profile->trtcm_rfc2698.pbs > 0 &&
+			profile->trtcm_rfc2698.pbs < pbs_max)
+			return 0;
+		return -rte_mtr_error_set(error, ENOTSUP,
+				RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+				"Invalid metering parameters.");
+	}
+	return -rte_mtr_error_set(error, ENOTSUP,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+			"algorithm not supported");
+}
+/**
+ * Allocate a hardware CAR profile id for the given vport.
+ * @param[in] vport
+ *   Owner vport of the profile.
+ * @param[in] car_type
+ *   Hardware CAR profile type.
+ * @param[out] hw_profile_id
+ *   Allocated hardware profile id.
+ * @param[out] error
+ *   Error structure filled on failure.
+ * @return
+ *   0: success
+ *  <0: failed (allocation error or invalid id returned by hardware)
+ */
+int zxdh_hw_profile_alloc_direct(uint16_t vport, DPP_PROFILE_TYPE car_type,
+		uint16_t *hw_profile_id, struct rte_mtr_error *error)
+{
+	ZXIC_UINT64 profile_id = HW_PROFILE_MAX;
+	int ret = dpp_car_profile_id_add(vport, car_type, &profile_id);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "port %u alloc hw profile failed", vport);
+		return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+			"Meter offload alloc profile failed");
+	}
+	*hw_profile_id = (uint16_t)profile_id;
+	/* HW_PROFILE_MAX is the "no profile" sentinel; reject it as invalid. */
+	if (*hw_profile_id == HW_PROFILE_MAX) {
+		return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+			"Meter offload alloc profile id invalid");
+	}
+	PMD_DRV_LOG(INFO, "alloc hw profile id %u\n", *hw_profile_id);
+	return 0;
+}
+/**
+ * Write a previously built CAR profile configuration into hardware.
+ * @param[in] car_type
+ *   Hardware CAR profile type.
+ * @param[in] hw_profile_id
+ *   Hardware profile id to configure.
+ * @param[in] mp
+ *   Meter profile holding the owner vport and the prepared plcr_param.
+ * @param[out] error
+ *   Error structure filled on failure.
+ * @return 0 on success, negative errno otherwise.
+ */
+int zxdh_hw_profile_config_direct(DPP_PROFILE_TYPE car_type, uint16_t hw_profile_id,
+		struct zxdh_meter_profile *mp, struct rte_mtr_error *error)
+{
+	PMD_DRV_LOG(INFO, "hw_profile_owner_vport %d  hw_profile_id %x pktmode %d",
+		 mp->hw_profile_owner_vport, hw_profile_id, mp->profile.packet_mode);
+
+	int ret = dpp_car_profile_cfg_set(mp->hw_profile_owner_vport, car_type,
+				mp->profile.packet_mode, (uint32_t)hw_profile_id, &mp->plcr_param);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, " config hw profile %u failed", hw_profile_id);
+		return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+				"Meter offload cfg profile failed");
+	}
+	PMD_DRV_LOG(INFO, "config profile id %u ok\n", hw_profile_id);
+	return 0;
+}
+/**
+ * Release a hardware CAR profile id previously allocated for a vport.
+ * @param[in] vport
+ *   Owner vport of the profile.
+ * @param[in] car_type
+ *   Hardware CAR profile type.
+ * @param[in] hw_profile_id
+ *   Hardware profile id to free.
+ * @param[out] error
+ *   Error structure filled on failure.
+ * @return 0 on success, negative errno otherwise.
+ */
+int zxdh_hw_profile_free_direct(uint16_t vport, DPP_PROFILE_TYPE car_type, uint16_t hw_profile_id,
+	struct rte_mtr_error *error)
+{
+	PMD_DRV_LOG(INFO, "free hw profile id %u", hw_profile_id);
+	int ret = dpp_car_profile_id_delete(vport, car_type, (uint64_t)hw_profile_id);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "port %u free hw profile %u failed", vport, hw_profile_id);
+		return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL,
+			"Meter free profile failed");
+	}
+	PMD_DRV_LOG(INFO, "free hw_profile_id %u\n", hw_profile_id);
+	return 0;
+}
+/**
+ * Bind (or unbind) a hardware policer flow to a CAR profile.
+ * @param[in] car_type
+ *   Hardware CAR profile type.
+ * @param[in] hw_flow_id
+ *   Hardware policer flow id (see zxdh_hw_flow_id_get()).
+ * @param[in] enable
+ *   True to enable metering on the flow, false to disable.
+ * @param[in] hw_profile_id
+ *   Hardware profile id to bind the flow to.
+ * @param[out] error
+ *   Error structure filled on failure.
+ * @return 0 on success, negative errno otherwise.
+ */
+int zxdh_hw_plcrflow_config_direct(DPP_PROFILE_TYPE car_type, uint16_t hw_flow_id,
+		bool enable, uint16_t hw_profile_id, struct rte_mtr_error *error)
+{
+	int ret = dpp_stat_car_queue_cfg_set(0, car_type, hw_flow_id,
+				1, enable, (uint64_t)hw_profile_id);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			"dpp_stat_car_queue_cfg_set failed flowid %u profile id %u",
+			hw_flow_id, hw_profile_id);
+		return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+				NULL, "Failed to bind plcr flow.");
+	}
+	PMD_DRV_LOG(INFO, "bind plcr flow %u to profile %u ok\n", hw_flow_id, hw_profile_id);
+	return ret;
+}
+/**
+ * Map a (vfid, direction) pair to its hardware policer flow id.
+ * Each vf owns two consecutive flow ids starting at PORT_MTR_FID_BASE;
+ * dir selects which of the pair is used.
+ */
+#define PORT_MTR_FID_BASE  8192
+uint16_t zxdh_hw_flow_id_get(uint16_t vfid, uint16_t dir)
+{
+	return vfid * 2 + PORT_MTR_FID_BASE + dir;
+}
+
+/**
+ * Read the raw per-direction meter drop counters for a vport from the
+ * DTB stats PPU. The counter index is the vport's vfid offset into the
+ * per-direction stats region.
+ * @return 0 on success, the dpp error code otherwise.
+ */
+static int mtr_hw_stats_get(uint16_t vport, uint8_t direction,
+				struct zxdh_hw_mtr_stats *hw_mtr_stats)
+{
+	union VPORT v_port = {.vport = vport};
+	/* Ingress and egress counters live in separate stats regions. */
+	uint32_t stat_baseaddr = (direction == EGRESS) ?
+				DPP_MTR_STATS_EGRESS_BASE : DPP_MTR_STATS_INGRESS_BASE;
+	uint32_t idx = vport_to_vfid(v_port) + stat_baseaddr;
+
+	PMD_DRV_LOG(INFO, " get stat idx 0x%x\n", idx);
+	int ret = dpp_dtb_stat_ppu_cnt_get(DEVICE_NO, g_dtb_data.queueid,
+					STAT_128_MODE, idx, (uint32_t *)hw_mtr_stats);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "get vport 0x%x (vfid 0x%x) dir %u stats failed",
+				vport, vport_to_vfid(v_port), direction);
+		return ret;
+	}
+	PMD_DRV_LOG(INFO, "get vport 0x%x (vfid 0x%x) dir %u stats",
+			vport, vport_to_vfid(v_port), direction);
+	return 0;
+}
+/**
+ * Fetch meter drop statistics for a vport/direction and assemble the
+ * little-endian 32-bit hardware counter halves into 64-bit totals.
+ * @return 0 on success, error code from the hardware read otherwise.
+ */
+int zxdh_mtr_stats_get(uint16_t vport, int dir, struct zxdh_mtr_stats *mtr_stats)
+{
+	struct zxdh_hw_mtr_stats hw_mtr_stat = {0};
+	int ret = mtr_hw_stats_get(vport, dir, &hw_mtr_stat);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "port %u dir %u get mtr stats failed", vport, dir);
+		return ret;
+	}
+	mtr_stats->n_bytes_dropped =
+		(uint64_t)(rte_le_to_cpu_32(hw_mtr_stat.n_bytes_dropped_hi)) << 32 |
+				   rte_le_to_cpu_32(hw_mtr_stat.n_bytes_dropped_lo);
+	mtr_stats->n_pkts_dropped =
+		(uint64_t)(rte_le_to_cpu_32(hw_mtr_stat.n_pkts_dropped_hi)) << 32 |
+				   rte_le_to_cpu_32(hw_mtr_stat.n_pkts_dropped_lo);
+	/* NOTE(review): 0x%lx assumes 64-bit long for uint64_t; prefer PRIx64. */
+	PMD_DRV_LOG(INFO, "get mtr stats ok, droppkt hi 0x%x lo 0x%x(0x%lx) dropbyes hi 0x%x lo 0x%x(0x%lx)\n",
+		hw_mtr_stat.n_pkts_dropped_hi, hw_mtr_stat.n_pkts_dropped_lo,
+		mtr_stats->n_pkts_dropped, hw_mtr_stat.n_bytes_dropped_hi,
+		hw_mtr_stat.n_bytes_dropped_lo, mtr_stats->n_bytes_dropped);
+
+	PMD_INIT_LOG(INFO, "get dev %u dir %u mtr stats ok\n", vport, dir);
+	return 0;
+}
+/**
+ * Look up an already-configured hardware profile whose parameters and
+ * owner vport match the requested profile, so it can be reused.
+ * @return
+ *   The matching hardware profile id, or HW_PROFILE_MAX when none exists.
+ */
+uint16_t check_hw_profile_exist(struct zxdh_mtr_profile_list *mpl,
+			struct rte_mtr_meter_profile *profile,
+			uint16_t hw_profile_owner_vport)
+{
+	struct zxdh_meter_profile *mp;
+
+	TAILQ_FOREACH(mp, mpl, next) {
+		/* NOTE(review): whole-struct memcmp also compares padding and
+		 * unused union members; assumes callers zero-initialize
+		 * rte_mtr_meter_profile - confirm.
+		 */
+		if ((memcmp(profile, &mp->profile, sizeof(struct rte_mtr_meter_profile)) == 0) &&
+			(hw_profile_owner_vport == mp->hw_profile_owner_vport)) {
+			PMD_DRV_LOG(INFO, "profile exist mp %p\n", mp);
+			return mp->hw_profile_id;
+		}
+	}
+	return HW_PROFILE_MAX;
+}
+/* Hardware CAR profile field encodings. */
+enum PLCR_CD { /* metering algorithm select */
+	PLCR_CD_SRTCM = 0,
+	PLCR_CD_TRTCM,
+	PLCR_CD_MEF101,
+};
+enum PLCR_CM { /* color mode: blind or aware */
+	PLCR_CM_BLIND = 0,
+	PLCR_CM_AWARE,
+};
+enum PLCR_CF { /* overflow/coupling flag */
+	PLCR_CF_UNOVERFLOW = 0,
+	PLCR_CF_OVERFLOW,
+};
+#define PLCR_STEP_SIZE  (61 * (1 << 10)) /* hardware rate unit, in bits/s */
+/**
+ * Translate a validated rte_mtr profile into the hardware CAR layout.
+ * Byte-mode profiles fill DPP_STAT_CAR_PROFILE_CFG_T, converting rates
+ * from bytes/s into PLCR_STEP_SIZE bit units; packet-mode profiles fill
+ * DPP_STAT_CAR_PKT_PROFILE_CFG_T with raw pps/packet values.
+ * @param[in] profile
+ *   Validated meter profile (srTCM RFC2697 or trTCM RFC2698).
+ * @param[out] plcr_param
+ *   Hardware profile buffer to fill (union zxdh_offload_profile_cfg).
+ * @param[in] profile_id
+ *   Hardware profile id recorded in the config.
+ */
+void plcr_param_build(struct rte_mtr_meter_profile *profile, void *plcr_param, uint16_t profile_id)
+{
+	if (profile->packet_mode == 0) { /* bps */
+		DPP_STAT_CAR_PROFILE_CFG_T *p_car_byte_profile_cfg =
+				(DPP_STAT_CAR_PROFILE_CFG_T *)plcr_param;
+
+		p_car_byte_profile_cfg->profile_id = profile_id;
+		p_car_byte_profile_cfg->pkt_sign = profile->packet_mode;
+		p_car_byte_profile_cfg->cf = PLCR_CF_UNOVERFLOW;
+		p_car_byte_profile_cfg->cm = PLCR_CM_BLIND;
+		if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+			p_car_byte_profile_cfg->cd  = PLCR_CD_SRTCM;
+			p_car_byte_profile_cfg->cir =
+				profile->srtcm_rfc2697.cir * 8 / PLCR_STEP_SIZE;
+			p_car_byte_profile_cfg->cbs = profile->srtcm_rfc2697.cbs;
+			p_car_byte_profile_cfg->ebs = profile->srtcm_rfc2697.ebs;
+		} else {
+			/* trTCM: hardware takes the excess rate EIR = PIR - CIR. */
+			p_car_byte_profile_cfg->cd  = PLCR_CD_TRTCM;
+			p_car_byte_profile_cfg->cir =
+				profile->trtcm_rfc2698.cir * 8 / PLCR_STEP_SIZE;
+			p_car_byte_profile_cfg->cbs = profile->trtcm_rfc2698.cbs;
+			p_car_byte_profile_cfg->eir = (profile->trtcm_rfc2698.pir -
+				profile->trtcm_rfc2698.cir) * 8 / PLCR_STEP_SIZE;
+			p_car_byte_profile_cfg->ebs =
+				profile->trtcm_rfc2698.pbs - profile->trtcm_rfc2698.cbs;
+		}
+
+	PMD_DRV_LOG(INFO, "param %p cir %x cbs %x eir %x ebs %x  profile id %d  pkt_sign %d",
+		p_car_byte_profile_cfg, p_car_byte_profile_cfg->cir,
+		p_car_byte_profile_cfg->cbs, p_car_byte_profile_cfg->eir,
+		p_car_byte_profile_cfg->ebs, p_car_byte_profile_cfg->profile_id,
+		p_car_byte_profile_cfg->pkt_sign);
+	} else {
+		DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_car_pkt_profile_cfg =
+			(DPP_STAT_CAR_PKT_PROFILE_CFG_T *)plcr_param;
+
+		p_car_pkt_profile_cfg->profile_id = profile_id;
+		p_car_pkt_profile_cfg->pkt_sign = profile->packet_mode;
+
+		if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+			p_car_pkt_profile_cfg->cir = profile->srtcm_rfc2697.cir;
+			p_car_pkt_profile_cfg->cbs = profile->srtcm_rfc2697.cbs;
+		} else {
+			p_car_pkt_profile_cfg->cir = profile->trtcm_rfc2698.cir;
+			p_car_pkt_profile_cfg->cbs = profile->trtcm_rfc2698.cbs;
+		}
+	PMD_DRV_LOG(INFO, "param %p cir %x cbs %x  profile id %d  pkt_sign %d",
+		p_car_pkt_profile_cfg, p_car_pkt_profile_cfg->cir, p_car_pkt_profile_cfg->cbs,
+		p_car_pkt_profile_cfg->profile_id, p_car_pkt_profile_cfg->pkt_sign);
+	}
+}
+/**
+ * Map a wrong policer color action to the matching verbose MTR error type.
+ * @param[in] action
+ *   Policy color action.
+ * @return
+ *   Verbose meter color error type.
+ */
+static inline enum rte_mtr_error_type action2error(enum rte_mtr_policer_action action)
+{
+	if (action == MTR_POLICER_ACTION_COLOR_GREEN)
+		return RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN;
+	if (action == MTR_POLICER_ACTION_COLOR_YELLOW)
+		return RTE_MTR_ERROR_TYPE_POLICER_ACTION_YELLOW;
+	if (action == MTR_POLICER_ACTION_COLOR_RED)
+		return RTE_MTR_ERROR_TYPE_POLICER_ACTION_RED;
+	return RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+}
+/**
+ * Allocate a meter profile object from the profile mempool.
+ * @return the object, or NULL when the pool is exhausted.
+ */
+struct zxdh_meter_profile *zxdh_mtr_profile_res_alloc(struct rte_mempool *mtr_profile_mp)
+{
+	struct zxdh_meter_profile *meter_profile = NULL;
+
+	if (MP_ALLOC_OBJ_FUNC(mtr_profile_mp, meter_profile) != 0)
+		return NULL;
+
+	return meter_profile;
+}
+/**
+ * Allocate a meter policy object from the policy mempool.
+ * @return the object, or NULL on failure.
+ * NOTE(review): rte_mempool_get's return value is ignored; this relies on
+ * policy staying NULL when the get fails - confirm.
+ */
+struct zxdh_meter_policy *zxdh_mtr_policy_res_alloc(struct rte_mempool *mtr_policy_mp)
+{
+	struct zxdh_meter_policy *policy = NULL;
+
+	rte_mempool_get(mtr_policy_mp, (void **)&policy);
+	PMD_DRV_LOG(INFO, "policy %p", policy);
+	return policy;
+}
+/**
+ * Drop one reference on a meter policy; when the last reference goes,
+ * unlink it from the shared policy list and return it to the mempool.
+ * A NULL policy is ignored.
+ */
+void zxdh_mtr_policy_res_free(struct rte_mempool *mtr_policy_mp, struct zxdh_meter_policy  *policy)
+{
+	/* Check before use: the log below dereferences policy, so the old
+	 * "if (policy && ...)" test came too late to guard anything.
+	 */
+	if (policy == NULL)
+		return;
+	PMD_DRV_LOG(INFO, "to free policy %d  ref  %d  ", policy->policy_id,  policy->ref_cnt);
+
+	if (--policy->ref_cnt == 0) {
+		TAILQ_REMOVE(&zxdh_shared_data->mtr_policy_list, policy, next);
+		MP_FREE_OBJ_FUNC(mtr_policy_mp, policy);
+	}
+}
+/**
+ * Look up a meter profile by software profile id on a given port.
+ * @param[in] mpl
+ *   Profile list to search.
+ * @param[in] meter_profile_id
+ *   Software meter profile id.
+ * @param[in] dpdk_portid
+ *   Owning DPDK port id.
+ * @return
+ *   Pointer to the matching profile, or NULL when none exists.
+ */
+struct zxdh_meter_profile *zxdh_mtr_profile_find_by_id(struct zxdh_mtr_profile_list *mpl,
+			uint32_t meter_profile_id, uint16_t dpdk_portid)
+{
+	struct zxdh_meter_profile *cur;
+
+	TAILQ_FOREACH(cur, mpl, next) {
+		if (cur->meter_profile_id == meter_profile_id &&
+			cur->dpdk_port_id == dpdk_portid)
+			return cur;
+	}
+	return NULL;
+}
+/**
+ * Check that the policy's per-color actions are supported.
+ * @param[in] actions
+ *    Actions per color; currently only a drop action on red is supported,
+ *    and it is mandatory.
+ * @param[out] error
+ *   Pointer to rte meter error structure.
+ * @return
+ *  >=0  actions are supported
+ *  <0   actions are not supported
+ */
+int zxdh_policy_validate_actions(const struct rte_flow_action *actions[RTE_COLORS],
+		struct rte_mtr_error *error)
+{
+	if (!actions[RTE_COLOR_RED] || actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
+		return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+			"Red color only supports drop action.");
+	return 0;
+}
+/**
+ * Look up a meter policy by policy id on a given port.
+ * @param[in] mtr_policy_list
+ *   Policy list to search.
+ * @param[in] policy_id
+ *   Policy id to look for.
+ * @param[in] dpdk_portid
+ *   Owning DPDK port id.
+ * @return
+ *  !NULL : pointer to the policy
+ *  NULL  : policy does not exist.
+ */
+struct zxdh_meter_policy *zxdh_mtr_policy_find_by_id(struct zxdh_mtr_policy_list *mtr_policy_list,
+	uint16_t policy_id, uint16_t dpdk_portid)
+{
+	struct zxdh_meter_policy *mtr_policy = NULL;
+
+	TAILQ_FOREACH(mtr_policy, mtr_policy_list, next) {
+		if ((policy_id == mtr_policy->policy_id) &&
+			(dpdk_portid == mtr_policy->dpdk_port_id))
+			return mtr_policy;
+	}
+	return NULL;
+}
+/**
+ * Callback to get MTR capabilities.
+ * @param[in] dev
+ *   Pointer to Ethernet device (unused).
+ * @param[out] cap
+ *   Pointer to save MTR capabilities.
+ * @param[out] error
+ *   Pointer to the error structure (unused).
+ * @return
+ *   Always 0; the capability set is static.
+ */
+#define SHARE_FLOW_MAX  2048
+int
+zxdh_meter_cap_get(struct rte_eth_dev *dev __rte_unused,
+	struct rte_mtr_capabilities *cap,
+	struct rte_mtr_error *error __rte_unused)
+{
+	static const struct rte_mtr_capabilities capa = {
+		.n_max = MAX_MTR_NUM,
+		.n_shared_max = SHARE_FLOW_MAX,
+		.meter_srtcm_rfc2697_n_max = MAX_MTR_PROFILE_NUM,
+		.meter_trtcm_rfc2698_n_max = MAX_MTR_PROFILE_NUM,
+		.color_aware_srtcm_rfc2697_supported = 1,
+		.color_aware_trtcm_rfc2698_supported = 1,
+		.meter_rate_max = ZXDH_SRTCM_CIR_MAX_BPS,
+		.meter_policy_n_max = ZXDH_MAX_POLICY_NUM,
+		.srtcm_rfc2697_byte_mode_supported   = 1,
+		.srtcm_rfc2697_packet_mode_supported = 1,
+		.trtcm_rfc2698_byte_mode_supported   = 1,
+		.trtcm_rfc2698_packet_mode_supported = 1,
+		.stats_mask = RTE_MTR_STATS_N_PKTS_DROPPED | RTE_MTR_STATS_N_BYTES_DROPPED,
+	};
+
+	*cap = capa;
+	return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_mtr_drv.h b/drivers/net/zxdh/zxdh_mtr_drv.h
new file mode 100644
index 0000000000..d49d1082fc
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_mtr_drv.h
@@ -0,0 +1,119 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_MTR_DRV_H_
+#define _ZXDH_MTR_DRV_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_mempool.h>
+#include <ethdev_driver.h>
+#include <rte_mtr.h>
+#include "dpp_stat_car.h"
+#include "dpp_car_res.h"
+#include "dpp_drv_qos.h"
+
+#define MAX_MTR_NUM          2048
+#define ZXDH_MAX_POLICY_NUM  MAX_MTR_NUM
+
+/* Policer actions.
+ * NOTE(review): this enum lives in the reserved rte_ namespace although it
+ * is driver-local; consider renaming with a zxdh_ prefix.
+ */
+enum rte_mtr_policer_action {
+	MTR_POLICER_ACTION_COLOR_GREEN = 0, /* Recolor the packet as green. */
+	MTR_POLICER_ACTION_COLOR_YELLOW,    /* Recolor the packet as yellow. */
+	MTR_POLICER_ACTION_COLOR_RED,       /* Recolor the packet as red. */
+	MTR_POLICER_ACTION_DROP,            /* Drop the packet. */
+};
+
+/* Hardware CAR profile config: packet-mode or byte-mode layout. */
+union zxdh_offload_profile_cfg {
+	DPP_STAT_CAR_PKT_PROFILE_CFG_T p_car_pkt_profile_cfg;
+	DPP_STAT_CAR_PROFILE_CFG_T     p_car_byte_profile_cfg;
+};
+
+/* meter profile structure. */
+struct zxdh_meter_profile {
+	TAILQ_ENTRY(zxdh_meter_profile) next; /* Pointer to the next flow meter structure. */
+	uint16_t dpdk_port_id;
+	uint16_t hw_profile_owner_vport;
+	uint16_t meter_profile_id;            /* software Profile id. */
+	uint16_t hw_profile_id;               /* hardware Profile id. */
+	struct rte_mtr_meter_profile profile; /* Profile detail. */
+	union zxdh_offload_profile_cfg plcr_param; /* hardware profile layout. */
+	uint32_t ref_cnt;                     /* used count. */
+};
+TAILQ_HEAD(zxdh_mtr_profile_list, zxdh_meter_profile);
+
+/* Software meter policy, shared per port via the policy list. */
+struct zxdh_meter_policy {
+	TAILQ_ENTRY(zxdh_meter_policy) next;
+	uint16_t policy_id;
+	uint16_t ref_cnt;  /* attach reference count */
+	uint16_t dpdk_port_id;
+	uint16_t rsv;      /* reserved/padding */
+	struct rte_mtr_meter_policy_params policy;
+};
+TAILQ_HEAD(zxdh_mtr_policy_list, zxdh_meter_policy);
+
+/* 64-bit drop counters assembled from the 32-bit hardware halves below. */
+struct zxdh_mtr_stats {
+	uint64_t n_pkts_dropped;
+	uint64_t n_bytes_dropped;
+};
+/* Raw hardware counter layout: hi/lo 32-bit little-endian halves. */
+struct zxdh_hw_mtr_stats {
+	uint32_t n_pkts_dropped_hi;
+	uint32_t n_pkts_dropped_lo;
+	uint32_t n_bytes_dropped_hi;
+	uint32_t n_bytes_dropped_lo;
+};
+
+#define HW_PROFILE_MAX       512
+#define MAX_MTR_PROFILE_NUM  HW_PROFILE_MAX
+/* hardware profile id resource */
+struct zxdh_mtr_res {
+	rte_spinlock_t hw_plcr_res_lock;
+	uint32_t hw_profile_refcnt[HW_PROFILE_MAX];
+	struct rte_mtr_meter_profile profile[HW_PROFILE_MAX];
+};
+extern struct zxdh_mtr_res g_mtr_res;
+
+/* mempool object alloc/free helpers */
+#define MP_ALLOC_OBJ_FUNC(mp, obj) rte_mempool_get(mp, (void **) &obj)
+#define MP_FREE_OBJ_FUNC(mp, obj) rte_mempool_put(mp, obj)
+
+int zxdh_mtr_profile_validate(uint32_t meter_profile_id,
+		struct rte_mtr_meter_profile *profile, struct rte_mtr_error *error);
+
+int check_profile_exist(struct zxdh_mtr_profile_list *mpl,
+		uint32_t profile_id, uint16_t dpdk_port_id);
+uint16_t check_hw_profile_exist(struct zxdh_mtr_profile_list *mp,
+		struct rte_mtr_meter_profile *profile, uint16_t hw_profile_owner_vport);
+int zxdh_hw_profile_alloc_direct(uint16_t vport, DPP_PROFILE_TYPE  car_type,
+		uint16_t *hw_profile_id, struct rte_mtr_error *error);
+int zxdh_hw_profile_config_direct(DPP_PROFILE_TYPE car_type,
+		uint16_t hw_profile_id, struct zxdh_meter_profile *mp, struct rte_mtr_error *error);
+int zxdh_hw_profile_free_direct(uint16_t vport,  DPP_PROFILE_TYPE  car_type,
+		uint16_t hw_profile_id, struct rte_mtr_error *error);
+int zxdh_hw_plcrflow_config_direct(DPP_PROFILE_TYPE car_type,
+		uint16_t hw_flow_id, bool enable,
+		uint16_t hw_profile_id, struct rte_mtr_error *error);
+void plcr_param_build(struct rte_mtr_meter_profile *profile, void *plcr_param, uint16_t profile_id);
+struct zxdh_meter_profile *zxdh_mtr_profile_find_by_id(struct zxdh_mtr_profile_list *mpl,
+		uint32_t meter_profile_id, uint16_t dpdk_portid);
+
+int zxdh_policy_validate_actions(const struct rte_flow_action *actions[RTE_COLORS],
+		struct rte_mtr_error *error);
+struct zxdh_meter_policy *zxdh_mtr_policy_find_by_id(struct zxdh_mtr_policy_list *mtr_policy_list,
+		uint16_t policy_id, uint16_t dpdk_portid);
+struct zxdh_meter_policy *zxdh_mtr_policy_res_alloc(struct rte_mempool *mtr_policy_mp);
+void zxdh_mtr_policy_res_free(struct rte_mempool *mtr_policy_mp, struct zxdh_meter_policy *policy);
+struct zxdh_meter_profile *zxdh_mtr_profile_res_alloc(struct rte_mempool *mtr_profile_mp);
+uint16_t zxdh_hw_flow_id_get(uint16_t vfid, uint16_t dir);
+int zxdh_meter_cap_get(struct rte_eth_dev *dev __rte_unused, struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error __rte_unused);
+int zxdh_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
+int zxdh_mtr_stats_get(uint16_t vport, int dir, struct zxdh_mtr_stats *mtr_stats);
+void zxdh_mtr_stats_clear(uint16_t vport);
+
+int zxdh_hw_profile_ref(uint16_t hw_profile_id);
+int zxdh_hw_profile_unref(struct rte_eth_dev *dev, uint8_t car_type, uint16_t hw_profile_id,
+		struct rte_mtr_error *error);
+#endif /* _ZXDH_MTR_DRV_H_ */
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
new file mode 100644
index 0000000000..4c31612f08
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -0,0 +1,499 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUX
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "zxdh_pci.h"
+#include "zxdh_logs.h"
+#include "zxdh_queue.h"
+
+/*
+ * Following macros are derived from linux/pci_regs.h, however,
+ * we can't simply include that header here, as there is no such
+ * file for non-Linux platform.
+ */
+#define PCI_CAPABILITY_LIST             0x34
+#define PCI_CAP_ID_VNDR                 0x09
+#define PCI_CAP_ID_MSIX                 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define ZXDH_PCI_CONFIG(hw)  (((hw)->use_msix == ZXDH_MSIX_ENABLED) ? 24 : 20)
+#define VQM_OFFSET           0x000000
+
+/**
+ * Check that the vring physical address fits the device's address width.
+ * @return 1 when the address is usable, 0 when it exceeds the limit.
+ */
+static inline int32_t check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+	/**
+	 * Virtio PCI device ZXDH_PCI_QUEUE_PF register is 32bit,
+	 * and only accepts 32 bit page frame number.
+	 * Check if the allocated physical memory exceeds 16TB.
+	 */
+	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) {
+		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
+		return 0;
+	}
+	return 1;
+}
+/* Write a 64-bit value as two 32-bit MMIO writes, low half first. */
+static inline void io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+	rte_write32(val & ((1ULL << 32) - 1), lo);
+	rte_write32(val >> 32, hi);
+}
+
+/**
+ * Read 'length' bytes of device-specific config at 'offset' into 'dst'.
+ * Re-reads until config_generation is stable, so a read torn by a
+ * concurrent device config change is never returned.
+ */
+static void modern_read_dev_config(struct zxdh_hw *hw,
+								   size_t offset,
+								   void *dst,
+								   int32_t length)
+{
+	int32_t i       = 0;
+	uint8_t *p      = NULL;
+	uint8_t old_gen = 0;
+	uint8_t new_gen = 0;
+
+	do {
+		old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+		p = dst;
+		for (i = 0;  i < length; i++)
+			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+		/* Retry if the device bumped the generation mid-read. */
+		new_gen = rte_read8(&hw->common_cfg->config_generation);
+	} while (old_gen != new_gen);
+}
+
+/* Write 'length' bytes from 'src' into device-specific config at 'offset'. */
+static void modern_write_dev_config(struct zxdh_hw *hw,
+									size_t offset,
+									const void *src,
+									int32_t length)
+{
+	int32_t i = 0;
+	const uint8_t *p = src;
+
+	for (i = 0;  i < length; i++)
+		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+/* Read the 64-bit device feature set via the two 32-bit feature windows. */
+static uint64_t modern_get_features(struct zxdh_hw *hw)
+{
+	uint32_t features_lo = 0;
+	uint32_t features_hi = 0;
+
+	rte_write32(0, &hw->common_cfg->device_feature_select);
+	features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+	rte_write32(1, &hw->common_cfg->device_feature_select);
+	features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+	return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+/* Write the negotiated 64-bit guest feature set, 32 bits at a time. */
+static void modern_set_features(struct zxdh_hw *hw, uint64_t features)
+{
+
+	rte_write32(0, &hw->common_cfg->guest_feature_select);
+	rte_write32(features & ((1ULL << 32) - 1), &hw->common_cfg->guest_feature);
+	rte_write32(1, &hw->common_cfg->guest_feature_select);
+	rte_write32(features >> 32, &hw->common_cfg->guest_feature);
+}
+
+/* Read the device status byte from common config. */
+static uint8_t modern_get_status(struct zxdh_hw *hw)
+{
+	return rte_read8(&hw->common_cfg->device_status);
+}
+
+/* Write the device status byte to common config. */
+static void modern_set_status(struct zxdh_hw *hw, uint8_t status)
+{
+	rte_write8(status, &hw->common_cfg->device_status);
+}
+
+/* Read the ISR status register. */
+static uint8_t modern_get_isr(struct zxdh_hw *hw)
+{
+	return rte_read8(hw->isr);
+}
+
+/* Set the MSI-X vector for config changes; return the value read back
+ * so the caller can detect a rejected vector.
+ */
+static uint16_t modern_set_config_irq(struct zxdh_hw *hw, uint16_t vec)
+{
+	rte_write16(vec, &hw->common_cfg->msix_config);
+	return rte_read16(&hw->common_cfg->msix_config);
+}
+
+/* Set the MSI-X vector for a queue; return the value read back so the
+ * caller can detect a rejected vector.
+ */
+static uint16_t modern_set_queue_irq(struct zxdh_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+	return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+/* Select a queue and read its size (number of descriptors). */
+static uint16_t modern_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id)
+{
+	rte_write16(queue_id, &hw->common_cfg->queue_select);
+	return rte_read16(&hw->common_cfg->queue_size);
+}
+
+/* Select a queue and program its size (number of descriptors). */
+static void modern_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size)
+{
+	rte_write16(queue_id, &hw->common_cfg->queue_select);
+	rte_write16(vq_size, &hw->common_cfg->queue_size);
+}
+
+/**
+ * Program a virtqueue's ring addresses into the device and enable it.
+ * Computes avail/used ring addresses from the descriptor base (packed and
+ * split layouts differ), writes them via the common config window, and
+ * records the queue's notify doorbell address.
+ * @return 0 on success, -1 if the ring physical address is out of range.
+ */
+static int32_t modern_setup_queue(struct zxdh_hw *hw, struct virtqueue *vq)
+{
+	uint64_t desc_addr  = 0;
+	uint64_t avail_addr = 0;
+	uint64_t used_addr  = 0;
+	uint16_t notify_off = 0;
+
+	if (!check_vq_phys_addr_ok(vq))
+		return -1;
+
+	desc_addr = vq->vq_ring_mem;
+	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+	if (vtpci_packed_queue(vq->hw)) {
+		/* Packed ring: driver/device event structures follow the ring. */
+		used_addr = RTE_ALIGN_CEIL((avail_addr + sizeof(struct vring_packed_desc_event)),
+							ZXDH_PCI_VRING_ALIGN);
+	} else {
+		/* Split ring: used ring starts after the avail ring, aligned. */
+		used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+						ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN);
+	}
+
+	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+					   &hw->common_cfg->queue_desc_hi);
+	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+					   &hw->common_cfg->queue_avail_hi);
+	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+					   &hw->common_cfg->queue_used_hi);
+
+	notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */
+	/* NOTE(review): the value read above is discarded and forced to 0;
+	 * confirm the device really exposes a single doorbell at offset 0.
+	 */
+	notify_off = 0;
+	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+			notify_off * hw->notify_off_multiplier);
+
+	rte_write16(1, &hw->common_cfg->queue_enable);
+
+	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
+	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
+	PMD_INIT_LOG(DEBUG, "\t aval_addr: %" PRIx64, avail_addr);
+	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
+	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)", vq->notify_addr, notify_off);
+
+	return 0;
+}
+
+/* Disable a virtqueue and clear its ring addresses in the device. */
+static void modern_del_queue(struct zxdh_hw *hw, struct virtqueue *vq)
+{
+	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+					   &hw->common_cfg->queue_desc_hi);
+	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+					   &hw->common_cfg->queue_avail_hi);
+	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+					   &hw->common_cfg->queue_used_hi);
+
+	rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+/**
+ * Kick the device for a virtqueue. Without ZXDH_F_NOTIFICATION_DATA only
+ * the queue index is written; with it, the avail index (and, for packed
+ * rings, the wrap counter) is encoded into the doorbell write as well.
+ */
+static void modern_notify_queue(struct zxdh_hw *hw, struct virtqueue *vq)
+{
+	uint32_t notify_data = 0;
+
+	if (!vtpci_with_feature(hw, ZXDH_F_NOTIFICATION_DATA)) {
+		rte_write16(vq->vq_queue_index, vq->notify_addr);
+		return;
+	}
+
+	if (vtpci_with_feature(hw, ZXDH_F_RING_PACKED)) {
+		/*
+		 * Bit[0:15]: vq queue index
+		 * Bit[16:30]: avail index
+		 * Bit[31]: avail wrap counter
+		 */
+		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
+						VRING_PACKED_DESC_F_AVAIL)) << 31) |
+						((uint32_t)vq->vq_avail_idx << 16) |
+						vq->vq_queue_index;
+	} else {
+		/*
+		 * Bit[0:15]: vq queue index
+		 * Bit[16:31]: avail index
+		 */
+		notify_data = ((uint32_t)vq->vq_avail_idx << 16) | vq->vq_queue_index;
+	}
+	PMD_DRV_LOG(DEBUG, "queue:%d notify_data 0x%x notify_addr 0x%p",
+				 vq->vq_queue_index, notify_data, vq->notify_addr);
+	rte_write32(notify_data, vq->notify_addr);
+}
+
+const struct zxdh_pci_ops zxdh_modern_ops = {
+	.read_dev_cfg   = modern_read_dev_config,
+	.write_dev_cfg  = modern_write_dev_config,
+	.get_status     = modern_get_status,
+	.set_status     = modern_set_status,
+	.get_features   = modern_get_features,
+	.set_features   = modern_set_features,
+	.get_isr        = modern_get_isr,
+	.set_config_irq = modern_set_config_irq,
+	.set_queue_irq  = modern_set_queue_irq,</