[v2,4/8] net/rnp: add mbx basic api feature

Message ID 20230802081106.2340406-5-caowenbo@mucse.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series *** Add Support New Pmd Driver ***

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Wenbo Cao Aug. 2, 2023, 8:11 a.m. UTC
  mbx base code is for communicating with the firmware

Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/rnp/base/rnp_hw.h | 107 ++++++-
 drivers/net/rnp/meson.build   |   1 +
 drivers/net/rnp/rnp.h         |  35 +++
 drivers/net/rnp/rnp_ethdev.c  |  70 ++++-
 drivers/net/rnp/rnp_logs.h    |   9 +
 drivers/net/rnp/rnp_mbx.c     | 522 ++++++++++++++++++++++++++++++++++
 drivers/net/rnp/rnp_mbx.h     | 139 +++++++++
 drivers/net/rnp/rnp_mbx_fw.h  |  22 ++
 8 files changed, 902 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/rnp/rnp_mbx.c
 create mode 100644 drivers/net/rnp/rnp_mbx.h
 create mode 100644 drivers/net/rnp/rnp_mbx_fw.h
  

Comments

Stephen Hemminger Aug. 2, 2023, 4:11 p.m. UTC | #1
On Wed,  2 Aug 2023 08:11:02 +0000
Wenbo Cao <caowenbo@mucse.com> wrote:

> +struct rnp_mbx_stats {
> +	u32 msgs_tx;
> +	u32 msgs_rx;
> +
> +	u32 acks;
> +	u32 reqs;
> +	u32 rsts;
> +};

Easy to cause wraparound of 32 bits in these stats.
Why use u32 and not uint32_t or uint64_t?

> +struct rnp_mbx_info {
> +	struct rnp_mbx_api ops;
> +	uint32_t usec_delay;    /* retry interval delay time */
> +	uint32_t timeout;       /* retry ops timeout limit */
> +	uint16_t size;          /* data buffer size*/
> +	uint16_t vf_num;        /* Virtual Function num */
> +	uint16_t pf_num;        /* Physical Function num */
> +	uint16_t sriov_st;      /* Sriov state */
> +	bool irq_enabled;
> +	union {
> +		struct {
> +			unsigned short pf_req;
> +			unsigned short pf_ack;
> +		};
> +		struct {
> +			unsigned short cpu_req;
> +			unsigned short cpu_ack;
> +		};
> +	};
> +	unsigned short vf_req[64];
> +	unsigned short vf_ack[64];
> +
> +	struct rnp_mbx_stats stats;
> +
> +	rte_atomic16_t state;
> +} __rte_cache_aligned;

You may want to reorder this data structure to reduce cache misses.
The last element is almost certainly on another cache line.
Also, if you want faster statistics, you should consider keeping them per core, per queue or per VF.
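
A rough sketch of that suggestion (illustrative only, not part of the submitted patch): 64-bit counters that cannot realistically wrap, with the frequently polled fields grouped on the first cache line. The struct names, the field grouping and the dropped pf/cpu union are assumptions.

#include <stdint.h>
#include <stdbool.h>
#include <rte_common.h>		/* __rte_cache_aligned */

/* 64-bit counters: wide enough that wraparound is not a concern */
struct rnp_mbx_stats64 {
	uint64_t msgs_tx;
	uint64_t msgs_rx;
	uint64_t acks;
	uint64_t reqs;
	uint64_t rsts;
};

/* Frequently polled fields first so they share the first cache line;
 * bulk per-VF state and the counters follow.
 */
struct rnp_mbx_info_example {
	uint16_t cpu_req;	/* checked on every firmware exchange */
	uint16_t cpu_ack;
	uint32_t usec_delay;	/* retry interval delay time */
	uint32_t timeout;	/* retry ops timeout limit */
	uint16_t size;		/* data buffer size */
	uint16_t vf_num;
	uint16_t pf_num;
	uint16_t sriov_st;
	bool irq_enabled;

	uint16_t vf_req[64];	/* per-VF state, rarely touched together */
	uint16_t vf_ack[64];

	struct rnp_mbx_stats64 stats;
} __rte_cache_aligned;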
  
Wenbo Cao Aug. 3, 2023, 2:25 a.m. UTC | #2
Hello, Stephen
This struct is used for communication with the firmware, or for the PF
to communicate with a VF.
It is used to synchronize the communication events;
it isn't used for performance sampling.
u32 (uint32_t) is used because the hardware register is 32-bit read/write,
so this counter size is enough.
>+struct rnp_mbx_stats {
>...
>+}

Regards Wenbo
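
A small sketch of the register layout being described here, mirroring the rnp_mbx_get_req()/rnp_mbx_get_ack() helpers in the patch below; the helper names are illustrative:

#include <stdint.h>

/* One 32-bit mailbox counter register packs both counters:
 * bits 15:0  - request count
 * bits 31:16 - ack count
 */
static inline uint16_t mbx_counter_req(uint32_t reg_val)
{
	return reg_val & 0xffff;
}

static inline uint16_t mbx_counter_ack(uint32_t reg_val)
{
	return (reg_val >> 16) & 0xffff;
}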

Patch

diff --git a/drivers/net/rnp/base/rnp_hw.h b/drivers/net/rnp/base/rnp_hw.h
index 543011a64c..da3659bd67 100644
--- a/drivers/net/rnp/base/rnp_hw.h
+++ b/drivers/net/rnp/base/rnp_hw.h
@@ -4,15 +4,120 @@ 
 #ifndef __RNP_HW_H__
 #define __RNP_HW_H__
 
+#include <rte_io.h>
+#include <ethdev_driver.h>
+
+#include "rnp_osdep.h"
+
+static inline unsigned int rnp_rd_reg(volatile void *addr)
+{
+	unsigned int v = rte_read32(addr);
+
+	return v;
+}
+
+static inline void rnp_wr_reg(volatile void *reg, int val)
+{
+	rte_write32_relaxed((val), (reg));
+}
+
+#define mbx_rd32(hw, reg)	rnp_rd_reg((hw)->iobar4 + (reg))
+#define mbx_wr32(hw, reg, val)	rnp_wr_reg((hw)->iobar4 + (reg), (val))
+
+struct rnp_hw;
+/* Mbx Operate info */
+enum MBX_ID {
+	MBX_PF = 0,
+	MBX_VF,
+	MBX_CM3CPU,
+	MBX_FW = MBX_CM3CPU,
+	MBX_VFCNT
+};
+struct rnp_mbx_api {
+	void (*init_mbx)(struct rnp_hw *hw);
+	int32_t (*read)(struct rnp_hw *hw,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*write)(struct rnp_hw *hw,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*read_posted)(struct rte_eth_dev *dev,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*write_posted)(struct rte_eth_dev *dev,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*check_for_msg)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*check_for_ack)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*check_for_rst)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_api ops;
+	uint32_t usec_delay;    /* retry interval delay time */
+	uint32_t timeout;       /* retry ops timeout limit */
+	uint16_t size;          /* data buffer size*/
+	uint16_t vf_num;        /* Virtual Function num */
+	uint16_t pf_num;        /* Physical Function num */
+	uint16_t sriov_st;      /* Sriov state */
+	bool irq_enabled;
+	union {
+		struct {
+			unsigned short pf_req;
+			unsigned short pf_ack;
+		};
+		struct {
+			unsigned short cpu_req;
+			unsigned short cpu_ack;
+		};
+	};
+	unsigned short vf_req[64];
+	unsigned short vf_ack[64];
+
+	struct rnp_mbx_stats stats;
+
+	rte_atomic16_t state;
+} __rte_cache_aligned;
+
+#define RNP_MAX_HW_PORT_PERR_PF (4)
 struct rnp_hw {
 	void *back;
 	char *iobar0;
 	uint32_t iobar0_len;
 	char *iobar4;
 	uint32_t iobar4_len;
+	char *link_sync;
+	char *dma_base;
+	char *eth_base;
+	char *veb_base;
+	char *mac_base[RNP_MAX_HW_PORT_PERR_PF];
+	char *msix_base;
+	/* === dma == */
+	char *dma_axi_en;
+	char *dma_axi_st;
 
 	uint16_t device_id;
 	uint16_t vendor_id;
-} __rte_cache_aligned;
+	uint16_t function;
+	uint16_t pf_vf_num;
+	uint16_t max_vfs;
+	void *cookie_pool;
+	char cookie_p_name[RTE_MEMZONE_NAMESIZE];
 
+	struct rnp_mbx_info mbx;
+} __rte_cache_aligned;
 #endif /* __RNP_H__*/
diff --git a/drivers/net/rnp/meson.build b/drivers/net/rnp/meson.build
index 36a1f7148d..38dbee5ca4 100644
--- a/drivers/net/rnp/meson.build
+++ b/drivers/net/rnp/meson.build
@@ -9,5 +9,6 @@  endif
 
 sources = files(
 		'rnp_ethdev.c',
+		'rnp_mbx.c',
 )
 includes += include_directories('base')
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index ea9d138673..437a2cc209 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -3,6 +3,7 @@ 
  */
 #ifndef __RNP_H__
 #define __RNP_H__
+#include <rte_log.h>
 
 #include "base/rnp_hw.h"
 
@@ -14,14 +15,17 @@ 
 
 struct rnp_eth_port {
 	void *adapt;
+	struct rnp_hw *hw;
 	struct rte_eth_dev *eth_dev;
 } __rte_cache_aligned;
 
 struct rnp_share_ops {
+	const struct rnp_mbx_api *mbx_api;
 } __rte_cache_aligned;
 
 struct rnp_eth_adapter {
 	struct rnp_hw hw;
+	uint16_t max_vfs;
 	struct rte_pci_device *pdev;
 	struct rte_eth_dev *eth_dev; /* master eth_dev */
 	struct rnp_eth_port *ports[RNP_MAX_PORT_OF_PF];
@@ -34,5 +38,36 @@  struct rnp_eth_adapter {
 	(((struct rnp_eth_port *)((eth_dev)->data->dev_private)))
 #define RNP_DEV_TO_ADAPTER(eth_dev) \
 	((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT(eth_dev)->adapt))
+#define RNP_DEV_TO_HW(eth_dev) \
+	(&((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT((eth_dev))->adapt))->hw)
+#define RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) \
+	(((struct rnp_share_ops *)(dev)->process_private)->mbx_api)
+#define RNP_DEV_TO_MBX_OPS(dev)	RNP_DEV_PP_PRIV_TO_MBX_OPS(dev)
 
+static inline void rnp_reg_offset_init(struct rnp_hw *hw)
+{
+	uint16_t i;
+
+	if (hw->device_id == RNP_DEV_ID_N10G && hw->mbx.pf_num) {
+		hw->iobar4 += 0x100000;
+		hw->msix_base = hw->iobar4 + 0xa0000;
+		hw->msix_base += 0x200;
+	} else {
+		hw->msix_base = hw->iobar4 + 0xa0000;
+	}
+	/* === dma status/config====== */
+	hw->link_sync    = hw->iobar4 + 0x000c;
+	hw->dma_axi_en   = hw->iobar4 + 0x0010;
+	hw->dma_axi_st   = hw->iobar4 + 0x0014;
+
+	if (hw->mbx.pf_num)
+		hw->msix_base += 0x200;
+	/* === queue registers === */
+	hw->dma_base     = hw->iobar4 + 0x08000;
+	hw->veb_base     = hw->iobar4 + 0x0;
+	hw->eth_base     = hw->iobar4 + 0x10000;
+	/* mac */
+	for (i = 0; i < RNP_MAX_HW_PORT_PERR_PF; i++)
+		hw->mac_base[i] = hw->iobar4 + 0x60000 + 0x10000 * i;
+}
 #endif /* __RNP_H__ */
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index fc67515e04..47c5361f95 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -8,6 +8,7 @@ 
 #include <ethdev_driver.h>
 
 #include "rnp.h"
+#include "rnp_mbx.h"
 #include "rnp_logs.h"
 
 static int
@@ -89,6 +90,58 @@  rnp_alloc_eth_port(struct rte_pci_device *master_pci, char *name)
 	return NULL;
 }
 
+static void rnp_get_nic_attr(struct rnp_eth_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+}
+
+static int
+rnp_process_resource_init(struct rte_eth_dev *eth_dev)
+{
+	struct rnp_share_ops *share_priv;
+
+	/* Allocate the process_private memory from the libc heap; it
+	 * must not come from the DPDK memory allocators such as
+	 * rte_malloc or rte_memzone.
+	 */
+	/* process_private is used to keep per-process function pointers,
+	 * so that a secondary process never calls function addresses
+	 * that are only valid in the primary process; doing so via IPC
+	 * would make the secondary process core-dump.
+	 * It also must not live in DPDK shared (hugepage) memory:
+	 * letting the secondary process allocate or free DPDK-managed
+	 * memory for it would cause hugepage memory exceptions.
+	 * Be careful whenever a secondary process dereferences pointers
+	 * stored in shared memory.
+	 */
+	share_priv = calloc(1, sizeof(*share_priv));
+	if (!share_priv) {
+		PMD_DRV_LOG(ERR, "calloc share_priv failed");
+		return -ENOMEM;
+	}
+	memset(share_priv, 0, sizeof(*share_priv));
+	eth_dev->process_private = share_priv;
+
+	return 0;
+}
+
+static void
+rnp_common_ops_init(struct rnp_eth_adapter *adapter)
+{
+	struct rnp_share_ops *share_priv;
+
+	share_priv = adapter->share_priv;
+	share_priv->mbx_api = &rnp_mbx_pf_ops;
+}
+
+static int
+rnp_special_ops_init(struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
 static int
 rnp_eth_dev_init(struct rte_eth_dev *dev)
 {
@@ -124,6 +177,20 @@  rnp_eth_dev_init(struct rte_eth_dev *dev)
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
 	hw->device_id = pci_dev->id.device_id;
+	adapter->max_vfs = pci_dev->max_vfs;
+	ret = rnp_process_resource_init(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "share prive resource init failed");
+		return ret;
+	}
+	adapter->share_priv = dev->process_private;
+	rnp_common_ops_init(adapter);
+	rnp_get_nic_attr(adapter);
+	/* We need to use the device id to change the resource mode */
+	rnp_special_ops_init(dev);
+	port->adapt = adapter;
+	port->hw = hw;
+	rnp_init_mbx_ops_pf(hw);
 	for (p_id = 0; p_id < adapter->num_ports; p_id++) {
 		/* port 0 resource has been alloced When Probe */
 		if (!p_id) {
@@ -158,11 +225,10 @@  rnp_eth_dev_init(struct rte_eth_dev *dev)
 			continue;
 		if (port->eth_dev) {
 			rnp_dev_close(port->eth_dev);
-			rte_eth_dev_release_port(port->eth_dev);
 			if (port->eth_dev->process_private)
 				free(port->eth_dev->process_private);
+			rte_eth_dev_release_port(port->eth_dev);
 		}
-		rte_free(port);
 	}
 	rte_free(adapter);
 
diff --git a/drivers/net/rnp/rnp_logs.h b/drivers/net/rnp/rnp_logs.h
index 1b3ee33745..f1648aabb5 100644
--- a/drivers/net/rnp/rnp_logs.h
+++ b/drivers/net/rnp/rnp_logs.h
@@ -13,6 +13,15 @@  extern int rnp_drv_logtype;
 #define RNP_PMD_DRV_LOG(level, fmt, args...) \
 	rte_log(RTE_LOG_##level, rnp_drv_logtype, \
 		"%s() " fmt, __func__, ##args)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rnp_drv_logtype, "%s(): " fmt, \
+			__func__, ## args)
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#define RNP_PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_##level, rnp_drv_logtype, \
+			"rnp_net: (%d) " fmt, __LINE__, ##args)
 #ifdef RTE_LIBRTE_RNP_DEBUG_RX
 extern int rnp_rx_logtype;
 #define RNP_PMD_RX_LOG(level, fmt, args...) \
diff --git a/drivers/net/rnp/rnp_mbx.c b/drivers/net/rnp/rnp_mbx.c
new file mode 100644
index 0000000000..79b214d99b
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx.c
@@ -0,0 +1,522 @@ 
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+#include "rnp.h"
+#include "rnp_hw.h"
+#include "rnp_mbx.h"
+#include "rnp_mbx_fw.h"
+#include "rnp_logs.h"
+
+#define RNP_MAX_VF_FUNCTIONS	(64)
+/* == VEC == */
+#define VF2PF_MBOX_VEC(VF)	(0xa5100 + 4 * (VF))
+#define CPU2PF_MBOX_VEC		(0xa5300)
+
+/* == PF <--> VF mailbox ==== */
+#define SHARE_MEM_BYTES		(64) /* 64bytes */
+/* for PF1 rtl will remap 6000 to 0xb000 */
+#define PF_VF_SHM(vf)		((0xa6000) + (64 * (vf)))
+#define PF2VF_COUNTER(vf)	(PF_VF_SHM(vf) + 0)
+#define VF2PF_COUNTER(vf)	(PF_VF_SHM(vf) + 4)
+#define PF_VF_SHM_DATA(vf)	(PF_VF_SHM(vf) + 8)
+#define PF2VF_MBOX_CTRL(vf)	((0xa7100) + (4 * (vf)))
+#define PF_VF_MBOX_MASK_LO	((0xa7200))
+#define PF_VF_MBOX_MASK_HI	((0xa7300))
+
+/* === CPU <--> PF === */
+#define CPU_PF_SHM		(0xaa000)
+#define CPU2PF_COUNTER		(CPU_PF_SHM + 0)
+#define PF2CPU_COUNTER		(CPU_PF_SHM + 4)
+#define CPU_PF_SHM_DATA		(CPU_PF_SHM + 8)
+#define PF2CPU_MBOX_CTRL	(0xaa100)
+#define CPU_PF_MBOX_MASK	(0xaa300)
+
+/* === CPU <--> VF === */
+#define CPU_VF_SHM(vf)		(0xa8000 + (64 * (vf)))
+#define CPU2VF_COUNTER(vf)	(CPU_VF_SHM(vf) + 0)
+#define VF2CPU_COUNTER(vf)	(CPU_VF_SHM(vf) + 4)
+#define CPU_VF_SHM_DATA(vf)	(CPU_VF_SHM(vf) + 8)
+#define VF2CPU_MBOX_CTRL(vf)	(0xa9000 + 64 * (vf))
+#define CPU_VF_MBOX_MASK_LO(vf) (0xa9200 + 64 * (vf))
+#define CPU_VF_MBOX_MASK_HI(vf) (0xa9300 + 64 * (vf))
+
+#define MBOX_CTRL_REQ		(1 << 0)  /* WO */
+/* VF:WR, PF:RO */
+#define MBOX_CTRL_PF_HOLD_SHM	(1 << 3)  /* VF:RO, PF:WR */
+
+#define MBOX_IRQ_EN		(0)
+#define MBOX_IRQ_DISABLE	(1)
+
+/****************************PF MBX OPS************************************/
+static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg)
+{
+	rte_mb();
+	return mbx_rd32(hw, reg) & 0xffff;
+}
+
+static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg)
+{
+	rte_mb();
+	return (mbx_rd32(hw, reg) >> 16) & 0xffff;
+}
+
+static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int reg = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+	u16 req;
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+
+	rte_mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	/* hw->mbx.stats.msgs_tx++; */
+}
+
+static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int reg = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+	u16 ack;
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+
+	rte_mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	/* hw->mbx.stats.msgs_rx++; */
+}
+
+/**
+ *  rnp_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static int32_t rnp_poll_for_msg(struct rte_eth_dev *dev, enum MBX_ID mbx_id)
+{
+	const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !ops->check_for_msg)
+		goto out;
+
+	while (countdown && ops->check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		rte_delay_us_block(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnp_poll_for_ack - Wait for message acknowledgment
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgment
+ **/
+static int32_t rnp_poll_for_ack(struct rte_eth_dev *dev, enum MBX_ID mbx_id)
+{
+	const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !ops->check_for_ack)
+		goto out;
+
+	while (countdown && ops->check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		rte_delay_us_block(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnp_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static int32_t
+rnp_read_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size,
+		       enum MBX_ID mbx_id)
+{
+	const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+	int32_t ret_val = -ETIME;
+
+	if (!ops->read || !countdown)
+		return -EOPNOTSUPP;
+
+	ret_val = rnp_poll_for_msg(dev, mbx_id);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		return ops->read(hw, msg, size, mbx_id);
+	return ret_val;
+}
+
+/**
+ *  rnp_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static int32_t
+rnp_write_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size,
+			enum MBX_ID mbx_id)
+{
+	const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int32_t ret_val = -ETIME;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!ops->write || !mbx->timeout)
+		goto out;
+
+	/* send msg and hold buffer lock */
+	if (ops->write)
+		ret_val = ops->write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnp_poll_for_ack(dev, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  rnp_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static int32_t rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+
+	if (mbx_id == MBX_CM3CPU) {
+		if (rnp_mbx_get_req(hw, CPU2PF_COUNTER) != hw->mbx.cpu_req) {
+			ret_val = 0;
+			/* hw->mbx.stats.reqs++; */
+		}
+	} else {
+		if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id)) !=
+				hw->mbx.vf_req[mbx_id]) {
+			ret_val = 0;
+			/* hw->mbx.stats.reqs++; */
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnp_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static int32_t rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+
+	if (mbx_id == MBX_CM3CPU) {
+		if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER) != hw->mbx.cpu_ack) {
+			ret_val = 0;
+			/* hw->mbx.stats.acks++; */
+		}
+	} else {
+		if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id)) != hw->mbx.vf_ack[mbx_id]) {
+			ret_val = 0;
+			/* hw->mbx.stats.acks++; */
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnp_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index or CPU
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static int32_t rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+	int try_cnt = 5000;  /* 500ms */
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM);
+
+		/* reserve mailbox for cm3 use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM)
+			return 0;
+		rte_delay_us_block(100);
+	}
+
+	RNP_PMD_LOG(WARNING, "%s: failed to get:%d lock\n",
+			__func__, mbx_id);
+	return ret_val;
+}
+
+/**
+ *  rnp_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int32_t rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg,
+				u16 size, enum MBX_ID mbx_id)
+{
+	u32 DATA_REG = (mbx_id == MBX_CM3CPU) ?
+		CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+	int32_t ret_val = 0;
+	u32 stat __rte_unused;
+	u16 i;
+
+	if (size > RNP_VFMAILBOX_SIZE) {
+		RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__,
+				size, RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+
+	/* lock the mailbox to prevent pf/vf/cpu race condition */
+	ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val) {
+		RNP_PMD_LOG(WARNING, "PF[%d] Can't Get Mbx-Lock Try Again\n",
+				hw->function);
+		return ret_val;
+	}
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+#ifdef MBX_WR_DEBUG
+		mbx_pwr32(hw, DATA_REG + i * 4, msg[i]);
+#else
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+#endif
+	}
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	if (mbx_id == MBX_CM3CPU)
+		hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER);
+	else
+		hw->mbx.vf_ack[mbx_id] = rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id));
+
+	rnp_mbx_inc_pf_req(hw, mbx_id);
+	rte_mb();
+
+	rte_delay_us(300);
+
+	/* Interrupt VF/CM3 to tell it a message
+	 * has been sent and release buffer
+	 */
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+	return 0;
+}
+
+/**
+ *  rnp_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF/CPU request so no polling for message is needed.
+ **/
+static int32_t rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg,
+			       u16 size, enum MBX_ID mbx_id)
+{
+	u32 BUF_REG  = (mbx_id == MBX_CM3CPU) ?
+		CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+	int32_t ret_val = -EIO;
+	u32 stat __rte_unused, i;
+	if (size > RNP_VFMAILBOX_SIZE) {
+		RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__,
+				size, RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+#ifdef MBX_RD_DEBUG
+		msg[i] = mbx_prd32(hw, BUF_REG + 4 * i);
+#else
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+#endif
+	}
+	mbx_wr32(hw, BUF_REG, 0);
+
+	/* update req. used by rnpvf_check_for_msg_vf  */
+	if (mbx_id == MBX_CM3CPU)
+		hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER);
+	else
+		hw->mbx.vf_req[mbx_id] = rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id));
+
+	/* this ack may be too early? */
+	/* Acknowledge receipt and release mailbox, then we're done */
+	rnp_mbx_inc_pf_ack(hw, mbx_id);
+
+	rte_mb();
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+
+	return ret_val;
+}
+
+static void rnp_mbx_reset_pf(struct rnp_hw *hw)
+{
+	int v;
+
+	/* reset pf->cm3 status */
+	v = mbx_rd32(hw, CPU2PF_COUNTER);
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+	/* release   pf->cm3 buffer lock */
+	mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0);
+
+	rte_mb();
+	/* enable irq to fw */
+	mbx_wr32(hw, CPU_PF_MBOX_MASK, 0);
+}
+
+static int get_pfvfnum(struct rnp_hw *hw)
+{
+	uint32_t addr_mask;
+	uint32_t offset;
+	uint32_t val;
+#define RNP_PF_NUM_REG       (0x75f000)
+#define RNP_PFVF_SHIFT       (4)
+#define RNP_PF_SHIFT         (6)
+#define RNP_PF_BIT_MASK      BIT(6)
+	addr_mask = hw->iobar0_len - 1;
+	offset = RNP_PF_NUM_REG & addr_mask;
+	val = rnp_rd_reg(hw->iobar0 + offset);
+
+	return val >> RNP_PFVF_SHIFT;
+}
+
+const struct rnp_mbx_api rnp_mbx_pf_ops = {
+	.read           = rnp_read_mbx_pf,
+	.write          = rnp_write_mbx_pf,
+	.read_posted    = rnp_read_posted_mbx_pf,
+	.write_posted   = rnp_write_posted_mbx_pf,
+	.check_for_msg  = rnp_check_for_msg_pf,
+	.check_for_ack  = rnp_check_for_ack_pf,
+};
+
+void *rnp_memzone_reserve(const char *name, unsigned int size)
+{
+#define NO_FLAGS 0
+	const struct rte_memzone *mz = NULL;
+
+	if (name) {
+		if (size) {
+			mz = rte_memzone_reserve(name, size,
+					rte_socket_id(), NO_FLAGS);
+			if (mz)
+				memset(mz->addr, 0, size);
+		} else {
+			mz = rte_memzone_lookup(name);
+		}
+		return mz ? mz->addr : NULL;
+	}
+	return NULL;
+}
+
+void rnp_init_mbx_ops_pf(struct rnp_hw *hw)
+{
+	struct rnp_eth_adapter *adapter = hw->back;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct mbx_req_cookie *cookie;
+	uint32_t vf_isolat_off;
+
+	mbx->size       = RNP_VFMAILBOX_SIZE;
+	mbx->usec_delay = RNP_MBX_DELAY_US;
+	mbx->timeout    = (RNP_MBX_TIMEOUT_SECONDS * 1000 * 1000) /
+		mbx->usec_delay;
+	if (hw->device_id == RNP_DEV_ID_N10G) {
+		vf_isolat_off = RNP_VF_ISOLATE_CTRL &
+			(hw->iobar0_len - 1);
+		rnp_wr_reg(hw->iobar0 + vf_isolat_off, 0);
+	}
+	mbx->sriov_st = 0;
+	hw->pf_vf_num = get_pfvfnum(hw);
+	mbx->vf_num = UINT16_MAX;
+	mbx->pf_num = (hw->pf_vf_num & RNP_PF_BIT_MASK) >> RNP_PF_SHIFT;
+	hw->function = mbx->pf_num;
+	/* Retrieving and storing the HW base address of device */
+	rnp_reg_offset_init(hw);
+	snprintf(hw->cookie_p_name, RTE_MEMZONE_NAMESIZE, "mbx_req_cookie%d_%d",
+			hw->function, adapter->eth_dev->data->port_id);
+	hw->cookie_pool = rnp_memzone_reserve(hw->cookie_p_name,
+			sizeof(struct mbx_req_cookie));
+
+	cookie = (struct mbx_req_cookie *)hw->cookie_pool;
+	if (cookie) {
+		cookie->timeout_ms = 1000;
+		cookie->magic = COOKIE_MAGIC;
+		cookie->priv_len = RNP_MAX_SHARE_MEM;
+	}
+
+	rnp_mbx_reset_pf(hw);
+}
diff --git a/drivers/net/rnp/rnp_mbx.h b/drivers/net/rnp/rnp_mbx.h
new file mode 100644
index 0000000000..87949c1726
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx.h
@@ -0,0 +1,139 @@ 
+#ifndef __TSRN10_MBX_H__
+#define __TSRN10_MBX_H__
+
+#define VF_NUM_MASK_TEMP	(0xff0)
+#define VF_NUM_OFF		(4)
+#define RNP_VF_NUM		(0x75f000)
+#define RNP_VF_NB_MASK		(0x3f)
+#define RNP_PF_NB_MASK		(0x40)
+#define RNP_VF_ISOLATE_CTRL	(0x7982fc)
+#define RNP_IS_SRIOV		BIT(7)
+#define RNP_SRIOV_ST_SHIFT	(24)
+#define RNP_VF_DEFAULT_PORT	(0)
+
+/* Mbx Ctrl state */
+#define RNP_VFMAILBOX_SIZE	(14) /* 14 32-bit words (64-byte shm minus 8-byte counters) */
+#define TSRN10_VFMBX_SIZE	(RNP_VFMAILBOX_SIZE)
+#define RNP_VT_MSGTYPE_ACK	(0x80000000)
+
+#define RNP_VT_MSGTYPE_NACK	(0x40000000)
+/* Messages below or'd with this are the NACK */
+#define RNP_VT_MSGTYPE_CTS	(0x20000000)
+/* Indicates that VF is still
+ * clear to send requests
+ */
+#define RNP_VT_MSGINFO_SHIFT	(16)
+
+#define RNP_VT_MSGINFO_MASK	(0xFF << RNP_VT_MSGINFO_SHIFT)
+/* The mailbox memory size is 64 bytes accessed by 32-bit registers */
+#define RNP_VLVF_VIEN		(0x80000000) /* filter is valid */
+#define RNP_VLVF_ENTRIES	(64)
+#define RNP_VLVF_VLANID_MASK	(0x00000FFF)
+/* Every VF owns 64 bytes of mailbox memory, accessed as 32-bit words */
+
+#define RNP_VF_RESET		(0x01) /* VF requests reset */
+#define RNP_VF_SET_MAC_ADDR	(0x02) /* VF requests PF to set MAC addr */
+#define RNP_VF_SET_MULTICAST	(0x03) /* VF requests PF to set MC addr */
+#define RNP_VF_SET_VLAN		(0x04) /* VF requests PF to set VLAN */
+
+#define RNP_VF_SET_LPE		(0x05) /* VF requests PF to set VMOLR.LPE */
+#define RNP_VF_SET_MACVLAN	(0x06) /* VF requests PF for unicast filter */
+#define RNP_VF_GET_MACVLAN	(0x07) /* VF requests mac */
+#define RNP_VF_API_NEGOTIATE	(0x08) /* negotiate API version */
+#define RNP_VF_GET_QUEUES	(0x09) /* get queue configuration */
+#define RNP_VF_GET_LINK		(0x10) /* get link status */
+
+#define RNP_VF_SET_VLAN_STRIP	(0x0a) /* VF Requests PF to set VLAN STRIP */
+#define RNP_VF_REG_RD		(0x0b) /* VF Read Reg */
+#define RNP_VF_GET_MAX_MTU	(0x0c) /* VF Get Max Mtu */
+#define RNP_VF_SET_MTU		(0x0d) /* VF Set Mtu */
+#define RNP_VF_GET_FW		(0x0e) /* VF Get Firmware Version */
+
+#define RNP_PF_VFNUM_MASK	GENMASK(26, 21)
+
+#define RNP_PF_SET_FCS		(0x10) /* PF set fcs status */
+#define RNP_PF_SET_PAUSE	(0x11) /* PF set pause status */
+#define RNP_PF_SET_FT_PADDING	(0x12) /* PF set ft padding status */
+#define RNP_PF_SET_VLAN_FILTER	(0x13) /* PF set ntuple status */
+#define RNP_PF_SET_VLAN		(0x14)
+#define RNP_PF_SET_LINK		(0x15)
+#define RNP_PF_SET_SPEED_40G	BIT(8)
+#define RNP_PF_SET_SPEED_10G	BIT(7)
+#define RNP_PF_SET_SPEED_1G	BIT(5)
+#define RNP_PF_SET_SPEED_100M	BIT(3)
+
+#define RNP_PF_SET_MTU		(0x16)
+#define RNP_PF_SET_RESET	(0x17)
+#define RNP_PF_LINK_UP		BIT(31)
+#define RNP_PF_SPEED_MASK	GENMASK(15, 0)
+
+/* Define mailbox register bits */
+#define RNP_PF_REMOVE		(0x0f)
+
+/* Mailbox API ID VF Request */
+/* length of permanent address message returned from PF */
+#define RNP_VF_PERMADDR_MSG_LEN (11)
+#define RNP_VF_TX_QUEUES	(1) /* number of Tx queues supported */
+#define RNP_VF_RX_QUEUES	(2) /* number of Rx queues supported */
+#define RNP_VF_TRANS_VLAN	(3) /* Indication of port vlan */
+#define RNP_VF_DEF_QUEUE	(4) /* Default queue offset */
+/* word in permanent address message with the current multicast type */
+#define RNP_VF_VLAN_WORD	(5)
+#define RNP_VF_PHY_TYPE_WORD	(6)
+#define RNP_VF_FW_VERSION_WORD	(7)
+#define RNP_VF_LINK_STATUS_WORD	(8)
+#define RNP_VF_AXI_MHZ		(9)
+#define RNP_VF_RNP_VF_FEATURE	(10)
+#define RNP_VF_RNP_VF_FILTER_EN	BIT(0)
+
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL    BIT(2)
+#define RNP_LINK_SPEED_100_FULL   BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL   BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL  BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL  BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL  BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL  BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF    BIT(10)
+#define RNP_LINK_SPEED_100_HALF   BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF   BIT(12)
+
+/* Mailbox API ID PF Request */
+#define RNP_VF_MC_TYPE_WORD		(3)
+#define RNP_VF_DMA_VERSION_WORD		(4)
+/* Get Queue write-back reference value */
+#define RNP_PF_CONTROL_PRING_MSG	(0x0100) /* PF control message */
+
+#define TSRN10_MBX_VECTOR_ID            (0)
+#define TSRN10_PF2VF_MBX_VEC_CTR(n)     (0xa5000 + 0x4 * (n))
+
+#define RNP_VF_INIT_TIMEOUT		(200) /* Number of retries to clear RSTI */
+#define RNP_VF_MBX_INIT_TIMEOUT		(2000) /* number of retries on mailbox */
+
+#define MBOX_CTRL_REQ			(1 << 0) /* WO */
+#define MBOX_CTRL_VF_HOLD_SHM		(1 << 2) /* VF:WR, PF:RO */
+#define VF_NUM_MASK 0x3f
+#define VFNUM(num)			((num) & VF_NUM_MASK)
+
+#define PF_VF_SHM(vf)	\
+	((0xa6000) + (64 * (vf))) /* for PF1 rtl will remap 6000 to 0xb000 */
+#define PF2VF_COUNTER(vf)		(PF_VF_SHM(vf) + 0)
+#define VF2PF_COUNTER(vf)		(PF_VF_SHM(vf) + 4)
+#define PF_VF_SHM_DATA(vf)		(PF_VF_SHM(vf) + 8)
+#define VF2PF_MBOX_CTRL(vf)		((0xa7000) + (4 * (vf)))
+
+/* Error Codes */
+#define RNP_ERR_INVALID_MAC_ADDR	(-1)
+#define RNP_ERR_MBX			(-100)
+
+#define RNP_MBX_DELAY_US		(100) /* Delay us for Retry */
+/* Max Retry Time */
+#define RNP_MBX_TIMEOUT_SECONDS	(2) /* Max Retry Time 2s */
+#define RNP_ARRAY_OPCODE_OFFSET	(0)
+#define RNP_ARRAY_CTRL_OFFSET	(1)
+
+void rnp_init_mbx_ops_pf(struct rnp_hw *hw);
+extern const struct rnp_mbx_api rnp_mbx_pf_ops;
+void *rnp_memzone_reserve(const char *name, unsigned int size);
+#endif
diff --git a/drivers/net/rnp/rnp_mbx_fw.h b/drivers/net/rnp/rnp_mbx_fw.h
new file mode 100644
index 0000000000..439090b5a3
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx_fw.h
@@ -0,0 +1,22 @@ 
+#ifndef __RNP_MBX_FW_H__
+#define __RNP_MBX_FW_H__
+
+struct mbx_fw_cmd_reply;
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+#define RNP_MAX_SHARE_MEM (8 * 8)
+struct mbx_req_cookie {
+	int magic;
+#define COOKIE_MAGIC 0xCE
+	cookie_cb cb;
+	int timeout_ms;
+	int errcode;
+
+	/* wait_queue_head_t wait; */
+	volatile int done;
+	int priv_len;
+	char priv[RNP_MAX_SHARE_MEM];
+};
+struct mbx_fw_cmd_reply {
+} __rte_cache_aligned;
+
+#endif /* __RNP_MBX_FW_H__*/
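
As a rough usage sketch of the mailbox API added by this patch (illustrative only): a PF-side caller could reach the firmware through the ops installed by rnp_init_mbx_ops_pf(). The helper name and the message payload are assumptions; RNP_DEV_TO_MBX_OPS(), MBX_FW and the posted read/write signatures come from the headers above.

#include <rte_common.h>		/* RTE_DIM */

#include "rnp.h"		/* RNP_DEV_TO_MBX_OPS(), pulls in base/rnp_hw.h */

/* Hypothetical helper: send a request to the firmware mailbox and
 * wait for the reply using the posted (blocking) helpers.
 */
static int rnp_mbx_fw_roundtrip_example(struct rte_eth_dev *dev)
{
	const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
	uint32_t msg[4] = { 0 };	/* opcode + payload, firmware-defined */
	int32_t ret;

	/* write the request and poll (usec_delay x timeout) for the ack */
	ret = ops->write_posted(dev, msg, RTE_DIM(msg), MBX_FW);
	if (ret)
		return ret;

	/* wait for the firmware message and copy it out of shared memory */
	return ops->read_posted(dev, msg, RTE_DIM(msg), MBX_FW);
}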