new file mode 100644
@@ -0,0 +1,24 @@
+#include "rnp.h"
+#include "rnp_api.h"
+
+/* Dispatch hardware init through the per-device MAC ops table.
+ * Returns the op's result, or -EOPNOTSUPP when no init_hw op is set.
+ */
+int
+rnp_init_hw(struct rte_eth_dev *dev)
+{
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+ struct rnp_mac_api *ops = RNP_DEV_TO_MAC_OPS(dev);
+
+ if (!ops->init_hw)
+ return -EOPNOTSUPP;
+
+ return ops->init_hw(hw);
+}
+
+/* Dispatch hardware reset through the per-device MAC ops table.
+ * Returns the op's result, or -EOPNOTSUPP when no reset_hw op is set.
+ */
+int
+rnp_reset_hw(struct rte_eth_dev *dev, struct rnp_hw *hw)
+{
+ struct rnp_mac_api *ops = RNP_DEV_TO_MAC_OPS(dev);
+
+ if (!ops->reset_hw)
+ return -EOPNOTSUPP;
+
+ return ops->reset_hw(hw);
+}
+
new file mode 100644
@@ -0,0 +1,7 @@
+#ifndef __RNP_API_H__
+#define __RNP_API_H__
+int
+rnp_init_hw(struct rte_eth_dev *dev);
+int
+rnp_reset_hw(struct rte_eth_dev *dev, struct rnp_hw *hw);
+#endif /* __RNP_API_H__ */
new file mode 100644
@@ -0,0 +1,7 @@
+#ifndef __RNP_CFG_H__
+#define __RNP_CFG_H__
+#include "rnp_osdep.h"
+
+#define RNP_NIC_RESET _NIC_(0x0010)
+#define RNP_TX_QINQ_WORKAROUND _NIC_(0x801c)
+#endif /* __RNP_CFG_H__ */
new file mode 100644
@@ -0,0 +1,73 @@
+#ifndef __RNP_REGS_H__
+#define __RNP_REGS_H__
+
+#include "rnp_osdep.h"
+
+/* mac address offset */
+#define RNP_DMA_CTRL (0x4)
+#define RNP_VEB_BYPASS_EN BIT(4)
+#define RNP_DMA_MEM_CFG_LE (0 << 5)
+#define TSNR10_DMA_MEM_CFG_BE (1 << 5)
+#define RNP_DMA_SCATTER_MEM_SHIFT (16)
+
+#define RNP_FIRMWARE_SYNC (0xc)
+#define RNP_FIRMWARE_SYNC_MASK GENMASK(31, 16)
+#define RNP_FIRMWARE_SYNC_MAGIC (0xa5a40000)
+#define RNP_DRIVER_REMOVE (0x5a000000)
+/* 1BIT <-> 16 bytes Dma Addr Size*/
+#define RNP_DMA_SCATTER_MEM_MASK GENMASK(31, 16)
+#define RNP_DMA_TX_MAP_MODE_SHIFT (12)
+#define RNP_DMA_TX_MAP_MODE_MASK GENMASK(15, 12)
+#define RNP_DMA_RX_MEM_PAD_EN BIT(8)
+/* === queue register ===== */
+/* enable */
+#define RNP_DMA_RXQ_START(qid) _RING_(0x0010 + 0x100 * (qid))
+#define RNP_DMA_RXQ_READY(qid) _RING_(0x0014 + 0x100 * (qid))
+#define RNP_DMA_TXQ_START(qid) _RING_(0x0018 + 0x100 * (qid))
+#define RNP_DMA_TXQ_READY(qid) _RING_(0x001c + 0x100 * (qid))
+
+#define RNP_DMA_INT_STAT(qid) _RING_(0x0020 + 0x100 * (qid))
+#define RNP_DMA_INT_MASK(qid) _RING_(0x0024 + 0x100 * (qid))
+#define RNP_TX_INT_MASK BIT(1)
+#define RNP_RX_INT_MASK BIT(0)
+#define RNP_DMA_INT_CLER(qid) _RING_(0x0028 + 0x100 * (qid))
+
+/* rx-queue */
+#define RNP_DMA_RXQ_BASE_ADDR_HI(qid) _RING_(0x0030 + 0x100 * (qid))
+#define RNP_DMA_RXQ_BASE_ADDR_LO(qid) _RING_(0x0034 + 0x100 * (qid))
+#define RNP_DMA_RXQ_LEN(qid) _RING_(0x0038 + 0x100 * (qid))
+#define RNP_DMA_RXQ_HEAD(qid) _RING_(0x003c + 0x100 * (qid))
+#define RNP_DMA_RXQ_TAIL(qid) _RING_(0x0040 + 0x100 * (qid))
+#define RNP_DMA_RXQ_DESC_FETCH_CTRL(qid) _RING_(0x0044 + 0x100 * (qid))
+#define RNP_DMA_RXQ_INT_DELAY_TIMER(qid) _RING_(0x0048 + 0x100 * (qid))
+/* Fix: parameter was named 'qidx' while the body expanded 'qid',
+ * so any use of this macro failed to compile.
+ */
+#define RNP_DMA_RXQ_INT_DELAY_PKTCNT(qid) _RING_(0x004c + 0x100 * (qid))
+#define RNP_DMA_RXQ_RX_PRI_LVL(qid) _RING_(0x0050 + 0x100 * (qid))
+#define RNP_DMA_RXQ_DROP_TIMEOUT_TH(qid) _RING_(0x0054 + 0x100 * (qid))
+/* tx-queue */
+#define RNP_DMA_TXQ_BASE_ADDR_HI(qid) _RING_(0x0060 + 0x100 * (qid))
+#define RNP_DMA_TXQ_BASE_ADDR_LO(qid) _RING_(0x0064 + 0x100 * (qid))
+#define RNP_DMA_TXQ_LEN(qid) _RING_(0x0068 + 0x100 * (qid))
+#define RNP_DMA_TXQ_HEAD(qid) _RING_(0x006c + 0x100 * (qid))
+#define RNP_DMA_TXQ_TAIL(qid) _RING_(0x0070 + 0x100 * (qid))
+#define RNP_DMA_TXQ_DESC_FETCH_CTRL(qid) _RING_(0x0074 + 0x100 * (qid))
+#define RNP_DMA_TXQ_INT_DELAY_TIMER(qid) _RING_(0x0078 + 0x100 * (qid))
+#define RNP_DMA_TXQ_INT_DELAY_PKTCNT(qid) _RING_(0x007c + 0x100 * (qid))
+
+#define RNP_DMA_TXQ_PRI_LVL(qid) _RING_(0x0080 + 0x100 * (qid))
+#define RNP_DMA_TXQ_RATE_CTRL_TH(qid) _RING_(0x0084 + 0x100 * (qid))
+#define RNP_DMA_TXQ_RATE_CTRL_TM(qid) _RING_(0x0088 + 0x100 * (qid))
+
+/* VEB Table Register */
+#define RNP_VBE_MAC_LO(port, nr) _RING_(0x00a0 + (4 * (port)) + \
+ (0x100 * (nr)))
+#define RNP_VBE_MAC_HI(port, nr) _RING_(0x00b0 + (4 * (port)) + \
+ (0x100 * (nr)))
+#define RNP_VEB_VID_CFG(port, nr) _RING_(0x00c0 + (4 * (port)) + \
+ (0x100 * (nr)))
+#define RNP_VEB_VF_RING(port, nr) _RING_(0x00d0 + (4 * (port)) + \
+ (0x100 * (nr)))
+#define RNP_MAX_VEB_TB (64)
+#define RNP_VEB_RING_CFG_OFFSET (8)
+#define RNP_VEB_SWITCH_VF_EN BIT(7)
+#define MAX_VEB_TABLES_NUM (4)
+#endif /* __RNP_REGS_H__ */
new file mode 100644
@@ -0,0 +1,124 @@
+#ifndef _RNP_ETH_REGS_H_
+#define _RNP_ETH_REGS_H_
+
+#include "rnp_osdep.h"
+
+/* PTP 1588 TM Offload */
+#define RNP_ETH_PTP_TX_STATUS(n) _ETH_(0x0400 + ((n) * 0x14))
+#define RNP_ETH_PTP_TX_HTIMES(n) _ETH_(0x0404 + ((n) * 0x14))
+#define RNP_ETH_PTP_TX_LTIMES(n) _ETH_(0x0408 + ((n) * 0x14))
+#define RNP_ETH_PTP_TX_TS_ST(n) _ETH_(0x040c + ((n) * 0x14))
+#define RNP_ETH_PTP_TX_CLEAR(n) _ETH_(0x0410 + ((n) * 0x14))
+
+#define RNP_ETH_ENGINE_BYPASS _ETH_(0x8000)
+#define RNP_EN_TUNNEL_VXLAN_PARSE _ETH_(0x8004)
+#define RNP_ETH_MAC_LOOPBACK _ETH_(0x8008)
+#define RNP_ETH_FIFO_CTRL _ETH_(0x800c)
+#define RNP_ETH_FOUR_FIFO BIT(0)
+#define RNP_ETH_TWO_FIFO BIT(1)
+#define RNP_ETH_ONE_FIFO BIT(2)
+#define RNP_FIFO_CFG_EN (0x1221)
+#define RNP_ETH_VXLAN_PORT_CTRL _ETH_(0x8010)
+#define RNP_ETH_VXLAN_DEF_PORT (4789)
+#define RNP_HOST_FILTER_EN _ETH_(0x801c)
+#define RNP_HW_SCTP_CKSUM_CTRL _ETH_(0x8038)
+#define RNP_HW_CHECK_ERR_CTRL _ETH_(0x8060)
+#define RNP_HW_ERR_HDR_LEN BIT(0)
+#define RNP_HW_ERR_PKTLEN BIT(1)
+#define RNP_HW_L3_CKSUM_ERR BIT(2)
+#define RNP_HW_L4_CKSUM_ERR BIT(3)
+#define RNP_HW_SCTP_CKSUM_ERR BIT(4)
+#define RNP_HW_INNER_L3_CKSUM_ERR BIT(5)
+#define RNP_HW_INNER_L4_CKSUM_ERR BIT(6)
+#define RNP_HW_CKSUM_ERR_MASK GENMASK(6, 2)
+#define RNP_HW_CHECK_ERR_MASK GENMASK(6, 0)
+#define RNP_HW_ERR_RX_ALL_MASK GENMASK(1, 0)
+
+#define RNP_REDIR_CTRL _ETH_(0x8030)
+#define RNP_VLAN_Q_STRIP_CTRL(n) _ETH_(0x8040 + 0x4 * ((n) / 32))
+/* This Just VLAN Master Switch */
+#define RNP_VLAN_TUNNEL_STRIP_EN _ETH_(0x8050)
+#define RNP_VLAN_TUNNEL_STRIP_MODE _ETH_(0x8054)
+#define RNP_VLAN_TUNNEL_STRIP_OUTER (0)
+#define RNP_VLAN_TUNNEL_STRIP_INNER (1)
+#define RNP_RSS_INNER_CTRL _ETH_(0x805c)
+#define RNP_INNER_RSS_EN (1)
+
+#define RNP_ETH_DEFAULT_RX_RING _ETH_(0x806c)
+#define RNP_RX_FC_HI_WATER(n) _ETH_(0x80c0 + ((n) * 0x8))
+#define RNP_RX_FC_LO_WATER(n) _ETH_(0x80c4 + ((n) * 0x8))
+
+#define RNP_RX_FIFO_FULL_THRETH(n) _ETH_(0x8070 + ((n) * 0x8))
+#define RNP_RX_WORKAROUND_VAL _ETH_(0x7ff)
+#define RNP_RX_DEFAULT_VAL _ETH_(0x270)
+
+#define RNP_MIN_FRAME_CTRL _ETH_(0x80f0)
+#define RNP_MAX_FRAME_CTRL _ETH_(0x80f4)
+
+#define RNP_RX_FC_ENABLE _ETH_(0x8520)
+#define RNP_RING_FC_EN(n) _ETH_(0x8524 + 0x4 * ((n) / 32))
+#define RNP_RING_FC_THRESH(n) _ETH_(0x8a00 + 0x4 * (n))
+
+/* Mac Host Filter */
+#define RNP_MAC_FCTRL _ETH_(0x9110)
+#define RNP_MAC_FCTRL_MPE BIT(8) /* Multicast Promiscuous En */
+#define RNP_MAC_FCTRL_UPE BIT(9) /* Unicast Promiscuous En */
+#define RNP_MAC_FCTRL_BAM BIT(10) /* Broadcast Accept Mode */
+#define RNP_MAC_FCTRL_BYPASS (RNP_MAC_FCTRL_MPE | \
+ RNP_MAC_FCTRL_UPE | \
+ RNP_MAC_FCTRL_BAM)
+/* MC UC Mac Hash Filter Ctrl */
+#define RNP_MAC_MCSTCTRL _ETH_(0x9114)
+#define RNP_MAC_HASH_MASK GENMASK(11, 0)
+#define RNP_MAC_MULTICASE_TBL_EN BIT(2)
+#define RNP_MAC_UNICASE_TBL_EN BIT(3)
+#define RNP_UC_HASH_TB(n) _ETH_(0xA800 + ((n) * 0x4))
+#define RNP_MC_HASH_TB(n) _ETH_(0xAC00 + ((n) * 0x4))
+
+#define RNP_VLAN_FILTER_CTRL _ETH_(0x9118)
+#define RNP_L2TYPE_FILTER_CTRL (RNP_VLAN_FILTER_CTRL)
+#define RNP_L2TYPE_FILTER_EN BIT(31)
+#define RNP_VLAN_FILTER_EN BIT(30)
+
+#define RNP_FC_PAUSE_FWD_ACT _ETH_(0x9280)
+#define RNP_FC_PAUSE_DROP BIT(31)
+#define RNP_FC_PAUSE_PASS (0)
+#define RNP_FC_PAUSE_TYPE _ETH_(0x9284)
+#define RNP_FC_PAUSE_POLICY_EN BIT(31)
+#define RNP_PAUSE_TYPE _ETH_(0x8808)
+
+#define RNP_INPUT_USE_CTRL _ETH_(0x91d0)
+#define RNP_INPUT_VALID_MASK (0xf)
+#define RNP_INPUT_POLICY(n) _ETH_(0x91e0 + ((n) * 0x4))
+/* RSS */
+#define RNP_RSS_MRQC_ADDR _ETH_(0x92a0)
+#define RNP_SRIOV_CTRL RNP_RSS_MRQC_ADDR
+#define RNP_SRIOV_ENABLE BIT(3)
+
+#define RNP_RSS_REDIR_TB(mac, idx) _ETH_(0xe000 + \
+ ((mac) * 0x200) + ((idx) * 0x4))
+#define RNP_RSS_KEY_TABLE(idx) _ETH_(0x92d0 + ((idx) * 0x4))
+/*=======================================================================
+ *HOST_MAC_ADDRESS_FILTER
+ *=======================================================================
+ */
+#define RNP_RAL_BASE_ADDR(vf_id) _ETH_(0xA000 + 0x04 * (vf_id))
+#define RNP_RAH_BASE_ADDR(vf_id) _ETH_(0xA400 + 0x04 * (vf_id))
+#define RNP_MAC_FILTER_EN BIT(31)
+
+/* ETH Statistic */
+#define RNP_ETH_RXTRANS_DROP(p_id) _ETH_((0x8904) + ((p_id) * (0x40)))
+#define RNP_ETH_RXTRANS_CAT_ERR(p_id) _ETH_((0x8928) + ((p_id) * (0x40)))
+#define RNP_ETH_TXTM_DROP _ETH_(0X0470)
+
+#define RNP_VFTA_BASE_ADDR _ETH_(0xB000)
+#define RNP_VFTA_HASH_TABLE(id) (RNP_VFTA_BASE_ADDR + 0x4 * (id))
+#define RNP_ETYPE_BASE_ADDR _ETH_(0xB300)
+#define RNP_MPSAR_BASE_ADDR(vf_id) _ETH_(0xB400 + 0x04 * (vf_id))
+#define RNP_PFVLVF_BASE_ADDR _ETH_(0xB600)
+#define RNP_PFVLVFB_BASE_ADDR _ETH_(0xB700)
+#define RNP_TUNNEL_PFVLVF_BASE_ADDR _ETH_(0xB800)
+#define RNP_TUNNEL_PFVLVFB_BASE_ADDR _ETH_(0xB900)
+
+#define RNP_TC_PORT_MAP_TB(port) _ETH_(0xe840 + 0x04 * (port))
+#endif /* _RNP_ETH_REGS_H_ */
@@ -8,6 +8,9 @@
#include <ethdev_driver.h>
#include "rnp_osdep.h"
+#include "rnp_dma_regs.h"
+#include "rnp_eth_regs.h"
+#include "rnp_cfg.h"
static inline unsigned int rnp_rd_reg(volatile void *addr)
{
@@ -24,6 +27,9 @@ static inline void rnp_wr_reg(volatile void *reg, int val)
#define mbx_rd32(hw, reg) rnp_rd_reg((hw)->iobar4 + (reg))
#define mbx_wr32(hw, reg, val) rnp_wr_reg((hw)->iobar4 + (reg), (val))
+#define rnp_eth_rd(hw, off) rnp_rd_reg((char *)(hw)->eth_base + (off))
+#define rnp_eth_wr(hw, off, val) \
+ rnp_wr_reg((char *)(hw)->eth_base + (off), val)
struct rnp_hw;
/* Mbx Operate info */
enum MBX_ID {
@@ -93,6 +99,17 @@ struct rnp_mbx_info {
rte_atomic16_t state;
} __rte_cache_aligned;
+struct rnp_mac_api {
+ int32_t (*init_hw)(struct rnp_hw *hw);
+ int32_t (*reset_hw)(struct rnp_hw *hw);
+};
+
+struct rnp_mac_info {
+ uint8_t assign_addr[RTE_ETHER_ADDR_LEN];
+ uint8_t set_addr[RTE_ETHER_ADDR_LEN];
+ struct rnp_mac_api ops;
+} __rte_cache_aligned;
+
#define RNP_MAX_HW_PORT_PERR_PF (4)
struct rnp_hw {
void *back;
@@ -105,8 +122,10 @@ struct rnp_hw {
char *eth_base;
char *veb_base;
char *mac_base[RNP_MAX_HW_PORT_PERR_PF];
+ char *comm_reg_base;
char *msix_base;
/* === dma == */
+ char *dev_version;
char *dma_axi_en;
char *dma_axi_st;
@@ -114,10 +133,37 @@ struct rnp_hw {
uint16_t vendor_id;
uint16_t function;
uint16_t pf_vf_num;
+ int pfvfnum;
uint16_t max_vfs;
+
+ bool ncsi_en;
+ uint8_t ncsi_rar_entries;
+
+ int sgmii_phy_id;
+ int is_sgmii;
+ u16 phy_type;
+ uint8_t force_10g_1g_speed_ablity;
+ uint8_t force_speed_stat;
+#define FORCE_SPEED_STAT_DISABLED (0)
+#define FORCE_SPEED_STAT_1G (1)
+#define FORCE_SPEED_STAT_10G (2)
+ uint32_t speed;
+ unsigned int axi_mhz;
+
+ int fw_version; /* Primary FW Version */
+ uint32_t fw_uid; /* Subclass Fw Version */
+
+ int nic_mode;
+ unsigned char lane_mask;
+ int lane_of_port[4];
+ char phy_port_ids[4]; /* port id: for lane0~3: value: 0 ~ 7 */
+ uint8_t max_port_num; /* Max Port Num This PF Have */
+
void *cookie_pool;
char cookie_p_name[RTE_MEMZONE_NAMESIZE];
+ struct rnp_mac_info mac;
struct rnp_mbx_info mbx;
+ rte_spinlock_t fw_lock;
} __rte_cache_aligned;
#endif /* __RNP_H__*/
@@ -10,5 +10,8 @@ endif
sources = files(
'rnp_ethdev.c',
'rnp_mbx.c',
+ 'rnp_mbx_fw.c',
+ 'base/rnp_api.c',
)
+
includes += include_directories('base')
@@ -13,6 +13,20 @@
#define RNP_CFG_BAR (4)
#define RNP_PF_INFO_BAR (0)
+/* Port resource-sharing policy; chosen in rnp_get_nic_attr()
+ * (single-port PF presumably owns all resources -- CORPORATE).
+ */
+enum rnp_resource_share_m {
+ RNP_SHARE_CORPORATE = 0,
+ RNP_SHARE_INDEPEND,
+};
+/*
+ * Physical work mode: how the PF's lanes are grouped into ports.
+ */
+enum rnp_work_mode {
+ RNP_SINGLE_40G = 0,
+ RNP_SINGLE_10G = 1,
+ RNP_DUAL_10G = 2,
+ RNP_QUAD_10G = 3,
+};
+
+
struct rnp_eth_port {
void *adapt;
struct rnp_hw *hw;
@@ -21,9 +35,12 @@ struct rnp_eth_port {
struct rnp_share_ops {
struct rnp_mbx_api mbx_api;
+ struct rnp_mac_api mac_api;
} __rte_cache_aligned;
struct rnp_eth_adapter {
+ enum rnp_work_mode mode;
+ enum rnp_resource_share_m s_mode; /* Port Resource Share Policy */
struct rnp_hw hw;
uint16_t max_vfs;
struct rte_pci_device *pdev;
@@ -31,7 +48,9 @@ struct rnp_eth_adapter {
struct rnp_eth_port *ports[RNP_MAX_PORT_OF_PF];
struct rnp_share_ops *share_priv;
+ int max_link_speed;
uint8_t num_ports; /* Cur Pf Has physical Port Num */
+ uint8_t lane_mask;
} __rte_cache_aligned;
#define RNP_DEV_TO_PORT(eth_dev) \
@@ -40,8 +59,13 @@ struct rnp_eth_adapter {
((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT(eth_dev)->adapt))
#define RNP_DEV_TO_HW(eth_dev) \
(&((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT((eth_dev))->adapt))->hw)
+#define RNP_HW_TO_ADAPTER(hw) \
+ ((struct rnp_eth_adapter *)((hw)->back))
#define RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) \
(&((struct rnp_share_ops *)(dev)->process_private)->mbx_api)
+#define RNP_DEV_PP_PRIV_TO_MAC_OPS(dev) \
+ (&((struct rnp_share_ops *)(dev)->process_private)->mac_api)
+#define RNP_DEV_TO_MAC_OPS(dev) RNP_DEV_PP_PRIV_TO_MAC_OPS(dev)
#define RNP_DEV_TO_MBX_OPS(dev) RNP_DEV_PP_PRIV_TO_MBX_OPS(dev)
static inline void rnp_reg_offset_init(struct rnp_hw *hw)
@@ -56,10 +80,10 @@ static inline void rnp_reg_offset_init(struct rnp_hw *hw)
hw->msix_base = hw->iobar4 + 0xa0000;
}
/* === dma status/config====== */
+ hw->dev_version = hw->iobar4 + 0x0000;
hw->link_sync = hw->iobar4 + 0x000c;
hw->dma_axi_en = hw->iobar4 + 0x0010;
hw->dma_axi_st = hw->iobar4 + 0x0014;
-
if (hw->mbx.pf_num)
hw->msix_base += 0x200;
/* === queue registers === */
@@ -69,5 +93,7 @@ static inline void rnp_reg_offset_init(struct rnp_hw *hw)
/* mac */
for (i = 0; i < RNP_MAX_HW_PORT_PERR_PF; i++)
hw->mac_base[i] = hw->iobar4 + 0x60000 + 0x10000 * i;
+ /* === top reg === */
+ hw->comm_reg_base = hw->iobar4 + 0x30000;
}
#endif /* __RNP_H__ */
@@ -9,6 +9,8 @@
#include "rnp.h"
#include "rnp_mbx.h"
+#include "base/rnp_api.h"
+#include "rnp_mbx_fw.h"
#include "rnp_logs.h"
extern struct rnp_mbx_api rnp_mbx_pf_ops;
@@ -93,7 +95,30 @@ rnp_alloc_eth_port(struct rte_pci_device *master_pci, char *name)
static void rnp_get_nic_attr(struct rnp_eth_adapter *adapter)
{
- RTE_SET_USED(adapter);
+ struct rnp_hw *hw = &adapter->hw;
+ int lane_mask = 0, err, mode = 0;
+
+ rnp_mbx_link_event_enable(adapter->eth_dev, false);
+
+ err = rnp_mbx_get_capability(adapter->eth_dev, &lane_mask, &mode);
+ if (err < 0 || !lane_mask) {
+ PMD_DRV_LOG(ERR, "%s: mbx_get_capability error! errcode=%d\n",
+ __func__, hw->speed);
+ return;
+ }
+
+ adapter->num_ports = __builtin_popcount(lane_mask);
+ adapter->max_link_speed = hw->speed;
+ adapter->lane_mask = lane_mask;
+ adapter->mode = hw->nic_mode;
+
+ PMD_DRV_LOG(INFO, "max link speed:%d lane_mask:0x%x nic-mode:0x%x\n",
+ (int)adapter->max_link_speed,
+ (int)adapter->num_ports, adapter->mode);
+ if (adapter->num_ports && adapter->num_ports == 1)
+ adapter->s_mode = RNP_SHARE_CORPORATE;
+ else
+ adapter->s_mode = RNP_SHARE_INDEPEND;
}
static int
@@ -120,13 +145,78 @@ rnp_process_resource_init(struct rte_eth_dev *eth_dev)
PMD_DRV_LOG(ERR, "calloc share_priv failed");
return -ENOMEM;
}
-
+ memset(share_priv, 0, sizeof(*share_priv));
eth_dev->process_private = share_priv;
- adapter->share_priv = share_priv;
return 0;
}
+static int32_t rnp_reset_hw_pf(struct rnp_hw *hw)
+{
+ /* Use the RNP_HW_TO_ADAPTER accessor for consistency with
+  * rnp_init_hw_pf instead of a raw hw->back cast.
+  */
+ struct rnp_eth_adapter *adapter = RNP_HW_TO_ADAPTER(hw);
+
+ /* Pulse the global NIC reset (0 then 1).
+  * NOTE(review): required polarity/hold time not visible here --
+  * sequence kept exactly as the original.
+  */
+ rnp_wr_reg(hw->comm_reg_base + RNP_NIC_RESET, 0);
+ rte_wmb();
+ rnp_wr_reg(hw->comm_reg_base + RNP_NIC_RESET, 1);
+
+ /* Ask firmware to bring the PHY back up after the reset */
+ rnp_mbx_fw_reset_phy(adapter->eth_dev);
+
+ PMD_DRV_LOG(INFO, "PF[%d] reset nic finish\n",
+ hw->function);
+ return 0;
+}
+
+/* One-time PF hardware bring-up: quiesce DMA, reset the NIC, then
+ * enable the Rx parse/filter engines and re-enable DMA.
+ */
+static int32_t rnp_init_hw_pf(struct rnp_hw *hw)
+{
+ struct rnp_eth_adapter *adapter = RNP_HW_TO_ADAPTER(hw);
+ uint32_t version;
+ uint32_t reg;
+
+ PMD_INIT_FUNC_TRACE();
+ version = rnp_rd_reg(hw->dev_version);
+ PMD_DRV_LOG(INFO, "NIC HW Version:0x%.2x\n", version);
+
+ /* Disable Rx/Tx Dma */
+ rnp_wr_reg(hw->dma_axi_en, false);
+ /* Wait for the DMA AXI engine to report idle.
+  * NOTE(review): unbounded busy-wait -- a hung device would stall
+  * probe forever; consider adding a timeout.
+  */
+ while (rnp_rd_reg(hw->dma_axi_st) == 0)
+ ;
+
+ /* Reset Nic All Hardware */
+ if (rnp_reset_hw(adapter->eth_dev, hw))
+ return -EPERM;
+
+ /* Rx Proto Offload No-BYPASS */
+ rnp_eth_wr(hw, RNP_ETH_ENGINE_BYPASS, false);
+ /* Enable Flow Filter Engine */
+ rnp_eth_wr(hw, RNP_HOST_FILTER_EN, true);
+ /* Enable VXLAN Parse */
+ rnp_eth_wr(hw, RNP_EN_TUNNEL_VXLAN_PARSE, true);
+ /* Enabled REDIR ACTION */
+ rnp_eth_wr(hw, RNP_REDIR_CTRL, true);
+
+ /* Setup Scatter DMA Mem Size (1 bit <-> 16 bytes) */
+ reg = ((RTE_ETHER_MAX_LEN / 16) << RNP_DMA_SCATTER_MEM_SHIFT);
+ rnp_wr_reg(hw->iobar4 + RNP_DMA_CTRL, reg);
+#ifdef PHYTIUM_SUPPORT
+#define RNP_DMA_PADDING (1 << 8)
+ reg = rnp_rd_reg(hw->iobar4 + RNP_DMA_CTRL);
+ reg |= RNP_DMA_PADDING;
+ rnp_wr_reg(hw->iobar4 + RNP_DMA_CTRL, reg);
+#endif
+ /* Enable Rx/Tx Dma (all four AXI enable bits). Fix: 0b1111 is a
+  * GCC extension, not standard C -- use 0xf.
+  */
+ rnp_wr_reg(hw->dma_axi_en, 0xf);
+
+ rnp_wr_reg(hw->comm_reg_base + RNP_TX_QINQ_WORKAROUND, 1);
+
+ return 0;
+}
+
+/* PF-mode MAC operations; installed into the shared ops table at probe. */
+struct rnp_mac_api rnp_mac_ops = {
+ .reset_hw = rnp_reset_hw_pf,
+ .init_hw = rnp_init_hw_pf
+};
+
static void
rnp_common_ops_init(struct rnp_eth_adapter *adapter)
{
@@ -180,11 +270,12 @@ rnp_eth_dev_init(struct rte_eth_dev *dev)
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
adapter->max_vfs = pci_dev->max_vfs;
- ret = rnp_process_resource_init(dev, adapter);
+ ret = rnp_process_resource_init(dev);
if (ret) {
PMD_DRV_LOG(ERR, "share prive resource init failed");
return ret;
}
+ adapter->share_priv = dev->process_private;
rnp_common_ops_init(adapter);
rnp_get_nic_attr(adapter);
/* We need Use Device Id To Change The Resource Mode */
new file mode 100644
@@ -0,0 +1,272 @@
+/* NOTE(review): <linux/wait.h> is a Linux kernel header; this is a
+ * userspace PMD and nothing visible here uses it -- also breaks the
+ * build on non-Linux targets. Confirm and drop.
+ */
+#include <linux/wait.h>
+#include <stdio.h>
+
+#include <rte_version.h>
+#include <ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_alarm.h>
+
+#include "rnp.h"
+#include "rnp_mbx.h"
+#include "rnp_mbx_fw.h"
+#include "rnp_logs.h"
+
+/* Send a request to firmware over the mailbox and poll for the reply
+ * (synchronous path, no IRQ). hw->fw_lock serializes the single FW
+ * mailbox channel. Returns 0 on success, the transport error, or the
+ * negated firmware error_code.
+ */
+static int
+rnp_fw_send_cmd_wait(struct rte_eth_dev *dev, struct mbx_fw_cmd_req *req,
+     struct mbx_fw_cmd_reply *reply)
+{
+ struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+ int err;
+
+ rte_spinlock_lock(&hw->fw_lock);
+
+ /* Mailbox transfers are in 32-bit words: header plus payload */
+ err = ops->write_posted(dev, (u32 *)req,
+ (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+ if (err) {
+ RNP_PMD_LOG(ERR, "%s: write_posted failed! err:0x%x\n",
+ __func__, err);
+ rte_spinlock_unlock(&hw->fw_lock);
+ return err;
+ }
+
+ err = ops->read_posted(dev, (u32 *)reply, sizeof(*reply) / 4, MBX_FW);
+ rte_spinlock_unlock(&hw->fw_lock);
+ if (err) {
+ RNP_PMD_LOG(ERR,
+ "%s: read_posted failed! err:0x%x. "
+ "req-op:0x%x\n",
+ __func__,
+ err,
+ req->opcode);
+ goto err_quit;
+ }
+
+ /* Transport succeeded but firmware rejected the command */
+ if (reply->error_code) {
+ RNP_PMD_LOG(ERR,
+ "%s: reply err:0x%x. req-op:0x%x\n",
+ __func__,
+ reply->error_code,
+ req->opcode);
+ err = -reply->error_code;
+ goto err_quit;
+ }
+
+ return 0;
+err_quit:
+ /* Dump the first four dwords of request and reply for fw debugging */
+ RNP_PMD_LOG(ERR,
+ "%s:PF[%d]: req:%08x_%08x_%08x_%08x "
+ "reply:%08x_%08x_%08x_%08x\n",
+ __func__,
+ hw->function,
+ ((int *)req)[0],
+ ((int *)req)[1],
+ ((int *)req)[2],
+ ((int *)req)[3],
+ ((int *)reply)[0],
+ ((int *)reply)[1],
+ ((int *)reply)[2],
+ ((int *)reply)[3]);
+
+ return err;
+}
+
+/* Post a request to firmware on the IRQ-driven path and wait (up to
+ * cookie->timeout_ms) for the interrupt handler to mark it complete.
+ * Returns 0 on completion, the mailbox write error, or -ETIMEDOUT.
+ */
+static int rnp_mbx_fw_post_req(struct rte_eth_dev *dev,
+   struct mbx_fw_cmd_req *req,
+   struct mbx_req_cookie *cookie)
+{
+ struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+ int err = 0;
+ int timeout_cnt;
+#define WAIT_MS 10
+
+ cookie->done = 0;
+
+ rte_spinlock_lock(&hw->fw_lock);
+
+ err = ops->write(hw, (u32 *)req,
+ (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+ if (err) {
+ RNP_PMD_LOG(ERR, "rnp_write_mbx failed!\n");
+ goto quit;
+ }
+
+ /* Poll for the IRQ handler to flag completion via cookie->done */
+ timeout_cnt = cookie->timeout_ms / WAIT_MS;
+ while (timeout_cnt > 0) {
+ rte_delay_ms(WAIT_MS);
+ timeout_cnt--;
+ if (cookie->done)
+ break;
+ }
+ /* Fix: the original returned 0 (success) when the wait expired,
+  * letting callers consume a reply that never arrived.
+  */
+ if (!cookie->done)
+ err = -ETIMEDOUT;
+
+quit:
+ rte_spinlock_unlock(&hw->fw_lock);
+ return err;
+}
+
+/* Query PHY abilities from firmware via the polled mailbox path.
+ * The request struct itself is passed as the cookie -- presumably
+ * only echoed back by fw on this synchronous path; verify.
+ */
+static int rnp_fw_get_capablity(struct rte_eth_dev *dev,
+   struct phy_abilities *abil)
+{
+ struct mbx_fw_cmd_reply reply;
+ struct mbx_fw_cmd_req req;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ memset(&reply, 0, sizeof(reply));
+
+ build_phy_abalities_req(&req, &req);
+
+ err = rnp_fw_send_cmd_wait(dev, &req, &reply);
+ if (err)
+ return err;
+
+ /* Copy the ability block out of the reply payload */
+ memcpy(abil, &reply.phy_abilities, sizeof(*abil));
+
+ return 0;
+}
+
+#define RNP_MBX_API_MAX_RETRY (10)
+/* Fetch NIC capabilities from firmware and cache them in hw.
+ * Optionally reports lane_mask/nic_mode to the caller. Returns 0 on
+ * success, -EIO when fw never answered or reported a bad lane count.
+ */
+int rnp_mbx_get_capability(struct rte_eth_dev *dev,
+   int *lane_mask,
+   int *nic_mode)
+{
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+ struct phy_abilities ablity;
+ uint16_t temp_lmask;
+ uint16_t lane_bit = 0;
+ uint16_t retry = 0;
+ int lane_cnt = 0;
+ uint8_t lane_idx;
+ int err = -EIO;
+ uint8_t idx;
+
+ memset(&ablity, 0, sizeof(ablity));
+
+ /* enable CM3CPU to PF MBX IRQ */
+ /* Retry: fw may not be ready early in probe. Note the retry
+  * counter is checked after the call, so up to
+  * RNP_MBX_API_MAX_RETRY + 2 attempts are made.
+  */
+ do {
+ err = rnp_fw_get_capablity(dev, &ablity);
+ if (retry > RNP_MBX_API_MAX_RETRY)
+ break;
+ retry++;
+ } while (err);
+ if (!err) {
+ /* Cache the fw-reported identity/ability fields in hw */
+ hw->lane_mask = ablity.lane_mask;
+ hw->nic_mode = ablity.nic_mode;
+ hw->pfvfnum = ablity.pfnum;
+ hw->fw_version = ablity.fw_version;
+ hw->axi_mhz = ablity.axi_mhz;
+ hw->fw_uid = ablity.fw_uid;
+ if (ablity.phy_type == PHY_TYPE_SGMII) {
+ hw->is_sgmii = 1;
+ hw->sgmii_phy_id = ablity.phy_id;
+ }
+
+ /* 0xffffffff means the extended-ability word is absent */
+ if (ablity.ext_ablity != 0xffffffff && ablity.e.valid) {
+ hw->ncsi_en = (ablity.e.ncsi_en == 1);
+ hw->ncsi_rar_entries = 1;
+ }
+
+ /* Forced 10G/1G speed switching needs fw >= 0.5.2.1 */
+ if (hw->nic_mode == RNP_SINGLE_10G &&
+ hw->fw_version >= 0x00050201 &&
+ ablity.speed == RTE_ETH_SPEED_NUM_10G) {
+ hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED;
+ hw->force_10g_1g_speed_ablity = 1;
+ }
+
+ if (lane_mask)
+ *lane_mask = hw->lane_mask;
+ if (nic_mode)
+ *nic_mode = hw->nic_mode;
+
+ /* Map each fw port id to its lane: walk set bits of the
+  * lane mask lowest-first (ffs is 1-based, hence the -1).
+  * NOTE(review): assumes port_ids[] is ordered to match the
+  * ascending lane bits -- confirm against fw behavior.
+  */
+ lane_cnt = __builtin_popcount(hw->lane_mask);
+ temp_lmask = hw->lane_mask;
+ for (idx = 0; idx < lane_cnt; idx++) {
+ hw->phy_port_ids[idx] = ablity.port_ids[idx];
+ lane_bit = ffs(temp_lmask) - 1;
+ lane_idx = ablity.port_ids[idx] % lane_cnt;
+ hw->lane_of_port[lane_idx] = lane_bit;
+ temp_lmask &= ~BIT(lane_bit);
+ }
+ hw->max_port_num = lane_cnt;
+ }
+
+ RNP_PMD_LOG(INFO,
+ "%s: nic-mode:%d lane_cnt:%d lane_mask:0x%x "
+ "pfvfnum:0x%x, fw_version:0x%08x, ports:%d-%d-%d-%d ncsi:en:%d\n",
+ __func__,
+ hw->nic_mode,
+ lane_cnt,
+ hw->lane_mask,
+ hw->pfvfnum,
+ ablity.fw_version,
+ ablity.port_ids[0],
+ ablity.port_ids[1],
+ ablity.port_ids[2],
+ ablity.port_ids[3],
+ hw->ncsi_en);
+
+ /* lane_of_port[]/phy_port_ids[] only hold 4 entries */
+ if (lane_cnt <= 0 || lane_cnt > 4)
+ return -EIO;
+
+ return err;
+}
+
+/* Enable/disable firmware link-up event reporting. Writes the fw sync
+ * magic into the link_sync register, then posts a SET_EVENT_MASK
+ * request. 'reply' is unused on this posted-write path.
+ */
+int rnp_mbx_link_event_enable(struct rte_eth_dev *dev, int enable)
+{
+ struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+ struct mbx_fw_cmd_reply reply;
+ struct mbx_fw_cmd_req req;
+ int err, v;
+
+ memset(&req, 0, sizeof(req));
+ memset(&reply, 0, sizeof(reply));
+
+ rte_spinlock_lock(&hw->fw_lock);
+ if (enable) {
+ /* Replace the sync field with the driver-alive magic */
+ v = rnp_rd_reg(hw->link_sync);
+ v &= ~RNP_FIRMWARE_SYNC_MASK;
+ v |= RNP_FIRMWARE_SYNC_MAGIC;
+ rnp_wr_reg(hw->link_sync, v);
+ } else {
+ rnp_wr_reg(hw->link_sync, 0);
+ }
+ rte_spinlock_unlock(&hw->fw_lock);
+
+ build_link_set_event_mask(&req, BIT(EVT_LINK_UP),
+ (enable & 1) << EVT_LINK_UP, &req);
+
+ rte_spinlock_lock(&hw->fw_lock);
+ err = ops->write_posted(dev, (u32 *)&req,
+ (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+ rte_spinlock_unlock(&hw->fw_lock);
+
+ /* settle time for fw to apply the mask -- presumably empirical */
+ rte_delay_ms(200);
+
+ return err;
+}
+
+/* Ask firmware to reset the PHY. Uses the IRQ/cookie path when mbx
+ * interrupts are enabled, otherwise falls back to the polled path
+ * (where the request doubles as its own cookie).
+ */
+int rnp_mbx_fw_reset_phy(struct rte_eth_dev *dev)
+{
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+ struct mbx_fw_cmd_reply reply;
+ struct mbx_req_cookie *cookie;
+ struct mbx_fw_cmd_req req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&reply, 0, sizeof(reply));
+
+ if (hw->mbx.irq_enabled) {
+ /* Cookie lives in the shared memzone named at probe time */
+ cookie = rnp_memzone_reserve(hw->cookie_p_name, 0);
+ if (!cookie)
+ return -ENOMEM;
+ memset(cookie->priv, 0, cookie->priv_len);
+ build_reset_phy_req(&req, cookie);
+ return rnp_mbx_fw_post_req(dev, &req, cookie);
+ }
+ build_reset_phy_req(&req, &req);
+
+ return rnp_fw_send_cmd_wait(dev, &req, &reply);
+}
@@ -16,7 +16,168 @@ struct mbx_req_cookie {
int priv_len;
char priv[RNP_MAX_SHARE_MEM];
};
+enum GENERIC_CMD {
+ /* link configuration admin commands */
+ GET_PHY_ABALITY = 0x0601,
+ RESET_PHY = 0x0603,
+ SET_EVENT_MASK = 0x0613,
+};
+
+enum link_event_mask {
+ EVT_LINK_UP = 1,
+ EVT_NO_MEDIA = 2,
+ EVT_LINK_FAULT = 3,
+ EVT_PHY_TEMP_ALARM = 4,
+ EVT_EXCESSIVE_ERRORS = 5,
+ EVT_SIGNAL_DETECT = 6,
+ EVT_AUTO_NEGOTIATION_DONE = 7,
+ EVT_MODULE_QUALIFICATION_FAILD = 8,
+ EVT_PORT_TX_SUSPEND = 9,
+};
+
+enum pma_type {
+ PHY_TYPE_NONE = 0,
+ PHY_TYPE_1G_BASE_KX,
+ PHY_TYPE_SGMII,
+ PHY_TYPE_10G_BASE_KR,
+ PHY_TYPE_25G_BASE_KR,
+ PHY_TYPE_40G_BASE_KR4,
+ PHY_TYPE_10G_BASE_SR,
+ PHY_TYPE_40G_BASE_SR4,
+ PHY_TYPE_40G_BASE_CR4,
+ PHY_TYPE_40G_BASE_LR4,
+ PHY_TYPE_10G_BASE_LR,
+ PHY_TYPE_10G_BASE_ER,
+};
+
+struct phy_abilities {
+ unsigned char link_stat;
+ unsigned char lane_mask;
+
+ int speed;
+ short phy_type;
+ short nic_mode;
+ short pfnum;
+ unsigned int fw_version;
+ unsigned int axi_mhz;
+ uint8_t port_ids[4];
+ uint32_t fw_uid;
+ uint32_t phy_id;
+
+ int wol_status;
+
+ union {
+ unsigned int ext_ablity;
+ struct {
+ unsigned int valid : 1;
+ unsigned int wol_en : 1;
+ unsigned int pci_preset_runtime_en : 1;
+ unsigned int smbus_en : 1;
+ unsigned int ncsi_en : 1;
+ unsigned int rpu_en : 1;
+ unsigned int v2 : 1;
+ unsigned int pxe_en : 1;
+ unsigned int mctp_en : 1;
+ } e;
+ };
+} __rte_packed __rte_aligned(4);
+
+/* firmware -> driver */
struct mbx_fw_cmd_reply {
-} __rte_cache_aligned;
+ /* fw must set: DD, CMP, Error(if error), copy value */
+ unsigned short flags;
+ /* from command: LB,RD,VFC,BUF,SI,EI,FE */
+ unsigned short opcode; /* 2-3: copy from req */
+ unsigned short error_code; /* 4-5: 0 if no error */
+ unsigned short datalen; /* 6-7: */
+ union {
+ struct {
+ unsigned int cookie_lo; /* 8-11: */
+ unsigned int cookie_hi; /* 12-15: */
+ };
+ void *cookie;
+ };
+ /* ===== data ==== [16-64] */
+ union {
+ struct phy_abilities phy_abilities;
+ };
+} __rte_packed __rte_aligned(4);
+
+#define MBX_REQ_HDR_LEN 24
+/* driver -> firmware */
+struct mbx_fw_cmd_req {
+ unsigned short flags; /* 0-1 */
+ unsigned short opcode; /* 2-3 enum LINK_ADM_CMD */
+ unsigned short datalen; /* 4-5 */
+ unsigned short ret_value; /* 6-7 */
+ union {
+ struct {
+ unsigned int cookie_lo; /* 8-11 */
+ unsigned int cookie_hi; /* 12-15 */
+ };
+ void *cookie;
+ };
+ unsigned int reply_lo; /* 16-19 5dw */
+ unsigned int reply_hi; /* 20-23 */
+ /* === data === [24-64] 7dw */
+ union {
+ struct {
+ int requestor;
+#define REQUEST_BY_DPDK 0xa1
+#define REQUEST_BY_DRV 0xa2
+#define REQUEST_BY_PXE 0xa3
+ } get_phy_ablity;
+
+ struct {
+ unsigned short enable_stat;
+ unsigned short event_mask; /* enum link_event_mask */
+ } stat_event_mask;
+ };
+} __rte_packed __rte_aligned(4);
+
+/* Fill a GET_PHY_ABALITY request; fw echoes 'cookie' back in the reply. */
+static inline void
+build_phy_abalities_req(struct mbx_fw_cmd_req *req, void *cookie)
+{
+ req->flags = 0;
+ req->opcode = GET_PHY_ABALITY;
+ req->datalen = 0;
+ req->reply_lo = 0;
+ req->reply_hi = 0;
+ req->cookie = cookie;
+}
+
+/* Fill a SET_EVENT_MASK request: event_mask selects which
+ * enum link_event_mask bits to update, enable gives their new state.
+ */
+static inline void
+build_link_set_event_mask(struct mbx_fw_cmd_req *req,
+   unsigned short event_mask,
+   unsigned short enable,
+   void *cookie)
+{
+ req->flags = 0;
+ req->opcode = SET_EVENT_MASK;
+ req->datalen = sizeof(req->stat_event_mask);
+ req->cookie = cookie;
+ req->reply_lo = 0;
+ req->reply_hi = 0;
+ req->stat_event_mask.event_mask = event_mask;
+ req->stat_event_mask.enable_stat = enable;
+}
+
+/* Fill a RESET_PHY request (no payload). */
+static inline void
+build_reset_phy_req(struct mbx_fw_cmd_req *req,
+   void *cookie)
+{
+ req->flags = 0;
+ req->opcode = RESET_PHY;
+ req->datalen = 0;
+ req->reply_lo = 0;
+ req->reply_hi = 0;
+ req->cookie = cookie;
+}
+int rnp_mbx_get_capability(struct rte_eth_dev *dev,
+ int *lane_mask,
+ int *nic_mode);
+int rnp_mbx_link_event_enable(struct rte_eth_dev *dev, int enable);
+int rnp_mbx_fw_reset_phy(struct rte_eth_dev *dev);
#endif /* __RNP_MBX_FW_H__*/