@@ -1,19 +1,28 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_H
#define __ECORE_H
+#include <linux/if_ether.h>
+
+#define ECORE_ETH_ALEN ETH_ALEN
+#define ECORE_ETH_P_8021Q ETH_P_8021Q
+#define ECORE_ETH_P_8021AD ETH_P_8021AD
+#define UEFI
+
/* @DPDK */
+#define CONFIG_ECORE_BINARY_FW
+#undef CONFIG_ECORE_ZIPPED_FW
+
+#ifdef CONFIG_ECORE_BINARY_FW
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
-
-#define CONFIG_ECORE_BINARY_FW
-#undef CONFIG_ECORE_ZIPPED_FW
+#endif
#ifdef CONFIG_ECORE_ZIPPED_FW
#include <zlib.h>
@@ -29,8 +38,8 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
-#define ECORE_MINOR_VERSION 40
-#define ECORE_REVISION_VERSION 26
+#define ECORE_MINOR_VERSION 62
+#define ECORE_REVISION_VERSION 4
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
@@ -49,14 +58,23 @@
#define ECORE_WFQ_UNIT 100
#include "../qede_logs.h" /* @DPDK */
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
/* Constants */
#define ECORE_WID_SIZE (1024)
#define ECORE_MIN_WIDS (4)
/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)
+#define ECORE_VF_DEMS_SIZE (32)
+#define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required to
+ * load the driver. The number was
+ * arbitrarily set.
+ */
+/* Derived */
+#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS)
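+/* Minimum PWM doorbell region size needed to expose ECORE_MIN_DPIS DPIs,
+ * assuming one ECORE_WID_SIZE window per DPI.
+ */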
+
+#define ECORE_CXT_PF_CID (0xff)
+
+#define ECORE_HW_STOP_RETRY_LIMIT (10)
/* cau states */
enum ecore_coalescing_mode {
@@ -71,23 +89,29 @@ enum ecore_nvm_cmd {
ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE,
- ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+ ECORE_ENCRYPT_PASSWORD = DRV_MSG_CODE_ENCRYPT_PASSWORD,
ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
};
-#ifndef LINUX_REMOVE
-#if !defined(CONFIG_ECORE_L2)
+#if !defined(CONFIG_ECORE_L2) && !defined(CONFIG_ECORE_ROCE) && \
+ !defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI) && \
+ !defined(CONFIG_ECORE_IWARP) && !defined(CONFIG_ECORE_OOO)
#define CONFIG_ECORE_L2
#define CONFIG_ECORE_SRIOV
-#endif
+#define CONFIG_ECORE_ROCE
+#define CONFIG_ECORE_IWARP
+#define CONFIG_ECORE_FCOE
+#define CONFIG_ECORE_ISCSI
+#define CONFIG_ECORE_LL2
+#define CONFIG_ECORE_OOO
#endif
/* helpers */
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
#define MASK_FIELD(_name, _value) \
((_value) &= (_name##_MASK))
@@ -103,14 +127,16 @@ do { \
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
-#define GET_MFW_FIELD(name, field) \
+#define GET_MFW_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define SET_MFW_FIELD(name, field, value) \
do { \
- (name) &= ~((field ## _MASK)); \
+ (name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
} while (0)
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
#endif
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
@@ -121,7 +147,7 @@ static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
return db_addr;
}
-static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
+static OSAL_INLINE u32 DB_ADDR_VF_E4(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
@@ -129,6 +155,17 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
return db_addr;
}
+static OSAL_INLINE u32 DB_ADDR_VF_E5(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * ECORE_VF_DEMS_SIZE);
+
+ return db_addr;
+}
+
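+/* Chip-dependent VF doorbell address: E4 keeps the legacy DEMS/ICID encoding,
+ * while E5 places VF doorbells at a fixed ECORE_VF_DEMS_SIZE-byte stride per
+ * CID.
+ */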
+#define DB_ADDR_VF(dev, cid, DEMS) \
+ (ECORE_IS_E4(dev) ? DB_ADDR_VF_E4(cid, DEMS) : DB_ADDR_VF_E5(cid, DEMS))
+
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
@@ -143,7 +180,84 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
#endif
#endif
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
+#define ECORE_INT_DEBUG_SIZE_DEF _MB(2)
+struct ecore_internal_trace {
+ char *buf;
+ u32 size;
+ u64 prod;
+ osal_spinlock_t lock;
+};
+
+#define ECORE_DP_INT_LOG_MAX_STR_SIZE 256
+#define ECORE_DP_INT_LOG_DEFAULT_MASK (0xffffc3ff)
+
+#ifndef UEFI
+/* Debug print definitions */
+#define DP_INT_LOG(P_DEV, LEVEL, MODULE, fmt, ...) \
+do { \
+ if (OSAL_UNLIKELY((P_DEV)->dp_int_level > (LEVEL))) \
+ break; \
+ if (OSAL_UNLIKELY((P_DEV)->dp_int_level == ECORE_LEVEL_VERBOSE) && \
+ ((LEVEL) == ECORE_LEVEL_VERBOSE) && \
+ ((P_DEV)->dp_int_module & (MODULE)) == 0) \
+ break; \
+ \
+ OSAL_INT_DBG_STORE(P_DEV, fmt, \
+ __func__, __LINE__, \
+ (P_DEV)->name ? (P_DEV)->name : "", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define DP_ERR(P_DEV, fmt, ...) \
+do { \
+ DP_INT_LOG((P_DEV), ECORE_LEVEL_ERR, 0, \
+ "ERR: [%s:%d(%s)]" fmt, ##__VA_ARGS__); \
+ PRINT_ERR((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (P_DEV)->name ? (P_DEV)->name : "", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define DP_NOTICE(P_DEV, is_assert, fmt, ...) \
+do { \
+ DP_INT_LOG((P_DEV), ECORE_LEVEL_NOTICE, 0, \
+ "NOTICE: [%s:%d(%s)]" fmt, ##__VA_ARGS__); \
+ if (OSAL_UNLIKELY((P_DEV)->dp_level <= ECORE_LEVEL_NOTICE)) { \
+ PRINT((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (P_DEV)->name ? (P_DEV)->name : "", \
+ ##__VA_ARGS__); \
+ OSAL_ASSERT(!(is_assert)); \
+ } \
+} while (0)
+
+#define DP_INFO(P_DEV, fmt, ...) \
+do { \
+ DP_INT_LOG((P_DEV), ECORE_LEVEL_INFO, 0, \
+ "INFO: [%s:%d(%s)]" fmt, ##__VA_ARGS__); \
+ if (OSAL_UNLIKELY((P_DEV)->dp_level <= ECORE_LEVEL_INFO)) { \
+ PRINT((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (P_DEV)->name ? (P_DEV)->name : "", \
+ ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define DP_VERBOSE(P_DEV, module, fmt, ...) \
+do { \
+ DP_INT_LOG((P_DEV), ECORE_LEVEL_VERBOSE, module, \
+ "VERBOSE: [%s:%d(%s)]" fmt, ##__VA_ARGS__); \
+ if (OSAL_UNLIKELY(((P_DEV)->dp_level <= ECORE_LEVEL_VERBOSE) && \
+ ((P_DEV)->dp_module & module))) { \
+ PRINT((P_DEV)->dp_ctx, "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (P_DEV)->name ? (P_DEV)->name : "", \
+ ##__VA_ARGS__); \
+ } \
+} while (0)
+#endif
+
enum DP_LEVEL {
ECORE_LEVEL_VERBOSE = 0x0,
ECORE_LEVEL_INFO = 0x1,
@@ -181,6 +295,7 @@ enum DP_MODULE {
ECORE_MSG_SP = 0x100000,
ECORE_MSG_STORAGE = 0x200000,
ECORE_MSG_OOO = 0x200000,
+ ECORE_MSG_FS = 0x400000,
ECORE_MSG_CXT = 0x800000,
ECORE_MSG_LL2 = 0x1000000,
ECORE_MSG_ILT = 0x2000000,
@@ -188,13 +303,49 @@ enum DP_MODULE {
ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
+
+/**
+ * @brief Convert from 32b debug param to two params of level and module
+ *
+ * @param debug
+ * @param p_dp_module
+ * @param p_dp_level
+ * @return void
+ *
+ * @note Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from
+ * the 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the
+ * flow and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of
+ * that module. VERBOSE prints are for tracking the specific flow in low
+ * level.
+ *
+ * Notice that the level should be that of the lowest required logs.
+ */
+static OSAL_INLINE void ecore_config_debug(u32 debug, u32 *p_dp_module,
+ u8 *p_dp_level)
+{
+ *p_dp_level = ECORE_LEVEL_NOTICE;
+ *p_dp_module = 0;
+
+ if (debug & ECORE_LOG_VERBOSE_MASK) {
+ *p_dp_level = ECORE_LEVEL_VERBOSE;
+ *p_dp_module = (debug & 0x3FFFFFFF);
+ } else if (debug & ECORE_LOG_INFO_MASK) {
+ *p_dp_level = ECORE_LEVEL_INFO;
+ } else if (debug & ECORE_LOG_NOTICE_MASK) {
+ *p_dp_level = ECORE_LEVEL_NOTICE;
+ }
+}
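+
+/* Illustrative decoding, assuming the ECORE_LOG_*_MASK values follow the
+ * b31/b30/b29-b0 layout described above: debug = 0x40000000 selects
+ * ECORE_LEVEL_INFO, while debug = 0x00000004 selects ECORE_LEVEL_VERBOSE
+ * with module bit 2 set in *p_dp_module.
+ */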
+
#endif
#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
- (val == (cond1) ? true1 : \
- (val == (cond2) ? true2 : def))
+ ((val) == (cond1) ? (true1) : \
+ ((val) == (cond2) ? (true2) : (def)))
/* forward */
struct ecore_ptt_pool;
@@ -210,6 +361,8 @@ struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
struct ecore_llh_info;
+struct ecore_fs_info_e4;
+struct ecore_fs_info_e5;
struct ecore_rt_data {
u32 *init_val;
@@ -233,6 +386,13 @@ enum ecore_tunn_clss {
MAX_ECORE_TUNN_CLSS,
};
+#ifndef __EXTRACT__LINUX__IF__
+enum ecore_tcp_ip_version {
+ ECORE_TCP_IPV4,
+ ECORE_TCP_IPV6,
+};
+#endif
+
struct ecore_tunn_update_type {
bool b_update_mode;
bool b_mode_enabled;
@@ -256,6 +416,9 @@ struct ecore_tunnel_info {
bool b_update_rx_cls;
bool b_update_tx_cls;
+
+ bool update_non_l2_vxlan;
+ bool non_l2_vxlan_enable;
};
/* The PCI personality is not quite synonymous to protocol ID:
@@ -279,6 +442,15 @@ struct ecore_qm_iids {
u32 tids;
};
+/* The PCI relaxed ordering is either handled by the management FW or can be
+ * enabled/disabled by the ecore client.
+ */
+enum ecore_pci_rlx_odr {
+ ECORE_DEFAULT_RLX_ODR,
+ ECORE_ENABLE_RLX_ODR,
+ ECORE_DISABLE_RLX_ODR
+};
+
#define MAX_PF_PER_PORT 8
/* HW / FW resources, output of features supported below, most information
@@ -292,12 +464,16 @@ enum ecore_resources {
ECORE_RL,
ECORE_MAC,
ECORE_VLAN,
+ ECORE_VF_RDMA_CNQ_RAM,
ECORE_RDMA_CNQ_RAM,
ECORE_ILT,
- ECORE_LL2_QUEUE,
+ ECORE_LL2_RAM_QUEUE,
+ ECORE_LL2_CTX_QUEUE,
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
ECORE_BDQ,
+ ECORE_VF_MAC_ADDR,
+ ECORE_GFS_PROFILE,
/* This is needed only internally for matching against the IGU.
* In case of legacy MFW, would be set to `0'.
@@ -317,6 +493,7 @@ enum ecore_feature {
ECORE_EXTRA_VF_QUE,
ECORE_VMQ,
ECORE_RDMA_CNQ,
+ ECORE_VF_RDMA_CNQ,
ECORE_ISCSI_CQ,
ECORE_FCOE_CQ,
ECORE_VF_L2_QUE,
@@ -335,6 +512,11 @@ enum ecore_port_mode {
ECORE_PORT_MODE_DE_1X25G,
ECORE_PORT_MODE_DE_4X25G,
ECORE_PORT_MODE_DE_2X10G,
+ ECORE_PORT_MODE_DE_2X50G_R1,
+ ECORE_PORT_MODE_DE_4X50G_R1,
+ ECORE_PORT_MODE_DE_1X100G_R2,
+ ECORE_PORT_MODE_DE_2X100G_R2,
+ ECORE_PORT_MODE_DE_1X100G_R4,
};
enum ecore_dev_cap {
@@ -345,7 +527,7 @@ enum ecore_dev_cap {
ECORE_DEV_CAP_IWARP
};
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
enum ecore_hw_err_type {
ECORE_HW_ERR_FAN_FAIL,
ECORE_HW_ERR_MFW_RESP_FAIL,
@@ -356,10 +538,9 @@ enum ecore_hw_err_type {
};
#endif
-enum ecore_db_rec_exec {
- DB_REC_DRY_RUN,
- DB_REC_REAL_DEAL,
- DB_REC_ONCE,
+enum ecore_wol_support {
+ ECORE_WOL_SUPPORT_NONE,
+ ECORE_WOL_SUPPORT_PME,
};
struct ecore_hw_info {
@@ -382,7 +563,9 @@ struct ecore_hw_info {
((dev)->hw_info.personality == ECORE_PCI_FCOE)
#define ECORE_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ISCSI)
-
+#define ECORE_IS_NVMETCP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ISCSI && \
+ (dev)->is_nvmetcp)
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
u32 resc_num[ECORE_MAX_RESC];
@@ -397,23 +580,22 @@ struct ecore_hw_info {
/* Amount of traffic classes HW supports */
u8 num_hw_tc;
-/* Amount of TCs which should be active according to DCBx or upper layer driver
- * configuration
- */
-
+/* Amount of TCs which should be active according to DCBx or upper layer driver configuration */
u8 num_active_tc;
/* The traffic class used by PF for it's offloaded protocol */
u8 offload_tc;
+ bool offload_tc_set;
+
+ bool multi_tc_roce_en;
+#define IS_ECORE_MULTI_TC_ROCE(p_hwfn) (!!((p_hwfn)->hw_info.multi_tc_roce_en))
u32 concrete_fid;
u16 opaque_fid;
u16 ovlan;
u32 part_num[4];
- unsigned char hw_mac_addr[ETH_ALEN];
- u64 node_wwn; /* For FCoE only */
- u64 port_wwn; /* For FCoE only */
+ unsigned char hw_mac_addr[ECORE_ETH_ALEN];
u16 num_iscsi_conns;
u16 num_fcoe_conns;
@@ -424,12 +606,16 @@ struct ecore_hw_info {
u32 port_mode;
u32 hw_mode;
- u32 device_capabilities;
+ u32 device_capabilities; /* @DPDK */
+#ifndef __EXTRACT__LINUX__THROW__
/* Default DCBX mode */
u8 dcbx_mode;
+#endif
u16 mtu;
+
+ enum ecore_wol_support b_wol_support;
};
/* maximun size of read/write commands (HW limit) */
@@ -470,38 +656,71 @@ struct ecore_wfq_data {
#define OFLD_GRP_SIZE 4
+struct ecore_offload_pq {
+ u8 port;
+ u8 tc;
+};
+
struct ecore_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
struct init_qm_port_params *qm_port_params;
u16 start_pq;
- u8 start_vport;
+ u16 start_vport;
+ u16 start_rl;
u16 pure_lb_pq;
- u16 offload_pq;
+ u16 first_ofld_pq;
+ u16 first_llt_pq;
u16 pure_ack_pq;
u16 ooo_pq;
+ u16 single_vf_rdma_pq;
u16 first_vf_pq;
u16 first_mcos_pq;
u16 first_rl_pq;
+ u16 first_ofld_grp_pq;
u16 num_pqs;
u16 num_vf_pqs;
+ u16 ilt_pf_pqs;
u8 num_vports;
+ u8 num_rls;
u8 max_phys_tcs_per_port;
u8 ooo_tc;
+ bool pq_overflow;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
bool vport_wfq_en;
+ bool vf_rdma_en;
+#define IS_ECORE_QM_VF_RDMA(_p_hwfn) ((_p_hwfn)->qm_info.vf_rdma_en)
u8 pf_wfq;
u32 pf_rl;
struct ecore_wfq_data *wfq_data;
u8 num_pf_rls;
+ struct ecore_offload_pq offload_group[OFLD_GRP_SIZE];
+ u8 offload_group_count;
+#define IS_ECORE_OFLD_GRP(p_hwfn) ((p_hwfn)->qm_info.offload_group_count > 0)
+
+ /* Locks PQ getters against QM info initialization */
+ osal_spinlock_t qm_info_lock;
};
+#define ECORE_OVERFLOW_BIT 1
+
struct ecore_db_recovery_info {
- osal_list_t list;
- osal_spinlock_t lock;
- u32 db_recovery_counter;
+ osal_list_t list;
+ osal_spinlock_t lock;
+ u32 count;
+
+ /* PF doorbell overflow sticky indicator was cleared in the DORQ
+ * attention callback, but still needs to execute doorbell recovery.
+ * Full (REAL_DEAL) doorbell recovery is executed in the periodic
+ * handler.
+ * This value doesn't require a lock but must use atomic operations.
+ */
+ u32 overflow; /* @DPDK */
+
+ /* Indicates that DORQ attention was handled in ecore_int_deassertion */
+ bool dorq_attn;
};
struct storm_stats {
@@ -553,18 +772,32 @@ enum ecore_mf_mode_bit {
/* Use stag for steering */
ECORE_MF_8021AD_TAGGING,
+ /* Allow DSCP to TC mapping */
+ ECORE_MF_DSCP_TO_TC_MAP,
+
/* Allow FIP discovery fallback */
ECORE_MF_FIP_SPECIAL,
+
+ /* Do not insert a vlan tag with id 0 */
+ ECORE_MF_DONT_ADD_VLAN0_TAG,
+
+ /* Allow VF RDMA */
+ ECORE_MF_VF_RDMA,
+
+ /* Allow RoCE LAG */
+ ECORE_MF_ROCE_LAG,
};
enum ecore_ufp_mode {
ECORE_UFP_MODE_ETS,
ECORE_UFP_MODE_VNIC_BW,
+ ECORE_UFP_MODE_UNKNOWN
};
enum ecore_ufp_pri_type {
ECORE_UFP_PRI_OS,
- ECORE_UFP_PRI_VNIC
+ ECORE_UFP_PRI_VNIC,
+ ECORE_UFP_PRI_UNKNOWN
};
struct ecore_ufp_info {
@@ -578,16 +811,85 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
+#ifndef __EXTRACT__LINUX__IF__
+enum ecore_lag_type {
+ ECORE_LAG_TYPE_NONE,
+ ECORE_LAG_TYPE_ACTIVEACTIVE,
+ ECORE_LAG_TYPE_ACTIVEBACKUP
+};
+#endif
+
struct ecore_nvm_image_info {
u32 num_images;
struct bist_nvm_image_att *image_att;
bool valid;
};
+#define LAG_MAX_PORT_NUM 2
+
+struct ecore_lag_info {
+ enum ecore_lag_type lag_type;
+ void (*link_change_cb)(void *cxt);
+ void *cxt;
+ u8 port_num;
+ u32 active_ports; /* @DPDK */
+ u8 first_port;
+ u8 second_port;
+ bool is_master;
+ u8 master_pf;
+};
+
+/* PWM region specific data */
+struct ecore_dpi_info {
+ u16 wid_count;
+ u32 dpi_size;
+ u32 dpi_count;
+ u32 dpi_start_offset; /* this is used to calculate
+ * the doorbell address
+ */
+ u32 dpi_bit_shift_addr;
+};
+
+struct ecore_common_dpm_info {
+ u8 db_bar_no_edpm;
+ u8 mfw_no_edpm;
+ bool vf_cfg;
+};
+
+enum ecore_hsi_def_type {
+ ECORE_HSI_DEF_MAX_NUM_VFS,
+ ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
+ ECORE_HSI_DEF_MAX_NUM_PORTS,
+ ECORE_HSI_DEF_MAX_SB_PER_PATH,
+ ECORE_HSI_DEF_MAX_NUM_PFS,
+ ECORE_HSI_DEF_MAX_NUM_VPORTS,
+ ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
+ ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
+ ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
+ ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+ ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
+ ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
+ ECORE_HSI_DEF_MAX_BTB_BLOCKS,
+ ECORE_NUM_HSI_DEFS
+};
+
+enum ecore_rx_config_flags {
+ ECORE_RX_CONF_SKIP_ACCEPT_FLAGS_UPDATE,
+ ECORE_RX_CONF_SKIP_UCAST_FILTER_UPDATE,
+ ECORE_RX_CONF_SET_LB_VPORT
+};
+
+struct ecore_rx_config {
+ u32 flags; /* @DPDK */
+ u8 loopback_dst_vport_id;
+};
+
struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
-#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+#define IS_LEAD_HWFN(_p_hwfn) (!((_p_hwfn)->my_id))
+#define IS_AFFIN_HWFN(_p_hwfn) \
+ ((_p_hwfn) == ECORE_AFFIN_HWFN((_p_hwfn)->p_dev))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
#define ECORE_PATH_ID(_p_hwfn) \
@@ -597,10 +899,11 @@ struct ecore_hwfn {
u32 dp_module;
u8 dp_level;
+ u32 dp_int_module;
+ u8 dp_int_level;
char name[NAME_SIZE];
void *dp_ctx;
- bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
@@ -612,6 +915,8 @@ struct ecore_hwfn {
void OSAL_IOMEM *doorbells;
u64 db_phys_addr;
unsigned long db_size;
+ u64 reg_offset;
+ u64 db_offset;
/* PTT pool */
struct ecore_ptt_pool *p_ptt_pool;
@@ -638,6 +943,11 @@ struct ecore_hwfn {
struct ecore_ptt *p_main_ptt;
struct ecore_ptt *p_dpc_ptt;
+ /* PTP will be used only by the leading function.
+ * Usage of all PTP-apis should be synchronized as result.
+ */
+ struct ecore_ptt *p_ptp_ptt;
+
struct ecore_sb_sp_info *p_sp_sb;
struct ecore_sb_attn_info *p_sb_attn;
@@ -649,6 +959,7 @@ struct ecore_hwfn {
struct ecore_fcoe_info *p_fcoe_info;
struct ecore_rdma_info *p_rdma_info;
struct ecore_pf_params pf_params;
+ bool is_nvmetcp;
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
@@ -673,8 +984,8 @@ struct ecore_hwfn {
/* QM init */
struct ecore_qm_info qm_info;
-#ifdef CONFIG_ECORE_ZIPPED_FW
/* Buffer for unzipping firmware data */
+#ifdef CONFIG_ECORE_ZIPPED_FW
void *unzip_buf;
#endif
@@ -682,19 +993,12 @@ struct ecore_hwfn {
void *dbg_user_info;
struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
- struct z_stream_s *stream;
-
- /* PWM region specific data */
- u32 dpi_size;
- u32 dpi_count;
- u32 dpi_start_offset; /* this is used to
- * calculate th
- * doorbell address
- */
+ struct ecore_dpi_info dpi_info;
- /* If one of the following is set then EDPM shouldn't be used */
+ struct ecore_common_dpm_info dpm_info;
+ u8 roce_edpm_mode;
u8 dcbx_no_edpm;
- u8 db_bar_no_edpm;
+ u8 num_vf_cnqs;
/* L2-related */
struct ecore_l2_info *p_l2_info;
@@ -708,11 +1012,26 @@ struct ecore_hwfn {
* struct ecore_hw_prepare_params by ecore client.
*/
bool b_en_pacing;
+ struct ecore_lag_info lag_info;
/* Nvm images number and attributes */
- struct ecore_nvm_image_info nvm_info;
+ struct ecore_nvm_image_info nvm_info;
+
+ /* Flow steering info */
+ union {
+ struct ecore_fs_info_e4 *e4;
+ struct ecore_fs_info_e5 *e5;
+ void *info;
+ } fs_info;
- struct phys_mem_desc *fw_overlay_mem;
+ /* Flow steering statistics accuracy */
+ u8 fs_accuracy;
+
+ struct phys_mem_desc *fw_overlay_mem;
+ enum _ecore_status_t (*p_dummy_cb)
+ (struct ecore_hwfn *p_hwfn, void *cookie);
+ /* Rx configuration */
+ struct ecore_rx_config rx_conf;
/* @DPDK */
struct ecore_ptt *p_arfs_ptt;
@@ -722,18 +1041,22 @@ struct ecore_hwfn {
u32 iov_task_flags;
};
+#ifndef __EXTRACT__LINUX__THROW__
enum ecore_mf_mode {
ECORE_MF_DEFAULT,
ECORE_MF_OVLAN,
ECORE_MF_NPAR,
ECORE_MF_UFP,
};
+#endif
+#ifndef __EXTRACT__LINUX__IF__
enum ecore_dev_type {
ECORE_DEV_TYPE_BB,
ECORE_DEV_TYPE_AH,
ECORE_DEV_TYPE_E5,
};
+#endif
/* @DPDK */
enum ecore_dbg_features {
@@ -765,7 +1088,12 @@ struct ecore_dev {
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
+ struct ecore_internal_trace internal_trace;
+ u8 dp_int_level;
+ u32 dp_int_module;
+/* for DP_* macros to work with cdev, hwfn, etc. */
+ struct ecore_dev *p_dev;
enum ecore_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
@@ -781,6 +1109,8 @@ struct ecore_dev {
#define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
#define ECORE_IS_E5(dev) ((dev)->type == ECORE_DEV_TYPE_E5)
+#define ECORE_E5_MISSING_CODE OSAL_BUILD_BUG_ON(false)
+
u16 vendor_id;
u16 device_id;
#define ECORE_DEV_ID_MASK 0xff00
@@ -829,21 +1159,18 @@ struct ecore_dev {
#define CHIP_BOND_ID_MASK 0xff
#define CHIP_BOND_ID_SHIFT 0
- u8 num_engines;
u8 num_ports;
u8 num_ports_in_engine;
- u8 num_funcs_in_port;
u8 path_id;
- u32 mf_bits;
+ u32 mf_bits; /* @DPDK */
+#ifndef __EXTRACT__LINUX__THROW__
enum ecore_mf_mode mf_mode;
-#define IS_MF_DEFAULT(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
-#define IS_MF_SI(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
-#define IS_MF_SD(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#endif
int pcie_width;
int pcie_speed;
@@ -852,7 +1179,9 @@ struct ecore_dev {
u8 mcp_rev;
u8 boot_mode;
- u8 wol;
+ /* WoL related configurations */
+ u8 wol_config;
+ u8 wol_mac[ECORE_ETH_ALEN];
u32 int_mode;
enum ecore_coalescing_mode int_coalescing_mode;
@@ -907,6 +1236,7 @@ struct ecore_dev {
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
+ u8 ilt_page_size;
struct ecore_eth_stats *reset_stats;
struct ecore_fw_data *fw_data;
@@ -916,8 +1246,7 @@ struct ecore_dev {
/* Recovery */
bool recov_in_prog;
-/* Indicates whether should prevent attentions from being reasserted */
-
+ /* Indicates whether should prevent attentions from being reasserted */
bool attn_clr_en;
/* Indicates whether allowing the MFW to collect a crash dump */
@@ -926,6 +1255,9 @@ struct ecore_dev {
/* Indicates if the reg_fifo is checked after any register access */
bool chk_reg_fifo;
+ /* Indicates the address monitored by ecore_rd()/ecore_wr() */
+ u32 monitored_hw_addr;
+
#ifndef ASIC_ONLY
bool b_is_emul_full;
bool b_is_emul_mac;
@@ -937,6 +1269,9 @@ struct ecore_dev {
/* Indicates whether this PF serves a storage target */
bool b_is_target;
+ /* Instruct driver to read statistics from the specified bin id */
+ u16 stats_bin_id;
+
#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
void *firmware;
u64 fw_len;
@@ -952,23 +1287,6 @@ struct ecore_dev {
struct rte_pci_device *pci_dev;
};
-enum ecore_hsi_def_type {
- ECORE_HSI_DEF_MAX_NUM_VFS,
- ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
- ECORE_HSI_DEF_MAX_NUM_PORTS,
- ECORE_HSI_DEF_MAX_SB_PER_PATH,
- ECORE_HSI_DEF_MAX_NUM_PFS,
- ECORE_HSI_DEF_MAX_NUM_VPORTS,
- ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
- ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
- ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
- ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
- ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
- ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
- ECORE_HSI_DEF_MAX_BTB_BLOCKS,
- ECORE_NUM_HSI_DEFS
-};
-
u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev,
enum ecore_hsi_def_type type);
@@ -1039,6 +1357,11 @@ int ecore_device_num_ports(struct ecore_dev *p_dev);
void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
u8 *mac);
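+
+/* ToS byte layout: ECN in bits [1:0], DSCP in bits [7:2] */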
+#define ECORE_TOS_ECN_SHIFT 0
+#define ECORE_TOS_ECN_MASK 0x3
+#define ECORE_TOS_DSCP_SHIFT 2
+#define ECORE_TOS_DSCP_MASK 0x3f
+
/* Flags for indication of required queues */
#define PQ_FLAGS_RLS (1 << 0)
#define PQ_FLAGS_MCOS (1 << 1)
@@ -1046,26 +1369,50 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
#define PQ_FLAGS_OOO (1 << 3)
#define PQ_FLAGS_ACK (1 << 4)
#define PQ_FLAGS_OFLD (1 << 5)
-#define PQ_FLAGS_VFS (1 << 6)
-#define PQ_FLAGS_LLT (1 << 7)
+#define PQ_FLAGS_GRP (1 << 6)
+#define PQ_FLAGS_VFS (1 << 7)
+#define PQ_FLAGS_LLT (1 << 8)
+#define PQ_FLAGS_MTC (1 << 9)
+#define PQ_FLAGS_VFR (1 << 10)
+#define PQ_FLAGS_VSR (1 << 11)
/* physical queue index for cm context intialization */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_vf_rdma(struct ecore_hwfn *p_hwfn, u16 vf);
+
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+u16 ecore_get_cm_pq_idx_grp(struct ecore_hwfn *p_hwfn, u8 idx);
+u16 ecore_get_cm_pq_idx_ofld_mtc(struct ecore_hwfn *p_hwfn, u16 idx, u8 tc);
+u16 ecore_get_cm_pq_idx_llt_mtc(struct ecore_hwfn *p_hwfn, u16 idx, u8 tc);
+u16 ecore_get_cm_pq_idx_ll2(struct ecore_hwfn *p_hwfn, u8 tc);
-/* qm vport for rate limit configuration */
-u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+/* qm vport/rl for rate limit configuration */
+u16 ecore_get_pq_vport_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+u16 ecore_get_pq_vport_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_pq_rl_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+u16 ecore_get_pq_rl_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf);
const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
/* doorbell recovery mechanism */
void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
-void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
- enum ecore_db_rec_exec);
-
-bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn);
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t
+ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 usage_cnt_reg, u32 *count);
+#define ECORE_DB_REC_COUNT 1000
+#define ECORE_DB_REC_INTERVAL 100
+
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn,
+ struct ecore_common_dpm_info *dpm_info);
+
+enum _ecore_status_t ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dpi_info *dpi_info,
+ u32 pwm_region_size,
+ u32 n_cpus);
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
@@ -1074,9 +1421,18 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
+void ecore_hw_info_set_offload_tc(struct ecore_hw_info *p_info, u8 tc);
+u8 ecore_get_offload_tc(struct ecore_hwfn *p_hwfn);
+
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
ecore_device_num_ports((_p_hwfn)->p_dev))
+enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev, u8 rel_ppfid,
+ u8 *p_abs_ppfid);
+enum _ecore_status_t ecore_llh_map_ppfid_to_pfid(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 ppfid, u8 pfid);
+
/* The PFID<->PPFID calculation is based on the relative index of a PF on its
* port. In BB there is a bug in the LLH in which the PPFID is actually engine
* based, and thus it equals the PFID.
@@ -1110,6 +1466,12 @@ enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
void ecore_set_platform_str(struct ecore_hwfn *p_hwfn,
char *buf_str, u32 buf_size);
+#define LNX_STATIC
+#define IFDEF_HAS_IFLA_VF_RATE
+#define ENDIF_HAS_IFLA_VF_RATE
+#define IFDEF_DEFINE_IFLA_VF_SPOOFCHK
+#define ENDIF_DEFINE_IFLA_VF_SPOOFCHK
+
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
#define TSTORM_QZONE_SIZE(dev) \
(ECORE_IS_E4(dev) ? TSTORM_QZONE_SIZE_E4 : TSTORM_QZONE_SIZE_E5)
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
#ifndef __ATTN_VALUES_H__
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#include "bcm_osal.h"
#include "reg_addr.h"
#include "common_hsi.h"
@@ -37,13 +37,11 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
-#define ILT_DEFAULT_HW_P_SIZE 4
-
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
/* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
@@ -81,11 +79,11 @@ union e5_type1_task_context {
};
struct src_ent {
- u8 opaque[56];
+ u8 opaque[56];
u64 next;
};
-#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
#define CONN_CXT_SIZE(p_hwfn) \
@@ -93,8 +91,6 @@ struct src_ent {
ALIGNED_TYPE_SIZE(union e4_conn_context, (p_hwfn)) : \
ALIGNED_TYPE_SIZE(union e5_conn_context, (p_hwfn)))
-#define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
-
#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
(ECORE_IS_E4(((p_hwfn)->p_dev)) ? \
ALIGNED_TYPE_SIZE(union e4_type0_task_context, (p_hwfn)) : \
@@ -117,9 +113,12 @@ static bool src_proto(enum protocol_type type)
type == PROTOCOLID_IWARP;
}
-static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+static bool tm_cid_proto(enum protocol_type type)
{
- return type == PROTOCOLID_TOE;
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_FCOE ||
+ type == PROTOCOLID_ROCE ||
+ type == PROTOCOLID_IWARP;
}
static bool tm_tid_proto(enum protocol_type type)
@@ -133,8 +132,8 @@ struct ecore_cdu_iids {
u32 per_vf_cids;
};
-static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
- struct ecore_cdu_iids *iids)
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_cdu_iids *iids)
{
u32 type;
@@ -146,8 +145,8 @@ static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
/* counts the iids for the Searcher block configuration */
struct ecore_src_iids {
- u32 pf_cids;
- u32 per_vf_cids;
+ u32 pf_cids;
+ u32 per_vf_cids;
};
static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
@@ -156,6 +155,9 @@ static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(i))
+ continue;
+
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
@@ -167,24 +169,39 @@ static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
/* counts the iids for the Timers block configuration */
struct ecore_tm_iids {
u32 pf_cids;
- u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
u32 pf_tids_total;
u32 per_vf_cids;
u32 per_vf_tids;
};
static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
- struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_conn_type_cfg *p_cfg;
bool tm_vf_required = false;
bool tm_required = false;
- u32 i, j;
+ int i, j;
- for (i = 0; i < MAX_CONN_TYPES; i++) {
+ /* Timers is a special case -> we don't count how many cids require
+ * timers but what's the max cid that will be used by the timer block.
+ * Therefore we traverse in reverse order, and once we hit a protocol
+ * that requires the timers memory, we'll sum all the protocols up
+ * to that one.
+ */
+ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
p_cfg = &p_mngr->conn_cfg[i];
+ /* In E5 the CORE CIDs are allocated first, and not according to
+ * their 'enum protocol_type' value. To avoid missing the CORE CIDs
+ * when a protocol that requires the timers memory has a lower
+ * 'enum protocol_type' value, the CORE CIDs are added outside the
+ * loop.
+ */
+ if (ECORE_IS_E5(p_hwfn->p_dev) && (i == PROTOCOLID_CORE))
+ continue;
+
if (tm_cid_proto(i) || tm_required) {
if (p_cfg->cid_count)
tm_required = true;
@@ -196,6 +213,7 @@ static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
if (p_cfg->cids_per_vf)
tm_vf_required = true;
+ iids->per_vf_cids += p_cfg->cids_per_vf;
}
if (tm_tid_proto(i)) {
@@ -215,6 +233,15 @@ static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
}
}
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ p_cfg = &p_mngr->conn_cfg[PROTOCOLID_CORE];
+
+ if (tm_required)
+ iids->pf_cids += p_cfg->cid_count;
+ if (tm_vf_required)
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+
iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
@@ -251,7 +278,7 @@ static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
- iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->vf_cids = vf_cids;
iids->tids += vf_tids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
@@ -259,8 +286,8 @@ static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
iids->cids, iids->vf_cids, iids->tids, vf_tids);
}
-static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
- u32 seg)
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
+ u32 seg)
{
struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
u32 i;
@@ -275,24 +302,18 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
-static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
-{
- struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
- p_mgr->srq_count = num_srqs;
-}
-
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+/* This function was written under the assumption that all the ILT clients
+ * share the same ILT page size (although it is not required).
+ */
+u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn)
{
- struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
- return p_mgr->srq_count;
+ return ILT_PAGE_IN_BYTES(p_hwfn->p_dev->ilt_page_size);
}
/* set the iids (cid/tid) count per protocol */
static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type type,
- u32 cid_count, u32 vf_cid_cnt)
+ enum protocol_type type,
+ u32 cid_count, u32 vf_cid_cnt)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
@@ -301,8 +322,9 @@ static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
}
-u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type type, u32 *vf_cid)
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -310,28 +332,41 @@ u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
-u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
- enum protocol_type type)
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u8 vf_id)
{
- return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+ if (vf_id != ECORE_CXT_PF_CID)
+ return p_hwfn->p_cxt_mngr->acquired_vf[type][vf_id].start_cid;
+ else
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}
u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type type)
+ enum protocol_type type,
+ u8 vf_id)
{
+ struct ecore_conn_type_cfg *p_conn_cfg;
u32 cnt = 0;
int i;
- for (i = 0; i < TASK_SEGMENTS; i++)
- cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+ p_conn_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[type];
+
+ if (vf_id != ECORE_CXT_PF_CID)
+ return p_conn_cfg->tid_seg[TASK_SEGMENT_VF].count;
+
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++)
+ cnt += p_conn_cfg->tid_seg[i].count;
return cnt;
}
-static OSAL_INLINE void
-ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto,
- u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg,
+ u8 seg_type,
+ u32 count,
+ bool has_fl)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -342,12 +377,13 @@ ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
}
/* the *p_line parameter must be either 0 for the first invocation or the
- * value returned in the previous invocation.
+ value returned in the previous invocation.
*/
-static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
- struct ecore_ilt_cli_blk *p_blk,
- u32 start_line,
- u32 total_size, u32 elem_size)
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 start_line,
+ u32 total_size,
+ u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
@@ -362,10 +398,11 @@ static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
p_blk->start_line = start_line;
}
-static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
- struct ecore_ilt_client_cfg *p_cli,
- struct ecore_ilt_cli_blk *p_blk,
- u32 *p_line, enum ilt_clients client_id)
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 *p_line,
+ enum ilt_clients client_id)
{
if (!p_blk->total_size)
return;
@@ -378,8 +415,7 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
- " [Real %08x] Start line %d\n",
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
client_id, p_cli->first.val, p_cli->last.val,
p_blk->total_size, p_blk->real_size_in_page,
p_blk->start_line);
@@ -388,7 +424,7 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
enum ilt_clients ilt_client,
u32 *dynamic_line_offset,
- u32 *dynamic_line_cnt)
+ u32 *dynamic_line_cnt, u8 is_vf)
{
struct ecore_ilt_client_cfg *p_cli;
struct ecore_conn_type_cfg *p_cfg;
@@ -404,9 +440,23 @@ static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];
cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
- (u32)CONN_CXT_SIZE(p_hwfn);
-
- *dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;
+ (u32)CONN_CXT_SIZE(p_hwfn);
+
+ *dynamic_line_cnt = is_vf ? p_cfg->cids_per_vf / cxts_per_p :
+ p_cfg->cid_count / cxts_per_p;
+
+ /* In E5 the CORE CIDs are allocated before the ROCE CIDs */
+ if (*dynamic_line_cnt && ECORE_IS_E5(p_hwfn->p_dev)) {
+ u32 roce_cid_cnt = is_vf ? p_cfg->cids_per_vf :
+ p_cfg->cid_count;
+ u32 core_cid_cnt;
+
+ p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_CORE];
+ core_cid_cnt = p_cfg->cid_count;
+ *dynamic_line_offset = 1 + (core_cid_cnt / cxts_per_p);
+ *dynamic_line_cnt = ((core_cid_cnt + roce_cid_cnt) /
+ cxts_per_p) - *dynamic_line_offset;
+ }
}
}
@@ -424,7 +474,7 @@ ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
{
p_blk->total_size = 0;
return p_blk;
- }
+}
static u32
ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
@@ -435,9 +485,9 @@ ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
ecore_cxt_src_iids(p_mngr, &src_iids);
- /* Both the PF and VFs searcher connections are stored in the per PF
- * database. Thus sum the PF searcher cids and all the VFs searcher
- * cids.
+ /* Both the PF and VFs searcher connections are stored
+ * in the per PF database. Thus sum the PF searcher
+ * cids and all the VFs searcher cids.
*/
elem_num = src_iids.pf_cids +
src_iids.per_vf_cids * p_mngr->vf_count;
@@ -450,16 +500,34 @@ ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
return elem_num;
}
-enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+static void ecore_ilt_blk_reset(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 cli_idx, blk_idx;
+
+ for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+ for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+ clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+ for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+ clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+ }
+}
+
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
+ u32 *line_count)
{
- u32 curr_line, total, i, task_size, line, total_size, elem_size;
+ u32 total, i, task_size, line, total_size, elem_size;
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u32 curr_line, prev_line, total_lines;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_blk;
struct ecore_cdu_iids cdu_iids;
struct ecore_qm_iids qm_iids;
struct ecore_tm_iids tm_iids;
struct ecore_tid_seg *p_seg;
+ u16 num_vf_pqs;
+ int ret;
OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
@@ -467,8 +535,14 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
+ /* Reset all the ILT blocks at the beginning of ILT compute - this
+ * is done in order to prevent memory allocation for irrelevant blocks
+ * afterwards (e.g. VF timer block after disabling VF-RDMA).
+ */
+ ecore_ilt_blk_reset(p_hwfn);
+
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
@@ -494,7 +568,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
&p_blk->dynamic_line_offset,
- &p_blk->dynamic_line_cnt);
+ &p_blk->dynamic_line_cnt, IOV_PF);
/* CDUC VF */
p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
@@ -510,6 +584,10 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUC);
+ ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
+ &p_blk->dynamic_line_offset,
+ &p_blk->dynamic_line_cnt, IOV_VF);
+
/* CDUT PF */
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
p_cli->first.val = curr_line;
@@ -525,8 +603,39 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
p_mngr->task_type_size[p_seg->type]);
+ prev_line = curr_line;
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
+ total_lines = curr_line - prev_line;
+
+ switch (i) {
+ case ECORE_CXT_ISCSI_TID_SEG:
+ p_mngr->iscsi_task_pages = (u16)total_lines;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "CDUT ILT Info: iscsi_task_pages %hu\n",
+ p_mngr->iscsi_task_pages);
+ break;
+ case ECORE_CXT_FCOE_TID_SEG:
+ p_mngr->fcoe_task_pages = (u16)total_lines;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "CDUT ILT Info: fcoe_task_pages %hu\n",
+ p_mngr->fcoe_task_pages);
+ break;
+ case ECORE_CXT_ROCE_TID_SEG:
+ p_mngr->roce_task_pages = (u16)total_lines;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "CDUT ILT Info: roce_task_pages %hu\n",
+ p_mngr->roce_task_pages);
+ break;
+ case ECORE_CXT_ETH_TID_SEG:
+ p_mngr->eth_task_pages = (u16)total_lines;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "CDUT ILT Info: eth_task_pages %hu\n",
+ p_mngr->eth_task_pages);
+ break;
+ default:
+ break;
+ }
}
/* next the 'init' task memory (forced load memory) */
@@ -535,8 +644,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk =
- ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
if (!p_seg->has_fl_mem) {
/* The segment is active (total size pf 'working'
@@ -590,8 +698,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ILT_CLI_CDUT);
/* 'init' memory */
- p_blk =
- ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
if (!p_seg->has_fl_mem) {
/* see comment above */
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -599,7 +706,8 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
} else {
task_size = p_mngr->task_type_size[p_seg->type];
ecore_ilt_cli_blk_fill(p_cli, p_blk,
- curr_line, total, task_size);
+ curr_line, total,
+ task_size);
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
@@ -624,23 +732,29 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
+ ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+
/* At this stage, after the first QM configuration, the PF PQs amount
* is the highest possible. Save this value at qm_info->ilt_pf_pqs to
* detect overflows in the future.
* Even though VF PQs amount can be larger than VF count, use vf_count
* because each VF requires only the full amount of CIDs.
*/
- ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+ qm_info->ilt_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ if (ECORE_IS_VF_RDMA(p_hwfn))
+ num_vf_pqs = RESC_NUM(p_hwfn, ECORE_PQ) - qm_info->ilt_pf_pqs;
+ else
+ num_vf_pqs = (u16)p_mngr->vf_count;
+
total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
- p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,
- p_hwfn->qm_info.num_vf_pqs);
+ qm_info->ilt_pf_pqs,
+ num_vf_pqs);
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
- " num_vf_pqs=%d, memory_size=%d)\n",
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, pf_pqs=%d, vf_pqs=%d, memory_size=%d)\n",
qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
- p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+ qm_info->ilt_pf_pqs, p_mngr->vf_count, total);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
QM_PQ_ELEMENT_SIZE);
@@ -650,7 +764,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
/* TM PF */
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
- ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
+ ecore_cxt_tm_iids(p_hwfn, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
@@ -668,12 +782,14 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
if (total) {
p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
- total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+ total * TM_ELEM_SIZE,
+ TM_ELEM_SIZE);
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
for (i = 1; i < p_mngr->vf_count; i++) {
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
@@ -696,30 +812,54 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
}
- /* TSDM (SRQ CONTEXT) */
- total = ecore_cxt_get_srq_count(p_hwfn);
-
- if (total) {
- p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
- p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
- ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
- total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
-
- ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
- ILT_CLI_TSDM);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
- }
+ *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
RESC_NUM(p_hwfn, ECORE_ILT)) {
- DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
- curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
return ECORE_INVAL;
}
return ECORE_SUCCESS;
}
+u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines)
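+/* Returns the number of task elements that correspond to the ILT lines used
+ * beyond what the ECORE_ILT resource provides, based on the first active CDUT
+ * task segment; returns 0 when there is no excess or the PF is not an L2
+ * personality.
+ */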
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 excess_lines, available_lines;
+ struct ecore_cxt_mngr *p_mngr;
+ u32 ilt_page_size, elem_size;
+ struct ecore_tid_seg *p_seg;
+ int i;
+
+ available_lines = RESC_NUM(p_hwfn, ECORE_ILT);
+ excess_lines = used_lines - available_lines;
+
+ if (!excess_lines)
+ return 0;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return 0;
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ elem_size = p_mngr->task_type_size[p_seg->type];
+ if (!elem_size)
+ continue;
+
+ return (ilt_page_size / elem_size) * excess_lines;
+ }
+
+ DP_ERR(p_hwfn, "failed computing excess ILT lines\n");
+ return 0;
+}
+
static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
@@ -810,6 +950,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (rc)
goto t2_fail;
+
/* Set the t2 pointers */
/* entries per page - must be a power of two */
@@ -829,7 +970,8 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
u32 j;
for (j = 0; j < ent_num - 1; j++) {
- val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ val = p_ent_phys +
+ (j + 1) * sizeof(struct src_ent);
entries[j].next = OSAL_CPU_TO_BE64(val);
}
@@ -849,11 +991,11 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
return rc;
}
-#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
- if (!clients[pos].active) { \
- continue; \
- } else \
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
+ if (!(clients)[pos].active) { \
+ continue; \
+ } else \
/* Total number of ILT lines used by this PF */
@@ -885,24 +1027,26 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
if (p_dma->virt_addr)
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_dma->p_virt,
- p_dma->phys_addr, p_dma->size);
+ p_dma->virt_addr,
+ p_dma->phys_addr,
+ p_dma->size);
p_dma->virt_addr = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
p_mngr->ilt_shadow = OSAL_NULL;
}
-static enum _ecore_status_t
-ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
- struct ecore_ilt_cli_blk *p_blk,
- enum ilt_clients ilt_client, u32 start_line_offset)
+static enum _ecore_status_t ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client,
+ u32 start_line_offset)
{
struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
u32 lines, line, sz_left, lines_to_skip, first_skipped_line;
/* Special handling for RoCE that supports dynamic allocation */
- if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn) &&
+ ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
return ECORE_SUCCESS;
if (!p_blk->total_size)
@@ -910,7 +1054,8 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
sz_left = p_blk->total_size;
lines_to_skip = p_blk->dynamic_line_cnt;
- lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) -
+ lines_to_skip;
line = p_blk->start_line + start_line_offset -
p_hwfn->p_cxt_mngr->pf_start_line;
first_skipped_line = line + p_blk->dynamic_line_offset;
@@ -926,7 +1071,6 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
}
size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
-
/* @DPDK */
#define ILT_BLOCK_ALIGN_SIZE 0x1000
p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
@@ -941,9 +1085,8 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "ILT shadow: Line [%d] Physical 0x%lx"
- " Virtual %p Size %d\n",
- line, (unsigned long)p_phys, p_virt, size);
+ "ILT shadow: Line [%d] Physical 0x%" PRIx64 " Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
sz_left -= size;
line++;
@@ -955,7 +1098,7 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_ilt_client_cfg *clients = p_mngr->clients;
struct ecore_ilt_cli_blk *p_blk;
u32 size, i, j, k;
@@ -965,7 +1108,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
size * sizeof(struct phys_mem_desc));
- if (!p_mngr->ilt_shadow) {
+ if (p_mngr->ilt_shadow == OSAL_NULL) {
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
@@ -995,6 +1138,8 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
}
}
+ p_mngr->ilt_shadow_size = size;
+
return ECORE_SUCCESS;
ilt_shadow_fail:
@@ -1013,6 +1158,9 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
+ if (!p_mngr->acquired_vf[type])
+ continue;
+
for (vf = 0; vf < max_num_vfs; vf++) {
OSAL_FREE(p_hwfn->p_dev,
p_mngr->acquired_vf[type][vf].cid_map);
@@ -1025,8 +1173,8 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
static enum _ecore_status_t
__ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
- u32 cid_start, u32 cid_count,
- struct ecore_cid_acquired_map *p_map)
+ u32 cid_start, u32 cid_count,
+ struct ecore_cid_acquired_map *p_map)
{
u32 size;
@@ -1060,8 +1208,8 @@ ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,
p_cfg = &p_mngr->conn_cfg[type];
- /* Handle PF maps */
- p_map = &p_mngr->acquired[type];
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
p_cfg->cid_count, p_map);
if (rc != ECORE_SUCCESS)
@@ -1086,7 +1234,23 @@ static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
u32 type;
enum _ecore_status_t rc;
+ /* Set the CORE CIDs to be first so it can have a global range ID */
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ rc = ecore_cid_map_alloc_single(p_hwfn, PROTOCOLID_CORE,
+ start_cid, vf_start_cid);
+ if (rc != ECORE_SUCCESS)
+ goto cid_map_fail;
+
+ start_cid = p_mngr->conn_cfg[PROTOCOLID_CORE].cid_count;
+
+ /* Add to VFs the required offset to be after the CORE CIDs */
+ vf_start_cid = start_cid;
+ }
+
for (type = 0; type < MAX_CONN_TYPES; type++) {
+ if (ECORE_IS_E5(p_hwfn->p_dev) && (type == PROTOCOLID_CORE))
+ continue;
+
rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
vf_start_cid);
if (rc != ECORE_SUCCESS)
@@ -1107,6 +1271,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_cid_acquired_map *acquired_vf;
struct ecore_ilt_client_cfg *clients;
+ struct ecore_hw_sriov_info *p_iov;
struct ecore_cxt_mngr *p_mngr;
u32 i, max_num_vfs;
@@ -1116,6 +1281,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
+ /* Set the cxt mngr pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
/* Initialize ILT client registers */
clients = p_mngr->clients;
clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
@@ -1144,16 +1312,22 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
/* default ILT page size for all clients is 64K */
for (i = 0; i < MAX_ILT_CLIENTS; i++)
- p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ p_mngr->clients[i].p_size.val = p_hwfn->p_dev->ilt_page_size;
+ /* Initialize task sizes */
/* due to removal of ISCSI/FCoE files union type0_task_context
* task_type_size will be 0. So hardcoded for now.
*/
p_mngr->task_type_size[0] = 512; /* @DPDK */
p_mngr->task_type_size[1] = 128; /* @DPDK */
- if (p_hwfn->p_dev->p_iov_info)
- p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
+ p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
+ p_iov = p_hwfn->p_dev->p_iov_info;
+ if (p_iov) {
+ p_mngr->vf_count = p_iov->total_vfs;
+ p_mngr->first_vf_in_pf = p_iov->first_vf_in_pf;
+ }
/* Initialize the dynamic ILT allocation mutex */
#ifdef CONFIG_ECORE_LOCK_ALLOC
@@ -1164,9 +1338,6 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
- /* Set the cxt mangr pointer prior to further allocations */
- p_hwfn->p_cxt_mngr = p_mngr;
-
max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
for (i = 0; i < MAX_CONN_TYPES; i++) {
acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,
@@ -1177,7 +1348,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
- p_mngr->acquired_vf[i] = acquired_vf;
+ p_hwfn->p_cxt_mngr->acquired_vf[i] = acquired_vf;
}
return ECORE_SUCCESS;
@@ -1185,7 +1356,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc;
/* Allocate the ILT shadow table */
rc = ecore_ilt_shadow_alloc(p_hwfn);
@@ -1194,10 +1365,11 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
goto tables_alloc_fail;
}
- /* Allocate the T2 table */
+ /* Allocate the T2 tables */
rc = ecore_cxt_src_t2_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate src T2 memory\n");
goto tables_alloc_fail;
}
@@ -1334,7 +1506,7 @@ void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
{
- u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
/* CDUC - connection configuration */
page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
@@ -1390,7 +1562,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
- CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET
};
static const u32 rt_type_offset_fl_arr[] = {
@@ -1404,7 +1577,6 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
/* There are initializations only for CDUT during pf Phase */
for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
- /* Segment 0 */
p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
if (!p_seg)
continue;
@@ -1415,22 +1587,40 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
* Page size is larger than 32K!
*/
offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
- (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
- p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
cdu_seg_params = 0;
SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
- STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i],
+ cdu_seg_params);
offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
- (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
- p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i],
+ cdu_seg_params);
+ }
+ /* Init VF (single) segment */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg) {
+ /* The offset expresses where this segment starts relative to
+ * the VF section in CDUT. Since the VF has a single segment,
+ * the offset is 0 by definition.
+ */
+ offset = 0;
cdu_seg_params = 0;
SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
- STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[TASK_SEGMENT_VF],
+ cdu_seg_params);
}
}
@@ -1459,12 +1649,35 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
/* CM PF */
static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
- ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, ecore_get_cm_pq_idx(p_hwfn,
+ PQ_FLAGS_LB));
+}
+
+#define GLB_MAX_ICID_RT_OFFSET(id) \
+ DORQ_REG_GLB_MAX_ICID_ ## id ## _RT_OFFSET
+#define GLB_RANGE2CONN_TYPE_RT_OFFSET(id) \
+ DORQ_REG_GLB_RANGE2CONN_TYPE_ ## id ## _RT_OFFSET
+
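+/* e.g. GLB_MAX_ICID_RT_OFFSET(0) expands to
+ * DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET.
+ */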
+static void ecore_dq_init_common(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_core_max_cid;
+
+ if (!ECORE_IS_E5(p_hwfn->p_dev))
+ return;
+
+ dq_core_max_cid = p_mngr->conn_cfg[PROTOCOLID_CORE].cid_count >>
+ DQ_RANGE_SHIFT;
+ STORE_RT_REG(p_hwfn, GLB_MAX_ICID_RT_OFFSET(0), dq_core_max_cid);
+
+ /* Range ID #1 is an empty range */
+ STORE_RT_REG(p_hwfn, GLB_MAX_ICID_RT_OFFSET(1), dq_core_max_cid);
+
+ STORE_RT_REG(p_hwfn, GLB_RANGE2CONN_TYPE_RT_OFFSET(0), PROTOCOLID_CORE);
}
/* DQ PF */
-static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+static void ecore_dq_init_pf_e4(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
@@ -1505,11 +1718,10 @@ static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
- /* Connection types 6 & 7 are not in use, yet they must be configured
- * as the highest possible connection. Not configuring them means the
- * defaults will be used, and with a large number of cids a bug may
- * occur, if the defaults will be smaller than dq_pf_max_cid /
- * dq_vf_max_cid.
+ /* Connection types 6 & 7 are not in use, but still must be configured
+ * as the highest possible connection. Not configuring them means that
+ * the defaults will be used, and with a large number of cids a bug may
+ * occur, if the defaults are smaller than dq_pf_max_cid/dq_vf_max_cid.
*/
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
@@ -1518,6 +1730,90 @@ static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
+#define PRV_MAX_ICID_RT_OFFSET(pfvf, id) \
+ DORQ_REG_PRV_ ## pfvf ## _MAX_ICID_ ## id ## _RT_OFFSET
+#define PRV_RANGE2CONN_TYPE_RT_OFFSET(pfvf, id) \
+ DORQ_REG_PRV_ ## pfvf ## _RANGE2CONN_TYPE_ ## id ## _RT_OFFSET
+
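+/* On E5 the DORQ ICID space is split into ranges: range #0 is the global
+ * CORE range programmed in ecore_dq_init_common(), range #1 is left empty,
+ * and ranges #2-#5 are the per-PF/VF private ranges programmed below.
+ */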
+static void ecore_dq_init_pf_e5(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid, dq_vf_max_cid, type;
+
+ /* The private ranges should start after the CORE's global range */
+ dq_pf_max_cid = p_mngr->conn_cfg[PROTOCOLID_CORE].cid_count >>
+ DQ_RANGE_SHIFT;
+ dq_vf_max_cid = dq_pf_max_cid;
+
+ /* Range ID #2 */
+ if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+ type = PROTOCOLID_ISCSI;
+ else if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ type = PROTOCOLID_FCOE;
+ else if (ECORE_IS_ROCE_PERSONALITY(p_hwfn))
+ type = PROTOCOLID_ROCE;
+ else /* ETH or ETH_IWARP */
+ type = PROTOCOLID_ETH;
+
+ dq_pf_max_cid += p_mngr->conn_cfg[type].cid_count >> DQ_RANGE_SHIFT;
+ dq_vf_max_cid += p_mngr->conn_cfg[type].cids_per_vf >> DQ_RANGE_SHIFT;
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 2), dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(PF, 2), type);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 2), dq_vf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(VF, 2), type);
+
+ /* Range ID #3 */
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ dq_pf_max_cid += p_mngr->conn_cfg[PROTOCOLID_ETH].cid_count >>
+ DQ_RANGE_SHIFT;
+ dq_vf_max_cid +=
+ p_mngr->conn_cfg[PROTOCOLID_ETH].cids_per_vf >>
+ DQ_RANGE_SHIFT;
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 3),
+ dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(PF, 3),
+ PROTOCOLID_ETH);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 3),
+ dq_vf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(VF, 3),
+ PROTOCOLID_ETH);
+ } else if (ECORE_IS_IWARP_PERSONALITY(p_hwfn)) {
+ dq_pf_max_cid += p_mngr->conn_cfg[PROTOCOLID_IWARP].cid_count >>
+ DQ_RANGE_SHIFT;
+ dq_vf_max_cid +=
+ p_mngr->conn_cfg[PROTOCOLID_IWARP].cids_per_vf >>
+ DQ_RANGE_SHIFT;
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 3),
+ dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(PF, 3),
+ PROTOCOLID_IWARP);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 3),
+ dq_vf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_RANGE2CONN_TYPE_RT_OFFSET(VF, 3),
+ PROTOCOLID_IWARP);
+ } else {
+ /* Range ID #3 is an empty range */
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 3),
+ dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 3),
+ dq_vf_max_cid);
+ }
+
+ /* Range IDs #4 and #5 are empty ranges */
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 4), dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 4), dq_vf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(PF, 5), dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, PRV_MAX_ICID_RT_OFFSET(VF, 5), dq_vf_max_cid);
+}
+
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ if (ECORE_IS_E4(p_hwfn->p_dev))
+ ecore_dq_init_pf_e4(p_hwfn);
+ else
+ ecore_dq_init_pf_e5(p_hwfn);
+}
+
static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
{
struct ecore_ilt_client_cfg *ilt_clients;
@@ -1529,7 +1825,8 @@ static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
ilt_clients[i].first.reg,
ilt_clients[i].first.val);
STORE_RT_REG(p_hwfn,
- ilt_clients[i].last.reg, ilt_clients[i].last.val);
+ ilt_clients[i].last.reg,
+ ilt_clients[i].last.val);
STORE_RT_REG(p_hwfn,
ilt_clients[i].p_size.reg,
ilt_clients[i].p_size.val);
@@ -1585,7 +1882,8 @@ static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
if (p_cli->active) {
STORE_RT_REG(p_hwfn,
- PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
p_cli->pf_total_lines);
@@ -1606,8 +1904,8 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
ecore_ilt_bounds_init(p_hwfn);
ecore_ilt_vf_bounds_init(p_hwfn);
- p_mngr = p_hwfn->p_cxt_mngr;
- p_shdw = p_mngr->ilt_shadow;
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, clients) {
@@ -1616,13 +1914,13 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
*/
line = clients[i].first.val - p_mngr->pf_start_line;
rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
- clients[i].first.val * ILT_ENTRY_IN_REGS;
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
for (; line <= clients[i].last.val - p_mngr->pf_start_line;
line++, rt_offst += ILT_ENTRY_IN_REGS) {
u64 ilt_hw_entry = 0;
- /** p_virt could be OSAL_NULL incase of dynamic
+ /** virt_addr could be OSAL_NULL in case of dynamic
* allocation
*/
if (p_shdw[line].virt_addr != OSAL_NULL) {
@@ -1631,12 +1929,10 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
(p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "Setting RT[0x%08x] from"
- " ILT[0x%08x] [Client is %d] to"
- " Physical addr: 0x%lx\n",
+ "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to "
+ "Physical addr: 0x%" PRIx64 "\n",
rt_offst, line, i,
- (unsigned long)(p_shdw[line].
- phys_addr >> 12));
+ (u64)(p_shdw[line].phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -1673,65 +1969,114 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
conn_num);
}
-/* Timers PF */
-#define TM_CFG_NUM_IDS_SHIFT 0
-#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
-#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
-#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
-#define TM_CFG_PARENT_PF_SHIFT 25
-#define TM_CFG_PARENT_PF_MASK 0x7ULL
-
-#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
-#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+/* Timers PF - configuration memory for the connections and the tasks */
+
+/* Common parts to connections and tasks */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+/* BB */
+#define TM_CFG_PRE_SCAN_OFFSET_BB_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_BB_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_BB_SHIFT 26
+#define TM_CFG_PARENT_PF_BB_MASK 0x7ULL
+/* AH */
+#define TM_CFG_PRE_SCAN_OFFSET_AH_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_AH_MASK 0x3FFULL
+#define TM_CFG_PARENT_PF_AH_SHIFT 26
+#define TM_CFG_PARENT_PF_AH_MASK 0xFULL
+/* E5 */
+#define TM_CFG_PRE_SCAN_OFFSET_E5_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_E5_MASK 0x3FFULL
+#define TM_CFG_PARENT_PF_E5_SHIFT 26
+#define TM_CFG_PARENT_PF_E5_MASK 0xFULL
+
+/* Connections specific */
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+/* Tasks specific */
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
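+/* Illustrative packing on BB: a connection cfg_word with NUM_IDS = 16,
+ * pre-scan offset 0 and PARENT_PF = 2 is
+ * (16 << TM_CFG_NUM_IDS_SHIFT) | (2 << TM_CFG_PARENT_PF_BB_SHIFT)
+ * = 0x08000010.
+ */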
+static void ecore_tm_cfg_set_parent_pf(struct ecore_dev *p_dev, u64 *cfg_word,
+ u8 val)
+{
+ if (ECORE_IS_BB(p_dev))
+ SET_FIELD(*cfg_word, TM_CFG_PARENT_PF_BB, val);
+ else if (ECORE_IS_AH(p_dev))
+ SET_FIELD(*cfg_word, TM_CFG_PARENT_PF_AH, val);
+ else /* E5 */
+ SET_FIELD(*cfg_word, TM_CFG_PARENT_PF_E5, val);
+}
-#define TM_CFG_TID_OFFSET_SHIFT 30
-#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
-#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
-#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+static void ecore_tm_cfg_set_pre_scan_offset(struct ecore_dev *p_dev,
+ u64 *cfg_word, u8 val)
+{
+ if (ECORE_IS_BB(p_dev))
+ SET_FIELD(*cfg_word, TM_CFG_PRE_SCAN_OFFSET_BB, val);
+ else if (ECORE_IS_AH(p_dev))
+ SET_FIELD(*cfg_word, TM_CFG_PRE_SCAN_OFFSET_AH, val);
+ else /* E5 */
+ SET_FIELD(*cfg_word, TM_CFG_PRE_SCAN_OFFSET_E5, val);
+}
static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 active_seg_mask = 0, tm_offset, rt_reg;
+ u32 *p_cfg_word_32, cfg_word_size;
struct ecore_tm_iids tm_iids;
u64 cfg_word;
u8 i;
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
- ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
+ ecore_cxt_tm_iids(p_hwfn, &tm_iids);
/* @@@TBD No pre-scan for now */
- cfg_word = 0;
- SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
- SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
- SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word, p_hwfn->rel_pf_id);
+ ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+ if (ECORE_IS_E4(p_hwfn->p_dev))
SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+ /* Each CONFIG_CONN_MEM row in E5 is 32 bits and not 64 bits as in E4 */
+ p_cfg_word_32 = (u32 *)&cfg_word;
+ cfg_word_size = ECORE_IS_E4(p_hwfn->p_dev) ? sizeof(cfg_word)
+ : sizeof(*p_cfg_word_32);
+
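+ /* Hence the rt_reg stride below is 2 runtime registers per row on
+ * E4 and a single register per row on E5.
+ */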
/* Note: We assume consecutive VFs for a PF */
for (i = 0; i < p_mngr->vf_count; i++) {
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
- (sizeof(cfg_word) / sizeof(u32)) *
- (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
- STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ (cfg_word_size / sizeof(u32)) *
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+ if (ECORE_IS_E4(p_hwfn->p_dev))
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ else
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, *p_cfg_word_32);
}
cfg_word = 0;
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
- SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
- SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
- SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+ ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word, 0); /* n/a for PF */
+ ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+ if (ECORE_IS_E4(p_hwfn->p_dev))
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
- (sizeof(cfg_word) / sizeof(u32)) *
- (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
- STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ (cfg_word_size / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+ if (ECORE_IS_E4(p_hwfn->p_dev))
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ else
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, *p_cfg_word_32);
- /* enable scan */
+ /* enable scan for PF */
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
- tm_iids.pf_cids ? 0x1 : 0x0);
-
- /* @@@TBD how to enable the scan for the VFs */
+ tm_iids.pf_cids ? 0x1 : 0x0);
tm_offset = tm_iids.per_vf_cids;
@@ -1739,14 +2084,15 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
for (i = 0; i < p_mngr->vf_count; i++) {
cfg_word = 0;
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
- SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
- SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+ ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word,
+ p_hwfn->rel_pf_id);
SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
- (sizeof(cfg_word) / sizeof(u32)) *
- (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
}
@@ -1755,15 +2101,15 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
cfg_word = 0;
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
- SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
- SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ ecore_tm_cfg_set_pre_scan_offset(p_hwfn->p_dev, &cfg_word, 0);
+ ecore_tm_cfg_set_parent_pf(p_hwfn->p_dev, &cfg_word, 0);
SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
- (sizeof(cfg_word) / sizeof(u32)) *
- (NUM_OF_VFS(p_hwfn->p_dev) +
- p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
@@ -1771,9 +2117,43 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
tm_offset += tm_iids.pf_tids[i];
}
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
+ active_seg_mask = 0;
+
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+}
- /* @@@TBD how to enable the scan for the VFs */
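+/* Zero the TM ILT shadow memory that belongs to a single VF. Note: this
+ * appears intended for VF cleanup flows (e.g. FLR) so that a re-used VF
+ * index does not see stale timer context; compare the zeroing rationale
+ * in ecore_cxt_free_ilt_range().
+ */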
+void ecore_tm_clear_vf_ilt(struct ecore_hwfn *p_hwfn, u16 vf_idx)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct phys_mem_desc *shadow_line;
+ struct ecore_ilt_cli_blk *p_blk;
+ u32 shadow_start_line, line;
+ u32 i;
+
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ p_blk = &p_cli->vf_blks[0];
+ line = p_blk->start_line + vf_idx * p_cli->vf_total_lines;
+ shadow_start_line = line - p_mngr->pf_start_line;
+
+ for (i = 0; i < p_cli->vf_total_lines; i++) {
+ shadow_line = &p_mngr->ilt_shadow[shadow_start_line + i];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "zeroing ILT for VF %d line %d address %p size %d\n",
+ vf_idx, i, shadow_line->virt_addr, shadow_line->size);
+
+ if (shadow_line->virt_addr != OSAL_NULL)
+ OSAL_MEM_ZERO(shadow_line->virt_addr, shadow_line->size);
+ }
+}
+
+static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
+{
+ if ((p_hwfn->hw_info.personality == ECORE_PCI_FCOE) &&
+ p_hwfn->pf_params.fcoe_pf_params.is_target)
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
}
static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
@@ -1781,6 +2161,7 @@ static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_conn_type_cfg *p_fcoe;
struct ecore_tid_seg *p_tid;
+ u32 max_tid;
p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
@@ -1789,15 +2170,23 @@ static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
return;
p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
- STORE_RT_REG_AGG(p_hwfn,
- PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
- p_tid->count);
+ max_tid = p_tid->count - 1;
+ if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
+ max_tid);
+ } else {
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+ max_tid);
+ }
}
void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
{
- /* CDU configuration */
ecore_cdu_init_common(p_hwfn);
+ ecore_prs_init_common(p_hwfn);
+ ecore_dq_init_common(p_hwfn);
}
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
@@ -1807,7 +2196,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
ecore_ilt_init_pf(p_hwfn);
- ecore_src_init_pf(p_hwfn);
+
+ if (!ECORE_IS_E5(p_hwfn->p_dev))
+ ecore_src_init_pf(p_hwfn);
+
ecore_tm_init_pf(p_hwfn);
ecore_prs_init_pf(p_hwfn);
}
@@ -1850,7 +2242,7 @@ enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
return ECORE_NORESOURCES;
}
- OSAL_SET_BIT(rel_cid, p_map->cid_map);
+ OSAL_NON_ATOMIC_SET_BIT(rel_cid, p_map->cid_map);
*p_cid = rel_cid + p_map->start_cid;
@@ -1890,15 +2282,16 @@ static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
break;
}
}
+
if (*p_type == MAX_CONN_TYPES) {
- DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+ DP_NOTICE(p_hwfn, false, "Invalid CID %d vfid %02x\n", cid, vfid);
goto fail;
}
rel_cid = cid - (*pp_map)->start_cid;
- if (!OSAL_GET_BIT(rel_cid, (*pp_map)->cid_map)) {
- DP_NOTICE(p_hwfn, true,
- "CID %d [vifd %02x] not acquired", cid, vfid);
+ if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, false,
+ "CID %d [vifd %02x] not acquired\n", cid, vfid);
goto fail;
}
@@ -1975,29 +2368,38 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +
- p_info->iid % cxts_per_p * conn_cxt_size;
+ p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
- "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
- (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
+ u32 rdma_tasks, u32 eth_tasks)
{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+
/* Set the number of required CORE connections */
- u32 core_cids = 1; /* SPQ */
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+ u32 core_cids = 1; /* SPQ */
+ u32 vf_core_cids = 0;
- ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids,
+ vf_core_cids);
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
- {
+ {
u32 count = 0;
struct ecore_eth_pf_params *p_params =
- &p_hwfn->pf_params.eth_pf_params;
+ &p_hwfn->pf_params.eth_pf_params;
+
+ p_mngr->task_ctx_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
if (!p_params->num_vf_cons)
p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
@@ -2005,17 +2407,108 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
p_params->num_cons,
p_params->num_vf_cons);
+ ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ETH,
+ ECORE_CXT_ETH_TID_SEG,
+ ETH_CDU_TASK_SEG_TYPE,
+ eth_tasks, false);
+
+#ifdef CONFIG_ECORE_FS
+ if (ECORE_IS_E5(p_hwfn->p_dev))
+ p_hwfn->fs_info.e5->tid_count = eth_tasks;
+#endif
+
count = p_params->num_arfs_filters;
- if (!OSAL_GET_BIT(ECORE_MF_DISABLE_ARFS,
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
&p_hwfn->p_dev->mf_bits))
p_hwfn->p_cxt_mngr->arfs_count = count;
break;
- }
+ }
default:
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_tid_mem *p_info)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 proto, seg, total_lines, i, shadow_line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_fl_seg;
+ struct ecore_tid_seg *p_seg_info;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_FCOE:
+ proto = PROTOCOLID_FCOE;
+ seg = ECORE_CXT_FCOE_TID_SEG;
+ break;
+ case ECORE_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = ECORE_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return ECORE_INVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+ if (!p_seg_info->has_fl_mem)
return ECORE_INVAL;
+
+ p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+ p_fl_seg->real_size_in_page);
+
+ for (i = 0; i < total_lines; i++) {
+ shadow_line = i + p_fl_seg->start_line -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
}
+ p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+ p_fl_seg->real_size_in_page;
+ p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+ p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+ p_info->tid_size;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_cxt_get_iid_info(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ struct ecore_ilt_client_cfg **pp_cli,
+ struct ecore_ilt_cli_blk **pp_blk,
+ u32 *elem_size, bool is_vf)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ *elem_size = CONN_CXT_SIZE(p_hwfn);
+ *pp_blk = is_vf ? &p_cli->vf_blks[CDUC_BLK] :
+ &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_ETH_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ *elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ *pp_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ETH_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
+ }
+
+ *pp_cli = p_cli;
return ECORE_SUCCESS;
}
@@ -2026,8 +2519,13 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t
ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
enum ecore_cxt_elem_type elem_type,
- u32 iid)
+ u32 iid, u8 vf_id)
{
+ /* TODO
+ * Check whether anything needs to be done differently when this is
+ * called on behalf of a VF.
+ */
+
u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_blk;
@@ -2036,33 +2534,25 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
u64 ilt_hw_entry;
void *p_virt;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ bool is_vf = (vf_id != ECORE_CXT_PF_CID);
- switch (elem_type) {
- case ECORE_ELEM_CXT:
- p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
- elem_size = CONN_CXT_SIZE(p_hwfn);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
- break;
- case ECORE_ELEM_SRQ:
- p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
- elem_size = SRQ_CXT_SIZE;
- p_blk = &p_cli->pf_blks[SRQ_BLK];
- break;
- case ECORE_ELEM_TASK:
- p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
- elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
- break;
- default:
- DP_NOTICE(p_hwfn, false,
- "ECORE_INVALID elem type = %d", elem_type);
- return ECORE_INVAL;
- }
+ rc = ecore_cxt_get_iid_info(p_hwfn, elem_type, &p_cli, &p_blk,
+ &elem_size, is_vf);
+ if (rc)
+ return rc;
/* Calculate line in ilt */
hw_p_size = p_cli->p_size.val;
elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
- line = p_blk->start_line + (iid / elems_per_p);
+ if (is_vf)
+ /* start_line - where the VF section starts (p_blk is the VF's one)
+ * (vf_id * p_cli->vf_total_lines) - where this VF starts within it
+ */
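+ /* Illustrative: with vf_total_lines = 4, vf_id = 2, elems_per_p = 128
+ * and iid = 300, line = start_line + 8 + 2.
+ */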
+ line = p_blk->start_line +
+ (vf_id * p_cli->vf_total_lines) + (iid / elems_per_p);
+ else
+ line = p_blk->start_line + (iid / elems_per_p);
+
shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
/* If line is already allocated, do nothing, otherwise allocate it and
@@ -2070,7 +2560,9 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
* This section can be run in parallel from different contexts and thus
* a mutex protection is needed.
*/
-
+#ifdef _NTDDK_
+#pragma warning(suppress : 28121)
+#endif
OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
@@ -2106,14 +2598,13 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry,
ILT_ENTRY_PHY_ADDR,
- (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));
-
-/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >>
+ 12));
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
OSAL_NULL /* default parameters */);
-
out1:
ecore_ptt_release(p_hwfn, p_ptt);
out0:
@@ -2125,77 +2616,119 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
/* This function is very RoCE oriented, if another protocol in the future
* will want this feature we'll need to modify the function to be more generic
*/
-static enum _ecore_status_t
+enum _ecore_status_t
ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
enum ecore_cxt_elem_type elem_type,
- u32 start_iid, u32 count)
+ u32 start_iid, u32 count, u8 vf_id)
{
- u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 end_iid, start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 start_offset, end_offset, start_iid_offset, end_iid_offset;
u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ bool b_skip_start = false, b_skip_end = false;
+ bool is_vf = (vf_id != ECORE_CXT_PF_CID);
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_blk;
- u32 end_iid = start_iid + count;
+ struct phys_mem_desc *ilt_page;
struct ecore_ptt *p_ptt;
u64 ilt_hw_entry = 0;
- u32 i;
+ u32 i, abs_line;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- switch (elem_type) {
- case ECORE_ELEM_CXT:
- p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
- elem_size = CONN_CXT_SIZE(p_hwfn);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
- break;
- case ECORE_ELEM_SRQ:
- p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
- elem_size = SRQ_CXT_SIZE;
- p_blk = &p_cli->pf_blks[SRQ_BLK];
- break;
- case ECORE_ELEM_TASK:
- p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
- elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
- break;
- default:
- DP_NOTICE(p_hwfn, false,
- "ECORE_INVALID elem type = %d", elem_type);
- return ECORE_INVAL;
- }
+ /* in case this client has no ILT lines, no need to free anything */
+ if (count == 0)
+ return ECORE_SUCCESS;
- /* Calculate line in ilt */
+ rc = ecore_cxt_get_iid_info(p_hwfn, elem_type, &p_cli, &p_blk,
+ &elem_size, is_vf);
+ if (rc)
+ return rc;
+
+ /* Calculate lines in ILT.
+ * Skip (don't free) the start line if 'start_iid' is not the first
+ * element in its page, and the end line if 'end_iid' is not the last
+ * element in its page.
+ */
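+ /* Illustrative: with elems_per_p = 128, start_iid = 100 and count = 300,
+ * end_iid = 399; the start line (iids 0-127) and the end line
+ * (iids 384-511) are shared, so they are only zeroed in the occupied
+ * byte ranges, while the two middle lines are zeroed and freed.
+ */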
hw_p_size = p_cli->p_size.val;
elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ end_iid = start_iid + count - 1;
+
start_line = p_blk->start_line + (start_iid / elems_per_p);
end_line = p_blk->start_line + (end_iid / elems_per_p);
- if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
- end_line--;
+
+ if (is_vf) {
+ start_line += (vf_id * p_cli->vf_total_lines);
+ end_line += (vf_id * p_cli->vf_total_lines);
+ }
+
+ if (start_iid % elems_per_p)
+ b_skip_start = true;
+
+ if ((end_iid % elems_per_p) != (elems_per_p - 1))
+ b_skip_end = true;
+
+ start_iid_offset = (start_iid % elems_per_p) * elem_size;
+ end_iid_offset = ((end_iid % elems_per_p) + 1) * elem_size;
shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
- DP_NOTICE(p_hwfn, false,
- "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ DP_NOTICE(p_hwfn, false, "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
return ECORE_TIMEOUT;
}
- for (i = shadow_start_line; i < shadow_end_line; i++) {
- if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
+ /* This piece of code frees the specific ILT range and also zeroes it.
+ * If the start or the end line of the range is shared with other iids,
+ * that line is not freed but only zeroed.
+ * The lines are zeroed to prevent future access to the old iids.
+ *
+ * For example, let's assume the RDMA tids of VF0 occupy 3.5 lines.
+ * Now we run a test which uses all the tids and then perform a VF FLR.
+ * During the FLR we will free the first 3 lines but not the 4th.
+ * If we don't zero the first half of the 4th line, a new VF0 might try
+ * to use the old tids which are stored there, and this will lead to an
+ * error.
+ */
+ for (i = shadow_start_line; i <= shadow_end_line; i++) {
+ ilt_page = &p_hwfn->p_cxt_mngr->ilt_shadow[i];
+
+ if (!ilt_page->virt_addr) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Virtual address of ILT shadow line %u is NULL\n", i);
+ continue;
+ }
+
+ start_offset = (i == shadow_start_line && b_skip_start) ?
+ start_iid_offset : 0;
+ end_offset = (i == shadow_end_line && b_skip_end) ?
+ end_iid_offset : ilt_page->size;
+
+ OSAL_MEM_ZERO((u8 *)ilt_page->virt_addr + start_offset,
+ end_offset - start_offset);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Zeroing shadow line %u start offset %x end offset %x\n",
+ i, start_offset, end_offset);
+
+ if (end_offset - start_offset < ilt_page->size)
continue;
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+ ilt_page->virt_addr,
+ ilt_page->phys_addr,
+ ilt_page->size);
- p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+ ilt_page->virt_addr = OSAL_NULL;
+ ilt_page->phys_addr = 0;
+ ilt_page->size = 0;
/* compute absolute offset */
+ abs_line = p_hwfn->p_cxt_mngr->pf_start_line + i;
reg_offset = PSWRQ2_REG_ILT_MEMORY +
- ((start_line++) * ILT_REG_SIZE_IN_BYTES *
- ILT_ENTRY_IN_REGS);
+ (abs_line * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
* wide-bus.
@@ -2212,6 +2745,75 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+ u32 tid,
+ u8 ctx_type,
+ void **pp_task_ctx)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_tid_seg *p_seg_info;
+ struct ecore_ilt_cli_blk *p_seg;
+ u32 num_tids_per_block;
+ u32 tid_size, ilt_idx;
+ u32 total_lines;
+ u32 proto, seg;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_FCOE:
+ proto = PROTOCOLID_FCOE;
+ seg = ECORE_CXT_FCOE_TID_SEG;
+ break;
+ case ECORE_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = ECORE_CXT_ISCSI_TID_SEG;
+ break;
+ case ECORE_PCI_ETH_RDMA:
+ case ECORE_PCI_ETH_IWARP:
+ case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH:
+ /* All ETH personalities refer to Ethernet TIDs since RDMA does
+ * not use this API.
+ */
+ proto = PROTOCOLID_ETH;
+ seg = ECORE_CXT_ETH_TID_SEG;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return ECORE_INVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ if (ctx_type == ECORE_CTX_WORKING_MEM) {
+ p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+ } else if (ctx_type == ECORE_CTX_FL_MEM) {
+ if (!p_seg_info->has_fl_mem)
+ return ECORE_INVAL;
+ p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ } else {
+ return ECORE_INVAL;
+ }
+ total_lines = DIV_ROUND_UP(p_seg->total_size,
+ p_seg->real_size_in_page);
+ tid_size = p_mngr->task_type_size[p_seg_info->type];
+ num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+ if (total_lines < tid / num_tids_per_block)
+ return ECORE_INVAL;
+
+ ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+ p_mngr->pf_start_line;
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
+ (tid % num_tids_per_block) * tid_size;
+
+ return ECORE_SUCCESS;
+}
+
static u16 ecore_blk_calculate_pages(struct ecore_ilt_cli_blk *p_blk)
{
if (p_blk->real_size_in_page == 0)
@@ -1,25 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef _ECORE_CID_
#define _ECORE_CID_
#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
#include "ecore_proto_if.h"
#include "ecore_cxt_api.h"
-/* Tasks segments definitions */
-#define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */
-#define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */
-#define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */
+/* Task segment definitions (keeping this numbering is necessary) */
+#define ECORE_CXT_ISCSI_TID_SEG 0 /* PROTOCOLID_ISCSI */
+#define ECORE_CXT_FCOE_TID_SEG 1 /* PROTOCOLID_FCOE */
+#define ECORE_CXT_ROCE_TID_SEG 2 /* PROTOCOLID_ROCE */
+#define ECORE_CXT_ETH_TID_SEG 3
enum ecore_cxt_elem_type {
ECORE_ELEM_CXT,
ECORE_ELEM_SRQ,
- ECORE_ELEM_TASK
+ ECORE_ELEM_RDMA_TASK,
+ ECORE_ELEM_ETH_TASK,
+ ECORE_ELEM_XRC_SRQ,
+};
+
+/* @DPDK */
+enum ecore_iov_is_vf_or_pf {
+ IOV_PF = 0, /* This is a PF instance. */
+ IOV_VF = 1 /* This is a VF instance. */
};
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
@@ -27,11 +37,12 @@ u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
u32 *vf_cid);
u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type type);
+ enum protocol_type type,
+ u8 vf_id);
u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
- enum protocol_type type);
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
+ enum protocol_type type,
+ u8 vf_id);
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
@@ -40,16 +51,27 @@ u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
+ u32 rdma_tasks, u32 eth_tasks);
/**
* @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
+ * @param last_line
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
+ u32 *last_line);
+
+/**
+ * @brief ecore_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines);
/**
* @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -68,8 +90,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
- * map
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
*
* @param p_hwfn
*
@@ -85,8 +106,7 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_cxt_hw_init_common - Initailze ILT and DQ, common phase, per
- * path.
+ * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
*
* @param p_hwfn
*/
@@ -121,7 +141,16 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
-#define ECORE_CXT_PF_CID (0xff)
+/**
+ * @brief Reconfigures QM from a non-sleepable context.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf_intr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_cxt_release - Release a cid
@@ -177,28 +206,39 @@ enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param elem_type
* @param iid
+ * @param vf_id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
enum ecore_cxt_elem_type elem_type,
- u32 iid);
+ u32 iid, u8 vf_id);
/**
- * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
- * associated with the protocol passed.
+ * @brief ecore_cxt_free_ilt_range - function frees ilt pages
+ * associated with the protocol and element type passed.
*
* @param p_hwfn
* @param proto
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto);
+enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 start_iid, u32 count, u8 vf_id);
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+ u32 tid,
+ u8 ctx_type,
+ void **task_ctx);
+
+u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn);
+
+u32 ecore_cxt_get_total_srq_count(struct ecore_hwfn *p_hwfn);
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
@@ -206,20 +246,20 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
#define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1
-/* PF per protocol configuration object */
+/* PF per protocol configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
struct ecore_tid_seg {
- u32 count;
- u8 type;
- bool has_fl_mem;
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
};
struct ecore_conn_type_cfg {
- u32 cid_count;
- u32 cids_per_vf;
- struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration,
@@ -240,7 +280,7 @@ struct ilt_cfg_pair {
};
struct ecore_ilt_cli_blk {
- u32 total_size; /* 0 means not active */
+ u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
u32 dynamic_line_offset;
@@ -248,29 +288,29 @@ struct ecore_ilt_cli_blk {
};
struct ecore_ilt_client_cfg {
- bool active;
+ bool active;
/* ILT boundaries */
- struct ilt_cfg_pair first;
- struct ilt_cfg_pair last;
- struct ilt_cfg_pair p_size;
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
/* ILT client blocks for PF */
- struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
- u32 pf_total_lines;
+ struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
/* ILT client blocks for VFs */
- struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
- u32 vf_total_lines;
+ struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
};
#define MAP_WORD_SIZE sizeof(unsigned long)
#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
struct ecore_cid_acquired_map {
- u32 start_cid;
- u32 max_count;
- u32 *cid_map;
+ u32 start_cid;
+ u32 max_count;
+ u32 *cid_map; /* @DPDK */
};
struct ecore_src_t2 {
@@ -281,7 +321,7 @@ struct ecore_src_t2 {
};
struct ecore_cxt_mngr {
- /* Per protocol configuration */
+ /* Per protocol configuration */
struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
/* computed ILT structure */
@@ -300,15 +340,14 @@ struct ecore_cxt_mngr {
struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];
- /* ILT shadow table */
+ /* ILT shadow table */
struct phys_mem_desc *ilt_shadow;
u32 ilt_shadow_size;
u32 pf_start_line;
/* Mutex for a dynamic ILT allocation */
- osal_mutex_t mutex;
+ osal_mutex_t mutex;
- /* SRC T2 */
struct ecore_src_t2 src_t2;
/* The infrastructure originally was very generic and context/task
@@ -317,19 +356,24 @@ struct ecore_cxt_mngr {
* needing for a given block we'd iterate over all the relevant
* connection-types.
* But since then we've had some additional resources, some of which
- * require memory which is independent of the general context/task
+ * require memory which is independent of the general context/task
* scheme. We add those here explicitly per-feature.
*/
/* total number of SRQ's for this hwfn */
u32 srq_count;
+ u32 xrc_srq_count;
+ u32 vfs_srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
/* TODO - VF arfs filters ? */
- u8 task_type_id;
+ u16 iscsi_task_pages;
+ u16 fcoe_task_pages;
+ u16 roce_task_pages;
+ u16 eth_task_pages;
u16 task_ctx_size;
u16 conn_ctx_size;
};
@@ -338,4 +382,6 @@ u16 ecore_get_cdut_num_pf_init_pages(struct ecore_hwfn *p_hwfn);
u16 ecore_get_cdut_num_vf_init_pages(struct ecore_hwfn *p_hwfn);
u16 ecore_get_cdut_num_pf_work_pages(struct ecore_hwfn *p_hwfn);
u16 ecore_get_cdut_num_vf_work_pages(struct ecore_hwfn *p_hwfn);
+
+void ecore_tm_clear_vf_ilt(struct ecore_hwfn *p_hwfn, u16 vf_idx);
#endif /* _ECORE_CID_ */
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_CXT_API_H__
#define __ECORE_CXT_API_H__
@@ -24,15 +24,26 @@ struct ecore_tid_mem {
};
/**
-* @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid
-*
-*
-* @param p_hwfn
-* @param p_info in/out
-*
-* @return enum _ecore_status_t
-*/
+ * @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return enum _ecore_status_t
+ */
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
+/**
+ * @brief ecore_cxt_get_tid_mem_info
+ *
+ * @param p_hwfn
+ * @param p_info
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_tid_mem *p_info);
+
#endif
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_sp_commands.h"
@@ -15,6 +15,10 @@
#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
#define ECORE_ETH_TYPE_DEFAULT (0)
+#define ECORE_ETH_TYPE_ROCE (0x8915)
+#define ECORE_UDP_PORT_TYPE_ROCE_V2 (0x12B7)
+#define ECORE_ETH_TYPE_FCOE (0x8906)
+#define ECORE_TCP_PORT_ISCSI (0xCBC)
#define ECORE_DCBX_INVALID_PRIORITY 0xFF
@@ -22,7 +26,7 @@
* the traffic class corresponding to the priority.
*/
#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \
- ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0xF)
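+/* e.g. with prio_tc_tbl = 0x76543210, priority 0 maps to TC 7 and
+ * priority 7 maps to TC 0 (one nibble per priority, priority 0 in the
+ * most significant nibble).
+ */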
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
@@ -70,6 +74,56 @@ static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
}
+static bool ecore_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == ECORE_TCP_PORT_ISCSI));
+}
+
+static bool ecore_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_FCOE));
+}
+
+static bool ecore_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_ROCE));
+}
+
+static bool ecore_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_UDP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == ECORE_UDP_PORT_TYPE_ROCE_V2));
+}
+
static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
u16 proto_id, bool ieee)
{
@@ -92,7 +146,7 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_results *p_data)
{
enum dcbx_protocol_type id;
- int i;
+ u32 i;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
p_data->dcbx_enabled);
@@ -101,10 +155,8 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
id = ecore_dcbx_app_update[i].id;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "%s info: update %d, enable %d, prio %d, tc %d,"
- " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
- ecore_dcbx_app_update[i].name,
- p_data->arr[id].update,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ ecore_dcbx_app_update[i].name, p_data->arr[id].update,
p_data->arr[id].enable, p_data->arr[id].priority,
p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
p_data->arr[id].dscp_enable,
@@ -130,7 +182,7 @@ u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
static void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- bool enable, u8 prio, u8 tc,
+ bool app_tlv, bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
@@ -143,21 +195,21 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
p_data->arr[type].dscp_enable = false;
p_data->arr[type].dscp_val = 0;
} else {
- p_data->arr[type].dscp_enable = true;
+ p_data->arr[type].dscp_enable = enable;
}
+
p_data->arr[type].update = UPDATE_DCB_DSCP;
- /* Do not add valn tag 0 when DCB is enabled and port is in UFP mode */
- if (OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ if (OSAL_TEST_BIT(ECORE_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->p_dev->mf_bits))
p_data->arr[type].dont_add_vlan0 = true;
/* QM reconf data */
- if (p_hwfn->hw_info.personality == personality)
- p_hwfn->hw_info.offload_tc = tc;
+ if (app_tlv && p_hwfn->hw_info.personality == personality)
+ ecore_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
/* Configure dcbx vlan priority in doorbell block for roce EDPM */
- if (OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
- type == DCBX_PROTOCOL_ROCE) {
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
+ (type == DCBX_PROTOCOL_ROCE)) {
ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
}
@@ -167,12 +219,12 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- bool enable, u8 prio, u8 tc,
+ bool app_tlv, bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
- int i;
+ u32 i;
for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
@@ -182,7 +234,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
- ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
+ ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
prio, tc, type, personality);
}
}
@@ -221,12 +273,22 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
u32 app_prio_bitmap, u16 id,
enum dcbx_protocol_type *type, bool ieee)
{
- if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+ if (ecore_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_FCOE;
+ } else if (ecore_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ROCE;
+ } else if (ecore_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ISCSI;
+ } else if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
+ } else if (ecore_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else if (ecore_dcbx_iwarp_tlv(p_hwfn, app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_IWARP;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "No action required, App TLV entry = 0x%x\n",
+ "No action required, App TLV entry = 0x%x\n",
app_prio_bitmap);
return false;
}
@@ -287,13 +349,13 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enable = true;
}
- ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
enable, priority, tc, type);
}
}
/* If Eth TLV is not detected, use UFP TC as default TC */
- if (OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC,
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
&p_hwfn->p_dev->mf_bits) && !eth_tlv)
p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
@@ -310,9 +372,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
continue;
/* if no app tlv was present, don't override in FW */
- ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
- p_data->arr[DCBX_PROTOCOL_ETH].enable,
- priority, tc, type);
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
+ p_data->arr[DCBX_PROTOCOL_ETH].enable,
+ priority, tc, type);
}
return ECORE_SUCCESS;
@@ -399,16 +461,14 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
read_count++;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "mib type = %d, try count = %d prefix seq num ="
- " %d suffix seq num = %d\n",
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
type, read_count, prefix_seq_num, suffix_seq_num);
} while ((prefix_seq_num != suffix_seq_num) &&
(read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
DP_ERR(p_hwfn,
- "MIB read err, mib type = %d, try count ="
- " %d prefix seq num = %d suffix seq num = %d\n",
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
type, read_count, prefix_seq_num, suffix_seq_num);
rc = ECORE_IO;
}
@@ -423,12 +483,36 @@ ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
{
u8 val;
+ p_prio->roce = ECORE_DCBX_INVALID_PRIORITY;
+ p_prio->roce_v2 = ECORE_DCBX_INVALID_PRIORITY;
+ p_prio->iscsi = ECORE_DCBX_INVALID_PRIORITY;
+ p_prio->fcoe = ECORE_DCBX_INVALID_PRIORITY;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+ p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+ val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+ p_prio->roce_v2 = val;
+ }
+
+ if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+ p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+ p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+ p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+ p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
p_results->arr[DCBX_PROTOCOL_ETH].enable)
p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "Priorities: eth %d\n",
+ "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+ p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
p_prio->eth);
}
@@ -474,8 +558,7 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
entry->sf_ieee = ECORE_DCBX_SF_IEEE_UDP_PORT;
break;
case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
- entry->sf_ieee =
- ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
break;
}
} else {
@@ -506,6 +589,7 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING);
p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.mbc = GET_MFW_FIELD(pfc, DCBX_PFC_MBC);
p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED);
pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
@@ -518,9 +602,9 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d mbc = %d\n",
p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
- p_params->pfc.enabled);
+ p_params->pfc.enabled, p_params->pfc.mbc);
}
static void
@@ -540,7 +624,12 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
p_params->ets_willing, p_params->ets_enabled,
p_params->ets_cbs, p_ets->pri_tc_tbl[0],
p_params->max_ets_tc);
-
+ if (p_params->ets_enabled && !p_params->max_ets_tc) {
+ p_params->max_ets_tc = ECORE_MAX_PFC_PRIORITIES;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "ETS params: max_ets_tc is forced to %d\n",
+ p_params->max_ets_tc);
+ }
/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
* encoded in a type u32 array of size 2.
*/
@@ -600,8 +689,8 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
params->remote.valid = true;
}
-static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct ecore_dcbx_dscp_params *p_dscp;
struct dcb_dscp_map *p_dscp_map;
@@ -616,7 +705,7 @@ static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
* where each entry holds the 4bit priority map for 8 dscp entries.
*/
for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
- pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+ pri_map = p_dscp_map->dscp_pri_map[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
entry, pri_map);
for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
@@ -785,7 +874,7 @@ ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, operational_dcbx_mib);
+ offsetof(struct public_port, operational_dcbx_mib);
data.mib = &p_hwfn->p_dcbx_info->operational;
data.size = sizeof(struct dcbx_mib);
rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
@@ -803,7 +892,7 @@ ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, remote_dcbx_mib);
+ offsetof(struct public_port, remote_dcbx_mib);
data.mib = &p_hwfn->p_dcbx_info->remote;
data.size = sizeof(struct dcbx_mib);
rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
@@ -819,7 +908,7 @@ ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, local_admin_dcbx_mib);
+ offsetof(struct public_port, local_admin_dcbx_mib);
data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
data.size = sizeof(struct dcbx_local_params);
ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin,
@@ -867,6 +956,61 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
}
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_dscp_map_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool b_en)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 ppfid, abs_ppfid, pfid;
+ u32 addr, val;
+ u16 fid;
+ enum _ecore_status_t rc;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_DSCP_TO_TC_MAP, &p_dev->mf_bits))
+ return ECORE_INVAL;
+
+ if (ECORE_IS_E4(p_hwfn->p_dev)) {
+ addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE_BB_K2;
+ val = b_en ? 0x1 : 0x0;
+ } else { /* E5 */
+ addr = NIG_REG_LLH_TC_CLS_DSCP_MODE_E5;
+ val = b_en ? 0x2 /* L2-PRI if it exists, else L3-DSCP */
+ : 0x0; /* L2-PRI only */
+ }
+
+ if (!ECORE_IS_AH(p_dev))
+ return ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
+
+ /* Workaround for a HW bug in E4 (only AH is affected):
+ * Instead of writing to "NIG_REG_DSCP_TO_TC_MAP_ENABLE[ppfid]", write
+ * to "NIG_REG_DSCP_TO_TC_MAP_ENABLE[n]", where "n" is the "pfid" which
+ * is read from "NIG_REG_LLH_PPFID2PFID_TBL[ppfid]".
+ */
+ for (ppfid = 0; ppfid < ecore_llh_get_num_ppfid(p_dev); ppfid++) {
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Cannot just take "rel_pf_id" since a ppfid could have been
+ * loaned to another pf (e.g. RDMA bonding).
+ */
+ pfid = (u8)ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_PPFID2PFID_TBL_0 +
+ abs_ppfid * 0x4);
+
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pfid);
+ ecore_fid_pretend(p_hwfn, p_ptt, fid);
+
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ p_hwfn->rel_pf_id);
+ ecore_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
return ECORE_SUCCESS;
}
@@ -893,7 +1037,7 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
/* reconfigure tcs of QM queues according
* to negotiation results
*/
- ecore_qm_reconf(p_hwfn, p_ptt);
+ ecore_qm_reconf_intr(p_hwfn, p_ptt);
/* update storm FW with negotiation results */
ecore_sp_pf_update_dcbx(p_hwfn);
@@ -902,20 +1046,33 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
- /* Update the DSCP to TC mapping enable bit if required */
- if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
- p_hwfn->p_dcbx_info->dscp_nig_update) {
- u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
- u32 addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE_BB_K2;
+ if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+ struct ecore_dcbx_results *p_data;
+ u16 val;
+
+ /* Enable/disable the DSCP to TC mapping if required */
+ if (p_hwfn->p_dcbx_info->dscp_nig_update) {
+ bool b_en = p_hwfn->p_dcbx_info->get.dscp.enabled;
+
+ rc = ecore_dcbx_dscp_map_enable(p_hwfn, p_ptt, b_en);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to update the DSCP to TC mapping enable bit\n");
+ return rc;
+ }
- rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Failed to update the DSCP to TC mapping enable bit\n");
- return rc;
+ p_hwfn->p_dcbx_info->dscp_nig_update = false;
}
- p_hwfn->p_dcbx_info->dscp_nig_update = false;
+ /* Configure in NIG which protocols support EDPM and should
+ * honor PFC.
+ */
+ p_data = &p_hwfn->p_dcbx_info->results;
+ val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+ (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+ val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+ val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
}
OSAL_DCBX_AEN(p_hwfn, type);
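For clarity, the EDPM value written above can be traced with made-up TC numbers (illustrative only; the real TCs come from the negotiated DCBX results):

	/* e.g. RoCE on TC 3 and RoCE v2 on TC 4 give a TC bitmap of 0x18 */
	u16 tc_bitmap = (0x1 << 3) | (0x1 << 4);
	u16 edpm_val = (tc_bitmap << NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT) |
		       NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;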
@@ -925,6 +1082,15 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
{
+#ifndef __EXTRACT__LINUX__
+ OSAL_BUILD_BUG_ON(ECORE_LLDP_CHASSIS_ID_STAT_LEN !=
+ LLDP_CHASSIS_ID_STAT_LEN);
+ OSAL_BUILD_BUG_ON(ECORE_LLDP_PORT_ID_STAT_LEN !=
+ LLDP_PORT_ID_STAT_LEN);
+ OSAL_BUILD_BUG_ON(ECORE_DCBX_MAX_APP_PROTOCOL !=
+ DCBX_MAX_APP_PROTOCOL);
+#endif
+
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
@@ -942,6 +1108,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
+ p_hwfn->p_dcbx_info = OSAL_NULL;
}
static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
@@ -963,17 +1130,62 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct protocol_dcb_data *p_dcb_data;
u8 update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+ p_dest->update_fcoe_dcb_data_mode = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+ p_dest->update_roce_dcb_data_mode = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+ p_dest->update_rroce_dcb_data_mode = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+ p_dest->update_iscsi_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
p_dest->update_eth_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
p_dest->update_iwarp_dcb_data_mode = update_flag;
+ p_dcb_data = &p_dest->fcoe_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+ p_dcb_data = &p_dest->roce_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE);
+ p_dcb_data = &p_dest->rroce_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE_V2);
+ p_dcb_data = &p_dest->iscsi_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
p_dcb_data = &p_dest->eth_dcb_data;
ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
p_dcb_data = &p_dest->iwarp_dcb_data;
ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
}
+bool ecore_dcbx_get_dscp_state(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_dcbx_get *p_dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+ return p_dcbx_info->dscp.enabled;
+}
+
+u8 ecore_dcbx_get_priority_tc(struct ecore_hwfn *p_hwfn, u8 pri)
+{
+ struct ecore_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+ if (pri >= ECORE_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid priority %d\n", pri);
+ return ECORE_DCBX_DEFAULT_TC;
+ }
+
+ if (!dcbx_info->operational.valid) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Dcbx parameters not available\n");
+ return ECORE_DCBX_DEFAULT_TC;
+ }
+
+ return dcbx_info->operational.params.ets_pri_tc_tbl[pri];
+}
+
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_get,
enum ecore_mib_read_type type)
@@ -981,6 +1193,11 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ if (!ecore_mcp_is_init(p_hwfn))
+ return ECORE_INVAL;
+#endif
+
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
@@ -1008,24 +1225,16 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
u8 pfc_map = 0;
int i;
- if (p_params->pfc.willing)
- *pfc |= DCBX_PFC_WILLING_MASK;
- else
- *pfc &= ~DCBX_PFC_WILLING_MASK;
-
- if (p_params->pfc.enabled)
- *pfc |= DCBX_PFC_ENABLED_MASK;
- else
- *pfc &= ~DCBX_PFC_ENABLED_MASK;
-
- *pfc &= ~DCBX_PFC_CAPS_MASK;
- *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;
+ SET_MFW_FIELD(*pfc, DCBX_PFC_ERROR, 0);
+ SET_MFW_FIELD(*pfc, DCBX_PFC_WILLING, p_params->pfc.willing ? 1 : 0);
+ SET_MFW_FIELD(*pfc, DCBX_PFC_ENABLED, p_params->pfc.enabled ? 1 : 0);
+ SET_MFW_FIELD(*pfc, DCBX_PFC_CAPS, (u32)p_params->pfc.max_tc);
+ SET_MFW_FIELD(*pfc, DCBX_PFC_MBC, p_params->pfc.mbc ? 1 : 0);
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
pfc_map |= (1 << i);
- *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
- *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);
+ SET_MFW_FIELD(*pfc, DCBX_PFC_PRI_EN_BITMAP, pfc_map);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
}
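A worked example of the priority bitmap packed above (illustrative priorities, not driver data): enabling PFC on priorities 3 and 4 yields pfc_map == 0x18, and the field macros round-trip it:

	u32 pfc = 0;
	u8 pfc_map = (1 << 3) | (1 << 4);	/* priorities 3 and 4 PFC-enabled */

	SET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP, pfc_map);
	/* GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP) == 0x18 */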
@@ -1039,23 +1248,14 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
u32 val;
int i;
- if (p_params->ets_willing)
- p_ets->flags |= DCBX_ETS_WILLING_MASK;
- else
- p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
-
- if (p_params->ets_cbs)
- p_ets->flags |= DCBX_ETS_CBS_MASK;
- else
- p_ets->flags &= ~DCBX_ETS_CBS_MASK;
-
- if (p_params->ets_enabled)
- p_ets->flags |= DCBX_ETS_ENABLED_MASK;
- else
- p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
-
- p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
- p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;
+ SET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING,
+ p_params->ets_willing ? 1 : 0);
+ SET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS,
+ p_params->ets_cbs ? 1 : 0);
+ SET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED,
+ p_params->ets_enabled ? 1 : 0);
+ SET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS,
+ (u32)p_params->max_ets_tc);
bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
@@ -1089,66 +1289,55 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
u32 *entry;
int i;
- if (p_params->app_willing)
- p_app->flags |= DCBX_APP_WILLING_MASK;
- else
- p_app->flags &= ~DCBX_APP_WILLING_MASK;
-
- if (p_params->app_valid)
- p_app->flags |= DCBX_APP_ENABLED_MASK;
- else
- p_app->flags &= ~DCBX_APP_ENABLED_MASK;
-
- p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
- p_app->flags |= (u32)p_params->num_app_entries <<
- DCBX_APP_NUM_ENTRIES_OFFSET;
+ SET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING,
+ p_params->app_willing ? 1 : 0);
+ SET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED,
+ p_params->app_valid ? 1 : 0);
+ SET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES,
+ (u32)p_params->num_app_entries);
for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_app->app_pri_tbl[i].entry;
*entry = 0;
if (ieee) {
- *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE, 0);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF, 0);
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
- *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
- DCBX_APP_SF_IEEE_OFFSET);
- *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_OFFSET);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+ (u32)DCBX_APP_SF_IEEE_ETHTYPE);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF,
+ (u32)DCBX_APP_SF_ETHTYPE);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
- *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
- DCBX_APP_SF_IEEE_OFFSET);
- *entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_OFFSET);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+ (u32)DCBX_APP_SF_IEEE_TCP_PORT);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF,
+ (u32)DCBX_APP_SF_PORT);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
- *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
- DCBX_APP_SF_IEEE_OFFSET);
- *entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_OFFSET);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+ (u32)DCBX_APP_SF_IEEE_UDP_PORT);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF,
+ (u32)DCBX_APP_SF_PORT);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
- *entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
- DCBX_APP_SF_IEEE_OFFSET;
- *entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_OFFSET);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF_IEEE,
+ (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF,
+ (u32)DCBX_APP_SF_PORT);
break;
}
} else {
- *entry &= ~DCBX_APP_SF_MASK;
- if (p_params->app_entry[i].ethtype)
- *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_OFFSET);
- else
- *entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_OFFSET);
+ SET_MFW_FIELD(*entry, DCBX_APP_SF,
+ p_params->app_entry[i].ethtype ?
+ (u32)DCBX_APP_SF_ETHTYPE :
+ (u32)DCBX_APP_SF_PORT);
}
- *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
- *entry |= ((u32)p_params->app_entry[i].proto_id <<
- DCBX_APP_PROTOCOL_ID_OFFSET);
- *entry &= ~DCBX_APP_PRI_MAP_MASK;
- *entry |= ((u32)(p_params->app_entry[i].prio) <<
- DCBX_APP_PRI_MAP_OFFSET);
+ SET_MFW_FIELD(*entry, DCBX_APP_PROTOCOL_ID,
+ (u32)p_params->app_entry[i].proto_id);
+ SET_MFW_FIELD(*entry, DCBX_APP_PRI_MAP,
+ (u32)(1 << p_params->app_entry[i].prio));
}
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
@@ -1200,9 +1389,8 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
sizeof(*p_dscp_map));
- p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
- if (p_params->dscp.enabled)
- p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
+ SET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE,
+ p_params->dscp.enabled ? 1 : 0);
for (i = 0, entry = 0; i < 8; i++) {
val = 0;
@@ -1210,7 +1398,7 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
(j * 4));
- p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
+ p_dscp_map->dscp_pri_map[i] = val;
}
p_hwfn->p_dcbx_info->dscp_nig_update = true;
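The nibble packing done by the loop above can be checked with example values (not taken from the driver): eight 4-bit DSCP entries share one u32, so mapping DSCP 0..7 to priorities 0..7 gives 0x76543210:

	u8 dscp_pri_map[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	u32 val = 0;
	int j;

	for (j = 0; j < 8; j++)
		val |= (u32)dscp_pri_map[j] << (j * 4);
	/* val == 0x76543210 */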
@@ -1231,10 +1419,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set *params,
bool hw_commit)
{
+ u32 resp = 0, param = 0, drv_mb_param = 0;
struct dcbx_local_params local_admin;
struct ecore_dcbx_mib_meta_data data;
struct dcb_dscp_map dscp_map;
- u32 resp = 0, param = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
@@ -1263,8 +1451,9 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
data.size);
}
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_LLDP_SEND, 1);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, ¶m);
+ drv_mb_param, &resp, ¶m);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Failed to send DCBX update request\n");
@@ -1276,7 +1465,7 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set *params)
{
struct ecore_dcbx_get *dcbx_info;
- int rc;
+ enum _ecore_status_t rc;
if (p_hwfn->p_dcbx_info->set.config.valid) {
OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
@@ -1557,6 +1746,11 @@ ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_dcbx_info;
enum _ecore_status_t rc;
+ if (IS_VF(p_hwfn->p_dev)) {
+ DP_ERR(p_hwfn->p_dev, "ecore rdma get dscp priority not supported for VF.\n");
+ return ECORE_INVAL;
+ }
+
if (dscp_index >= ECORE_DCBX_DSCP_SIZE) {
DP_ERR(p_hwfn, "Invalid dscp index %d\n", dscp_index);
return ECORE_INVAL;
@@ -1588,6 +1782,11 @@ ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_dcbx_set dcbx_set;
enum _ecore_status_t rc;
+ if (IS_VF(p_hwfn->p_dev)) {
+ DP_ERR(p_hwfn->p_dev, "ecore rdma set dscp priority not supported for VF.\n");
+ return ECORE_INVAL;
+ }
+
if (dscp_index >= ECORE_DCBX_DSCP_SIZE ||
pri_val >= ECORE_MAX_PFC_PRIORITIES) {
DP_ERR(p_hwfn, "Invalid dscp params: index = %d pri = %d\n",
@@ -1605,3 +1804,58 @@ ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, 1);
}
+
+enum _ecore_status_t
+ecore_lldp_get_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_stats *p_params)
+{
+ u32 mcp_resp = 0, mcp_param = 0, drv_mb_param = 0, addr, val;
+ struct lldp_stats_stc lldp_stats;
+ enum _ecore_status_t rc;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_LLDP_STATS_AGENT, val);
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_LLDP_STATS,
+ drv_mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "GET_LLDP_STATS failed, error = %d\n", rc);
+ return rc;
+ }
+
+ addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &lldp_stats, addr, sizeof(lldp_stats));
+
+ p_params->tx_frames = lldp_stats.tx_frames_total;
+ p_params->rx_frames = lldp_stats.rx_frames_total;
+ p_params->rx_discards = lldp_stats.rx_frames_discarded;
+ p_params->rx_age_outs = lldp_stats.rx_age_outs;
+
+ return ECORE_SUCCESS;
+}
+
+bool ecore_dcbx_is_enabled(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_dcbx_operational_params *op_params =
+ &p_hwfn->p_dcbx_info->get.operational;
+
+ if (op_params->valid && op_params->enabled)
+ return true;
+
+ return false;
+}
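A minimal caller-side sketch of the query interface exported by this file (hypothetical usage on a PF hwfn; error handling trimmed):

	struct ecore_dcbx_get dcbx_results;

	if (ecore_dcbx_query_params(p_hwfn, &dcbx_results,
				    ECORE_DCBX_OPERATIONAL_MIB) == ECORE_SUCCESS &&
	    ecore_dcbx_is_enabled(p_hwfn)) {
		/* dcbx_results.operational.params holds the negotiated ETS/PFC/APP data */
	}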
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_DCBX_H__
#define __ECORE_DCBX_H__
@@ -45,18 +45,26 @@ struct ecore_dcbx_mib_meta_data {
/* ECORE local interface routines */
enum _ecore_status_t
-ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
- enum ecore_mib_read_type);
+ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type);
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+#define ECORE_DCBX_DEFAULT_TC 0
+
+u8 ecore_dcbx_get_priority_tc(struct ecore_hwfn *p_hwfn, u8 pri);
+
+bool ecore_dcbx_get_dscp_state(struct ecore_hwfn *p_hwfn);
+
/* Returns TOS value for a given priority */
u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
enum _ecore_status_t
ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+bool ecore_dcbx_is_enabled(struct ecore_hwfn *p_hwfn);
+
#endif /* __ECORE_DCBX_H__ */
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_DCBX_API_H__
#define __ECORE_DCBX_API_H__
@@ -30,7 +30,7 @@ struct ecore_dcbx_app_data {
bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
-#ifndef __EXTRACT__LINUX__
+#ifndef __EXTRACT__LINUX__IF__
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
DCBX_PROTOCOL_FCOE,
@@ -72,6 +72,7 @@ struct ecore_dcbx_app_prio {
struct ecore_dbcx_pfc_params {
bool willing;
bool enabled;
+ bool mbc;
u8 prio[ECORE_MAX_PFC_PRIORITIES];
u8 max_tc;
};
@@ -86,7 +87,6 @@ enum ecore_dcbx_sf_ieee_type {
struct ecore_app_entry {
bool ethtype;
enum ecore_dcbx_sf_ieee_type sf_ieee;
- bool enabled;
u8 prio;
u16 proto_id;
enum dcbx_protocol_type proto_type;
@@ -199,17 +199,26 @@ struct ecore_lldp_sys_tlvs {
u16 buf_size;
};
-enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
- struct ecore_dcbx_get *,
- enum ecore_mib_read_type);
+struct ecore_lldp_stats {
+ enum ecore_lldp_agent agent;
+ u32 tx_frames;
+ u32 rx_frames;
+ u32 rx_discards;
+ u32 rx_age_outs;
+};
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type);
-enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
- struct ecore_dcbx_set *);
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_set *params);
-enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
- struct ecore_ptt *,
- struct ecore_dcbx_set *,
- bool);
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_set *params,
+ bool hw_commit);
enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -238,6 +247,11 @@ enum _ecore_status_t
ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 dscp_index, u8 pri_val);
+enum _ecore_status_t
+ecore_lldp_get_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_stats *p_params);
+
+#ifndef __EXTRACT__LINUX__C__
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
@@ -246,5 +260,6 @@ static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
{DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
};
+#endif
#endif /* __ECORE_DCBX_API_H__ */
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
@@ -27,8 +27,15 @@
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dcbx.h"
+#include <linux/pci_regs.h> /* @DPDK */
#include "ecore_l2.h"
+#ifdef _NTDDK_
+#pragma warning(push)
+#pragma warning(disable : 28167)
+#pragma warning(disable : 28123)
+#endif
+
/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
* registers involved are not split and thus configuration is a race where
* some of the PFs configuration might be lost.
@@ -43,6 +50,11 @@ static u32 qm_lock_ref_cnt;
static bool b_ptt_gtt_init;
#endif
+void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_page_size)
+{
+ p_dev->ilt_page_size = ilt_page_size;
+}
+
/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
* doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
@@ -60,10 +72,11 @@ struct ecore_db_recovery_entry {
u8 hwfn_idx;
};
+/* @DPDK */
/* display a single doorbell recovery entry */
-void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
- struct ecore_db_recovery_entry *db_entry,
- const char *action)
+static void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ const char *action)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
action, db_entry, db_entry->db_addr, db_entry->db_data,
@@ -72,17 +85,40 @@ void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
db_entry->hwfn_idx);
}
-/* doorbell address sanity (address within doorbell bar range) */
-bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
- void *db_data)
+/* find hwfn according to the doorbell address */
+static struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr)
{
- /* make sure doorbell address is within the doorbell bar */
- if (db_addr < p_dev->doorbells || (u8 *)db_addr >
- (u8 *)p_dev->doorbells + p_dev->db_size) {
+ struct ecore_hwfn *p_hwfn;
+
+ /* in CMT doorbell bar is split down the middle between engine 0 and engine 1 */
+ if (ECORE_IS_CMT(p_dev))
+ p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
+ &p_dev->hwfns[0] : &p_dev->hwfns[1];
+ else
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+
+ return p_hwfn;
+}
+
+/* doorbell address sanity (address within doorbell bar range) */
+static bool ecore_db_rec_sanity(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ enum ecore_db_rec_width db_width,
+ void *db_data)
+{
+ struct ecore_hwfn *p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+ u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
+ /* make sure doorbell address is within the doorbell bar */
+ if (db_addr < p_hwfn->doorbells ||
+ (u8 OSAL_IOMEM *)db_addr + width >
+ (u8 OSAL_IOMEM *)p_hwfn->doorbells + p_hwfn->db_size) {
OSAL_WARN(true,
"Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
- db_addr, p_dev->doorbells,
- (u8 *)p_dev->doorbells + p_dev->db_size);
+ db_addr, p_hwfn->doorbells,
+ (u8 OSAL_IOMEM *)p_hwfn->doorbells + p_hwfn->db_size);
+
return false;
}
@@ -95,24 +131,6 @@ bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
return true;
}
-/* find hwfn according to the doorbell address */
-struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
- void OSAL_IOMEM *db_addr)
-{
- struct ecore_hwfn *p_hwfn;
-
- /* In CMT doorbell bar is split down the middle between engine 0 and
- * enigne 1
- */
- if (ECORE_IS_CMT(p_dev))
- p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
- &p_dev->hwfns[0] : &p_dev->hwfns[1];
- else
- p_hwfn = ECORE_LEADING_HWFN(p_dev);
-
- return p_hwfn;
-}
-
/* add a new entry to the doorbell recovery mechanism */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
void OSAL_IOMEM *db_addr,
@@ -123,14 +141,8 @@ enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
struct ecore_db_recovery_entry *db_entry;
struct ecore_hwfn *p_hwfn;
- /* shortcircuit VFs, for now */
- if (IS_VF(p_dev)) {
- DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
- return ECORE_SUCCESS;
- }
-
/* sanitize doorbell address */
- if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_width, db_data))
return ECORE_INVAL;
/* obtain hwfn from doorbell address */
@@ -171,16 +183,6 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
enum _ecore_status_t rc = ECORE_INVAL;
struct ecore_hwfn *p_hwfn;
- /* shortcircuit VFs, for now */
- if (IS_VF(p_dev)) {
- DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
- return ECORE_SUCCESS;
- }
-
- /* sanitize doorbell address */
- if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
- return ECORE_INVAL;
-
/* obtain hwfn from doorbell address */
p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
@@ -190,12 +192,9 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
&p_hwfn->db_recovery_info.list,
list_entry,
struct ecore_db_recovery_entry) {
- /* search according to db_data addr since db_addr is not unique
- * (roce)
- */
+ /* search according to db_data addr since db_addr is not unique (roce) */
if (db_entry->db_data == db_data) {
- ecore_db_recovery_dp_entry(p_hwfn, db_entry,
- "Deleting");
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
&p_hwfn->db_recovery_info.list);
rc = ECORE_SUCCESS;
@@ -217,40 +216,40 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
}
/* initialize the doorbell recovery mechanism */
-enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
/* make sure db_size was set in p_dev */
- if (!p_hwfn->p_dev->db_size) {
+ if (!p_hwfn->db_size) {
DP_ERR(p_hwfn->p_dev, "db_size not set\n");
return ECORE_INVAL;
}
OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock,
+ "db_recov_lock"))
return ECORE_NOMEM;
#endif
OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
- p_hwfn->db_recovery_info.db_recovery_counter = 0;
+ p_hwfn->db_recovery_info.count = 0;
return ECORE_SUCCESS;
}
/* destroy the doorbell recovery mechanism */
-void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
+static void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
{
struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
- DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
- db_entry = OSAL_LIST_FIRST_ENTRY(
- &p_hwfn->db_recovery_info.list,
- struct ecore_db_recovery_entry,
- list_entry);
+ db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list,
+ struct ecore_db_recovery_entry,
+ list_entry);
ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
&p_hwfn->db_recovery_info.list);
@@ -260,17 +259,34 @@ void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
#endif
- p_hwfn->db_recovery_info.db_recovery_counter = 0;
+ p_hwfn->db_recovery_info.count = 0;
}
/* print the content of the doorbell recovery mechanism */
void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
{
struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+ u32 dp_module;
+ u8 dp_level;
DP_NOTICE(p_hwfn, false,
- "Dispalying doorbell recovery database. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
+ "Displaying doorbell recovery database. Counter is %d\n",
+ p_hwfn->db_recovery_info.count);
+
+ if (IS_PF(p_hwfn->p_dev))
+ if (p_hwfn->pf_iov_info->max_db_rec_count > 0)
+ DP_NOTICE(p_hwfn, false,
+ "Max VF counter is %u (VF %u)\n",
+ p_hwfn->pf_iov_info->max_db_rec_count,
+ p_hwfn->pf_iov_info->max_db_rec_vfid);
+
+ /* Save dp_module/dp_level values and enable ECORE_MSG_SPQ verbosity
+ * to force printing of the db entries.
+ */
+ dp_module = p_hwfn->dp_module;
+ p_hwfn->dp_module |= ECORE_MSG_SPQ;
+ dp_level = p_hwfn->dp_level;
+ p_hwfn->dp_level = ECORE_LEVEL_VERBOSE;
/* protect the list */
OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
@@ -282,28 +298,27 @@ void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
}
OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ /* Get back to saved dp_module/dp_level values */
+ p_hwfn->dp_module = dp_module;
+ p_hwfn->dp_level = dp_level;
}
/* ring the doorbell of a single doorbell recovery entry */
-void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
- struct ecore_db_recovery_entry *db_entry,
- enum ecore_db_rec_exec db_exec)
+static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry)
{
/* Print according to width */
if (db_entry->db_width == DB_REC_WIDTH_32B)
- DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
- db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
- db_entry->db_addr, *(u32 *)db_entry->db_data);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "ringing doorbell address %p data %x\n",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
else
- DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
- db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "ringing doorbell address %p data %" PRIx64 "\n",
db_entry->db_addr,
- *(unsigned long *)(db_entry->db_data));
-
- /* Sanity */
- if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
- db_entry->db_data))
- return;
+ *(u64 *)(db_entry->db_data));
/* Flush the write combined buffer. Since there are multiple doorbelling
* entities using the same address, if we don't flush, a transaction
@@ -312,14 +327,12 @@ void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
OSAL_WMB(p_hwfn->p_dev);
/* Ring the doorbell */
- if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
- if (db_entry->db_width == DB_REC_WIDTH_32B)
- DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
- *(u32 *)(db_entry->db_data));
- else
- DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
/* Flush the write combined buffer. Next doorbell may come from a
* different entity to the same address...
@@ -328,30 +341,21 @@ void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
}
/* traverse the doorbell recovery entry list and ring all the doorbells */
-void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
- enum ecore_db_rec_exec db_exec)
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn)
{
struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
- if (db_exec != DB_REC_ONCE) {
- DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
-
- /* track amount of times recovery was executed */
- p_hwfn->db_recovery_info.db_recovery_counter++;
- }
+ DP_NOTICE(p_hwfn, false,
+ "Executing doorbell recovery. Counter is %d\n",
+ ++p_hwfn->db_recovery_info.count);
/* protect the list */
OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
OSAL_LIST_FOR_EACH_ENTRY(db_entry,
&p_hwfn->db_recovery_info.list,
list_entry,
- struct ecore_db_recovery_entry) {
- ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
- if (db_exec == DB_REC_ONCE)
- break;
- }
-
+ struct ecore_db_recovery_entry)
+ ecore_db_recovery_ring(p_hwfn, db_entry);
OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
}
/******************** Doorbell Recovery end ****************/
@@ -364,7 +368,7 @@ enum ecore_llh_filter_type {
};
struct ecore_llh_mac_filter {
- u8 addr[ETH_ALEN];
+ u8 addr[ECORE_ETH_ALEN];
};
struct ecore_llh_protocol_filter {
@@ -661,16 +665,15 @@ ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
- u8 rel_ppfid, u8 *p_abs_ppfid)
+enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev, u8 rel_ppfid,
+ u8 *p_abs_ppfid)
{
struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
- u8 ppfids = p_llh_info->num_ppfid - 1;
if (rel_ppfid >= p_llh_info->num_ppfid) {
DP_NOTICE(p_dev, false,
- "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
- rel_ppfid, ppfids);
+ "rel_ppfid %d is not valid, available indices are 0..%hhu\n",
+ rel_ppfid, p_llh_info->num_ppfid - 1);
return ECORE_INVAL;
}
@@ -679,6 +682,24 @@ static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_llh_map_ppfid_to_pfid(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 ppfid, u8 pfid)
+{
+ u8 abs_ppfid;
+ u32 addr;
+ enum _ecore_status_t rc;
+
+ rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+ ecore_wr(p_hwfn, p_ptt, addr, pfid);
+
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t
__ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
@@ -791,21 +812,21 @@ static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
bool avoid_eng_affin)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- u8 ppfid, abs_ppfid;
+ u8 ppfid;
enum _ecore_status_t rc;
for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
- u32 addr;
-
- rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
- if (rc != ECORE_SUCCESS)
+ rc = ecore_llh_map_ppfid_to_pfid(p_hwfn, p_ptt, ppfid,
+ p_hwfn->rel_pf_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to map ppfid %d to pfid %d\n",
+ ppfid, p_hwfn->rel_pf_id);
return rc;
-
- addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
- ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+ }
}
- if (OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+ if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
!ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
rc = ecore_llh_add_mac_filter(p_dev, 0,
p_hwfn->hw_info.hw_mac_addr);
@@ -833,7 +854,10 @@ enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
}
-/* TBD - should be removed when these definitions are available in reg_addr.h */
+/* TBD -
+ * When the relevant definitions are available in reg_addr.h, the SHIFT
+ * definitions should be removed, and the MASK definitions should be revised.
+ */
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
@@ -939,7 +963,7 @@ enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
return rc;
}
-struct ecore_llh_filter_details {
+struct ecore_llh_filter_e4_details {
u64 value;
u32 mode;
u32 protocol_type;
@@ -948,10 +972,10 @@ struct ecore_llh_filter_details {
};
static enum _ecore_status_t
-ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
- struct ecore_llh_filter_details *p_details,
- bool b_write_access)
+ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
+ struct ecore_llh_filter_e4_details *p_details,
+ bool b_write_access)
{
u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
struct dmae_params params;
@@ -1035,16 +1059,16 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
-ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
u32 high, u32 low)
{
- struct ecore_llh_filter_details filter_details;
+ struct ecore_llh_filter_e4_details filter_details;
filter_details.enable = 1;
filter_details.value = ((u64)high << 32) | low;
filter_details.hdr_sel =
- OSAL_GET_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ?
+ OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ?
1 : /* inner/encapsulated header */
0; /* outer/tunnel header */
filter_details.protocol_type = filter_prot_type;
@@ -1052,42 +1076,104 @@ ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1 : /* protocol-based classification */
0; /* MAC-address based classification */
- return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
- &filter_details,
- true /* write access */);
+ return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
}
static enum _ecore_status_t
-ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn,
+ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
{
- struct ecore_llh_filter_details filter_details;
+ struct ecore_llh_filter_e4_details filter_details;
OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
- return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
- &filter_details,
- true /* write access */);
+ return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
+}
+
+/* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
+ * Should be removed when the function is implemented.
+ */
+static enum _ecore_status_t
+ecore_llh_add_filter_e5(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+ struct ecore_ptt OSAL_UNUSED * p_ptt,
+ u8 OSAL_UNUSED abs_ppfid, u8 OSAL_UNUSED filter_idx,
+ u8 OSAL_UNUSED filter_prot_type, u32 OSAL_UNUSED high,
+ u32 OSAL_UNUSED low)
+{
+ ECORE_E5_MISSING_CODE;
+
+ return ECORE_SUCCESS;
+}
+
+/* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
+ * Should be removed when the function is implemented.
+ */
+static enum _ecore_status_t
+ecore_llh_remove_filter_e5(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+ struct ecore_ptt OSAL_UNUSED * p_ptt,
+ u8 OSAL_UNUSED abs_ppfid,
+ u8 OSAL_UNUSED filter_idx)
+{
+ ECORE_E5_MISSING_CODE;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
+ u32 low)
+{
+ if (ECORE_IS_E4(p_hwfn->p_dev))
+ return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, filter_prot_type,
+ high, low);
+ else /* E5 */
+ return ecore_llh_add_filter_e5(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, filter_prot_type,
+ high, low);
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx)
+{
+ if (ECORE_IS_E4(p_hwfn->p_dev))
+ return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ else /* E5 */
+ return ecore_llh_remove_filter_e5(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
}
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
- u8 mac_addr[ETH_ALEN])
+ u8 mac_addr[ECORE_ETH_ALEN])
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
union ecore_llh_filter filter;
u8 filter_idx, abs_ppfid;
+ struct ecore_ptt *p_ptt;
u32 high, low, ref_cnt;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ return rc;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ DP_NOTICE(p_dev, false, "Setting MAC to LLH is not supported to VF\n");
+ return ECORE_NOTIMPL;
+ }
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
if (p_ptt == OSAL_NULL)
return ECORE_AGAIN;
- if (!OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
- goto out;
-
OSAL_MEM_ZERO(&filter, sizeof(filter));
- OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+ OSAL_MEMCPY(filter.mac.addr, mac_addr, ECORE_ETH_ALEN);
rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
ECORE_LLH_FILTER_TYPE_MAC,
&filter, &filter_idx, &ref_cnt);
@@ -1204,6 +1290,22 @@ ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
}
+enum _ecore_status_t
+ecore_llh_add_dst_tcp_port_filter(struct ecore_dev *p_dev, u16 dest_port)
+{
+ return ecore_llh_add_protocol_filter(p_dev, 0,
+ ECORE_LLH_FILTER_TCP_DEST_PORT,
+ ECORE_LLH_DONT_CARE, dest_port);
+}
+
+enum _ecore_status_t
+ecore_llh_add_src_tcp_port_filter(struct ecore_dev *p_dev, u16 src_port)
+{
+ return ecore_llh_add_protocol_filter(p_dev, 0,
+ ECORE_LLH_FILTER_TCP_SRC_PORT,
+ src_port, ECORE_LLH_DONT_CARE);
+}
+
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
enum ecore_llh_prot_filter_type_t type,
@@ -1212,15 +1314,15 @@ ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
u8 filter_idx, abs_ppfid, type_bitmap;
- char str[32];
union ecore_llh_filter filter;
u32 high, low, ref_cnt;
+ char str[32];
enum _ecore_status_t rc = ECORE_SUCCESS;
if (p_ptt == OSAL_NULL)
return ECORE_AGAIN;
- if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
goto out;
rc = ecore_llh_protocol_filter_stringify(p_dev, type,
@@ -1275,23 +1377,33 @@ ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
}
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
- u8 mac_addr[ETH_ALEN])
+ u8 mac_addr[ECORE_ETH_ALEN])
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
union ecore_llh_filter filter;
u8 filter_idx, abs_ppfid;
+ struct ecore_ptt *p_ptt;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 ref_cnt;
- if (p_ptt == OSAL_NULL)
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
return;
- if (!OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
- goto out;
+ if (ECORE_IS_NVMETCP_PERSONALITY(p_hwfn))
+ return;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ DP_NOTICE(p_dev, false, "Removing MAC from LLH is not supported to VF\n");
+ return;
+ }
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+
+ if (p_ptt == OSAL_NULL)
+ return;
OSAL_MEM_ZERO(&filter, sizeof(filter));
- OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+ OSAL_MEMCPY(filter.mac.addr, mac_addr, ECORE_ETH_ALEN);
rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
&ref_cnt);
if (rc != ECORE_SUCCESS)
@@ -1326,6 +1438,22 @@ void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
ecore_ptt_release(p_hwfn, p_ptt);
}
+void ecore_llh_remove_dst_tcp_port_filter(struct ecore_dev *p_dev,
+ u16 dest_port)
+{
+ ecore_llh_remove_protocol_filter(p_dev, 0,
+ ECORE_LLH_FILTER_TCP_DEST_PORT,
+ ECORE_LLH_DONT_CARE, dest_port);
+}
+
+void ecore_llh_remove_src_tcp_port_filter(struct ecore_dev *p_dev,
+ u16 src_port)
+{
+ ecore_llh_remove_protocol_filter(p_dev, 0,
+ ECORE_LLH_FILTER_TCP_SRC_PORT,
+ src_port, ECORE_LLH_DONT_CARE);
+}
+
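A hedged usage sketch for the destination-port helpers above (the port number is an arbitrary example; error handling trimmed):

	if (ecore_llh_add_dst_tcp_port_filter(p_dev, 4000) != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "Failed to add LLH TCP dest-port filter\n");

	/* traffic to TCP port 4000 is now classified via ppfid 0 */

	ecore_llh_remove_dst_tcp_port_filter(p_dev, 4000);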
void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
enum ecore_llh_prot_filter_type_t type,
u16 source_port_or_eth_type,
@@ -1334,15 +1462,15 @@ void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
u8 filter_idx, abs_ppfid;
- char str[32];
union ecore_llh_filter filter;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ char str[32];
u32 ref_cnt;
if (p_ptt == OSAL_NULL)
return;
- if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
goto out;
rc = ecore_llh_protocol_filter_stringify(p_dev, type,
@@ -1396,8 +1524,8 @@ void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
if (p_ptt == OSAL_NULL)
return;
- if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
- !OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
goto out;
rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
@@ -1411,7 +1539,7 @@ void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
filter_idx++) {
rc = ecore_llh_remove_filter(p_hwfn, p_ptt,
- abs_ppfid, filter_idx);
+ abs_ppfid, filter_idx);
if (rc != ECORE_SUCCESS)
goto out;
}
@@ -1423,8 +1551,8 @@ void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
{
u8 ppfid;
- if (!OSAL_GET_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
- !OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
return;
for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
@@ -1450,22 +1578,18 @@ enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t
-ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
+static enum _ecore_status_t
+ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 ppfid)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
- struct ecore_llh_filter_details filter_details;
+ struct ecore_llh_filter_e4_details filter_details;
u8 abs_ppfid, filter_idx;
u32 addr;
enum _ecore_status_t rc;
- if (!p_ptt)
- return ECORE_AGAIN;
-
rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
if (rc != ECORE_SUCCESS)
- goto out;
+ return rc;
addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
DP_NOTICE(p_hwfn, false,
@@ -1476,22 +1600,46 @@ ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
filter_idx++) {
OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
- rc = ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid,
- filter_idx, &filter_details,
- false /* read access */);
+ rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, &filter_details,
+ false /* read access */);
if (rc != ECORE_SUCCESS)
- goto out;
+ return rc;
DP_NOTICE(p_hwfn, false,
- "filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
+ "filter %2hhd: enable %d, value 0x%016" PRIx64 ", mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
filter_idx, filter_details.enable,
- (unsigned long)filter_details.value,
- filter_details.mode,
+ filter_details.value, filter_details.mode,
filter_details.protocol_type, filter_details.hdr_sel);
}
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_dump_ppfid_e5(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+ struct ecore_ptt OSAL_UNUSED * p_ptt,
+ u8 OSAL_UNUSED ppfid)
+{
+ ECORE_E5_MISSING_CODE;
+
+ return ECORE_NOTIMPL;
+}
+
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ enum _ecore_status_t rc;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (ECORE_IS_E4(p_dev))
+ rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
+ else /* E5 */
+ rc = ecore_llh_dump_ppfid_e5(p_hwfn, p_ptt, ppfid);
-out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -1513,15 +1661,6 @@ enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
/******************************* NIG LLH - End ********************************/
-/* Configurable */
-#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
- * load the driver. The number was
- * arbitrarily set.
- */
-
-/* Derived */
-#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS)
-
static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum BAR_ID bar_id)
@@ -1544,18 +1683,18 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_CMT(p_hwfn->p_dev)) {
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
- val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
} else {
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
- val = 512 * 1024;
+ return 512 * 1024;
}
-
- return val;
}
-void ecore_init_dp(struct ecore_dev *p_dev,
- u32 dp_module, u8 dp_level, void *dp_ctx)
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module,
+ u8 dp_level,
+ void *dp_ctx)
{
u32 i;
@@ -1571,6 +1710,70 @@ void ecore_init_dp(struct ecore_dev *p_dev,
}
}
+void ecore_init_int_dp(struct ecore_dev *p_dev, u32 dp_module, u8 dp_level)
+{
+ u32 i;
+
+ p_dev->dp_int_level = dp_level;
+ p_dev->dp_int_module = dp_module;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->dp_int_level = dp_level;
+ p_hwfn->dp_int_module = dp_module;
+ }
+}
+
+#define ECORE_DP_INT_LOG_MAX_STR_SIZE 256 /* @DPDK */
+
+void ecore_dp_internal_log(struct ecore_dev *p_dev, char *fmt, ...)
+{
+ char buff[ECORE_DP_INT_LOG_MAX_STR_SIZE];
+ struct ecore_internal_trace *p_int_log;
+ u32 len, partial_len;
+ unsigned long flags;
+ osal_va_list args;
+ char *buf = buff;
+ u32 prod;
+
+ if (!p_dev)
+ return;
+
+ p_int_log = &p_dev->internal_trace;
+
+ if (!p_int_log->buf)
+ return;
+
+ OSAL_VA_START(args, fmt);
+ len = OSAL_VSNPRINTF(buf, ECORE_DP_INT_LOG_MAX_STR_SIZE, fmt, args);
+ OSAL_VA_END(args);
+
+ if (len > ECORE_DP_INT_LOG_MAX_STR_SIZE) {
+ len = ECORE_DP_INT_LOG_MAX_STR_SIZE;
+ buf[len - 1] = '\n';
+ }
+
+ partial_len = len;
+
+ OSAL_SPIN_LOCK_IRQSAVE(&p_int_log->lock, flags);
+ prod = p_int_log->prod % p_int_log->size;
+
+ if (p_int_log->size - prod <= len) {
+ partial_len = p_int_log->size - prod;
+ OSAL_MEMCPY(p_int_log->buf + prod, buf, partial_len);
+ p_int_log->prod += partial_len;
+ prod = p_int_log->prod % p_int_log->size;
+ buf += partial_len;
+ partial_len = len - partial_len;
+ }
+
+ OSAL_MEMCPY(p_int_log->buf + prod, buf, partial_len);
+
+ p_int_log->prod += partial_len;
+
+ OSAL_SPIN_UNLOCK_IRQSAVE(&p_int_log->lock, flags);
+}
+
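The wrap-around logic above can be modelled in a few standalone lines (illustrative only; memcpy stands in for OSAL_MEMCPY and locking is omitted):

	/* copy 'len' bytes into a ring of 'size' bytes at running index '*prod' */
	static void ring_put(char *ring, u32 size, u32 *prod, const char *msg, u32 len)
	{
		u32 off = *prod % size;
		u32 first = (size - off <= len) ? size - off : len;

		memcpy(ring + off, msg, first);
		memcpy(ring, msg + first, len - first);
		*prod += len;
	}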
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
u8 i;
@@ -1581,19 +1784,32 @@ enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->p_dev = p_dev;
p_hwfn->my_id = i;
p_hwfn->b_active = false;
+ p_hwfn->p_dummy_cb = ecore_int_dummy_comp_cb;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock,
+ "dma_info_lock"))
goto handle_err;
#endif
OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
}
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(&p_dev->hwfns[0], &p_dev->internal_trace.lock,
+ "internal_trace_lock"))
+ goto handle_err;
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_dev->internal_trace.lock);
+
+ p_dev->p_dev = p_dev;
/* hwfn 0 is always active */
p_dev->hwfns[0].b_active = true;
/* set the default cache alignment to 128 (may be overridden later) */
p_dev->cache_shift = 7;
+
+ p_dev->ilt_page_size = ECORE_DEFAULT_ILT_PAGE_SIZE;
+
return ECORE_SUCCESS;
#ifdef CONFIG_ECORE_LOCK_ALLOC
handle_err:
@@ -1612,9 +1828,16 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
+ qm_info->qm_pq_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
+ qm_info->qm_vport_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
+ qm_info->qm_port_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
+ qm_info->wfq_data = OSAL_NULL;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&qm_info->qm_info_lock);
+#endif
}
static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
@@ -1627,49 +1850,66 @@ void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev)) {
- for_each_hwfn(p_dev, i)
- ecore_l2_free(&p_dev->hwfns[i]);
- return;
- }
-
- OSAL_FREE(p_dev, p_dev->fw_data);
-
OSAL_FREE(p_dev, p_dev->reset_stats);
+ p_dev->reset_stats = OSAL_NULL;
- ecore_llh_free(p_dev);
+ if (IS_PF(p_dev)) {
+ OSAL_FREE(p_dev, p_dev->fw_data);
+ p_dev->fw_data = OSAL_NULL;
+
+ ecore_llh_free(p_dev);
+ }
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ ecore_spq_free(p_hwfn);
+ ecore_l2_free(p_hwfn);
+
+ if (IS_VF(p_dev)) {
+ ecore_db_recovery_teardown(p_hwfn);
+ continue;
+ }
+
ecore_cxt_mngr_free(p_hwfn);
ecore_qm_info_free(p_hwfn);
- ecore_spq_free(p_hwfn);
ecore_eq_free(p_hwfn);
ecore_consq_free(p_hwfn);
ecore_int_free(p_hwfn);
+
ecore_iov_free(p_hwfn);
- ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn);
ecore_dbg_user_data_free(p_hwfn);
- ecore_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
- /* @@@TBD Flush work-queue ? */
+ ecore_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
+ /* @@@TBD Flush work-queue ?*/
/* destroy doorbell recovery mechanism */
ecore_db_recovery_teardown(p_hwfn);
}
+
+ if (IS_PF(p_dev)) {
+ OSAL_FREE(p_dev, p_dev->fw_data);
+ p_dev->fw_data = OSAL_NULL;
+ }
}
/******************** QM initialization *******************/
-
-/* bitmaps for indicating active traffic classes.
- * Special case for Arrowhead 4 port
- */
+/* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */
/* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
-#define ACTIVE_TCS_BMAP 0x9f
-/* 0..3 actually used, OOO and high priority stuff all use 3 */
-#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+#define ACTIVE_TCS_BMAP_E4 0x9f
+#define ACTIVE_TCS_BMAP_E5 0x1f /* 0..3 actually used, 4 serves OOO */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */
+
+#define ACTIVE_TCS_BMAP(_p_hwfn) \
+ (ECORE_IS_E4((_p_hwfn)->p_dev) ? \
+ ACTIVE_TCS_BMAP_E4 : ACTIVE_TCS_BMAP_E5)
+
+static u16 ecore_init_qm_get_num_active_vfs(struct ecore_hwfn *p_hwfn)
+{
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+ p_hwfn->pf_iov_info->max_active_vfs : 0;
+}
/* determines the physical queue flags for a given PF. */
static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
@@ -1680,8 +1920,10 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
flags = PQ_FLAGS_LB;
/* feature flags */
- if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+ if (ecore_init_qm_get_num_active_vfs(p_hwfn))
flags |= PQ_FLAGS_VFS;
+
+ /* @DPDK */
if (IS_ECORE_PACING(p_hwfn))
flags |= PQ_FLAGS_RLS;
@@ -1691,27 +1933,11 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
if (!IS_ECORE_PACING(p_hwfn))
flags |= PQ_FLAGS_MCOS;
break;
- case ECORE_PCI_FCOE:
- flags |= PQ_FLAGS_OFLD;
- break;
- case ECORE_PCI_ISCSI:
- flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
- break;
- case ECORE_PCI_ETH_ROCE:
- flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
- if (!IS_ECORE_PACING(p_hwfn))
- flags |= PQ_FLAGS_MCOS;
- break;
- case ECORE_PCI_ETH_IWARP:
- flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
- if (!IS_ECORE_PACING(p_hwfn))
- flags |= PQ_FLAGS_MCOS;
- break;
default:
- DP_ERR(p_hwfn, "unknown personality %d\n",
- p_hwfn->hw_info.personality);
+ DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality);
return 0;
}
+
return flags;
}
@@ -1723,8 +1949,53 @@ u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
{
- return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
- p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
+
+static u16 ecore_init_qm_get_num_vfs_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_pqs, num_vfs = ecore_init_qm_get_num_active_vfs(p_hwfn);
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ /* One L2 PQ per VF */
+ num_pqs = num_vfs;
+
+ /* Separate RDMA PQ per VF */
+ if ((PQ_FLAGS_VFR & pq_flags))
+ num_pqs += num_vfs;
+
+ /* Separate RDMA PQ for all VFs */
+ if ((PQ_FLAGS_VSR & pq_flags))
+ num_pqs += 1;
+
+ return num_pqs;
+}
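+
+/* Illustrative example for ecore_init_qm_get_num_vfs_pqs(): with 8 active
+ * VFs, PQ_FLAGS_VFR adds a second (RDMA) PQ per VF and PQ_FLAGS_VSR adds one
+ * PQ shared by all VFs, so num_pqs = 8 + 8 + 1 = 17; with neither flag set
+ * only the 8 L2 PQs remain.
+ */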
+
+static bool ecore_lag_support(struct ecore_hwfn *p_hwfn)
+{
+ return (ECORE_IS_AH(p_hwfn->p_dev) &&
+ ECORE_IS_ROCE_PERSONALITY(p_hwfn) &&
+ OSAL_TEST_BIT(ECORE_MF_ROCE_LAG, &p_hwfn->p_dev->mf_bits));
+}
+
+static u8 ecore_init_qm_get_num_mtc_tcs(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ if (!(PQ_FLAGS_MTC & pq_flags))
+ return 1;
+
+ return ecore_init_qm_get_num_tcs(p_hwfn);
+}
+
+static u8 ecore_init_qm_get_num_mtc_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u32 num_ports, num_tcs;
+
+ num_ports = ecore_lag_support(p_hwfn) ? LAG_MAX_PORT_NUM : 1;
+ num_tcs = ecore_init_qm_get_num_mtc_tcs(p_hwfn);
+
+ return num_ports * num_tcs;
}
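+
+/* Illustrative example for ecore_init_qm_get_num_mtc_pqs(): assuming a
+ * 2-port RoCE LAG and 4 TCs with PQ_FLAGS_MTC set, 2 * 4 = 8 PQs are
+ * reserved per MTC consumer; without MTC this collapses to one TC per port.
+ */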
#define NUM_DEFAULT_RLS 1
@@ -1733,21 +2004,13 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- /* num RLs can't exceed resource amount of rls or vports or the
- * dcqcn qps
- */
- num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- RESC_NUM(p_hwfn, ECORE_VPORT));
+ /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */
+ num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), RESC_NUM(p_hwfn,
+ ECORE_VPORT));
- /* make sure after we reserve the default and VF rls we'll have
- * something left
- */
- if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
- DP_NOTICE(p_hwfn, false,
- "no rate limiters left for PF rate limiting"
- " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+ /* make sure after we reserve the default and VF rls we'll have something left */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
return 0;
- }
/* subtract rls necessary for VFs and one default one for the PF */
num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
@@ -1755,17 +2018,40 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
return num_pf_rls;
}
+static u16 ecore_init_qm_get_num_rls(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+ u16 num_rls = 0;
+
+ num_rls += (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+ /* RL for each VF L2 PQ */
+ num_rls += (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_active_vfs(p_hwfn);
+
+ /* RL for each VF RDMA PQ */
+ num_rls += (!!(PQ_FLAGS_VFR & pq_flags)) *
+ ecore_init_qm_get_num_active_vfs(p_hwfn);
+
+ /* RL for VF RDMA single PQ */
+ num_rls += (!!(PQ_FLAGS_VSR & pq_flags));
+
+ return num_rls;
+}
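+
+/* Illustrative example for ecore_init_qm_get_num_rls(): with PQ_FLAGS_RLS,
+ * PQ_FLAGS_VFS and PQ_FLAGS_VFR set, 16 PF rate limiters and 8 active VFs
+ * give 16 + 8 + 8 = 32 RLs; PQ_FLAGS_VSR would add one more for the shared
+ * VF RDMA PQ.
+ */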
+
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
{
u32 pq_flags = ecore_get_pq_flags(p_hwfn);
- /* all pqs share the same vport (hence the 1 below), except for vfs
- * and pf_rl pqs
- */
- return (!!(PQ_FLAGS_RLS & pq_flags)) *
- ecore_init_qm_get_num_pf_rls(p_hwfn) +
- (!!(PQ_FLAGS_VFS & pq_flags)) *
- ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+ /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */
+ return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
+
+static u8 ecore_init_qm_get_group_count(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->qm_info.offload_group_count;
}
/* calc amount of PQs according to the requested flags */
@@ -1773,16 +2059,15 @@ u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
{
u32 pq_flags = ecore_get_pq_flags(p_hwfn);
- return (!!(PQ_FLAGS_RLS & pq_flags)) *
- ecore_init_qm_get_num_pf_rls(p_hwfn) +
- (!!(PQ_FLAGS_MCOS & pq_flags)) *
- ecore_init_qm_get_num_tcs(p_hwfn) +
+ return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) +
(!!(PQ_FLAGS_LB & pq_flags)) +
(!!(PQ_FLAGS_OOO & pq_flags)) +
(!!(PQ_FLAGS_ACK & pq_flags)) +
- (!!(PQ_FLAGS_OFLD & pq_flags)) +
- (!!(PQ_FLAGS_VFS & pq_flags)) *
- ecore_init_qm_get_num_vfs(p_hwfn);
+ (!!(PQ_FLAGS_OFLD & pq_flags)) * ecore_init_qm_get_num_mtc_pqs(p_hwfn) +
+ (!!(PQ_FLAGS_GRP & pq_flags)) * OFLD_GRP_SIZE +
+ (!!(PQ_FLAGS_LLT & pq_flags)) * ecore_init_qm_get_num_mtc_pqs(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs_pqs(p_hwfn);
}
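+
+/* Each term above mirrors one PQ group set up by ecore_init_qm_pq_params():
+ * PF rate-limit PQs, per-TC (MCOS) PQs, the pure LB, OOO and pure ACK PQs,
+ * the offload and low-latency PQs (each multiplied by the MTC PQ count),
+ * the per-offload-group PQs and finally the VF PQs.
+ */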
/* initialize the top level QM params */
@@ -1793,7 +2078,8 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
/* pq and vport bases for this PF */
qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
- qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ qm_info->start_vport = (u16)RESC_START(p_hwfn, ECORE_VPORT);
+ qm_info->start_rl = (u16)RESC_START(p_hwfn, ECORE_RL);
/* rate limiting and weighted fair queueing are always enabled */
qm_info->vport_rl_en = 1;
@@ -1803,15 +2089,11 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
- qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
- NUM_OF_PHYS_TCS;
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;
- /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
- * 4 otherwise
- */
+ /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */
if (!qm_info->ooo_tc)
- qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
- DCBX_TCP_OOO_TC;
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC;
}
/* initialize qm vport params */
@@ -1834,7 +2116,7 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
- ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+ ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP(p_hwfn);
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
@@ -1862,11 +2144,14 @@ static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
qm_info->num_pqs = 0;
qm_info->num_vports = 0;
+ qm_info->num_rls = 0;
qm_info->num_pf_rls = 0;
qm_info->num_vf_pqs = 0;
qm_info->first_vf_pq = 0;
qm_info->first_mcos_pq = 0;
qm_info->first_rl_pq = 0;
+ qm_info->single_vf_rdma_pq = 0;
+ qm_info->pq_overflow = false;
}
static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
@@ -1876,18 +2161,13 @@ static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
qm_info->num_vports++;
if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
- DP_ERR(p_hwfn,
- "vport overflow! qm_info->num_vports %d,"
- " qm_init_get_num_vports() %d\n",
- qm_info->num_vports,
- ecore_init_qm_get_num_vports(p_hwfn));
+ DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
}
/* initialize a single pq and manage qm_info resources accounting.
- * The pq_init_flags param determines whether the PQ is rate limited
- * (for VF or PF)
- * and whether a new vport is allocated to the pq or not (i.e. vport will be
- * shared)
+ * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF)
+ * and whether a new vport is allocated to the pq or not (i.e. vport will be shared)
*/
/* flags for pq init */
@@ -1898,65 +2178,108 @@ static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
#define PQ_INIT_DEFAULT_TC 0
-#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
-static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
- struct ecore_qm_info *qm_info,
- u8 tc, u32 pq_init_flags)
+void ecore_hw_info_set_offload_tc(struct ecore_hw_info *p_info, u8 tc)
{
- u16 pq_idx = qm_info->num_pqs, max_pq =
- ecore_init_qm_get_num_pqs(p_hwfn);
+ p_info->offload_tc = tc;
+ p_info->offload_tc_set = true;
+}
- if (pq_idx > max_pq)
- DP_ERR(p_hwfn,
- "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+static bool ecore_is_offload_tc_set(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.offload_tc_set;
+}
+
+u8 ecore_get_offload_tc(struct ecore_hwfn *p_hwfn)
+{
+ if (ecore_is_offload_tc_set(p_hwfn))
+ return p_hwfn->hw_info.offload_tc;
+
+ return PQ_INIT_DEFAULT_TC;
+}
+
+static void ecore_init_qm_pq_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags, u8 port)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);
+ u16 num_pf_pqs;
+
+ if (pq_idx > max_pq) {
+ qm_info->pq_overflow = true;
+ DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+ }
/* init pq params */
- qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
- qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
- qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].port_id = port;
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports;
qm_info->qm_pq_params[pq_idx].tc_id = tc;
qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
- qm_info->qm_pq_params[pq_idx].rl_valid =
- (pq_init_flags & PQ_INIT_PF_RL ||
- pq_init_flags & PQ_INIT_VF_RL);
- /* The "rl_id" is set as the "vport_id" */
- qm_info->qm_pq_params[pq_idx].rl_id =
- qm_info->qm_pq_params[pq_idx].vport_id;
+ if (pq_init_flags & (PQ_INIT_PF_RL | PQ_INIT_VF_RL)) {
+ qm_info->qm_pq_params[pq_idx].rl_valid = 1;
+ qm_info->qm_pq_params[pq_idx].rl_id =
+ qm_info->start_rl + qm_info->num_rls++;
+ }
/* qm params accounting */
qm_info->num_pqs++;
+ if (pq_init_flags & PQ_INIT_VF_RL) {
+ qm_info->num_vf_pqs++;
+ } else {
+ num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ if (qm_info->ilt_pf_pqs && num_pf_pqs > qm_info->ilt_pf_pqs) {
+ qm_info->pq_overflow = true;
+ DP_ERR(p_hwfn,
+ "ilt overflow! num_pf_pqs %d, qm_info->ilt_pf_pqs %d\n",
+ num_pf_pqs, qm_info->ilt_pf_pqs);
+ }
+ }
+
if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
qm_info->num_vports++;
if (pq_init_flags & PQ_INIT_PF_RL)
qm_info->num_pf_rls++;
- if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
- DP_ERR(p_hwfn,
- "vport overflow! qm_info->num_vports %d,"
- " qm_init_get_num_vports() %d\n",
- qm_info->num_vports,
- ecore_init_qm_get_num_vports(p_hwfn));
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) {
+ qm_info->pq_overflow = true;
+ DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
+ }
- if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
- DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
- " qm_init_get_num_pf_rls() %d\n",
- qm_info->num_pf_rls,
- ecore_init_qm_get_num_pf_rls(p_hwfn));
+ if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) {
+ qm_info->pq_overflow = true;
+ DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn));
+ }
+}
+
+/* init one qm pq, assuming the PF's own port */
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ ecore_init_qm_pq_port(p_hwfn, qm_info, tc, pq_init_flags, p_hwfn->port_id);
}
/* get pq index according to PQ_FLAGS */
static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
- u32 pq_flags)
+ unsigned long pq_flags)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
- if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
- sizeof(pq_flags)) > 1)
+ if (OSAL_BITMAP_WEIGHT(&pq_flags,
+ sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
+ goto err;
+ }
+
+ if (!(ecore_get_pq_flags(p_hwfn) & pq_flags)) {
+ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
goto err;
+ }
switch (pq_flags) {
case PQ_FLAGS_RLS:
@@ -1970,16 +2293,20 @@ static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
case PQ_FLAGS_ACK:
return &qm_info->pure_ack_pq;
case PQ_FLAGS_OFLD:
- return &qm_info->offload_pq;
+ return &qm_info->first_ofld_pq;
+ case PQ_FLAGS_LLT:
+ return &qm_info->first_llt_pq;
case PQ_FLAGS_VFS:
return &qm_info->first_vf_pq;
+ case PQ_FLAGS_GRP:
+ return &qm_info->first_ofld_grp_pq;
+ case PQ_FLAGS_VSR:
+ return &qm_info->single_vf_rdma_pq;
default:
goto err;
}
-
err:
- DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
- return OSAL_NULL;
+ return &qm_info->start_pq;
}
/* save pq index in qm info */
@@ -1991,59 +2318,285 @@ static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
}
+static u16 ecore_qm_get_start_pq(struct ecore_hwfn *p_hwfn)
+{
+ u16 start_pq;
+
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+ start_pq = p_hwfn->qm_info.start_pq;
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ return start_pq;
+}
+
/* get tx pq index, with the PQ TX base already set (ready for context init) */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
{
- u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+ u16 *base_pq_idx;
+ u16 pq_idx;
+
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+ base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+ pq_idx = *base_pq_idx + CM_TX_PQ_BASE;
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ return pq_idx;
+}
+
+u16 ecore_get_cm_pq_idx_grp(struct ecore_hwfn *p_hwfn, u8 idx)
+{
+ u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_GRP);
+ u8 max_idx = ecore_init_qm_get_group_count(p_hwfn);
- return *base_pq_idx + CM_TX_PQ_BASE;
+ if (max_idx == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+ PQ_FLAGS_GRP);
+ return ecore_qm_get_start_pq(p_hwfn);
+ }
+
+ if (idx > max_idx)
+ DP_ERR(p_hwfn, "idx %d must be smaller than %d\n", idx, max_idx);
+
+ return pq_idx + (idx % max_idx);
}
u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
{
+ u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS);
u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+ if (max_tc == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+ PQ_FLAGS_MCOS);
+ return ecore_qm_get_start_pq(p_hwfn);
+ }
+
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
+ return pq_idx + (tc % max_tc);
+}
+
+static u8 ecore_qm_get_pqs_per_vf(struct ecore_hwfn *p_hwfn)
+{
+ u8 pqs_per_vf;
+ u32 pq_flags;
+
+ /* When VFR is set, there is a pair of PQs per VF. If VSR is set,
+ * no additional action is required when computing the per VF PQ.
+ */
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+ pq_flags = ecore_get_pq_flags(p_hwfn);
+ pqs_per_vf = (PQ_FLAGS_VFR & pq_flags) ? 2 : 1;
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ return pqs_per_vf;
}
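+
+/* Illustrative example: with PQ_FLAGS_VFR set each VF owns two consecutive
+ * PQs (L2 first, RDMA second), so VF 3 maps to offsets 6 and 7 from the
+ * first VF PQ; without VFR it maps to the single PQ at offset 3.
+ */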
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
{
- u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+ u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS);
+ u16 max_vf = ecore_init_qm_get_num_active_vfs(p_hwfn);
+ u8 pqs_per_vf;
+
+ if (max_vf == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+ PQ_FLAGS_VFS);
+ return ecore_qm_get_start_pq(p_hwfn);
+ }
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
+ pqs_per_vf = ecore_qm_get_pqs_per_vf(p_hwfn);
+
+ return pq_idx + ((vf % max_vf) * pqs_per_vf);
+}
+
+u16 ecore_get_cm_pq_idx_vf_rdma(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u32 pq_flags;
+ u16 pq_idx;
+
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+ pq_flags = ecore_get_pq_flags(p_hwfn);
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ /* If VSR is set, a single dedicated PQ serves RDMA for all VFs */
+ if (PQ_FLAGS_VSR & pq_flags)
+ pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VSR);
+ else
+ pq_idx = ecore_get_cm_pq_idx_vf(p_hwfn, vf);
+
+ /* If VFR is set, the VF's 2nd PQ is for RDMA */
+ if ((PQ_FLAGS_VFR & pq_flags))
+ pq_idx++;
+
+ return pq_idx;
}
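+
+/* Resulting VF RDMA mapping (assuming VSR and VFR are mutually exclusive):
+ * with VSR set the single shared VF RDMA PQ is returned; otherwise the VF's
+ * own PQ is returned, advanced by one when VFR reserves a dedicated RDMA PQ
+ * right after the VF's L2 PQ.
+ */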
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
{
+ u16 pq_idx = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS);
u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
- /* for rate limiters, it is okay to use the modulo behavior - no
- * DP_ERR
+ if (max_rl == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%x do not exist\n",
+ PQ_FLAGS_RLS);
+ return ecore_qm_get_start_pq(p_hwfn);
+ }
+
+ /* When an invalid RL index is requested, return the highest
+ * available RL PQ. "max_rl - 1" is the relative index of the
+ * last PQ reserved for RLs.
*/
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + (rl % max_rl);
+ if (rl >= max_rl) {
+ DP_ERR(p_hwfn,
+ "rl %hu is not a valid rate limiter, returning rl %hu\n",
+ rl, max_rl - 1);
+ return pq_idx + max_rl - 1;
+ }
+
+ return pq_idx + rl;
}
-u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+static u16 ecore_get_qm_pq_from_cm_pq(struct ecore_hwfn *p_hwfn, u16 cm_pq_id)
{
- u16 start_pq, pq, qm_pq_idx;
+ u16 start_pq = ecore_qm_get_start_pq(p_hwfn);
- pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
- start_pq = p_hwfn->qm_info.start_pq;
- qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+ return cm_pq_id - CM_TX_PQ_BASE - start_pq;
+}
+
+static u16 ecore_get_vport_id_from_pq(struct ecore_hwfn *p_hwfn, u16 pq_id)
+{
+ u16 vport_id;
+
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+ vport_id = p_hwfn->qm_info.qm_pq_params[pq_id].vport_id;
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ return vport_id;
+}
+
+static u16 ecore_get_rl_id_from_pq(struct ecore_hwfn *p_hwfn, u16 pq_id)
+{
+ u16 rl_id;
+
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+ rl_id = p_hwfn->qm_info.qm_pq_params[pq_id].rl_id;
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
- if (qm_pq_idx > p_hwfn->qm_info.num_pqs) {
+ return rl_id;
+}
+
+u16 ecore_get_pq_vport_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 cm_pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+ u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+ return ecore_get_vport_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+u16 ecore_get_pq_vport_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 cm_pq_id = ecore_get_cm_pq_idx_vf(p_hwfn, vf);
+ u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+ return ecore_get_vport_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+u16 ecore_get_pq_rl_id_from_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 cm_pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+ u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+ return ecore_get_rl_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+u16 ecore_get_pq_rl_id_from_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 cm_pq_id = ecore_get_cm_pq_idx_vf(p_hwfn, vf);
+ u16 qm_pq_id = ecore_get_qm_pq_from_cm_pq(p_hwfn, cm_pq_id);
+
+ return ecore_get_rl_id_from_pq(p_hwfn, qm_pq_id);
+}
+
+static u16 ecore_get_cm_pq_offset_mtc(struct ecore_hwfn *p_hwfn,
+ u16 idx, u8 tc)
+{
+ u16 pq_offset = 0, max_pqs;
+ u8 num_ports, num_tcs;
+
+ num_ports = ecore_lag_support(p_hwfn) ? LAG_MAX_PORT_NUM : 1;
+ num_tcs = ecore_init_qm_get_num_mtc_tcs(p_hwfn);
+
+ /* add the port offset */
+ pq_offset += (idx % num_ports) * num_tcs;
+ /* add the tc offset */
+ pq_offset += tc % num_tcs;
+
+ /* Verify that the pq returned is within pqs range */
+ max_pqs = ecore_init_qm_get_num_mtc_pqs(p_hwfn);
+ if (pq_offset >= max_pqs) {
DP_ERR(p_hwfn,
- "qm_pq_idx %d must be smaller than %d\n",
- qm_pq_idx, p_hwfn->qm_info.num_pqs);
+ "pq_offset %d must be smaller than %d (idx %d tc %d)\n",
+ pq_offset, max_pqs, idx, tc);
+ return 0;
}
- return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+ return pq_offset;
+}
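+
+/* Illustrative example: with 2 LAG ports and 4 MTC TCs, idx 1 / tc 2 yields
+ * pq_offset = (1 % 2) * 4 + (2 % 4) = 6, i.e. the 7th PQ of the MTC block.
+ */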
+
+u16 ecore_get_cm_pq_idx_ofld_mtc(struct ecore_hwfn *p_hwfn,
+ u16 idx, u8 tc)
+{
+ u16 first_ofld_pq, pq_offset;
+
+#ifdef CONFIG_DCQCN
+ if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
+ return ecore_get_cm_pq_idx_rl(p_hwfn, idx);
+#endif
+
+ first_ofld_pq = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ pq_offset = ecore_get_cm_pq_offset_mtc(p_hwfn, idx, tc);
+
+ return first_ofld_pq + pq_offset;
+}
+
+u16 ecore_get_cm_pq_idx_llt_mtc(struct ecore_hwfn *p_hwfn,
+ u16 idx, u8 tc)
+{
+ u16 first_llt_pq, pq_offset;
+
+#ifdef CONFIG_DCQCN
+ if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
+ return ecore_get_cm_pq_idx_rl(p_hwfn, idx);
+#endif
+
+ first_llt_pq = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
+ pq_offset = ecore_get_cm_pq_offset_mtc(p_hwfn, idx, tc);
+
+ return first_llt_pq + pq_offset;
+}
+
+u16 ecore_get_cm_pq_idx_ll2(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+ switch (tc) {
+ case PURE_LB_TC:
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ case PKT_LB_TC:
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+ default:
+#ifdef CONFIG_DCQCN
+ /* In RoCE, when DCQCN is enabled, there are no OFLD pqs,
+ * get the first RL pq.
+ */
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn) &&
+ p_hwfn->p_rdma_info->roce.dcqcn_enabled)
+ return ecore_get_cm_pq_idx_rl(p_hwfn, 0);
+#endif
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ }
}
/* Functions for creating specific types of pqs */
@@ -2077,7 +2630,39 @@ static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
return;
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
- ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ ecore_init_qm_pq(p_hwfn, qm_info, ecore_get_offload_tc(p_hwfn),
+ PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mtc_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u8 num_tcs = ecore_init_qm_get_num_mtc_tcs(p_hwfn);
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 second_port = p_hwfn->port_id;
+ u8 first_port = p_hwfn->port_id;
+ u8 tc;
+
+ /* if lag is not active, init all pqs with p_hwfn's default port */
+ if (ecore_lag_is_active(p_hwfn)) {
+ first_port = p_hwfn->lag_info.first_port;
+ second_port = p_hwfn->lag_info.second_port;
+ }
+
+ /* override pq's TC if offload TC is set */
+ for (tc = 0; tc < num_tcs; tc++)
+ ecore_init_qm_pq_port(p_hwfn, qm_info,
+ ecore_is_offload_tc_set(p_hwfn) ?
+ p_hwfn->hw_info.offload_tc : tc,
+ PQ_INIT_SHARE_VPORT,
+ first_port);
+ if (ecore_lag_support(p_hwfn))
+ /* initialize second port's pqs even if lag is not active */
+ for (tc = 0; tc < num_tcs; tc++)
+ ecore_init_qm_pq_port(p_hwfn, qm_info,
+ ecore_is_offload_tc_set(p_hwfn) ?
+ p_hwfn->hw_info.offload_tc : tc,
+ PQ_INIT_SHARE_VPORT,
+ second_port);
}
static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
@@ -2088,7 +2673,36 @@ static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
return;
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
- ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ ecore_init_qm_mtc_pqs(p_hwfn);
+}
+
+static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+ ecore_init_qm_mtc_pqs(p_hwfn);
+}
+
+static void ecore_init_qm_offload_pq_group(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 idx;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_GRP))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_GRP, qm_info->num_pqs);
+
+ /* iterate over offload pqs */
+ for (idx = 0; idx < ecore_init_qm_get_group_count(p_hwfn); idx++) {
+ ecore_init_qm_pq_port(p_hwfn, qm_info, qm_info->offload_group[idx].tc,
+ PQ_INIT_SHARE_VPORT,
+ qm_info->offload_group[idx].port);
+ }
}
static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
@@ -2104,34 +2718,76 @@ static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
}
+static void ecore_init_qm_vf_single_rdma_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ if (!(pq_flags & PQ_FLAGS_VSR))
+ return;
+
+ /* ecore_init_qm_pq_params() is going to increment vport ID anyway,
+ * so keep it shared here so we don't waste a vport.
+ */
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VSR, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, ecore_get_offload_tc(p_hwfn),
+ PQ_INIT_VF_RL | PQ_INIT_SHARE_VPORT);
+}
+
static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
{
+ u16 vf_idx, num_vfs = ecore_init_qm_get_num_active_vfs(p_hwfn);
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+ u32 l2_pq_init_flags = PQ_INIT_VF_RL;
- if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ if (!(pq_flags & PQ_FLAGS_VFS))
return;
+ /* Mark PQ starting VF range */
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
- qm_info->num_vf_pqs = num_vfs;
- for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ /* If VFR is set, the L2 PQ will share the rate limiter with the rdma PQ */
+ if (pq_flags & PQ_FLAGS_VFR)
+ l2_pq_init_flags |= PQ_INIT_SHARE_VPORT;
+
+ /* Init the per PF PQs */
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
+ /* Per VF L2 PQ */
ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
- PQ_INIT_VF_RL);
+ l2_pq_init_flags);
+
+ /* Per VF Rdma PQ */
+ if (pq_flags & PQ_FLAGS_VFR)
+ ecore_init_qm_pq(p_hwfn, qm_info,
+ ecore_get_offload_tc(p_hwfn),
+ PQ_INIT_VF_RL);
+ }
}
static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
{
u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+ struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 port = p_hwfn->port_id, tc;
if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
return;
ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
- for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
- ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
- PQ_INIT_PF_RL);
+ tc = ecore_get_offload_tc(p_hwfn);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) {
+ /* if lag is present, set these pqs per port according to parity */
+ if (lag_info->is_master &&
+ lag_info->lag_type != ECORE_LAG_TYPE_NONE &&
+ lag_info->port_num > 0)
+ port = (pf_rls_idx % lag_info->port_num == 0) ?
+ lag_info->first_port : lag_info->second_port;
+
+ ecore_init_qm_pq_port(p_hwfn, qm_info, tc, PQ_INIT_PF_RL,
+ port);
+ }
}
static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
@@ -2154,24 +2810,68 @@ static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
/* pq for offloaded protocol */
ecore_init_qm_offload_pq(p_hwfn);
- /* done sharing vports */
+ /* low latency pq */
+ ecore_init_qm_low_latency_pq(p_hwfn);
+
+ /* per offload group pqs */
+ ecore_init_qm_offload_pq_group(p_hwfn);
+
+ /* Single VF-RDMA PQ, in case there weren't enough for each VF */
+ ecore_init_qm_vf_single_rdma_pq(p_hwfn);
+
+ /* PF done sharing vports, advance vport for first VF.
+ * Vport ID is incremented in a separate function because we can't
+ * rely on the last PF PQ to not use PQ_INIT_SHARE_VPORT, which can
+ * be different in every QM reconfiguration.
+ */
ecore_init_qm_advance_vport(p_hwfn);
/* pqs for vfs */
ecore_init_qm_vf_pqs(p_hwfn);
}
-/* compare values of getters against resources amounts */
-static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+/* Finds the optimal feature configuration to maximize PQ utilization */
+static enum _ecore_status_t ecore_init_qm_features(struct ecore_hwfn *p_hwfn)
{
- if (ecore_init_qm_get_num_vports(p_hwfn) >
- RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) {
DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
return ECORE_INVAL;
}
- if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
- DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ if (ecore_init_qm_get_num_pf_rls(p_hwfn) == 0) {
+ if (IS_ECORE_PACING(p_hwfn)) {
+ DP_ERR(p_hwfn, "No rate limiters available for PF\n");
+ return ECORE_INVAL;
+ }
+ }
+
+ /* For VF RDMA try to provide 2 PQs (separate PQ for RDMA) per VF */
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn) && ECORE_IS_VF_RDMA(p_hwfn) &&
+ ecore_init_qm_get_num_active_vfs(p_hwfn))
+ p_hwfn->qm_info.vf_rdma_en = true;
+
+ while (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ) ||
+ ecore_init_qm_get_num_rls(p_hwfn) > RESC_NUM(p_hwfn, ECORE_RL)) {
+ if (IS_ECORE_QM_VF_RDMA(p_hwfn)) {
+ p_hwfn->qm_info.vf_rdma_en = false;
+ DP_NOTICE(p_hwfn, false,
+ "PQ per rdma vf was disabled to reduce requested amount of pqs/rls. A single PQ for all rdma VFs will be used\n");
+ continue;
+ }
+
+ if (IS_ECORE_MULTI_TC_ROCE(p_hwfn)) {
+ p_hwfn->hw_info.multi_tc_roce_en = false;
+ DP_NOTICE(p_hwfn, false,
+ "multi-tc roce was disabled to reduce requested amount of pqs/rls\n");
+ continue;
+ }
+
+ DP_ERR(p_hwfn,
+ "Requested amount: %d pqs %d rls, Actual amount: %d pqs %d rls\n",
+ ecore_init_qm_get_num_pqs(p_hwfn),
+ ecore_init_qm_get_num_rls(p_hwfn),
+ RESC_NUM(p_hwfn, ECORE_PQ),
+ RESC_NUM(p_hwfn, ECORE_RL));
return ECORE_INVAL;
}
@@ -2189,35 +2889,38 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
struct init_qm_pq_params *pq;
int i, tc;
+ if (qm_info->pq_overflow)
+ return;
+
/* top level params */
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "qm init top level params: start_pq %d, start_vport %d,"
- " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
- qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
- qm_info->offload_pq, qm_info->pure_ack_pq);
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
- " num_vports %d, max_phys_tcs_per_port %d\n",
- qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
- qm_info->num_vf_pqs, qm_info->num_vports,
- qm_info->max_phys_tcs_per_port);
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
- " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
- qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
- qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
- qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "qm init params: pq_flags 0x%x, num_pqs %d, num_vf_pqs %d, start_pq %d\n",
+ ecore_get_pq_flags(p_hwfn), qm_info->num_pqs,
+ qm_info->num_vf_pqs, qm_info->start_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "qm init params: pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d\n",
+ qm_info->pf_rl_en, qm_info->pf_wfq_en,
+ qm_info->vport_rl_en, qm_info->vport_wfq_en);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "qm init params: num_vports %d, start_vport %d, num_rls %d, num_pf_rls %d, start_rl %d, pf_rl %d\n",
+ qm_info->num_vports, qm_info->start_vport,
+ qm_info->num_rls, qm_info->num_pf_rls,
+ qm_info->start_rl, qm_info->pf_rl);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "qm init params: pure_lb_pq %d, ooo_pq %d, pure_ack_pq %d, first_ofld_pq %d, first_llt_pq %d\n",
+ qm_info->pure_lb_pq, qm_info->ooo_pq, qm_info->pure_ack_pq,
+ qm_info->first_ofld_pq, qm_info->first_llt_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "qm init params: single_vf_rdma_pq %d, first_vf_pq %d, max_phys_tcs_per_port %d, pf_wfq %d\n",
+ qm_info->single_vf_rdma_pq, qm_info->first_vf_pq,
+ qm_info->max_phys_tcs_per_port, qm_info->pf_wfq);
/* port table */
for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
port = &qm_info->qm_port_params[i];
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "port idx %d, active %d, active_phys_tcs %d,"
- " num_pbf_cmd_lines %d, num_btb_blocks %d,"
- " reserved %d\n",
- i, port->active, port->active_phys_tcs,
- port->num_pbf_cmd_lines, port->num_btb_blocks,
- port->reserved);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+ i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines,
+ port->num_btb_blocks, port->reserved);
}
/* vport table */
@@ -2226,8 +2929,7 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, wfq %d, first_tx_pq_id [ ",
qm_info->start_vport + i, vport->wfq);
for (tc = 0; tc < NUM_OF_TCS; tc++)
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
- vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]);
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
}
@@ -2270,19 +2972,66 @@ static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
*/
-enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t __ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_can_sleep)
{
- struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- bool b_rc;
+ struct ecore_resc_unlock_params resc_unlock_params;
+ struct ecore_resc_lock_params resc_lock_params;
+ bool b_rc, b_mfw_unlock = true;
+ struct ecore_qm_info *qm_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
- /* multiple flows can issue qm reconf. Need to lock */
+ qm_info = &p_hwfn->qm_info;
+
+ /* Obtain MFW resource lock to sync with PFs with driver instances not
+ * covered by the static global qm_lock (monolithic, dpdk, PDA).
+ */
+ ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ ECORE_RESC_LOCK_QM_RECONF, false);
+ resc_lock_params.sleep_b4_retry = b_can_sleep;
+ rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+
+ /* If lock is taken we must abort. If MFW does not support the feature
+ * or took too long to acquire the lock we soldier on.
+ */
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL && rc != ECORE_TIMEOUT) {
+ DP_ERR(p_hwfn,
+ "QM reconf MFW lock is stuck. Failing reconf flow\n");
+ return ECORE_INVAL;
+ }
+
+ /* If the MFW doesn't support the lock, there is no need to unlock.
+ * There is no harm in trying, but we would need to tweak the rc value
+ * in case of ECORE_NOTIMPL, so it seems nicer to avoid it.
+ */
+ if (rc == ECORE_NOTIMPL)
+ b_mfw_unlock = false;
+
+ /* Multiple hwfn flows can issue qm reconf. Need to lock between hwfn
+ * flows.
+ */
OSAL_SPIN_LOCK(&qm_lock);
+ /* qm_info is invalid while this lock is taken */
+ OSAL_SPIN_LOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ rc = ecore_init_qm_features(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+ goto unlock;
+ }
+
/* initialize ecore's qm data structure */
ecore_init_qm_info(p_hwfn);
+ OSAL_SPIN_UNLOCK(&p_hwfn->qm_info.qm_info_lock);
+
+ if (qm_info->pq_overflow) {
+ rc = ECORE_INVAL;
+ goto unlock;
+ }
+
/* stop PF's qm queues */
b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
qm_info->start_pq, qm_info->num_pqs);
@@ -2291,9 +3040,6 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
goto unlock;
}
- /* clear the QM_PF runtime phase leftovers from previous init */
- ecore_init_clear_rt_data(p_hwfn);
-
/* prepare QM portion of runtime array */
ecore_qm_init_pf(p_hwfn, p_ptt, false);
@@ -2310,39 +3056,66 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
unlock:
OSAL_SPIN_UNLOCK(&qm_lock);
+ if (b_mfw_unlock)
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+
return rc;
}
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ return __ecore_qm_reconf(p_hwfn, p_ptt, true);
+}
+
+enum _ecore_status_t ecore_qm_reconf_intr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ return __ecore_qm_reconf(p_hwfn, p_ptt, false);
+}
+
static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 max_pqs_num, max_vports_num;
enum _ecore_status_t rc;
- rc = ecore_init_qm_sanity(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_info->qm_info_lock,
+ "qm_info_lock");
+ if (rc)
+ goto alloc_err;
+#endif
+ OSAL_SPIN_LOCK_INIT(&qm_info->qm_info_lock);
+
+ rc = ecore_init_qm_features(p_hwfn);
if (rc != ECORE_SUCCESS)
goto alloc_err;
+ max_pqs_num = (u16)RESC_NUM(p_hwfn, ECORE_PQ);
+ max_vports_num = (u16)RESC_NUM(p_hwfn, ECORE_VPORT);
+
qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct init_qm_pq_params) *
- ecore_init_qm_get_num_pqs(p_hwfn));
+ max_pqs_num);
if (!qm_info->qm_pq_params)
goto alloc_err;
qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct init_qm_vport_params) *
- ecore_init_qm_get_num_vports(p_hwfn));
+ sizeof(struct init_qm_vport_params) *
+ max_vports_num);
if (!qm_info->qm_vport_params)
goto alloc_err;
qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct init_qm_port_params) *
- p_hwfn->p_dev->num_ports_in_engine);
+ sizeof(struct init_qm_port_params) *
+ p_hwfn->p_dev->num_ports_in_engine);
if (!qm_info->qm_port_params)
goto alloc_err;
qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct ecore_wfq_data) *
- ecore_init_qm_get_num_vports(p_hwfn));
+ max_vports_num);
if (!qm_info->wfq_data)
goto alloc_err;
@@ -2355,25 +3128,256 @@ static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
}
/******************** End QM initialization ***************/
-enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+static enum _ecore_status_t ecore_lag_create_slave(struct ecore_hwfn *p_hwfn,
+ u8 master_pfid)
+{
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 slave_ppfid = 1; /* TODO: Need some sort of resource management function
+ * to return a free entry
+ */
+ enum _ecore_status_t rc;
+
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ rc = ecore_llh_map_ppfid_to_pfid(p_hwfn, p_ptt, slave_ppfid,
+ master_pfid);
+ ecore_ptt_release(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Protocol filter for RoCE v1 */
+ rc = ecore_llh_add_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+ ECORE_LLH_FILTER_ETHERTYPE, 0x8915,
+ ECORE_LLH_DONT_CARE);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Protocol filter for RoCE v2 */
+ return ecore_llh_add_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+ ECORE_LLH_FILTER_UDP_DEST_PORT,
+ ECORE_LLH_DONT_CARE, 4791);
+}
+
+static void ecore_lag_destroy_slave(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 slave_ppfid = 1; /* Need some sort of resource management function
+ * to return a free entry
+ */
+
+ /* Protocol filter for RoCE v1 */
+ ecore_llh_remove_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+ ECORE_LLH_FILTER_ETHERTYPE,
+ 0x8915, ECORE_LLH_DONT_CARE);
+
+ /* Protocol filter for RoCE v2 */
+ ecore_llh_remove_protocol_filter(p_hwfn->p_dev, slave_ppfid,
+ ECORE_LLH_FILTER_UDP_DEST_PORT,
+ ECORE_LLH_DONT_CARE, 4791);
+
+ if (p_ptt) {
+ ecore_llh_map_ppfid_to_pfid(p_hwfn, p_ptt, slave_ppfid,
+ p_hwfn->rel_pf_id);
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+/* Map ports:
+ * port 0/2 - 0/2
+ * port 1/3 - 1/3
+ * If port 0/2 is down, map both to port 1/3, if port 1/3 is down, map both to
+ * port 0/2, and if both are down, it doesn't really matter.
+ */
+static void ecore_lag_map_ports(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
+
+ /* for now support only 2 ports in the bond */
+ if (lag_info->master_pf == 0) {
+ lag_info->first_port = (lag_info->active_ports & (1 << 0)) ? 0 : 1;
+ lag_info->second_port = (lag_info->active_ports & (1 << 1)) ? 1 : 0;
+ } else if (lag_info->master_pf == 2) {
+ lag_info->first_port = (lag_info->active_ports & (1 << 2)) ? 2 : 3;
+ lag_info->second_port = (lag_info->active_ports & (1 << 3)) ? 3 : 2;
+ }
+ lag_info->port_num = LAG_MAX_PORT_NUM;
+}
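+
+/* Illustrative example: master PF 0 with only port 1 up (active_ports = 0x2)
+ * yields first_port = 1 and second_port = 1, so all LAG PQs are steered to
+ * the surviving port until port 0 comes back up.
+ */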
+
+/* The following function strongly assumes two ports only */
+static enum _ecore_status_t ecore_lag_create_master(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ enum _ecore_status_t rc;
+
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ ecore_lag_map_ports(p_hwfn);
+ rc = ecore_qm_reconf_intr(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+/* The following function strongly assumes two ports only */
+static enum _ecore_status_t ecore_lag_destroy_master(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ enum _ecore_status_t rc;
+
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ p_hwfn->qm_info.offload_group_count = 0;
+
+ rc = ecore_qm_reconf_intr(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_lag_create(struct ecore_dev *dev,
+ enum ecore_lag_type lag_type,
+ void (*link_change_cb)(void *cxt),
+ void *cxt,
+ u8 active_ports)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(dev);
+ u8 master_pfid = p_hwfn->abs_pf_id < 2 ? 0 : 2;
+
+ if (!ecore_lag_support(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "RDMA bonding will not be configured - only supported on AH devices on default mode\n");
+ return ECORE_INVAL;
+ }
+
+ /* TODO: Check Supported MFW */
+ p_hwfn->lag_info.lag_type = lag_type;
+ p_hwfn->lag_info.link_change_cb = link_change_cb;
+ p_hwfn->lag_info.cxt = cxt;
+ p_hwfn->lag_info.active_ports = active_ports;
+ p_hwfn->lag_info.is_master = p_hwfn->abs_pf_id == master_pfid;
+ p_hwfn->lag_info.master_pf = master_pfid;
+
+ /* Configure RX for LAG */
+ if (p_hwfn->lag_info.is_master)
+ return ecore_lag_create_master(p_hwfn);
+
+ return ecore_lag_create_slave(p_hwfn, master_pfid);
+}
+
+/* Modify the link state of a given port */
+enum _ecore_status_t ecore_lag_modify(struct ecore_dev *dev,
+ u8 port_id,
+ u8 link_active)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
- enum dbg_status debug_status = DBG_STATUS_OK;
- int i;
+ unsigned long active_ports;
+ u8 curr_active;
- if (IS_VF(p_dev)) {
- for_each_hwfn(p_dev, i) {
- rc = ecore_l2_alloc(&p_dev->hwfns[i]);
- if (rc != ECORE_SUCCESS)
- return rc;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Active ports changed before %x link active %x port_id=%d\n",
+ lag_info->active_ports, link_active, port_id);
+
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ active_ports = lag_info->active_ports;
+ curr_active = !!OSAL_TEST_BIT(port_id, &lag_info->active_ports);
+ if (curr_active != link_active) {
+ OSAL_TEST_AND_FLIP_BIT(port_id, &lag_info->active_ports);
+
+ /* Reconfigure QM according to active_ports */
+ if (lag_info->is_master) {
+ ecore_lag_map_ports(p_hwfn);
+ rc = ecore_qm_reconf_intr(p_hwfn, p_ptt);
}
- return rc;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Active ports changed before %lx after %x\n",
+ active_ports, lag_info->active_ports);
+ } else {
+ /* No change in active ports, triggered from port event */
+ /* call dcbx related code */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Nothing changed\n");
}
- p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
- sizeof(*p_dev->fw_data));
- if (!p_dev->fw_data)
- return ECORE_NOMEM;
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+enum _ecore_status_t ecore_lag_destroy(struct ecore_dev *dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(dev);
+ struct ecore_lag_info *lag_info = &p_hwfn->lag_info;
+
+ lag_info->lag_type = ECORE_LAG_TYPE_NONE;
+ lag_info->link_change_cb = OSAL_NULL;
+ lag_info->cxt = OSAL_NULL;
+
+ if (!lag_info->is_master) {
+ ecore_lag_destroy_slave(p_hwfn);
+ return ECORE_SUCCESS;
+ }
+
+ return ecore_lag_destroy_master(p_hwfn);
+}
+
+bool ecore_lag_is_active(struct ecore_hwfn *p_hwfn)
+{
+ return !(p_hwfn->lag_info.lag_type == ECORE_LAG_TYPE_NONE);
+}
+
+static enum _ecore_status_t ecore_cxt_calculate_tasks(struct ecore_hwfn *p_hwfn,
+ u32 *rdma_tasks,
+ u32 *eth_tasks,
+ u32 excess_tasks)
+{
+ u32 eth_tasks_tmp, rdma_tasks_tmp, reduced_tasks = 0;
+
+/* @DPDK */
+#define ECORE_ETH_MAX_TIDS 0 /* !CONFIG_ECORE_FS */
+ /* ETH tasks are used for GFS stats counters in AHP */
+ *eth_tasks = ECORE_IS_E5(p_hwfn->p_dev) ? ECORE_ETH_MAX_TIDS : 0;
+
+ /* No tasks requested. If there are no excess tasks, return.
+ * If there are excess tasks and the ILT lines need to be reduced,
+ * it can't be done by reducing the number of tasks, so return failure.
+ */
+ /* DPDK */
+ if (*eth_tasks == 0)
+ return excess_tasks ? ECORE_INVAL : ECORE_SUCCESS;
+
+ /* Can not reduce enough tasks */
+ if (excess_tasks > *eth_tasks)
+ return ECORE_INVAL;
+
+ eth_tasks_tmp = *eth_tasks;
+
+ while (reduced_tasks < excess_tasks) {
+ eth_tasks_tmp >>= 1;
+ /* DPDK */
+ reduced_tasks = (*eth_tasks) - (eth_tasks_tmp);
+ }
+
+ *eth_tasks = eth_tasks_tmp;
+
+ return ECORE_SUCCESS;
+}
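+
+/* Illustrative example (hypothetical, since ECORE_ETH_MAX_TIDS is 0 in this
+ * DPDK build): with *eth_tasks = 0x8000 and excess_tasks = 0x5000 the loop
+ * halves eth_tasks_tmp to 0x4000 and then 0x2000, at which point the
+ * reduction (0x6000) covers the excess and *eth_tasks becomes 0x2000.
+ */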
+
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+ u32 rdma_tasks = 0, eth_tasks = 0, excess_tasks = 0, line_count;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ if (IS_PF(p_dev)) {
+ p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->fw_data));
+ if (!p_dev->fw_data)
+ return ECORE_NOMEM;
+ }
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -2384,6 +3388,26 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
+ rc = ecore_l2_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ if (IS_VF(p_dev)) {
+ /* Allocating the entire spq struct although only the
+ * async_comp callbacks are used by VF.
+ */
+ rc = ecore_spq_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ continue;
+ }
+
+ /* ecore_iov_alloc must be called before ecore_cxt_set_pf_params() */
+ rc = ecore_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* First allocate the context manager structure */
rc = ecore_cxt_mngr_alloc(p_hwfn);
if (rc)
@@ -2392,7 +3416,8 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* Set the HW cid/tid numbers (in the context manager)
* Must be done prior to any further computations.
*/
- rc = ecore_cxt_set_pf_params(p_hwfn);
+ ecore_cxt_calculate_tasks(p_hwfn, &rdma_tasks, ð_tasks, 0);
+ rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks, eth_tasks);
if (rc)
goto alloc_err;
@@ -2404,9 +3429,70 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
ecore_init_qm_info(p_hwfn);
/* Compute the ILT client partition */
- rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
- if (rc)
- goto alloc_err;
+ rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ u32 ilt_page_size_kb =
+ ecore_cxt_get_ilt_page_size(p_hwfn) >> 10;
+
+ DP_NOTICE(p_hwfn, false,
+ "Requested %u lines but only %u are available. line size is %u KB; 0x%x rdma tasks 0x%x eth tasks; VF RDMA %s\n",
+ line_count, RESC_NUM(p_hwfn, ECORE_ILT),
+ ilt_page_size_kb, rdma_tasks, eth_tasks,
+ ECORE_IS_VF_RDMA(p_hwfn) ?
+ "enabled" : "disabled");
+
+ /* Calculate the number of tasks need to be reduced
+ * in order to have a successful ILT computation.
+ */
+ excess_tasks = ecore_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+ if (!excess_tasks)
+ goto alloc_err;
+
+ rc = ecore_cxt_calculate_tasks(p_hwfn, &rdma_tasks,
+ ð_tasks,
+ excess_tasks);
+ if (!rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Re-computing after reducing tasks (0x%x rdma tasks, 0x%x eth tasks)\n",
+ rdma_tasks, eth_tasks);
+ rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks,
+ eth_tasks);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_cxt_cfg_ilt_compute(p_hwfn,
+ &line_count);
+ if (rc)
+ DP_NOTICE(p_hwfn, false,
+ "Requested %u lines but only %u are available.\n",
+ line_count,
+ RESC_NUM(p_hwfn, ECORE_ILT));
+ }
+
+ if (rc && ECORE_IS_VF_RDMA(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false,
+ "Re-computing after disabling VF RDMA\n");
+ p_hwfn->pf_iov_info->rdma_enable = false;
+
+ /* After disabling VF RDMA, we must call
+ * ecore_cxt_set_pf_params(), in order to
+ * recalculate the VF cids/tids amount.
+ */
+ rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks,
+ eth_tasks);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ }
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "ILT compute failed - requested %u lines but only %u are available. Need to increase ILT line size (current size is %u KB)\n",
+ line_count, RESC_NUM(p_hwfn, ECORE_ILT), ilt_page_size_kb);
+ goto alloc_err;
+ }
+ }
/* CID map / ILT shadow table / T2
* The table sizes are determined by the computations above
@@ -2428,62 +3514,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
- rc = ecore_iov_alloc(p_hwfn);
- if (rc)
- goto alloc_err;
-
/* EQ */
n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
- if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
- /* Calculate the EQ size
- * ---------------------
- * Each ICID may generate up to one event at a time i.e.
- * the event must be handled/cleared before a new one
- * can be generated. We calculate the sum of events per
- * protocol and create an EQ deep enough to handle the
- * worst case:
- * - Core - according to SPQ.
- * - RoCE - per QP there are a couple of ICIDs, one
- * responder and one requester, each can
- * generate an EQE => n_eqes_qp = 2 * n_qp.
- * Each CQ can generate an EQE. There are 2 CQs
- * per QP => n_eqes_cq = 2 * n_qp.
- * Hence the RoCE total is 4 * n_qp or
- * 2 * num_cons.
- * - ENet - There can be up to two events per VF. One
- * for VF-PF channel and another for VF FLR
- * initial cleanup. The number of VFs is
- * bounded by MAX_NUM_VFS_BB, and is much
- * smaller than RoCE's so we avoid exact
- * calculation.
- */
- if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
- num_cons =
- ecore_cxt_get_proto_cid_count(
- p_hwfn,
- PROTOCOLID_ROCE,
- OSAL_NULL);
- num_cons *= 2;
- } else {
- num_cons = ecore_cxt_get_proto_cid_count(
- p_hwfn,
- PROTOCOLID_IWARP,
- OSAL_NULL);
- }
- n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
- } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
- num_cons =
- ecore_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
- OSAL_NULL);
- n_eqes += 2 * num_cons;
- }
-
- if (n_eqes > 0xFFFF) {
- DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements."
- "The maximum of a u16 chain is 0x%x\n",
- n_eqes, 0xFFFF);
- goto alloc_no_mem;
+ if (n_eqes > ECORE_EQ_MAX_ELEMENTS) {
+ DP_INFO(p_hwfn, "EQs maxing out at 0x%x elements\n",
+ ECORE_EQ_MAX_ELEMENTS);
+ n_eqes = ECORE_EQ_MAX_ELEMENTS;
}
rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
@@ -2494,14 +3530,11 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
- rc = ecore_l2_alloc(p_hwfn);
- if (rc != ECORE_SUCCESS)
- goto alloc_err;
-
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
@@ -2513,36 +3546,28 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
goto alloc_err;
}
- debug_status = OSAL_DBG_ALLOC_USER_DATA(p_hwfn,
- &p_hwfn->dbg_user_info);
- if (debug_status) {
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
DP_NOTICE(p_hwfn, false,
"Failed to allocate dbg user info structure\n");
- rc = (enum _ecore_status_t)debug_status;
goto alloc_err;
}
+ } /* hwfn loop */
- debug_status = OSAL_DBG_ALLOC_USER_DATA(p_hwfn,
- &p_hwfn->dbg_user_info);
- if (debug_status) {
- DP_NOTICE(p_hwfn, false,
- "Failed to allocate dbg user info structure\n");
- rc = (enum _ecore_status_t)debug_status;
+ if (IS_PF(p_dev)) {
+ rc = ecore_llh_alloc(p_dev);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to allocate memory for the llh_info structure\n");
goto alloc_err;
}
- } /* hwfn loop */
-
- rc = ecore_llh_alloc(p_dev);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_dev, true,
- "Failed to allocate memory for the llh_info structure\n");
- goto alloc_err;
}
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
- DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
+ DP_NOTICE(p_dev, false,
+ "Failed to allocate reset statistics\n");
goto alloc_no_mem;
}
@@ -2560,8 +3585,10 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
int i;
if (IS_VF(p_dev)) {
- for_each_hwfn(p_dev, i)
+ for_each_hwfn(p_dev, i) {
ecore_l2_setup(&p_dev->hwfns[i]);
+ }
+
return;
}
@@ -2592,36 +3619,37 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id, bool is_vf)
{
- u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ u32 count = FINAL_CLEANUP_POLL_CNT, poll_time = FINAL_CLEANUP_POLL_TIME;
+ u32 command = 0, addr;
enum _ecore_status_t rc = ECORE_TIMEOUT;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
- CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ (ECORE_IS_E4(p_hwfn->p_dev) && CHIP_REV_IS_SLOW(p_hwfn->p_dev))) {
DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
return ECORE_SUCCESS;
}
+
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ poll_time *= 10;
#endif
addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+ USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
- command |= X_FINAL_CLEANUP_AGG_INT <<
- SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
- command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
- command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
- command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
-
-/* Make sure notification is not set before initiating final cleanup */
+ SET_FIELD(command, SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX,
+ X_FINAL_CLEANUP_AGG_INT);
+ SET_FIELD(command, SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE, 1);
+ SET_FIELD(command, SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT, id);
+ SET_FIELD(command, SDM_OP_GEN_COMP_TYPE, SDM_COMP_TYPE_AGG_INT);
+ /* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
DP_NOTICE(p_hwfn, false,
- "Unexpected; Found final cleanup notification");
- DP_NOTICE(p_hwfn, false,
- " before initiating final cleanup\n");
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@@ -2633,13 +3661,12 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
/* Poll until completion */
while (!REG_RD(p_hwfn, addr) && count--)
- OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
+ OSAL_MSLEEP(poll_time);
if (REG_RD(p_hwfn, addr))
rc = ECORE_SUCCESS;
else
- DP_NOTICE(p_hwfn, true,
- "Failed to receive FW final cleanup notification\n");
+ DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n");
/* Cleanup afterwards */
REG_WR(p_hwfn, addr, 0);
@@ -2655,13 +3682,15 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
hw_mode |= 1 << MODE_BB;
} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
+ } else if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_E5;
} else {
DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
p_hwfn->p_dev->type);
return ECORE_INVAL;
}
- /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
+ /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/
switch (p_hwfn->p_dev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
@@ -2673,13 +3702,12 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
break;
default:
- DP_NOTICE(p_hwfn, true,
- "num_ports_in_engine = %d not supported\n",
+ DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n",
p_hwfn->p_dev->num_ports_in_engine);
return ECORE_INVAL;
}
- if (OSAL_GET_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
else
hw_mode |= 1 << MODE_MF_SI;
@@ -2711,8 +3739,6 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
}
#ifndef ASIC_ONLY
-#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0
-
/* MFW-replacement initializations for emulation */
static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,
struct ecore_ptt *p_ptt)
@@ -2728,17 +3754,23 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,
return ECORE_INVAL;
}
- pl_hv = ECORE_IS_BB(p_dev) ? 0x1 : 0x401;
+ if (ECORE_IS_BB(p_dev))
+ pl_hv = 0x1;
+ else if (ECORE_IS_AH(p_dev))
+ pl_hv = 0x401;
+ else /* E5 */
+ pl_hv = 0x4601;
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
- if (ECORE_IS_AH(p_dev))
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5, 0x3ffffff);
+ if (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
+ 0x3ffffff);
/* Initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
if (ECORE_IS_BB(p_dev))
ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
- if (ECORE_IS_AH(p_dev)) {
+ if (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev)) {
/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
p_dev->num_ports_in_engine >> 1);
@@ -2778,6 +3810,22 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,
*/
ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_PRTY_STS_WR_H_0, 0x8);
+ if (ECORE_IS_E5(p_dev)) {
+ /* Clock enable for CSCLK and PCE_CLK */
+ ecore_wr(p_hwfn, p_ptt,
+ PGL2PEM_REG_PEM_NATIVE_E5 + PEM_REG_PEM_CLK_EN_E5,
+ 0x0);
+
+ /* Allow the traffic to be sent out the PCIe link */
+ ecore_wr(p_hwfn, p_ptt,
+ PGL2PEM_REG_PEM_NATIVE_E5 + PEM_REG_PEM_DIS_PORT_E5,
+ 0x1);
+
+ /* Enable zone_b access of VFs to SDM */
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_DISABLE_ZONE_B_ACCESS_OF_VF_E5, 0x0);
+ }
+
/* Configure PSWRQ2_REG_WR_MBS0 according to the MaxPayloadSize field in
* the PCI configuration space. The value is common for all PFs, so it
* is okay to do it according to the first loading PF.
@@ -2796,6 +3844,18 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,
/* Configure the PGLUE_B to discard mode */
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_DISCARD_NBLOCK, 0x3f);
+ /* Workaround for a HW bug in E5 to allow VFs of PFs > 0 */
+ if (ECORE_IS_E5(p_dev)) {
+ u32 addr, timer_ctl;
+
+ addr = PGLCS_REG_PGL_CS + PCIEIP_REG_PCIEEP_TIMER_CTL_E5;
+ timer_ctl = ecore_rd(p_hwfn, p_ptt, addr);
+ timer_ctl = (timer_ctl &
+ ~PCIEIP_REG_PCIEEP_TIMER_CTL_MFUNCN_E5) |
+ (0xf & PCIEIP_REG_PCIEEP_TIMER_CTL_MFUNCN_E5);
+ ecore_wr(p_hwfn, p_ptt, addr, timer_ctl);
+ }
+
return ECORE_SUCCESS;
}
#endif
@@ -2827,7 +3887,8 @@ static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
continue;
ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_block->function_id, 0, 0);
+ p_block->function_id,
+ 0, 0);
STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
sb_entry);
}
@@ -2898,7 +3959,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
ecore_init_cau_rt_data(p_dev);
@@ -2975,6 +4036,26 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
/* pretend to original PF */
ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+#ifndef ASIC_ONLY
+ /* Clear the FIRST_VF register for all PFs */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
+ ECORE_IS_E5(p_hwfn->p_dev) &&
+ IS_PF_SRIOV(p_hwfn)) {
+ u8 pf_id;
+
+ for (pf_id = 0; pf_id < MAX_NUM_PFS_E5; pf_id++) {
+ /* pretend to the relevant PF */
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pf_id));
+ ecore_wr(p_hwfn, p_ptt, PGLCS_REG_FIRST_VF_K2_E5, 0);
+ }
+
+ /* pretend to the original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, p_hwfn->rel_pf_id));
+ }
+#endif
+
return rc;
}
@@ -2984,9 +4065,12 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
#define PMEG_IF_BYTE_COUNT 8
-static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 addr, u64 data, u8 reg_type, u8 port)
+static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u64 data,
+ u8 reg_type,
+ u8 port)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
@@ -2998,7 +4082,8 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
(ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
- 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
+ 0xffff00fe) |
+ (8 << PMEG_IF_BYTE_COUNT));
ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
(reg_type << 25) | (addr << 8) | port);
ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
@@ -3023,36 +4108,34 @@ static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
{
u8 loopback = 0, port = p_hwfn->port_id * 2;
- /* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
- port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG,
+ (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE: 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
- /* XLMAC: SOFT RESET */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
- /* XLMAC: Port Speed >= 10Gbps */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
- /* XLMAC: Max Size */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
+ 0x40, 0, port); /* XLMAC: SOFT RESET */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE,
+ 0x40, 0, port); /* XLMAC: Port Speed >= 10Gbps */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE,
+ 0x3fff, 0, port); /* XLMAC: Max Size */
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
0, port);
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL,
+ 0x7c000, 0, port);
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
0x30ffffc000ULL, 0, port);
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
- port); /* XLMAC: TX_EN, RX_EN */
- /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
- 0x1003 | (loopback << 2), 0, port);
- /* Enabled Parallel PFC interface */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
-
- /* XLPORT port enable */
- ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2),
+ 0, port); /* XLMAC: TX_EN, RX_EN */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
+ 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG,
+ 1, 0, port); /* Enabled Parallel PFC interface */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG,
+ 0xf, 1, port); /* XLPORT port enable */
}
static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt)
{
u32 mac_base, mac_config_val = 0xa853;
u8 port = p_hwfn->port_id;
@@ -3089,6 +4172,187 @@ static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
mac_config_val);
}
+static struct {
+ u32 offset;
+ u32 value;
+} ecore_e5_emul_mac_init[] = {
+ {RCE_4CH_REG_MAC_RS_TX_0_MAC_TX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_TX_1_MAC_TX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_TX_2_MAC_TX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_TX_3_MAC_TX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_RX_0_RX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_MAX_PKT_LEN_E5, 0x3fff},
+ {RCE_4CH_REG_APP_FIFO_0_TX_SOF_THRESH_E5, 0x3},
+ {RCE_4CH_REG_MAC_RS_TX_0_TX_MAC_CFG_E5, 0x68094e5},
+ {RCE_4CH_REG_MAC_RS_TX_0_STN_ADD_47_32_E5, 0xa868},
+ {RCE_4CH_REG_MAC_RS_TX_0_STN_ADD_31_0_E5, 0xaf88776f},
+ {RCE_4CH_REG_MAC_RS_TX_0_TX_MAC_FC_CONFIG0_E5, 0x7f97},
+ {RCE_4CH_REG_MAC_RS_TX_0_TX_MAC_FC_CONFIG2_E5, 0x11},
+ {RCE_4CH_REG_MAC_RS_TX_0_TX_START_PRE_SMD_E5, 0xb37f4cd5},
+ {RCE_4CH_REG_MAC_RS_TX_0_RS_CTRL_E5, 0x8},
+ {RCE_4CH_REG_MAC_RS_TX_0_TX_IS_FIFO_THRESH_E5, 0x83},
+ {RCE_4CH_REG_MAC_RS_RX_0_RX_MAC_CFG_E5, 0x15d5},
+ {RCE_4CH_REG_MAC_RS_RX_0_RX_MAC_FC_CONFIG_E5, 0xff08},
+ {RCE_4CH_REG_MAC_RS_RX_0_RS_CTRL_E5, 0x4},
+ {RCE_4CH_REG_PCS_RX_0_RX_PCS_CFG_E5, 0x1808a03},
+ {RCE_4CH_REG_PCS_RX_0_RX_PCS_CFG2_E5, 0x2060c202},
+ {RCE_4CH_REG_PCS_RX_0_RX_PCS_ALIGN_CFG_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_0_TX_PCS_CFG_E5, 0x8a03},
+ {RCE_4CH_REG_PCS_TX_0_TX_PCS_CFG2_E5, 0x5a0610},
+ {RCE_4CH_REG_PCS_TX_0_TX_PCS_CTRL_ALIGN_PERIOD_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_0_TX_PCS_CTRL_ENCODER_E5, 0xc00000},
+ {RCE_4CH_REG_MAC_RS_RX_0_RX_EXPRESS_SMD0_3_E5, 0xffffffd5},
+ {RCE_4CH_REG_APP_FIFO_1_TX_SOF_THRESH_E5, 0x3},
+ {RCE_4CH_REG_MAC_RS_TX_1_TX_MAC_CFG_E5, 0x68494e5},
+ {RCE_4CH_REG_MAC_RS_TX_1_STN_ADD_47_32_E5, 0x44b2},
+ {RCE_4CH_REG_MAC_RS_TX_1_STN_ADD_31_0_E5, 0x20123eee},
+ {RCE_4CH_REG_MAC_RS_TX_1_TX_MAC_FC_CONFIG0_E5, 0x7f97},
+ {RCE_4CH_REG_MAC_RS_TX_1_TX_MAC_FC_CONFIG2_E5, 0x11},
+ {RCE_4CH_REG_MAC_RS_TX_1_TX_START_PRE_SMD_E5, 0xb37f4cd5},
+ {RCE_4CH_REG_MAC_RS_TX_1_RS_CTRL_E5, 0x8},
+ {RCE_4CH_REG_MAC_RS_TX_1_TX_IS_FIFO_THRESH_E5, 0x83},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_MAC_CFG_E5, 0x415d5},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_MAC_FC_CONFIG_E5, 0xff08},
+ {RCE_4CH_REG_MAC_RS_RX_1_RS_CTRL_E5, 0x4},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_START_PRE_SMD_VC0_E5, 0xffffffff},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_START_PRE_SMD_VC1_E5, 0xb37f4ce6},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_CONT_FRAG_SMD_VC0_E5, 0xffffffff},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_CONT_FRAG_SMD_VC1_E5, 0x2a9e5261},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_EXP_VER_SMD_VC0_3_E5, 0xffff07ff},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_EXP_RES_SMD_VC0_3_E5, 0xffff19ff},
+ {RCE_4CH_REG_PCS_RX_1_RX_PCS_CFG_E5, 0x1808a03},
+ {RCE_4CH_REG_PCS_RX_1_RX_PCS_CFG2_E5, 0x2060c202},
+ {RCE_4CH_REG_PCS_RX_1_RX_PCS_ALIGN_CFG_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_1_TX_PCS_CFG_E5, 0x8a03},
+ {RCE_4CH_REG_PCS_TX_1_TX_PCS_CFG2_E5, 0x5a0610},
+ {RCE_4CH_REG_PCS_TX_1_TX_PCS_CTRL_ALIGN_PERIOD_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_1_TX_PCS_CTRL_ENCODER_E5, 0xc00000},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_EXPRESS_SMD0_3_E5, 0xffffd5ff},
+ {RCE_4CH_REG_APP_FIFO_2_TX_SOF_THRESH_E5, 0x3},
+ {RCE_4CH_REG_MAC_RS_TX_2_TX_MAC_CFG_E5, 0x68894e5},
+ {RCE_4CH_REG_MAC_RS_TX_2_STN_ADD_47_32_E5, 0xf1c0},
+ {RCE_4CH_REG_MAC_RS_TX_2_STN_ADD_31_0_E5, 0xabc40c51},
+ {RCE_4CH_REG_MAC_RS_TX_2_TX_MAC_FC_CONFIG0_E5, 0x7f97},
+ {RCE_4CH_REG_MAC_RS_TX_2_TX_MAC_FC_CONFIG2_E5, 0x15},
+ {RCE_4CH_REG_MAC_RS_TX_2_TX_START_PRE_SMD_E5, 0xb37f4cd5},
+ {RCE_4CH_REG_MAC_RS_TX_2_RS_CTRL_E5, 0x8},
+ {RCE_4CH_REG_MAC_RS_TX_2_TX_IS_FIFO_THRESH_E5, 0x83},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_MAC_CFG_E5, 0x815d5},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_MAC_FC_CONFIG_E5, 0xff08},
+ {RCE_4CH_REG_MAC_RS_RX_2_RS_CTRL_E5, 0x4},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_START_PRE_SMD_VC0_E5, 0xffffffff},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_START_PRE_SMD_VC2_E5, 0xb37f4ce6},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_CONT_FRAG_SMD_VC0_E5, 0xffffffff},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_CONT_FRAG_SMD_VC2_E5, 0x2a9e5261},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_EXP_VER_SMD_VC0_3_E5, 0xff07ffff},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_EXP_RES_SMD_VC0_3_E5, 0xff19ffff},
+ {RCE_4CH_REG_PCS_RX_2_RX_PCS_CFG_E5, 0x1808a03},
+ {RCE_4CH_REG_PCS_RX_2_RX_PCS_CFG2_E5, 0x2060c202},
+ {RCE_4CH_REG_PCS_RX_2_RX_PCS_ALIGN_CFG_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_2_TX_PCS_CFG_E5, 0x8a03},
+ {RCE_4CH_REG_PCS_TX_2_TX_PCS_CFG2_E5, 0x5a0610},
+ {RCE_4CH_REG_PCS_TX_2_TX_PCS_CTRL_ALIGN_PERIOD_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_2_TX_PCS_CTRL_ENCODER_E5, 0xc00000},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_EXPRESS_SMD0_3_E5, 0xffd5ffff},
+ {RCE_4CH_REG_APP_FIFO_3_TX_SOF_THRESH_E5, 0x3},
+ {RCE_4CH_REG_MAC_RS_TX_3_TX_MAC_CFG_E5, 0x68c94e5},
+ {RCE_4CH_REG_MAC_RS_TX_3_STN_ADD_47_32_E5, 0xa235},
+ {RCE_4CH_REG_MAC_RS_TX_3_STN_ADD_31_0_E5, 0x23650a9d},
+ {RCE_4CH_REG_MAC_RS_TX_3_TX_MAC_FC_CONFIG0_E5, 0x7f97},
+ {RCE_4CH_REG_MAC_RS_TX_3_TX_MAC_FC_CONFIG2_E5, 0x1f},
+ {RCE_4CH_REG_MAC_RS_TX_3_TX_START_PRE_SMD_E5, 0xb37f4cd5},
+ {RCE_4CH_REG_MAC_RS_TX_3_RS_CTRL_E5, 0x8},
+ {RCE_4CH_REG_MAC_RS_TX_3_TX_IS_FIFO_THRESH_E5, 0x83},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_MAC_CFG_E5, 0xc15d5},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_MAC_FC_CONFIG_E5, 0xff08},
+ {RCE_4CH_REG_MAC_RS_RX_3_RS_CTRL_E5, 0x4},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_START_PRE_SMD_VC0_E5, 0xffffffff},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_START_PRE_SMD_VC3_E5, 0xb37f4ce6},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_CONT_FRAG_SMD_VC0_E5, 0xffffffff},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_CONT_FRAG_SMD_VC3_E5, 0x2a9e5261},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_EXP_VER_SMD_VC0_3_E5, 0x7ffffff},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_EXP_RES_SMD_VC0_3_E5, 0x19ffffff},
+ {RCE_4CH_REG_PCS_RX_3_RX_PCS_CFG_E5, 0x1808a03},
+ {RCE_4CH_REG_PCS_RX_3_RX_PCS_CFG2_E5, 0x2060c202},
+ {RCE_4CH_REG_PCS_RX_3_RX_PCS_ALIGN_CFG_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_3_TX_PCS_CFG_E5, 0x8a03},
+ {RCE_4CH_REG_PCS_TX_3_TX_PCS_CFG2_E5, 0x5a0610},
+ {RCE_4CH_REG_PCS_TX_3_TX_PCS_CTRL_ALIGN_PERIOD_E5, 0xa00},
+ {RCE_4CH_REG_PCS_TX_3_TX_PCS_CTRL_ENCODER_E5, 0xc00000},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_EXPRESS_SMD0_3_E5, 0xd5ffffff},
+ {RCE_4CH_REG_MAC_TOP_TS_TO_PC_MAP_E5, 0x650},
+ {RCE_4CH_REG_PCS_TOP_PCS_SEL_MAP_E5, 0xb6d249},
+ {RCE_4CH_REG_PCS_TOP_PMA_TX_FIFO_CFG3_E5, 0x5294a},
+ {RCE_4CH_REG_PCS_TOP_RX_PMA_SEL_MAP1_E5, 0x18022},
+ {RCE_4CH_REG_PCS_TOP_TX_PMA_SEL_MAP1_E5, 0x443},
+ {RCE_4CH_REG_MAC_RS_TX_0_TX_MAC_CFG_E5, 0x68094e6},
+ {RCE_4CH_REG_MAC_RS_RX_0_RX_MAC_CFG_E5, 0x15d6},
+ {RCE_4CH_REG_PCS_RX_0_RX_PCS_CFG_E5, 0x1808a02},
+ {RCE_4CH_REG_PCS_TX_0_TX_PCS_CFG_E5, 0x8a02},
+ {RCE_4CH_REG_MAC_RS_TX_1_TX_MAC_CFG_E5, 0x68494e6},
+ {RCE_4CH_REG_MAC_RS_RX_1_RX_MAC_CFG_E5, 0x415d6},
+ {RCE_4CH_REG_PCS_RX_1_RX_PCS_CFG_E5, 0x1808a02},
+ {RCE_4CH_REG_PCS_TX_1_TX_PCS_CFG_E5, 0x8a02},
+ {RCE_4CH_REG_MAC_RS_TX_2_TX_MAC_CFG_E5, 0x68894e6},
+ {RCE_4CH_REG_MAC_RS_RX_2_RX_MAC_CFG_E5, 0x815d6},
+ {RCE_4CH_REG_PCS_RX_2_RX_PCS_CFG_E5, 0x1808a02},
+ {RCE_4CH_REG_PCS_TX_2_TX_PCS_CFG_E5, 0x8a02},
+ {RCE_4CH_REG_MAC_RS_TX_3_TX_MAC_CFG_E5, 0x68c94e6},
+ {RCE_4CH_REG_MAC_RS_RX_3_RX_MAC_CFG_E5, 0xc15d6},
+ {RCE_4CH_REG_PCS_RX_3_RX_PCS_CFG_E5, 0x1808a02},
+ {RCE_4CH_REG_PCS_TX_3_TX_PCS_CFG_E5, 0x8a02},
+ {RCE_4CH_REG_PCS_TOP_PCS_RESET_E5, 0xd},
+ {RCE_4CH_REG_PCS_TOP_PCS_RESET_E5, 0x5},
+ {RCE_4CH_REG_PCS_TOP_PCS_RESET_E5, 0x4},
+ {RCE_4CH_REG_PCS_TOP_PCS_RESET_E5, 0x0},
+ {RCE_4CH_REG_PCS_TOP_PMA_RX_FIFO_CFG3_E5, 0xf0},
+ {RCE_4CH_REG_PCS_TOP_PMA_TX_FIFO_CFG5_E5, 0xf0},
+ {RCE_4CH_REG_APP_FIFO_0_APP_FIFO_CFG_E5, 0x43c},
+ {RCE_4CH_REG_APP_FIFO_1_APP_FIFO_CFG_E5, 0x43c},
+ {RCE_4CH_REG_APP_FIFO_2_APP_FIFO_CFG_E5, 0x43c},
+ {RCE_4CH_REG_APP_FIFO_3_APP_FIFO_CFG_E5, 0x43c},
+};
+
+static void ecore_emul_link_init_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val;
+
+ /* Init the MAC if it exists and disable external loopback.
+ * Without a MAC - configure a loopback in the NIG.
+ */
+ if (p_hwfn->p_dev->b_is_emul_mac) {
+ u32 i, base = NWM_REG_MAC_E5;
+ osal_size_t size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "E5 emulation: Initializing the MAC\n");
+
+ size = sizeof(ecore_e5_emul_mac_init) /
+ sizeof(ecore_e5_emul_mac_init[0]);
+ for (i = 0; i < size; i++)
+ ecore_wr(p_hwfn, p_ptt,
+ base + ecore_e5_emul_mac_init[i].offset,
+ ecore_e5_emul_mac_init[i].value);
+
+ /* MISCS_REG_ECO_RESERVED[31:30]: enable/disable ext loopback */
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
+ val &= ~0xc0000000;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED, val);
+ } else {
+ u8 port_mode = 0x2;
+
+ /* NIG_REG_ECO_RESERVED[1:0]: NIG loopback.
+ * '2' - ports "0 <-> 1" and "2 <-> 3".
+ */
+ val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ECO_RESERVED);
+ val = (val & ~0x3) | port_mode;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ECO_RESERVED, val);
+ }
+}
+
static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -3100,8 +4364,10 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_emul_link_init_bb(p_hwfn, p_ptt);
- else
+ else if (ECORE_IS_AH(p_hwfn->p_dev))
ecore_emul_link_init_ah(p_hwfn, p_ptt);
+ else /* E5 */
+ ecore_emul_link_init_e5(p_hwfn, p_ptt);
return;
}
@@ -3110,15 +4376,15 @@ static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 port)
{
int port_offset = port ? 0x800 : 0;
- u32 xmac_rxctrl = 0;
+ u32 xmac_rxctrl = 0;
/* Reset of XMAC */
/* FIXME: move to common start */
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
- MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
OSAL_MSLEEP(1);
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
- MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
@@ -3152,28 +4418,45 @@ static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
}
#endif
-static u32 ecore_hw_norm_region_conn(struct ecore_hwfn *p_hwfn)
+static u32 ecore_hw_get_norm_region_conn(struct ecore_hwfn *p_hwfn)
{
u32 norm_region_conn;
- /* The order of CIDs allocation is according to the order of
+ /* In E4, the order of CIDs allocation is according to the order of
* 'enum protocol_type'. Therefore, the number of CIDs for the normal
* region is calculated based on the CORE CIDs, in case of non-ETH
* personality, and otherwise - based on the ETH CIDs.
+ * In E5 there is an exception - the CORE CIDs are allocated first.
+ * Therefore, the calculation should consider every possible
+ * personality.
*/
norm_region_conn =
- ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE,
+ ECORE_CXT_PF_CID) +
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
OSAL_NULL) +
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
OSAL_NULL);
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ norm_region_conn +=
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ISCSI,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_FCOE,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ROCE,
+ OSAL_NULL);
+ }
+
return norm_region_conn;
}
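/* Illustrative example (assumed values, not taken from the patch): on an E4
 * ETH personality with a CORE CID start of 0, 4 CORE CIDs and 64 ETH CIDs,
 * the function above returns 0 + 4 + 64 = 68 normal region connections.
 * On E5 the ISCSI, FCOE and ROCE CID counts are added on top of that sum.
 */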
-static enum _ecore_status_t
+enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+ struct ecore_ptt *p_ptt,
+ struct ecore_dpi_info *dpi_info,
+ u32 pwm_region_size,
+ u32 n_cpus)
{
u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
@@ -3202,20 +4485,16 @@ ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
*/
n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
- dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) &
- ~(OSAL_PAGE_SIZE - 1);
+ dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & ~(OSAL_PAGE_SIZE - 1);
dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
- /* Update hwfn */
- p_hwfn->dpi_size = dpi_page_size;
- p_hwfn->dpi_count = dpi_count;
-
- /* Update registers */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+ dpi_info->dpi_size = dpi_page_size;
+ dpi_info->dpi_count = dpi_count;
+ ecore_wr(p_hwfn, p_ptt, dpi_info->dpi_bit_shift_addr, dpi_bit_shift);
if (dpi_count < min_dpis)
return ECORE_NORESOURCES;
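/* Illustrative sketch of the sizing math above, assuming ECORE_WID_SIZE is
 * 1024, ECORE_MIN_WIDS is 4 and a 4 KB OSAL_PAGE_SIZE:
 *
 *   n_cpus = 6 -> n_wids = 6 -> roundup_pow_of_two = 8
 *   dpi_page_size = 1024 * 8 = 8192 (already 4 KB aligned)
 *   dpi_bit_shift = log2(8192 / 4096) = 1
 *   pwm_region_size = 0x200000 -> dpi_count = 0x200000 / 8192 = 256
 */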
@@ -3223,15 +4502,11 @@ ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum ECORE_ROCE_EDPM_MODE {
- ECORE_ROCE_EDPM_MODE_ENABLE = 0,
- ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
- ECORE_ROCE_EDPM_MODE_DISABLE = 2,
-};
-
-bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn)
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn,
+ struct ecore_common_dpm_info *dpm_info)
{
- if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ if (p_hwfn->dcbx_no_edpm || dpm_info->db_bar_no_edpm ||
+ dpm_info->mfw_no_edpm)
return false;
return true;
@@ -3241,20 +4516,24 @@ static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_common_dpm_info *dpm_info;
u32 norm_region_conn, min_addr_reg1;
+ struct ecore_dpi_info *dpi_info;
u32 pwm_regsize, norm_regsize;
- u32 db_bar_size, n_cpus;
+ u32 db_bar_size, n_cpus = 1;
u32 roce_edpm_mode;
- u32 pf_dems_shift;
+ u32 dems_shift;
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+
+ /* In CMT, doorbell bar should be split over both engines */
if (ECORE_IS_CMT(p_hwfn->p_dev))
db_bar_size /= 2;
/* Calculate doorbell regions
- * -----------------------------------
+ * --------------------------
* The doorbell BAR is made of two regions. The first is called normal
* region and the second is called PWM region. In the normal region
* each ICID has its own set of addresses so that writing to that
@@ -3267,7 +4546,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
* connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
* in units of 4,096 bytes.
*/
- norm_region_conn = ecore_hw_norm_region_conn(p_hwfn);
+ norm_region_conn = ecore_hw_get_norm_region_conn(p_hwfn);
norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * norm_region_conn,
OSAL_PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
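/* Worked example (assumed values): with ECORE_PF_DEMS_SIZE = 4, 68 normal
 * region connections and a 4 KB OSAL_PAGE_SIZE, norm_regsize =
 * ROUNDUP(4 * 68, 4096) = 4096 and min_addr_reg1 = 4096 / 4096 = 1, i.e. the
 * PWM region starts one 4 KB unit into the doorbell BAR.
 */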
@@ -3288,15 +4567,32 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
return ECORE_NORESOURCES;
}
- /* Calculate number of DPIs */
- roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ dpi_info = &p_hwfn->dpi_info;
+ dpm_info = &p_hwfn->dpm_info;
+
+ dpi_info->dpi_bit_shift_addr = DORQ_REG_PF_DPI_BIT_SHIFT;
+ dpm_info->vf_cfg = false;
+
+ if (p_hwfn->roce_edpm_mode <= ECORE_ROCE_EDPM_MODE_DISABLE) {
+ roce_edpm_mode = p_hwfn->roce_edpm_mode;
+ } else {
+ DP_ERR(p_hwfn->p_dev,
+ "roce edpm mode was configured to an illegal value of %u. Resetting it to 0-Enable EDPM if BAR size is adequate\n",
+ p_hwfn->roce_edpm_mode);
+
+ /* Reset this field in order to save this check for a VF */
+ p_hwfn->roce_edpm_mode = 0;
+ roce_edpm_mode = 0;
+ }
+
if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) {
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
n_cpus = OSAL_NUM_CPUS();
- rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, dpi_info,
+ pwm_regsize, n_cpus);
}
cond = ((rc != ECORE_SUCCESS) &&
@@ -3308,38 +4604,55 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
* allocated a WID per CPU.
*/
n_cpus = 1;
- rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
-
- /* If we entered this flow due to DCBX then the DPM register is
- * already configured.
- */
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, dpi_info,
+ pwm_regsize, n_cpus);
}
- DP_INFO(p_hwfn,
- "doorbell bar: normal_region_size=%d, pwm_region_size=%d",
- norm_regsize, pwm_regsize);
- DP_INFO(p_hwfn,
- " dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
- p_hwfn->dpi_size, p_hwfn->dpi_count,
- (!ecore_edpm_enabled(p_hwfn)) ?
- "disabled" : "enabled");
+ dpi_info->wid_count = (u16)n_cpus;
/* Check return codes from above calls */
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
- "Failed to allocate enough DPIs\n");
+ "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via user configuration min_dpis or by disabling EDPM via user configuration roce_edpm_mode\n",
+ dpi_info->dpi_count,
+ p_hwfn->pf_params.rdma_pf_params.min_dpis,
+ ECORE_MIN_DPIS);
+ DP_ERR(p_hwfn,
+ "PF doorbell bar: normal_region_size=0x%x, pwm_region_size=0x%x, dpi_size=0x%x, dpi_count=%d, roce_edpm=%s, page_size=%u\n",
+ norm_regsize, pwm_regsize, dpi_info->dpi_size,
+ dpi_info->dpi_count,
+ (!ecore_edpm_enabled(p_hwfn, dpm_info)) ?
+ "disabled" : "enabled", OSAL_PAGE_SIZE);
+
return ECORE_NORESOURCES;
}
+ DP_INFO(p_hwfn,
+ "PF doorbell bar: db_bar_size=0x%x normal_region_size=0x%x, pwm_region_size=0x%x, dpi_size=0x%x, dpi_count=%d, roce_edpm=%s, page_size=%u\n",
+ db_bar_size, norm_regsize, pwm_regsize, dpi_info->dpi_size,
+ dpi_info->dpi_count, (!ecore_edpm_enabled(p_hwfn, dpm_info)) ?
+ "disabled" : "enabled", OSAL_PAGE_SIZE);
+
/* Update hwfn */
- p_hwfn->dpi_start_offset = norm_regsize;
+ /* dpi_start_offset is later used to calculate the doorbell address */
+ dpi_info->dpi_start_offset = norm_regsize;
- /* Update registers */
- /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
- pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ /* Update the DORQ registers.
+ * DEMS size is configured as log2 of DWORDs, hence the division by 4.
+ */
+
+ dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, dems_shift);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ dems_shift = OSAL_LOG2(ECORE_VF_DEMS_SIZE / 4);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_ICID_BIT_SHIFT_NORM,
+ dems_shift);
+ }
+
return ECORE_SUCCESS;
}
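/* Worked example for the DEMS shifts written above (assumed sizes): with
 * ECORE_PF_DEMS_SIZE = 4 bytes the PF shift is log2(4 / 4) = 0, and with
 * ECORE_VF_DEMS_SIZE = 32 bytes the E5 VF shift is log2(32 / 4) = 3, since
 * the DORQ expects the DEMS size as log2 of DWORDs.
 */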
@@ -3389,7 +4702,7 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
* The ppfid should be set in the vector, except in BB which has
* a bug in the LLH where the ppfid is actually engine based.
*/
- if (OSAL_GET_BIT(ECORE_MF_NEED_DEF_PF, &p_dev->mf_bits)) {
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_dev->mf_bits)) {
u8 pf_id = p_hwfn->rel_pf_id;
if (!ECORE_IS_BB(p_dev))
@@ -3411,8 +4724,8 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
{
u8 rel_pf_id = p_hwfn->rel_pf_id;
u32 prs_reg;
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u16 ctrl;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ctrl = 0;
int pos;
if (p_hwfn->mcp_info) {
@@ -3444,13 +4757,12 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
/* Enable classification by MAC if needed */
if (hw_mode & (1 << MODE_MF_SI)) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "Configuring TAGMAC_CLS_TYPE\n");
- STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
- 1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn,
+ NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
}
- /* Protocl Configuration - @@@TBD - should we set 0 otherwise? */
+ /* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -3480,30 +4792,50 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
/* Pure runtime initializations - directly to the HW */
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
- /* PCI relaxed ordering causes a decrease in the performance on some
- * systems. Till a root cause is found, disable this attribute in the
- * PCI config space.
+ #if 0 /* @DPDK */
+ /* PCI relaxed ordering is generally beneficial for performance,
+ * but can hurt performance or lead to instability on some setups.
+ * If management FW is taking care of it go with that, otherwise
+ * disable to be on the safe side.
*/
- /* Not in use @DPDK
- * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
- * if (!pos) {
- * DP_NOTICE(p_hwfn, true,
- * "Failed to find the PCIe Cap\n");
- * return ECORE_IO;
- * }
- * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
- * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
- * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);
- */
+ pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+ if (!pos) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to find the PCI Express Capability structure in the PCI config space\n");
+ return ECORE_IO;
+ }
+
+ OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+
+ if (p_params->pci_rlx_odr_mode == ECORE_ENABLE_RLX_ODR) {
+ ctrl |= PCI_EXP_DEVCTL_RELAX_EN;
+ OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
+ pos + PCI_EXP_DEVCTL, ctrl);
+ } else if (p_params->pci_rlx_odr_mode == ECORE_DISABLE_RLX_ODR) {
+ ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
+ pos + PCI_EXP_DEVCTL, ctrl);
+ } else if (ecore_mcp_rlx_odr_supported(p_hwfn)) {
+ DP_INFO(p_hwfn, "PCI relax ordering configured by MFW\n");
+ } else {
+ ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
+ pos + PCI_EXP_DEVCTL, ctrl);
+ }
+ #endif /* @DPDK */
rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
+ rc = ecore_iov_init_vf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && ECORE_IS_VF_RDMA(p_hwfn))
+ p_hwfn->pf_iov_info->rdma_enable = false;
+
/* Use the leading hwfn since in CMT only NIG #0 is operational */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt,
- p_params->avoid_eng_affin);
+ p_params->avoid_eng_affin);
if (rc != ECORE_SUCCESS)
return rc;
}
@@ -3518,44 +4850,69 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn,
p_params->allow_npar_tx_switch);
if (rc) {
- DP_NOTICE(p_hwfn, true,
- "Function start ramrod failed\n");
+ DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n");
return rc;
}
+
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
- (1 << 2));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, (1 << 2));
ecore_wr(p_hwfn, p_ptt,
PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
0x100);
}
+
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH registers after start PFn\n");
+ "PRS_REG_SEARCH registers after start PFn\n");
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
- prs_reg);
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ }
+
+#ifndef ASIC_ONLY
+ /* Configure the first VF number for the current PF and the following PF */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
+ ECORE_IS_E5(p_hwfn->p_dev) &&
+ IS_PF_SRIOV(p_hwfn)) {
+ struct ecore_hw_sriov_info *p_iov_info = p_hwfn->p_dev->p_iov_info;
+
+ ecore_wr(p_hwfn, p_ptt, PGLCS_REG_FIRST_VF_K2_E5,
+ p_iov_info->first_vf_in_pf);
+
+ /* If this is not the last PF, pretend to the next PF and set first VF register */
+ if ((rel_pf_id + 1) < MAX_NUM_PFS_E5) {
+ /* pretend to the following PF */
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ (rel_pf_id + 1)));
+ ecore_wr(p_hwfn, p_ptt, PGLCS_REG_FIRST_VF_K2_E5,
+ p_iov_info->first_vf_in_pf + p_iov_info->total_vfs);
+ /* pretend to the original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, rel_pf_id));
+ }
}
+#endif
+
return ECORE_SUCCESS;
}
@@ -3582,14 +4939,14 @@ enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
if (val != set_val) {
DP_NOTICE(p_hwfn, true,
"PFID_ENABLE_MASTER wasn't changed after a second\n");
- return ECORE_UNKNOWN_ERROR;
+ return ECORE_AGAIN;
}
return ECORE_SUCCESS;
}
static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_main_ptt)
+ struct ecore_ptt *p_main_ptt)
{
/* Read shadow of current MFW mailbox */
ecore_mcp_read_mb(p_hwfn, p_main_ptt);
@@ -3598,13 +4955,6 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
-static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
-}
-
static enum _ecore_status_t
ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
struct ecore_load_req_params *p_load_req,
@@ -3666,8 +5016,8 @@ ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_hw_init_params *p_params)
+static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
{
if (p_params->p_tunn) {
ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
@@ -3679,16 +5029,23 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
struct ecore_load_req_params load_req_params;
u32 load_code, resp, param, drv_mb_param;
+ enum _ecore_status_t rc = ECORE_SUCCESS, cancel_load_rc;
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
const u32 *fw_overlays;
u32 fw_overlays_len;
- enum _ecore_status_t rc = ECORE_SUCCESS;
u16 ether_type;
int i;
@@ -3718,19 +5075,22 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
continue;
}
+ /* Some flows may keep variable set */
+ p_hwfn->mcp_info->mcp_handling_status = 0;
+
rc = ecore_calc_hw_mode(p_hwfn);
if (rc != ECORE_SUCCESS)
return rc;
- if (IS_PF(p_dev) && (OSAL_GET_BIT(ECORE_MF_8021Q_TAGGING,
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
&p_dev->mf_bits) ||
- OSAL_GET_BIT(ECORE_MF_8021AD_TAGGING,
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
&p_dev->mf_bits))) {
- if (OSAL_GET_BIT(ECORE_MF_8021Q_TAGGING,
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
&p_dev->mf_bits))
- ether_type = ETHER_TYPE_VLAN;
+ ether_type = ECORE_ETH_P_8021Q;
else
- ether_type = ETHER_TYPE_QINQ;
+ ether_type = ECORE_ETH_P_8021AD;
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
ether_type);
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
@@ -3761,10 +5121,15 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
"Load request was sent. Load code: 0x%x\n",
load_code);
+ /* Only relevant for recovery:
+ * Clear the indication after LOAD_REQ is responded by the MFW.
+ */
+ p_dev->recov_in_prog = false;
+
ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
/* CQ75580:
- * When coming back from hiberbate state, the registers from
+ * When coming back from hibernate state, the registers from
* which shadow is read initially are not initialized. It turns
* out that these registers get initialized during the call to
* ecore_mcp_load_req request. So we need to reread them here
@@ -3775,18 +5140,9 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
*/
ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
- /* Only relevant for recovery:
- * Clear the indication after the LOAD_REQ command is responded
- * by the MFW.
- */
- p_dev->recov_in_prog = false;
-
- p_hwfn->first_on_engine = (load_code ==
- FW_MSG_CODE_DRV_LOAD_ENGINE);
-
if (!qm_lock_ref_cnt) {
#ifdef CONFIG_ECORE_LOCK_ALLOC
- rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
+ rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock, "qm_lock");
if (rc) {
DP_ERR(p_hwfn, "qm_lock allocation failed\n");
goto qm_lock_fail;
@@ -3804,8 +5160,16 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->rel_pf_id, false);
if (rc != ECORE_SUCCESS) {
- ecore_hw_err_notify(p_hwfn,
- ECORE_HW_ERR_RAMROD_FAIL);
+ u8 str[ECORE_HW_ERR_MAX_STR_SIZE];
+
+ OSAL_SNPRINTF((char *)str,
+ ECORE_HW_ERR_MAX_STR_SIZE,
+ "Final cleanup failed\n");
+ DP_NOTICE(p_hwfn, false, "%s", str);
+ ecore_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_HW_ERR_RAMROD_FAIL,
+ str,
+ OSAL_STRLEN((char *)str) + 1);
goto load_err;
}
}
@@ -3839,6 +5203,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (!p_hwfn->fw_overlay_mem) {
DP_NOTICE(p_hwfn, false,
"Failed to allocate fw overlay memory\n");
+ rc = ECORE_NOMEM;
goto load_err;
}
@@ -3848,13 +5213,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->hw_info.hw_mode);
if (rc != ECORE_SUCCESS)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc != ECORE_SUCCESS)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode,
@@ -3876,8 +5241,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Sending load done failed, rc = %d\n", rc);
+ DP_NOTICE(p_hwfn, false, "Sending load done failed, rc = %d\n", rc);
if (rc == ECORE_NOMEM) {
DP_NOTICE(p_hwfn, false,
"Sending load done was failed due to memory allocation failure\n");
@@ -3889,10 +5253,11 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
+ drv_mb_param = 0;
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_DCBX_NOTIFY, 1);
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
- ¶m);
+ drv_mb_param, &resp, ¶m);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to send DCBX attention request\n");
@@ -3907,24 +5272,11 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn = ECORE_LEADING_HWFN(p_dev);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ drv_mb_param = 0;
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_DUMMY_OEM_UPDATES, 1);
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_GET_OEM_UPDATES,
- 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
- &resp, ¶m);
- if (rc != ECORE_SUCCESS)
- DP_NOTICE(p_hwfn, false,
- "Failed to send GET_OEM_UPDATES attention request\n");
- }
-
- if (IS_PF(p_dev)) {
- /* Get pre-negotiated values for stag, bandwidth etc. */
- p_hwfn = ECORE_LEADING_HWFN(p_dev);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
- "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_GET_OEM_UPDATES,
- 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
- &resp, ¶m);
+ drv_mb_param, &resp, ¶m);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Failed to send GET_OEM_UPDATES attention request\n");
@@ -3948,7 +5300,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
rc = ecore_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
- ECORE_OV_DRIVER_STATE_DISABLED);
+ ECORE_OV_DRIVER_STATE_DISABLED);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update driver state\n");
@@ -3967,11 +5319,16 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
qm_lock_fail:
#endif
- /* The MFW load lock should be released regardless of success or failure
- * of initialization.
- * TODO: replace this with an attempt to send cancel_load.
+ /* The MFW load lock should be released also when initialization fails.
+ * If supported, use a cancel_load request to update the MFW with the
+ * load failure.
*/
- ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ cancel_load_rc = ecore_mcp_cancel_load_req(p_hwfn, p_hwfn->p_main_ptt);
+ if (cancel_load_rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Send a load done request instead of cancel load\n");
+ ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ }
return rc;
}
@@ -3985,11 +5342,15 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
/* close timers */
ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
- i++) {
+
+ if (p_dev->recov_in_prog)
+ return;
+
+ for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT; i++) {
if ((!ecore_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+ (!ecore_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
/* Dependent on number of connection/tasks, possibly
@@ -4052,10 +5413,10 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_vf_pf_int_cleanup(p_hwfn);
rc = ecore_vf_pf_reset(p_hwfn);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"ecore_vf_pf_reset failed. rc = %d.\n",
rc);
- rc2 = ECORE_UNKNOWN_ERROR;
+ rc2 = rc;
}
continue;
}
@@ -4130,12 +5491,6 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
- if (IS_LEAD_HWFN(p_hwfn) &&
- OSAL_GET_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
- !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
- ecore_llh_remove_mac_filter(p_dev, 0,
- p_hwfn->hw_info.hw_mac_addr);
-
if (!p_dev->recov_in_prog) {
ecore_verify_reg_val(p_hwfn, p_ptt,
QM_REG_USG_CNT_PF_TX, 0);
@@ -4148,6 +5503,12 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+ if (IS_LEAD_HWFN(p_hwfn) &&
+ OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+ !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ ecore_llh_remove_mac_filter(p_dev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+
--qm_lock_ref_cnt;
#ifdef CONFIG_ECORE_LOCK_ALLOC
if (!qm_lock_ref_cnt)
@@ -4179,8 +5540,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
* only after all transactions have stopped for all active
* hw-functions.
*/
- rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
- false);
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
@@ -4208,18 +5568,11 @@ enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
if (!p_ptt)
return ECORE_AGAIN;
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
- "Shutting down the fastpath\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n");
ecore_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
-
/* @@@TBD - clean transmission queues (5.b) */
/* @@@TBD - clean BTB (5.c) */
@@ -4245,20 +5598,172 @@ enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
if (!p_ptt)
return ECORE_AGAIN;
- /* If roce info is allocated it means roce is initialized and should
- * be enabled in searcher.
- */
- if (p_hwfn->p_rdma_info) {
- if (p_hwfn->b_rdma_enabled_in_prs)
- ecore_wr(p_hwfn, p_ptt,
- p_hwfn->rdma_prs_search_reg, 0x1);
- ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1);
+ /* Re-open incoming traffic */
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 hw_addr, u32 val)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt, hw_addr, val);
+ else
+ ecore_mcp_wol_wr(p_hwfn, p_ptt, hw_addr, val);
+}
+
+enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, u32 reg_idx,
+ u32 pattern_size, u32 crc)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+ u32 reg_len = 0;
+ u32 reg_crc = 0;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ /* Get length and CRC register offsets */
+ switch (reg_idx) {
+ case 0:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB :
+ WOL_REG_ACPI_PAT_0_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB :
+ WOL_REG_ACPI_PAT_0_CRC_K2_E5;
+ break;
+ case 1:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB :
+ WOL_REG_ACPI_PAT_1_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB :
+ WOL_REG_ACPI_PAT_1_CRC_K2_E5;
+ break;
+ case 2:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB :
+ WOL_REG_ACPI_PAT_2_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB :
+ WOL_REG_ACPI_PAT_2_CRC_K2_E5;
+ break;
+ case 3:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB :
+ WOL_REG_ACPI_PAT_3_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB :
+ WOL_REG_ACPI_PAT_3_CRC_K2_E5;
+ break;
+ case 4:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB :
+ WOL_REG_ACPI_PAT_4_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB :
+ WOL_REG_ACPI_PAT_4_CRC_K2_E5;
+ break;
+ case 5:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB :
+ WOL_REG_ACPI_PAT_5_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB :
+ WOL_REG_ACPI_PAT_5_CRC_K2_E5;
+ break;
+ case 6:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB :
+ WOL_REG_ACPI_PAT_6_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB :
+ WOL_REG_ACPI_PAT_6_CRC_K2_E5;
+ break;
+ case 7:
+ reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB :
+ WOL_REG_ACPI_PAT_7_LEN_K2_E5;
+ reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB :
+ WOL_REG_ACPI_PAT_7_CRC_K2_E5;
+ break;
+ default:
+ rc = ECORE_UNKNOWN_ERROR;
+ goto out;
+ }
+
+ /* Align pattern size to 4 */
+ while (pattern_size % 4)
+ pattern_size++;
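/* The loop above is equivalent to rounding up in one step (sketch):
 * pattern_size = (pattern_size + 3) & ~0x3U;
 */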
+
+ /* Write pattern length */
+ ecore_wol_wr(p_hwfn, p_ptt, reg_len, pattern_size);
+
+ /* Write crc value*/
+ ecore_wol_wr(p_hwfn, p_ptt, reg_crc, crc);
+
+ DP_INFO(p_dev,
+ "idx[%d] reg_crc[0x%x=0x%08x] "
+ "reg_len[0x%x=0x%x]\n",
+ reg_idx, reg_crc, crc, reg_len, pattern_size);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ const u32 wake_buffer_clear_offset =
+ ECORE_IS_BB(p_hwfn->p_dev) ?
+ NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5;
+
+ DP_INFO(p_hwfn->p_dev,
+ "reset "
+ "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n",
+ wake_buffer_clear_offset);
+
+ ecore_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
+ ecore_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
+}
+
+enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_wake_info *wake_info)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 *buf = OSAL_NULL;
+ u32 i = 0;
+ const u32 reg_wake_buffer_offset =
+ ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB :
+ WOL_REG_WAKE_BUFFER_K2_E5;
+
+ wake_info->wk_info = ecore_rd(p_hwfn, p_ptt,
+ ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB :
+ WOL_REG_WAKE_INFO_K2_E5);
+ wake_info->wk_details = ecore_rd(p_hwfn, p_ptt,
+ ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB :
+ WOL_REG_WAKE_DETAILS_K2_E5);
+ wake_info->wk_pkt_len = ecore_rd(p_hwfn, p_ptt,
+ ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB :
+ WOL_REG_WAKE_PKT_LEN_K2_E5);
+
+ DP_INFO(p_dev,
+ "REG_WAKE_INFO=0x%08x "
+ "REG_WAKE_DETAILS=0x%08x "
+ "REG_WAKE_PKT_LEN=0x%08x\n",
+ wake_info->wk_info,
+ wake_info->wk_details,
+ wake_info->wk_pkt_len);
+
+ buf = (u32 *)wake_info->wk_buffer;
+
+ for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) {
+ if ((i * sizeof(u32)) >= sizeof(wake_info->wk_buffer)) {
+ DP_INFO(p_dev,
+ "i index to 0 high=%d\n",
+ i);
+ break;
+ }
+ buf[i] = ecore_rd(p_hwfn, p_ptt,
+ reg_wake_buffer_offset + (i * sizeof(u32)));
+ DP_INFO(p_dev, "wk_buffer[%u]: 0x%08x\n",
+ i, buf[i]);
}
- /* Re-open incoming traffic */
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
- ecore_ptt_release(p_hwfn, p_ptt);
+ ecore_wol_buffer_clear(p_hwfn, p_ptt);
return ECORE_SUCCESS;
}
@@ -4268,13 +5773,14 @@ static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
{
ecore_ptt_pool_free(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
+ p_hwfn->hw_info.p_igu_info = OSAL_NULL;
}
/* Setup bar access */
static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
{
/* clear indirect access */
- if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -4306,7 +5812,7 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
{
/* ME Register */
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
- PXP_PF_ME_OPAQUE_ADDR);
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -4322,22 +5828,65 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
-static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
+void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
struct ecore_sb_cnt_info sb_cnt;
- u32 non_l2_sbs = 0;
+ u32 non_l2_sbs = 0, non_l2_iov_sbs = 0;
OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
+#ifdef CONFIG_ECORE_ROCE
+ /* Each RoCE CNQ requires one status block and one CNQ. Divide the
+ * status blocks equally between L2 and RoCE, taking into account how
+ * many L2 queues / CNQs we actually have.
+ */
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
+#ifndef __EXTRACT__LINUX__THROW__
+ u32 max_cnqs;
+#endif
+
+ feat_num[ECORE_RDMA_CNQ] =
+ OSAL_MIN_T(u32,
+ sb_cnt.cnt / 2,
+ RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM));
+
+ feat_num[ECORE_VF_RDMA_CNQ] =
+ OSAL_MIN_T(u32,
+ sb_cnt.iov_cnt / 2,
+ RESC_NUM(p_hwfn, ECORE_VF_RDMA_CNQ_RAM));
+
+#ifndef __EXTRACT__LINUX__THROW__
+ /* Upper layer might require less */
+ max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs;
+ if (max_cnqs) {
+ if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE)
+ max_cnqs = 0;
+ feat_num[ECORE_RDMA_CNQ] =
+ OSAL_MIN_T(u32,
+ feat_num[ECORE_RDMA_CNQ],
+ max_cnqs);
+ feat_num[ECORE_VF_RDMA_CNQ] =
+ OSAL_MIN_T(u32,
+ feat_num[ECORE_VF_RDMA_CNQ],
+ max_cnqs);
+ }
+#endif
+
+ non_l2_sbs = feat_num[ECORE_RDMA_CNQ];
+ non_l2_iov_sbs = feat_num[ECORE_VF_RDMA_CNQ];
+ }
+#endif
+
/* L2 Queues require each: 1 status block. 1 L2 queue */
if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
/* Start by allocating VF queues, then PF's */
feat_num[ECORE_VF_L2_QUE] =
OSAL_MIN_T(u32,
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
- sb_cnt.iov_cnt);
+ sb_cnt.iov_cnt - non_l2_iov_sbs,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+
feat_num[ECORE_PF_L2_QUE] =
OSAL_MIN_T(u32,
sb_cnt.cnt - non_l2_sbs,
@@ -4345,36 +5894,12 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
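/* Illustrative split (assumed values): with 16 PF status blocks, 8 VF status
 * blocks and an RDMA personality, up to 8 status blocks (16 / 2) go to the PF
 * CNQs and up to 4 (8 / 2) to the VF CNQs, both capped by the corresponding
 * CNQ_RAM resources; the status blocks left over (16 - non_l2_sbs and
 * 8 - non_l2_iov_sbs) are what the PF/VF L2 queue counts above are derived
 * from.
 */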
- if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
- ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
- u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
- &feat_num[ECORE_FCOE_CQ] :
- &feat_num[ECORE_ISCSI_CQ];
- u32 limit = sb_cnt.cnt;
-
- /* The number of queues should not exceed the number of FP SBs.
- * In storage target, the queues are divided into pairs of a CQ
- * and a CmdQ, and each pair uses a single SB. The limit in
- * this case should allow a max ratio of 2:1 instead of 1:1.
- */
- if (p_hwfn->p_dev->b_is_target)
- limit *= 2;
- *p_storage_feat = OSAL_MIN_T(u32, limit,
- RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
-
- /* @DPDK */
- /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
- * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
- */
- *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
- (NUM_OF_GLOBAL_QUEUES / 2));
- }
-
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
+ "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #PF_ROCE_CNQ=%d #VF_ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
(int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_VF_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
(int)sb_cnt.cnt);
@@ -4397,18 +5922,26 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
return "MAC";
case ECORE_VLAN:
return "VLAN";
+ case ECORE_VF_RDMA_CNQ_RAM:
+ return "VF_RDMA_CNQ_RAM";
case ECORE_RDMA_CNQ_RAM:
return "RDMA_CNQ_RAM";
case ECORE_ILT:
return "ILT";
- case ECORE_LL2_QUEUE:
- return "LL2_QUEUE";
+ case ECORE_LL2_RAM_QUEUE:
+ return "LL2_RAM_QUEUE";
+ case ECORE_LL2_CTX_QUEUE:
+ return "LL2_CTX_QUEUE";
case ECORE_CMDQS_CQS:
return "CMDQS_CQS";
case ECORE_RDMA_STATS_QUEUE:
return "RDMA_STATS_QUEUE";
case ECORE_BDQ:
return "BDQ";
+ case ECORE_VF_MAC_ADDR:
+ return "VF_MAC_ADDR";
+ case ECORE_GFS_PROFILE:
+ return "GFS_PROFILE";
case ECORE_SB:
return "SB";
default:
@@ -4467,7 +6000,8 @@ static u32 ecore_hsi_def_val[][MAX_CHIP_IDS] = {
u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, enum ecore_hsi_def_type type)
{
- enum chip_ids chip_id = ECORE_IS_BB(p_dev) ? CHIP_BB : CHIP_K2;
+ enum chip_ids chip_id = ECORE_IS_BB(p_dev) ? CHIP_BB :
+ ECORE_IS_AH(p_dev) ? CHIP_K2 : CHIP_E5;
if (type >= ECORE_NUM_HSI_DEFS) {
DP_ERR(p_dev, "Unexpected HSI definition type [%d]\n", type);
@@ -4482,18 +6016,13 @@ ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 resc_max_val, mcp_resp;
- u8 res_id;
+ u8 num_vf_cnqs, res_id;
enum _ecore_status_t rc;
+ num_vf_cnqs = p_hwfn->num_vf_cnqs;
+
for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
- /* @DPDK */
switch (res_id) {
- case ECORE_LL2_QUEUE:
- case ECORE_RDMA_CNQ_RAM:
- case ECORE_RDMA_STATS_QUEUE:
- case ECORE_BDQ:
- resc_max_val = 0;
- break;
default:
continue;
}
@@ -4523,6 +6052,7 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 num_vf_cnqs = p_hwfn->num_vf_cnqs;
switch (res_id) {
case ECORE_L2_QUEUE:
@@ -4549,37 +6079,57 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
case ECORE_ILT:
*p_resc_num = NUM_OF_PXP_ILT_RECORDS(p_dev) / num_funcs;
break;
- case ECORE_LL2_QUEUE:
+ case ECORE_LL2_RAM_QUEUE:
*p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
+ case ECORE_LL2_CTX_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
+ break;
+ case ECORE_VF_RDMA_CNQ_RAM:
+ *p_resc_num = num_vf_cnqs / num_funcs;
+ break;
case ECORE_RDMA_CNQ_RAM:
case ECORE_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- /* @DPDK */
- *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ *p_resc_num = (NUM_OF_GLOBAL_QUEUES - num_vf_cnqs) / num_funcs;
break;
case ECORE_RDMA_STATS_QUEUE:
*p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(p_dev) / num_funcs;
break;
case ECORE_BDQ:
- /* @DPDK */
+ if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != ECORE_PCI_FCOE)
+ *p_resc_num = 0;
+ else
+ *p_resc_num = 1;
+ break;
+ case ECORE_VF_MAC_ADDR:
*p_resc_num = 0;
break;
- default:
+ case ECORE_GFS_PROFILE:
+ *p_resc_num = ECORE_IS_E5(p_hwfn->p_dev) ?
+ GFS_PROFILE_MAX_ENTRIES / num_funcs : 0;
+ break;
+ case ECORE_SB:
+ /* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+ *p_resc_num = 0;
break;
+ default:
+ return ECORE_INVAL;
}
-
switch (res_id) {
case ECORE_BDQ:
if (!*p_resc_num)
*p_resc_start = 0;
- break;
- case ECORE_SB:
- /* Since we want its value to reflect whether MFW supports
- * the new scheme, have a default of 0.
- */
- *p_resc_num = 0;
+ else if (p_hwfn->p_dev->num_ports_in_engine == 4)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
+ *p_resc_start = p_hwfn->port_id + 2;
break;
default:
*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
@@ -4609,8 +6159,19 @@ __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
return rc;
}
+ /* TODO: Add MFW support for GFS profiles resource */
+ if (res_id == ECORE_GFS_PROFILE) {
+ DP_INFO(p_hwfn,
+ "Resource %d [%s]: Applying default values [%d,%d]\n",
+ res_id, ecore_hw_get_resc_name(res_id),
+ dflt_resc_num, dflt_resc_start);
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ return ECORE_SUCCESS;
+ }
+
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && !ECORE_IS_E5(p_hwfn->p_dev)) {
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
@@ -4620,9 +6181,8 @@ __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
&mcp_resp, p_resc_num, p_resc_start);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "MFW response failure for an allocation request for"
- " resource %d [%s]\n",
+ DP_NOTICE(p_hwfn, false,
+ "MFW response failure for an allocation request for resource %d [%s]\n",
res_id, ecore_hw_get_resc_name(res_id));
return rc;
}
@@ -4634,12 +6194,9 @@ __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
*/
if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
DP_INFO(p_hwfn,
- "Failed to receive allocation info for resource %d [%s]."
- " mcp_resp = 0x%x. Applying default values"
- " [%d,%d].\n",
+ "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
dflt_resc_num, dflt_resc_start);
-
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
@@ -4659,6 +6216,37 @@ __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
}
}
out:
+	/* PQs have to be aligned to 8 [that's the HW granularity].
+	 * Reduce the number and the start so they fit.
+	 */
+ if ((res_id == ECORE_PQ) &&
+ ((*p_resc_num % 8) || (*p_resc_start % 8))) {
+ DP_INFO(p_hwfn,
+ "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
+ *p_resc_num, (*p_resc_num) & ~0x7,
+ *p_resc_start, (*p_resc_start) & ~0x7);
+ *p_resc_num &= ~0x7;
+ *p_resc_start &= ~0x7;
+ }
+
+ /* The last RSS engine is used by the FW for TPA hash calculation.
+ * Old MFW versions allocate it to the drivers, so it needs to be
+ * truncated.
+ */
+ if (res_id == ECORE_RSS_ENG &&
+ *p_resc_num &&
+ (*p_resc_start + *p_resc_num - 1) ==
+ NUM_OF_RSS_ENGINES(p_hwfn->p_dev))
+ --*p_resc_num;
+
+	/* In case the personality is not RDMA, or there is no separate doorbell
+	 * bar for VFs, VF-RDMA will not be supported, so there is no need for
+	 * VF CNQs.
+	 */
+ if ((res_id == ECORE_VF_RDMA_CNQ_RAM) &&
+ (!ECORE_IS_RDMA_PERSONALITY(p_hwfn) ||
+ !ecore_iov_vf_db_bar_size(p_hwfn, p_hwfn->p_main_ptt)))
+ *p_resc_num = 0;
+
return ECORE_SUCCESS;
}
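The 8-alignment applied to ECORE_PQ above can be seen in isolation in the following standalone sketch (function and variable names are illustrative, not part of ecore); clearing the three low bits rounds both the PQ count and the first PQ index down to the hardware granularity while staying inside the original MFW grant.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the masking above: both values are rounded down to a multiple of 8. */
static void align_pq_grant(uint32_t *num, uint32_t *start)
{
        *num &= ~0x7u;          /* PQ count */
        *start &= ~0x7u;        /* first PQ index */
}

int main(void)
{
        uint32_t num = 37, start = 12;

        align_pq_grant(&num, &start);
        printf("num %u, start %u\n", num, start);       /* num 32, start 8 */
        return 0;
}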
@@ -4697,7 +6285,7 @@ static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
/* 4-ports mode has limitations that should be enforced:
* - BB: the MFW can access only PPFIDs which their corresponding PFIDs
* belong to this certain port.
- * - AH: only 4 PPFIDs per port are available.
+ * - AH/E5: only 4 PPFIDs per port are available.
*/
if (ecore_device_num_ports(p_dev) == 4) {
u8 mask;
@@ -4762,9 +6350,17 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
* Old drivers that don't acquire the lock can run in parallel, and
* their allocation values won't be affected by the updated max values.
*/
+
ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
ECORE_RESC_LOCK_RESC_ALLOC, false);
+ /* Changes on top of the default values to accommodate parallel attempts
+ * of several PFs.
+ * [10 x 10 msec by default ==> 20 x 50 msec]
+ */
+ resc_lock_params.retry_num *= 2;
+ resc_lock_params.retry_interval *= 5;
+
rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
return rc;
@@ -4774,8 +6370,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
} else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
DP_NOTICE(p_hwfn, false,
"Failed to acquire the resource lock for the resource allocation commands\n");
- rc = ECORE_BUSY;
- goto unlock_and_exit;
+ return ECORE_BUSY;
} else {
rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
@@ -4818,7 +6413,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
if (!(p_dev->b_is_emul_full)) {
resc_num[ECORE_PQ] = 32;
resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
- p_hwfn->enabled_func_idx;
+ p_hwfn->enabled_func_idx;
}
/* For AH emulation, since we have a possible maximal number of
@@ -4832,23 +6427,38 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
if (!p_hwfn->rel_pf_id) {
resc_num[ECORE_ILT] =
OSAL_MAX_T(u32, resc_num[ECORE_ILT],
- roce_min_ilt_lines);
+ roce_min_ilt_lines);
} else if (resc_num[ECORE_ILT] < roce_min_ilt_lines) {
resc_start[ECORE_ILT] += roce_min_ilt_lines -
resc_num[ECORE_ILT];
}
}
+
+ /* [E5 emulation] MF + SRIOV:
+ * The 14 low PFs have 16 VFs while the 2 high PFs have 8 VFs.
+ * Need to adjust the vport values so that each PF/VF will have
+ * a vport:
+ * 256 vports = 14 x (PF + 16 VFs) + 2 x (PF + 8 VFs)
+ */
+ if (ECORE_IS_E5(p_dev) &&
+ p_hwfn->num_funcs_on_engine == 16 &&
+ IS_ECORE_SRIOV(p_dev)) {
+ u8 id = p_hwfn->abs_pf_id;
+
+ resc_num[ECORE_VPORT] = (id < 14) ? 17 : 9;
+ resc_start[ECORE_VPORT] = (id < 14) ?
+ (id * 17) :
+ (14 * 17 + (id - 14) * 9);
+ }
}
#endif
/* Sanity for ILT */
max_ilt_lines = NUM_OF_PXP_ILT_RECORDS(p_dev);
if (RESC_END(p_hwfn, ECORE_ILT) > max_ilt_lines) {
- DP_NOTICE(p_hwfn, true,
- "Can't assign ILT pages [%08x,...,%08x]\n",
- RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
- ECORE_ILT) -
- 1);
+ DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, ECORE_ILT),
+ RESC_END(p_hwfn, ECORE_ILT) - 1);
return ECORE_INVAL;
}
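The emulation comment above ("256 vports = 14 x (PF + 16 VFs) + 2 x (PF + 8 VFs)") can be checked with a small standalone program; the helper below simply mirrors that split and is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* PFs 0..13 get 1 PF vport + 16 VF vports, PFs 14..15 get 1 + 8. */
static void e5_emul_vport_split(uint8_t pf_id, uint32_t *num, uint32_t *start)
{
        *num = (pf_id < 14) ? 17 : 9;
        *start = (pf_id < 14) ? pf_id * 17 : 14 * 17 + (pf_id - 14) * 9;
}

int main(void)
{
        uint32_t num, start, total = 0;
        uint8_t pf;

        for (pf = 0; pf < 16; pf++) {
                e5_emul_vport_split(pf, &num, &start);
                total += num;   /* ranges are contiguous: start equals the running total */
        }
        printf("total vports %u\n", total);     /* prints 256 */
        return 0;
}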
@@ -4875,6 +6485,26 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
return rc;
}
+static bool ecore_is_dscp_mapping_allowed(struct ecore_hwfn *p_hwfn,
+ u32 mf_mode)
+{
+ /* HW bug in E4:
+ * The NIG accesses the "NIG_REG_DSCP_TO_TC_MAP_ENABLE" PORT_PF register
+ * with the PFID, as set in "NIG_REG_LLH_PPFID2PFID_TBL", instead of
+	 * with the PPFID. AH is affected by this bug, and thus DSCP to
+ * TC mapping is only allowed on the following configurations:
+ * - QPAR, 2-ports
+ * - QPAR, 4-ports, single PF on a port
+ * There is no impact on BB since it has another bug in which the PPFID
+ * is actually engine based.
+ */
+ return !ECORE_IS_AH(p_hwfn->p_dev) ||
+ (mf_mode == NVM_CFG1_GLOB_MF_MODE_DEFAULT &&
+ (ecore_device_num_ports(p_hwfn->p_dev) == 2 ||
+ (ecore_device_num_ports(p_hwfn->p_dev) == 4 &&
+ p_hwfn->num_funcs_on_port == 1)));
+}
+
#ifndef ASIC_ONLY
static enum _ecore_status_t
ecore_emul_hw_get_nvm_info(struct ecore_hwfn *p_hwfn)
@@ -4885,7 +6515,8 @@ ecore_emul_hw_get_nvm_info(struct ecore_hwfn *p_hwfn)
/* The MF mode on emulation is either default or NPAR 1.0 */
p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
- 1 << ECORE_MF_LL2_NON_UNICAST;
+ 1 << ECORE_MF_LL2_NON_UNICAST |
+ 1 << ECORE_MF_DSCP_TO_TC_MAP;
if (p_hwfn->num_funcs_on_port > 1)
p_dev->mf_bits |= 1 << ECORE_MF_INTER_PF_SWITCH |
1 << ECORE_MF_DISABLE_ARFS;
@@ -4902,14 +6533,16 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_hw_prepare_params *p_params)
{
- u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
struct ecore_mcp_link_capabilities *p_caps;
struct ecore_mcp_link_params *link;
enum _ecore_status_t rc;
+ u32 dcbx_mode; /* __LINUX__THROW__ */
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) ||
+ p_hwfn->mcp_info->recovery_mode)
return ecore_emul_hw_get_nvm_info(p_hwfn);
#endif
@@ -4924,18 +6557,16 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
-/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
-
+	/* Read nvm_cfg1 (notice this is just the offset, and not the offsize - TBD) */
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
OFFSETOF(struct nvm_cfg1, glob) +
OFFSETOF(struct nvm_cfg1_glob, core_cfg);
core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
- switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
- NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ switch (GET_MFW_FIELD(core_cfg, NVM_CFG1_GLOB_NETWORK_PORT_MODE)) {
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
break;
@@ -4969,20 +6600,35 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G_R1;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X50G_R1;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G_R2;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X100G_R2;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G_R4;
+ break;
default:
DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
core_cfg);
break;
}
+#ifndef __EXTRACT__LINUX__THROW__
/* Read DCBX configuration */
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
dcbx_mode = ecore_rd(p_hwfn, p_ptt,
port_cfg_addr +
OFFSETOF(struct nvm_cfg1_port, generic_cont0));
- dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
- >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
+ dcbx_mode = GET_MFW_FIELD(dcbx_mode, NVM_CFG1_PORT_DCBX_MODE);
switch (dcbx_mode) {
case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
@@ -4996,12 +6642,13 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
default:
p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
}
+#endif
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
p_caps = &p_hwfn->mcp_info->link_capabilities;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
link_temp = ecore_rd(p_hwfn, p_ptt,
port_cfg_addr +
OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
@@ -5012,8 +6659,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
link_temp = ecore_rd(p_hwfn, p_ptt,
port_cfg_addr +
OFFSETOF(struct nvm_cfg1_port, link_settings));
- switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
- NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ switch (GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_LINK_SPEED)) {
case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
link->speed.autoneg = true;
break;
@@ -5023,6 +6669,9 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
link->speed.forced_speed = 10000;
break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+ link->speed.forced_speed = 20000;
+ break;
case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
link->speed.forced_speed = 25000;
break;
@@ -5036,27 +6685,56 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
link->speed.forced_speed = 100000;
break;
default:
- DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
+ DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n",
+ link_temp);
}
- p_caps->default_speed = link->speed.forced_speed;
+ p_caps->default_speed = link->speed.forced_speed; /* __LINUX__THROW__ */
p_caps->default_speed_autoneg = link->speed.autoneg;
- link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
- link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link_temp = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL);
link->pause.autoneg = !!(link_temp &
- NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
link->pause.forced_rx = !!(link_temp &
- NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
link->pause.forced_tx = !!(link_temp &
- NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
link->loopback_mode = 0;
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+ link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port,
+ link_settings));
+ switch (GET_MFW_FIELD(link_temp,
+ NVM_CFG1_PORT_FEC_FORCE_MODE)) {
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE:
+ p_caps->fec_default |= ECORE_MCP_FEC_NONE;
+ break;
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE:
+ p_caps->fec_default |= ECORE_MCP_FEC_FIRECODE;
+ break;
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_RS:
+ p_caps->fec_default |= ECORE_MCP_FEC_RS;
+ break;
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO:
+ p_caps->fec_default |= ECORE_MCP_FEC_AUTO;
+ break;
+ default:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "unknown FEC mode in 0x%08x\n", link_temp);
+ }
+ } else {
+ p_caps->fec_default = ECORE_MCP_FEC_UNSUPPORTED;
+ }
+
+ link->fec = p_caps->fec_default;
+
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
OFFSETOF(struct nvm_cfg1_port, ext_phy));
- link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
- link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
+ link_temp = GET_MFW_FIELD(link_temp,
+ NVM_CFG1_PORT_EEE_POWER_SAVING_MODE);
p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
link->eee.enable = true;
switch (link_temp) {
@@ -5083,100 +6761,202 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
}
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+ u32 mask;
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, extended_speed));
+ mask = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED);
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN)
+ link->ext_speed.autoneg = true;
+ link->ext_speed.forced_speed = 0;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_1G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_10G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_20G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_25G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_40G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_50G_R;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_50G_R2;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_100G_R2;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_100G_R4;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4)
+ link->ext_speed.forced_speed |= ECORE_EXT_SPEED_100G_P4;
+
+ mask = GET_MFW_FIELD(link_temp,
+ NVM_CFG1_PORT_EXTENDED_SPEED_CAP);
+ link->ext_speed.advertised_speeds = 0;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_RES;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_1G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_10G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_20G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_25G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_40G;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_50G_R;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_50G_R2;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_100G_R2;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_100G_R4;
+ if (mask & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4)
+ link->ext_speed.advertised_speeds |=
+ ECORE_EXT_SPEED_MASK_100G_P4;
+
+ link->ext_fec_mode = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port,
+ extended_fec_mode));
+ p_caps->default_ext_speed_caps =
+ link->ext_speed.advertised_speeds;
+ p_caps->default_ext_speed = link->ext_speed.forced_speed;
+ p_caps->default_ext_autoneg = link->ext_speed.autoneg;
+ p_caps->default_ext_fec = link->ext_fec_mode;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Read default extended link config: An 0x%08x Speed 0x%08x, Adv. Speed 0x%08x, AN 0x%02x, FEC: 0x%2x\n",
+ link->ext_speed.autoneg,
+ link->ext_speed.forced_speed,
+ link->ext_speed.advertised_speeds,
+ link->ext_speed.autoneg, link->ext_fec_mode);
+ }
+
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n EEE: %02x [%08x usec]",
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec], FEC: 0x%2x\n",
link->speed.forced_speed, link->speed.advertised_speeds,
link->speed.autoneg, link->pause.autoneg,
- p_caps->default_eee, p_caps->eee_lpi_timer);
-
- /* Read Multi-function information from shmem */
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+ p_caps->default_eee, p_caps->eee_lpi_timer,
+ p_caps->fec_default);
- generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
- mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
- NVM_CFG1_GLOB_MF_MODE_OFFSET;
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+ generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+ mf_mode = GET_MFW_FIELD(generic_cont0, NVM_CFG1_GLOB_MF_MODE);
- switch (mf_mode) {
- case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
- p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
- break;
- case NVM_CFG1_GLOB_MF_MODE_UFP:
- p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
1 << ECORE_MF_UFP_SPECIFIC |
- 1 << ECORE_MF_8021Q_TAGGING;
- break;
- case NVM_CFG1_GLOB_MF_MODE_BD:
- p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_8021Q_TAGGING |
+ 1 << ECORE_MF_DONT_ADD_VLAN0_TAG;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
1 << ECORE_MF_8021AD_TAGGING |
- 1 << ECORE_MF_FIP_SPECIAL;
- break;
- case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
- p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_FIP_SPECIAL |
+ 1 << ECORE_MF_DONT_ADD_VLAN0_TAG;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
1 << ECORE_MF_LL2_NON_UNICAST |
1 << ECORE_MF_INTER_PF_SWITCH |
1 << ECORE_MF_DISABLE_ARFS;
- break;
- case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
- p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
- 1 << ECORE_MF_LL2_NON_UNICAST;
- if (ECORE_IS_BB(p_hwfn->p_dev))
- p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
- break;
- }
- DP_INFO(p_hwfn, "Multi function mode is 0x%x\n",
- p_hwfn->p_dev->mf_bits);
+ 1 << ECORE_MF_LL2_NON_UNICAST |
+ 1 << ECORE_MF_VF_RDMA |
+ 1 << ECORE_MF_ROCE_LAG;
+ if (ECORE_IS_BB(p_dev))
+ p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ break;
+ }
- if (ECORE_IS_CMT(p_hwfn->p_dev))
- p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
+ DP_INFO(p_dev, "Multi function mode is 0x%x\n",
+ p_dev->mf_bits);
- /* It's funny since we have another switch, but it's easier
- * to throw this away in linux this way. Long term, it might be
- * better to have have getters for needed ECORE_MF_* fields,
- * convert client code and eliminate this.
- */
- switch (mf_mode) {
- case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
- case NVM_CFG1_GLOB_MF_MODE_BD:
- p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
- break;
- case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
- p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
- break;
- case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
- p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
- break;
- case NVM_CFG1_GLOB_MF_MODE_UFP:
- p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
- break;
+		/* In CMT the PF is unknown when the GFS block processes the
+		 * packet. Therefore the searcher cannot be used, since it has
+		 * a per-PF database, and thus ARFS must be disabled.
+		 */
+ if (ECORE_IS_CMT(p_dev))
+ p_dev->mf_bits |= 1 << ECORE_MF_DISABLE_ARFS;
+
+ if (ecore_is_dscp_mapping_allowed(p_hwfn, mf_mode))
+ p_dev->mf_bits |= 1 << ECORE_MF_DSCP_TO_TC_MAP;
+
+#ifndef __EXTRACT__LINUX__THROW__
+ /* It's funny since we have another switch, but it's easier
+ * to throw this away in linux this way. Long term, it might be
+ * better to have getters for needed ECORE_MF_* fields,
+ * convert client code and eliminate this.
+ */
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_dev->mf_mode = ECORE_MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_dev->mf_mode = ECORE_MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_dev->mf_mode = ECORE_MF_DEFAULT;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_dev->mf_mode = ECORE_MF_UFP;
+ break;
+ }
+#endif
}
- /* Read Multi-function information from shmem */
+ /* Read device capabilities information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
OFFSETOF(struct nvm_cfg1, glob) +
OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
- OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
- &p_hwfn->hw_info.device_capabilities);
+ OSAL_NON_ATOMIC_SET_BIT(ECORE_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
- OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
- &p_hwfn->hw_info.device_capabilities);
+ OSAL_NON_ATOMIC_SET_BIT(ECORE_DEV_CAP_FCOE,
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
- OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
- &p_hwfn->hw_info.device_capabilities);
+ OSAL_NON_ATOMIC_SET_BIT(ECORE_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
- OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
- &p_hwfn->hw_info.device_capabilities);
+ OSAL_NON_ATOMIC_SET_BIT(ECORE_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
- OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
- &p_hwfn->hw_info.device_capabilities);
+ OSAL_NON_ATOMIC_SET_BIT(ECORE_DEV_CAP_IWARP,
+ &p_hwfn->hw_info.device_capabilities);
rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
@@ -5190,11 +6970,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
- u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ u8 num_funcs, enabled_func_idx, num_funcs_on_port;
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 reg_function_hide;
+ /* Default "worst-case" values */
num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
+ enabled_func_idx = p_hwfn->rel_pf_id;
+ num_funcs_on_port = MAX_PF_PER_PORT;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
@@ -5207,21 +6990,23 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
+ u32 enabled_funcs, eng_mask, low_pfs_mask, port_mask, tmp;
+
+		/* Get a bitmap of the enabled functions on the device */
+ enabled_funcs = (reg_function_hide & 0xffff) ^ 0xfffe;
+
+ /* Get the number of the enabled functions on the engine */
if (ECORE_IS_BB(p_dev)) {
- if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
- num_funcs = 0;
+ if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev))
eng_mask = 0xaaaa;
- } else {
- num_funcs = 1;
- eng_mask = 0x5554;
- }
+ else
+ eng_mask = 0x5555;
} else {
- num_funcs = 1;
- eng_mask = 0xfffe;
+ eng_mask = 0xffff;
}
- /* Get the number of the enabled functions on the engine */
- tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ num_funcs = 0;
+ tmp = enabled_funcs & eng_mask;
while (tmp) {
if (tmp & 0x1)
num_funcs++;
@@ -5229,17 +7014,35 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
}
/* Get the PF index within the enabled functions */
- low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
- tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ low_pfs_mask = (1 << ECORE_LEADING_HWFN(p_dev)->abs_pf_id) - 1;
+ enabled_func_idx = 0;
+ tmp = enabled_funcs & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx++;
+ tmp >>= 0x1;
+ }
+
+ /* Get the number of functions on the port */
+ if (ecore_device_num_ports(p_hwfn->p_dev) == 4)
+ port_mask = 0x1111 << (p_hwfn->abs_pf_id % 4);
+ else if (ecore_device_num_ports(p_hwfn->p_dev) == 2)
+ port_mask = 0x5555 << (p_hwfn->abs_pf_id % 2);
+ else /* single port */
+ port_mask = 0xffff;
+
+ num_funcs_on_port = 0;
+ tmp = enabled_funcs & port_mask;
while (tmp) {
if (tmp & 0x1)
- enabled_func_idx--;
+ num_funcs_on_port++;
tmp >>= 0x1;
}
}
p_hwfn->num_funcs_on_engine = num_funcs;
p_hwfn->enabled_func_idx = enabled_func_idx;
+ p_hwfn->num_funcs_on_port = num_funcs_on_port;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
@@ -5250,14 +7053,15 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
#endif
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
- p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
- p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
+ "PF {abs %d, rel %d}: enabled func %d, %d funcs on engine, %d funcs on port [reg_function_hide 0x%x]\n",
+ p_hwfn->abs_pf_id, p_hwfn->rel_pf_id,
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine,
+ p_hwfn->num_funcs_on_port, reg_function_hide);
}
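A standalone sketch of the per-port counting added above (names and the example bitmap are illustrative): enabled_funcs is the bitmap derived from MISCS_REG_FUNCTION_HIDE, and the port mask selects every 4th (or 2nd) PF depending on how many ports share the engine, so popcounting the intersection yields num_funcs_on_port.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the port-mask selection above. */
static uint32_t port_mask(int num_ports, uint8_t abs_pf_id)
{
        if (num_ports == 4)
                return 0x1111u << (abs_pf_id % 4);
        if (num_ports == 2)
                return 0x5555u << (abs_pf_id % 2);
        return 0xffffu;                 /* single port: all PFs */
}

static int popcount16(uint32_t v)
{
        int n = 0;

        for (; v; v >>= 1)
                n += v & 1;
        return n;
}

int main(void)
{
        uint32_t enabled_funcs = 0x00ff;        /* PFs 0..7 enabled */

        /* PF 6 on a 4-port device shares its port only with PF 2 here,
         * so two functions are counted on that port.
         */
        printf("funcs on port: %d\n",
               popcount16(enabled_funcs & port_mask(4, 6)));
        return 0;
}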
#ifndef ASIC_ONLY
static void ecore_emul_hw_info_port_num(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 eco_reserved;
@@ -5265,22 +7069,22 @@ static void ecore_emul_hw_info_port_num(struct ecore_hwfn *p_hwfn,
/* MISCS_REG_ECO_RESERVED[15:12]: num of ports in an engine */
eco_reserved = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
switch ((eco_reserved & 0xf000) >> 12) {
- case 1:
- p_dev->num_ports_in_engine = 1;
- break;
- case 3:
- p_dev->num_ports_in_engine = 2;
- break;
- case 0xf:
- p_dev->num_ports_in_engine = 4;
- break;
- default:
- DP_NOTICE(p_hwfn, false,
+ case 1:
+ p_dev->num_ports_in_engine = 1;
+ break;
+ case 3:
+ p_dev->num_ports_in_engine = 2;
+ break;
+ case 0xf:
+ p_dev->num_ports_in_engine = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
"Emulation: Unknown port mode [ECO_RESERVED 0x%08x]\n",
eco_reserved);
p_dev->num_ports_in_engine = 1; /* Default to something */
break;
- }
+ }
p_dev->num_ports = p_dev->num_ports_in_engine *
ecore_device_num_engines(p_dev);
@@ -5307,7 +7111,7 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
}
#endif
- /* In CMT there is always only one port */
+ /* In CMT there is always only one port */
if (ECORE_IS_CMT(p_dev)) {
p_dev->num_ports_in_engine = 1;
p_dev->num_ports = 1;
@@ -5355,8 +7159,7 @@ static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
p_caps->eee_speed_caps = 0;
eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port, eee_status));
- eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
- EEE_SUPPORTED_SPEED_OFFSET;
+ eee_status = GET_MFW_FIELD(eee_status, EEE_SUPPORTED_SPEED);
if (eee_status & EEE_1G_SUPPORTED)
p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
if (eee_status & EEE_10G_ADV)
@@ -5377,26 +7180,41 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
/* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
+ if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn) &&
+ !p_params->b_sriov_disable) {
rc = ecore_iov_hw_info(p_hwfn);
+
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res =
- ECORE_HW_PREPARE_BAD_IOV;
+ ECORE_HW_PREPARE_BAD_IOV;
else
return rc;
}
}
+ /* The following order should be kept:
+ * (1) ecore_hw_info_port_num(),
+ * (2) ecore_get_num_funcs(),
+ * (3) ecore_mcp_get_capabilities, and
+ * (4) ecore_hw_get_nvm_info(),
+ * since (2) depends on (1) and (4) depends on both.
+	 * In addition, a mailbox command can be sent to the MFW only after (1)
+	 * is called.
+ */
if (IS_LEAD_HWFN(p_hwfn))
ecore_hw_info_port_num(p_hwfn, p_ptt);
+ ecore_get_num_funcs(p_hwfn, p_ptt);
+
ecore_mcp_get_capabilities(p_hwfn, p_ptt);
rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
if (rc != ECORE_SUCCESS)
return rc;
+ if (p_hwfn->mcp_info->recovery_mode)
+ return ECORE_SUCCESS;
+
rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
@@ -5405,16 +7223,18 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
+ OSAL_NUM_FUNCS_IS_SET(p_hwfn);
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
#endif
OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
- p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
+ p_hwfn->mcp_info->func_info.mac, ECORE_ETH_ALEN);
#ifndef ASIC_ONLY
} else {
- static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
+ static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6};
- OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ECORE_ETH_ALEN);
p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
}
#endif
@@ -5422,7 +7242,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if (ecore_mcp_is_init(p_hwfn)) {
if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
p_hwfn->hw_info.ovlan =
- p_hwfn->mcp_info->func_info.ovlan;
+ p_hwfn->mcp_info->func_info.ovlan;
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
@@ -5443,7 +7263,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
/* AH emulation:
* Allow only PF0 to be RoCE to overcome a lack of ILT lines.
- */
+ */
if (ECORE_IS_AH(p_hwfn->p_dev) && p_hwfn->rel_pf_id)
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
else
@@ -5451,6 +7271,9 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
#endif
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn))
+ p_hwfn->hw_info.multi_tc_roce_en = 1;
+
/* although in BB some constellations may support more than 4 tcs,
* that can result in performance penalty in some cases. 4
* represents a good tradeoff between performance and flexibility.
@@ -5465,16 +7288,18 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
*/
p_hwfn->hw_info.num_active_tc = 1;
- ecore_get_num_funcs(p_hwfn, p_ptt);
-
if (ecore_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
- /* In case of forcing the driver's default resource allocation, calling
- * ecore_hw_get_resc() should come after initializing the personality
- * and after getting the number of functions, since the calculation of
- * the resources/features depends on them.
- * This order is not harmful if not forcing.
+ /* In case the current MF mode doesn't support VF-RDMA, there is no
+	 * need for VF CNQs.
+ */
+ if (!OSAL_TEST_BIT(ECORE_MF_VF_RDMA, &p_hwfn->p_dev->mf_bits))
+ p_hwfn->num_vf_cnqs = 0;
+
+ /* Due to a dependency, ecore_hw_get_resc() should be called after
+ * getting the number of functions on an engine, and after initializing
+ * the personality.
*/
rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
@@ -5485,7 +7310,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
-#define ECORE_MAX_DEVICE_NAME_LEN (8)
+#define ECORE_MAX_DEVICE_NAME_LEN (8)
void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
{
@@ -5493,7 +7318,8 @@ void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
OSAL_SNPRINTF((char *)name, n, "%s %c%d",
- ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ ECORE_IS_BB(p_dev) ? "BB"
+ : ECORE_IS_AH(p_dev) ? "AH" : "E5",
'A' + p_dev->chip_rev, (int)p_dev->chip_metal);
}
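For reference, the name format used above produces strings like "AH B0": the chip revision is mapped to a letter starting at 'A' and the metal is appended as a decimal. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
        char name[8];                           /* ECORE_MAX_DEVICE_NAME_LEN */
        unsigned char chip_rev = 1, chip_metal = 0;

        snprintf(name, sizeof(name), "%s %c%d",
                 "AH", 'A' + chip_rev, (int)chip_metal);
        printf("%s\n", name);                   /* prints "AH B0" */
        return 0;
}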
@@ -5519,6 +7345,9 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
case ECORE_DEV_ID_MASK_AH:
p_dev->type = ECORE_DEV_TYPE_AH;
break;
+ case ECORE_DEV_ID_MASK_E5:
+ p_dev->type = ECORE_DEV_TYPE_E5;
+ break;
default:
DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
p_dev->device_id);
@@ -5545,8 +7374,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
/* For some reason we have problems with this register
* in BB B0 emulation; Simply assume no CMT
*/
- DP_NOTICE(p_dev->hwfns, false,
- "device on emul - assume no CMT\n");
+ DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n");
p_dev->num_hwfns = 1;
}
#endif
@@ -5558,7 +7386,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
DP_INFO(p_dev->hwfns,
"Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
- ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ ECORE_IS_BB(p_dev) ? "BB" : ECORE_IS_AH(p_dev) ? "AH" : "E5",
'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
@@ -5568,6 +7396,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
"The chip type/rev (BB A0) is not supported!\n");
return ECORE_ABORTED;
}
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
@@ -5581,7 +7410,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
/* MISCS_REG_ECO_RESERVED[28]: emulation build w/ or w/o MAC */
p_dev->b_is_emul_mac = !!(tmp & (1 << 28));
- DP_NOTICE(p_hwfn, false,
+ DP_NOTICE(p_hwfn, false,
"Emulation: Running on a %s build %s MAC\n",
p_dev->b_is_emul_full ? "full" : "reduced",
p_dev->b_is_emul_mac ? "with" : "without");
@@ -5591,8 +7420,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-#ifndef LINUX_REMOVE
-void ecore_prepare_hibernate(struct ecore_dev *p_dev)
+void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev)
{
int j;
@@ -5602,19 +7430,41 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
- "Mark hw/fw uninitialized\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n");
p_hwfn->hw_init_done = false;
ecore_ptt_invalidate(p_hwfn);
}
}
-#endif
+
+void ecore_hw_hibernate_resume(struct ecore_dev *p_dev)
+{
+ int j = 0;
+
+ if (IS_VF(p_dev))
+ return;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+
+ ecore_hw_hwfn_prepare(p_hwfn);
+
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false, "ptt acquire failed\n");
+ } else {
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n");
+ }
+}
static enum _ecore_status_t
ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
void OSAL_IOMEM *p_doorbells, u64 db_phys_addr,
+ unsigned long db_size,
struct ecore_hw_prepare_params *p_params)
{
struct ecore_mdump_retain_data mdump_retain;
@@ -5626,14 +7476,18 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
p_hwfn->db_phys_addr = db_phys_addr;
+ p_hwfn->db_size = db_size;
+ p_hwfn->reg_offset = (u8 *)p_hwfn->regview -
+ (u8 *)p_hwfn->p_dev->regview;
+ p_hwfn->db_offset = (u8 *)p_hwfn->doorbells -
+ (u8 *)p_hwfn->p_dev->doorbells;
if (IS_VF(p_dev))
return ecore_vf_hw_prepare(p_hwfn, p_params);
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
- DP_ERR(p_hwfn,
- "Reading the ME register returns all Fs; Preventing further chip access\n");
+ DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
return ECORE_INVAL;
@@ -5677,6 +7531,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
if (val != 1) {
DP_ERR(p_hwfn,
"PTT and GTT init in PGLUE_B didn't complete\n");
+ rc = ECORE_BUSY;
goto err1;
}
@@ -5710,21 +7565,19 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
goto err2;
}
- /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
- * called, since among others it sets the ports number in an engine.
- */
if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
!p_dev->recov_in_prog) {
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
- /* Workaround for MFW issue where PF FLR does not cleanup
- * IGU block
- */
- if (!(p_hwfn->mcp_info->capabilities &
- FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
- ecore_pf_flr_igu_cleanup(p_hwfn);
+ /* NVRAM info initialization and population */
+ rc = ecore_mcp_nvm_info_populate(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to populate nvm info shadow\n");
+ goto err2;
}
/* Check if mdump logs/data are present and update the epoch value */
@@ -5733,7 +7586,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
&mdump_info);
if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
DP_NOTICE(p_hwfn, false,
- "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ "mdump data available.\n");
rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
&mdump_retain);
@@ -5753,8 +7606,9 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
- goto err2;
+ goto err3;
}
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
if (ECORE_IS_AH(p_dev)) {
@@ -5772,6 +7626,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
#endif
return rc;
+err3:
+ ecore_mcp_nvm_info_free(p_hwfn);
err2:
if (IS_LEAD_HWFN(p_hwfn))
ecore_iov_free_hw_info(p_dev);
@@ -5789,9 +7645,13 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
enum _ecore_status_t rc;
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ p_dev->monitored_hw_addr = p_params->monitored_hw_addr;
p_dev->allow_mdump = p_params->allow_mdump;
p_hwfn->b_en_pacing = p_params->b_en_pacing;
p_dev->b_is_target = p_params->b_is_target;
+ p_hwfn->roce_edpm_mode = p_params->roce_edpm_mode;
+ p_hwfn->num_vf_cnqs = p_params->num_vf_cnqs;
+ p_hwfn->fs_accuracy = p_params->fs_accuracy;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -5799,7 +7659,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
/* Initialize the first hwfn - will learn number of hwfns */
rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
p_dev->doorbells, p_dev->db_phys_addr,
- p_params);
+ p_dev->db_size, p_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -5825,13 +7685,14 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
db_phys_addr = p_dev->db_phys_addr + offset;
p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
+ p_dev->hwfns[1].num_vf_cnqs = p_params->num_vf_cnqs;
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
p_doorbell, db_phys_addr,
- p_params);
+ p_dev->db_size, p_params);
/* in case of error, need to free the previously
- * initiliazed hwfn 0.
+ * initialized hwfn 0.
*/
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
@@ -5840,6 +7701,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
if (IS_PF(p_dev)) {
ecore_init_free(p_hwfn);
+ ecore_mcp_nvm_info_free(p_hwfn);
ecore_mcp_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
} else {
@@ -5854,12 +7716,13 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
void ecore_hw_remove(struct ecore_dev *p_dev)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
int i;
- if (IS_PF(p_dev))
+ if (IS_PF(p_dev)) {
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
- ECORE_OV_DRIVER_STATE_NOT_LOADED);
+ ECORE_OV_DRIVER_STATE_NOT_LOADED);
+ }
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -5870,6 +7733,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
}
ecore_init_free(p_hwfn);
+ ecore_mcp_nvm_info_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
ecore_mcp_free(p_hwfn);
@@ -5878,6 +7742,9 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
#endif
}
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_dev->internal_trace.lock);
+#endif
ecore_iov_free_hw_info(p_dev);
}
@@ -5903,7 +7770,7 @@ static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
- ECORE_CHAIN_PAGE_SIZE);
+ p_chain->page_size);
p_virt = p_virt_next;
p_phys = p_phys_next;
@@ -6168,11 +8035,16 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
u16 src_id, u16 *dst_id)
{
+ if (!RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn, false, "No L2 queue is available\n");
+ return ECORE_INVAL;
+ }
+
if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
u16 min, max;
min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
- max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - 1;
DP_NOTICE(p_hwfn, true,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
src_id, min, max);
@@ -6188,11 +8060,16 @@ enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
u8 src_id, u8 *dst_id)
{
+ if (!RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, false, "No vport is available\n");
+ return ECORE_INVAL;
+ }
+
if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
u8 min, max;
min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
- max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
+ max = min + RESC_NUM(p_hwfn, ECORE_VPORT) - 1;
DP_NOTICE(p_hwfn, true,
"vport id [%d] is not valid, available indices [%d - %d]\n",
src_id, min, max);
@@ -6208,11 +8085,16 @@ enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 src_id, u8 *dst_id)
{
+ if (!RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
+ DP_NOTICE(p_hwfn, false, "No RSS engine is available\n");
+ return ECORE_INVAL;
+ }
+
if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
u8 min, max;
min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
- max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG) - 1;
DP_NOTICE(p_hwfn, true,
"rss_eng id [%d] is not valid, available indices [%d - %d]\n",
src_id, min, max);
@@ -6229,17 +8111,17 @@ enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (OSAL_GET_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
1 << p_hwfn->abs_pf_id / 2);
ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
return ECORE_SUCCESS;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "This function can't be set as default\n");
+ return ECORE_INVAL;
}
-
- DP_NOTICE(p_hwfn, false,
- "This function can't be set as default\n");
- return ECORE_INVAL;
}
static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
@@ -6366,7 +8248,6 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
return ECORE_INVAL;
}
-
timeset = (u8)(coalesce >> timer_res);
rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
@@ -6385,7 +8266,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
/* Calculate final WFQ values for all vports and configure it.
* After this configuration each vport must have
- * approx min rate = wfq * min_pf_rate / ECORE_WFQ_UNIT
+ * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
*/
static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -6400,7 +8281,7 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
vport_params[i].wfq = (wfq_speed * ECORE_WFQ_UNIT) /
- min_pf_rate;
+ min_pf_rate;
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].wfq);
@@ -6408,6 +8289,7 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
}
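A worked example of the WFQ arithmetic above (all numbers are illustrative; the unit of 100 follows from the one-percent checks in ecore_init_wfq_param() further below): a vport asking for 2500 Mbps out of a 10000 Mbps PF minimum gets a weight of 25, and plugging that weight back into the comment's formula returns approximately the requested rate.

#include <stdint.h>
#include <stdio.h>

#define WFQ_UNIT 100    /* stands in for ECORE_WFQ_UNIT */

int main(void)
{
        uint32_t min_pf_rate = 10000;   /* PF minimum rate [Mbps] */
        uint32_t wfq_speed = 2500;      /* requested vport minimum [Mbps] */
        uint32_t wfq, approx_min_rate;

        wfq = (wfq_speed * WFQ_UNIT) / min_pf_rate;             /* 25 */
        approx_min_rate = (wfq * min_pf_rate) / WFQ_UNIT;       /* 2500 */
        printf("wfq %u -> approx min rate %u Mbps\n", wfq, approx_min_rate);
        return 0;
}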
static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
int i;
@@ -6447,8 +8329,7 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
num_vports = p_hwfn->qm_info.num_vports;
-/* Accounting for the vports which are configured for WFQ explicitly */
-
+ /* Accounting for the vports which are configured for WFQ explicitly */
for (i = 0; i < num_vports; i++) {
u32 tmp_speed;
@@ -6466,24 +8347,23 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
/* validate possible error cases */
if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
- vport_id, req_rate, min_pf_rate);
+ DP_ERR(p_hwfn,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
return ECORE_INVAL;
}
/* TBD - for number of vports greater than 100 */
if (num_vports > ECORE_WFQ_UNIT) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Number of vports is greater than %d\n",
- ECORE_WFQ_UNIT);
+ DP_ERR(p_hwfn, "Number of vports is greater than %d\n",
+ ECORE_WFQ_UNIT);
return ECORE_INVAL;
}
if (total_req_min_rate > min_pf_rate) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
- total_req_min_rate, min_pf_rate);
+ DP_ERR(p_hwfn,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
return ECORE_INVAL;
}
@@ -6493,9 +8373,9 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
/* validate if non requested get < 1% of min bw */
if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
- left_rate_per_vp, min_pf_rate);
+ DP_ERR(p_hwfn,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
return ECORE_INVAL;
}
@@ -6621,8 +8501,8 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
/* TBD - for multiple hardware functions - that is 100 gig */
if (ECORE_IS_CMT(p_dev)) {
- DP_VERBOSE(p_dev, ECORE_MSG_LINK,
- "WFQ configuration is not supported for this device\n");
+ DP_ERR(p_dev,
+ "WFQ configuration is not supported for this device\n");
return;
}
@@ -6778,7 +8658,7 @@ void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) *
- p_hwfn->qm_info.num_vports);
+ p_hwfn->qm_info.num_vports);
}
int ecore_device_num_engines(struct ecore_dev *p_dev)
@@ -6804,6 +8684,15 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[1] = mac[4];
}
+void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable)
+{
+ if (p_dev->recov_in_prog != !b_enable) {
+ DP_INFO(p_dev, "%s access to the device\n",
+ b_enable ? "Enable" : "Disable");
+ p_dev->recov_in_prog = !b_enable;
+ }
+}
+
void ecore_set_platform_str(struct ecore_hwfn *p_hwfn,
char *buf_str, u32 buf_size)
{
@@ -6819,5 +8708,19 @@ void ecore_set_platform_str(struct ecore_hwfn *p_hwfn,
bool ecore_is_mf_fip_special(struct ecore_dev *p_dev)
{
- return !!OSAL_GET_BIT(ECORE_MF_FIP_SPECIAL, &p_dev->mf_bits);
+ return !!OSAL_TEST_BIT(ECORE_MF_FIP_SPECIAL, &p_dev->mf_bits);
}
+
+bool ecore_is_dscp_to_tc_capable(struct ecore_dev *p_dev)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_DSCP_TO_TC_MAP, &p_dev->mf_bits);
+}
+
+u8 ecore_get_num_funcs_on_engine(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->num_funcs_on_engine;
+}
+
+#ifdef _NTDDK_
+#pragma warning(pop)
+#endif
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__
@@ -11,6 +11,15 @@
#include "ecore_chain.h"
#include "ecore_int_api.h"
+#define ECORE_DEFAULT_ILT_PAGE_SIZE 4
+
+struct ecore_wake_info {
+ u32 wk_info;
+ u32 wk_details;
+ u32 wk_pkt_len;
+ u8 wk_buffer[256];
+};
+
/**
* @brief ecore_init_dp - initialize the debug level
*
@@ -24,6 +33,26 @@ void ecore_init_dp(struct ecore_dev *p_dev,
u8 dp_level,
void *dp_ctx);
+/**
+ * @brief ecore_init_int_dp - initialize the internal debug level
+ *
+ * @param p_dev
+ * @param dp_module
+ * @param dp_level
+ */
+void ecore_init_int_dp(struct ecore_dev *p_dev,
+ u32 dp_module,
+ u8 dp_level);
+
+/**
+ * @brief ecore_dp_internal_log - store into internal log
+ *
+ * @param cdev
+ * @param fmt - printf-style format string
+ */
+void ecore_dp_internal_log(struct ecore_dev *cdev, char *fmt, ...);
+
/**
* @brief ecore_init_struct - initialize the device structure to
* its defaults
@@ -84,7 +113,7 @@ struct ecore_drv_load_params {
#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
/* Action to take in case the MFW doesn't support timeout values other
- * than default and none.
+ * than default and none.
*/
enum ecore_mfw_timeout_fallback mfw_timeout_fallback;
@@ -104,10 +133,12 @@ struct ecore_hw_init_params {
/* Interrupt mode [msix, inta, etc.] to use */
enum ecore_int_mode int_mode;
- /* NPAR tx switching to be used for vports configured for tx-switching
- */
+ /* NPAR tx switching to be used for vports configured for tx-switching */
bool allow_npar_tx_switch;
+ /* PCI relax ordering to be configured by MFW or ecore client */
+ enum ecore_pci_rlx_odr pci_rlx_odr_mode;
+
/* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
@@ -161,61 +192,23 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
*/
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
-#ifndef LINUX_REMOVE
/**
- * @brief ecore_prepare_hibernate -should be called when
+ * @brief ecore_hw_hibernate_prepare - should be called when
* the system is going into the hibernate state
*
* @param p_dev
*
*/
-void ecore_prepare_hibernate(struct ecore_dev *p_dev);
-
-enum ecore_db_rec_width {
- DB_REC_WIDTH_32B,
- DB_REC_WIDTH_64B,
-};
-
-enum ecore_db_rec_space {
- DB_REC_KERNEL,
- DB_REC_USER,
-};
+void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * @brief ecore_hw_hibernate_resume - should be called when the system is
+ *        resuming from D3 power state and before calling ecore_hw_init.
*
- * @param p_dev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_width - doorbell is 32b pr 64b
- * @param db_space - doorbell recovery addresses are user or kernel space
- */
-enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
- void OSAL_IOMEM *db_addr,
- void *db_data,
- enum ecore_db_rec_width db_width,
- enum ecore_db_rec_space db_space);
-
-/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
- * recovery mechanism. db_data serves as key (db_addr is not unique).
+ * @param p_hwfn
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
- * entry to delete.
*/
-enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
- void OSAL_IOMEM *db_addr,
- void *db_data);
-
-static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
-{
- return !!OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
-}
-
-#endif
+void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);
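A hedged sketch of the suspend/resume ordering implied by the two declarations above (hook names are illustrative, error handling is trimmed, and the ecore_hw_init() parameters are assumed to be prepared by the caller):

/* Called on the way down to D3: mark hw/fw uninitialized. */
static void example_suspend(struct ecore_dev *p_dev)
{
        ecore_hw_hibernate_prepare(p_dev);
        /* ... place the device into D3 ... */
}

/* Called on the way back up, before re-initializing the device. */
static enum _ecore_status_t example_resume(struct ecore_dev *p_dev,
                                           struct ecore_hw_init_params *p_params)
{
        ecore_hw_hibernate_resume(p_dev);       /* re-learn MCP offsets etc. */

        return ecore_hw_init(p_dev, p_params);
}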
/**
* @brief ecore_hw_start_fastpath -restart fastpath traffic,
@@ -246,6 +239,12 @@ enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_BAD_IGU,
};
+enum ECORE_ROCE_EDPM_MODE {
+ ECORE_ROCE_EDPM_MODE_ENABLE = 0,
+ ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
+ ECORE_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
struct ecore_hw_prepare_params {
/* Personality to initialize */
int personality;
@@ -256,6 +255,9 @@ struct ecore_hw_prepare_params {
/* Check the reg_fifo after any register access */
bool chk_reg_fifo;
+	/* Address monitored by ecore_rd()/ecore_wr() */
+ u32 monitored_hw_addr;
+
/* Request the MFW to initiate PF FLR */
bool initiate_pf_flr;
@@ -278,8 +280,20 @@ struct ecore_hw_prepare_params {
/* Indicates whether this PF serves a storage target */
bool b_is_target;
+ /* EDPM can be enabled/forced_on/disabled */
+ u8 roce_edpm_mode;
+
/* retry count for VF acquire on channel timeout */
u8 acquire_retry_cnt;
+
+	/* Number of VF CNQ resources that will be requested */
+ u8 num_vf_cnqs;
+
+ /* Flow steering statistics accuracy */
+ u8 fs_accuracy;
+
+ /* Disable SRIOV */
+ bool b_sriov_disable;
};
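A hedged sketch of how a client could fill some of the new preparation knobs before probing (all values are examples only; ECORE_PCI_ETH is used simply because a plain L2 personality needs none of the RDMA-related fields):

static enum _ecore_status_t example_hw_prepare(struct ecore_dev *p_dev)
{
        struct ecore_hw_prepare_params params;

        OSAL_MEMSET(&params, 0, sizeof(params));
        params.personality = ECORE_PCI_ETH;     /* plain L2 */
        params.initiate_pf_flr = true;
        params.allow_mdump = true;
        params.roce_edpm_mode = ECORE_ROCE_EDPM_MODE_ENABLE;
        params.num_vf_cnqs = 0;                 /* no VF RDMA CNQs requested */
        params.b_sriov_disable = true;          /* skip SR-IOV discovery */

        return ecore_hw_prepare(p_dev, &params);
}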
/**
@@ -300,6 +314,43 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
*/
void ecore_hw_remove(struct ecore_dev *p_dev);
+/**
+ * @brief ecore_set_nwuf_reg -
+ *
+ * @param p_dev
+ * @param reg_idx - Index of the pattern register
+ * @param pattern_size - size of pattern
+ * @param crc - CRC value of pattern & mask
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
+ u32 reg_idx, u32 pattern_size, u32 crc);
+
+/**
+ * @brief ecore_get_wake_info - get magic packet buffer
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param wake_info - pointer to ecore_wake_info buffer
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_wake_info *wake_info);
+
+/**
+ * @brief ecore_wol_buffer_clear - Clear magic packet buffer
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return void
+ */
+void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
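A hedged usage sketch for the wake-up helpers above (the flow is illustrative; ecore_ptt_acquire()/ecore_ptt_release() are the PTT helpers declared further down in this header): read the magic-packet details, log the captured length, then clear the buffer.

static void example_dump_wake_info(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
        struct ecore_wake_info wake_info;

        if (!p_ptt)
                return;

        if (ecore_get_wake_info(p_hwfn, p_ptt, &wake_info) == ECORE_SUCCESS)
                DP_INFO(p_hwfn, "wake packet length %u\n", wake_info.wk_pkt_len);

        ecore_wol_buffer_clear(p_hwfn, p_ptt);
        ecore_ptt_release(p_hwfn, p_ptt);
}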
/**
* @brief ecore_ptt_acquire - Allocate a PTT window
*
@@ -325,6 +376,18 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+/**
+ * @brief ecore_get_dev_name - get device name, e.g., "BB B0"
+ *
+ * @param p_dev
+ * @param name - this is where the name will be written to
+ * @param max_chars - maximum chars that can be written to name including '\0'
+ */
+void ecore_get_dev_name(struct ecore_dev *p_dev,
+ u8 *name,
+ u8 max_chars);
+
+#ifndef __EXTRACT__LINUX__IF__
struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
@@ -417,6 +480,7 @@ struct ecore_eth_stats {
struct ecore_eth_stats_ah ah;
};
};
+#endif
/**
* @brief ecore_chain_alloc - Allocate and initialize a chain
@@ -488,6 +552,7 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
+#define ECORE_LLH_DONT_CARE 0
/**
* @brief ecore_llh_get_num_ppfid - Return the allocated number of LLH filter
* banks that are allocated to the PF.
@@ -548,7 +613,7 @@ enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
- u8 mac_addr[ETH_ALEN]);
+ u8 mac_addr[ECORE_ETH_ALEN]);
/**
* @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
@@ -559,7 +624,7 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
* @param mac_addr - MAC to remove
*/
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
- u8 mac_addr[ETH_ALEN]);
+ u8 mac_addr[ECORE_ETH_ALEN]);
enum ecore_llh_prot_filter_type_t {
ECORE_LLH_FILTER_ETHERTYPE,
@@ -571,6 +636,30 @@ enum ecore_llh_prot_filter_type_t {
ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};
+/**
+ * @brief ecore_llh_add_dst_tcp_port_filter - Add a destination tcp port
+ * LLH filter into the given filter bank.
+ *
+ * @param p_dev
+ * @param dest_port - destination port to add
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_llh_add_dst_tcp_port_filter(struct ecore_dev *p_dev, u16 dest_port);
+
+/**
+ * @brief ecore_llh_add_src_tcp_port_filter - Add a source tcp port filter
+ * into the given filter bank.
+ *
+ * @param p_dev
+ * @param src_port - source port to add
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_llh_add_src_tcp_port_filter(struct ecore_dev *p_dev, u16 src_port);
+
/**
* @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
* given filter bank.
@@ -588,6 +677,30 @@ ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
enum ecore_llh_prot_filter_type_t type,
u16 source_port_or_eth_type, u16 dest_port);
+/**
+ * @brief ecore_llh_remove_dst_tcp_port_filter - Remove a destination tcp
+ * port LLH filter from the given filter bank.
+ *
+ * @param p_dev
+ * @param dest_port - destination port
+ *
+ * @return void
+ */
+void ecore_llh_remove_dst_tcp_port_filter(struct ecore_dev *p_dev,
+ u16 dest_port);
+
+/**
+ * @brief ecore_llh_remove_src_tcp_port_filter - Remove a source tcp
+ * port LLH filter from the given filter bank.
+ *
+ * @param p_dev
+ * @param src_port - source port
+ *
+ * @return void
+ */
+void ecore_llh_remove_src_tcp_port_filter(struct ecore_dev *p_dev,
+ u16 src_port);
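+
+/* A minimal usage sketch of the TCP port filter helpers above (illustrative
+ * only; the port number is an arbitrary example chosen by the caller):
+ *
+ *	rc = ecore_llh_add_dst_tcp_port_filter(p_dev, 3260);
+ *	...
+ *	ecore_llh_remove_dst_tcp_port_filter(p_dev, 3260);
+ */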
+
/**
* @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
* the given filter bank.
@@ -677,6 +790,16 @@ enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
u16 tx_coal, void *p_handle);
+/**
+ * @brief - Recalculate feature distributions based on HW resources and
+ * user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and VF_L2_QUE.
+ * As a result, this must not be called while RDMA is active or while VFs
+ * are enabled.
+ *
+ * @param p_hwfn
+ */
+void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);
+
/**
* @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
*
@@ -690,6 +813,116 @@ enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_enable);
+#ifndef __EXTRACT__LINUX__IF__
+enum ecore_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum ecore_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+#endif
+
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data);
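+
+/* A minimal usage sketch (illustrative only; db_addr and db_data are assumed
+ * to be owned by the caller for the lifetime of the entry):
+ *
+ *	rc = ecore_db_recovery_add(p_dev, db_addr, &db_data,
+ *				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ *	...
+ *	rc = ecore_db_recovery_del(p_dev, db_addr, &db_data);
+ */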
+
+#ifndef __EXTRACT__LINUX__THROW__
+static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
+}
+#endif
+
+/**
+ * @brief ecore_set_dev_access_enable - Enable or disable access to the device
+ *
+ * @param p_dev
+ * @param b_enable - true/false
+ */
+void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable);
+
+/**
+ * @brief ecore_set_ilt_page_size - Set ILT page size
+ *
+ * @param p_dev
+ * @param ilt_size
+ *
+ * @return void
+ */
+void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_size);
+
+/**
+ * @brief Create Lag
+ *
+ * Should be called when two ports of the same device are bonded or
+ * unbonded, or when the link status changes.
+ *
+ * @param lag_type: LAG_TYPE_NONE: Disable lag
+ * LAG_TYPE_ACTIVEACTIVE: Utilize all ports
+ * LAG_TYPE_ACTIVEBACKUP: Configure all queues to
+ * active port
+ * @param active_ports: Bitmap, each bit represents whether the
+ * port is active or not (1 - active )
+ * @param link_change_cb: Callback function to call if port
+ * settings change such as dcbx.
+ * @param cxt: Parameter will be passed to the
+ * link_change_cb function
+ *
+ * @param dev
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_lag_create(struct ecore_dev *dev,
+ enum ecore_lag_type lag_type,
+ void (*link_change_cb)(void *cxt),
+ void *cxt,
+ u8 active_ports);
+/**
+ * @brief Modify lag link status of a given port
+ *
+ * @param port_id:		the port id that changed
+ * @param link_active: current link state
+ */
+enum _ecore_status_t ecore_lag_modify(struct ecore_dev *dev,
+ u8 port_id,
+ u8 link_active);
+
+/**
+ * @brief Exit lag mode
+ *
+ * @param dev
+ */
+enum _ecore_status_t ecore_lag_destroy(struct ecore_dev *dev);
+
+bool ecore_lag_is_active(struct ecore_hwfn *p_hwfn);
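+
+/* A minimal lifecycle sketch of the LAG API above (illustrative only;
+ * my_link_cb and my_cxt are placeholder names, 0x3 marks ports 0 and 1 as
+ * active, and the ecore_lag_modify() call reports that port 0 lost link):
+ *
+ *	rc = ecore_lag_create(dev, LAG_TYPE_ACTIVEACTIVE,
+ *			      my_link_cb, my_cxt, 0x3);
+ *	...
+ *	rc = ecore_lag_modify(dev, 0, 0);
+ *	...
+ *	rc = ecore_lag_destroy(dev);
+ */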
+
/**
* @brief Whether FIP discovery fallback special mode is enabled or not.
*
@@ -698,4 +931,23 @@ enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
* @return true if device is in FIP special mode, false otherwise.
*/
bool ecore_is_mf_fip_special(struct ecore_dev *p_dev);
+
+/**
+ * @brief Whether device allows DSCP to TC mapping or not.
+ *
+ * @param p_dev
+ *
+ * @return true if device allows dscp to tc mapping.
+ */
+bool ecore_is_dscp_to_tc_capable(struct ecore_dev *p_dev);
+
+/**
+ * @brief Returns the number of PFs on the engine.
+ *
+ * @param p_hwfn
+ *
+ * @return u8 - Number of PFs.
+ */
+u8 ecore_get_num_funcs_on_engine(struct ecore_hwfn *p_hwfn);
+
#endif
@@ -1,60 +1,75 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef GTT_REG_ADDR_H
#define GTT_REG_ADDR_H
-/* Win 2 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+/**
+ * Win 2
+ */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL /* Access:RW DataWidth:0x20 */
-/* Win 3 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+/**
+ * Win 3
+ */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL /* Access:RW DataWidth:0x20 */
-/* Win 4 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+/**
+ * Win 4
+ */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL /* Access:RW DataWidth:0x20 */
-/* Win 5 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+/**
+ * Win 5
+ */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL /* Access:RW DataWidth:0x20 */
-/* Win 6 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL
+/**
+ * Win 6
+ */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL /* Access:RW DataWidth:0x20 */
-/* Win 7 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL
+/**
+ * Win 7
+ */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL /* Access:RW DataWidth:0x20 */
-/* Win 8 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL
+/**
+ * Win 8
+ */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL /* Access:RW DataWidth:0x20 */
-/* Win 9 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL
+/**
+ * Win 9
+ */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL /* Access:RW DataWidth:0x20 */
-/* Win 10 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL
+/**
+ * Win 10
+ */
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL /* Access:RW DataWidth:0x20 */
-/* Win 11 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL
+/**
+ * Win 11
+ */
+#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL /* Access:RW DataWidth:0x20 */
-/* Win 12 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL
+/**
+ * Win 12
+ */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL /* Access:RW DataWidth:0x20 */
-/* Win 13 */
-//Access:RW DataWidth:0x20 //
-#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+/**
+ * Win 13
+ */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL /* Access:RW DataWidth:0x20 */
-/* Win 14 */
+/**
+ * Win 14
+ */
+#define GTT_BAR0_MAP_REG_IGU_CMD_EXT_E5 0x01b000UL /* Access:RW DataWidth:0x20 */
#endif
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __PREVENT_PXP_GLOBAL_WIN__
static u32 pxp_global_win[] = {
@@ -3191,7 +3191,7 @@ enum nvm_image_type {
NVM_TYPE_INIT_HW = 0x19,
NVM_TYPE_DEFAULT_CFG = 0x1a,
NVM_TYPE_MDUMP = 0x1b,
- NVM_TYPE_META = 0x1c,
+ NVM_TYPE_NVM_META = 0x1c, /* @DPDK */
NVM_TYPE_ISCSI_CFG = 0x1d,
NVM_TYPE_FCOE_CFG = 0x1f,
NVM_TYPE_ETH_PHY_FW1 = 0x20,
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
@@ -14,6 +14,14 @@
#include "ecore_iov_api.h"
#include "ecore_gtt_values.h"
#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+
+#ifdef _NTDDK_
+#pragma warning(push)
+#pragma warning(disable : 28167)
+#pragma warning(disable : 28123)
+#pragma warning(disable : 28121)
+#endif
#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
@@ -26,19 +34,19 @@
#define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))
struct ecore_ptt {
- osal_list_entry_t list_entry;
- unsigned int idx;
- struct pxp_ptt_entry pxp;
- u8 hwfn_id;
+ osal_list_entry_t list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
};
struct ecore_ptt_pool {
- osal_list_t free_list;
- osal_spinlock_t lock; /* ptt synchronized access */
- struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+ osal_list_t free_list;
+ osal_spinlock_t lock; /* ptt synchronized access */
+ struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
-void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+static void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = OSAL_NULL;
@@ -71,12 +79,13 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock, "ptt_lock")) {
__ecore_ptt_pool_free(p_hwfn);
return ECORE_NOMEM;
}
#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
+
return ECORE_SUCCESS;
}
@@ -122,10 +131,10 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
/* Take the free PTT from the list */
for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+
if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
- p_ptt = OSAL_LIST_FIRST_ENTRY(
- &p_hwfn->p_ptt_pool->free_list,
- struct ecore_ptt, list_entry);
+ p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
+ struct ecore_ptt, list_entry);
OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
&p_hwfn->p_ptt_pool->free_list);
@@ -141,17 +150,15 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
OSAL_MSLEEP(1);
}
- DP_NOTICE(p_hwfn, true,
- "PTT acquire timeout - failed to allocate PTT\n");
+ DP_NOTICE(p_hwfn, true, "PTT acquire timeout - failed to allocate PTT\n");
return OSAL_NULL;
}
-void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
/* This PTT should not be set to pretend if it is being released */
- /* TODO - add some pretend sanity checks, to make sure pretend
- * isn't set on this ptt
- */
+ /* TODO - add some pretend sanity checks, to make sure pretend isn't set on this ptt */
OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
@@ -167,17 +174,18 @@ static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
return PXP_PF_WINDOW_ADMIN_PER_PF_START +
- p_ptt->idx * sizeof(struct pxp_ptt_entry);
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
return PXP_EXTERNAL_BAR_PF_WINDOW_START +
- p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}
void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 new_hw_addr)
+ struct ecore_ptt *p_ptt,
+ u32 new_hw_addr)
{
u32 prev_hw_addr;
@@ -201,7 +209,8 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
}
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 hw_addr)
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr)
{
u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
u32 offset;
@@ -257,8 +266,8 @@ static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
return is_empty;
}
-void ecore_wr(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
+void ecore_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr,
+ u32 val)
{
bool prev_fifo_err;
u32 bar_addr;
@@ -271,13 +280,16 @@ void ecore_wr(struct ecore_hwfn *p_hwfn,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
bar_addr, hw_addr, val);
+ OSAL_WARN((hw_addr == p_hwfn->p_dev->monitored_hw_addr),
+ "accessed hw_addr 0x%x\n", hw_addr);
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
OSAL_UDELAY(100);
#endif
OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
- "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
+		  "reg_fifo error was caused by a write to hw_addr 0x%x, val 0x%x\n",
hw_addr, val);
}
@@ -295,6 +307,9 @@ u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
bar_addr, hw_addr, val);
+ OSAL_WARN((hw_addr == p_hwfn->p_dev->monitored_hw_addr),
+ "accessed hw_addr 0x%x\n", hw_addr);
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
OSAL_UDELAY(100);
@@ -307,12 +322,20 @@ u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
return val;
}
+enum memcpy_action {
+ TO_DEVICE,
+ FROM_DEVICE,
+ MEMZERO_DEVICE
+};
+
static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
void *addr,
- u32 hw_addr, osal_size_t n, bool to_device)
+ u32 hw_addr,
+ osal_size_t n,
+ enum memcpy_action action)
{
- u32 dw_count, *host_addr, hw_offset;
+ u32 dw_count, *host_addr = OSAL_NULL, hw_offset;
osal_size_t quota, done = 0;
u32 OSAL_IOMEM *reg_addr;
@@ -328,16 +351,30 @@ static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
}
dw_count = quota / 4;
- host_addr = (u32 *)((u8 *)addr + done);
+ if (addr)
+ host_addr = (u32 *)((u8 *)addr + done);
reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
- if (to_device)
+ switch (action) {
+ case TO_DEVICE:
while (dw_count--)
- DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
- else
+ DIRECT_REG_WR(p_hwfn, reg_addr++,
+ *host_addr++);
+ break;
+ case FROM_DEVICE:
while (dw_count--)
- *host_addr++ = DIRECT_REG_RD(p_hwfn,
- reg_addr++);
+ *host_addr++ =
+ DIRECT_REG_RD(p_hwfn, reg_addr++);
+ break;
+ case MEMZERO_DEVICE:
+ while (dw_count--)
+ DIRECT_REG_WR(p_hwfn, reg_addr++, 0);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Invalid memcpy_action %d\n", action);
+ return;
+ }
done += quota;
}
@@ -351,7 +388,7 @@ void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
hw_addr, dest, hw_addr, (unsigned long)n);
- ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+ ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, FROM_DEVICE);
}
void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
@@ -362,7 +399,19 @@ void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
hw_addr, hw_addr, src, (unsigned long)n);
- ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+ ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, TO_DEVICE);
+}
+
+void ecore_memzero_hw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, size %lu\n",
+ hw_addr, hw_addr, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, OSAL_NULL, hw_addr, n, MEMZERO_DEVICE);
}
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
@@ -373,8 +422,9 @@ void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
-/* Every pretend undos prev pretends, including previous port pretend */
-
+	/* Every pretend undoes previous pretends, including
+ * previous port pretend.
+ */
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
@@ -388,7 +438,7 @@ void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, pretend),
- *(u32 *)&p_ptt->pxp.pretend);
+ *(u32 *)&p_ptt->pxp.pretend);
}
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
@@ -404,10 +454,11 @@ void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, pretend),
- *(u32 *)&p_ptt->pxp.pretend);
+ *(u32 *)&p_ptt->pxp.pretend);
}
-void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u16 control = 0;
@@ -420,7 +471,7 @@ void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, pretend),
- *(u32 *)&p_ptt->pxp.pretend);
+ *(u32 *)&p_ptt->pxp.pretend);
}
void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
@@ -458,23 +509,15 @@ u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
return concrete_fid;
}
-/* Not in use @DPDK
- * Ecore HW lock
- * =============
- * Although the implementation is ready, today we don't have any flow that
- * utliizes said locks - and we want to keep it this way.
- * If this changes, this needs to be revisted.
- */
-
/* DMAE */
#define ECORE_DMAE_FLAGS_IS_SET(params, flag) \
((params) != OSAL_NULL && \
GET_FIELD((params)->flags, DMAE_PARAMS_##flag))
-static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
- const u8 is_src_type_grc,
- const u8 is_dst_type_grc,
+static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
struct dmae_params *p_params)
{
u8 src_pf_id, dst_pf_id, port_id;
@@ -485,62 +528,61 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
- opcode |= (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie) <<
- DMAE_CMD_SRC_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_SRC,
+ (is_src_type_grc ? dmae_cmd_src_grc
+ : dmae_cmd_src_pcie));
src_pf_id = ECORE_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
p_params->src_pf_id : p_hwfn->rel_pf_id;
- opcode |= (src_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
- DMAE_CMD_SRC_PF_ID_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pf_id);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
- opcode |= (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie) <<
- DMAE_CMD_DST_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_DST,
+ (is_dst_type_grc ? dmae_cmd_dst_grc
+ : dmae_cmd_dst_pcie));
dst_pf_id = ECORE_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
p_params->dst_pf_id : p_hwfn->rel_pf_id;
- opcode |= (dst_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
- DMAE_CMD_DST_PF_ID_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pf_id);
/* DMAE_E4_TODO need to check which value to specify here. */
- /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
+ /* SET_FIELD(opcode, DMAE_CMD_C_DST, !b_complete_to_host);*/
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
- opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
- opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
- opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
/* swapping mode 3 - big endian there should be a define ifdefed in
* the HSI somewhere. Since it is currently
*/
- opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
p_params->port_id : p_hwfn->port_id;
- opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
/* reset source address in next go */
- opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
/* reset dest address in next go */
- opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
if (ECORE_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
- opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
- opcode_b |= (p_params->src_vf_id << DMAE_CMD_SRC_VF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vf_id);
} else {
- opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT);
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
}
if (ECORE_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
- opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->dst_vf_id << DMAE_CMD_DST_VF_ID_SHIFT;
+ SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vf_id);
} else {
- opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
@@ -549,7 +591,8 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
- OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
+ OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) !=
+ 31 * 4);
/* All the DMAE 'go' registers form an array in internal memory */
return DMAE_REG_GO_C0 + (idx << 2);
@@ -567,8 +610,7 @@ static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
DP_NOTICE(p_hwfn, true,
"source or destination address 0 idx_cmd=%d\n"
- "opcode = [0x%08x,0x%04x] len=0x%x"
- " src=0x%x:%x dst=0x%x:%x\n",
+ "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
OSAL_LE32_TO_CPU(p_command->opcode),
OSAL_LE16_TO_CPU(p_command->opcode_b),
@@ -582,8 +624,7 @@ static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
- "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
OSAL_LE32_TO_CPU(p_command->opcode),
OSAL_LE16_TO_CPU(p_command->opcode_b),
@@ -602,7 +643,7 @@ static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
*/
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
- *(((u32 *)p_command) + i) : 0;
+ *(((u32 *)p_command) + i) : 0;
ecore_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
@@ -611,7 +652,8 @@ static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
}
ecore_wr(p_hwfn, p_ptt,
- ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+ ecore_dmae_idx_to_go_cmd(idx_cmd),
+ DMAE_GO_VALUE);
return ecore_status;
}
@@ -630,7 +672,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
goto err;
}
- p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(struct dmae_cmd));
if (*p_cmd == OSAL_NULL) {
@@ -690,7 +732,8 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
}
}
-static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t
+ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
u32 wait_cnt_limit = 10000, wait_cnt = 0;
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
@@ -713,14 +756,14 @@ static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
if (++wait_cnt > wait_cnt_limit) {
DP_NOTICE(p_hwfn->p_dev, false,
- "Timed-out waiting for operation to"
- " complete. Completion word is 0x%08x"
- " expected 0x%08x.\n",
+ "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
*p_hwfn->dmae_info.p_completion_word,
DMAE_COMPLETION_VAL);
+ OSAL_WARN(true, "Dumping stack");
ecore_status = ECORE_TIMEOUT;
break;
}
+
/* to sync the completion_word since we are not
* using the volatile keyword for p_completion_word
*/
@@ -739,12 +782,13 @@ enum ecore_dmae_address_type {
ECORE_DMAE_ADDRESS_GRC
};
-static enum _ecore_status_t
-ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u64 src_addr,
- u64 dst_addr,
- u8 src_type, u8 dst_type, u32 length_dw)
+static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 length_dw)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -756,7 +800,7 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
break;
- /* for virt source addresses we use the intermediate buffer. */
+ /* for virtual source addresses we use the intermediate buffer. */
case ECORE_DMAE_ADDRESS_HOST_VIRT:
cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
@@ -774,7 +818,7 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
break;
- /* for virt destination address we use the intermediate buff. */
+ /* for virtual destination addresses we use the intermediate buffer. */
case ECORE_DMAE_ADDRESS_HOST_VIRT:
cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
@@ -784,18 +828,20 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
}
cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
-
+#ifndef __EXTRACT__LINUX__
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
OSAL_DMA_SYNC(p_hwfn->p_dev,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
length_dw * sizeof(u32), false);
+#endif
ecore_dmae_post_command(p_hwfn, p_ptt);
ecore_status = ecore_dmae_operation_wait(p_hwfn);
+#ifndef __EXTRACT__LINUX__
/* TODO - is it true ? */
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
@@ -803,13 +849,14 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
length_dw * sizeof(u32), true);
+#endif
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
- "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
- (unsigned long)src_addr, (unsigned long)dst_addr,
- length_dw,
- (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
+ "Wait Failed. source_addr 0x%" PRIx64 ", grc_addr 0x%" PRIx64 ", "
+ "size_in_dwords 0x%x, intermediate buffer 0x%" PRIx64 ".\n",
+ src_addr, dst_addr, length_dw,
+ (u64)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
return ecore_status;
}
@@ -821,15 +868,12 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum _ecore_status_t
-ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u64 src_addr,
- u64 dst_addr,
- u8 src_type,
- u8 dst_type,
- u32 size_in_dwords,
- struct dmae_params *p_params)
+static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr, u64 dst_addr,
+ u8 src_type, u8 dst_type,
+ u32 size_in_dwords,
+ struct dmae_params *p_params)
{
dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
@@ -841,37 +885,39 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
if (!p_hwfn->dmae_info.b_mem_ready) {
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
- (unsigned long)src_addr, src_type,
- (unsigned long)dst_addr, dst_type,
+ "No buffers allocated. Avoid DMAE transaction "
+ "[{src: addr 0x%" PRIx64 ", type %d}, "
+ "{dst: addr 0x%" PRIx64 ", type %d}, size %d].\n",
+ src_addr, src_type, dst_addr, dst_type,
size_in_dwords);
return ECORE_NOMEM;
}
if (p_hwfn->p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
- (unsigned long)src_addr, src_type,
- (unsigned long)dst_addr, dst_type,
+ "Recovery is in progress. Avoid DMAE transaction "
+ "[{src: addr 0x%" PRIx64 ", type %d}, "
+ "{dst: addr 0x%" PRIx64 ", type %d}, size %d].\n",
+ src_addr, src_type, dst_addr, dst_type,
size_in_dwords);
- /* Return success to let the flow to be completed successfully
- * w/o any error handling.
- */
+
+ /* Let the flow complete w/o any error handling */
return ECORE_SUCCESS;
}
if (!cmd) {
DP_NOTICE(p_hwfn, true,
- "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
- (unsigned long)src_addr,
- (unsigned long)dst_addr,
- length_cur);
+ "ecore_dmae_execute_sub_operation failed. Invalid state. "
+ "source_addr 0x%" PRIx64 ", destination addr 0x%" PRIx64 ", "
+ "size_in_dwords 0x%x\n",
+ src_addr, dst_addr, length_cur);
return ECORE_INVAL;
}
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
- (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
+ (dst_type == ECORE_DMAE_ADDRESS_GRC),
+ p_params);
cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
@@ -913,14 +959,18 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
dst_type,
length_cur);
if (ecore_status != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "ecore_dmae_execute_sub_operation Failed"
- " with error 0x%x. source_addr 0x%lx,"
- " dest addr 0x%lx, size_in_dwords 0x%x\n",
- ecore_status, (unsigned long)src_addr,
- (unsigned long)dst_addr, length_cur);
-
- ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
+ u8 str[ECORE_HW_ERR_MAX_STR_SIZE];
+
+ OSAL_SNPRINTF((char *)str, ECORE_HW_ERR_MAX_STR_SIZE,
+ "ecore_dmae_execute_sub_operation Failed with error 0x%x. "
+ "source_addr 0x%" PRIx64 ", "
+ "destination addr 0x%" PRIx64 ", size_in_dwords 0x%x\n",
+ ecore_status, src_addr, dst_addr,
+ length_cur);
+ DP_NOTICE(p_hwfn, false, "%s", str);
+ ecore_hw_err_notify(p_hwfn, p_ptt,
+ ECORE_HW_ERR_DMAE_FAIL, str,
+ OSAL_STRLEN((char *)str) + 1);
break;
}
}
@@ -973,12 +1023,11 @@ enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
return rc;
}
-enum _ecore_status_t
-ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- dma_addr_t source_addr,
- dma_addr_t dest_addr,
- u32 size_in_dwords,
+enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
struct dmae_params *p_params)
{
enum _ecore_status_t rc;
@@ -989,26 +1038,31 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
dest_addr,
ECORE_DMAE_ADDRESS_HOST_PHYS,
ECORE_DMAE_ADDRESS_HOST_PHYS,
- size_in_dwords, p_params);
+ size_in_dwords,
+ p_params);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
-void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
- enum ecore_hw_err_type err_type)
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_hw_err_type err_type, u8 *p_buf, u32 size)
{
/* Fan failure cannot be masked by handling of another HW error */
if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
- "Recovery is in progress."
- "Avoid notifying about HW error %d.\n",
+ "Recovery is in progress. Avoid notifying about HW error %d.\n",
err_type);
return;
}
OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
+
+ if (p_buf != OSAL_NULL)
+ ecore_mcp_send_raw_debug_data(p_hwfn, p_ptt, p_buf, size);
+
+ ecore_mcp_gen_mdump_idlechk(p_hwfn, p_ptt);
}
enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
@@ -1042,9 +1096,9 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO((u8 *)p_virt + size, size);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
- phase, (unsigned long)p_phys, p_virt,
- (unsigned long)(p_phys + size),
+ "DMAE sanity [%s]: src_addr={phys 0x%" PRIx64 ", virt %p}, "
+ "dst_addr={phys 0x%" PRIx64 ", virt %p}, size 0x%x\n",
+ phase, (u64)p_phys, p_virt, (u64)(p_phys + size),
(u8 *)p_virt + size, size);
rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
@@ -1066,10 +1120,9 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
if (*p_tmp != val) {
DP_NOTICE(p_hwfn, false,
- "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ "DMAE sanity [%s]: addr={phys 0x%" PRIx64 ", virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
phase,
- (unsigned long)p_phys +
- ((u8 *)p_tmp - (u8 *)p_virt),
+ (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
p_tmp, *p_tmp, val);
rc = ECORE_UNKNOWN_ERROR;
goto out;
@@ -1085,13 +1138,15 @@ void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u32 hw_addr, u32 val)
{
u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+ u16 fid;
+
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pfid);
+ ecore_fid_pretend(p_hwfn, p_ptt, fid);
- ecore_fid_pretend(p_hwfn, p_ptt,
- pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
ecore_wr(p_hwfn, p_ptt, hw_addr, val);
- ecore_fid_pretend(p_hwfn, p_ptt,
- p_hwfn->rel_pf_id <<
- PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ ecore_fid_pretend(p_hwfn, p_ptt, fid);
}
u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
@@ -1099,13 +1154,19 @@ u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
{
u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
u32 val;
+ u16 fid;
+
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pfid);
+ ecore_fid_pretend(p_hwfn, p_ptt, fid);
- ecore_fid_pretend(p_hwfn, p_ptt,
- pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
val = ecore_rd(p_hwfn, p_ptt, hw_addr);
- ecore_fid_pretend(p_hwfn, p_ptt,
- p_hwfn->rel_pf_id <<
- PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ ecore_fid_pretend(p_hwfn, p_ptt, fid);
return val;
}
+
+#ifdef _NTDDK_
+#pragma warning(pop)
+#endif
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_HW_H__
#define __ECORE_HW_H__
@@ -42,17 +42,19 @@ enum reserved_ptts {
#endif
#define DMAE_CMD_SIZE 14
-/* size of DMAE command structure to fill.. DMAE_CMD_SIZE-5 */
+
+/* Size of DMAE command structure to fill: DMAE_CMD_SIZE - 5 */
#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
-/* Minimum wait for dmae opertaion to complete 2 milliseconds */
+
+/* Minimum wait for DMAE operation to complete: 2 milliseconds */
#define DMAE_MIN_WAIT_TIME 0x2
#define DMAE_MAX_CLIENTS 32
/**
-* @brief ecore_gtt_init - Initialize GTT windows
-*
-* @param p_hwfn
-*/
+ * @brief ecore_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
/**
@@ -85,7 +87,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
*
* @return u32
*/
-u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
@@ -133,6 +135,21 @@ u32 ecore_rd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr);
+/**
+ * @brief ecore_memzero_hw - zero (n / 4) DWORDs from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param n
+ */
+void ecore_memzero_hw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ osal_size_t n);
+
/**
* @brief ecore_memcpy_from - copy n bytes from BAR using the given
* ptt
@@ -228,7 +245,7 @@ u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
* which is part of p_hwfn.
* @param p_hwfn
*/
-enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dmae_info_free - Free the dmae_info structure
@@ -236,7 +253,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
*
* @param p_hwfn
*/
-void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dmae_host2grc - copy data from source address to
@@ -307,8 +324,20 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);
-void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
- enum ecore_hw_err_type err_type);
+#define ECORE_HW_ERR_MAX_STR_SIZE 256
+
+/**
+ * @brief ecore_hw_err_notify - Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param err_type
+ * @param p_buf - debug data buffer to send to the MFW
+ * @param size - buffer size
+ */
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_hw_err_type err_type, u8 *p_buf, u32 size);
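+
+/* A minimal usage sketch, mirroring the DMAE failure path in ecore_hw.c
+ * (illustrative only; rc is a placeholder for whatever status the caller
+ * wants to report, and the string contents are up to the caller):
+ *
+ *	u8 str[ECORE_HW_ERR_MAX_STR_SIZE];
+ *
+ *	OSAL_SNPRINTF((char *)str, ECORE_HW_ERR_MAX_STR_SIZE,
+ *		      "DMAE failed with error 0x%x\n", rc);
+ *	ecore_hw_err_notify(p_hwfn, p_ptt, ECORE_HW_ERR_DMAE_FAIL, str,
+ *			    OSAL_STRLEN((char *)str) + 1);
+ */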
/**
* @brief ecore_ppfid_wr - Write value to BAR using the given ptt while
@@ -1,37 +1,26 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef _ECORE_IGU_DEF_H_
#define _ECORE_IGU_DEF_H_
/* Fields of IGU PF CONFIGRATION REGISTER */
-/* function enable */
-#define IGU_PF_CONF_FUNC_EN (0x1 << 0)
-/* MSI/MSIX enable */
-#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1)
-/* INT enable */
-#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2)
-/* attention enable */
-#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3)
-/* single ISR mode enable */
-#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)
-/* simd all ones mode */
-#define IGU_PF_CONF_SIMD_MODE (0x1 << 5)
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
/* Fields of IGU VF CONFIGRATION REGISTER */
-/* function enable */
-#define IGU_VF_CONF_FUNC_EN (0x1 << 0)
-/* MSI/MSIX enable */
-#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1)
-/* single ISR mode enable */
-#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)
-/* Parent PF */
-#define IGU_VF_CONF_PARENT_MASK (0xF)
-/* Parent PF */
-#define IGU_VF_CONF_PARENT_SHIFT 5
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* Igu control commands
*/
@@ -47,11 +36,11 @@ struct igu_ctrl_reg {
u32 ctrl_data;
#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
#define IGU_CTRL_REG_FID_SHIFT 0
-#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0x1FFF /* Command address */
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
-#define IGU_CTRL_REG_RESERVED_MASK 0x1
-#define IGU_CTRL_REG_RESERVED_SHIFT 28
-#define IGU_CTRL_REG_TYPE_MASK 0x1U /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_RESERVED_MASK 0x3
+#define IGU_CTRL_REG_RESERVED_SHIFT 29
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
@@ -38,8 +38,7 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) \
(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
-#define QM_PQ_SIZE_256B(pq_size) \
- (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
/* Max link speed (in Mbps) */
@@ -50,11 +49,12 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
#define QM_BYTE_CRD_EN 1
/* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
+#define QM_OTHER_PQS_PER_PF_E4 4
+#define QM_OTHER_PQS_PER_PF_E5 4
/* VOQ constants */
-#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
-#define VOQS_BIT_MASK ((1 << MAX_NUM_VOQS) - 1)
+#define MAX_NUM_VOQS_E4 (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
/* WFQ constants: */
@@ -65,7 +65,8 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
@@ -73,6 +74,9 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
+/* Number of VOQs in E5 QmWfqCrd register */
+#define QM_WFQ_CRD_E5_NUM_VOQS 16
+
/* RL constants: */
/* Period in us */
@@ -89,8 +93,7 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
* this point.
*/
#define QM_RL_INC_VAL(rate) \
- OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
- (8 * 100)), 1)
+ OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000
@@ -99,8 +102,7 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
/* Vport RL Upper bound, link speed is in Mpbs */
-#define QM_VP_RL_UPPER_BOUND(speed) \
- ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
+#define QM_VP_RL_UPPER_BOUND(speed) ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
@@ -116,22 +118,24 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
/* Command Queue constants: */
/* Pure LB CmdQ lines (+spare) */
-#define PBF_CMDQ_PURE_LB_LINES 150
+#define PBF_CMDQ_PURE_LB_LINES_E4 150
+#define PBF_CMDQ_PURE_LB_LINES_E5 75
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
- ext_voq * \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + (ext_voq) * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
- (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
- ext_voq * \
- (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + (ext_voq) * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
-((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units in E4, and in 512b units in E5.
+ */
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines, is_e5) \
+ ((((pbf_cmd_lines) - 4) * (is_e5 ? 4 : 2)) | QM_LINE_CRD_REG_SIGN_BIT)
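+
+/* For example, the E4 pure LB allocation of PBF_CMDQ_PURE_LB_LINES_E4 (150)
+ * lines yields a credit of (150 - 4) * 2 = 292, OR'd with
+ * QM_LINE_CRD_REG_SIGN_BIT.
+ */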
/* BTB: blocks constants (block size = 256B) */
@@ -151,7 +155,7 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
+#define QM_STOP_CMD_PAUSE_MASK_MASK ((u64)-1)
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
@@ -185,25 +189,31 @@ static u16 task_region_offsets_e5[1][NUM_OF_CONNECTION_TYPES_E5] = {
((rl_valid ? 1 : 0) << 22) | (((rl_id) & 255) << 24) | \
(((rl_id) >> 8) << 9))
-#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) (XSEM_REG_FAST_MEMORY + \
- SEM_FAST_REG_INT_RAM + XSTORM_PQ_INFO_OFFSET(pq_id))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + XSTORM_PQ_INFO_OFFSET(pq_id))
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
-static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
+static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
+ bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
+ u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
/* Enable RLs for all VOQs */
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- VOQS_BIT_MASK);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
+#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+#endif
/* Write RL period */
- STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
- STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
/* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
@@ -213,184 +223,193 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
}
/* Prepare PF WFQ enable/disable runtime init values */
-static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
+static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
+ bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
/* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
- STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
static void ecore_enable_global_rl(struct ecore_hwfn *p_hwfn,
bool global_rl_en)
{
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- global_rl_en ? 1 : 0);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, global_rl_en ? 1 : 0);
if (global_rl_en) {
/* Write RL period (use timer 0 only) */
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
/* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
- STORE_RT_REG(p_hwfn,
- QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
-static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
+static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
+ bool vport_wfq_en)
{
- STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
- vport_wfq_en ? 1 : 0);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);
/* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
- STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
- * the specified VOQ
+ * the specified VOQ.
*/
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 voq,
+ u8 ext_voq,
u16 cmdq_lines)
{
- u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+ u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines, ECORE_IS_E5(p_hwfn->p_dev));
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
- (u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
- qm_line_crd);
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
- struct init_qm_port_params
- port_params[MAX_NUM_PORTS])
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
/* Clear PBF lines of all VOQs */
- for (voq = 0; voq < MAX_NUM_VOQS; voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- u16 phys_lines, phys_lines_per_tc;
+ u16 phys_lines, phys_lines_per_tc, pure_lb_lines;
if (!port_params[port_id].active)
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs.
+ * active physical TCs. In E5, 1/8 of the lines are reserved.
+ * the lines for pure LB TC are subtracted.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
- phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
+ pure_lb_lines = PBF_CMDQ_PURE_LB_LINES_E5;
+			/* E5 TGFS workaround: use 2 TCs for PURE_LB */
+ if (((port_params[port_id].active_phys_tcs >>
+ PBF_TGFS_PURE_LB_TC_E5) & 0x1) == 0) {
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES_E5;
+
+ /* Init registers for PBF_TGFS_PURE_LB_TC_E5 */
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PBF_TGFS_PURE_LB_TC_E5,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, pure_lb_lines);
+ } else {
+				DP_NOTICE(p_hwfn, true, "Error: TC %d used for TGFS ramrods workaround. Must not be used by driver.\n",
+ PBF_TGFS_PURE_LB_TC_E5);
+ }
+ } else {
+ pure_lb_lines = PBF_CMDQ_PURE_LB_LINES_E4;
+ }
+ phys_lines -= pure_lb_lines;
/* Find #lines per active physical TC */
num_tcs_in_port = 0;
for (tc = 0; tc < max_phys_tcs_per_port; tc++)
- if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1)
+ if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
num_tcs_in_port++;
phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* Init registers per active TC */
for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
- voq = VOQ(port_id, tc, max_phys_tcs_per_port);
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
- phys_lines_per_tc);
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
}
/* Init registers for pure LB TC */
- voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
- PBF_CMDQ_PURE_LB_LINES);
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, pure_lb_lines);
}
}
-/*
- * Prepare runtime init values to allocate guaranteed BTB blocks for the
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
* specified port. The guaranteed BTB space is divided between the TCs as
* follows (shared space Is currently not used):
* 1. Parameters:
- * B BTB blocks for this port
- * C Number of physical TCs for this port
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
* 2. Calculation:
- * a. 38 blocks (9700B jumbo frame) are allocated for global per port
- * headroom
- * b. B = B 38 (remainder after global headroom allocation)
- * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
- * d. B = B MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
- * e. B/C blocks are allocated for each physical TC.
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38,B/(C + 0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C + 0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
* Assumptions:
* - MTU is up to 9700 bytes (38 blocks)
* - All TCs are considered symmetrical (same rate and packet size)
- * - No optimization for lossy TC (all are considered lossless). Shared space is
- * not enabled and allocated for each TC.
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ * is not enabled and allocated for each TC.
*/
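/* Illustrative example (block counts assumed): with B = 1000 and C = 4,
* steps a-b leave B = 962 after the 38-block headroom, step c gives
* MAX(38, 962 / 4.7), about 204 blocks, for the pure LB VOQ, and steps d-e
* leave (962 - 204) / 4 = 189 guaranteed blocks per physical TC.
* The code below scales by BTB_PURE_LB_FACTOR to avoid floating point.
*/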
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
- struct init_qm_port_params
- port_params[MAX_NUM_PORTS])
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (!port_params[port_id].active)
continue;
/* Subtract headroom blocks */
- usable_blocks = port_params[port_id].num_btb_blocks -
- BTB_HEADROOM_BLOCKS;
+ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;
/* Find blocks per physical TC. Use a factor to avoid floating
* arithmetic.
*/
num_tcs_in_port = 0;
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
- if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1)
+ if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
num_tcs_in_port++;
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
- (num_tcs_in_port * BTB_PURE_LB_FACTOR +
- BTB_PURE_LB_RATIO);
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
- pure_lb_blocks /
- BTB_PURE_LB_FACTOR);
- phys_blocks = (usable_blocks - pure_lb_blocks) /
- num_tcs_in_port;
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1) {
- voq = VOQ(port_id, tc, max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- phys_blocks);
+ if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ phys_blocks);
}
}
/* Init pure LB TC */
- voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- pure_lb_blocks);
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
+
+ if (ECORE_IS_E5(p_hwfn->p_dev) &&
+ (((port_params[port_id].active_phys_tcs >>
+ PBF_TGFS_PURE_LB_TC_E5) & 0x1) == 0)) {
+ /* Init registers for PBF_TGFS_PURE_LB_TC_E5 */
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PBF_TGFS_PURE_LB_TC_E5,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
+ }
}
}
@@ -418,12 +437,18 @@ static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn,
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
- upper_bound);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
- inc_val);
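+ /* Rate limiter IDs beyond the base runtime array sizes spill over
+ * into the corresponding *_MSB_* runtime offsets.
+ */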
+ STORE_RT_REG(p_hwfn, rl_id < QM_REG_RLGLBLCRD_RT_SIZE ?
+ QM_REG_RLGLBLCRD_RT_OFFSET + rl_id :
+ QM_REG_RLGLBLCRD_MSB_RT_OFFSET + rl_id - QM_REG_RLGLBLCRD_RT_SIZE,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, rl_id < QM_REG_RLGLBLUPPERBOUND_RT_SIZE ?
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id :
+ QM_REG_RLGLBLUPPERBOUND_MSB_RT_OFFSET + rl_id -
+ QM_REG_RLGLBLUPPERBOUND_RT_SIZE, upper_bound);
+ STORE_RT_REG(p_hwfn, rl_id < QM_REG_RLGLBLINCVAL_RT_SIZE ?
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id :
+ QM_REG_RLGLBLINCVAL_MSB_RT_OFFSET + rl_id - QM_REG_RLGLBLINCVAL_RT_SIZE,
+ inc_val);
}
return 0;
@@ -431,19 +456,19 @@ static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 pf_id,
- u8 max_phys_tcs_per_port,
- bool is_pf_loading,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u16 start_pq,
- u16 num_pf_pqs,
- u16 num_vf_pqs,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
u16 start_vport,
- u32 base_mem_addr_4kb,
- struct init_qm_pq_params *pq_params,
- struct init_qm_vport_params *vport_params)
+ u32 base_mem_addr_4kb,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
{
/* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
@@ -465,14 +490,11 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
- STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
- (u32)(pf_id));
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));
/* Set PQ sizes */
- STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
- QM_PQ_SIZE_256B(num_pf_cids));
- STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
- QM_PQ_SIZE_256B(num_vf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));
/* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
@@ -480,26 +502,24 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
bool is_vf_pq;
u8 ext_voq;
- ext_voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
- max_phys_tcs_per_port);
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - start_vport;
- first_tx_pq_id =
- vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
+ first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (pf_id << QM_WFQ_VP_PQ_PF_SHIFT);
+ (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ?
+ QM_WFQ_VP_PQ_PF_E5_SHIFT : QM_WFQ_VP_PQ_PF_E4_SHIFT));
/* Create new VP PQ */
- vport_params[vport_id_in_pf].
- first_tx_pq_id[pq_params[i].tc_id] = pq_id;
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
- STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
- first_tx_pq_id, map_val);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
}
/* Prepare PQ map entry */
@@ -518,8 +538,7 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
}
/* Set PQ base address */
- STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
- mem_addr_4kb);
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);
/* Clear PQ pointer table entry (64 bit) */
if (is_pf_loading)
@@ -529,19 +548,16 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Write PQ info to RAM */
#if (WRITE_PQ_INFO_TO_RAM != 0)
- pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
- pq_params[i].tc_id,
- pq_params[i].port_id,
- pq_params[i].rl_valid,
+ pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id,
+ pq_params[i].port_id, pq_params[i].rl_valid,
pq_params[i].rl_id);
- ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
- pq_info);
+ ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
#endif
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
- (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
+ (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
@@ -551,8 +567,8 @@ static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Store Tx PQ VF mask to size select register */
for (i = 0; i < num_tx_pq_vf_masks; i++)
if (tx_pq_vf_mask[i])
- STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, tx_pq_vf_mask[i]);
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
+ tx_pq_vf_mask[i]);
return 0;
}
@@ -566,7 +582,7 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, j, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group, qm_other_pqs_per_pf;
/* A single other PQ group is used in each PF, where PQ group i is used
* in PF i.
@@ -575,26 +591,23 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
pq_size = num_pf_cids + num_tids;
pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
mem_addr_4kb = base_mem_addr_4kb;
+ qm_other_pqs_per_pf = ECORE_IS_E5(p_hwfn->p_dev) ? QM_OTHER_PQS_PER_PF_E5 :
+ QM_OTHER_PQS_PER_PF_E4;
/* Map PQ group to PF */
- STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
- (u32)(pf_id));
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));
/* Set PQ sizes */
- STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
- QM_PQ_SIZE_256B(pq_size));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));
- for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
- i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < qm_other_pqs_per_pf; i++, pq_id++) {
/* Set PQ base address */
- STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
- mem_addr_4kb);
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);
/* Clear PQ pointer table entry */
if (is_pf_loading)
for (j = 0; j < 2; j++)
- STORE_RT_REG(p_hwfn,
- QM_REG_PTRTBLOTHER_RT_OFFSET +
+ STORE_RT_REG(p_hwfn, QM_REG_PTRTBLOTHER_RT_OFFSET +
(pq_id * 2) + j, 0);
mem_addr_4kb += pq_mem_4kb;
@@ -612,30 +625,30 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params)
{
u32 inc_val, crd_reg_offset;
- u8 voq;
+ u8 ext_voq;
u16 i;
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid PF WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
return -1;
}
for (i = 0; i < num_tx_pqs; i++) {
- voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
- max_phys_tcs_per_port);
- crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
- QM_REG_WFQPFCRD_RT_OFFSET :
- QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
- voq * MAX_NUM_PFS_BB +
- (pf_id % MAX_NUM_PFS_BB);
- OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
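+ /* The per-(VOQ, PF) WFQ credit registers are split into base and MSB
+ * arrays: E5 selects the array by VOQ index, while E4/BB selects it
+ * by PF index.
+ */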
+ crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
+ (ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
+ (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
+ OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
- pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND |
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
@@ -644,21 +657,21 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare PF RL runtime init values for the specified PF.
* Return -1 on error.
*/
-static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 pf_id,
+ u32 pf_rl)
{
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_PF_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid PF rate limit configuration\n");
+ DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_PF_RL_UPPER_BOUND |
(u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
@@ -682,8 +695,7 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
inc_val = QM_WFQ_INC_VAL(vport_params[vport_id].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
return -1;
}
@@ -693,10 +705,9 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
if (vp_pq_id == QM_INVALID_PQ_ID)
continue;
- STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
- vp_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vp_pq_id, inc_val);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vp_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vp_pq_id, inc_val);
}
}
@@ -708,16 +719,14 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
- i++) {
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
/* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
- "Timeout waiting for QM SDM cmd ready signal\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Timeout when waiting for QM SDM command ready signal\n");
return false;
}
@@ -726,9 +735,9 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 cmd_addr,
- u32 cmd_data_lsb,
- u32 cmd_data_msb)
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@@ -744,16 +753,30 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
/******************** INTERFACE IMPLEMENTATION *********************/
+u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 tc,
+ u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 :
+ MAX_NUM_PORTS_BB) + port_id;
+ else
+ return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS :
+ max_phys_tcs_per_port) + tc;
+}
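+
+/* Example of the mapping above (illustrative, assuming
+ * max_phys_tcs_per_port = 4, NUM_OF_PHYS_TCS = 8 and MAX_NUM_PORTS_BB = 2
+ * on a non-E5 (BB) device): port 1, TC 2 maps to ext VOQ 1 * 4 + 2 = 6,
+ * while PURE_LB_TC of port 1 maps to ext VOQ 8 * 2 + 1 = 17.
+ */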
+
u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,
u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs)
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
- QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
- QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * (ECORE_IS_E5(p_hwfn->p_dev) ?
+ QM_OTHER_PQS_PER_PF_E5 : QM_OTHER_PQS_PER_PF_E4);
}
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
@@ -763,24 +786,21 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
bool pf_wfq_en,
bool global_rl_en,
bool vport_wfq_en,
- struct init_qm_port_params
- port_params[MAX_NUM_PORTS],
+ struct init_qm_port_params port_params[MAX_NUM_PORTS],
struct init_qm_global_rl_params
- global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
{
u32 mask = 0;
/* Init AFullOprtnstcCrdMask */
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
- QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, QM_OPPOR_LINE_VOQ_DEF);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, pf_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, vport_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, pf_rl_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, global_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, pf_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, vport_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, pf_rl_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, global_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY,
- QM_OPPOR_PQ_EMPTY_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* Enable/disable PF RL */
@@ -796,12 +816,10 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
/* Init PBF CMDQ line credit */
- ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
- max_phys_tcs_per_port, port_params);
+ ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);
/* Init BTB blocks in PBF */
- ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
- max_phys_tcs_per_port, port_params);
+ ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);
ecore_global_rl_rt_init(p_hwfn, global_rl_params);
@@ -830,33 +848,27 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u16 vport_id;
u8 tc;
- other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
- QM_OTHER_PQS_PER_PF;
+ other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * (ECORE_IS_E5(p_hwfn->p_dev) ?
+ QM_OTHER_PQS_PER_PF_E5 : QM_OTHER_PQS_PER_PF_E4);
/* Clear first Tx PQ ID array for each VPORT */
for (vport_id = 0; vport_id < num_vports; vport_id++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
- vport_params[vport_id].first_tx_pq_id[tc] =
- QM_INVALID_PQ_ID;
+ vport_params[vport_id].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
/* Map Other PQs (if any) */
-#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
- num_tids, 0);
-#endif
+ if ((ECORE_IS_E5(p_hwfn->p_dev) ? QM_OTHER_PQS_PER_PF_E5 : QM_OTHER_PQS_PER_PF_E4) > 0)
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, num_tids, 0);
/* Map Tx PQs */
- if (ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
- is_pf_loading, num_pf_cids, num_vf_cids,
- start_pq, num_pf_pqs, num_vf_pqs,
- start_vport, other_mem_size_4kb, pq_params,
- vport_params))
+ if (ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port, is_pf_loading,
+ num_pf_cids, num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
+ start_vport, other_mem_size_4kb, pq_params, vport_params))
return -1;
/* Init PF WFQ */
if (pf_wfq)
- if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
- max_phys_tcs_per_port,
+ if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq, max_phys_tcs_per_port,
num_pf_pqs + num_vf_pqs, pq_params))
return -1;
@@ -872,14 +884,15 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u16 pf_wfq)
{
u32 inc_val;
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid PF WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -889,19 +902,19 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl)
{
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_PF_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid PF rate limit configuration\n");
+ DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
return -1;
}
- ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
@@ -918,22 +931,20 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ configuration.\n");
return -1;
}
/* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vp_pq_id = first_tx_pq_id[tc];
- if (vp_pq_id != QM_INVALID_PQ_ID) {
- ecore_wr(p_hwfn, p_ptt,
- QM_REG_WFQVPWEIGHT + vp_pq_id * 4, inc_val);
- }
+ if (vp_pq_id == QM_INVALID_PQ_ID)
+ continue;
+ ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vp_pq_id * 4, inc_val);
}
return 0;
- }
+}
int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -948,37 +959,13 @@ int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
return -1;
}
- ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
-
- return 0;
-}
-
-int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 vport_id,
- u32 vport_rl,
- u32 link_speed)
-{
- u32 inc_val, max_qm_global_rls = ECORE_IS_E5(p_hwfn->p_dev) ? MAX_QM_GLOBAL_RLS_E5
- : MAX_QM_GLOBAL_RLS_E4;
-
- if (vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration\n");
- return -1;
- }
-
- inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
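+ /* Rate limiter IDs beyond the base register array sizes use the E5
+ * MSB registers.
+ */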
+ ecore_wr(p_hwfn, p_ptt, rl_id < QM_REG_RLGLBLCRD_SIZE ?
+ QM_REG_RLGLBLCRD + rl_id * 4 :
+ QM_REG_RLGLBLCRD_MSB_E5 + (rl_id - QM_REG_RLGLBLCRD_SIZE) * 4,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, rl_id < QM_REG_RLGLBLINCVAL_SIZE ?
+ QM_REG_RLGLBLINCVAL + rl_id * 4 :
+ QM_REG_RLGLBLINCVAL_MSB_E5 + (rl_id - QM_REG_RLGLBLINCVAL_SIZE) * 4, inc_val);
return 0;
}
@@ -986,9 +973,11 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_release_cmd,
- bool is_tx_pq, u16 start_pq, u16 num_pqs)
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs)
{
- u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
u32 pq_mask = 0, last_pq, pq_id;
last_pq = start_pq + num_pqs - 1;
@@ -1003,16 +992,13 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
/* If last PQ or end of PQ mask, write command */
- if ((pq_id == last_pq) ||
- (pq_id % QM_STOP_PQ_MASK_WIDTH ==
- (QM_STOP_PQ_MASK_WIDTH - 1))) {
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
- pq_mask);
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
- pq_id / QM_STOP_PQ_MASK_WIDTH);
- if (!ecore_send_qm_cmd
- (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
- cmd_arr[1]))
+ if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+ cmd_arr[0], cmd_arr[1]))
return false;
pq_mask = 0;
}
@@ -1029,8 +1015,7 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
#define NIG_ETS_MIN_WFQ_BYTES 1600
/* NIG: ETS constants */
-#define NIG_ETS_UP_BOUND(weight, mtu) \
- (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+#define NIG_ETS_UP_BOUND(weight, mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))
/* NIG: RL constants */
@@ -1046,8 +1031,7 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
-#define NIG_RL_MAX_VAL(inc_val, mtu) \
- (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+#define NIG_RL_MAX_VAL(inc_val, mtu) (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
@@ -1055,7 +1039,8 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct init_ets_req *req, bool is_lb)
+ struct init_ets_req *req,
+ bool is_lb)
{
u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
u32 tc_bound_base_addr, tc_bound_addr_diff;
@@ -1063,8 +1048,7 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
u8 tc, num_tc, tc_client_offset;
num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
- tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
- NIG_TX_ETS_CLIENT_OFFSET;
+ tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
min_weight = 0xffffffff;
tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
@@ -1098,17 +1082,14 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
}
/* Write SP map */
- ecore_wr(p_hwfn, p_ptt,
- is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
- NIG_REG_TX_ARB_CLIENT_IS_STRICT,
- (sp_tc_map << tc_client_offset));
+ ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
+ NIG_REG_TX_ARB_CLIENT_IS_STRICT, (sp_tc_map << tc_client_offset));
/* Write WFQ map */
- ecore_wr(p_hwfn, p_ptt,
- is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
- NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
- (wfq_tc_map << tc_client_offset));
- /* write WFQ weights */
+ ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, (wfq_tc_map << tc_client_offset));
+
+ /* Write WFQ weights */
for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
u32 byte_weight;
@@ -1117,8 +1098,7 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
continue;
/* Translate weight to bytes */
- byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
+ byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;
/* Write WFQ weight */
ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
@@ -1138,62 +1118,50 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
u32 ctrl, inc_val, reg_offset;
u8 tc;
- /* Disable global MAC+LB RL */
- ctrl =
- NIG_RL_BASE_TYPE <<
- NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
+ /* Disable global MAC + LB RL */
+ ctrl = NIG_RL_BASE_TYPE <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
- /* Configure and enable global MAC+LB RL */
+ /* Configure and enable global MAC + LB RL */
if (req->lb_mac_rate) {
/* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
- inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
/* Enable */
- ctrl |=
- 1 <<
- NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
+ ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
}
/* Disable global LB-only RL */
- ctrl =
- NIG_RL_BASE_TYPE <<
- NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
+ ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
/* Configure and enable global LB-only RL */
if (req->lb_rate) {
/* Configure */
- ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
- NIG_RL_PERIOD_CLK_25M);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_rate);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
- inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
/* Enable */
- ctrl |=
- 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
+ ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
}
/* Per-TC RLs */
- for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
- tc++, reg_offset += 4) {
+ for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {
/* Disable TC RL */
- ctrl =
- NIG_RL_BASE_TYPE <<
- NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
+ ctrl = NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
/* Configure and enable TC RL */
if (!req->tc_rate[tc])
@@ -1203,16 +1171,13 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
reg_offset, NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
- reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
/* Enable */
- ctrl |= 1 <<
- NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
- ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
- reg_offset, ctrl);
+ ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
}
}
@@ -1228,8 +1193,7 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
if (!req->pri[pri].valid)
continue;
- pri_tc_mask |= (req->pri[pri].tc_id <<
- (pri * NIG_PRIORITY_MAP_TC_BITS));
+ pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
}
@@ -1238,10 +1202,8 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
/* Write TC -> priority mask */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
- tc_pri_mask[tc]);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
- tc_pri_mask[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
}
}
@@ -1251,18 +1213,17 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
-#define PRS_ETS_UP_BOUND(weight, mtu) \
- (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+#define PRS_ETS_UP_BOUND(weight, mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct init_ets_req *req)
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req)
{
u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
@@ -1284,14 +1245,13 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
min_weight = tc_req->weight;
}
- /* write SP map */
+ /* Write SP map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
- /* write WFQ map */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
- wfq_tc_map);
+ /* Write WFQ map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map);
- /* write WFQ weights */
+ /* Write WFQ weights */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
u32 byte_weight;
@@ -1300,17 +1260,15 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
continue;
/* Translate weight to bytes */
- byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
+ byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;
/* Write WFQ weight */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
- tc_weight_addr_diff, byte_weight);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
+ tc * tc_weight_addr_diff, byte_weight);
/* Write WFQ upper bound */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
- tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
- req->mtu));
+ tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight, req->mtu));
}
}
@@ -1327,16 +1285,15 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
+ struct ecore_ptt *p_ptt,
+ struct init_brb_ram_req *req)
{
u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
u32 active_port_blocks, reg_offset = 0;
u8 port, active_ports = 0;
- tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
- BRB_BLOCK_SIZE);
- min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
- BRB_BLOCK_SIZE);
+ tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
+ min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
BRB_TOTAL_RAM_BLOCKS_BB;
@@ -1354,26 +1311,20 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
u8 tc;
/* Calculate per-port sizes */
- tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
- BRB_BLOCK_SIZE);
- port_blocks = req->num_active_tcs[port] ? active_port_blocks :
- 0;
- port_guaranteed_blocks = req->num_active_tcs[port] *
- tc_guaranteed_blocks;
+ tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
+ port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
+ port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
port_shared_blocks = port_blocks - port_guaranteed_blocks;
- full_xoff_th = req->num_active_tcs[port] *
- BRB_MIN_BLOCKS_PER_TC;
+ full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
full_xon_th = full_xoff_th + min_pkt_size_blocks;
pause_xoff_th = tc_headroom_blocks;
pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
/* Init total size per port */
- ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
- port_blocks);
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);
/* Init shared size per port */
- ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
- port_shared_blocks);
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);
for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
/* Clear init values for non-active TCs */
@@ -1386,43 +1337,33 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
}
/* Init guaranteed size per TC */
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_TC_GUARANTIED_0 + reg_offset,
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset,
tc_guaranteed_blocks);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
- BRB_HYST_BLOCKS);
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 +
+ reg_offset, BRB_HYST_BLOCKS);
/* Init pause/full thresholds per physical TC - for
* loopback traffic.
*/
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
reg_offset, full_xon_th);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
reg_offset, pause_xoff_th);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
/* Init pause/full thresholds per physical TC - for
* main traffic.
*/
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
reg_offset, full_xon_th);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
reg_offset, pause_xoff_th);
- ecore_wr(p_hwfn, p_ptt,
- BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
}
}
@@ -1431,12 +1372,14 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
-#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
- do { \
- u32 i; \
- for (i = 0; i < (arr_size); i++) \
- ecore_wr(dev, ptt, ((addr) + (4 * i)), \
- ((u32 *)&(arr))[i]); \
+/* @DPDK */
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ ecore_wr(dev, ptt, ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
} while (0)
#ifndef DWORDS_TO_BYTES
@@ -1445,25 +1388,27 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
/**
- * @brief ecore_dmae_to_grc - is an internal function - writes from host to
- * wide-bus registers (split registers are not supported yet)
+ * @brief ecore_dmae_to_grc - internal function that writes from host to wide-bus registers
+ * (split registers are not supported yet)
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
- * @param pData - pointer to source data.
+ * @param p_data - pointer to source data.
* @param addr - Destination register address.
* @param len_in_dwords - data length in DWORDS (u32)
*/
+
static int ecore_dmae_to_grc(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 *pData,
- u32 addr,
- u32 len_in_dwords)
+ struct ecore_ptt *p_ptt,
+ u32 *p_data,
+ u32 addr,
+ u32 len_in_dwords)
{
struct dmae_params params;
bool read_using_dmae = false;
- if (!pData)
+ if (!p_data)
return -1;
/* Set DMAE params */
@@ -1472,17 +1417,17 @@ static int ecore_dmae_to_grc(struct ecore_hwfn *p_hwfn,
SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 1);
/* Execute DMAE command */
- read_using_dmae = !ecore_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(osal_uintptr_t)(pData),
+ read_using_dmae = !ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)(p_data),
addr, len_in_dwords, ¶ms);
if (!read_using_dmae)
- DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
- "Failed writing to chip using DMAE, using GRC instead\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Failed writing to chip using DMAE, using GRC instead\n");
+
/* If not read using DMAE, read using GRC */
- if (!read_using_dmae)
+ if (!read_using_dmae) {
/* write to registers using GRC */
- ARR_REG_WR(p_hwfn, p_ptt, addr, pData, len_in_dwords);
+ ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+ }
return len_in_dwords;
}
@@ -1497,12 +1442,13 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
#endif /* UNUSED_HSI_FUNC */
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
-(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
-#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
-#define PRS_ETH_OUTPUT_FORMAT -46832
+ (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
+#define PRS_ETH_TUNN_OUTPUT_FORMAT_E4 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT_E4 0xFFFF4910
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 dest_port)
+ struct ecore_ptt *p_ptt,
+ u16 dest_port)
{
/* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
@@ -1515,7 +1461,8 @@ void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool vxlan_enable)
+ struct ecore_ptt *p_ptt,
+ bool vxlan_enable)
{
u32 reg_val;
@@ -1524,31 +1471,38 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_ENCAPSULATION_TYPE_EN_FLAGS_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) { /* TODO: handle E5 init */
- reg_val = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ if ((reg_val != 0) && (!ECORE_IS_E5(p_hwfn->p_dev))) {
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
- /* Update output only if tunnel blocks not included. */
- if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ /* @DPDK */
+ /* Update output format only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT_E4)
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT_E4);
}
/* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
- NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
- vxlan_enable);
+ NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, vxlan_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* Update DORQ register */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN_BB_K2,
- vxlan_enable ? 1 : 0);
+ /* Update PBF register */
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ reg_val = ecore_rd(p_hwfn, p_ptt, PBF_REG_TUNNEL_ENABLES_E5);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PBF_REG_TUNNEL_ENABLES_TUNNEL_VXLAN_EN_E5_SHIFT, vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_TUNNEL_ENABLES_E5, reg_val);
+ } else { /* Update DORQ register */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN_BB_K2,
+ vxlan_enable ? 1 : 0);
+ }
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- bool eth_gre_enable, bool ip_gre_enable)
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable,
+ bool ip_gre_enable)
{
u32 reg_val;
@@ -1561,35 +1515,44 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) { /* TODO: handle E5 init */
- reg_val = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ if ((reg_val != 0) && (!ECORE_IS_E5(p_hwfn->p_dev))) {
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
- /* Update output only if tunnel blocks not included. */
- if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ /* @DPDK */
+ /* Update output format only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT_E4)
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT_E4);
}
/* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
- NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
- eth_gre_enable);
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
- NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
- ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* Update DORQ registers */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN_BB_K2,
- eth_gre_enable ? 1 : 0);
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN_BB_K2,
- ip_gre_enable ? 1 : 0);
+ /* Update PBF register */
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ reg_val = ecore_rd(p_hwfn, p_ptt, PBF_REG_TUNNEL_ENABLES_E5);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PBF_REG_TUNNEL_ENABLES_TUNNEL_GRE_ETH_EN_E5_SHIFT, eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PBF_REG_TUNNEL_ENABLES_TUNNEL_GRE_IP_EN_E5_SHIFT, ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_TUNNEL_ENABLES_E5, reg_val);
+ } else { /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN_BB_K2,
+ eth_gre_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN_BB_K2,
+ ip_gre_enable ? 1 : 0);
+ }
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 dest_port)
+ struct ecore_ptt *p_ptt,
+ u16 dest_port)
{
/* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
@@ -1603,7 +1566,8 @@ void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- bool eth_geneve_enable, bool ip_geneve_enable)
+ bool eth_geneve_enable,
+ bool ip_geneve_enable)
{
u32 reg_val;
@@ -1616,35 +1580,42 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
PRS_ENCAPSULATION_TYPE_EN_FLAGS_IP_OVER_GENEVE_ENABLE_SHIFT,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) { /* TODO: handle E5 init */
- reg_val = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ if ((reg_val != 0) && (!ECORE_IS_E5(p_hwfn->p_dev))) {
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
- /* Update output only if tunnel blocks not included. */
- if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ /* @DPDK */
+ /* Update output format only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT_E4)
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT_E4);
}
/* Update NIG register */
- ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
- eth_geneve_enable ? 1 : 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
- ip_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
/* EDPM with geneve tunnel not supported in BB */
if (ECORE_IS_BB_B0(p_hwfn->p_dev))
return;
- /* Update DORQ registers */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
- eth_geneve_enable ? 1 : 0);
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
- ip_geneve_enable ? 1 : 0);
+ /* Update PBF register */
+ if (ECORE_IS_E5(p_hwfn->p_dev)) {
+ reg_val = ecore_rd(p_hwfn, p_ptt, PBF_REG_TUNNEL_ENABLES_E5);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PBF_REG_TUNNEL_ENABLES_TUNNEL_NGE_ETH_EN_E5_SHIFT, eth_geneve_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PBF_REG_TUNNEL_ENABLES_TUNNEL_NGE_IP_EN_E5_SHIFT, ip_geneve_enable);
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_TUNNEL_ENABLES_E5, reg_val);
+ } else { /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
+ ip_geneve_enable ? 1 : 0);
+ }
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT_E4 0xC8DAB910
void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -1658,13 +1629,14 @@ void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
/* set VXLAN_NO_L2_ENABLE mask */
cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
- if (enable) {
+ if ((enable) && (!ECORE_IS_E5(p_hwfn->p_dev))) {
/* set VXLAN_NO_L2_ENABLE flag */
reg_val |= cfg_mask;
/* update PRS FIC Format register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT_E4);
+ } else {
/* clear VXLAN_NO_L2_ENABLE flag */
reg_val &= ~cfg_mask;
}
@@ -1683,9 +1655,10 @@ void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
+
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id)
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
{
struct regpair ram_line;
OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
@@ -1699,35 +1672,32 @@ void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- sizeof(ram_line) / REG_SIZE);
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE);
}
-
+/* @DPDK */
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt)
{
u32 rfs_cm_hdr_event_id;
/* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id,
- bool tcp,
- bool udp,
- bool ipv4,
- bool ipv6,
- enum gft_profile_type profile_type)
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6,
+ enum gft_profile_type profile_type)
{
u32 reg_val, cam_line, search_non_ip_as_gft;
struct regpair ram_line = { 0 };
@@ -1740,8 +1710,7 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
/* Set RFS event ID to be awakened in Tstorm by PRS */
- reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
@@ -1756,13 +1725,11 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
/* Filters are per PF!! */
- SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
- SET_FIELD(cam_line,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(cam_line,
@@ -1785,10 +1752,8 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
}
/* Write characteristics to cam */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- cam_line);
- cam_line = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, cam_line);
+ cam_line = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
@@ -1823,19 +1788,15 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
search_non_ip_as_gft = 1;
}
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
- search_non_ip_as_gft);
- ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- sizeof(ram_line) / REG_SIZE);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE);
/* Set default profile so that no filter match will happen */
ram_line.lo = 0xffffffff;
ram_line.hi = 0x3ff;
- ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH,
- sizeof(ram_line) / REG_SIZE);
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, sizeof(ram_line) / REG_SIZE);
/* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
@@ -1844,14 +1805,16 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
#endif /* UNUSED_HSI_FUNC */
-/* Configure VF zone size mode */
-void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 mode,
+/* Configure VF zone size mode */
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mode,
bool runtime_init)
{
u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
u32 msdm_vf_offset_mask;
+ if (ECORE_IS_E5(p_hwfn->p_dev))
+ DP_NOTICE(p_hwfn, true, "config_vf_zone_size_mode: Not supported in E5'\n");
+
if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
msdm_vf_size_log += 1;
else if (mode == VF_ZONE_SIZE_MODE_QUAD)
@@ -1860,55 +1823,51 @@ void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
if (runtime_init) {
- STORE_RT_REG(p_hwfn,
- PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
- msdm_vf_size_log);
- STORE_RT_REG(p_hwfn,
- PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
- msdm_vf_offset_mask);
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
} else {
- ecore_wr(p_hwfn, p_ptt,
- PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
- ecore_wr(p_hwfn, p_ptt,
- PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
}
}
/* Get mstorm statistics for offset by VF zone size mode */
-u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
- u16 stat_cnt_id,
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, u16 stat_cnt_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
- if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
- (stat_cnt_id > MAX_NUM_PFS)) {
+ if (ECORE_IS_E5(p_hwfn->p_dev))
+ return offset;
+
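+ /* With double/quad VF zone sizes, stat zones past the PF range are
+ * presumably shifted by the extra 1x/3x default-zone size of each
+ * preceding VF zone, hence the added offset below.
+ */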
+ if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
- (stat_cnt_id - MAX_NUM_PFS);
+ (stat_cnt_id - MAX_NUM_PFS);
else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
- (stat_cnt_id - MAX_NUM_PFS);
+ (stat_cnt_id - MAX_NUM_PFS);
}
return offset;
}
/* Get mstorm VF producer offset by VF zone size mode */
-u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
- u8 vf_id,
- u8 vf_queue_id,
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 vf_queue_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+ if (ECORE_IS_E5(p_hwfn->p_dev))
+ DP_NOTICE(p_hwfn, true, "get_mstorm_eth_vf_prods_offset: Not supported in E5. Use queue zone instead.'\n");
+
if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
vf_id;
else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
- vf_id;
+ vf_id;
}
return offset;
@@ -1919,12 +1878,10 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
-/* Calculate and return CDU validation byte per connection type / region /
- * cid
- */
+/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
- static u8 crc8_table_valid; /*automatically initialized to 0*/
+ static u8 crc8_table_valid; /*automatically initialized to 0*/
u8 crc, validation_byte = 0;
u32 validation_string = 0;
u32 data_to_crc;
@@ -1934,62 +1891,55 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
crc8_table_valid = 1;
}
- /*
- * The CRC is calculated on the String-to-compress:
- * [31:8] = {CID[31:20],CID[11:0]}
- * [7:4] = Region
- * [3:0] = Type
+ /* The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [ 7:4] = Region
+ * [ 3:0] = Type
*/
-#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
- CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
#endif
-#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
- CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
#endif
-#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
- CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
#endif
/* Convert to big-endian and calculate CRC8*/
data_to_crc = OSAL_BE32_TO_CPU(validation_string);
- crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
- CRC8_INIT_VALUE);
+ crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
/* The validation byte [7:0] is composed:
* for type A validation
- * [7] = active configuration bit
- * [6:0] = crc[6:0]
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
*
* for type B validation
- * [7] = active configuration bit
- * [6:3] = connection_type[3:0]
- * [2:0] = crc[2:0]
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
*/
validation_byte |= ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >>
CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
-#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
#else
- validation_byte |= crc & 0x7F;
+ validation_byte |= crc & 0x7F;
#endif
return validation_byte;
}
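/* Editorial aside (illustrative sketch, not part of the patch): a minimal
 * standalone restatement of the type-B validation byte layout documented
 * above ([7] active bit, [6:3] connection type, [2:0] low CRC bits). The
 * struct and helper names are hypothetical and exist only for illustration.
 */
struct ex_cdu_type_b_fields {
	u8 active;	/* bit 7    - active configuration bit */
	u8 conn_type;	/* bits 6:3 - connection type */
	u8 crc_low;	/* bits 2:0 - crc[2:0] */
};

static struct ex_cdu_type_b_fields ex_cdu_unpack_type_b(u8 val)
{
	struct ex_cdu_type_b_fields f;

	f.active = (val >> 7) & 0x1;
	f.conn_type = (val >> 3) & 0xf;
	f.crc_low = val & 0x7;
	return f;
}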
/* Calculate and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem, u16 ctx_size,
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, u16 ctx_size,
u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
- p_ctx = (u8 * const)p_ctx_mem;
+ p_ctx = (u8 *const)p_ctx_mem;
if (ECORE_IS_E5(p_hwfn->p_dev)) {
x_val_ptr = &p_ctx[con_region_offsets_e5[0][ctx_type]];
@@ -2009,12 +1959,12 @@ void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
}
/* Calculate and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
- p_ctx = (u8 * const)p_ctx_mem;
+ p_ctx = (u8 *const)p_ctx_mem;
region1_val_ptr = ECORE_IS_E5(p_hwfn->p_dev) ?
&p_ctx[task_region_offsets_e5[0][ctx_type]] :
@@ -2026,13 +1976,12 @@ void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
}
/* Memset session context to 0 while preserving validation bytes */
-void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
- u32 ctx_size, u8 ctx_type)
+void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
u8 x_val, t_val, u_val;
- p_ctx = (u8 * const)p_ctx_mem;
+ p_ctx = (u8 *const)p_ctx_mem;
if (ECORE_IS_E5(p_hwfn->p_dev)) {
x_val_ptr = &p_ctx[con_region_offsets_e5[0][ctx_type]];
@@ -2056,13 +2005,12 @@ void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
}
/* Memset task context to 0 while preserving validation bytes */
-void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
- u32 ctx_size, u8 ctx_type)
+void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
u8 *p_ctx, *region1_val_ptr;
u8 region1_val;
- p_ctx = (u8 * const)p_ctx_mem;
+ p_ctx = (u8 *const)p_ctx_mem;
region1_val_ptr = ECORE_IS_E5(p_hwfn->p_dev) ?
&p_ctx[task_region_offsets_e5[0][ctx_type]] :
@@ -2076,8 +2024,7 @@ void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
}
/* Enable and configure context validation */
-void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
u32 ctx_validation;
@@ -2094,33 +2041,77 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
-#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
-#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+/*******************************************************************************
+ * File name : rdma_init.c
+ * Author : Michael Shteinbok
+ *******************************************************************************
+ *******************************************************************************
+ * Description:
+ * RDMA HSI functions
+ *
+ *******************************************************************************
+ * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
+ *
+ *******************************************************************************/
-static u32 ecore_get_overlay_addr_ram_addr(struct ecore_hwfn *p_hwfn,
- u8 storm_id)
+static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
+ u8 storm_id)
{
switch (storm_id) {
case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- PSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+
+ default: return 0;
+ }
+}
+
+void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS])
+{
+ u8 storm_id;
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
+
+ ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+ }
+}
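/* Editorial aside (illustrative sketch, not part of the patch): a hedged
 * usage example for ecore_set_rdma_error_level(). It assumes a p_hwfn/p_ptt
 * pair already acquired by the caller; the severity value is hypothetical.
 */
static void ex_set_uniform_rdma_assert_level(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	u8 levels[NUM_STORMS];
	u8 storm_id;

	/* Program the same assert threshold into every storm's RAM slot */
	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++)
		levels[storm_id] = 1;

	ecore_set_rdma_error_level(p_hwfn, p_ptt, levels);
}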
+
+#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
+#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+
+
+static u32 ecore_get_overlay_addr_ram_addr(struct ecore_hwfn *p_hwfn,
+ u8 storm_id)
+{
+ switch (storm_id) {
+ case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + PSTORM_OVERLAY_BUF_ADDR_OFFSET;
default: return 0;
}
}
struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
- const u32 *const fw_overlay_in_buf,
- u32 buf_size_in_bytes)
+ const u32 *const fw_overlay_in_buf,
+ u32 buf_size_in_bytes)
{
u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
struct phys_mem_desc *allocated_mem;
@@ -2128,15 +2119,12 @@ struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
if (!buf_size)
return OSAL_NULL;
- allocated_mem = (struct phys_mem_desc *)OSAL_ZALLOC(p_hwfn->p_dev,
- GFP_KERNEL,
- NUM_STORMS *
- sizeof(struct phys_mem_desc));
+ allocated_mem = (struct phys_mem_desc *)OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ NUM_STORMS * sizeof(struct phys_mem_desc));
if (!allocated_mem)
return OSAL_NULL;
- OSAL_MEMSET(allocated_mem, 0, NUM_STORMS *
- sizeof(struct phys_mem_desc));
+ OSAL_MEMSET(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
/* For each Storm, set physical address in RAM */
while (buf_offset < buf_size) {
@@ -2145,19 +2133,16 @@ struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
u32 storm_buf_size;
u8 storm_id;
- hdr =
- (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
- storm_buf_size = GET_FIELD(hdr->data,
- FW_OVERLAY_BUF_HDR_BUF_SIZE);
+ hdr = (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
+ storm_buf_size = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_BUF_SIZE);
storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
storm_mem_desc = allocated_mem + storm_id;
storm_mem_desc->size = storm_buf_size * sizeof(u32);
/* Allocate physical memory for Storm's overlays buffer */
- storm_mem_desc->virt_addr =
- OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &storm_mem_desc->phys_addr,
- storm_mem_desc->size);
+ storm_mem_desc->virt_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &storm_mem_desc->phys_addr,
+ storm_mem_desc->size);
if (!storm_mem_desc->virt_addr)
break;
@@ -2165,8 +2150,7 @@ struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
buf_offset += OVERLAY_HDR_SIZE_DWORDS;
/* Copy Storm's overlays buffer to allocated memory */
- OSAL_MEMCPY(storm_mem_desc->virt_addr,
- &fw_overlay_in_buf[buf_offset],
+ OSAL_MEMCPY(storm_mem_desc->virt_addr, &fw_overlay_in_buf[buf_offset],
storm_mem_desc->size);
/* Advance to next Storm */
@@ -2175,7 +2159,7 @@ struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
/* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) {
- ecore_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ ecore_fw_overlay_mem_free(p_hwfn, &allocated_mem);
return OSAL_NULL;
}
@@ -2189,8 +2173,8 @@ void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn,
u8 storm_id;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
- struct phys_mem_desc *storm_mem_desc =
- (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ struct phys_mem_desc *storm_mem_desc = (struct phys_mem_desc *)fw_overlay_mem +
+ storm_id;
u32 ram_addr, i;
/* Skip Storms with no FW overlays */
@@ -2203,31 +2187,29 @@ void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn,
/* Write Storm's overlay physical address to RAM */
for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
- ecore_wr(p_hwfn, p_ptt, ram_addr,
- ((u32 *)&storm_mem_desc->phys_addr)[i]);
+ ecore_wr(p_hwfn, p_ptt, ram_addr, ((u32 *)&storm_mem_desc->phys_addr)[i]);
}
}
void ecore_fw_overlay_mem_free(struct ecore_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem)
+ struct phys_mem_desc **fw_overlay_mem)
{
u8 storm_id;
- if (!fw_overlay_mem)
+ if (!fw_overlay_mem || !(*fw_overlay_mem))
return;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
- struct phys_mem_desc *storm_mem_desc =
- (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ struct phys_mem_desc *storm_mem_desc = (struct phys_mem_desc *)*fw_overlay_mem +
+ storm_id;
/* Free Storm's physical memory */
if (storm_mem_desc->virt_addr)
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- storm_mem_desc->virt_addr,
- storm_mem_desc->phys_addr,
- storm_mem_desc->size);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, storm_mem_desc->virt_addr,
+ storm_mem_desc->phys_addr, storm_mem_desc->size);
}
/* Free allocated virtual memory */
- OSAL_FREE(p_hwfn->p_dev, fw_overlay_mem);
+ OSAL_FREE(p_hwfn->p_dev, *fw_overlay_mem);
+ *fw_overlay_mem = OSAL_NULL;
}
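/* Editorial aside (illustrative sketch, not part of the patch): the FW
 * overlay lifecycle with the reworked free() above, which now takes the
 * descriptor pointer by address and clears it so the caller cannot keep a
 * dangling pointer. The function below is hypothetical caller code.
 */
static void ex_fw_overlay_lifecycle(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    const u32 *const overlay_buf,
				    u32 overlay_buf_bytes)
{
	struct phys_mem_desc *overlays;

	overlays = ecore_fw_overlay_mem_alloc(p_hwfn, overlay_buf,
					      overlay_buf_bytes);
	if (!overlays)
		return;

	/* Publish each storm's overlay buffer address to its RAM slot */
	ecore_fw_overlay_init_ram(p_hwfn, p_ptt, overlays);

	/* ... runtime ... */

	ecore_fw_overlay_mem_free(p_hwfn, &overlays); /* overlays == NULL now */
}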
@@ -1,21 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
#include "ecore_hsi_common.h"
#include "ecore_hsi_eth.h"
+#include "ecore_hsi_func_common.h"
+#define NUM_STORMS 6
-/* Returns the VOQ based on port and TC */
-#define VOQ(port, tc, max_phys_tcs_per_port) \
- ((tc) == PURE_LB_TC ? NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port) : \
- (port) * (max_phys_tcs_per_port) + (tc))
+/* Forward declarations */
struct init_qm_pq_params;
+/**
+ * @brief ecore_get_ext_voq - Return the external VOQ ID
+ *
+ * @param p_hwfn - HW device data
+ * @param port_id - port ID
+ * @param tc - traffic class ID
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ *
+ * @return The external VOQ ID.
+ */
+u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 tc,
+ u8 max_phys_tcs_per_port);
+
/**
* @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
*
@@ -33,22 +47,22 @@ struct init_qm_pq_params;
*/
u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,
u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
/**
- * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
- * phase
+ * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
*
- * @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param p_hwfn - HW device data
+ * @param max_ports_per_engine - max number of ports per engine in HW
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
* @param global_rl_en - enable global rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param vport_wfq_en - enable per-VPORT WFQ
* @param port_params - array with parameters for each port.
* @param global_rl_params - array with parameters for each global RL.
* If OSAL_NULL, global RLs are not configured.
@@ -62,36 +76,39 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
bool pf_wfq_en,
bool global_rl_en,
bool vport_wfq_en,
- struct init_qm_port_params port_params[MAX_NUM_PORTS],
- struct init_qm_global_rl_params
- global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]);
+ struct init_qm_port_params port_params[MAX_NUM_PORTS],
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]);
/**
- * @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
+ * @brief ecore_qm_pf_rt_init - Prepare QM runtime init values for the PF phase
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param is_pf_loading - indicates if the PF is currently loading,
* i.e. it has no allocated QM resources.
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param start_pq - first Tx PQ ID associated with this PF
- * @param num_pf_pqs - number of Tx PQs associated with this PF
- * (non-VF)
- * @param num_vf_pqs - number of Tx PQs associated with a VF
- * @param start_vport - first VPORT ID associated with this PF
- * @param num_vports - number of VPORTs associated with this PF
- * @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
- * be 0. otherwise, the weight must be non-zero.
- * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
- * configure. ignored if PF RL is globally disabled.
- * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
- * each Tx PQ associated with the specified PF.
- * @param vport_params - array of size num_vports with parameters for each
- * associated VPORT.
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF
+ * (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
+ * @param num_vports - number of VPORTs associated with this PF
+ * @param pf_wfq - WFQ weight. if PF WFQ is globally disabled,
+ * the weight must be 0. otherwise, the weight
+ * must be non-zero.
+ * @param pf_rl - rate limit in Mb/sec units. a value of 0
+ * means don't configure. ignored if PF RL is
+ * globally disabled.
+ * @param pq_params - array of size (num_pf_pqs + num_vf_pqs) with
+ * parameters for each Tx PQ associated with the
+ * specified PF.
+ * @param vport_params - array of size num_vports with parameters for
+ * each associated VPORT.
*
* @return 0 on success, -1 on error.
*/
@@ -114,50 +131,50 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_vport_params *vport_params);
/**
- * @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
+ * @brief ecore_init_pf_wfq - Initializes the WFQ weight of the specified PF
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_wfq - WFQ weight. Must be non-zero.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 pf_id,
- u16 pf_wfq);
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u16 pf_wfq);
/**
* @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl);
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
/**
- * @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
+ * @brief ecore_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
- * with the VPORT for each TC. This array is filled by
- * ecore_qm_pf_rt_init
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * ecore_qm_pf_rt_init
* @param wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS],
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
u16 wfq);
/**
@@ -177,142 +194,124 @@ int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
u32 rate_limit);
/**
- * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
- * VPORT.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
- * @param link_speed - link speed in Mbps.
- *
- * @return 0 on success, -1 on error.
- */
-int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl,
- u32 link_speed);
-
-/**
- * @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
+ * @brief ecore_send_qm_stop_cmd - Sends a stop command to the QM
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
* @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting
- * for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ * QM command done.
*/
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs);
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
+
#ifndef UNUSED_HSI_FUNC
/**
- * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
+ * @brief ecore_init_nig_ets - Initializes the NIG ETS arbiter
*
* Based on weight/priority requirements per-TC.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param req - the NIG ETS initialization requirements.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG ETS initialization requirements.
* @param is_lb - if set, the loopback port arbiter is initialized, otherwise
* the physical port arbiter is initialized. The pure-LB TC
* requirements are ignored when is_lb is cleared.
*/
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_ets_req *req,
- bool is_lb);
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req,
+ bool is_lb);
/**
- * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
+ * @brief ecore_init_nig_lb_rl - Initializes the NIG LB RLs
*
* Based on global and per-TC rate requirements
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param req - the NIG LB RLs initialization requirements.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG LB RLs initialization requirements.
*/
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_nig_lb_rl_req *req);
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req);
+
#endif /* UNUSED_HSI_FUNC */
/**
- * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
+ * @brief ecore_init_nig_pri_tc_map - Initializes the NIG priority to TC map.
*
* Assumes valid arguments.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param req - required mapping from prioirties to TCs.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - required mapping from priorities to TCs.
*/
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_nig_pri_tc_map_req *req);
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req);
#ifndef UNUSED_HSI_FUNC
+
/**
- * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
+ * @brief ecore_init_prs_ets - Initializes the PRS Rx ETS arbiter
*
* Based on weight/priority requirements per-TC.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param req - the PRS ETS initialization requirements.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the PRS ETS initialization requirements.
*/
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_ets_req *req);
-#endif /* UNUSED_HSI_FUNC */
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req);
+#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
+
/**
- * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
+ * @brief ecore_init_brb_ram - Initializes BRB RAM sizes per TC.
*
* Based on weight/priority requirements per-TC.
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
- * @param req - the BRB RAM initialization requirements.
+ * @param req - the BRB RAM initialization requirements.
*/
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_brb_ram_req *req);
-#endif /* UNUSED_HSI_FUNC */
-
-/**
- * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing
- *
- * @param p_ptt - ptt window used for writing the registers.
- * @param enable - VXLAN no L2 enable flag.
- */
-void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- bool enable);
+ struct ecore_ptt *p_ptt,
+ struct init_brb_ram_req *req);
+#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
- * input ethType should Be called
- * once per port.
+ * input ethType. Should be called once per port.
*
- * @param p_hwfn - HW device data
+ * @param p_hwfn - HW device data
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
u32 ethType);
+
#endif /* UNUSED_HSI_FUNC */
/**
- * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
+ * @brief ecore_set_vxlan_dest_port - Initializes vxlan tunnel destination udp
* port.
*
* @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
@@ -320,23 +319,23 @@ void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
u16 dest_port);
/**
- * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ * @brief ecore_set_vxlan_enable - Enable or disable VXLAN tunnel in HW
*
* @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool vxlan_enable);
/**
- * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ * @brief ecore_set_gre_enable - Enable or disable GRE tunnel in HW
*
* @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_gre_enable - eth GRE enable enable flag.
- * @param ip_gre_enable - IP GRE enable enable flag.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
*/
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -344,11 +343,11 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
bool ip_gre_enable);
/**
- * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
- * udp port
+ * @brief ecore_set_geneve_dest_port - Initializes geneve tunnel destination
+ * udp port.
*
* @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
@@ -356,24 +355,36 @@ void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
u16 dest_port);
/**
- * @brief ecore_set_geneve_enable - enable or disable GRE tunnel in HW
+ * @brief ecore_set_geneve_enable - Enable or disable GENEVE tunnel in HW
*
* @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_geneve_enable - eth GENEVE enable enable flag.
- * @param ip_geneve_enable - IP GENEVE enable enable flag.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
*/
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_geneve_enable,
bool ip_geneve_enable);
+
+/**
+ * @brief ecore_set_vxlan_no_l2_enable - Enable or disable VXLAN no L2 parsing
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param enable - VXLAN no L2 enable flag.
+ */
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable);
+
#ifndef UNUSED_HSI_FUNC
/**
-* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
-*
-* @param p_ptt - ptt window used for writing the registers.
-*/
+ * @brief ecore_set_gft_event_id_cm_hdr - Configure GFT event id and cm header
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ */
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
@@ -385,21 +396,21 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
* @param pf_id - pf on which to disable GFT.
*/
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id);
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
/**
* @brief ecore_gft_config - Enable and configure HW for GFT
-*
+ *
* @param p_hwfn - HW device data
-* @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable GFT.
-* @param tcp - set profile tcp packets.
-* @param udp - set profile udp packet.
-* @param ipv4 - set profile ipv4 packet.
-* @param ipv6 - set profile ipv6 packet.
+ * @param tcp - set profile tcp packets.
+ * @param udp - set profile udp packet.
+ * @param ipv4 - set profile ipv4 packet.
+ * @param ipv6 - set profile ipv6 packet.
* @param profile_type - define packet same fields. Use enum gft_profile_type.
-*/
+ */
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
@@ -408,54 +419,60 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
bool ipv4,
bool ipv6,
enum gft_profile_type profile_type);
+
#endif /* UNUSED_HSI_FUNC */
/**
-* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
-* used before first ETH queue started.
-*
+ * @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
+ * used before the first ETH queue is started.
+ *
* @param p_hwfn - HW device data
-* @param p_ptt - ptt window used for writing the registers. Don't care
+ * @param p_ptt - ptt window used for writing the registers. Don't care
* if runtime_init used.
-* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
+ * @param mode - VF zone size mode. Use enum vf_zone_size_mode.
* @param runtime_init - Set 1 to init runtime registers in engine phase.
* Set 0 if VF zone size mode configured after engine
* phase.
-*/
-void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
- *p_ptt, u16 mode, bool runtime_init);
+ */
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 mode,
+ bool runtime_init);
/**
* @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
* VF zone size mode.
-*
+ *
* @param p_hwfn - HW device data
-* @param stat_cnt_id - statistic counter id
-* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
-*/
+ * @param stat_cnt_id - statistic counter id
+ * @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
+ */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
- u16 stat_cnt_id, u16 vf_zone_size_mode);
+ u16 stat_cnt_id,
+ u16 vf_zone_size_mode);
/**
* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
* size mode.
-*
+ *
* @param p_hwfn - HW device data
-* @param vf_id - vf id.
-* @param vf_queue_id - per VF rx queue id.
-* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
-*/
-u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
- vf_queue_id, u16 vf_zone_size_mode);
+ * @param vf_id - vf id.
+ * @param vf_queue_id - per VF rx queue id.
+ * @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
+ */
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
+ u8 vf_id,
+ u8 vf_queue_id,
+ u16 vf_zone_size_mode);
+
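/* Editorial aside (illustrative sketch, not part of the patch): intended
 * call ordering for the three helpers above. The zone size mode is set once
 * before the first ETH queue starts, and the same mode value is then passed
 * to the offset getters. The counter/VF/queue IDs below are hypothetical.
 */
static inline void ex_vf_zone_offsets(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 stat_off, prod_off;

	/* PF init time, before any ETH queue is started */
	ecore_config_vf_zone_size_mode(p_hwfn, p_ptt,
				       VF_ZONE_SIZE_MODE_DOUBLE, false);

	/* Later, derive per-VF offsets using the same mode value */
	stat_off = ecore_get_mstorm_queue_stat_offset(p_hwfn, 20,
						      VF_ZONE_SIZE_MODE_DOUBLE);
	prod_off = ecore_get_mstorm_eth_vf_prods_offset(p_hwfn, 3, 0,
							VF_ZONE_SIZE_MODE_DOUBLE);
	(void)stat_off;
	(void)prod_off;
}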
/**
* @brief ecore_enable_context_validation - Enable and configure context
- * validation.
+ * validation.
*
* @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
*/
-void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
/**
 * @brief ecore_calc_session_ctx_validation - Calculate validation byte for
* session context.
@@ -480,7 +497,7 @@ void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
- * @param tid - context tid.
+ * @param tid - context tid.
*/
void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
void *p_ctx_mem,
@@ -492,10 +509,10 @@ void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
* @brief ecore_memset_session_ctx - Memset session context to 0 while
* preserving validation bytes.
*
- * @param p_hwfn - HW device data
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
*/
void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn,
void *p_ctx_mem,
@@ -507,9 +524,9 @@ void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn,
* validation bytes.
*
* @param p_hwfn - HW device data
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
*/
void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn,
void *p_ctx_mem,
@@ -517,57 +534,41 @@ void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn,
u8 ctx_type);
-/*******************************************************************************
- * File name : rdma_init.h
- * Author : Michael Shteinbok
- *******************************************************************************
- *******************************************************************************
- * Description:
- * RDMA HSI functions header
- *
- *******************************************************************************
- * Notes: This is the input to the auto generated file drv_init_fw_funcs.h
- *
- *******************************************************************************
- */
-#define NUM_STORMS 6
-
-
-
-/**
+/**
* @brief ecore_set_rdma_error_level - Sets the RDMA assert level.
* If the severity of the error will be
- * above the level, the FW will assert.
+ * above the level, the FW will assert.
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param assert_level - An array of assert levels for each storm.
*/
void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 assert_level[NUM_STORMS]);
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS]);
+
+
/**
- * @brief ecore_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory
+ * @brief ecore_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
*
- * @param p_hwfn - HW device data
+ * @param p_hwfn - HW device data
* @param fw_overlay_in_buf - the input FW overlay buffer.
- * @param buf_size - the size of the input FW overlay buffer in bytes.
- * must be aligned to dwords.
+ * @param buf_size - the size of the input FW overlay buffer in bytes.
+ * must be aligned to dwords.
* @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory.
*
- * @return a pointer to the allocated overlays memory, or OSAL_NULL in case of
- * failures.
+ * @return a pointer to the allocated overlays memory, or OSAL_NULL in case of failures.
*/
struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn,
- const u32 *const fw_overlay_in_buf,
- u32 buf_size_in_bytes);
+ const u32 *const fw_overlay_in_buf,
+ u32 buf_size_in_bytes);
/**
* @brief ecore_fw_overlay_init_ram - Initializes the FW overlay RAM.
*
- * @param p_hwfn - HW device data.
- * @param p_ptt - ptt window used for writing the registers.
- * @param fw_overlay_mem - the allocated FW overlay memory.
+ * @param p_hwfn - HW device data.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param fw_overlay_mem - the allocated FW overlay memory.
*/
void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -576,10 +577,10 @@ void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn,
/**
* @brief ecore_fw_overlay_mem_free - Frees the FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ * @param p_hwfn - HW device data.
+ * @param fw_overlay_mem - pointer to the allocated FW overlay memory to free.
*/
void ecore_fw_overlay_mem_free(struct ecore_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem);
+ struct phys_mem_desc **fw_overlay_mem);
#endif
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
@@ -13,6 +13,14 @@
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"
+#ifndef CONFIG_ECORE_BINARY_FW
+#ifdef CONFIG_ECORE_ZIPPED_FW
+#include "ecore_init_values_zipped.h"
+#else
+#include "ecore_init_values.h"
+#endif
+#endif
+
#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "reg_addr.h"
@@ -23,19 +31,13 @@
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
- p_dev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
-}
-
-/* Runtime configuration helpers */
-void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
-{
- int i;
+ u32 offset = ECORE_IS_E5(p_dev) ? E5_IRO_ARR_OFFSET : E4_IRO_ARR_OFFSET;
- for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data.b_valid[i] = false;
+ p_dev->iro_arr = iro_arr + offset;
}
-void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset, u32 val)
{
if (rt_offset >= RUNTIME_ARRAY_SIZE) {
DP_ERR(p_hwfn,
@@ -49,7 +51,8 @@ void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
}
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
- u32 rt_offset, u32 *p_val, osal_size_t size)
+ u32 rt_offset, u32 *p_val,
+ osal_size_t size)
{
osal_size_t i;
@@ -64,6 +67,7 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
}
}
@@ -71,11 +75,12 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr,
u16 rt_offset,
- u16 size, bool b_must_dmae)
+ u16 size,
+ bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- u16 i, segment;
+ u16 i, j, segment;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Since not all RT entries are initialized, go over the RT and
@@ -89,7 +94,9 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
- ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
+ p_init_val[i]);
+ p_valid[i] = false;
continue;
}
@@ -105,6 +112,10 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
+ /* invalidate after writing */
+ for (j = i; j < i + segment; j++)
+ p_valid[j] = false;
+
/* Jump over the entire segment, including invalid entry */
i += segment;
}
@@ -128,6 +139,7 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
sizeof(u32) * RUNTIME_ARRAY_SIZE);
if (!rt_data->init_val) {
OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+ rt_data->b_valid = OSAL_NULL;
return ECORE_NOMEM;
}
@@ -137,18 +149,18 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+ p_hwfn->rt_data.init_val = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+ p_hwfn->rt_data.b_valid = OSAL_NULL;
}
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 addr,
- u32 dmae_data_offset,
- u32 size, const u32 *p_buf,
- bool b_must_dmae,
- bool b_can_dmae)
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 dmae_data_offset,
+ u32 size, const u32 *p_buf,
+ bool b_must_dmae, bool b_can_dmae)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
@@ -185,7 +197,7 @@ static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(¶ms, 0, sizeof(params));
SET_FIELD(params.flags, DMAE_PARAMS_RW_REPL_SRC, 0x1);
return ecore_dmae_host2grc(p_hwfn, p_ptt,
- (osal_uintptr_t)&zero_buffer[0],
+ (osal_uintptr_t)(&zero_buffer[0]),
addr, fill_count, ¶ms);
}
@@ -199,6 +211,7 @@ static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, addr, fill);
}
+
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_write_op *cmd,
@@ -219,28 +232,29 @@ static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
array_data = p_dev->fw_data->arr_data;
- hdr = (union init_array_hdr *)
- (uintptr_t)(array_data + dmae_array_offset);
+ hdr = (union init_array_hdr *)(array_data +
+ dmae_array_offset);
data = OSAL_LE32_TO_CPU(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
offset = dmae_array_offset + 1;
- input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ input_len = GET_FIELD(data,
+ INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
max_size = MAX_ZIPPED_SIZE * 4;
OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
- (u8 *)(uintptr_t)&array_data[offset],
- max_size,
- (u8 *)p_hwfn->unzip_buf);
+ (u8 *)&array_data[offset],
+ max_size, (u8 *)p_hwfn->unzip_buf);
if (output_len) {
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
output_len,
p_hwfn->unzip_buf,
b_must_dmae, b_can_dmae);
} else {
- DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
+ DP_NOTICE(p_hwfn, true,
+ "Failed to unzip dmae data\n");
rc = ECORE_INVAL;
}
#else
@@ -250,27 +264,27 @@ static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
#endif
break;
case INIT_ARR_PATTERN:
- {
- u32 repeats = GET_FIELD(data,
+ {
+ u32 repeats = GET_FIELD(data,
INIT_ARRAY_PATTERN_HDR_REPETITIONS);
- u32 i;
-
- size = GET_FIELD(data,
- INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
-
- for (i = 0; i < repeats; i++, addr += size << 2) {
- rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
- dmae_array_offset +
- 1, size, array_data,
- b_must_dmae,
- b_can_dmae);
- if (rc)
- break;
+ u32 i;
+
+ size = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ if (rc)
+ break;
}
break;
}
case INIT_ARR_STANDARD:
- size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ size = GET_FIELD(data,
+ INIT_ARRAY_STANDARD_HDR_SIZE);
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset + 1,
size, array_data,
@@ -290,13 +304,12 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
DP_NOTICE(p_hwfn, true,
- "Need to write to %08x for Wide-bus but DMAE isn't"
- " allowed\n",
+ "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
addr);
return ECORE_INVAL;
}
@@ -345,7 +358,8 @@ static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct init_read_op *cmd)
+ struct ecore_ptt *p_ptt,
+ struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
@@ -384,14 +398,16 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
data = OSAL_LE32_TO_CPU(cmd->expected_val);
for (i = 0;
- i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
+ i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+ i++) {
OSAL_UDELAY(delay);
val = ecore_rd(p_hwfn, p_ptt, addr);
}
if (i == ECORE_INIT_MAX_POLL_COUNT)
DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
- addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
+ addr,
+ OSAL_LE32_TO_CPU(cmd->expected_val), val,
OSAL_LE32_TO_CPU(cmd->op_data));
}
@@ -469,7 +485,9 @@ static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- int phase, int phase_id, int modes)
+ int phase,
+ int phase_id,
+ int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
bool b_dmae = (phase != PHASE_ENGINE);
@@ -531,6 +549,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
}
#ifdef CONFIG_ECORE_ZIPPED_FW
OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
+ p_hwfn->unzip_buf = OSAL_NULL;
#endif
return rc;
}
@@ -553,19 +572,19 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
return ECORE_INVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;
+ buf_hdr = (struct bin_buffer_hdr *)fw_data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
- fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));
+ fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
- fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));
+ fw->init_ops = (union init_op *)(fw_data + offset);
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
- fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));
+ fw->arr_data = (u32 *)(fw_data + offset);
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
- fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
+ fw->modes_tree_buf = (u8 *)(fw_data + offset);
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
@@ -585,8 +604,10 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
fw->arr_data = (u32 *)init_val;
fw->modes_tree_buf = (u8 *)modes_tree_buf;
fw->init_ops_size = init_ops_size;
- fw->fw_overlays = fw_overlays;
- fw->fw_overlays_len = sizeof(fw_overlays);
+ fw->fw_overlays_e4 = e4_overlays;
+ fw->fw_overlays_e4_len = sizeof(e4_overlays);
+ fw->fw_overlays_e5 = e5_overlays;
+ fw->fw_overlays_e5_len = sizeof(e5_overlays);
#endif
return ECORE_SUCCESS;
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_INIT_OPS__
#define __ECORE_INIT_OPS__
@@ -29,7 +29,7 @@ void ecore_init_iro_array(struct ecore_dev *p_dev);
* @return _ecore_status_t
*/
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
+ struct ecore_ptt *p_ptt,
int phase,
int phase_id,
int modes);
@@ -52,15 +52,6 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_init_free(struct ecore_hwfn *p_hwfn);
-
-/**
- * @brief ecore_init_clear_rt_data - Clears the runtime init array.
- *
- *
- * @param p_hwfn
- */
-void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
-
/**
* @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
*
@@ -70,8 +61,8 @@ void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
* @param val
*/
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
- u32 rt_offset,
- u32 val);
+ u32 rt_offset,
+ u32 val);
#define STORE_RT_REG(hwfn, offset, val) \
ecore_init_store_rt_reg(hwfn, offset, val)
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
-#include <rte_string_fns.h>
-
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
+#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
@@ -20,10 +19,32 @@
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"
+#include "../qede_debug.h" /* @DPDK */
+
+#ifdef DIAG
+/* This is nasty, but diag is using the drv_dbg_fw_funcs.c [non-ecore flavor],
+ * and so the functions are lacking ecore prefix.
+ * If there would be other clients needing this [or if the content that isn't
+ * really optional there would increase], we'll need to re-think this.
+ */
+enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
+ struct ecore_ptt *ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
+const char *dbg_get_status_str(enum dbg_status status);
+
+#define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
+ dbg_read_attn(hwfn, ptt, id, type, clear, results)
+#define ecore_dbg_get_status_str(status) \
+ dbg_get_status_str(status)
+#endif
struct ecore_pi_info {
ecore_int_comp_cb_t comp_cb;
- void *cookie; /* Will be sent to the compl cb function */
+ void *cookie; /* Will be sent to the completion callback function */
};
struct ecore_sb_sp_info {
@@ -47,17 +68,16 @@ struct aeu_invert_reg_bit {
#define ATTENTION_PARITY (1 << 0)
-#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_MASK (0xff)
#define ATTENTION_LENGTH_SHIFT (4)
-#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
- ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_LENGTH(flags) (GET_FIELD((flags), ATTENTION_LENGTH))
#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
ATTENTION_PARITY)
/* Multiple bits start with this offset */
-#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_MASK (0xff)
#define ATTENTION_OFFSET_SHIFT (12)
#define ATTENTION_BB_MASK (0xf)
@@ -78,79 +98,83 @@ struct aeu_invert_reg {
struct aeu_invert_reg_bit bits[32];
};
-#define MAX_ATTN_GRPS (8)
-#define NUM_ATTN_REGS (9)
+#define MAX_ATTN_GRPS 8
+#define NUM_ATTN_REGS_E4 9
+#define NUM_ATTN_REGS_E5 10
+#define MAX_NUM_ATTN_REGS NUM_ATTN_REGS_E5
+#define NUM_ATTN_REGS(p_hwfn) \
+ (ECORE_IS_E4((p_hwfn)->p_dev) ? NUM_ATTN_REGS_E4 : NUM_ATTN_REGS_E5)
static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
- DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
+ DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
+ tmp);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
+ 0xffffffff);
return ECORE_SUCCESS;
}
-#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
-#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
-#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
-#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
-#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
-#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
-#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
-#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
-#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
-#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0)
-#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+/* Register PSWHST_REG_VF_DISABLED_ERROR_DATA */
+#define ECORE_PSWHST_ATTN_DISABLED_PF_MASK (0xf)
+#define ECORE_PSWHST_ATTN_DISABLED_PF_SHIFT (14)
+#define ECORE_PSWHST_ATTN_DISABLED_VF_MASK (0xff)
+#define ECORE_PSWHST_ATTN_DISABLED_VF_SHIFT (6)
+#define ECORE_PSWHST_ATTN_DISABLED_VALID_MASK (0x1)
+#define ECORE_PSWHST_ATTN_DISABLED_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTN_DISABLED_CLIENT_MASK (0xf)
+#define ECORE_PSWHST_ATTN_DISABLED_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTN_DISABLED_WRITE_MASK (0x1)
+#define ECORE_PSWHST_ATTN_DISABLED_WRITE_SHIFT (0)
+
+/* Register PSWHST_REG_VF_DISABLED_ERROR_VALID */
+#define ECORE_PSWHST_ATTN_VF_DISABLED_MASK (0x1)
+#define ECORE_PSWHST_ATTN_VF_DISABLED_SHIFT (0)
+
+/* Register PSWHST_REG_INCORRECT_ACCESS_VALID */
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_MASK (0x1)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_SHIFT (0)
+
+/* Register PSWHST_REG_INCORRECT_ACCESS_DATA */
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_CLIENT_MASK (0xf)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_VF_VALID_MASK (0x1)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_VF_ID_MASK (0xff)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_PF_ID_MASK (0xf)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_BYTE_EN_MASK (0xff)
+#define ECORE_PSWHST_ATTN_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+
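/* Editorial aside (illustrative sketch, not part of the patch): the masks
 * above are now plain field-width masks (e.g. 0xff) rather than pre-shifted
 * ones (e.g. 0x3fc0), so each field is extracted by shifting first and then
 * masking, as the rewritten ecore_pswhst_attn_cb() below does via
 * GET_FIELD(). A standalone restatement with hypothetical EX_* names:
 */
#define EX_INCORRECT_ACCESS_VF_ID_MASK	(0xff)
#define EX_INCORRECT_ACCESS_VF_ID_SHIFT	(6)

static inline u8 ex_incorrect_access_vf_id(u32 data)
{
	/* shift the field down to bit 0, then apply its width mask */
	return (u8)((data >> EX_INCORRECT_ACCESS_VF_ID_SHIFT) &
		    EX_INCORRECT_ACCESS_VF_ID_MASK);
}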
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
- u32 tmp =
- ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PSWHST_REG_VF_DISABLED_ERROR_VALID);
+ u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID);
/* Disabled VF access */
- if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
+ if (GET_FIELD(tmp, ECORE_PSWHST_ATTN_VF_DISABLED)) {
u32 addr, data;
addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PSWHST_REG_VF_DISABLED_ERROR_DATA);
- DP_INFO(p_hwfn->p_dev,
- "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
- " Write [0x%02x] Addr [0x%08x]\n",
- (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
- >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
- (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
- >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
- ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
- ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
- ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT),
+ DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n",
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_DISABLED_PF)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_DISABLED_VF)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_DISABLED_VALID)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_DISABLED_CLIENT)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_DISABLED_WRITE)),
addr);
}
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PSWHST_REG_INCORRECT_ACCESS_VALID);
- if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ if (GET_FIELD(tmp, ECORE_PSWHST_ATTN_INCORRECT_ACCESS)) {
u32 addr, data, length;
addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -160,29 +184,14 @@ static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PSWHST_REG_INCORRECT_ACCESS_LENGTH);
- DP_INFO(p_hwfn->p_dev,
- "Incorrect access to %08x of length %08x - PF [%02x]"
- " VF [%04x] [valid %02x] client [%02x] write [%02x]"
- " Byte-Enable [%04x] [%08x]\n",
+ DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
addr, length,
- (u8)((data &
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
- (u8)((data &
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
- ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_INCORRECT_ACCESS_PF_ID)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_INCORRECT_ACCESS_VF_ID)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_INCORRECT_ACCESS_VF_VALID)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_INCORRECT_ACCESS_CLIENT)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_INCORRECT_ACCESS_WR)),
+ (u8)(GET_FIELD(data, ECORE_PSWHST_ATTN_INCORRECT_ACCESS_BYTE_EN)),
data);
}
@@ -193,42 +202,41 @@ static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
}
/* Register GRC_REG_TIMEOUT_ATTN_ACCESS_VALID */
-#define ECORE_GRC_ATTENTION_VALID_BIT_MASK (0x1)
-#define ECORE_GRC_ATTENTION_VALID_BIT_SHIFT (0)
-
-#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
-#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
-#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
+#define ECORE_GRC_ATTENTION_VALID_BIT_MASK (0x1)
+#define ECORE_GRC_ATTENTION_VALID_BIT_SHIFT (0)
+
+/* Register GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 */
+#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
+#define ECORE_GRC_ATTENTION_ADDRESS_SHIFT (0)
+#define ECORE_GRC_ATTENTION_RDWR_BIT_MASK (0x1)
+#define ECORE_GRC_ATTENTION_RDWR_BIT_SHIFT (23)
+#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
+
+/* Register GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 */
#define ECORE_GRC_ATTENTION_PF_MASK (0xf)
-#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
+#define ECORE_GRC_ATTENTION_PF_SHIFT (0)
+#define ECORE_GRC_ATTENTION_VF_MASK (0xff)
#define ECORE_GRC_ATTENTION_VF_SHIFT (4)
-#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
+#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
+
+/* Constant value for ECORE_GRC_ATTENTION_PRIV field */
#define ECORE_GRC_ATTENTION_PRIV_VF (0)
+
static const char *grc_timeout_attn_master_to_str(u8 master)
{
switch (master) {
- case 1:
- return "PXP";
- case 2:
- return "MCP";
- case 3:
- return "MSDM";
- case 4:
- return "PSDM";
- case 5:
- return "YSDM";
- case 6:
- return "USDM";
- case 7:
- return "TSDM";
- case 8:
- return "XSDM";
- case 9:
- return "DBU";
- case 10:
- return "DMAE";
+ case 1: return "PXP";
+ case 2: return "MCP";
+ case 3: return "MSDM";
+ case 4: return "PSDM";
+ case 5: return "YSDM";
+ case 6: return "USDM";
+ case 7: return "TSDM";
+ case 8: return "XSDM";
+ case 9: return "DBU";
+ case 10: return "DMAE";
default:
return "Unknown";
}
@@ -255,21 +263,19 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+	/* ECORE_GRC_ATTENTION_ADDRESS is a dword (register) address;
+	 * it is shifted left by 2 below to convert it to a byte address.
+	 */
DP_NOTICE(p_hwfn->p_dev, false,
"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
tmp2, tmp,
- (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
+ GET_FIELD(tmp, ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
: "Read from",
- (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ GET_FIELD(tmp, ECORE_GRC_ATTENTION_ADDRESS) << 2,
grc_timeout_attn_master_to_str(
- (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
- ECORE_GRC_ATTENTION_MASTER_SHIFT),
- (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
- (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
- ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
+ GET_FIELD(tmp, ECORE_GRC_ATTENTION_MASTER)),
+ GET_FIELD(tmp2, ECORE_GRC_ATTENTION_PF),
+ (GET_FIELD(tmp2, ECORE_GRC_ATTENTION_PRIV) ==
ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
- (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
- ECORE_GRC_ATTENTION_VF_SHIFT);
+ GET_FIELD(tmp2, ECORE_GRC_ATTENTION_VF));
/* Clean the validity bit */
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -278,19 +284,41 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
return rc;
}
-#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
-#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
-#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
-#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
-#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
-#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
-#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
-#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
-#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
-#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
-#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
-#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
-#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
+/* Register PGLUE_B_REG_TX_ERR_RD_DETAILS and
+ * Register PGLUE_B_REG_TX_ERR_WR_DETAILS
+ */
+#define ECORE_PGLUE_ATTN_DETAILS_VF_VALID_MASK (0x1)
+#define ECORE_PGLUE_ATTN_DETAILS_VF_VALID_SHIFT (19)
+#define ECORE_PGLUE_ATTN_DETAILS_PFID_MASK (0xf)
+#define ECORE_PGLUE_ATTN_DETAILS_PFID_SHIFT (20)
+#define ECORE_PGLUE_ATTN_DETAILS_VFID_MASK (0xff)
+#define ECORE_PGLUE_ATTN_DETAILS_VFID_SHIFT (24)
+
+/* Register PGLUE_B_REG_TX_ERR_RD_DETAILS2 and
+ * Register PGLUE_B_REG_TX_ERR_WR_DETAILS2
+ */
+#define ECORE_PGLUE_ATTN_DETAILS2_WAS_ERR_MASK (0x1)
+#define ECORE_PGLUE_ATTN_DETAILS2_WAS_ERR_SHIFT (21)
+#define ECORE_PGLUE_ATTN_DETAILS2_BME_MASK (0x1)
+#define ECORE_PGLUE_ATTN_DETAILS2_BME_SHIFT (22)
+#define ECORE_PGLUE_ATTN_DETAILS2_FID_EN_MASK (0x1)
+#define ECORE_PGLUE_ATTN_DETAILS2_FID_EN_SHIFT (23)
+#define ECORE_PGLUE_ATTN_DETAILS2_RD_VALID_MASK (0x1)
+#define ECORE_PGLUE_ATTN_DETAILS2_RD_VALID_SHIFT (26)
+#define ECORE_PGLUE_ATTN_DETAILS2_WR_VALID_MASK (0x1)
+#define ECORE_PGLUE_ATTN_DETAILS2_WR_VALID_SHIFT (29)
+
+/* Register PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL */
+#define ECORE_PGLUE_ATTN_ICPL_VALID_MASK (0x1)
+#define ECORE_PGLUE_ATTN_ICPL_VALID_SHIFT (23)
+
+/* Register PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS */
+#define ECORE_PGLUE_ATTN_ZLR_VALID_MASK (0x1)
+#define ECORE_PGLUE_ATTN_ZLR_VALID_SHIFT (25)
+
+/* Register PGLUE_B_REG_VF_ILT_ERR_DETAILS2 */
+#define ECORE_PGLUE_ATTN_ILT_VALID_MASK (0x1)
+#define ECORE_PGLUE_ATTN_ILT_VALID_SHIFT (23)
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -300,7 +328,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
char str[512] = {0};
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
- if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
+ if (GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_WR_VALID)) {
u32 addr_lo, addr_hi, details;
addr_lo = ecore_rd(p_hwfn, p_ptt,
@@ -310,23 +338,19 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
OSAL_SNPRINTF(str, 512,
- "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
- tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
- 1 : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
- 1 : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
- 1 : 0));
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)(GET_FIELD(details,
+ ECORE_PGLUE_ATTN_DETAILS_PFID)),
+ (u8)(GET_FIELD(details,
+ ECORE_PGLUE_ATTN_DETAILS_VFID)),
+ (u8)(GET_FIELD(details,
+ ECORE_PGLUE_ATTN_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)(GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_WAS_ERR) ? 1 : 0),
+ (u8)(GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_BME) ? 1 : 0),
+ (u8)(GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_FID_EN) ? 1 : 0));
+
if (is_hw_init)
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str);
else
@@ -334,7 +358,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
}
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
- if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
+ if (GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_RD_VALID)) {
u32 addr_lo, addr_hi, details;
addr_lo = ecore_rd(p_hwfn, p_ptt,
@@ -347,29 +371,23 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false,
"Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ (u8)(GET_FIELD(details,
+ ECORE_PGLUE_ATTN_DETAILS_PFID)),
+ (u8)(GET_FIELD(details,
+ ECORE_PGLUE_ATTN_DETAILS_VFID)),
+ (u8)(GET_FIELD(details, ECORE_PGLUE_ATTN_DETAILS_VF_VALID) ? 1 : 0),
tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
- 1 : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
- 1 : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
- 1 : 0));
+ (u8)(GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_WAS_ERR) ? 1 : 0),
+ (u8)(GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_BME) ? 1 : 0),
+ (u8)(GET_FIELD(tmp, ECORE_PGLUE_ATTN_DETAILS2_FID_EN) ? 1 : 0));
}
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
- if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
- DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp);
+ if (GET_FIELD(tmp, ECORE_PGLUE_ATTN_ICPL_VALID))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "ICPL error - %08x\n", tmp);
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
- if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
+ if (GET_FIELD(tmp, ECORE_PGLUE_ATTN_ZLR_VALID)) {
u32 addr_hi, addr_lo;
addr_lo = ecore_rd(p_hwfn, p_ptt,
@@ -378,12 +396,12 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
DP_NOTICE(p_hwfn, false,
- "ICPL erorr - %08x [Address %08x:%08x]\n",
+ "ZLR error - %08x [Address %08x:%08x]\n",
tmp, addr_hi, addr_lo);
}
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
- if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
+ if (GET_FIELD(tmp, ECORE_PGLUE_ATTN_ILT_VALID)) {
u32 addr_hi, addr_lo, details;
addr_lo = ecore_rd(p_hwfn, p_ptt,
@@ -411,9 +429,12 @@ static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
- DP_NOTICE(p_hwfn, false, "FW assertion!\n");
+ u8 str[ECORE_HW_ERR_MAX_STR_SIZE];
- ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
+ OSAL_SNPRINTF((char *)str, ECORE_HW_ERR_MAX_STR_SIZE, "FW assertion!\n");
+ DP_NOTICE(p_hwfn, false, "%s", str);
+ ecore_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, ECORE_HW_ERR_FW_ASSERT,
+ str, OSAL_STRLEN((char *)str) + 1);
return ECORE_INVAL;
}
@@ -432,91 +453,212 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
-#define ECORE_DB_REC_COUNT 1000
-#define ECORE_DB_REC_INTERVAL 100
-
-static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+/* Wait for usage to zero or count to run out. This is necessary since EDPM
+ * doorbell transactions can take multiple 64b cycles, and as such can "split"
+ * over the PCI. Possibly, the doorbell drop can happen with half an EDPM in
+ * the queue and the other half dropped. Another EDPM doorbell to the same
+ * address (from the doorbell recovery mechanism or from the doorbelling
+ * entity) could have its first half dropped and its second half interpreted
+ * as a continuation of the first. To prevent such malformed doorbells from
+ * reaching the device, flush the queue before releasing the overflow sticky
+ * indication.
+ * For PFs/VFs which do not use EDPM, flushing is not necessary.
+ */
+enum _ecore_status_t
+ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 usage_cnt_reg, u32 *count)
{
- u32 count = ECORE_DB_REC_COUNT;
- u32 usage = 1;
+ u32 usage = ecore_rd(p_hwfn, p_ptt, usage_cnt_reg);
- /* wait for usage to zero or count to run out. This is necessary since
- * EDPM doorbell transactions can take multiple 64b cycles, and as such
- * can "split" over the pci. Possibly, the doorbell drop can happen with
- * half an EDPM in the queue and other half dropped. Another EDPM
- * doorbell to the same address (from doorbell recovery mechanism or
- * from the doorbelling entity) could have first half dropped and second
- * half interperted as continuation of the first. To prevent such
- * malformed doorbells from reaching the device, flush the queue before
- * releaseing the overflow sticky indication.
+	/* Flush any pending (e)dpm as they may never arrive.
+	 * This command will flush pending dpms for all customer PFs and VFs
+	 * of this DORQ block, and will force them to use a regular doorbell.
*/
- while (count-- && usage) {
- usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ while (*count && usage) {
+ (*count)--;
OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
+ usage = ecore_rd(p_hwfn, p_ptt, usage_cnt_reg);
}
/* should have been depleted by now */
if (usage) {
DP_NOTICE(p_hwfn->p_dev, false,
- "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
- ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
+ "Flushing DORQ failed, usage was %u after %d usec\n",
+ usage, ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT);
+
return ECORE_TIMEOUT;
}
return ECORE_SUCCESS;
}
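/* Usage sketch, not part of the patch (the helper name is hypothetical):
 * callers keep a single countdown budget and pass it by pointer, so several
 * flush calls within one recovery pass share one overall
 * ECORE_DB_REC_COUNT * ECORE_DB_REC_INTERVAL wait instead of restarting it
 * per queue.
 */
static void ecore_example_flush_pf_doorbells(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	u32 flush_budget = ECORE_DB_REC_COUNT;

	if (ecore_db_rec_flush_queue(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT,
				     &flush_budget) != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "PF doorbell queue failed to drain within the budget\n");
}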
-enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+void ecore_db_rec_handler(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- u32 overflow;
+ u32 attn_ovfl, cur_ovfl, flush_count = ECORE_DB_REC_COUNT;
enum _ecore_status_t rc;
- overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
- DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow);
- if (!overflow) {
- ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE);
- return ECORE_SUCCESS;
- }
-
- if (ecore_edpm_enabled(p_hwfn)) {
- rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt);
- if (rc != ECORE_SUCCESS)
- return rc;
- }
+	/* If a PF overflow was caught by the DORQ attention callback, the PF
+	 * must execute doorbell recovery. The bit is read and cleared
+	 * atomically because the DORQ attention handler sets it from
+	 * interrupt context.
+	 */
+ attn_ovfl = OSAL_TEST_AND_CLEAR_BIT(ECORE_OVERFLOW_BIT,
+ &p_hwfn->db_recovery_info.overflow);
- /* flush any pedning (e)dpm as they may never arrive */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+ cur_ovfl = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
- /* release overflow sticky indication (stop silently dropping
- * everything)
+	/* Check if the sticky overflow indication is set, or whether it was
+	 * caught set during the DORQ attention callback.
*/
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+ if (cur_ovfl || attn_ovfl) {
+ DP_NOTICE(p_hwfn, false,
+ "PF Overflow sticky: attn %u current %u\n",
+ attn_ovfl ? 1 : 0, cur_ovfl ? 1 : 0);
+
+		/* In case the PF is capable of DPM, i.e. it has a sufficient
+		 * doorbell BAR, the DB queue must be flushed.
+		 * Note that DCBX events which can modify the DPM register
+		 * state aren't taken into account because they can change
+		 * dynamically.
+		 */
+ if (cur_ovfl && !p_hwfn->dpm_info.db_bar_no_edpm) {
+ rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt,
+ DORQ_REG_PF_USAGE_CNT,
+ &flush_count);
+ if (rc != ECORE_SUCCESS)
+ return;
+ }
- /* repeat all last doorbells (doorbell drop recovery) */
- ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+ /* release overflow sticky indication (stop silently dropping
+ * everything).
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
- return ECORE_SUCCESS;
+ /* VF doorbell recovery in next iov handler for all VFs.
+ * Setting the bit is atomic because the iov handler is reading
+ * it in a parallel flow.
+ */
+ if (cur_ovfl && IS_PF_SRIOV_ALLOC(p_hwfn))
+ OSAL_SET_BIT(ECORE_OVERFLOW_BIT,
+ &p_hwfn->pf_iov_info->overflow);
+ }
+
+ /* Even if the PF didn't overflow, some of its child VFs may have.
+ * Either way, schedule VFs handler for doorbell recovery.
+ */
+ if (IS_PF_SRIOV_ALLOC(p_hwfn))
+ OSAL_IOV_DB_REC_HANDLER(p_hwfn);
+
+ if (cur_ovfl || attn_ovfl)
+ ecore_db_recovery_execute(p_hwfn);
}
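/* Handshake sketch (editor's note, not part of the patch), assuming
 * OSAL_SET_BIT and OSAL_TEST_AND_CLEAR_BIT are the platform's atomic bitops:
 *   attention (interrupt) context - ecore_dorq_attn_overflow() sets
 *     ECORE_OVERFLOW_BIT in db_recovery_info.overflow (and in
 *     pf_iov_info->overflow for the VFs).
 *   periodic context - ecore_db_rec_handler() above test-and-clears the PF
 *     bit, and the iov handler later consumes the VF bit, so a single
 *     overflow is recovered exactly once even when both paths observe it.
 */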
-static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
+static void ecore_dorq_attn_overflow(struct ecore_hwfn *p_hwfn)
{
- u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ u32 overflow, i, flush_delay_count = ECORE_DB_REC_COUNT;
struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ struct ecore_vf_info *p_vf;
enum _ecore_status_t rc;
- int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
- DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
- int_sts);
+ overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (overflow) {
+ /* PF doorbell recovery in next periodic handler.
+ * Setting the bit is atomic because the db_rec handler is
+ * reading it in the periodic handler flow.
+ */
+ OSAL_SET_BIT(ECORE_OVERFLOW_BIT,
+ &p_hwfn->db_recovery_info.overflow);
+
+ /* VF doorbell recovery in next iov handler for all VFs.
+ * Setting the bit is atomic because the iov handler is reading
+ * it in a parallel flow.
+ */
+ if (IS_PF_SRIOV_ALLOC(p_hwfn))
+ OSAL_SET_BIT(ECORE_OVERFLOW_BIT,
+ &p_hwfn->pf_iov_info->overflow);
+
+		/* In case the PF is capable of DPM, i.e. it has a sufficient
+		 * doorbell BAR, the DB queue must be flushed.
+		 * Note that DCBX events which can modify the DPM register
+		 * state aren't taken into account because they can change
+		 * dynamically.
+		 */
+ if (!p_hwfn->dpm_info.db_bar_no_edpm) {
+ rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt,
+ DORQ_REG_PF_USAGE_CNT,
+ &flush_delay_count);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ }
+
+ /* Allow DORQ to process doorbells from this PF again */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+ } else {
+ /* Checking for VF overflows is only needed if the PF didn't
+ * overflow. If the PF did overflow, ecore_iov_db_rec_handler()
+ * will be scheduled by the periodic doorbell recovery handler
+ * anyway.
+ */
+ ecore_for_each_vf(p_hwfn, i) {
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ (u16)p_vf->concrete_fid);
+ overflow = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_VF_OVFL_STICKY);
+ if (overflow) {
+ /* VF doorbell recovery in next iov handler.
+ * Setting the bit is atomic because the iov
+ * handler is reading it in a parallel flow.
+ */
+ OSAL_SET_BIT(ECORE_OVERFLOW_BIT,
+ &p_vf->db_recovery_info.overflow);
+
+ if (!p_vf->db_recovery_info.edpm_disabled) {
+ rc = ecore_db_rec_flush_queue(p_hwfn,
+ p_ptt, DORQ_REG_VF_USAGE_CNT,
+ &flush_delay_count);
+ /* Do not clear VF sticky for this VF
+ * if flush failed.
+ */
+ if (rc != ECORE_SUCCESS)
+ continue;
+ }
+
+ /* Allow DORQ to process doorbells from this
+ * VF again.
+ */
+ ecore_wr(p_hwfn, p_ptt,
+ DORQ_REG_VF_OVFL_STICKY, 0x0);
+ }
+ }
+
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+out:
+ /* Schedule the handler even if overflow was not detected. If an
+ * overflow occurred after reading the sticky values, there won't be
+ * another attention.
+ */
+ OSAL_DB_REC_OCCURRED(p_hwfn);
+}
+
+static enum _ecore_status_t ecore_dorq_attn_int_sts(struct ecore_hwfn *p_hwfn)
+{
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
/* int_sts may be zero since all PFs were interrupted for doorbell
* overflow but another one already handled it. Can abort here. If
- * This PF also requires overflow recovery we will be interrupted again
+	 * this PF also requires overflow recovery, we will be interrupted again.
+	 * The masked almost-full indication may also be set; ignore it.
*/
- if (!int_sts)
+ int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
return ECORE_SUCCESS;
+ DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
+ int_sts);
+
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -539,29 +681,20 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
"Size\t\t0x%04x\t\t(in bytes)\n"
"1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
"Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
- address,
- GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
+ address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
first_drop_reason, all_drops_reason);
- rc = ecore_db_rec_handler(p_hwfn, p_ptt);
- OSAL_DB_REC_OCCURRED(p_hwfn);
- if (rc != ECORE_SUCCESS)
- return rc;
-
/* clear the doorbell drop details and prepare for next drop */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
- /* mark interrupt as handeld (note: even if drop was due to a
- * different reason than overflow we mark as handled)
+	/* mark the interrupt as handled (note: even if the drop was due to a
+	 * different reason than overflow, we mark it as handled)
*/
ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
- DORQ_REG_INT_STS_DB_DROP |
- DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+ DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
- /* if there are no indications otherthan drop indications,
- * success
- */
+	/* if there are no indications other than drop indications, success */
if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
@@ -574,6 +707,31 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
return ECORE_INVAL;
}
+static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->db_recovery_info.dorq_attn = true;
+ ecore_dorq_attn_overflow(p_hwfn);
+
+ return ecore_dorq_attn_int_sts(p_hwfn);
+}
+
+/* Handle a corner-case race where another PF clears the DORQ attention bit in
+ * the MISCS bitmap before this PF got to read it. This is needed only for
+ * DORQ attentions, because the DORQ block doesn't send another attention
+ * later if the original issue was not resolved. If the DORQ attention was
+ * already handled, its callback has set a flag to indicate it.
+ */
+static void ecore_dorq_attn_handler(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->db_recovery_info.dorq_attn)
+ goto out;
+
+ /* Call DORQ callback if the attention was missed */
+ ecore_dorq_attn_cb(p_hwfn);
+out:
+ p_hwfn->db_recovery_info.dorq_attn = false;
+}
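/* Lifecycle sketch (editor's note, not part of the patch), assuming both
 * routines run from the same slowpath DPC and need no extra locking:
 *   ecore_dorq_attn_cb()      - sets db_recovery_info.dorq_attn before doing
 *                               the overflow and int_sts work.
 *   ecore_dorq_attn_handler() - runs at the end of every deassertion pass;
 *                               if the flag is still clear, the AEU bit was
 *                               consumed by another PF, so the callback is
 *                               invoked here instead, and the flag is cleared
 *                               for the next pass either way.
 */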
+
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
@@ -587,12 +745,10 @@ static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
TM_REG_INT_STS_1_PEND_CONN_SCAN))
- DP_INFO(p_hwfn,
- "TM attention on emulation - most likely"
- " results of clock-ratios\n");
+ DP_INFO(p_hwfn, "TM attention on emulation - most likely results of clock-ratios\n");
val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
- TM_REG_INT_MASK_1_PEND_TASK_SCAN;
+ TM_REG_INT_MASK_1_PEND_TASK_SCAN;
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
return ECORE_SUCCESS;
@@ -626,191 +782,213 @@ aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
-static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+static struct aeu_invert_reg aeu_descs[MAX_NUM_ATTN_REGS] = {
{
- { /* After Invert 1 */
- {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- MAX_BLOCK_ID},
- }
- },
+ { /* After Invert 1 */
+ {"GPIO0 function%d", FIELD_VALUE(ATTENTION_LENGTH, 32), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
{
- { /* After Invert 2 */
- {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
- BLOCK_PGLUE_B},
- {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"SW timers #%d",
- (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
- OSAL_NULL, MAX_BLOCK_ID},
- {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- BLOCK_PGLCS},
- }
- },
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SW timers #%d", FIELD_VALUE(ATTENTION_LENGTH, 8) |
+ FIELD_VALUE(ATTENTION_OFFSET, 1), OSAL_NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d", FIELD_VALUE(ATTENTION_LENGTH, 16), OSAL_NULL,
+ BLOCK_PGLCS},
+ }
+ },
{
- { /* After Invert 3 */
- {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- MAX_BLOCK_ID},
- }
- },
+ { /* After Invert 3 */
+ {"General Attention %d", FIELD_VALUE(ATTENTION_LENGTH, 32), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
{
- { /* After Invert 4 */
- {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
- ecore_fw_assertion, MAX_BLOCK_ID},
- {"General Attention %d",
- (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
- OSAL_NULL, MAX_BLOCK_ID},
- {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
- ecore_general_attention_35, MAX_BLOCK_ID},
- {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
- ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
- OSAL_NULL, BLOCK_NWS},
- {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
- ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
- OSAL_NULL, BLOCK_NWS},
- {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
- ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
- OSAL_NULL, BLOCK_NWM},
- {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
- ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
- OSAL_NULL, BLOCK_NWM},
- {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
- {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
- {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_fw_assertion, MAX_BLOCK_ID},
+ {"General Attention %d", FIELD_VALUE(ATTENTION_LENGTH, 2) |
+ FIELD_VALUE(ATTENTION_OFFSET, 33), OSAL_NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_general_attention_35, MAX_BLOCK_ID},
+ {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), OSAL_NULL,
+ BLOCK_NWS},
+ {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL,
+ BLOCK_NWS},
+ {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL,
+ BLOCK_NWM},
+ {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL,
+ BLOCK_NWM},
+ {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
{"OPTE", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE},
{"MCP", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP},
{"MS", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS},
{"UMAC", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC},
{"LED", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED},
{"BMBN", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN},
- {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
- {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+ {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
{"BMB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
- {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
- {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
- {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
- }
- },
+ {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
+ }
+ },
{
- { /* After Invert 5 */
- {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
- {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
- {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
- {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
- {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
- {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
- {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
- {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
- {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
- {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
- {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
- {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
- {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
- {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
- {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
- {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
- }
- },
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
+ }
+ },
{
- { /* After Invert 6 */
- {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
- {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
- {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
- {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
- {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
- {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
- {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
- {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
- {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
- {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
- {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
- {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
- {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
- {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
- {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
- {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
- }
- },
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
+ }
+ },
{
- { /* After Invert 7 */
- {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
- {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
- {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
- {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
- {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
- {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
- {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
- {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
- {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
- {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
- {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
- {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
- {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
- {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
- {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
- {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
- {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
- }
- },
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
+ }
+ },
{
- { /* After Invert 8 */
- {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
- {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
- {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
- {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
- {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
- {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
- {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
- {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
- {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
- {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
- {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
- {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
- {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- MAX_BLOCK_ID},
- }
- },
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", FIELD_VALUE(ATTENTION_LENGTH, 2), OSAL_NULL, MAX_BLOCK_ID },
+ }
+ },
{
- { /* After Invert 9 */
- {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
- MAX_BLOCK_ID},
- {"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
- ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL,
- BLOCK_AVS_WRAP},
- {"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
- ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL,
- BLOCK_AVS_WRAP},
- {"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
- {"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
- {"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
- {"Reserved %d", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- MAX_BLOCK_ID},
- }
- },
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL,
+ BLOCK_AVS_WRAP},
+ {"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL,
+ BLOCK_AVS_WRAP},
+ {"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"YPLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YPLD},
+ {"PTLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTLD},
+ {"RGFS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RGFS},
+ {"TGFS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TGFS},
+ {"RGSRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RGSRC},
+ {"TGSRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TGSRC},
+ {"RGFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RGFC},
+ {"TGFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TGFC},
+ {"Reserved %d", FIELD_VALUE(ATTENTION_LENGTH, 9), OSAL_NULL, MAX_BLOCK_ID},
+ }
+ },
+ {
+ { /* After Invert 10 */
+ {"BMB_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMB_PD},
+ {"CFC_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CFC_PD},
+ {"MS_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS_PD},
+ {"HOST_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_HOST_PD},
+ {"NMC_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_NMC_PD},
+ {"NM_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_NM_PD},
+ {"NW_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_NW_PD},
+ {"PS_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PS_PD},
+ {"PXP_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PXP_PD},
+ {"QM_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_QM_PD},
+ {"TS_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_TS_PD},
+ {"TX_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_TX_PD},
+ {"US_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_US_PD},
+ {"XS_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_XS_PD},
+ {"YS_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_YS_PD},
+ {"RX_PD", ATTENTION_SINGLE, OSAL_NULL, BLOCK_RX_PD},
+ {"Reserved %d", FIELD_VALUE(ATTENTION_LENGTH, 16), OSAL_NULL, MAX_BLOCK_ID},
+ }
+ },
};
static struct aeu_invert_reg_bit *
@@ -823,8 +1001,7 @@ ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
return p_bit;
- return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
- ATTENTION_BB_SHIFT];
+ return &aeu_descs_special[GET_FIELD(p_bit->flags, ATTENTION_BB)];
}
static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
@@ -838,23 +1015,23 @@ static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
#define ATTN_BITS_MASKABLE (0x3ff)
struct ecore_sb_attn_info {
/* Virtual & Physical address of the SB */
- struct atten_status_block *sb_attn;
- dma_addr_t sb_phys;
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
/* Last seen running index */
- u16 index;
+ u16 index;
/* A mask of the AEU bits resulting in a parity error */
- u32 parity_mask[NUM_ATTN_REGS];
+ u32 parity_mask[MAX_NUM_ATTN_REGS];
/* A pointer to the attention description structure */
- struct aeu_invert_reg *p_aeu_desc;
+ struct aeu_invert_reg *p_aeu_desc;
/* Previously asserted attentions, which are still unasserted */
- u16 known_attn;
+ u16 known_attn;
/* Cleanup address for the link's general hw attention */
- u32 mfw_attn_addr;
+ u32 mfw_attn_addr;
};
static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
@@ -912,10 +1089,11 @@ static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
/* FIXME - this will change once we'll have GOOD gtt definitions */
DIRECT_REG_WR(p_hwfn,
- (u8 OSAL_IOMEM *) p_hwfn->regview +
+ (u8 OSAL_IOMEM *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
((IGU_CMD_ATTN_BIT_SET_UPPER -
- IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
+ IGU_CMD_INT_ACK_BASE) << 3),
+ (u32)asserted_bits);
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
asserted_bits);
@@ -923,12 +1101,24 @@ static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+/* @DPDK */
static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
enum block_id id, enum dbg_attn_type type,
bool b_clear)
{
- /* @DPDK */
- DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
+ struct dbg_attn_block_result attn_results;
+ enum dbg_status status;
+
+ OSAL_MEMSET(&attn_results, 0, sizeof(attn_results));
+
+ status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
+ b_clear, &attn_results);
+ if (status != DBG_STATUS_OK)
+ DP_NOTICE(p_hwfn, true,
+ "Failed to parse attention information [status: %s]\n",
+ qed_dbg_get_status_str(status));
+ else
+ qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
@@ -967,23 +1157,24 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
b_fatal = true;
/* Print HW block interrupt registers */
- if (p_aeu->block_index != MAX_BLOCK_ID) {
+ if (p_aeu->block_index != MAX_BLOCK_ID)
ecore_int_attn_print(p_hwfn, p_aeu->block_index,
ATTN_TYPE_INTERRUPT, !b_fatal);
-}
- /* @DPDK */
/* Reach assertion if attention is fatal */
- if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
+ if (b_fatal) {
+ u8 str[ECORE_HW_ERR_MAX_STR_SIZE];
+
+ OSAL_SNPRINTF((char *)str, ECORE_HW_ERR_MAX_STR_SIZE,
+ "`%s': Fatal attention\n", p_bit_name);
#ifndef ASIC_ONLY
- DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev),
- "`%s': Fatal attention\n", p_bit_name);
+ DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev), "%s", str);
#else
- DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
- p_bit_name);
+ DP_NOTICE(p_hwfn, true, "%s", str);
#endif
-
- ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+ ecore_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt,
+ ECORE_HW_ERR_HW_ATTN, str,
+ OSAL_STRLEN((char *)str) + 1);
}
/* Prevent this Attention from being asserted in the future */
@@ -996,7 +1187,7 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
u32 mask = ~bitmask;
val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
- DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
p_bit_name);
}
@@ -1042,11 +1233,15 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
}
#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \
- (MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4)
+ ((n) < NUM_ATTN_REGS_E4 ? \
+ MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4 : \
+ MISC_REG_AEU_AFTER_INVERT_10_IGU_E5)
#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \
- (MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \
- (group) * 0x4 * NUM_ATTN_REGS)
+ ((n) < NUM_ATTN_REGS_E4 ? \
+ (MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \
+ (group) * 0x4 * NUM_ATTN_REGS_E4) : \
+ (MISC_REG_AEU_ENABLE10_IGU_OUT_0_E5 + (group) * 0x4))
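/* Worked example (editor's note, not part of the patch), assuming
 * NUM_ATTN_REGS_E4 covers the E4 after-invert registers: for
 * n < NUM_ATTN_REGS_E4 the read macro resolves to
 * MISC_REG_AEU_AFTER_INVERT_1_IGU + n * 0x4, so n == 0 reads the first
 * register; any n beyond the E4 range falls through to the single
 * E5-only MISC_REG_AEU_AFTER_INVERT_10_IGU_E5. The enable macro likewise
 * strides 0x4 per register and 0x4 * NUM_ATTN_REGS_E4 per deassertion group
 * on E4, while the extra E5 register strides only per group.
 */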
/**
* @brief - handles deassertion of previously asserted attentions.
@@ -1059,21 +1254,22 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
u16 deasserted_bits)
{
+ u8 i, j, k, bit_idx, num_attn_regs = NUM_ATTN_REGS(p_hwfn);
struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
- u8 i, j, k, bit_idx;
+ u32 aeu_inv_arr[MAX_NUM_ATTN_REGS], aeu_mask, aeu_en, en;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Read the attention registers in the AEU */
- for (i = 0; i < NUM_ATTN_REGS; i++) {
+ for (i = 0; i < num_attn_regs; i++) {
aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
MISC_REG_AEU_AFTER_INVERT_IGU(i));
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
+ "Deasserted bits [%d]: %08x\n",
+ i, aeu_inv_arr[i]);
}
/* Handle parity attentions first */
- for (i = 0; i < NUM_ATTN_REGS; i++) {
+ for (i = 0; i < num_attn_regs; i++) {
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
u32 parities;
@@ -1105,7 +1301,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
if (!(deasserted_bits & (1 << k)))
continue;
- for (i = 0; i < NUM_ATTN_REGS; i++) {
+ for (i = 0; i < num_attn_regs; i++) {
u32 bits;
aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k);
@@ -1121,11 +1317,12 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
* previous assertion.
*/
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
- unsigned long int bitmask;
+ unsigned long bitmask;
u8 bit, bit_len;
/* Need to account bits with changed meaning */
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+ p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu);
bit = bit_idx;
bit_len = ATTENTION_LENGTH(p_aeu->flags);
@@ -1160,9 +1357,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
p_aeu->bit_name,
num);
else
- strlcpy(bit_name,
- p_aeu->bit_name,
- sizeof(bit_name));
+ OSAL_STRLCPY(bit_name,
+ p_aeu->bit_name,
+ 30);
/* We now need to pass bitmask in its
* correct position.
@@ -1182,13 +1379,17 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
}
}
+ /* Handle missed DORQ attention */
+ ecore_dorq_attn_handler(p_hwfn);
+
/* Clear IGU indication for the deasserted bits */
/* FIXME - this will change once we'll have GOOD gtt definitions */
DIRECT_REG_WR(p_hwfn,
- (u8 OSAL_IOMEM *) p_hwfn->regview +
+ (u8 OSAL_IOMEM *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
((IGU_CMD_ATTN_BIT_CLR_UPPER -
- IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -1215,6 +1416,8 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
*/
do {
index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
+		/* Make sure the index is read before the attention bits */
+ OSAL_RMB(p_hwfn->p_dev);
attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
@@ -1226,9 +1429,9 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
* attention with no previous attention
*/
asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
- ~p_sb_attn_sw->known_attn;
+ ~p_sb_attn_sw->known_attn;
deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
- p_sb_attn_sw->known_attn;
+ p_sb_attn_sw->known_attn;
if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
DP_INFO(p_hwfn,
@@ -1236,7 +1439,8 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
else if (asserted_bits == 0x100)
- DP_INFO(p_hwfn, "MFW indication via attention\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "MFW indication via attention\n");
else
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"MFW indication [deassertion]\n");
@@ -1259,12 +1463,14 @@ static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
struct igu_prod_cons_update igu_ack;
OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
- igu_ack.sb_id_and_flags =
- ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
- (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
- (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
- (IGU_SEG_ACCESS_ATTN <<
- IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_SB_INDEX,
+ ack_cons);
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_UPDATE_FLAG,
+ 1);
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_ENABLE_INT,
+ IGU_INT_NOP);
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS,
+ IGU_SEG_ACCESS_ATTN);
DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
@@ -1293,8 +1499,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
sb_info = &p_hwfn->p_sp_sb->sb_info;
if (!sb_info) {
- DP_ERR(p_hwfn->p_dev,
- "Status block is NULL - cannot ack interrupts\n");
+ DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n");
return;
}
@@ -1302,7 +1507,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
return;
}
- sb_attn = p_hwfn->p_sb_attn;
+ sb_attn = p_hwfn->p_sb_attn;
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
p_hwfn, p_hwfn->my_id);
@@ -1315,8 +1520,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
/* Gather Interrupts/Attentions information */
if (!sb_info->sb_virt) {
DP_ERR(p_hwfn->p_dev,
- "Interrupt Status block is NULL -"
- " cannot check for new interrupts!\n");
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
rc = ecore_sb_update_sb_idx(sb_info);
@@ -1326,9 +1530,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
}
if (!sb_attn || !sb_attn->sb_attn) {
- DP_ERR(p_hwfn->p_dev,
- "Attentions Status block is NULL -"
- " cannot check for new attentions!\n");
+ DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n");
} else {
u16 tmp_index = sb_attn->index;
@@ -1344,8 +1546,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
return;
}
-/* Check the validity of the DPC ptt. If not ack interrupts and fail */
-
+	/* Check the validity of the DPC ptt. If it is not valid, ack interrupts and fail */
if (!p_hwfn->p_dpc_ptt) {
DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
@@ -1392,7 +1593,9 @@ static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
p_sb->sb_phys,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
}
+
OSAL_FREE(p_hwfn->p_dev, p_sb);
+ p_hwfn->p_sb_attn = OSAL_NULL;
}
static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
@@ -1414,10 +1617,11 @@ static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- void *sb_virt_addr, dma_addr_t sb_phy_addr)
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr)
{
struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
- int i, j, k;
+ int i, j, k, num_attn_regs = NUM_ATTN_REGS(p_hwfn);
sb_info->sb_attn = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
@@ -1426,8 +1630,8 @@ static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
sb_info->p_aeu_desc = aeu_descs;
/* Calculate Parity Masks */
- OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
- for (i = 0; i < NUM_ATTN_REGS; i++) {
+ OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * MAX_NUM_ATTN_REGS);
+ for (i = 0; i < num_attn_regs; i++) {
/* j is array index, k is bit index */
for (j = 0, k = 0; k < 32; j++) {
struct aeu_invert_reg_bit *p_aeu;
@@ -1445,7 +1649,7 @@ static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
/* Set the address of cleanup for the mcp attention */
sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
- MISC_REG_AEU_GENERAL_ATTN_0;
+ MISC_REG_AEU_GENERAL_ATTN_0;
ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}
@@ -1590,25 +1794,23 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
/* Wide-bus, initialize via DMAE */
u64 phys_addr = (u64)sb_phys;
- ecore_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(osal_uintptr_t)&phys_addr,
+ ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
igu_sb_id * sizeof(u64), 2,
OSAL_NULL /* default parameters */);
- ecore_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(osal_uintptr_t)&sb_entry,
+ ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
igu_sb_id * sizeof(u64), 2,
OSAL_NULL /* default parameters */);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
- CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
- igu_sb_id * 2, sb_phys);
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + igu_sb_id * 2,
+ sb_phys);
STORE_RT_REG_AGG(p_hwfn,
- CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
- igu_sb_id * 2, sb_entry);
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET + igu_sb_id * 2,
+ sb_entry);
}
/* Configure pi coalescing if set */
@@ -1650,7 +1852,8 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
}
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info)
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
@@ -1734,7 +1937,8 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
- dma_addr_t sb_phy_addr, u16 sb_id)
+ dma_addr_t sb_phy_addr,
+ u16 sb_id)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
@@ -1764,7 +1968,7 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
/* Let the igu info reference the client's SB info */
if (sb_id != ECORE_SP_SB_ID) {
- if (IS_PF(p_hwfn->p_dev)) {
+ if (IS_PF(p_dev)) {
struct ecore_igu_info *p_info;
struct ecore_igu_block *p_block;
@@ -1778,24 +1982,51 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
}
}
+
#ifdef ECORE_CONFIG_DIRECT_HWFN
sb_info->p_hwfn = p_hwfn;
#endif
- sb_info->p_dev = p_hwfn->p_dev;
+ sb_info->p_dev = p_dev;
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
- if (IS_PF(p_hwfn->p_dev))
+ if (IS_PF(p_dev)) {
+ u32 igu_cmd_gtt_win = ECORE_IS_E4(p_dev) ?
+ GTT_BAR0_MAP_REG_IGU_CMD :
+ GTT_BAR0_MAP_REG_IGU_CMD_EXT_E5;
+
sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- (sb_info->igu_sb_id << 3);
+ igu_cmd_gtt_win +
+ (sb_info->igu_sb_id << 3);
+ } else {
+ u32 igu_bar_offset, int_ack_base;
+
+ if (ECORE_IS_E4(p_dev)) {
+ igu_bar_offset = PXP_VF_BAR0_START_IGU;
+ int_ack_base = IGU_CMD_INT_ACK_BASE;
+ } else {
+ igu_bar_offset = PXP_VF_BAR0_START_IGU2;
+ int_ack_base = IGU_CMD_INT_ACK_E5_BASE -
+ (IGU_CMD_INT_ACK_RESERVED_UPPER + 1);
+
+ /* Force emulator to use legacy IGU for lower SBs */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ int_ack_base = IGU_CMD_INT_ACK_BASE;
+ if (sb_info->igu_sb_id <
+ MAX_SB_PER_PATH_E5_BC_MODE)
+ igu_bar_offset = PXP_VF_BAR0_START_IGU;
+ else
+ igu_bar_offset =
+ PXP_VF_BAR0_START_IGU2 -
+ PXP_VF_BAR0_IGU_LENGTH;
+ }
+ }
- else
sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
- PXP_VF_BAR0_START_IGU +
- ((IGU_CMD_INT_ACK_BASE +
- sb_info->igu_sb_id) << 3);
+ igu_bar_offset +
+ ((int_ack_base + sb_info->igu_sb_id) << 3);
+ }
sb_info->flags |= ECORE_SB_INFO_INIT;
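
For reference, the VF branch above boils down to plain offset arithmetic: a per-chip IGU BAR offset plus the (int_ack_base + igu_sb_id) command index scaled by 8 bytes. The standalone sketch below uses invented DEMO_* constants in place of the real PXP/IGU values and only illustrates that arithmetic, not the actual register map.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical placeholder values -- the real constants come from the
 * generated HSI headers and differ per chip generation.
 */
#define DEMO_VF_BAR0_START_IGU	0x3000u
#define DEMO_CMD_INT_ACK_BASE	0x0400u

/* Each IGU command slot is 8 bytes wide, hence the "<< 3". */
static uint32_t demo_vf_igu_ack_offset(uint32_t bar_start,
					uint32_t int_ack_base,
					uint32_t igu_sb_id)
{
	return bar_start + ((int_ack_base + igu_sb_id) << 3);
}

int main(void)
{
	uint32_t off = demo_vf_igu_ack_offset(DEMO_VF_BAR0_START_IGU,
					      DEMO_CMD_INT_ACK_BASE,
					      5 /* igu_sb_id */);

	printf("VF IGU ack offset for SB 5: 0x%x\n", off);
	return 0;
}
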
@@ -1855,6 +2086,7 @@ static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
}
OSAL_FREE(p_hwfn->p_dev, p_sb);
+ p_hwfn->p_sp_sb = OSAL_NULL;
}
static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
@@ -1874,7 +2106,8 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
/* SB ring */
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &p_phys, SB_ALIGNED_SIZE(p_hwfn));
+ &p_phys,
+ SB_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
OSAL_FREE(p_hwfn->p_dev, p_sb);
@@ -1892,24 +2125,42 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t
+ecore_int_dummy_comp_cb(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+ void OSAL_UNUSED * cookie)
+{
+	/* Empty completion callback to avoid a race */
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
ecore_int_comp_cb_t comp_cb,
void *cookie,
- u8 *sb_idx, __le16 **p_fw_cons)
+ u8 *sb_idx,
+ __le16 **p_fw_cons)
{
- struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
enum _ecore_status_t rc = ECORE_NOMEM;
u8 pi;
/* Look for a free index */
for (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) {
- if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
+ if ((p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) &&
+ (p_sp_sb->pi_info_arr[pi].comp_cb != p_hwfn->p_dummy_cb))
continue;
- p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
p_sp_sb->pi_info_arr[pi].cookie = cookie;
*sb_idx = pi;
*p_fw_cons = &p_sp_sb->sb_info.sb_pi_array[pi];
+ **p_fw_cons = 0;
+
+ /* The callback might be called in parallel from
+ * ecore_int_sp_dpc(), so make sure that the cookie is set and
+ * that the PI producer is zeroed before setting the callback.
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+
rc = ECORE_SUCCESS;
break;
}
@@ -1917,15 +2168,19 @@ enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
return rc;
}
-enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
+ u8 pi)
{
struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
return ECORE_NOMEM;
- p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
- p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
+	/* To prevent damage from a possible race, install the dummy callback
+	 * and keep the cookie instead of setting the table entry to NULL.
+	 */
+ p_sp_sb->pi_info_arr[pi].comp_cb = p_hwfn->p_dummy_cb;
+
return ECORE_SUCCESS;
}
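
The register/unregister pair above closes a race with ecore_int_sp_dpc(): unregister parks the slot on a dummy no-op callback instead of writing NULL, and register treats dummy-occupied slots as free, so a concurrently running dispatcher never calls through a NULL pointer. A minimal standalone sketch of that pattern follows; the demo_* names are illustrative and not the driver's API.

#include <stddef.h>
#include <stdio.h>

typedef int (*demo_comp_cb_t)(void *cookie);

/* No-op callback: always safe to call from a racing dispatcher. */
static int demo_dummy_cb(void *cookie)
{
	(void)cookie;
	return 0;
}

static int demo_user_cb(void *cookie)
{
	printf("completion, cookie=%p\n", cookie);
	return 0;
}

struct demo_pi_slot {
	demo_comp_cb_t comp_cb;
	void *cookie;
};

#define DEMO_NUM_SLOTS 4
static struct demo_pi_slot demo_slots[DEMO_NUM_SLOTS];

static int demo_register_cb(demo_comp_cb_t cb, void *cookie)
{
	int i;

	for (i = 0; i < DEMO_NUM_SLOTS; i++) {
		/* A slot parked on the dummy callback counts as free. */
		if (demo_slots[i].comp_cb != NULL &&
		    demo_slots[i].comp_cb != demo_dummy_cb)
			continue;
		demo_slots[i].cookie = cookie;
		/* A real driver issues a write barrier here so the cookie is
		 * visible before the callback pointer is published.
		 */
		demo_slots[i].comp_cb = cb;
		return i;
	}
	return -1;
}

static void demo_unregister_cb(int i)
{
	/* Never write NULL: a dispatcher running in parallel may still read
	 * the pointer, so park the slot on the dummy callback instead.
	 */
	demo_slots[i].comp_cb = demo_dummy_cb;
}

int main(void)
{
	int idx = demo_register_cb(demo_user_cb, NULL);

	demo_slots[idx].comp_cb(demo_slots[idx].cookie);	/* "dispatch" */
	demo_unregister_cb(idx);
	return 0;
}
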
@@ -1935,7 +2190,7 @@ u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
}
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
+ struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode)
{
u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
@@ -1974,8 +2229,7 @@ static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
{
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- DP_INFO(p_hwfn,
- "FPGA - Don't enable Attentions in IGU and MISC\n");
+ DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n");
return;
}
#endif
@@ -2004,8 +2258,7 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "Slowpath IRQ request failed\n");
+ DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n");
return ECORE_NORESOURCES;
}
p_hwfn->b_int_requested = true;
@@ -2019,8 +2272,8 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
-void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
p_hwfn->b_int_enabled = 0;
@@ -2033,12 +2286,13 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 igu_sb_id,
+ u16 igu_sb_id,
bool cleanup_set,
u16 opaque_fid)
{
- u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr;
+ u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val;
+ u32 pxp_addr, int_ack_base;
u8 type = 0;
OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
@@ -2054,7 +2308,9 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
/* Set the control register */
- pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
+ int_ack_base = ECORE_IS_E4(p_hwfn->p_dev) ? IGU_CMD_INT_ACK_BASE
+ : IGU_CMD_INT_ACK_E5_BASE;
+ pxp_addr = int_ack_base + igu_sb_id;
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
@@ -2136,8 +2392,9 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
}
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- bool b_set, bool b_slowpath)
+ struct ecore_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath)
{
struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
struct ecore_igu_block *p_block;
@@ -2344,10 +2601,11 @@ static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
/* Fill the block information */
- p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
- p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
-
+ p_block->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
p_block->igu_sb_id = igu_sb_id;
}
@@ -2507,6 +2765,12 @@ ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_INVAL;
}
+ if (p_block == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "SB address (p_block) is NULL\n");
+ return ECORE_INVAL;
+ }
+
/* At this point, p_block points to the SB we want to relocate */
if (b_to_vf) {
p_block->status &= ~ECORE_IGU_STATUS_PF;
@@ -2546,6 +2810,11 @@ ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
}
+	/* Clean up the PF's SB before assigning it to the VF */
+ if (b_to_vf)
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
+ p_hwfn->hw_info.opaque_fid);
+
/* Update the IGU and CAU with the new configuration */
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
p_block->function_id);
@@ -2567,6 +2836,11 @@ ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
igu_sb_id, p_block->function_id,
p_block->is_pf, p_block->vector_number);
+	/* Clean up the newly assigned PF's SB */
+ if (p_block->is_pf)
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
+ p_hwfn->hw_info.opaque_fid);
+
return ECORE_SUCCESS;
}
@@ -2620,6 +2894,7 @@ static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
+ p_hwfn->sp_dpc = OSAL_NULL;
}
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
@@ -2754,35 +3029,3 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-
-void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
-{
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
- struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
- RESERVED_PTT_DPC);
- int i;
-
- /* Do not reorder the following cleanup sequence */
- /* Ack all attentions */
- ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);
-
- /* Clear driver attention */
- ecore_wr(p_hwfn, p_dpc_ptt,
- ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);
-
- /* Clear per-PF IGU registers to restore them as if the IGU
- * was reset for this PF
- */
- ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
- ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
- ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
-
- /* Execute IGU clean up*/
- ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);
-
- /* Clear Stats */
- ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);
-
- for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
-}
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_INT_H__
#define __ECORE_INT_H__
@@ -24,15 +24,15 @@
#define ECORE_SB_INVALID_IDX 0xffff
struct ecore_igu_block {
- u8 status;
+ u8 status;
#define ECORE_IGU_STATUS_FREE 0x01
#define ECORE_IGU_STATUS_VALID 0x02
#define ECORE_IGU_STATUS_PF 0x04
#define ECORE_IGU_STATUS_DSB 0x08
- u8 vector_number;
- u8 function_id;
- u8 is_pf;
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
/* Index inside IGU [meant for back reference] */
u16 igu_sb_id;
@@ -91,12 +91,14 @@ u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
*/
struct ecore_igu_block *
ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf);
+
/* TODO Names of function may change... */
-void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- bool b_set, bool b_slowpath);
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath);
-void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_int_igu_read_cam - Reads the IGU CAM.
@@ -109,11 +111,11 @@ void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
-typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
- void *cookie);
+typedef enum _ecore_status_t(*ecore_int_comp_cb_t)(struct ecore_hwfn *p_hwfn,
+ void *cookie);
/**
* @brief ecore_int_register_cb - Register callback func for
* slowhwfn statusblock.
@@ -134,10 +136,11 @@ typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
- ecore_int_comp_cb_t comp_cb,
- void *cookie,
- u8 *sb_idx, __le16 **p_fw_cons);
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons);
/**
* @brief ecore_int_unregister_cb - Unregisters callback
* function from sp sb.
@@ -149,7 +152,8 @@ enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
+ u8 pi);
/**
* @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
@@ -187,10 +191,12 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
* @param vf_number
* @param vf_valid
*/
-void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- dma_addr_t sb_phys,
- u16 igu_sb_id, u16 vf_number, u8 vf_valid);
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid);
/**
* @brief ecore_int_alloc
@@ -216,7 +222,8 @@ void ecore_int_free(struct ecore_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
*/
-void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_int_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief - Enable Interrupt & Attention for hw function
@@ -258,6 +265,9 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_hw_init);
-void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t
+ecore_int_dummy_comp_cb(struct ecore_hwfn OSAL_UNUSED * p_hwfn,
+ void OSAL_UNUSED * cookie);
#endif /* __ECORE_INT_H__ */
@@ -1,18 +1,33 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__
+#include "common_hsi.h"
+
#ifndef __EXTRACT__LINUX__
#define ECORE_SB_IDX 0x0002
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
+#define LL2_VF_RX_PI_E4 9
+#define LL2_VF_TX_PI_E4 10
+#define LL2_VF_RX_PI_E5 6
+#define LL2_VF_TX_PI_E5 7
+
+#define LL2_VF_RX_PI(_p_hwfn) \
+ (ECORE_IS_E4((_p_hwfn)->p_dev) ? \
+ LL2_VF_RX_PI_E4 : LL2_VF_RX_PI_E5)
+
+#define LL2_VF_TX_PI(_p_hwfn) \
+ (ECORE_IS_E4((_p_hwfn)->p_dev) ? \
+ LL2_VF_TX_PI_E4 : LL2_VF_TX_PI_E5)
+
#ifndef ECORE_INT_MODE
#define ECORE_INT_MODE
enum ecore_int_mode {
@@ -31,7 +46,7 @@ struct ecore_sb_info {
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
dma_addr_t sb_phys;
- u32 sb_ack; /* Last given ack */
+ u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
void OSAL_IOMEM *igu_addr;
u8 flags;
@@ -52,22 +67,22 @@ struct ecore_sb_info_dbg {
struct ecore_sb_cnt_info {
/* Original, current, and free SBs for PF */
- int orig;
- int cnt;
- int free_cnt;
+ u32 orig;
+ u32 cnt;
+ u32 free_cnt;
/* Original, current and free SBS for child VFs */
- int iov_orig;
- int iov_cnt;
- int free_cnt_iov;
+ u32 iov_orig;
+ u32 iov_cnt;
+ u32 free_cnt_iov;
};
static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
{
u32 prod = 0;
- u16 rc = 0;
+ u16 rc = 0;
- /* barrier(); status block is written to by the chip */
+ /* barrier(); */ /* status block is written to by the chip */
/* FIXME: need some sort of barrier. */
prod = OSAL_LE32_TO_CPU(*sb_info->sb_prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
@@ -81,7 +96,6 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
}
/**
- *
* @brief This function creates an update command for interrupts that is
* written to the IGU.
*
@@ -98,19 +112,29 @@ static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
enum igu_int_cmd int_cmd, u8 upd_flg)
{
struct igu_prod_cons_update igu_ack;
+#ifndef ECORE_CONFIG_DIRECT_HWFN
+ u32 val;
+#endif
+ if (sb_info->p_dev->int_mode == ECORE_INT_MODE_POLL)
+ return;
OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
- igu_ack.sb_id_and_flags =
- ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
- (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
- (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
- (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_SB_INDEX,
+ sb_info->sb_ack);
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_UPDATE_FLAG,
+ upd_flg);
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_ENABLE_INT,
+ int_cmd);
+ SET_FIELD(igu_ack.sb_id_and_flags, IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS,
+ IGU_SEG_ACCESS_REG);
+ igu_ack.sb_id_and_flags = OSAL_CPU_TO_LE32(igu_ack.sb_id_and_flags);
#ifdef ECORE_CONFIG_DIRECT_HWFN
DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
igu_ack.sb_id_and_flags);
#else
- DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
+ val = OSAL_LE32_TO_CPU(igu_ack.sb_id_and_flags);
+ DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, val);
#endif
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
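
ecore_sb_ack() now composes the 32-bit sb_id_and_flags word with SET_FIELD() per field instead of hand-rolled shifts. The standalone sketch below reproduces that mask/shift composition with an invented field layout; the DEMO_* masks and shifts are placeholders, not the real IGU encoding.

#include <stdint.h>
#include <stdio.h>

/* Invented field layout -- the real masks/shifts come from the IGU HSI. */
#define DEMO_SB_INDEX_MASK	0xFFFFFFu
#define DEMO_SB_INDEX_SHIFT	0
#define DEMO_UPDATE_FLAG_MASK	0x1u
#define DEMO_UPDATE_FLAG_SHIFT	24
#define DEMO_ENABLE_INT_MASK	0x3u
#define DEMO_ENABLE_INT_SHIFT	25
#define DEMO_SEGMENT_MASK	0x1u
#define DEMO_SEGMENT_SHIFT	27

/* Mask the value to the field width, shift it into place, OR it in. */
#define DEMO_SET_FIELD(val, name, field)				\
	((val) |= (((field) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint32_t sb_id_and_flags = 0;

	DEMO_SET_FIELD(sb_id_and_flags, DEMO_SB_INDEX, 0x1234u);  /* sb_ack */
	DEMO_SET_FIELD(sb_id_and_flags, DEMO_UPDATE_FLAG, 1u);
	DEMO_SET_FIELD(sb_id_and_flags, DEMO_ENABLE_INT, 2u);
	DEMO_SET_FIELD(sb_id_and_flags, DEMO_SEGMENT, 1u);

	printf("ack word: 0x%08x\n", sb_id_and_flags);
	return 0;
}
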
@@ -127,46 +151,29 @@ static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
-#endif
-{
- unsigned int i;
- for (i = 0; i < size / sizeof(*data); i++)
- DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
-}
-
-#ifdef ECORE_CONFIG_DIRECT_HWFN
-static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM * addr,
- int size, u32 *data)
-#else
-static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
- void OSAL_IOMEM * addr,
- int size, u32 *data)
#endif
{
unsigned int i;
for (i = 0; i < size / sizeof(*data); i++)
- DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
- data[i]);
+ DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM * addr,
- int size, u32 *data)
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
{
- __internal_ram_wr_relaxed(p_hwfn, addr, size, data);
+ __internal_ram_wr(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
- int size, u32 *data)
+ int size, u32 *data)
{
- __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
+ __internal_ram_wr(OSAL_NULL, addr, size, data);
}
#endif
-
#endif
struct ecore_hwfn;
@@ -196,7 +203,6 @@ void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
u8 timeset);
/**
- *
* @brief ecore_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
@@ -208,17 +214,15 @@ void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
enum ecore_int_mode int_mode);
/**
- *
* @brief ecore_int_igu_disable_int - disable device interrupts
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+ struct ecore_ptt *p_ptt);
/**
- *
* @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
* register from igu.
*
@@ -246,11 +250,12 @@ u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr, u16 sb_id);
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
/**
* @brief ecore_int_sb_setup - Setup the sb.
*
@@ -258,8 +263,9 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
* @param p_ptt
* @param sb_info initialized sb_info structure
*/
-void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info);
/**
* @brief ecore_int_sb_release - releases the sb_info structure.
@@ -274,9 +280,9 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
- struct ecore_sb_info *sb_info,
- u16 sb_id);
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id);
/**
* @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
@@ -296,7 +302,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
*
* @return
*/
-void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_sb_cnt_info *p_sb_cnt_info);
/**
@@ -352,12 +358,11 @@ ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
/**
* @brief - Doorbell Recovery handler.
- * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ * Run doorbell recovery in case of PF overflow (and flush DORQ if
+ * needed).
*
* @param p_hwfn
* @param p_ptt
*/
-enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_db_rec_handler(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#endif
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __ECORE_SRIOV_API_H__
#define __ECORE_SRIOV_API_H__
@@ -12,7 +12,17 @@
#define ECORE_ETH_VF_NUM_MAC_FILTERS 1
#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
-#define ECORE_VF_ARRAY_LENGTH (3)
+
+#define ECORE_VF_ARRAY_LENGTH (DIV_ROUND_UP(MAX_NUM_VFS, 64))
+
+#define ECORE_VF_ARRAY_GET_VFID(arr, vfid) \
+ (((arr)[(vfid) / 64]) & (1ULL << ((vfid) % 64)))
+
+#define ECORE_VF_ARRAY_SET_VFID(arr, vfid) \
+ (((arr)[(vfid) / 64]) |= (1ULL << ((vfid) % 64)))
+
+#define ECORE_VF_ARRAY_CLEAR_VFID(arr, vfid) \
+ (((arr)[(vfid) / 64]) &= ~(1ULL << ((vfid) % 64)))
#define ECORE_VF_ARRAY_GET_VFID(arr, vfid) \
(((arr)[(vfid) / 64]) & (1ULL << ((vfid) % 64)))
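
With ECORE_VF_ARRAY_LENGTH now derived from MAX_NUM_VFS, the GET/SET/CLEAR helpers treat the array as one long bitmap: VF vfid maps to word vfid / 64, bit vfid % 64. A standalone sketch of the same arithmetic follows; the VF count and DEMO_* names are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_NUM_VFS	240u	/* illustrative only */
#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DEMO_VF_ARRAY_LENGTH	DEMO_DIV_ROUND_UP(DEMO_MAX_NUM_VFS, 64)

#define DEMO_VF_GET(arr, vfid)	 (((arr)[(vfid) / 64]) &  (1ULL << ((vfid) % 64)))
#define DEMO_VF_SET(arr, vfid)	 (((arr)[(vfid) / 64]) |= (1ULL << ((vfid) % 64)))
#define DEMO_VF_CLEAR(arr, vfid) (((arr)[(vfid) / 64]) &= ~(1ULL << ((vfid) % 64)))

int main(void)
{
	uint64_t vfs[DEMO_VF_ARRAY_LENGTH] = { 0 };

	DEMO_VF_SET(vfs, 5);
	DEMO_VF_SET(vfs, 130);		/* lands in vfs[2], bit 2 */
	printf("vf 5 set:   %s\n", DEMO_VF_GET(vfs, 5) ? "yes" : "no");
	printf("vf 130 set: %s\n", DEMO_VF_GET(vfs, 130) ? "yes" : "no");
	DEMO_VF_CLEAR(vfs, 5);
	printf("vf 5 set:   %s\n", DEMO_VF_GET(vfs, 5) ? "yes" : "no");
	return 0;
}
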
@@ -28,7 +38,10 @@
#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
/* @@@ TBD MichalK - what should this number be*/
-#define ECORE_MAX_VF_CHAINS_PER_PF 16
+#define ECORE_MAX_QUEUE_VF_CHAINS_PER_PF 16
+#define ECORE_MAX_CNQ_VF_CHAINS_PER_PF 16
+#define ECORE_MAX_VF_CHAINS_PER_PF \
+ (ECORE_MAX_QUEUE_VF_CHAINS_PER_PF + ECORE_MAX_CNQ_VF_CHAINS_PER_PF)
/* vport update extended feature tlvs flags */
enum ecore_iov_vport_update_flag {
@@ -45,7 +58,7 @@ enum ecore_iov_vport_update_flag {
/* PF to VF STATUS is part of vfpf-channel API
* and must be forward compatible
-*/
+ */
enum ecore_iov_pf_to_vf_status {
PFVF_STATUS_WAITING = 0,
PFVF_STATUS_SUCCESS,
@@ -67,26 +80,18 @@ struct ecore_mcp_link_capabilities;
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+#define VFPF_ACQUIRE_OS_FREEBSD (5)
struct ecore_vf_acquire_sw_info {
u32 driver_version;
u8 os_type;
-
- /* We have several close releases that all use ~same FW with different
- * versions [making it incompatible as the versioning scheme is still
- * tied directly to FW version], allow to override the checking. Only
- * those versions would actually support this feature [so it would not
- * break forward compatibility with newer HV drivers that are no longer
- * suited].
- */
- bool override_fw_version;
};
struct ecore_public_vf_info {
/* These copies will later be reflected in the bulletin board,
* but this copy should be newer.
*/
- u8 forced_mac[ETH_ALEN];
+ u8 forced_mac[ECORE_ETH_ALEN];
u16 forced_vlan;
/* Trusted VFs can configure promiscuous mode and
@@ -104,14 +109,17 @@ struct ecore_iov_vf_init_params {
* number of Rx/Tx queues.
*/
/* TODO - remove this limitation */
- u16 num_queues;
+ u8 num_queues;
+ u8 num_cnqs;
+
+ u8 cnq_offset;
/* Allow the client to choose which qzones to use for Rx/Tx,
* and which queue_base to use for Tx queues on a per-queue basis.
* Notice values should be relative to the PF resources.
*/
- u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
- u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_rx_queue[ECORE_MAX_QUEUE_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_QUEUE_VF_CHAINS_PER_PF];
u8 vport_id;
@@ -175,6 +183,33 @@ struct ecore_hw_sriov_info {
};
#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief ecore_iov_pci_enable_prolog - Called before enabling sriov on pci.
+ * Reconfigure QM to initialize PQs for the
+ * max_active_vfs.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param max_active_vfs
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_pci_enable_prolog(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 max_active_vfs);
+
+/**
+ * @brief ecore_iov_pci_disable_epilog - Called after disabling sriov on pci.
+ * Reconfigure QM to delete VF PQs.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_pci_disable_epilog(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#ifndef LINUX_REMOVE
/**
* @brief mark/clear all VFs before/after an incoming PCIe sriov
@@ -210,10 +245,10 @@ void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_iov_vf_init_params
- *p_params);
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params);
/**
* @brief ecore_iov_process_mbx_req - process a request received
@@ -322,6 +357,7 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
*/
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
+#endif
/**
* @brief Check if given VF ID @vfid is valid
@@ -704,7 +740,7 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
*
* @return - rate in Mbps
*/
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+u32 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief - Configure min rate for VF's vport.
@@ -716,11 +752,21 @@ int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
*/
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate);
-#endif
/**
- * @brief ecore_pf_configure_vf_queue_coalesce - PF configure coalesce
- * parameters of VFs for Rx and Tx queue.
+ * @brief Set default vlan in VF info without configuring FW/HW.
+ *
+ * @param p_hwfn
+ * @param vlan
+ * @param vfid
+ */
+enum _ecore_status_t ecore_iov_set_default_vlan(struct ecore_hwfn *p_hwfn,
+ u16 vlan, int vfid);
+
+
+/**
+ * @brief ecore_pf_configure_vf_queue_coalesce - PF configures the coalescing
+ *        parameters of a VF's Rx and Tx queues.
* While the API allows setting coalescing per-qid, all queues sharing a SB
* should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
* otherwise configuration would break.
@@ -744,10 +790,9 @@ ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param rel_vf_id
*
- * @return MAX_NUM_VFS_K2 in case no further active VFs, otherwise index.
+ * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
-
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
u16 vxlan_port, u16 geneve_port);
@@ -764,11 +809,21 @@ void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
bool b_is_hw);
#endif
-#endif /* CONFIG_ECORE_SRIOV */
+
+/**
+ * @brief Run DORQ flush for VFs that may use EDPM, and notify VFs that need
+ * to perform doorbell overflow recovery. A VF needs to perform recovery
+ * if it or its PF overflowed.
+ *
+ * @param p_hwfn
+ */
+enum _ecore_status_t ecore_iov_db_rec_handler(struct ecore_hwfn *p_hwfn);
+
+#endif
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
- _i < MAX_NUM_VFS_K2; \
+ _i < MAX_NUM_VFS; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
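
ecore_for_each_vf() relies on a simple iterator contract: ask for the next active VF at or after a given index, and stop once the lookup returns MAX_NUM_VFS. The standalone sketch below mimics that contract with a toy active-VF table; all demo_* names are illustrative, not the driver's API.

#include <stdio.h>

#define DEMO_MAX_NUM_VFS 8u

/* Toy stand-in for the per-VF "active" state kept by the driver. */
static const int demo_vf_active[DEMO_MAX_NUM_VFS] = { 0, 1, 0, 1, 1, 0, 0, 1 };

/* Returns the first active VF index >= rel_vf_id, or DEMO_MAX_NUM_VFS when
 * there are no further active VFs -- the same contract the macro relies on.
 */
static unsigned int demo_next_active_vf(unsigned int rel_vf_id)
{
	unsigned int i;

	for (i = rel_vf_id; i < DEMO_MAX_NUM_VFS; i++)
		if (demo_vf_active[i])
			return i;
	return DEMO_MAX_NUM_VFS;
}

#define demo_for_each_vf(_i)						\
	for (_i = demo_next_active_vf(0);				\
	     _i < DEMO_MAX_NUM_VFS;					\
	     _i = demo_next_active_vf(_i + 1))

int main(void)
{
	unsigned int vf;

	demo_for_each_vf(vf)
		printf("active VF %u\n", vf);
	return 0;
}
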
@@ -1,273 +1,304 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
* All rights reserved.
- * www.cavium.com
+ * www.marvell.com
*/
-
#ifndef __IRO_H__
#define __IRO_H__
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg data type. */
+#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[1].base + ((pf_id) * IRO[1].m1))
+#define PSTORM_PKT_DUPLICATION_CFG_SIZE (IRO[1].size)
/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[2].size)
/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) (IRO[2].base + \
- ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[3].base + ((port_id) * IRO[3].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[3].size)
+/* Tstorm LL2 packet duplication configuration. Use tstorm_pkt_dup_cfg data type. */
+#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define TSTORM_PKT_DUPLICATION_CFG_SIZE (IRO[4].size)
/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[3].base + \
- ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[5].base + ((vf_id) * IRO[5].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[5].size)
/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[6].base + ((pf_id) * IRO[6].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[6].size)
/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+ (IRO[7].base + ((pf_id) * IRO[7].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[7].size)
/* Ustorm eth queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) (IRO[6].base + \
- ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+ (IRO[8].base + ((queue_zone_id) * IRO[8].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[8].size)
/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + \
- ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+ (IRO[9].base + ((queue_zone_id) * IRO[9].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[9].size)
/* Xstorm common PQ info */
-#define XSTORM_PQ_INFO_OFFSET(pq_id) (IRO[8].base + ((pq_id) * IRO[8].m1))
-#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[10].base + ((pq_id) * IRO[10].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[10].size)
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[15].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[15].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[16].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[16].size)
/* Xstorm overlay buffer host address */
-#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
-#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
/* Ystorm overlay buffer host address */
-#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
-#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
/* Pstorm overlay buffer host address */
-#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
-#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
/* Tstorm overlay buffer host address */
-#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
-#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
/* Mstorm overlay buffer host address */
-#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
-#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[21].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[21].size)
/* Ustorm overlay buffer host address */
-#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
-#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[22].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[22].size)
/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[21].base + \
- ((core_rx_queue_id) * IRO[21].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[23].size)
/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[24].base + ((core_rx_queue_id) * IRO[24].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[25].base + ((core_rx_queue_id) * IRO[25].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[25].size)
/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[26].base + ((core_tx_stats_id) * IRO[26].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[26].size)
/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
- ((stat_counter_id) * IRO[25].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
-/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode.
- */
-#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[27].base + \
- ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[27].base + ((stat_counter_id) * IRO[27].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[27].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[28].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[28].size)
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size mode. */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[29].base + ((vf_id) * IRO[29].m1) + ((vf_queue_id) * IRO[29].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[29].size)
/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[28].base + \
- ((queue_id) * IRO[28].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[30].base + ((queue_id) * IRO[30].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[30].size)
+/* Mstorm ETH PF BD queues producers */
+#define MSTORM_ETH_PF_BD_PROD_OFFSET(queue_id) \
+ (IRO[31].base + ((queue_id) * IRO[31].m1))
+#define MSTORM_ETH_PF_BD_PROD_SIZE (IRO[31].size)
+/* Mstorm ETH PF CQ queues producers */
+#define MSTORM_ETH_PF_CQE_PROD_OFFSET(queue_id) \
+ (IRO[32].base + ((queue_id) * IRO[32].m1))
+#define MSTORM_ETH_PF_CQE_PROD_SIZE (IRO[32].size)
/* Mstorm pf statistics */
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
- ((stat_counter_id) * IRO[30].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[34].base + ((stat_counter_id) * IRO[34].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[34].size)
/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[31].base + ((pf_id) * IRO[31].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[35].base + ((pf_id) * IRO[35].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[35].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[32].base + \
- ((stat_counter_id) * IRO[32].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[36].base + ((stat_counter_id) * IRO[36].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[36].size)
/* Pstorm pf statistics */
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[37].size)
/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[34].base + \
- ((ethType_id) * IRO[34].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[38].base + ((eth_type_id) * IRO[38].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[38].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[39].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[39].size)
/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
-/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update.
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[40].size)
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. Use
+ * eth_tstorm_rss_update_data for update.
*/
-#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) (IRO[37].base + \
- ((pf_id) * IRO[37].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
+#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[41].size)
/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[38].base + \
- ((queue_id) * IRO[38].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[42].base + ((queue_id) * IRO[42].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[42].size)
+/* Hairpin Allocations Table with Per-PF Entry of Hairpin base CID, number of allocated CIDs and
+ * Default Vport
+ */
+#define XSTORM_ETH_HAIRPIN_CID_ALLOCATION_OFFSET(pf_id) \
+ (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define XSTORM_ETH_HAIRPIN_CID_ALLOCATION_SIZE (IRO[43].size)
/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[39].base + \
- ((rss_id) * IRO[39].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[44].base + ((rss_id) * IRO[44].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[44].size)
/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[40].base + \
- ((rss_id) * IRO[40].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[45].base + ((rss_id) * IRO[45].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[45].size)
/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[41].base + \
- ((pf_id) * IRO[41].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[46].base + ((pf_id) * IRO[46].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[46].size)
/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[42].base + \
- ((cmdq_queue_id) * IRO[42].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
-/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id
- */
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
- ((bdq_id) * IRO[43].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[47].base + ((cmdq_queue_id) * IRO[47].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[47].size)
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, BDqueue-id */
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[48].base + ((storage_func_id) * IRO[48].m1) + ((bdq_id) * IRO[48].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[48].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
- ((bdq_id) * IRO[44].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[49].base + ((storage_func_id) * IRO[49].m1) + ((bdq_id) * IRO[49].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[49].size)
/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[45].base + \
- ((storage_func_id) * IRO[45].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[50].base + ((storage_func_id) * IRO[50].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[50].size)
/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[46].base + \
- ((storage_func_id) * IRO[46].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[51].base + ((storage_func_id) * IRO[51].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[51].size)
/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[47].base + \
- ((storage_func_id) * IRO[47].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[52].base + ((storage_func_id) * IRO[52].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[52].size)
/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[48].base + \
- ((storage_func_id) * IRO[48].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[53].base + ((storage_func_id) * IRO[53].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[53].size)
/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[49].base + \
- ((storage_func_id) * IRO[49].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[54].base + ((storage_func_id) * IRO[54].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[54].size)
/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[50].base + \
- ((storage_func_id) * IRO[50].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[55].base + ((storage_func_id) * IRO[55].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[55].size)
/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[51].base + \
- ((pf_id) * IRO[51].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[56].base + ((pf_id) * IRO[56].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[56].size)
/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[52].base + \
- ((pf_id) * IRO[52].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[57].base + ((pf_id) * IRO[57].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[57].size)
/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[53].base + \
- ((rdma_stat_counter_id) * IRO[53].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[58].base + ((rdma_stat_counter_id) * IRO[58].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[58].size)
/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[54].base + \
- ((rdma_stat_counter_id) * IRO[54].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[59].base + ((rdma_stat_counter_id) * IRO[59].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[59].size)
/* Xstorm error level for assert */
-#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[55].base + \
- ((pf_id) * IRO[55].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[60].base + ((pf_id) * IRO[60].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
/* Ystorm error level for assert */
-#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[56].base + \
- ((pf_id) * IRO[56].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[61].base + ((pf_id) * IRO[61].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[61].size)
/* Pstorm error level for assert */
-#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[57].base + \
- ((pf_id) * IRO[57].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[62].base + ((pf_id) * IRO[62].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[62].size)
/* Tstorm error level for assert */
-#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[58].base + \
- ((pf_id) * IRO[58].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[63].base + ((pf_id) * IRO[63].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (