@@ -81,6 +81,11 @@
#define BROADCOM_DEV_957508_N2100 0x5208
#define BROADCOM_DEV_957414_N225 0x4145
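+/* HWRM interface version codes, hoisted from the HWRM source file (removed below). */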
+#define HWRM_SPEC_CODE_1_8_3 0x10803
+#define HWRM_VERSION_1_9_1 0x10901
+#define HWRM_VERSION_1_9_2 0x10903
+#define HWRM_VERSION_1_10_2_13 0x10a020d
+
#define BNXT_MAX_MTU 9574
#define BNXT_NUM_VLANS 2
#define BNXT_MAX_PKT_LEN (BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\
@@ -430,16 +435,26 @@ struct bnxt_coal {
#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHFT)
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
+
#define PTU_PTE_VALID 0x1UL
#define PTU_PTE_LAST 0x2UL
#define PTU_PTE_NEXT_TO_LAST 0x4UL
+#define BNXT_CTX_MIN 1
+#define BNXT_CTX_INV 0xffff
+
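+/* True when firmware reports a context-init value for this backing store type. */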
+#define BNXT_CTX_INIT_VALID(flags) \
+ ((flags) & \
+ HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT)
+
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
uint32_t flags;
#define BNXT_RMEM_VALID_PTE_FLAG 1
#define BNXT_RMEM_RING_PTE_FLAG 2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
void **pg_arr;
rte_iova_t *dma_arr;
@@ -460,7 +475,50 @@ struct bnxt_ctx_pg_info {
struct bnxt_ring_mem_info ring_mem;
};
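+/* Per-type backing store descriptor populated from HWRM_FUNC_BACKING_STORE_QCAPS_V2. */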
+struct bnxt_ctx_mem {
+ uint16_t type;
+ uint16_t entry_size;
+ uint32_t flags;
+#define BNXT_CTX_MEM_TYPE_VALID \
+ HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID
+ uint32_t instance_bmap;
+ uint8_t init_value;
+ uint8_t entry_multiple;
+ uint16_t init_offset;
+#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
+ uint32_t max_entries;
+ uint32_t min_entries;
+ uint8_t last:1;
+ uint8_t split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ uint32_t qp_l2_entries;
+ uint32_t qp_qp1_entries;
+ uint32_t qp_fast_qpmd_entries;
+ };
+ uint32_t srq_l2_entries;
+ uint32_t cq_l2_entries;
+ uint32_t vnic_entries;
+ struct {
+ uint32_t mrav_av_entries;
+ uint32_t mrav_num_entries_units;
+ };
+ uint32_t split[BNXT_MAX_SPLIT_ENTRY];
+ };
+ struct bnxt_ctx_pg_info *pg_info;
+};
+
+#define BNXT_CTX_FLAG_INITED 0x01
+
struct bnxt_ctx_mem_info {
+ struct bnxt_ctx_mem *ctx_arr;
+ uint32_t supported_types;
+ uint32_t flags;
+ uint16_t types;
+ uint8_t tqm_fp_rings_count;
+
+ /* The following are used for V1 */
uint32_t qp_max_entries;
uint16_t qp_min_qp1_entries;
uint16_t qp_max_l2_entries;
@@ -484,10 +542,6 @@ struct bnxt_ctx_mem_info {
uint16_t tim_entry_size;
uint32_t tim_max_entries;
uint8_t tqm_entries_multiple;
- uint8_t tqm_fp_rings_count;
-
- uint32_t flags;
-#define BNXT_CTX_FLAG_INITED 0x01
struct bnxt_ctx_pg_info qp_mem;
struct bnxt_ctx_pg_info srq_mem;
@@ -739,6 +793,13 @@ struct bnxt {
#define BNXT_FW_CAP_TRUFLOW_EN BIT(8)
#define BNXT_FW_CAP_VLAN_TX_INSERT BIT(9)
#define BNXT_FW_CAP_RX_ALL_PKT_TS BIT(10)
+#define BNXT_FW_CAP_BACKING_STORE_V2 BIT(12)
+#define BNXT_FW_BACKING_STORE_V2_EN(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+#define BNXT_FW_BACKING_STORE_V1_EN(bp) \
+ (BNXT_CHIP_P5_P7((bp)) && \
+ (bp)->hwrm_spec_code >= HWRM_VERSION_1_9_2 && \
+ !BNXT_VF((bp)))
#define BNXT_TRUFLOW_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN &&\
(bp)->app_id != 0xFF)
@@ -4759,8 +4759,26 @@ static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev)
return 0;
}
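+/* Seed context memory with the firmware-provided init value: either the
+ * whole block when no valid offset is given, or one byte per entry at
+ * init_offset (already converted to bytes).
+ */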
+static void bnxt_init_ctxm_mem(struct bnxt_ctx_mem *ctxm, void *p, int len)
+{
+ uint8_t init_val = ctxm->init_value;
+ uint16_t offset = ctxm->init_offset;
+ uint8_t *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
+}
+
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg,
+ struct bnxt_ctx_mem *ctxm,
uint32_t mem_size,
const char *suffix,
uint16_t idx)
@@ -4776,8 +4794,8 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
if (!mem_size)
return 0;
- rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
- BNXT_PAGE_SIZE;
+ rmem->nr_pages =
+ RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / BNXT_PAGE_SIZE;
rmem->page_size = BNXT_PAGE_SIZE;
snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d",
@@ -4794,13 +4812,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
rmem->pg_arr = ctx_pg->ctx_pg_arr;
rmem->dma_arr = ctx_pg->ctx_dma_arr;
- rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_USE_FULL_PAGE_FLAG;
valid_bits = PTU_PTE_VALID;
if (rmem->nr_pages > 1) {
snprintf(name, RTE_MEMZONE_NAMESIZE,
- "bnxt_ctx_pg_tbl%s_%x_%d",
+ "bnxt_ctxpgtbl%s_%x_%d",
suffix, idx, bp->eth_dev->data->port_id);
name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(name);
@@ -4816,9 +4834,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
return -ENOMEM;
}
- memset(mz->addr, 0, mz->len);
+ memset(mz->addr, 0xff, mz->len);
mz_phys_addr = mz->iova;
+ if (ctxm != NULL)
+ bnxt_init_ctxm_mem(ctxm, mz->addr, mz->len);
rmem->pg_tbl = mz->addr;
rmem->pg_tbl_map = mz_phys_addr;
rmem->pg_tbl_mz = mz;
@@ -4839,9 +4859,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
return -ENOMEM;
}
- memset(mz->addr, 0, mz->len);
+ memset(mz->addr, 0xff, mz->len);
mz_phys_addr = mz->iova;
+ if (ctxm != NULL)
+ bnxt_init_ctxm_mem(ctxm, mz->addr, mz->len);
for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
rmem->pg_arr[i] = ((char *)mz->addr) + sz;
rmem->dma_arr[i] = mz_phys_addr + sz;
@@ -4866,6 +4888,34 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
return 0;
}
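+/* Free the per-instance page arrays and memzones for every V2 backing store type. */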
+static void bnxt_free_ctx_mem_v2(struct bnxt *bp)
+{
+ uint16_t type;
+
+ for (type = 0; type < bp->ctx->types; type++) {
+ struct bnxt_ctx_mem *ctxm = &bp->ctx->ctx_arr[type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+
+ for (i = 0; i < n; i++) {
+ rte_free(ctx_pg[i].ctx_pg_arr);
+ rte_free(ctx_pg[i].ctx_dma_arr);
+ rte_memzone_free(ctx_pg[i].ring_mem.mz);
+ rte_memzone_free(ctx_pg[i].ring_mem.pg_tbl_mz);
+ }
+
+ rte_free(ctx_pg);
+ ctxm->pg_info = NULL;
+ }
+ rte_free(bp->ctx->ctx_arr);
+ bp->ctx->ctx_arr = NULL;
+}
+
static void bnxt_free_ctx_mem(struct bnxt *bp)
{
int i;
@@ -4874,6 +4924,12 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
return;
bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+
+ if (BNXT_FW_BACKING_STORE_V2_EN(bp)) {
+ bnxt_free_ctx_mem_v2(bp);
+ goto free_ctx;
+ }
+
rte_free(bp->ctx->qp_mem.ctx_pg_arr);
rte_free(bp->ctx->srq_mem.ctx_pg_arr);
rte_free(bp->ctx->cq_mem.ctx_pg_arr);
@@ -4903,6 +4959,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
}
+free_ctx:
rte_free(bp->ctx);
bp->ctx = NULL;
}
@@ -4921,28 +4978,113 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
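+/* Allocate backing store pages for every valid V2 context type and instance. */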
+int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_mem *ctx2;
+ uint16_t type;
+ int rc = 0;
+
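+ /* ctx_arr[0] is the first queried type (QP); its L2 entry count also sizes TIM below. */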
+ ctx2 = &ctx->ctx_arr[0];
+ for (type = 0; type < ctx->types && rc == 0; type++) {
+ struct bnxt_ctx_mem *ctxm = &ctx->ctx_arr[type];
+ struct bnxt_ctx_pg_info *ctx_pg;
+ uint32_t entries, mem_size;
+ int w = 1;
+ int i;
+
+ if (ctxm->entry_size == 0)
+ continue;
+
+ ctx_pg = ctxm->pg_info;
+
+ if (ctxm->instance_bmap)
+ w = hweight32(ctxm->instance_bmap);
+
+ for (i = 0; i < w && rc == 0; i++) {
+ char name[RTE_MEMZONE_NAMESIZE] = {0};
+
+ snprintf(name, sizeof(name), "_%d_%d", i, type);
+
+ if (ctxm->entry_multiple)
+ entries = bnxt_roundup(ctxm->max_entries,
+ ctxm->entry_multiple);
+ else
+ entries = ctxm->max_entries;
+
+ if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ)
+ entries = ctxm->cq_l2_entries;
+ else if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QP)
+ entries = ctxm->qp_l2_entries;
+ else if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MRAV)
+ entries = ctxm->mrav_av_entries;
+ else if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM)
+ entries = ctx2->qp_l2_entries;
+ entries = clamp_t(uint32_t, entries, ctxm->min_entries,
+ ctxm->max_entries);
+ ctx_pg[i].entries = entries;
+ mem_size = ctxm->entry_size * entries;
+ PMD_DRV_LOG(DEBUG,
+ "Type:0x%x instance:%d entries:%d size:%d\n",
+ ctxm->type, i, ctx_pg[i].entries, mem_size);
+ rc = bnxt_alloc_ctx_mem_blk(bp, &ctx_pg[i],
+ ctxm->init_value ? ctxm : NULL,
+ mem_size, name, i);
+ }
+ }
+
+ return rc;
+}
+
int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
uint32_t mem_size, ena, entries;
+ int types = BNXT_CTX_MIN;
uint32_t entries_sp, min;
- int i, rc;
+ int i, rc = 0;
+
+ if (!BNXT_FW_BACKING_STORE_V1_EN(bp) &&
+ !BNXT_FW_BACKING_STORE_V2_EN(bp))
+ return rc;
+
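+ /* V2 firmware enumerates its backing store types; otherwise fall back to the fixed minimum. */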
+ if (BNXT_FW_BACKING_STORE_V2_EN(bp)) {
+ types = bnxt_hwrm_func_backing_store_types_count(bp);
+ if (types <= 0)
+ return types;
+ }
+
+ rc = bnxt_hwrm_func_backing_store_ctx_alloc(bp, types);
+ if (rc != 0)
+ return rc;
+
+ if (bp->ctx->flags & BNXT_CTX_FLAG_INITED)
+ return 0;
+
+ ctx = bp->ctx;
+ if (BNXT_FW_BACKING_STORE_V2_EN(bp)) {
+ rc = bnxt_hwrm_func_backing_store_qcaps_v2(bp);
+
+ for (i = 0; i < bp->ctx->types && rc == 0; i++) {
+ struct bnxt_ctx_mem *ctxm = &ctx->ctx_arr[i];
+
+ rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm);
+ }
+ goto done;
+ }
rc = bnxt_hwrm_func_backing_store_qcaps(bp);
if (rc) {
PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
return rc;
}
- ctx = bp->ctx;
- if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
- return 0;
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
if (ctx->qp_entry_size) {
mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "qp_mem", 0);
if (rc)
return rc;
}
@@ -4951,7 +5093,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->srq_max_l2_entries;
if (ctx->srq_entry_size) {
mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "srq_mem", 0);
if (rc)
return rc;
}
@@ -4960,7 +5102,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->cq_max_l2_entries;
if (ctx->cq_entry_size) {
mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "cq_mem", 0);
if (rc)
return rc;
}
@@ -4970,7 +5112,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx->vnic_max_ring_table_entries;
if (ctx->vnic_entry_size) {
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "vnic_mem", 0);
if (rc)
return rc;
}
@@ -4979,7 +5121,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->stat_max_entries;
if (ctx->stat_entry_size) {
mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "stat_mem", 0);
if (rc)
return rc;
}
@@ -5003,8 +5145,8 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = i ? entries : entries_sp;
if (ctx->tqm_entry_size) {
mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size,
- "tqm_mem", i);
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL,
+ mem_size, "tqm_mem", i);
if (rc)
return rc;
}
@@ -5016,6 +5158,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+done:
if (rc)
PMD_DRV_LOG(ERR,
"Failed to configure context mem: rc = %d\n", rc);
@@ -24,10 +24,6 @@
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
-#define HWRM_SPEC_CODE_1_8_3 0x10803
-#define HWRM_VERSION_1_9_1 0x10901
-#define HWRM_VERSION_1_9_2 0x10903
-#define HWRM_VERSION_1_10_2_13 0x10a020d
struct bnxt_plcmodes_cfg {
uint32_t flags;
uint16_t jumbo_thresh;
@@ -35,6 +31,43 @@ struct bnxt_plcmodes_cfg {
uint16_t hds_threshold;
};
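+/* Human-readable names for HWRM backing store types, indexed by type ID (debug only). */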
+const char *bnxt_backing_store_types[] = {
+ "Queue pair",
+ "Shared receive queue",
+ "Completion queue",
+ "Virtual NIC",
+ "Statistic context",
+ "Slow-path TQM ring",
+ "Fast-path TQM ring",
+ "Unused",
+ "Unused",
+ "Unused",
+ "Unused",
+ "Unused",
+ "Unused",
+ "Unused",
+ "MR and MAV Context",
+ "TIM",
+ "Unused",
+ "Unused",
+ "Unused",
+ "Tx key context",
+ "Rx key context",
+ "Mid-path TQM ring",
+ "SQ Doorbell shadow region",
+ "RQ Doorbell shadow region",
+ "SRQ Doorbell shadow region",
+ "CQ Doorbell shadow region",
+ "QUIC Tx key context",
+ "QUIC Rx key context",
+ "Invalid type",
+ "Invalid type",
+ "Invalid type",
+ "Invalid type",
+ "Invalid type",
+ "Invalid type"
+};
+
static int page_getenum(size_t size)
{
if (size <= 1 << 4)
@@ -894,6 +927,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED) {
+ PMD_DRV_LOG(DEBUG, "Backing store v2 supported\n");
+ if (BNXT_CHIP_P7(bp))
+ bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+ }
if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) {
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n");
@@ -5461,7 +5499,194 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
return 0;
}
-#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
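+/* Record the firmware init value/offset; the offset arrives in 4-byte units. */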
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem *ctxm,
+ uint8_t init_val,
+ uint8_t init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ char name[RTE_MEMZONE_NAMESIZE];
+ uint16_t type;
+
+ for (type = 0; type < ctx->types; type++) {
+ struct bnxt_ctx_mem *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries || ctxm->pg_info)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+
+ sprintf(name, "bnxt_ctx_pgmem_%d_%d",
+ bp->eth_dev->data->port_id, type);
+ ctxm->pg_info = rte_malloc(name, sizeof(*ctxm->pg_info) * n,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
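+/* Doorbell shadow regions are driver-managed: zero entry_size so no backing pages get allocated. */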
+static void bnxt_init_ctx_v2_driver_managed(struct bnxt *bp __rte_unused,
+ struct bnxt_ctx_mem *ctxm)
+{
+ switch (ctxm->type) {
+ case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SQ_DB_SHADOW:
+ case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RQ_DB_SHADOW:
+ case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRQ_DB_SHADOW:
+ case HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CQ_DB_SHADOW:
+ ctxm->entry_size = 0;
+ ctxm->min_entries = 1;
+ ctxm->max_entries = 1;
+ break;
+ }
+}
+
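+/* Query V2 backing store capabilities per type, following the firmware's next_valid_type chain. */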
+int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_v2_input req = {0};
+ struct hwrm_func_backing_store_qcaps_v2_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ uint16_t last_valid_type = BNXT_CTX_INV;
+ uint16_t last_valid_idx = 0;
+ uint16_t types, type;
+ int rc;
+
+ for (types = 0, type = 0; types < bp->ctx->types && type != BNXT_CTX_INV; types++) {
+ struct bnxt_ctx_mem *ctxm = &bp->ctx->ctx_arr[types];
+ uint8_t init_val, init_off, i;
+ uint32_t *p;
+ uint32_t flags;
+
+ HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS_V2, BNXT_USE_CHIMP_MB);
+ req.type = rte_cpu_to_le_16(type);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ flags = rte_le_to_cpu_32(resp->flags);
+ type = rte_le_to_cpu_16(resp->next_valid_type);
+ if (!(flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID))
+ goto next;
+
+ ctxm->type = rte_le_to_cpu_16(resp->type);
+
+ ctxm->flags = flags;
+ if (flags &
+ HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_DRIVER_MANAGED_MEMORY) {
+ bnxt_init_ctx_v2_driver_managed(bp, ctxm);
+ goto next;
+ }
+ ctxm->entry_size = rte_le_to_cpu_16(resp->entry_size);
+
+ if (ctxm->entry_size == 0)
+ goto next;
+
+ ctxm->instance_bmap = rte_le_to_cpu_32(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = rte_le_to_cpu_32(resp->max_num_entries);
+ ctxm->min_entries = rte_le_to_cpu_32(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = RTE_MIN(resp->subtype_valid_cnt,
+ BNXT_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = rte_le_to_cpu_32(*p);
+
+ PMD_DRV_LOG(DEBUG,
+ "type:%s size:%d multiple:%d max:%d min:%d split:%d init_val:%d init_off:%d init:%d bmap:0x%x\n",
+ bnxt_backing_store_types[ctxm->type], ctxm->entry_size,
+ ctxm->entry_multiple, ctxm->max_entries, ctxm->min_entries,
+ ctxm->split_entry_cnt, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags), ctxm->instance_bmap);
+ last_valid_type = ctxm->type;
+ last_valid_idx = types;
+next:
+ HWRM_UNLOCK();
+ }
+ ctx->ctx_arr[last_valid_idx].last = true;
+ PMD_DRV_LOG(DEBUG, "Last valid type 0x%x\n", last_valid_type);
+
+ rc = bnxt_alloc_all_ctx_pg_info(bp);
+ if (rc == 0)
+ rc = bnxt_alloc_ctx_pg_tbls(bp);
+ return rc;
+}
+
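+/* Walk the same QCAPS_V2 chain once, counting the types firmware marks valid. */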
+int bnxt_hwrm_func_backing_store_types_count(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_v2_input req = {0};
+ struct hwrm_func_backing_store_qcaps_v2_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ uint16_t type = 0;
+ int types = 0;
+ int rc;
+
+ /* Calculate number of valid context types */
+ do {
+ uint32_t flags;
+
+ HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS_V2, BNXT_USE_CHIMP_MB);
+ req.type = rte_cpu_to_le_16(type);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ flags = rte_le_to_cpu_32(resp->flags);
+ type = rte_le_to_cpu_16(resp->next_valid_type);
+ HWRM_UNLOCK();
+
+ if (flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID) {
+ PMD_DRV_LOG(DEBUG, "Valid types 0x%x - %s\n",
+ req.type, bnxt_backing_store_types[req.type]);
+ types++;
+ }
+ } while (type != HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID);
+ PMD_DRV_LOG(DEBUG, "Number of valid types %d\n", types);
+
+ return types;
+}
+
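+/* Allocate the context info struct plus a per-type array sized by the count above. */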
+int bnxt_hwrm_func_backing_store_ctx_alloc(struct bnxt *bp, uint16_t types)
+{
+ int alloc_len = sizeof(struct bnxt_ctx_mem_info);
+
+ if (!BNXT_CHIP_P5_P7(bp) ||
+ bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
+ BNXT_VF(bp) ||
+ bp->ctx)
+ return 0;
+
+ bp->ctx = rte_zmalloc("bnxt_ctx_mem", alloc_len,
+ RTE_CACHE_LINE_SIZE);
+ if (bp->ctx == NULL)
+ return -ENOMEM;
+
+ alloc_len = sizeof(struct bnxt_ctx_mem) * types;
+ bp->ctx->ctx_arr = rte_zmalloc("bnxt_ctx_mem_arr",
+ alloc_len,
+ RTE_CACHE_LINE_SIZE);
+ if (bp->ctx->ctx_arr == NULL)
+ return -ENOMEM;
+
+ bp->ctx->types = types;
+ return 0;
+}
+
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -5469,27 +5694,19 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
bp->hwrm_cmd_resp_addr;
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
- int total_alloc_len;
int rc, i, tqm_rings;
if (!BNXT_CHIP_P5_P7(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
BNXT_VF(bp) ||
- bp->ctx)
+ bp->ctx->flags & BNXT_CTX_FLAG_INITED)
return 0;
+ ctx = bp->ctx;
HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT_SILENT();
- total_alloc_len = sizeof(*ctx);
- ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
- RTE_CACHE_LINE_SIZE);
- if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
- }
-
ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
ctx->qp_min_qp1_entries =
rte_le_to_cpu_16(resp->qp_min_qp1_entries);
@@ -5500,8 +5717,13 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
rte_le_to_cpu_16(resp->srq_max_l2_entries);
ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
- ctx->cq_max_l2_entries =
- rte_le_to_cpu_16(resp->cq_max_l2_entries);
+ if (BNXT_CHIP_P7(bp))
+ ctx->cq_max_l2_entries =
+ RTE_MIN(BNXT_P7_CQ_MAX_L2_ENT,
+ rte_le_to_cpu_16(resp->cq_max_l2_entries));
+ else
+ ctx->cq_max_l2_entries =
+ rte_le_to_cpu_16(resp->cq_max_l2_entries);
ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
ctx->vnic_max_vnic_entries =
@@ -5555,12 +5777,73 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
for (i = 0; i < tqm_rings; i++, ctx_pg++)
ctx->tqm_mem[i] = ctx_pg;
- bp->ctx = ctx;
ctx_err:
HWRM_UNLOCK();
return rc;
}
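+/* Program one V2 backing store type into firmware, one HWRM request per instance. */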
+int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+ struct bnxt_ctx_mem *ctxm)
+{
+ struct hwrm_func_backing_store_cfg_v2_input req = {0};
+ struct hwrm_func_backing_store_cfg_v2_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ int i, j, k;
+ uint32_t *p;
+ int rc = 0;
+ int w = 1;
+ int b = 1;
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG(INFO,
+ "Backing store config V2 can be issued on PF only\n");
+ return 0;
+ }
+
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (ctxm->instance_bmap)
+ b = ctxm->instance_bmap;
+
+ w = hweight32(b);
+
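+ /* Note: iteration assumes a dense instance bitmap starting at bit 0 (i spans [0, w)). */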
+ for (i = 0, j = 0; i < w && rc == 0; i++) {
+ if (!(b & (1 << i)))
+ continue;
+
+ HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG_V2, BNXT_USE_CHIMP_MB);
+ req.type = rte_cpu_to_le_16(ctxm->type);
+ req.entry_size = rte_cpu_to_le_16(ctxm->entry_size);
+ req.subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (k = 0, p = &req.split_entry_0; k < ctxm->split_entry_cnt; k++)
+ p[k] = rte_cpu_to_le_32(ctxm->split[k]);
+
+ req.instance = rte_cpu_to_le_16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ goto unlock;
+
+ req.num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.page_size_pbl_level,
+ &req.page_dir);
+ PMD_DRV_LOG(DEBUG,
+ "Backing store config V2 type:%s last %d, instance %d, hw %d\n",
+ bnxt_backing_store_types[req.type], ctxm->last, j, w);
+ if (ctxm->last && i == (w - 1))
+ req.flags =
+ rte_cpu_to_le_32(BACKING_STORE_CFG_V2_IN_FLG_CFG_ALL_DONE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+unlock:
+ HWRM_UNLOCK();
+ }
+ return rc;
+}
+
int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
{
struct hwrm_func_backing_store_cfg_input req = {0};
@@ -60,6 +60,8 @@ struct hwrm_func_qstats_output;
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAM4_LINK_SPEED_MASK
#define HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK \
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK
+#define BACKING_STORE_CFG_V2_IN_FLG_CFG_ALL_DONE \
+ HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE
#define HWRM_SPEC_CODE_1_8_4 0x10804
#define HWRM_SPEC_CODE_1_9_0 0x10900
@@ -355,4 +357,10 @@ void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index);
int bnxt_hwrm_config_host_mtu(struct bnxt *bp);
int bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp);
+int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+ struct bnxt_ctx_mem *ctxm);
+int bnxt_hwrm_func_backing_store_types_count(struct bnxt *bp);
+int bnxt_hwrm_func_backing_store_ctx_alloc(struct bnxt *bp, uint16_t types);
+int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp);
#endif
@@ -27,3 +27,13 @@ void bnxt_eth_hw_addr_random(uint8_t *mac_addr)
mac_addr[1] = 0x0a;
mac_addr[2] = 0xf7;
}
+
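+/* SWAR population count: number of set bits in a 32-bit word. */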
+uint8_t hweight32(uint32_t word32)
+{
+ uint32_t res = word32 - ((word32 >> 1) & 0x55555555);
+
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
@@ -17,4 +17,5 @@
int bnxt_check_zero_bytes(const uint8_t *bytes, int len);
void bnxt_eth_hw_addr_random(uint8_t *mac_addr);
+uint8_t hweight32(uint32_t word32);
#endif /* _BNXT_UTIL_H_ */