@@ -643,6 +643,9 @@ struct bnxt {
#define BNXT_FLAG_DFLT_MAC_SET BIT(26)
#define BNXT_FLAG_TRUFLOW_EN BIT(27)
#define BNXT_FLAG_GFID_ENABLE BIT(28)
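+/* Without RFS ring table index support, each redirected flow needs its own VNIC */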
+#define BNXT_FLAG_RFS_NEEDS_VNIC BIT(29)
+#define BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2 BIT(30)
+#define BNXT_RFS_NEEDS_VNIC(bp) ((bp)->flags & BNXT_FLAG_RFS_NEEDS_VNIC)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->flags & BNXT_FLAG_NPAR_PF)
@@ -748,11 +748,17 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
+ /* Always set up the default VNIC (VNIC 0) first */
+ rc = bnxt_setup_one_vnic(bp, 0);
+ if (rc)
+ goto err_out;
/* VNIC configuration */
- for (i = 0; i < bp->nr_vnics; i++) {
- rc = bnxt_setup_one_vnic(bp, i);
- if (rc)
- goto err_out;
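+ /* The remaining VNICs back flow redirection; they are needed only
+ * when FW cannot steer ntuple flows directly by Rx ring index.
+ */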
+ if (BNXT_RFS_NEEDS_VNIC(bp)) {
+ for (i = 1; i < bp->nr_vnics; i++) {
+ rc = bnxt_setup_one_vnic(bp, i);
+ if (rc)
+ goto err_out;
+ }
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
@@ -4698,6 +4704,10 @@ static int bnxt_init_fw(struct bnxt *bp)
if (rc)
return rc;
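+ /* Discover whether FW can steer ntuple flows by Rx ring index */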
+ rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
+ if (rc)
+ return rc;
+
bnxt_hwrm_port_mac_qcfg(bp);
bnxt_hwrm_parent_pf_qcfg(bp);
@@ -1056,6 +1056,13 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
}
PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
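+ /* FW can steer by ring index: point the ntuple filter straight at
+ * the requested Rx ring and skip per-flow VNIC allocation.
+ */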
+ if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
+ filter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
+ filter->dst_id = act_q->index;
+ goto skip_vnic_alloc;
+ }
+
vnic_id = attr->group;
if (!vnic_id) {
PMD_DRV_LOG(DEBUG, "Group id is 0\n");
@@ -1127,7 +1134,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG,
"Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
filter->dst_id = vnic->fw_vnic_id;
-
+skip_vnic_alloc:
/* For ntuple filter, create the L2 filter with default VNIC.
* The user specified redirect queue will be set while creating
* the ntuple filter in hardware.
@@ -1808,7 +1815,10 @@ bnxt_flow_create(struct rte_eth_dev *dev,
}
}
- vnic = find_matching_vnic(bp, filter);
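+ /* With ring index steering the flow stays on the default VNIC;
+ * otherwise find the VNIC backing the destination queue.
+ */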
+ if (BNXT_RFS_NEEDS_VNIC(bp))
+ vnic = find_matching_vnic(bp, filter);
+ else
+ vnic = BNXT_GET_DEFAULT_VNIC(bp);
done:
if (!ret || update_flow) {
flow->filter = filter;
@@ -5834,3 +5834,35 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
rep_bp->vf_id);
return rc;
}
+
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+ uint32_t flags = 0;
+ int rc = 0;
+
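+ /* Nothing to do if FW never advertised advanced flow management */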
+ if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
+ return 0;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
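+ /* Read the flags out of the shared HWRM response buffer before unlocking */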
+ flags = rte_le_to_cpu_32(resp->flags);
+ HWRM_UNLOCK();
+
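+ /* Prefer ring table index steering (v2); without it, every
+ * redirected flow needs a dedicated VNIC.
+ */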
+ if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
+ bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
+ else
+ bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
+
+ return rc;
+}
@@ -55,8 +55,8 @@ struct hwrm_func_qstats_output;
#define HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK \
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK
-#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN \
-HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED
+#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED \
+ HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED
#define HWRM_SPEC_CODE_1_8_4 0x10804
#define HWRM_SPEC_CODE_1_9_0 0x10900
@@ -300,4 +300,5 @@ int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
uint16_t *first_vf_id);
int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep);
int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep);
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp);
#endif