@@ -855,10 +855,14 @@ struct bnxt {
uint8_t port_cnt;
uint8_t vxlan_port_cnt;
uint8_t geneve_port_cnt;
+ uint8_t ecpri_port_cnt;
uint16_t vxlan_port;
uint16_t geneve_port;
+ uint16_t ecpri_port;
uint16_t vxlan_fw_dst_port_id;
uint16_t geneve_fw_dst_port_id;
+ uint16_t ecpri_fw_dst_port_id;
+ uint16_t ecpri_upar_in_use;
uint32_t fw_ver;
uint32_t hwrm_spec_code;
@@ -2405,6 +2405,20 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
tunnel_type =
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
break;
+ case RTE_ETH_TUNNEL_TYPE_ECPRI:
+ if (bp->ecpri_port_cnt) {
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->ecpri_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->ecpri_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI;
+ break;
default:
PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
@@ -2423,6 +2437,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE)
bp->geneve_port_cnt++;
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI)
+ bp->ecpri_port_cnt++;
+
return rc;
}
@@ -2474,6 +2492,23 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
port = bp->geneve_fw_dst_port_id;
break;
+ case RTE_ETH_TUNNEL_TYPE_ECPRI:
+ if (!bp->ecpri_port_cnt) {
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->ecpri_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->ecpri_port);
+ return -EINVAL;
+ }
+ if (--bp->ecpri_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI;
+ port = bp->ecpri_fw_dst_port_id;
+ break;
default:
PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
@@ -2969,6 +2969,10 @@ bnxt_free_tunnel_ports(struct bnxt *bp)
if (bp->geneve_port_cnt)
bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+
+ if (bp->ecpri_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->ecpri_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI);
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
@@ -4075,6 +4079,12 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
rte_le_to_cpu_16(resp->tunnel_dst_port_id);
bp->geneve_port = port;
break;
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI:
+ bp->ecpri_fw_dst_port_id =
+ rte_le_to_cpu_16(resp->tunnel_dst_port_id);
+ bp->ecpri_port = port;
+ bp->ecpri_upar_in_use = resp->upar_in_use;
+ break;
default:
break;
}
@@ -4142,6 +4152,13 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
bp->geneve_port_cnt = 0;
}
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI) {
+ bp->ecpri_port = 0;
+ bp->ecpri_upar_in_use = 0;
+ bp->ecpri_port_cnt = 0;
+ }
+
return rc;
}
@@ -150,10 +150,14 @@ bnxt_check_pkt_needs_ts(struct rte_mbuf *m)
struct rte_ether_hdr _eth_hdr;
uint16_t eth_type, proto;
uint32_t off = 0;
-
+ /*
+ * Check that the received packet is an eCPRI packet
+ */
eth_hdr = rte_pktmbuf_read(m, off, sizeof(_eth_hdr), &_eth_hdr);
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
off += sizeof(*eth_hdr);
+ if (eth_type == RTE_ETHER_TYPE_ECPRI)
+ return true;
/* Check for single tagged and double tagged VLANs */
if (eth_type == RTE_ETHER_TYPE_VLAN) {
const struct rte_vlan_hdr *vh;
@@ -164,6 +168,8 @@ bnxt_check_pkt_needs_ts(struct rte_mbuf *m)
return false;
off += sizeof(*vh);
proto = rte_be_to_cpu_16(vh->eth_proto);
+ if (proto == RTE_ETHER_TYPE_ECPRI)
+ return true;
if (proto == RTE_ETHER_TYPE_VLAN) {
const struct rte_vlan_hdr *vh;
struct rte_vlan_hdr vh_copy;
@@ -173,6 +179,8 @@ bnxt_check_pkt_needs_ts(struct rte_mbuf *m)
return false;
off += sizeof(*vh);
proto = rte_be_to_cpu_16(vh->eth_proto);
+ if (proto == RTE_ETHER_TYPE_ECPRI)
+ return true;
}
}
return false;
@@ -258,7 +258,8 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
{
uint16_t hwrm_type = 0;
- if (rte_type & RTE_ETH_RSS_IPV4)
+ if ((rte_type & RTE_ETH_RSS_IPV4) ||
+ (rte_type & RTE_ETH_RSS_ECPRI))
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
@@ -277,7 +278,7 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
{
uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
- bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+ bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_ECPRI));
bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
RTE_ETH_RSS_NONFRAG_IPV6_UDP |
RTE_ETH_RSS_NONFRAG_IPV4_TCP |
@@ -543,12 +543,15 @@ bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type,
case BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN:
hwtype = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
break;
+ case BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI:
+ hwtype = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI;
+ break;
default:
BNXT_TF_DBG(ERR, "Tunnel Type (%d) invalid\n", type);
return -EINVAL;
}
- if (!udp_port) {
+ if (!udp_port && type != BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI) {
/* Free based on the handle */
if (!handle) {
BNXT_TF_DBG(ERR, "Free with invalid handle\n");
@@ -589,7 +592,7 @@ bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type,
if (!rc) {
ulp_global_tunnel_db[type].ref_cnt++;
ulp_global_tunnel_db[type].dport = udp_port;
- bnxt_pmd_global_reg_data_to_hndl(port_id, 0,
+ bnxt_pmd_global_reg_data_to_hndl(port_id, bp->ecpri_upar_in_use,
type, handle);
}
}
@@ -19,6 +19,7 @@ struct bnxt_global_tunnel_info {
enum bnxt_global_register_tunnel_type {
BNXT_GLOBAL_REGISTER_TUNNEL_UNUSED = 0,
BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN,
+ BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI,
BNXT_GLOBAL_REGISTER_TUNNEL_MAX
};
@@ -456,6 +456,7 @@ bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
bnxt_ulp_vxlan_ip_port_set(ulp_ctx, info[i].vxlan_ip_port);
bnxt_ulp_vxlan_port_set(ulp_ctx, info[i].vxlan_port);
+ bnxt_ulp_ecpri_udp_port_set(ulp_ctx, info[i].ecpri_udp_port);
/* set the shared session support from firmware */
fw = info[i].upgrade_fw_update;
@@ -479,6 +480,29 @@ bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
return 0;
}
+/* Function to set the eCPRI UDP port into the context. */
+int
+bnxt_ulp_ecpri_udp_port_set(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t ecpri_udp_port)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return -EINVAL;
+
+ ulp_ctx->cfg_data->ecpri_udp_port = ecpri_udp_port;
+
+ return 0;
+}
+
+/* Function to retrieve the eCPRI UDP port from the context. */
+unsigned int
+bnxt_ulp_ecpri_udp_port_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return 0;
+
+ return (unsigned int)ulp_ctx->cfg_data->ecpri_udp_port;
+}
+
/* Function to set the number for vxlan_ip (custom vxlan) port into the context */
int
bnxt_ulp_vxlan_ip_port_set(struct bnxt_ulp_context *ulp_ctx,
@@ -113,6 +113,7 @@ struct bnxt_ulp_data {
struct bnxt_flow_app_tun_ent app_tun[BNXT_ULP_MAX_TUN_CACHE_ENTRIES];
uint32_t vxlan_port;
uint32_t vxlan_ip_port;
+ uint32_t ecpri_udp_port;
uint8_t hu_reg_state;
uint8_t hu_reg_cnt;
uint32_t hu_session_type;
@@ -367,12 +368,19 @@ bnxt_ulp_vxlan_port_set(struct bnxt_ulp_context *ulp_ctx,
uint32_t vxlan_port);
unsigned int
bnxt_ulp_vxlan_port_get(struct bnxt_ulp_context *ulp_ctx);
+
int
bnxt_ulp_vxlan_ip_port_set(struct bnxt_ulp_context *ulp_ctx,
uint32_t vxlan_ip_port);
unsigned int
bnxt_ulp_vxlan_ip_port_get(struct bnxt_ulp_context *ulp_ctx);
+int
+bnxt_ulp_ecpri_udp_port_set(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t ecpri_udp_port);
+unsigned int
+bnxt_ulp_ecpri_udp_port_get(struct bnxt_ulp_context *ulp_ctx);
+
int32_t
bnxt_flow_meter_init(struct bnxt *bp);
@@ -391,5 +399,4 @@ bnxt_ulp_ha_reg_cnt_get(struct bnxt_ulp_context *ulp_ctx);
struct tf*
bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type);
-
#endif /* _BNXT_ULP_H_ */
@@ -3298,6 +3298,11 @@ ulp_mapper_global_res_free(struct bnxt_ulp_context *ulp __rte_unused,
rc = bnxt_pmd_global_tunnel_set(port_id, ttype, dport,
&handle);
break;
+ case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_ECPRI:
+ ttype = BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI;
+ rc = bnxt_pmd_global_tunnel_set(port_id, ttype, dport,
+ &handle);
+ break;
default:
rc = -EINVAL;
BNXT_TF_DBG(ERR, "Invalid ulp global resource type %d\n",
@@ -3362,6 +3367,19 @@ ulp_mapper_global_register_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return rc;
}
break;
+ case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_ECPRI:
+ tmp_data = ulp_blob_data_get(&data, &data_len);
+ udp_port = *((uint16_t *)tmp_data);
+ udp_port = tfp_be_to_cpu_16(udp_port);
+ ttype = BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI;
+
+ rc = bnxt_pmd_global_tunnel_set(parms->port_id, ttype,
+ udp_port, &handle);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to set eCPRI UDP port\n");
+ return rc;
+ }
+ break;
default:
rc = -EINVAL;
BNXT_TF_DBG(ERR, "Invalid ulp global resource type %d\n",
@@ -408,6 +408,10 @@ struct bnxt_ulp_rte_hdr_info ulp_hdr_info[] = {
.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
.proto_hdr_func = NULL
},
+ [RTE_FLOW_ITEM_TYPE_ECPRI] = {
+ .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
+ .proto_hdr_func = ulp_rte_ecpri_hdr_handler
+ },
[RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR] = {
.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
.proto_hdr_func = ulp_rte_port_hdr_handler
@@ -143,7 +143,7 @@ bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
hdr_info = &ulp_vendor_hdr_info[item->type -
BNXT_RTE_FLOW_ITEM_TYPE_END];
} else {
- if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
+ if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
goto hdr_parser_error;
hdr_info = &ulp_hdr_info[item->type];
}
@@ -613,6 +613,10 @@ ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
has_vlan_mask = 1;
has_vlan = 1;
+ } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
+ /* Update the hdr_bitmap with eCPRI */
+ ULP_BITMAP_SET(param->hdr_fp_bit.bits,
+ BNXT_ULP_HDR_BIT_O_ECPRI);
} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
/* Update the hdr_bitmap with RoE */
ULP_BITMAP_SET(param->hdr_fp_bit.bits,
@@ -1661,6 +1665,120 @@ ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
return BNXT_TF_RC_SUCCESS;
}
+/* Function to handle the parsing of RTE Flow item ECPRI Header. */
+int32_t
+ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params)
+{
+ const struct rte_flow_item_ecpri *ecpri_spec = item->spec;
+ const struct rte_flow_item_ecpri *ecpri_mask = item->mask;
+ struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask;
+ struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec;
+ struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask;
+ struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
+ uint32_t idx = 0, cnt;
+ uint32_t size;
+
+ if (ulp_rte_prsr_fld_size_validate(params, &idx,
+ BNXT_ULP_PROTO_HDR_ECPRI_NUM)) {
+ BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* eCPRI over UDP (i.e. beneath an L4 header) is not supported for now */
+ cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
+ if (cnt >= 1) {
+ BNXT_TF_DBG(ERR, "Parse Err: L4 header stack >= 2 not supported\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ if (!ecpri_spec || !ecpri_mask)
+ goto parser_set_ecpri_hdr_bit;
+
+ memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec));
+ memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask));
+
+ p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32);
+ p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32);
+
+ /*
+ * Init eCPRI spec+mask to correct defaults, also clear masks of fields
+ * we ignore in the TCAM.
+ */
+
+ l_ecpri_spec.hdr.common.size = 0;
+ l_ecpri_spec.hdr.common.c = 0;
+ l_ecpri_spec.hdr.common.res = 0;
+ l_ecpri_spec.hdr.common.revision = 1;
+ l_ecpri_mask.hdr.common.size = 0;
+ l_ecpri_mask.hdr.common.c = 1;
+ l_ecpri_mask.hdr.common.res = 0;
+ l_ecpri_mask.hdr.common.revision = 0xf;
+
+ switch (p_ecpri_spec->hdr.common.type) {
+ case RTE_ECPRI_MSG_TYPE_IQ_DATA:
+ l_ecpri_mask.hdr.type0.seq_id = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_BIT_SEQ:
+ l_ecpri_mask.hdr.type1.seq_id = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
+ l_ecpri_mask.hdr.type2.seq_id = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_GEN_DATA:
+ l_ecpri_mask.hdr.type3.seq_id = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_RM_ACC:
+ l_ecpri_mask.hdr.type4.rr = 0;
+ l_ecpri_mask.hdr.type4.rw = 0;
+ l_ecpri_mask.hdr.type4.rma_id = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_DLY_MSR:
+ l_ecpri_spec.hdr.type5.act_type = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_RMT_RST:
+ l_ecpri_spec.hdr.type6.rst_op = 0;
+ break;
+
+ case RTE_ECPRI_MSG_TYPE_EVT_IND:
+ l_ecpri_spec.hdr.type7.evt_type = 0;
+ l_ecpri_spec.hdr.type7.seq = 0;
+ l_ecpri_spec.hdr.type7.number = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32);
+ p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32);
+
+ /* Type */
+ size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32);
+ ulp_rte_prsr_fld_mask(params, &idx, size,
+ ulp_deference_struct(p_ecpri_spec, hdr.common.u32),
+ ulp_deference_struct(p_ecpri_mask, hdr.common.u32),
+ ULP_PRSR_ACT_DEFAULT);
+
+ /* PC/RTC/MSR_ID */
+ size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]);
+ ulp_rte_prsr_fld_mask(params, &idx, size,
+ ulp_deference_struct(p_ecpri_spec, hdr.dummy),
+ ulp_deference_struct(p_ecpri_mask, hdr.dummy),
+ ULP_PRSR_ACT_DEFAULT);
+
+parser_set_ecpri_hdr_bit:
+ /* Update the hdr_bitmap with eCPRI */
+ ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI);
+ return BNXT_TF_RC_SUCCESS;
+}
+
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
@@ -149,6 +149,11 @@ int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
struct ulp_rte_parser_params *params);
+/* Function to handle the parsing of RTE Flow item ECPRI Header. */
+int32_t
+ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params);
+
/* Function to handle the parsing of RTE Flow item void Header. */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item,
@@ -29,6 +29,7 @@
#define BNXT_ULP_PROTO_HDR_VXLAN_NUM 4
#define BNXT_ULP_PROTO_HDR_GRE_NUM 2
#define BNXT_ULP_PROTO_HDR_ICMP_NUM 5
+#define BNXT_ULP_PROTO_HDR_ECPRI_NUM 2
#define BNXT_ULP_PROTO_HDR_MAX 128
#define BNXT_ULP_PROTO_HDR_ENCAP_MAX 64
#define BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX 1
@@ -364,6 +365,7 @@ struct bnxt_ulp_app_capabilities_info {
uint8_t app_id;
uint32_t vxlan_port;
uint32_t vxlan_ip_port;
+ uint32_t ecpri_udp_port;
enum bnxt_ulp_device_id device_id;
uint32_t upgrade_fw_update;
uint8_t ha_pool_id;