@@ -43,7 +43,8 @@ static int atl_dev_stats_reset(struct rte_eth_dev *dev);
static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
size_t fw_size);
-static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev,
+ size_t *no_of_elements);
static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -1132,7 +1133,7 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static const uint32_t *
-atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+atl_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -1144,11 +1145,12 @@ atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_ICMP,
- RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == atl_recv_pkts)
+ if (dev->rx_pkt_burst == atl_recv_pkts) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -93,7 +93,7 @@ static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
-const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int
@@ -1454,7 +1454,7 @@ axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->free_thresh;
}
const uint32_t *
-axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -1481,11 +1481,12 @@ axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_SCTP,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == axgbe_recv_pkts)
+ if (dev->rx_pkt_burst == axgbe_recv_pkts) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -3435,7 +3435,7 @@ bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
}
static const uint32_t *
-bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
+bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER_VLAN,
@@ -3449,12 +3449,12 @@ bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_ICMP,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
if (!dev->rx_pkt_burst)
return NULL;
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -613,7 +613,7 @@ int cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset);
uint32_t cnxk_nix_rx_queue_count(void *rxq);
/* Lookup configuration */
-const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev, size_t *no_of_elements);
void *cnxk_nix_fastpath_lookup_mem_get(void);
/* Devargs */
@@ -9,7 +9,7 @@
#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ + MEMPOOL_TBL_SZ)
const uint32_t *
-cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev, size_t *no_of_elements)
{
RTE_SET_USED(eth_dev);
@@ -47,10 +47,10 @@ cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
RTE_PTYPE_INNER_L4_TCP, /* LH */
RTE_PTYPE_INNER_L4_UDP, /* LH */
RTE_PTYPE_INNER_L4_SCTP, /* LH */
- RTE_PTYPE_INNER_L4_ICMP, /* LH */
- RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L4_ICMP, /* LH */
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -273,7 +273,8 @@ cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
static const uint32_t *
-cpfl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+cpfl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
+ size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -284,9 +285,9 @@ cpfl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_ICMP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -1149,16 +1149,17 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
}
const uint32_t *
-cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_UNKNOWN
};
- if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
+ if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -51,7 +51,7 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev, size_t *no_of_elements);
int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
const uint64_t *ids, uint64_t *values,
unsigned int n);
@@ -348,7 +348,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
}
static const uint32_t *
-dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -369,8 +369,10 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+ if (dev->rx_pkt_burst == dpaa_eth_queue_rx) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -1081,7 +1081,7 @@ dpaa2_dev_rx_queue_count(void *rx_queue)
}
static const uint32_t *
-dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/*todo -= add more types */
@@ -1094,13 +1094,14 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_ICMP,
- RTE_PTYPE_UNKNOWN
};
if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
dev->rx_pkt_burst == dpaa2_dev_rx ||
- dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
+ dev->rx_pkt_burst == dpaa2_dev_loopback_rx) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -104,7 +104,8 @@ static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
char *fw_version, size_t fw_size);
static int eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
+static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev,
+ size_t *no_of_elements);
static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
@@ -2257,7 +2258,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static const uint32_t *
-eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
+eth_igb_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/* refers to igb_rxd_pkt_info_to_pkt_type() */
@@ -2274,12 +2275,13 @@ eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L3_IPV6_EXT,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
- dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
+ dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -85,7 +85,7 @@ enetc_dev_stop(struct rte_eth_dev *dev)
}
static const uint32_t *
-enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -95,9 +95,9 @@ enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_ICMP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -511,7 +511,8 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
return 0;
}
-static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev,
+ size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -522,7 +523,6 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_NONFRAG,
- RTE_PTYPE_UNKNOWN
};
static const uint32_t ptypes_overlay[] = {
RTE_PTYPE_L2_ETHER,
@@ -541,16 +541,18 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_UDP,
RTE_PTYPE_INNER_L4_FRAG,
RTE_PTYPE_INNER_L4_NONFRAG,
- RTE_PTYPE_UNKNOWN
};
if (dev->rx_pkt_burst != rte_eth_pkt_burst_dummy &&
dev->rx_pkt_burst != NULL) {
struct enic *enic = pmd_priv(dev);
- if (enic->overlay_offload)
+ if (enic->overlay_offload) {
+ *no_of_elements = RTE_DIM(ptypes_overlay);
return ptypes_overlay;
- else
+ } else {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
}
return NULL;
}
@@ -1282,7 +1282,7 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
}
static const uint32_t *
-fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+fs_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
struct sub_device *sdev;
struct rte_eth_dev *edev;
@@ -1308,7 +1308,7 @@ fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
* We just return the ptypes of the device of highest
* priority, usually the PREFERRED device.
*/
- ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
+ ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev, no_of_elements);
unlock:
fs_unlock(dev, 0);
return ret;
@@ -1446,7 +1446,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
static const uint32_t *
-fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
if (dev->rx_pkt_burst == fm10k_recv_pkts ||
dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
@@ -1459,9 +1459,9 @@ fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L3_IPV6_EXT,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
} else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
@@ -1477,9 +1477,9 @@ fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_TUNNEL_NVGRE,
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_TUNNEL_GRE,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes_vec);
return ptypes_vec;
}
@@ -1487,7 +1487,7 @@ fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
#else
static const uint32_t *
-fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
return NULL;
}
@@ -1971,7 +1971,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
}
const uint32_t *
-hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -1998,7 +1998,6 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_ICMP,
RTE_PTYPE_TUNNEL_GRENAT,
RTE_PTYPE_TUNNEL_NVGRE,
- RTE_PTYPE_UNKNOWN
};
static const uint32_t adv_layout_ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -2026,7 +2025,6 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_SCTP,
RTE_PTYPE_INNER_L4_ICMP,
- RTE_PTYPE_UNKNOWN
};
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2034,10 +2032,13 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
- if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT)) {
+ *no_of_elements = RTE_DIM(adv_layout_ptypes);
return adv_layout_ptypes;
- else
+ } else {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
}
return NULL;
@@ -776,7 +776,7 @@ uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
__rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode);
-const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
@@ -1823,7 +1823,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
const uint32_t *
-i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/* refers to i40e_rxd_pkt_type_mapping() */
@@ -1851,7 +1851,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_SCTP,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
if (dev->rx_pkt_burst == i40e_recv_pkts ||
@@ -1866,8 +1865,10 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 ||
#endif
dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
- dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -190,7 +190,7 @@ int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
-const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -97,7 +97,8 @@ static int iavf_dev_close(struct rte_eth_dev *dev);
static int iavf_dev_reset(struct rte_eth_dev *dev);
static int iavf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev,
+ size_t *no_of_elements);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -1217,7 +1218,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static const uint32_t *
-iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
+ size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -1228,8 +1230,8 @@ iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -1864,7 +1864,7 @@ ice_dcf_dev_reset(struct rte_eth_dev *dev)
}
static const uint32_t *
-ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -1875,8 +1875,8 @@ ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -2200,7 +2200,7 @@ ice_recv_scattered_pkts(void *rx_queue,
}
const uint32_t *
-ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -2231,7 +2231,6 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_SCTP,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
static const uint32_t ptypes_comms[] = {
@@ -2262,13 +2261,15 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_TUNNEL_GTPC,
RTE_PTYPE_TUNNEL_GTPU,
RTE_PTYPE_L2_ETHER_PPPOE,
- RTE_PTYPE_UNKNOWN
};
- if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
+ if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) {
+ *no_of_elements = RTE_DIM(ptypes_comms);
ptypes = ptypes_comms;
- else
+ } else {
+ *no_of_elements = RTE_DIM(ptypes_os);
ptypes = ptypes_os;
+ }
if (dev->rx_pkt_burst == ice_recv_pkts ||
dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
@@ -271,7 +271,7 @@ int ice_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
-const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
uint32_t rxdid);
@@ -232,7 +232,7 @@ idpf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
static const uint32_t *
-idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -243,9 +243,9 @@ idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_SCTP,
RTE_PTYPE_L4_ICMP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -207,7 +207,8 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
-static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
+static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev,
+ size_t *no_of_elements);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
@@ -1650,7 +1651,7 @@ eth_igc_led_off(struct rte_eth_dev *dev)
}
static const uint32_t *
-eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
+eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/* refers to rx_desc_pkt_info_to_pkt_type() */
@@ -1667,9 +1668,9 @@ eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L3_IPV6_EXT,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -619,7 +619,7 @@ const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
};
const uint32_t *
-ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
/* See ionic_ptype_table[] */
static const uint32_t ptypes[] = {
@@ -631,9 +631,9 @@ ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L3_IPV6,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -54,7 +54,7 @@ void ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
int ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
-const uint32_t *ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm);
@@ -191,7 +191,8 @@ static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev,
+ size_t *no_of_elements);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -3978,7 +3979,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static const uint32_t *
-ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/* For non-vec functions,
@@ -3999,19 +4000,22 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L3_IPV6_EXT,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
- dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+ dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
#if defined(RTE_ARCH_X86) || defined(__ARM_NEON)
if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
- dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
#endif
return NULL;
}
@@ -387,7 +387,7 @@ mana_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
}
static const uint32_t *
-mana_supported_ptypes(struct rte_eth_dev *dev __rte_unused)
+mana_supported_ptypes(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -396,9 +396,9 @@ mana_supported_ptypes(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -229,7 +229,7 @@ int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int mlx4_is_removed(struct rte_eth_dev *dev);
/* mlx4_intr.c */
@@ -934,7 +934,7 @@ mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* NULL otherwise.
*/
const uint32_t *
-mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
@@ -944,7 +944,6 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
static const uint32_t ptypes_l2tun[] = {
/* refers to rxq_cq_to_pkt_type() */
@@ -956,15 +955,17 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
- RTE_PTYPE_UNKNOWN
};
struct mlx4_priv *priv = dev->data->dev_private;
if (dev->rx_pkt_burst == mlx4_rx_burst) {
- if (priv->hw_csum_l2tun)
+ if (priv->hw_csum_l2tun) {
+ *no_of_elements = RTE_DIM(ptypes_l2tun);
return ptypes_l2tun;
- else
+ } else {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
}
return NULL;
}
@@ -2090,7 +2090,7 @@ uint16_t mlx5_representor_id_encode(const struct mlx5_switch_info *info,
enum rte_eth_representor_type hpf_type);
int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
-const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
struct rte_eth_hairpin_cap *cap);
@@ -579,7 +579,7 @@ mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
* A pointer to the supported Packet types array.
*/
const uint32_t *
-mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
@@ -596,14 +596,15 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L4_FRAG,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
if (dev->rx_pkt_burst == mlx5_rx_burst ||
dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
dev->rx_pkt_burst == mlx5_rx_burst_vec ||
- dev->rx_pkt_burst == mlx5_rx_burst_mprq_vec)
+ dev->rx_pkt_burst == mlx5_rx_burst_mprq_vec) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -246,7 +246,7 @@ int hn_vf_info_get(struct hn_data *hv,
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int hn_vf_configure_locked(struct rte_eth_dev *dev,
const struct rte_eth_conf *dev_conf);
-const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
+const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev, size_t *no_of_elements);
int hn_vf_start(struct rte_eth_dev *dev);
int hn_vf_close(struct rte_eth_dev *dev);
int hn_vf_stop(struct rte_eth_dev *dev);
@@ -466,7 +466,7 @@ int hn_vf_configure_locked(struct rte_eth_dev *dev,
return ret;
}
-const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
+const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev, size_t *no_of_elements)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_dev *vf_dev;
@@ -475,7 +475,7 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
- ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
+ ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev, no_of_elements);
rte_rwlock_read_unlock(&hv->vf_lock);
return ptypes;
@@ -1266,7 +1266,7 @@ nfp_net_common_init(struct rte_pci_device *pci_dev,
}
const uint32_t *
-nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
struct nfp_net_hw *net_hw;
static const uint32_t ptypes[] = {
@@ -1309,6 +1309,7 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
return NULL;
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -229,7 +229,7 @@ int nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
int nfp_net_xstats_reset(struct rte_eth_dev *dev);
int nfp_net_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-const uint32_t *nfp_net_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void nfp_net_params_setup(struct nfp_net_hw *hw);
@@ -1864,13 +1864,13 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
const uint32_t *
-ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
if (dev->rx_pkt_burst == ngbe_recv_pkts ||
dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
- return ngbe_get_supported_ptypes();
+ return ngbe_get_supported_ptypes(no_of_elements);
return NULL;
}
@@ -328,7 +328,7 @@ struct rte_ngbe_xstats_name_off {
unsigned int offset;
};
-const uint32_t *ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
@@ -92,7 +92,7 @@ static u32 ngbe_ptype_lookup[NGBE_PTID_MAX] __rte_cache_aligned = {
TPTE(0xCD, ETHER, IPV6, NONE, IP, NONE, IPV6, SCTP),
};
-u32 *ngbe_get_supported_ptypes(void)
+u32 *ngbe_get_supported_ptypes(size_t *no_of_elements)
{
static u32 ptypes[] = {
/* For non-vec functions,
@@ -114,6 +114,7 @@ u32 *ngbe_get_supported_ptypes(void)
RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -110,7 +110,7 @@ struct rte_ngbe_ptype {
#define RTE_PTYPE_L2_ETHER_EAPOL RTE_PTYPE_L2_ETHER
#define RTE_PTYPE_L2_ETHER_FILTER RTE_PTYPE_L2_ETHER
-u32 *ngbe_get_supported_ptypes(void);
+u32 *ngbe_get_supported_ptypes(size_t *no_of_elements);
u32 ngbe_decode_ptype(u8 ptid);
u8 ngbe_encode_ptype(u32 ptype);
@@ -1467,7 +1467,7 @@ octeontx_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
}
static const uint32_t *
-octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L3_IPV4,
@@ -1477,11 +1477,12 @@ octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_FRAG,
- RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == octeontx_recv_pkts)
+ if (dev->rx_pkt_burst == octeontx_recv_pkts) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -509,7 +509,7 @@ pfe_tx_queue_setup(struct rte_eth_dev *dev,
}
static const uint32_t *
-pfe_supported_ptypes_get(struct rte_eth_dev *dev)
+pfe_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
/*todo -= add more types */
@@ -525,8 +525,10 @@ pfe_supported_ptypes_get(struct rte_eth_dev *dev)
};
if (dev->rx_pkt_burst == pfe_recv_pkts ||
- dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
+ dev->rx_pkt_burst == pfe_recv_pkts_on_intr) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -2054,7 +2054,7 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
}
static const uint32_t *
-qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -2075,13 +2075,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
RTE_PTYPE_INNER_L4_FRAG,
- RTE_PTYPE_UNKNOWN
};
if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
- eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
+ eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -199,7 +199,7 @@ typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
/** Get packet types recognized/classified */
typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
- uint32_t tunnel_encaps);
+ uint32_t tunnel_encaps, size_t *no_of_elements);
/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
@@ -134,7 +134,7 @@ sfc_ef10_ev_qprime(volatile void *qprime, unsigned int read_ptr,
}
-const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps);
+const uint32_t *sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps, size_t *no_of_elements);
#ifdef __cplusplus
@@ -665,7 +665,7 @@ sfc_ef100_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
static const uint32_t *
-sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
+sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps, size_t *no_of_elements)
{
static const uint32_t ef100_native_ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -684,9 +684,9 @@ sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
RTE_PTYPE_INNER_L4_FRAG,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ef100_native_ptypes);
return ef100_native_ptypes;
}
@@ -471,7 +471,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
const uint32_t *
-sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
+sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps, size_t *no_of_elements)
{
static const uint32_t ef10_native_ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -483,7 +483,6 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
static const uint32_t ef10_overlay_ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -505,7 +504,6 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
RTE_PTYPE_INNER_L4_FRAG,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
/*
@@ -517,6 +515,7 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
1u << EFX_TUNNEL_PROTOCOL_GENEVE |
1u << EFX_TUNNEL_PROTOCOL_NVGRE):
+ *no_of_elements = RTE_DIM(ef10_overlay_ptypes);
return ef10_overlay_ptypes;
default:
SFC_GENERIC_LOG(ERR,
@@ -524,6 +523,7 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
tunnel_encaps);
/* FALLTHROUGH */
case 0:
+ *no_of_elements = RTE_DIM(ef10_native_ptypes);
return ef10_native_ptypes;
}
}
@@ -194,11 +194,11 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static const uint32_t *
-sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
- return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
+ return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps, no_of_elements);
}
static int
@@ -194,7 +194,7 @@ sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
}
static const uint32_t *
-sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
+sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -202,9 +202,9 @@ sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -1778,7 +1778,7 @@ tap_intr_handle_set(struct rte_eth_dev *dev, int set)
}
static const uint32_t*
-tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_INNER_L2_ETHER,
@@ -1803,9 +1803,9 @@ tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_SCTP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -379,7 +379,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static const uint32_t *
-nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
size_t copied;
static uint32_t ptypes[32];
@@ -414,6 +414,7 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
/* All Ptypes are supported in all Rx functions. */
+ *no_of_elements = copied / sizeof(ptypes[0]);
return ptypes;
}
@@ -2727,13 +2727,13 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
const uint32_t *
-txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
if (dev->rx_pkt_burst == txgbe_recv_pkts ||
dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
- return txgbe_get_supported_ptypes();
+ return txgbe_get_supported_ptypes(no_of_elements);
return NULL;
}
@@ -704,7 +704,7 @@ struct rte_txgbe_xstats_name_off {
unsigned int offset;
};
-const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
@@ -186,7 +186,7 @@ static u32 txgbe_ptype_lookup[TXGBE_PTID_MAX] __rte_cache_aligned = {
TPTE(0xFD, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV6, SCTP),
};
-u32 *txgbe_get_supported_ptypes(void)
+u32 *txgbe_get_supported_ptypes(size_t *no_of_elements)
{
static u32 ptypes[] = {
/* For non-vec functions,
@@ -205,9 +205,9 @@ u32 *txgbe_get_supported_ptypes(void)
RTE_PTYPE_INNER_L3_IPV6_EXT,
RTE_PTYPE_INNER_L4_TCP,
RTE_PTYPE_INNER_L4_UDP,
- RTE_PTYPE_UNKNOWN
};
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
}
@@ -185,7 +185,7 @@ struct rte_txgbe_ptype {
#define RTE_PTYPE_L2_ETHER_EAPOL RTE_PTYPE_L2_ETHER
#define RTE_PTYPE_L2_ETHER_FILTER RTE_PTYPE_L2_ETHER
-u32 *txgbe_get_supported_ptypes(void);
+u32 *txgbe_get_supported_ptypes(size_t *no_of_elements);
u32 txgbe_decode_ptype(u8 ptid);
u8 txgbe_encode_ptype(u32 ptype);
@@ -88,7 +88,7 @@ static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
static int vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
char *fw_version, size_t fw_size);
static const uint32_t *
-vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements);
static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vid, int on);
@@ -1615,16 +1615,17 @@ vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
}
static const uint32_t *
-vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
+ if (dev->rx_pkt_burst == vmxnet3_recv_pkts) {
+ *no_of_elements = RTE_DIM(ptypes);
return ptypes;
+ }
return NULL;
}
@@ -447,8 +447,22 @@ typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
typedef int (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-/** @internal Get supported ptypes of an Ethernet device. */
-typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
+/**
+ * @internal
+ * Function used to get supported ptypes of an Ethernet device.
+ *
+ * @param dev
+ * ethdev handle of port.
+ *
+ * @param no_of_elements
+ * number of ptypes elements. Must be initialized to 0.
+ *
+ * @retval
+ * Success, array of ptypes elements and valid no_of_elements > 0.
+ * Failures, NULL.
+ */
+typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev,
+ size_t *no_of_elements);
/**
* @internal
@@ -3854,9 +3854,11 @@ int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
uint32_t *ptypes, int num)
{
- int i, j;
+ size_t i;
+ int j;
struct rte_eth_dev *dev;
const uint32_t *all_ptypes;
+ size_t no_of_elements = 0;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -3870,12 +3872,12 @@ rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
return 0;
- all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
+ all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev, &no_of_elements);
if (!all_ptypes)
return 0;
- for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
+ for (i = 0, j = 0; i < no_of_elements; ++i)
if (all_ptypes[i] & ptype_mask) {
if (j < num) {
ptypes[j] = all_ptypes[i];
@@ -3905,8 +3907,10 @@ rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
const uint32_t *all_ptypes;
struct rte_eth_dev *dev;
uint32_t unused_mask;
- unsigned int i, j;
+ size_t i;
+ unsigned int j;
int ret;
+ size_t no_of_elements = 0;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -3945,7 +3949,7 @@ rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
goto ptype_unknown;
}
- all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
+ all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev, &no_of_elements);
if (all_ptypes == NULL) {
ret = 0;
goto ptype_unknown;
@@ -3956,7 +3960,7 @@ rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
* set_ptypes array is insufficient fill it partially.
*/
for (i = 0, j = 0; set_ptypes != NULL &&
- (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
+ (i < no_of_elements); ++i) {
if (ptype_mask & all_ptypes[i]) {
if (j < num - 1) {
set_ptypes[j] = all_ptypes[i];
@@ -3971,9 +3975,9 @@ rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
}
}
if (set_ptypes != NULL && j < num)
set_ptypes[j] = RTE_PTYPE_UNKNOWN;

return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
ptype_unknown: