@@ -49,6 +49,8 @@ static void atl_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int atl_dev_supported_ptypes_set(struct rte_eth_dev *dev,
+ uint32_t ptype_mask);
static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -296,6 +298,7 @@ static const struct eth_dev_ops atl_eth_dev_ops = {
.fw_version_get = atl_fw_version_get,
.dev_infos_get = atl_dev_info_get,
.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = atl_dev_supported_ptypes_set,
.mtu_set = atl_dev_mtu_set,
@@ -1142,6 +1145,15 @@ atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+atl_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static void
atl_dev_delayed_handler(void *param)
{
@@ -2865,6 +2865,15 @@ bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
return ptypes;
}
+static int
+bnxt_dev_supported_ptypes_set_op(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
int reg_win)
{
@@ -3316,6 +3325,7 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
+ .dev_supported_ptypes_set = bnxt_dev_supported_ptypes_set_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
.get_eeprom = bnxt_get_eeprom_op,
.set_eeprom = bnxt_set_eeprom_op,
@@ -804,6 +804,15 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+static int
+cxgbe_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/* Update RSS hash configuration
*/
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
@@ -1081,6 +1090,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_configure = cxgbe_dev_configure,
.dev_infos_get = cxgbe_dev_info_get,
.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = cxgbe_dev_supported_ptypes_set,
.link_update = cxgbe_dev_link_update,
.dev_set_link_up = cxgbe_dev_set_link_up,
.dev_set_link_down = cxgbe_dev_set_link_down,
@@ -268,6 +268,15 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+dpaa_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
@@ -986,6 +995,7 @@ static struct eth_dev_ops dpaa_devops = {
.dev_close = dpaa_eth_dev_close,
.dev_infos_get = dpaa_eth_dev_info,
.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
+ .dev_supported_ptypes_set = dpaa_supported_ptypes_set,
.rx_queue_setup = dpaa_eth_rx_queue_setup,
.tx_queue_setup = dpaa_eth_tx_queue_setup,
@@ -751,6 +751,15 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+dpaa2_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/**
* Dpaa2 link Interrupt handler
*
@@ -1968,6 +1977,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.fw_version_get = dpaa2_fw_version_get,
.dev_infos_get = dpaa2_dev_info_get,
.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
+ .dev_supported_ptypes_set = dpaa2_supported_ptypes_set,
.mtu_set = dpaa2_dev_mtu_set,
.vlan_filter_set = dpaa2_vlan_filter_set,
.vlan_offload_set = dpaa2_vlan_offload_set,
@@ -105,6 +105,8 @@ static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
static void eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
+static int eth_igb_supported_ptypes_set(struct rte_eth_dev *dev,
+ uint32_t ptype_mask);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
@@ -372,6 +374,7 @@ static const struct eth_dev_ops eth_igb_ops = {
.fw_version_get = eth_igb_fw_version_get,
.dev_infos_get = eth_igb_infos_get,
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
+ .dev_supported_ptypes_set = eth_igb_supported_ptypes_set,
.mtu_set = eth_igb_mtu_set,
.vlan_filter_set = eth_igb_vlan_filter_set,
.vlan_tpid_set = eth_igb_vlan_tpid_set,
@@ -439,6 +442,7 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.vlan_filter_set = igbvf_vlan_filter_set,
.dev_infos_get = eth_igbvf_infos_get,
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
+ .dev_supported_ptypes_set = eth_igb_supported_ptypes_set,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
.rx_descriptor_done = eth_igb_rx_descriptor_done,
@@ -2320,6 +2324,15 @@ eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+eth_igb_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static void
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -80,6 +80,15 @@ enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
return ptypes;
}
+static int
+enetc_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
@@ -795,6 +804,7 @@ static const struct eth_dev_ops enetc_ops = {
.tx_queue_stop = enetc_tx_queue_stop,
.tx_queue_release = enetc_tx_queue_release,
.dev_supported_ptypes_get = enetc_supported_ptypes_get,
+ .dev_supported_ptypes_set = enetc_supported_ptypes_set,
};
/**
@@ -601,6 +601,15 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int enicpmd_dev_supported_ptypes_set(struct rte_eth_dev *dev,
+ uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
@@ -1064,6 +1073,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.queue_stats_mapping_set = NULL,
.dev_infos_get = enicpmd_dev_info_get,
.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = enicpmd_dev_supported_ptypes_set,
.mtu_set = enicpmd_mtu_set,
.vlan_filter_set = NULL,
.vlan_tpid_set = NULL,
@@ -971,6 +971,15 @@ fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return ret;
}
+static int
+fs_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
@@ -1235,6 +1244,7 @@ const struct eth_dev_ops failsafe_ops = {
.stats_reset = fs_stats_reset,
.dev_infos_get = fs_dev_infos_get,
.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = fs_dev_supported_ptypes_set,
.mtu_set = fs_mtu_set,
.vlan_filter_set = fs_vlan_filter_set,
.rx_queue_start = fs_rx_queue_start,
@@ -1489,6 +1489,15 @@ fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
}
#endif
+static int
+fm10k_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
@@ -2817,6 +2826,7 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.link_update = fm10k_link_update,
.dev_infos_get = fm10k_dev_infos_get,
.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = fm10k_dev_supported_ptypes_set,
.vlan_filter_set = fm10k_vlan_filter_set,
.vlan_offload_set = fm10k_vlan_offload_set,
.mac_addr_add = fm10k_macaddr_add,
@@ -456,6 +456,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = i40e_dev_supported_ptypes_set,
.vlan_filter_set = i40e_vlan_filter_set,
.vlan_tpid_set = i40e_vlan_tpid_set,
.vlan_offload_set = i40e_vlan_offload_set,
@@ -188,6 +188,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.dev_reset = i40evf_dev_reset,
.dev_infos_get = i40evf_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = i40e_dev_supported_ptypes_set,
.vlan_filter_set = i40evf_vlan_filter_set,
.vlan_offload_set = i40evf_vlan_offload_set,
.rx_queue_start = i40evf_dev_rx_queue_start,
@@ -1687,6 +1687,15 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+int
+i40e_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
i40e_dev_first_queue(uint16_t idx, void **queues, int num)
{
@@ -171,6 +171,7 @@ int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int i40e_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -40,6 +40,8 @@ static void iavf_dev_close(struct rte_eth_dev *dev);
static void iavf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int iavf_dev_supported_ptypes_set(struct rte_eth_dev *dev,
+ uint32_t ptype_mask);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void iavf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -88,6 +90,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
.dev_close = iavf_dev_close,
.dev_infos_get = iavf_dev_info_get,
.dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = iavf_dev_supported_ptypes_set,
.link_update = iavf_dev_link_update,
.stats_get = iavf_dev_stats_get,
.stats_reset = iavf_dev_stats_reset,
@@ -577,6 +580,15 @@ iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
return ptypes;
}
+static int
+iavf_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
int
iavf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
@@ -125,6 +125,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
.tx_queue_release = ice_tx_queue_release,
.dev_infos_get = ice_dev_info_get,
.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = ice_dev_supported_ptypes_set,
.link_update = ice_link_update,
.mtu_set = ice_mtu_set,
.mac_addr_set = ice_macaddr_set,
@@ -1535,6 +1535,15 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+int
+ice_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
int
ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
@@ -170,6 +170,7 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int ice_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
@@ -185,6 +185,8 @@ static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int ixgbe_dev_supported_ptypes_set(struct rte_eth_dev *dev,
+ uint32_t ptype_mask);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -532,6 +534,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.fw_version_get = ixgbe_fw_version_get,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = ixgbe_dev_supported_ptypes_set,
.mtu_set = ixgbe_dev_mtu_set,
.vlan_filter_set = ixgbe_vlan_filter_set,
.vlan_tpid_set = ixgbe_vlan_tpid_set,
@@ -615,6 +618,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.dev_infos_get = ixgbevf_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = ixgbe_dev_supported_ptypes_set,
.mtu_set = ixgbevf_dev_set_mtu,
.vlan_filter_set = ixgbevf_vlan_filter_set,
.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -3902,6 +3906,15 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+ixgbe_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
@@ -419,6 +419,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.fw_version_get = mlx4_fw_version_get,
.dev_infos_get = mlx4_dev_infos_get,
.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = mlx4_dev_supported_ptypes_set,
.vlan_filter_set = mlx4_vlan_filter_set,
.rx_queue_setup = mlx4_rx_queue_setup,
.tx_queue_setup = mlx4_tx_queue_setup,
@@ -227,6 +227,7 @@ int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int mlx4_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
int mlx4_is_removed(struct rte_eth_dev *dev);
/* mlx4_intr.c */
@@ -944,6 +944,27 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+/**
+ * DPDK callback to set the packet types that the application is interested in.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param ptype_mask
+ *   Packet type mask that the application is interested in.
+ *
+ * @return
+ *   Always 0: this driver performs no ptype-based filtering, so any
+ *   requested mask is accepted as a no-op.
+ */
+static int
+mlx4_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/**
* Check if mlx4 device was removed.
*
@@ -937,6 +937,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
.dev_infos_get = mlx5_dev_infos_get,
.read_clock = mlx5_read_clock,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = mlx5_dev_supported_ptypes_set,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
@@ -998,6 +999,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = mlx5_dev_supported_ptypes_set,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
@@ -689,6 +689,7 @@ void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int mlx5_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
@@ -695,6 +695,27 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+/**
+ * Set the packet types that the application is interested in.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param ptype_mask
+ *   Packet type mask that the application is interested in.
+ *
+ * @return
+ *   Always 0: this driver performs no ptype-based filtering, so any
+ *   requested mask is accepted as a no-op.
+ */
+static int
+mlx5_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/**
* Retrieve the master device for representor in the same switch domain.
*
@@ -213,6 +213,27 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
return ptypes;
}
+/**
+ * Set the packet types that the application is interested in.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param ptype_mask
+ *   Packet type mask that the application is interested in.
+ *
+ * @return
+ *   Always 0: this driver performs no ptype-based filtering, so any
+ *   requested mask is accepted as a no-op.
+ */
+static int
+mvneta_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/**
* DPDK callback to change the MTU.
*
@@ -758,6 +779,7 @@ static const struct eth_dev_ops mvneta_ops = {
.stats_reset = mvneta_stats_reset,
.dev_infos_get = mvneta_dev_infos_get,
.dev_supported_ptypes_get = mvneta_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = mvneta_dev_supported_ptypes_set,
.rxq_info_get = mvneta_rxq_info_get,
.txq_info_get = mvneta_txq_info_get,
.rx_queue_setup = mvneta_rx_queue_setup,
@@ -1488,6 +1488,27 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
return ptypes;
}
+/**
+ * Set the packet types that the application is interested in.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param ptype_mask
+ *   Packet type mask that the application is interested in.
+ *
+ * @return
+ *   Always 0: this driver performs no ptype-based filtering, so any
+ *   requested mask is accepted as a no-op.
+ */
+static int
+mrvl_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/**
* DPDK callback to get information about specific receive queue.
*
@@ -2048,6 +2069,7 @@ static const struct eth_dev_ops mrvl_ops = {
.xstats_get_names = mrvl_xstats_get_names,
.dev_infos_get = mrvl_dev_infos_get,
.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = mrvl_dev_supported_ptypes_set,
.rxq_info_get = mrvl_rxq_info_get,
.txq_info_get = mrvl_txq_info_get,
.vlan_filter_set = mrvl_vlan_filter_set,
@@ -818,7 +818,8 @@ static const struct eth_dev_ops hn_eth_dev_ops = {
.dev_stop = hn_dev_stop,
.dev_close = hn_dev_close,
.dev_infos_get = hn_dev_info_get,
- .dev_supported_ptypes_get = hn_vf_supported_ptypes,
+ .dev_supported_ptypes_get = hn_vf_supported_ptypes_get,
+ .dev_supported_ptypes_set = hn_vf_supported_ptypes_set,
.promiscuous_enable = hn_dev_promiscuous_enable,
.promiscuous_disable = hn_dev_promiscuous_disable,
.allmulticast_enable = hn_dev_allmulticast_enable,
@@ -206,7 +206,8 @@ void hn_vf_info_get(struct hn_data *hv,
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int hn_vf_configure(struct rte_eth_dev *dev,
const struct rte_eth_conf *dev_conf);
-const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
+const uint32_t *hn_vf_supported_ptypes_get(struct rte_eth_dev *dev);
+int hn_vf_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
int hn_vf_start(struct rte_eth_dev *dev);
void hn_vf_reset(struct rte_eth_dev *dev);
void hn_vf_stop(struct rte_eth_dev *dev);
@@ -302,7 +302,7 @@ int hn_vf_configure(struct rte_eth_dev *dev,
return ret;
}
-const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
+const uint32_t *hn_vf_supported_ptypes_get(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_dev *vf_dev;
@@ -317,6 +317,14 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
return ptypes;
}
+int hn_vf_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
int hn_vf_start(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
@@ -1295,6 +1295,15 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+nfp_net_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static uint32_t
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
@@ -2705,6 +2714,7 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.stats_reset = nfp_net_stats_reset,
.dev_infos_get = nfp_net_infos_get,
.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
+ .dev_supported_ptypes_set = nfp_net_supported_ptypes_set,
.mtu_set = nfp_net_dev_mtu_set,
.mac_addr_set = nfp_set_mac_addr,
.vlan_offload_set = nfp_net_vlan_offload_set,
@@ -955,6 +955,15 @@ octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+octeontx_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool)
{
@@ -986,6 +995,7 @@ static const struct eth_dev_ops octeontx_dev_ops = {
.rx_queue_setup = octeontx_dev_rx_queue_setup,
.rx_queue_release = octeontx_dev_rx_queue_release,
.dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = octeontx_dev_supported_ptypes_set,
.pool_ops_supported = octeontx_pool_ops,
};
@@ -1620,6 +1620,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
.dev_set_link_up = otx2_nix_dev_set_link_up,
.dev_set_link_down = otx2_nix_dev_set_link_down,
.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
+ .dev_supported_ptypes_set = otx2_nix_supported_ptypes_set,
.dev_reset = otx2_nix_dev_reset,
.stats_get = otx2_nix_dev_stats_get,
.stats_reset = otx2_nix_dev_stats_reset,
@@ -502,6 +502,7 @@ void *otx2_nix_fastpath_lookup_mem_get(void);
/* PTYPES */
const uint32_t *otx2_nix_supported_ptypes_get(struct rte_eth_dev *dev);
+int otx2_nix_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
/* Mac address handling */
int otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
@@ -61,6 +61,15 @@ otx2_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+int
+otx2_nix_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
/*
* +------------------ +------------------ +
* | | IL4 | IL3| IL2 | TU | L4 | L3 | L2 |
@@ -1945,6 +1945,15 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+static int
+qede_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
*rss_caps = 0;
@@ -2338,6 +2347,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.flow_ctrl_set = qede_flow_ctrl_set,
.flow_ctrl_get = qede_flow_ctrl_get,
.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = qede_dev_supported_ptypes_set,
.rss_hash_update = qede_rss_hash_update,
.rss_hash_conf_get = qede_rss_hash_conf_get,
.reta_update = qede_rss_reta_update,
@@ -2376,6 +2386,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.vlan_offload_set = qede_vlan_offload_set,
.vlan_filter_set = qede_vlan_filter_set,
.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = qede_dev_supported_ptypes_set,
.rss_hash_update = qede_rss_hash_update,
.rss_hash_conf_get = qede_rss_hash_conf_get,
.reta_update = qede_rss_reta_update,
@@ -194,6 +194,15 @@ sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
}
+static int
+sfc_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
@@ -1782,6 +1791,7 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.xstats_get_names = sfc_xstats_get_names,
.dev_infos_get = sfc_dev_infos_get,
.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = sfc_dev_supported_ptypes_set,
.mtu_set = sfc_dev_set_mtu,
.rx_queue_start = sfc_rx_queue_start,
.rx_queue_stop = sfc_rx_queue_stop,
@@ -1987,6 +1997,7 @@ sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = sfc_dev_supported_ptypes_set,
.rx_queue_count = sfc_rx_queue_count,
.rx_descriptor_done = sfc_rx_descriptor_done,
.rx_descriptor_status = sfc_rx_descriptor_status,
@@ -1561,6 +1561,15 @@ tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
return ptypes;
}
+static int
+tap_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_fc_conf *fc_conf)
@@ -1671,6 +1680,7 @@ static const struct eth_dev_ops ops = {
.stats_get = tap_stats_get,
.stats_reset = tap_stats_reset,
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = tap_dev_supported_ptypes_set,
.rss_hash_update = tap_rss_hash_update,
.filter_ctrl = tap_dev_filter_ctrl,
};
@@ -362,6 +362,15 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return ptypes;
}
+static int
+nicvf_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
@@ -1987,6 +1996,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
.promiscuous_enable = nicvf_dev_promisc_enable,
.dev_infos_get = nicvf_dev_info_get,
.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = nicvf_dev_supported_ptypes_set,
.mtu_set = nicvf_dev_set_mtu,
.vlan_offload_set = nicvf_vlan_offload_set,
.reta_update = nicvf_dev_reta_update,
@@ -89,6 +89,8 @@ static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int
+vmxnet3_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
@@ -126,6 +128,7 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
+ .dev_supported_ptypes_set = vmxnet3_dev_supported_ptypes_set,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
@@ -1205,6 +1208,15 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+vmxnet3_dev_supported_ptypes_set(struct rte_eth_dev *dev, uint32_t ptype_mask)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(ptype_mask);
+
+ return 0;
+}
+
static int
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{