@@ -132,7 +132,6 @@ static const struct eth_dev_ops ark_eth_dev_ops = {
.dev_infos_get = eth_ark_dev_info_get,
.rx_queue_setup = eth_ark_dev_rx_queue_setup,
- .rx_queue_count = eth_ark_dev_rx_queue_count,
.tx_queue_setup = eth_ark_tx_queue_setup,
.link_update = eth_ark_dev_link_update,
@@ -318,6 +317,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
return -1;
dev->dev_ops = &ark_eth_dev_ops;
+ dev->rx_queue_count = eth_ark_dev_rx_queue_count;
dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
if (!dev->data->mac_addrs) {
@@ -313,10 +313,6 @@ static const struct eth_dev_ops atl_eth_dev_ops = {
.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
- .rx_queue_count = atl_rx_queue_count,
- .rx_descriptor_status = atl_dev_rx_descriptor_status,
- .tx_descriptor_status = atl_dev_tx_descriptor_status,
-
/* EEPROM */
.get_eeprom_length = atl_dev_get_eeprom_length,
.get_eeprom = atl_dev_get_eeprom,
@@ -373,6 +369,11 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &atl_eth_dev_ops;
+
+ eth_dev->rx_queue_count = atl_rx_queue_count;
+ eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
+
eth_dev->rx_pkt_burst = &atl_recv_pkts;
eth_dev->tx_pkt_burst = &atl_xmit_pkts;
eth_dev->tx_pkt_prepare = &atl_prep_pkts;
@@ -224,8 +224,6 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
.rxq_info_get = axgbe_rxq_info_get,
.txq_info_get = axgbe_txq_info_get,
.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
- .rx_descriptor_status = axgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = axgbe_dev_tx_descriptor_status,
.mtu_set = axgb_mtu_set,
};
@@ -1632,6 +1630,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;
+
/*
* For secondary processes, we don't initialise any further as primary
* has already done this work.
@@ -4233,9 +4233,6 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.dev_led_off = bnxt_dev_led_off_op,
.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
- .rx_queue_count = bnxt_rx_queue_count_op,
- .rx_descriptor_status = bnxt_rx_descriptor_status_op,
- .tx_descriptor_status = bnxt_tx_descriptor_status_op,
.rx_queue_start = bnxt_rx_queue_start,
.rx_queue_stop = bnxt_rx_queue_stop,
.tx_queue_start = bnxt_tx_queue_start,
@@ -5668,6 +5665,9 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
eth_dev->dev_ops = &bnxt_dev_ops;
+ eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
+ eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
+ eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
@@ -1303,7 +1303,6 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_setup = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
- .rx_queue_count = dpaa_dev_rx_queue_count,
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
.rxq_info_get = dpaa_rxq_info_get,
@@ -1766,6 +1765,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
/* Populate ethdev structure */
eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
@@ -2331,7 +2331,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.tx_queue_release = dpaa2_dev_tx_queue_release,
.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
- .rx_queue_count = dpaa2_dev_rx_queue_count,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
.flow_ctrl_set = dpaa2_flow_ctrl_set,
.mac_addr_add = dpaa2_dev_add_mac_addr,
@@ -2486,6 +2485,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
* plugged.
*/
eth_dev->dev_ops = &dpaa2_ethdev_ops;
+ eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
else if (dpaa2_get_devargs(dev->devargs,
@@ -176,9 +176,6 @@ static const struct eth_dev_ops eth_em_ops = {
.vlan_offload_set = eth_em_vlan_offload_set,
.rx_queue_setup = eth_em_rx_queue_setup,
.rx_queue_release = eth_em_rx_queue_release,
- .rx_queue_count = eth_em_rx_queue_count,
- .rx_descriptor_status = eth_em_rx_descriptor_status,
- .tx_descriptor_status = eth_em_tx_descriptor_status,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
@@ -249,6 +246,9 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
eth_dev->dev_ops = &eth_em_ops;
+ eth_dev->rx_queue_count = eth_em_rx_queue_count;
+ eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
@@ -380,9 +380,6 @@ static const struct eth_dev_ops eth_igb_ops = {
.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
.rx_queue_release = eth_igb_rx_queue_release,
- .rx_queue_count = eth_igb_rx_queue_count,
- .rx_descriptor_status = eth_igb_rx_descriptor_status,
- .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.tx_done_cleanup = eth_igb_tx_done_cleanup,
@@ -440,8 +437,6 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
- .rx_descriptor_status = eth_igb_rx_descriptor_status,
- .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.tx_done_cleanup = eth_igb_tx_done_cleanup,
@@ -752,6 +747,9 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
uint32_t ctrl_ext;
eth_dev->dev_ops = &eth_igb_ops;
+ eth_dev->rx_queue_count = eth_igb_rx_queue_count;
+ eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
@@ -947,6 +945,8 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &igbvf_eth_dev_ops;
+ eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
@@ -1141,7 +1141,6 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.tx_queue_stop = enicpmd_dev_tx_queue_stop,
.rx_queue_setup = enicpmd_dev_rx_queue_setup,
.rx_queue_release = enicpmd_dev_rx_queue_release,
- .rx_queue_count = enicpmd_dev_rx_queue_count,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
@@ -1278,6 +1277,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
ENICPMD_FUNC_TRACE();
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
+ eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
eth_dev->tx_pkt_prepare = &enic_prep_pkts;
@@ -2855,9 +2855,6 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.rx_queue_release = fm10k_rx_queue_release,
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
- .rx_queue_count = fm10k_dev_rx_queue_count,
- .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
- .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
@@ -3054,6 +3051,9 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &fm10k_eth_dev_ops;
+ dev->rx_queue_count = fm10k_dev_rx_queue_count;
+ dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
+ dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
dev->rx_pkt_burst = &fm10k_recv_pkts;
dev->tx_pkt_burst = &fm10k_xmit_pkts;
dev->tx_pkt_prepare = &fm10k_prep_pkts;
@@ -474,9 +474,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
.rx_queue_release = i40e_dev_rx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
@@ -1447,6 +1444,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &i40e_eth_dev_ops;
+ dev->rx_queue_count = i40e_dev_rx_queue_count;
+ dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
dev->tx_pkt_prepare = i40e_prep_pkts;
@@ -199,11 +199,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
@@ -1517,6 +1514,9 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
/* assign ops func pointer */
eth_dev->dev_ops = &i40evf_eth_dev_ops;
+ eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+ eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &i40e_recv_pkts;
eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
@@ -113,9 +113,6 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
.rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
.rxq_info_get = iavf_dev_rxq_info_get,
.txq_info_get = iavf_dev_txq_info_get,
- .rx_queue_count = iavf_dev_rxq_count,
- .rx_descriptor_status = iavf_dev_rx_desc_status,
- .tx_descriptor_status = iavf_dev_tx_desc_status,
.mtu_set = iavf_dev_mtu_set,
.rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
@@ -1335,6 +1332,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
/* assign ops func pointer */
eth_dev->dev_ops = &iavf_eth_dev_ops;
+ eth_dev->rx_queue_count = iavf_dev_rxq_count;
+ eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
+ eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
eth_dev->rx_pkt_burst = &iavf_recv_pkts;
eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
@@ -200,9 +200,6 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
.tx_burst_mode_get = ice_tx_burst_mode_get,
.get_eeprom_length = ice_get_eeprom_length,
.get_eeprom = ice_get_eeprom,
- .rx_queue_count = ice_rx_queue_count,
- .rx_descriptor_status = ice_rx_descriptor_status,
- .tx_descriptor_status = ice_tx_descriptor_status,
.stats_get = ice_stats_get,
.stats_reset = ice_stats_reset,
.xstats_get = ice_xstats_get,
@@ -2139,6 +2136,9 @@ ice_dev_init(struct rte_eth_dev *dev)
int ret;
dev->dev_ops = &ice_eth_dev_ops;
+ dev->rx_queue_count = ice_rx_queue_count;
+ dev->rx_descriptor_status = ice_rx_descriptor_status;
+ dev->tx_descriptor_status = ice_tx_descriptor_status;
dev->rx_pkt_burst = ice_recv_pkts;
dev->tx_pkt_burst = ice_xmit_pkts;
dev->tx_pkt_prepare = ice_prep_pkts;
@@ -272,9 +272,6 @@ static const struct eth_dev_ops eth_igc_ops = {
.rx_queue_setup = eth_igc_rx_queue_setup,
.rx_queue_release = eth_igc_rx_queue_release,
- .rx_queue_count = eth_igc_rx_queue_count,
- .rx_descriptor_status = eth_igc_rx_descriptor_status,
- .tx_descriptor_status = eth_igc_tx_descriptor_status,
.tx_queue_setup = eth_igc_tx_queue_setup,
.tx_queue_release = eth_igc_tx_queue_release,
.tx_done_cleanup = eth_igc_tx_done_cleanup,
@@ -1226,6 +1223,9 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &eth_igc_ops;
+ dev->rx_queue_count = eth_igc_rx_queue_count;
+ dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
+ dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
/*
* for secondary processes, we don't initialize any further as primary
@@ -545,9 +545,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_queue_count = ixgbe_dev_rx_queue_count,
- .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
@@ -621,8 +618,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
@@ -1089,6 +1084,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
ixgbe_dev_macsec_setting_reset(eth_dev);
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+ eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
+ eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
@@ -1568,6 +1566,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+ eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
@@ -585,6 +585,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
eth_dev->device = dpdk_dev;
eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
+ eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
err = mlx5_proc_priv_init(eth_dev);
if (err)
return NULL;
@@ -1192,6 +1194,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
eth_dev->rx_pkt_burst = removed_rx_burst;
eth_dev->tx_pkt_burst = removed_tx_burst;
eth_dev->dev_ops = &mlx5_os_dev_ops;
+ eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
+ eth_dev->rx_queue_count = mlx5_rx_queue_count;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
if (config->vf && config->vf_nl_en)
@@ -2363,13 +2368,10 @@ const struct eth_dev_ops mlx5_os_dev_ops = {
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
.filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
.tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .rx_queue_count = mlx5_rx_queue_count,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
.is_removed = mlx5_is_removed,
@@ -2394,8 +2396,6 @@ const struct eth_dev_ops mlx5_os_dev_sec_ops = {
.rx_queue_stop = mlx5_rx_queue_stop,
.tx_queue_start = mlx5_tx_queue_start,
.tx_queue_stop = mlx5_tx_queue_stop,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
@@ -2447,8 +2447,6 @@ const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
.filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
@@ -5131,6 +5131,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
dev->dev_ops = &mlx5_os_dev_ops_isolate;
else
dev->dev_ops = &mlx5_os_dev_ops;
+
+ dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+ dev->tx_descriptor_status = mlx5_tx_descriptor_status;
+
return 0;
}
@@ -871,11 +871,8 @@ static const struct eth_dev_ops hn_eth_dev_ops = {
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
.tx_done_cleanup = hn_dev_tx_done_cleanup,
- .tx_descriptor_status = hn_dev_tx_descriptor_status,
.rx_queue_setup = hn_dev_rx_queue_setup,
.rx_queue_release = hn_dev_rx_queue_release,
- .rx_queue_count = hn_dev_rx_queue_count,
- .rx_descriptor_status = hn_dev_rx_queue_status,
.link_update = hn_dev_link_update,
.stats_get = hn_dev_stats_get,
.stats_reset = hn_dev_stats_reset,
@@ -936,6 +933,9 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
vmbus = container_of(device, struct rte_vmbus_device, device);
eth_dev->dev_ops = &hn_eth_dev_ops;
+ eth_dev->rx_queue_count = hn_dev_rx_queue_count;
+ eth_dev->rx_descriptor_status = hn_dev_rx_queue_status;
+ eth_dev->tx_descriptor_status = hn_dev_tx_descriptor_status;
eth_dev->tx_pkt_burst = &hn_xmit_pkts;
eth_dev->rx_pkt_burst = &hn_recv_pkts;
@@ -2701,7 +2701,6 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .rx_queue_count = nfp_net_rx_queue_count,
.tx_queue_setup = nfp_net_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
@@ -2785,6 +2784,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
}
eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
@@ -2272,9 +2272,6 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
.txq_info_get = otx2_nix_txq_info_get,
.rx_burst_mode_get = otx2_rx_burst_mode_get,
.tx_burst_mode_get = otx2_tx_burst_mode_get,
- .rx_queue_count = otx2_nix_rx_queue_count,
- .rx_descriptor_status = otx2_nix_rx_descriptor_status,
- .tx_descriptor_status = otx2_nix_tx_descriptor_status,
.tx_done_cleanup = otx2_nix_tx_done_cleanup,
.set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
.pool_ops_supported = otx2_nix_pool_ops_supported,
@@ -2381,6 +2378,9 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
int rc, max_entries;
eth_dev->dev_ops = &otx2_eth_dev_ops;
+ eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
+ eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -2386,7 +2386,6 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
@@ -2431,7 +2430,6 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
@@ -2670,6 +2668,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
}
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+ eth_dev->rx_descriptor_status = qede_rx_descriptor_status;
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -1804,9 +1804,6 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.tx_queue_stop = sfc_tx_queue_stop,
.rx_queue_setup = sfc_rx_queue_setup,
.rx_queue_release = sfc_rx_queue_release,
- .rx_queue_count = sfc_rx_queue_count,
- .rx_descriptor_status = sfc_rx_descriptor_status,
- .tx_descriptor_status = sfc_tx_descriptor_status,
.rx_queue_intr_enable = sfc_rx_queue_intr_enable,
.rx_queue_intr_disable = sfc_rx_queue_intr_disable,
.tx_queue_setup = sfc_tx_queue_setup,
@@ -1962,6 +1959,9 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->dev_ops = &sfc_eth_dev_ops;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
return 0;
@@ -2001,9 +2001,6 @@ sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
- .rx_queue_count = sfc_rx_queue_count,
- .rx_descriptor_status = sfc_rx_descriptor_status,
- .tx_descriptor_status = sfc_tx_descriptor_status,
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
.rxq_info_get = sfc_rx_queue_info_get,
@@ -2069,6 +2066,9 @@ sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
return 0;
@@ -2029,7 +2029,6 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
.tx_queue_stop = nicvf_dev_tx_queue_stop,
.rx_queue_setup = nicvf_dev_rx_queue_setup,
.rx_queue_release = nicvf_dev_rx_queue_release,
- .rx_queue_count = nicvf_dev_rx_queue_count,
.tx_queue_setup = nicvf_dev_tx_queue_setup,
.tx_queue_release = nicvf_dev_tx_queue_release,
.dev_set_link_up = nicvf_dev_set_link_up,
@@ -2134,6 +2133,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &nicvf_eth_dev_ops;
+ eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -1385,7 +1385,6 @@ static const struct eth_dev_ops ops = {
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.tx_done_cleanup = eth_tx_done_cleanup,
- .rx_queue_count = eth_rx_queue_count,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -1447,6 +1446,7 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
data->all_multicast = 1;
eth_dev->dev_ops = &ops;
+ eth_dev->rx_queue_count = eth_rx_queue_count;
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_vhost_rx;
@@ -4545,11 +4545,11 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
if (queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ return (int)(*dev->rx_queue_count)(dev, queue_id);
}
#define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */
@@ -4604,10 +4604,10 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
if (queue_id >= dev->data->nb_rx_queues)
return -ENODEV;
#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
rxq = dev->data->rx_queues[queue_id];
- return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
+ return (*dev->rx_descriptor_status)(rxq, offset);
}
#define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */
@@ -4661,10 +4661,10 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
if (queue_id >= dev->data->nb_tx_queues)
return -ENODEV;
#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
txq = dev->data->tx_queues[queue_id];
- return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
+ return (*dev->tx_descriptor_status)(txq, offset);
}
/**
@@ -657,12 +657,6 @@ struct eth_dev_ops {
eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */
eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue. */
eth_queue_release_t rx_queue_release; /**< Release RX queue. */
- eth_rx_queue_count_t rx_queue_count;
- /**< Get the number of used RX descriptors. */
- eth_rx_descriptor_status_t rx_descriptor_status;
- /**< Check the status of a Rx descriptor. */
- eth_tx_descriptor_status_t tx_descriptor_status;
- /**< Check the status of a Tx descriptor. */
/*
* Static inline functions use functions ABOVE this comment.
* New dev_ops functions should be added BELOW to avoid breaking ABI.
@@ -778,6 +772,11 @@ struct rte_eth_dev {
eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
+
+ eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
+ eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */
+ eth_tx_descriptor_status_t tx_descriptor_status; /**< Check the status of a Tx descriptor. */
+
/**
* Next two fields are per-device data but *data is shared between
* primary and secondary processes and *process_private is per-process
This patch is a preparation to hide the 'struct eth_dev_ops' from applications by moving some device operations from 'struct eth_dev_ops' to 'struct rte_eth_dev'. Mentioned ethdev APIs are in the data path and implemented as inline because of performance reasons. Exposing 'struct eth_dev_ops' to applications is bad because it is a contract between ethdev and PMDs and does not really need to be known by applications; also, changes in the struct cause ABI breakages, which they shouldn't. To be able to both keep APIs inline and hide the 'struct eth_dev_ops', move the device operations used in ethdev inline APIs to 'struct rte_eth_dev', at the same level as the Rx/Tx burst functions. The list of dev_ops moved: eth_rx_queue_count_t rx_queue_count; eth_rx_descriptor_status_t rx_descriptor_status; eth_tx_descriptor_status_t tx_descriptor_status; Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com> --- drivers/net/ark/ark_ethdev.c | 2 +- drivers/net/atlantic/atl_ethdev.c | 9 +++++---- drivers/net/axgbe/axgbe_ethdev.c | 5 +++-- drivers/net/bnxt/bnxt_ethdev.c | 6 +++--- drivers/net/dpaa/dpaa_ethdev.c | 2 +- drivers/net/dpaa2/dpaa2_ethdev.c | 2 +- drivers/net/e1000/em_ethdev.c | 6 +++--- drivers/net/e1000/igb_ethdev.c | 10 +++++----- drivers/net/enic/enic_ethdev.c | 2 +- drivers/net/fm10k/fm10k_ethdev.c | 6 +++--- drivers/net/i40e/i40e_ethdev.c | 6 +++--- drivers/net/i40e/i40e_ethdev_vf.c | 6 +++--- drivers/net/iavf/iavf_ethdev.c | 6 +++--- drivers/net/ice/ice_ethdev.c | 6 +++--- drivers/net/igc/igc_ethdev.c | 6 +++--- drivers/net/ixgbe/ixgbe_ethdev.c | 10 +++++----- drivers/net/mlx5/linux/mlx5_os.c | 12 +++++------- drivers/net/mlx5/mlx5_flow.c | 4 ++++ drivers/net/netvsc/hn_ethdev.c | 6 +++--- drivers/net/nfp/nfp_net.c | 2 +- drivers/net/octeontx2/otx2_ethdev.c | 6 +++--- drivers/net/qede/qede_ethdev.c | 3 +-- drivers/net/sfc/sfc_ethdev.c | 12 ++++++------ drivers/net/thunderx/nicvf_ethdev.c | 2 +- drivers/net/vhost/rte_eth_vhost.c | 2 +- lib/librte_ethdev/rte_ethdev.h | 12 ++++++------ 
lib/librte_ethdev/rte_ethdev_core.h | 11 +++++------ 27 files changed, 82 insertions(+), 80 deletions(-)