On 9/23/22 16:45, skoteshwar@marvell.com wrote:
> From: Satha Rao <skoteshwar@marvell.com>
>
> The rate parameter modified to uint32_t, so that it can work
> for more than 64 Gbps.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Overall LGTM, but please update the release notes and clean up
the deprecation notice in the next version.
However, the patch requires Acks from bnxt and ixgbe
maintainers.
> diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
> index 77ecbef..4dc38a2 100644
> --- a/drivers/net/bnxt/rte_pmd_bnxt.c
> +++ b/drivers/net/bnxt/rte_pmd_bnxt.c
> @@ -172,12 +172,12 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
> }
>
> int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
> - uint16_t tx_rate, uint64_t q_msk)
> + uint32_t tx_rate, uint64_t q_msk)
The deprecation notice announces rte_eth_set_queue_rate_limit() changes,
but the above is a separate API which is not directly related.
I'm OK with the change, but it requires a maintainer Ack.
> {
> struct rte_eth_dev *eth_dev;
> struct rte_eth_dev_info dev_info;
> struct bnxt *bp;
> - uint16_t tot_rate = 0;
> + uint32_t tot_rate = 0;
> uint64_t idx;
> int rc;
>
[snip]
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
> index c09e9bf..17c820a 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -557,17 +557,14 @@ int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
>
> uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
> int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
> -int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
> - uint16_t queue_idx, uint16_t tx_rate);
> -int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
> - int mark_yellow, int mark_red,
> - struct rte_tm_error *error);
> -int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
> - int mark_yellow, int mark_red,
> - struct rte_tm_error *error);
> -int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
> - int mark_yellow, int mark_red,
> - struct rte_tm_error *error);
The above changes to these 3 functions look unrelated. Please avoid them.
> +int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
> + uint32_t tx_rate);
> +int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
> + int mark_red, struct rte_tm_error *error);
> +int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
> + int mark_red, struct rte_tm_error *error);
> +int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
> + int mark_red, struct rte_tm_error *error);
>
> /* MTR */
> int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
> diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
> index d45e70a..a36f45d 100644
[snip]
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 1dfad0e..9ff8ee0 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -2475,7 +2475,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
>
> int
> ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
> - uint16_t tx_rate, uint64_t q_msk)
> + uint32_t tx_rate, uint64_t q_msk)
Requires Ack from ixgbe maintainer
[snip]
Thanks, Andrew, for your comments. I have sent v3 addressing your review comments.
Thanks,
Satha.
-----Original Message-----
From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Sent: Monday, September 26, 2022 6:48 PM
To: Satha Koteswara Rao Kottidi <skoteshwar@marvell.com>; Aman Singh <aman.deep.singh@intel.com>; Yuying Zhang <yuying.zhang@intel.com>; Ajit Khaparde <ajit.khaparde@broadcom.com>; Somnath Kotur <somnath.kotur@broadcom.com>; Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>; Kiran Kumar Kokkilagadda <kirankumark@marvell.com>; Sunil Kumar Kori <skori@marvell.com>; Qiming Yang <qiming.yang@intel.com>; Wenjun Wu <wenjun1.wu@intel.com>; Jiawen Wu <jiawenwu@trustnetic.com>; Jian Wang <jianwang@trustnetic.com>; Thomas Monjalon <thomas@monjalon.net>; Ferruh Yigit <ferruh.yigit@xilinx.com>
Cc: dev@dpdk.org; ferruh.yigit@amd.com; bruce.richardson@intel.com; konstantin.v.ananyev@yandex.ru; Jerin Jacob Kollanukkaran <jerinj@marvell.com>
Subject: [EXT] Re: [PATCH v2] ethdev: queue rate parameter changed from 16b to 32b
External Email
----------------------------------------------------------------------
On 9/23/22 16:45, skoteshwar@marvell.com wrote:
> From: Satha Rao <skoteshwar@marvell.com>
>
> The rate parameter modified to uint32_t, so that it can work for more
> than 64 Gbps.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Overall LGTM, but please update the release notes and clean up the deprecation notice in the next version.
However, the patch requires Acks from bnxt and ixgbe maintainers.
> diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c
> b/drivers/net/bnxt/rte_pmd_bnxt.c index 77ecbef..4dc38a2 100644
> --- a/drivers/net/bnxt/rte_pmd_bnxt.c
> +++ b/drivers/net/bnxt/rte_pmd_bnxt.c
> @@ -172,12 +172,12 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
> }
>
> int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
> - uint16_t tx_rate, uint64_t q_msk)
> + uint32_t tx_rate, uint64_t q_msk)
The deprecation notice announces rte_eth_set_queue_rate_limit() changes, but the above is a separate API which is not directly related.
I'm OK with the change, but it requires a maintainer Ack.
> {
> struct rte_eth_dev *eth_dev;
> struct rte_eth_dev_info dev_info;
> struct bnxt *bp;
> - uint16_t tot_rate = 0;
> + uint32_t tot_rate = 0;
> uint64_t idx;
> int rc;
>
[snip]
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h
> b/drivers/net/cnxk/cnxk_ethdev.h index c09e9bf..17c820a 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -557,17 +557,14 @@ int cnxk_nix_timesync_write_time(struct
> rte_eth_dev *eth_dev,
>
> uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
> int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
> -int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
> - uint16_t queue_idx, uint16_t tx_rate);
> -int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
> - int mark_yellow, int mark_red,
> - struct rte_tm_error *error);
> -int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
> - int mark_yellow, int mark_red,
> - struct rte_tm_error *error);
> -int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
> - int mark_yellow, int mark_red,
> - struct rte_tm_error *error);
The above changes to these 3 functions look unrelated. Please avoid them.
> +int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
> + uint32_t tx_rate);
> +int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
> + int mark_red, struct rte_tm_error *error); int
> +cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
> + int mark_red, struct rte_tm_error *error); int
> +cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
> + int mark_red, struct rte_tm_error *error);
>
> /* MTR */
> int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops); diff
> --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c index
> d45e70a..a36f45d 100644
[snip]
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 1dfad0e..9ff8ee0 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -2475,7 +2475,7 @@ static int eth_ixgbevf_pci_remove(struct
> rte_pci_device *pci_dev)
>
> int
> ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
> - uint16_t tx_rate, uint64_t q_msk)
> + uint32_t tx_rate, uint64_t q_msk)
Requires Ack from ixgbe maintainer
[snip]
@@ -8106,7 +8106,7 @@ struct cmd_queue_rate_limit_result {
cmdline_fixed_string_t queue;
uint8_t queue_num;
cmdline_fixed_string_t rate;
- uint16_t rate_num;
+ uint32_t rate_num;
};
static void cmd_queue_rate_limit_parsed(void *parsed_result,
@@ -8147,7 +8147,7 @@ static void cmd_queue_rate_limit_parsed(void *parsed_result,
rate, "rate");
static cmdline_parse_token_num_t cmd_queue_rate_limit_ratenum =
TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result,
- rate_num, RTE_UINT16);
+ rate_num, RTE_UINT32);
static cmdline_parse_inst_t cmd_queue_rate_limit = {
.f = cmd_queue_rate_limit_parsed,
@@ -8174,7 +8174,7 @@ struct cmd_vf_rate_limit_result {
cmdline_fixed_string_t vf;
uint8_t vf_num;
cmdline_fixed_string_t rate;
- uint16_t rate_num;
+ uint32_t rate_num;
cmdline_fixed_string_t q_msk;
uint64_t q_msk_val;
};
@@ -8218,7 +8218,7 @@ static void cmd_vf_rate_limit_parsed(void *parsed_result,
rate, "rate");
static cmdline_parse_token_num_t cmd_vf_rate_limit_ratenum =
TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result,
- rate_num, RTE_UINT16);
+ rate_num, RTE_UINT32);
static cmdline_parse_token_string_t cmd_vf_rate_limit_q_msk =
TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
q_msk, "queue_mask");
@@ -5914,7 +5914,7 @@ struct igb_ring_desc_16_bytes {
}
int
-set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
+set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate)
{
int diag;
struct rte_eth_link link;
@@ -5942,7 +5942,7 @@ struct igb_ring_desc_16_bytes {
}
int
-set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
+set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk)
{
int diag = -ENOTSUP;
@@ -1097,8 +1097,8 @@ void port_rss_reta_info(portid_t port_id,
uint16_t nb_rx_desc, unsigned int socket_id,
struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
-int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
-int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
+int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate);
+int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate,
uint64_t q_msk);
int set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id,
@@ -172,12 +172,12 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
}
int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
+ uint32_t tx_rate, uint64_t q_msk)
{
struct rte_eth_dev *eth_dev;
struct rte_eth_dev_info dev_info;
struct bnxt *bp;
- uint16_t tot_rate = 0;
+ uint32_t tot_rate = 0;
uint64_t idx;
int rc;
@@ -184,7 +184,7 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
+ uint32_t tx_rate, uint64_t q_msk);
/**
* Get VF's statistics
@@ -557,17 +557,14 @@ int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
-int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t tx_rate);
-int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
- int mark_yellow, int mark_red,
- struct rte_tm_error *error);
-int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
- int mark_yellow, int mark_red,
- struct rte_tm_error *error);
-int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
- int mark_yellow, int mark_red,
- struct rte_tm_error *error);
+int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint32_t tx_rate);
+int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
+ int mark_red, struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
+ int mark_red, struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green, int mark_yellow,
+ int mark_red, struct rte_tm_error *error);
/* MTR */
int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
@@ -750,8 +750,8 @@ struct rte_tm_ops cnxk_tm_ops = {
}
int
-cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t tx_rate_mbps)
+cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint32_t tx_rate_mbps)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
@@ -2475,7 +2475,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
int
ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
+ uint32_t tx_rate, uint64_t q_msk)
{
struct ixgbe_hw *hw;
struct ixgbe_vf_info *vfinfo;
@@ -6090,7 +6090,7 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
int
ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate)
+ uint16_t queue_idx, uint32_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rf_dec, rf_int;
@@ -753,13 +753,13 @@ void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
int ixgbe_vt_check(struct ixgbe_hw *hw);
int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
+ uint32_t tx_rate, uint64_t q_msk);
bool is_ixgbe_supported(struct rte_eth_dev *dev);
int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t tx_rate);
+ uint32_t tx_rate);
int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
@@ -498,7 +498,7 @@
int
rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
+ uint32_t tx_rate, uint64_t q_msk)
{
struct rte_eth_dev *dev;
@@ -380,7 +380,7 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
* - (-EINVAL) if bad parameter.
*/
int rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
+ uint32_t tx_rate, uint64_t q_msk);
/**
* Set all the TCs' bandwidth weight.
@@ -3764,7 +3764,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate)
+ uint16_t queue_idx, uint32_t tx_rate)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t bcnrc_val;
@@ -586,7 +586,7 @@ int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
void txgbe_tm_conf_init(struct rte_eth_dev *dev);
void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t tx_rate);
+ uint32_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
@@ -598,7 +598,7 @@ typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
/** @internal Set queue Tx rate. */
typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
uint16_t queue_idx,
- uint16_t tx_rate);
+ uint32_t tx_rate);
/** @internal Add tunneling UDP port. */
typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
@@ -4388,7 +4388,7 @@ enum {
}
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
- uint16_t tx_rate)
+ uint32_t tx_rate)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
@@ -4165,7 +4165,7 @@ int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
* - (-EINVAL) if bad parameter.
*/
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
- uint16_t tx_rate);
+ uint32_t tx_rate);
/**
* Configuration of Receive Side Scaling hash computation of Ethernet device.