[v2,09/14] net/idpf: add support for RSS
Checks
Commit Message
Add RSS support.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 118 ++++++++++++++++++++++++++++++++-
drivers/net/idpf/idpf_ethdev.h | 17 +++++
drivers/net/idpf/idpf_vchnl.c | 96 +++++++++++++++++++++++++++
3 files changed, 229 insertions(+), 2 deletions(-)
Comments
On 9/5/22 13:58, Junfeng Guo wrote:
> Add RSS support.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> ---
> drivers/net/idpf/idpf_ethdev.c | 118 ++++++++++++++++++++++++++++++++-
> drivers/net/idpf/idpf_ethdev.h | 17 +++++
> drivers/net/idpf/idpf_vchnl.c | 96 +++++++++++++++++++++++++++
> 3 files changed, 229 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
> index 6310745684..b1e2ca21ca 100644
> --- a/drivers/net/idpf/idpf_ethdev.c
> +++ b/drivers/net/idpf/idpf_ethdev.c
> @@ -87,6 +87,7 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
> dev_info->min_mtu = RTE_ETHER_MIN_MTU;
>
> + dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
> dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
> dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
> RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
> @@ -319,9 +320,116 @@ idpf_init_vport(struct rte_eth_dev *dev)
> }
>
> static int
> -idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)
> +idpf_config_rss(struct idpf_vport *vport)
> {
> - return 0;
> + int ret;
> +
> + ret = idpf_set_rss_key(vport);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to configure RSS key");
> + return ret;
> + }
> +
> + ret = idpf_set_rss_lut(vport);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
> + return ret;
> + }
> +
> + ret = idpf_set_rss_hash(vport);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
> + return ret;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +idpf_init_rss(struct idpf_vport *vport)
> +{
> + struct rte_eth_rss_conf *rss_conf;
> + uint16_t i, nb_q, lut_size;
> + int ret = 0;
> +
> + rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf;
> + nb_q = vport->dev_data->nb_rx_queues;
> +
> + vport->rss_key = (uint8_t *)rte_zmalloc("rss_key",
> + vport->rss_key_size, 0);
> + if (!vport->rss_key) {
> + PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
> + ret = -ENOMEM;
> + goto err_key;
> + }
> +
> + lut_size = vport->rss_lut_size;
> + vport->rss_lut = (uint32_t *)rte_zmalloc("rss_lut",
> + sizeof(uint32_t) * lut_size, 0);
> + if (!vport->rss_lut) {
> + PMD_INIT_LOG(ERR, "Failed to allocate RSS lut");
> + ret = -ENOMEM;
> + goto err_lut;
> + }
> +
> + if (!rss_conf->rss_key) {
> + for (i = 0; i < vport->rss_key_size; i++)
> + vport->rss_key[i] = (uint8_t)rte_rand();
IMHO it is a bad idea. A random key could result in a very poor
distribution of the traffic, and it makes the hashing non-reproducible
across restarts. Consider using a well-known default key instead.
> + } else {
> + rte_memcpy(vport->rss_key, rss_conf->rss_key,
> + RTE_MIN(rss_conf->rss_key_len,
> + vport->rss_key_size));
It looks like rss_key_len from rss_conf is not saved anywhere.
How do you know how many bytes of the RSS key are really
valid/initialized? If the user-supplied key is shorter than
rss_key_size, the tail of the key silently stays zero.
> + }
> +
> + for (i = 0; i < lut_size; i++)
> + vport->rss_lut[i] = i % nb_q;
> +
> + vport->rss_hf = IECM_DEFAULT_RSS_HASH_EXPANDED;
Hm, what about rss_conf->rss_hf?
> +
> + ret = idpf_config_rss(vport);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to configure RSS");
> + goto err_cfg;
> + }
> +
> + return ret;
> +
> +err_cfg:
> + rte_free(vport->rss_lut);
> + vport->rss_lut = NULL;
> +err_lut:
> + rte_free(vport->rss_key);
> + vport->rss_key = NULL;
> +err_key:
> + return ret;
> +}
> +
> +static int
> +idpf_dev_configure(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct idpf_adapter *adapter = vport->adapter;
> + int ret = 0;
> +
> + if (dev->data->nb_tx_queues > IDPF_DEFAULT_TXQ_NUM ||
> + dev->data->nb_rx_queues > IDPF_DEFAULT_RXQ_NUM) {
> + PMD_INIT_LOG(ERR, "Invalid queue number.");
> + return -EINVAL;
> + }
The above is already checked at the ethdev level, provided that the
maximum numbers of Rx/Tx queues are reported correctly in
idpf_dev_info_get().
> +
> + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
> + dev->data->dev_conf.rxmode.offloads |=
> + RTE_ETH_RX_OFFLOAD_RSS_HASH;
This looks wrong. Offload configuration is the user's choice; the PMD
should not silently enable RTE_ETH_RX_OFFLOAD_RSS_HASH behind the
application's back.
> +
> + if (adapter->caps->rss_caps) {
> + ret = idpf_init_rss(vport);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to init rss");
> + return ret;
> + }
> + }
> +
> + return ret;
> }
>
> static int
[snip]
@@ -87,6 +87,7 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
@@ -319,9 +320,116 @@ idpf_init_vport(struct rte_eth_dev *dev)
}
static int
-idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)
+idpf_config_rss(struct idpf_vport *vport)
{
- return 0;
+ int ret;
+
+ ret = idpf_set_rss_key(vport);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+ return ret;
+ }
+
+ ret = idpf_set_rss_lut(vport);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+ return ret;
+ }
+
+ ret = idpf_set_rss_hash(vport);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+idpf_init_rss(struct idpf_vport *vport)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ uint16_t i, nb_q, lut_size;
+ int ret = 0;
+
+ rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = vport->dev_data->nb_rx_queues;
+
+ vport->rss_key = (uint8_t *)rte_zmalloc("rss_key",
+ vport->rss_key_size, 0);
+ if (!vport->rss_key) {
+ PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
+ ret = -ENOMEM;
+ goto err_key;
+ }
+
+ lut_size = vport->rss_lut_size;
+ vport->rss_lut = (uint32_t *)rte_zmalloc("rss_lut",
+ sizeof(uint32_t) * lut_size, 0);
+ if (!vport->rss_lut) {
+ PMD_INIT_LOG(ERR, "Failed to allocate RSS lut");
+ ret = -ENOMEM;
+ goto err_lut;
+ }
+
+ if (!rss_conf->rss_key) {
+ for (i = 0; i < vport->rss_key_size; i++)
+ vport->rss_key[i] = (uint8_t)rte_rand();
+ } else {
+ rte_memcpy(vport->rss_key, rss_conf->rss_key,
+ RTE_MIN(rss_conf->rss_key_len,
+ vport->rss_key_size));
+ }
+
+ for (i = 0; i < lut_size; i++)
+ vport->rss_lut[i] = i % nb_q;
+
+ vport->rss_hf = IECM_DEFAULT_RSS_HASH_EXPANDED;
+
+ ret = idpf_config_rss(vport);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure RSS");
+ goto err_cfg;
+ }
+
+ return ret;
+
+err_cfg:
+ rte_free(vport->rss_lut);
+ vport->rss_lut = NULL;
+err_lut:
+ rte_free(vport->rss_key);
+ vport->rss_key = NULL;
+err_key:
+ return ret;
+}
+
+static int
+idpf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ int ret = 0;
+
+ if (dev->data->nb_tx_queues > IDPF_DEFAULT_TXQ_NUM ||
+ dev->data->nb_rx_queues > IDPF_DEFAULT_RXQ_NUM) {
+ PMD_INIT_LOG(ERR, "Invalid queue number.");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |=
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ if (adapter->caps->rss_caps) {
+ ret = idpf_init_rss(vport);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init rss");
+ return ret;
+ }
+ }
+
+ return ret;
}
static int
@@ -420,6 +528,12 @@ idpf_dev_close(struct rte_eth_dev *dev)
idpf_dev_stop(dev);
idpf_destroy_vport(vport);
+ rte_free(vport->rss_lut);
+ vport->rss_lut = NULL;
+
+ rte_free(vport->rss_key);
+ vport->rss_key = NULL;
+
adapter->cur_vports &= ~BIT(vport->devarg_id);
rte_free(vport);
@@ -53,6 +53,20 @@
#define IDPF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
+#define IDPF_RSS_OFFLOAD_ALL ( \
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
+
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
#endif
@@ -225,6 +239,9 @@ int idpf_get_pkt_type(struct idpf_adapter *adapter);
int idpf_get_caps(struct idpf_adapter *adapter);
int idpf_create_vport(struct rte_eth_dev *dev);
int idpf_destroy_vport(struct idpf_vport *vport);
+int idpf_set_rss_key(struct idpf_vport *vport);
+int idpf_set_rss_lut(struct idpf_vport *vport);
+int idpf_set_rss_hash(struct idpf_vport *vport);
int idpf_config_rxqs(struct idpf_vport *vport);
int idpf_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
int idpf_config_txqs(struct idpf_vport *vport);
@@ -679,6 +679,102 @@ idpf_destroy_vport(struct idpf_vport *vport)
return err;
}
+int
+idpf_set_rss_key(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_key *rss_key;
+ struct idpf_cmd_info args;
+ int len, err;
+
+ len = sizeof(*rss_key) + sizeof(rss_key->key[0]) *
+ (vport->rss_key_size - 1);
+ rss_key = rte_zmalloc("rss_key", len, 0);
+ if (!rss_key)
+ return -ENOMEM;
+
+ rss_key->vport_id = vport->vport_id;
+ rss_key->key_len = vport->rss_key_size;
+ rte_memcpy(rss_key->key, vport->rss_key,
+ sizeof(rss_key->key[0]) * vport->rss_key_size);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_SET_RSS_KEY;
+ args.in_args = (uint8_t *)rss_key;
+ args.in_args_size = len;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
+ return err;
+ }
+
+ rte_free(rss_key);
+ return err;
+}
+
+int
+idpf_set_rss_lut(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_lut *rss_lut;
+ struct idpf_cmd_info args;
+ int len, err;
+
+ len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) *
+ (vport->rss_lut_size - 1);
+ rss_lut = rte_zmalloc("rss_lut", len, 0);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ rss_lut->vport_id = vport->vport_id;
+ rss_lut->lut_entries = vport->rss_lut_size;
+ rte_memcpy(rss_lut->lut, vport->rss_lut,
+ sizeof(rss_lut->lut[0]) * vport->rss_lut_size);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_SET_RSS_LUT;
+ args.in_args = (uint8_t *)rss_lut;
+ args.in_args_size = len;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
+
+ rte_free(rss_lut);
+ return err;
+}
+
+int
+idpf_set_rss_hash(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_hash rss_hash;
+ struct idpf_cmd_info args;
+ int err;
+
+ memset(&rss_hash, 0, sizeof(rss_hash));
+ rss_hash.ptype_groups = vport->rss_hf;
+ rss_hash.vport_id = vport->vport_id;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_SET_RSS_HASH;
+ args.in_args = (uint8_t *)&rss_hash;
+ args.in_args_size = sizeof(rss_hash);
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
+
+ return err;
+}
+
#define IDPF_RX_BUF_STRIDE 64
int
idpf_config_rxqs(struct idpf_vport *vport)