@@ -19,7 +19,10 @@
#define IAVF_FRAME_SIZE_MAX 9728
#define IAVF_QUEUE_BASE_ADDR_UNIT 128
-#define IAVF_MAX_NUM_QUEUES 16
+#define IAVF_MAX_NUM_QUEUES_DFLT 16
+#define IAVF_MAX_NUM_QUEUES_LV 256
+#define IAVF_RXTX_QUEUE_CHUNKS_NUM 2
+#define IAVF_CFG_Q_NUM_PER_BUF 32
#define IAVF_NUM_MACADDR_MAX 64
@@ -104,8 +107,10 @@ struct iavf_fdir_info {
struct iavf_fdir_conf conf;
};
-/* TODO: is that correct to assume the max number to be 16 ?*/
-#define IAVF_MAX_MSIX_VECTORS 16
+/* Per-queue MSI-X mapping entry. A dynamically sized array of these
+ * (vf->qv_map) supersedes the old 16-entry rxq_map bitmask so a large
+ * VF can map up to IAVF_MAX_NUM_QUEUES_LV queues to vectors.
+ */
+struct iavf_qv_map {
+	uint16_t queue_id;	/* queue index within the VF */
+	uint16_t vector_id;	/* MSI-X vector the queue is bound to */
+};
/* Event status from PF */
enum pending_msg {
@@ -157,14 +162,16 @@ struct iavf_info {
uint8_t *rss_key;
uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
uint16_t msix_base; /* msix vector base from */
- /* queue bitmask for each vector */
- uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+ uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
+ struct iavf_qv_map *qv_map; /* queue vector mapping */
struct iavf_flow_list flow_list;
rte_spinlock_t flow_ops_lock;
struct iavf_parser_list rss_parser_list;
struct iavf_parser_list dist_parser_list;
struct iavf_fdir_info fdir; /* flow director info */
+ /* indicate large VF support enabled or not */
+ bool lv_enabled;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -291,13 +298,18 @@ int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
bool rx, bool on);
+int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+ bool rx, bool on);
int iavf_enable_queues(struct iavf_adapter *adapter);
+int iavf_enable_queues_lv(struct iavf_adapter *adapter);
int iavf_disable_queues(struct iavf_adapter *adapter);
+int iavf_disable_queues_lv(struct iavf_adapter *adapter);
int iavf_configure_rss_lut(struct iavf_adapter *adapter);
int iavf_configure_rss_key(struct iavf_adapter *adapter);
int iavf_configure_queues(struct iavf_adapter *adapter);
int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
int iavf_config_irq_map(struct iavf_adapter *adapter);
+int iavf_config_irq_map_lv(struct iavf_adapter *adapter);
void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
int iavf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete);
@@ -318,4 +330,5 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num, bool add);
int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
+int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
#endif /* _IAVF_ETHDEV_H_ */
@@ -205,7 +205,7 @@ iavf_init_rss(struct iavf_adapter *adapter)
rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
- IAVF_MAX_NUM_QUEUES);
+ vf->max_rss_qregion);
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
PMD_DRV_LOG(DEBUG, "RSS is not supported");
@@ -258,6 +258,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+ int ret = 0;
ad->rx_bulk_alloc_allowed = true;
/* Initialize to TRUE. If any of Rx queues doesn't meet the
@@ -269,6 +272,45 @@ iavf_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ /* Large VF setting */
+ if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
+ if (!(vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
+ PMD_DRV_LOG(ERR, "large VF is not supported");
+ return -1;
+ }
+
+ if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
+ PMD_DRV_LOG(ERR, "queue pairs number cannot be larger "
+ "than %u", IAVF_MAX_NUM_QUEUES_LV);
+ return -1;
+ }
+
+ ret = iavf_request_queues(dev, num_queue_pairs);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "request queues from PF failed");
+ return ret;
+ }
+ PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+ vf->vsi_res->num_queue_pairs, num_queue_pairs);
+
+ ret = iavf_dev_reset(dev);
+ if (ret != 0)
+ return ret;
+
+ vf->lv_enabled = true;
+ }
+
+ /* Set max RSS queue region */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_LARGE_NUM_QPAIRS) {
+ if (iavf_get_max_rss_queue_region(ad) != 0) {
+ PMD_INIT_LOG(ERR, "get max rss queue region failed");
+ return -1;
+ }
+ } else {
+ vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
+ }
+
/* Vlan stripping setting */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
@@ -283,6 +325,7 @@ iavf_dev_configure(struct rte_eth_dev *dev)
return -1;
}
}
+
return 0;
}
@@ -365,6 +408,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_qv_map *qv_map;
uint16_t interval, i;
int vec;
@@ -385,6 +429,14 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
}
}
+ qv_map = rte_zmalloc("qv_map",
+ dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
+ if (!qv_map) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+
if (!dev->data->dev_conf.intr_conf.rxq ||
!rte_intr_dp_is_en(intr_handle)) {
/* Rx interrupt disabled, Map interrupt only for writeback */
@@ -415,16 +467,21 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
}
IAVF_WRITE_FLUSH(hw);
/* map all queues to the same interrupt */
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- vf->rxq_map[vf->msix_base] |= 1 << i;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vf->msix_base;
+ }
+ vf->qv_map = qv_map;
} else {
if (!rte_intr_allow_others(intr_handle)) {
vf->nb_msix = 1;
vf->msix_base = IAVF_MISC_VEC_ID;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[vf->msix_base] |= 1 << i;
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vf->msix_base;
intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
}
+ vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"vector %u are mapping to all Rx queues",
vf->msix_base);
@@ -437,21 +494,32 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
vf->msix_base = IAVF_RX_VEC_START;
vec = IAVF_RX_VEC_START;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[vec] |= 1 << i;
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vec;
intr_handle->intr_vec[i] = vec++;
if (vec >= vf->nb_msix)
vec = IAVF_RX_VEC_START;
}
+ vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"%u vectors are mapping to %u Rx queues",
vf->nb_msix, dev->data->nb_rx_queues);
}
}
- if (iavf_config_irq_map(adapter)) {
- PMD_DRV_LOG(ERR, "config interrupt mapping failed");
- return -1;
+ if (!vf->lv_enabled) {
+ if (iavf_config_irq_map(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ } else {
+ if (iavf_config_irq_map_lv(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping "
+ "for large VF failed");
+ return -1;
+ }
}
+
return 0;
}
@@ -515,6 +583,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "configure irq failed");
goto err_queue;
}
+
/* re-enable intr again, because efd assign may change */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
rte_intr_disable(intr_handle);
@@ -579,8 +648,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
- dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
+ dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
dev_info->hash_key_size = vf->vf_res->rss_key_size;
@@ -1658,6 +1727,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
goto err_alloc;
}
+
/* Allocate memort for RSS info */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vf->rss_key = rte_zmalloc("rss_key",
@@ -720,6 +720,7 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_rx_queue *rxq;
int err = 0;
@@ -743,7 +744,11 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IAVF_WRITE_FLUSH(hw);
/* Ready to switch the queue on */
- err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+ if (!vf->lv_enabled)
+ err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+ else
+ err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
+
if (err)
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
@@ -760,6 +765,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_tx_queue *txq;
int err = 0;
@@ -775,7 +781,10 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IAVF_WRITE_FLUSH(hw);
/* Ready to switch the queue on */
- err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+ if (!vf->lv_enabled)
+ err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+ else
+ err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -876,14 +885,22 @@ iavf_stop_queues(struct rte_eth_dev *dev)
{
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_rx_queue *rxq;
struct iavf_tx_queue *txq;
int ret, i;
/* Stop All queues */
- ret = iavf_disable_queues(adapter);
- if (ret)
- PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ if (!vf->lv_enabled) {
+ ret = iavf_disable_queues(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ } else {
+ ret = iavf_disable_queues_lv(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
+ }
+
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
@@ -450,7 +450,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_LARGE_NUM_QPAIRS;
args.in_args = (uint8_t *)∩︀
args.in_args_size = sizeof(caps);
@@ -600,6 +601,138 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
return err;
}
+/* Enable all configured Rx and Tx queues of a large VF with a single
+ * VIRTCHNL_OP_ENABLE_QUEUES_V2 message carrying one chunk per queue
+ * type. Returns 0 on success, negative errno on failure.
+ */
+int
+iavf_enable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* one chunk is embedded in the struct; add room for the second */
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+		sizeof(struct virtchnl_queue_chunk) *
+		(IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
+
+	/* message payload was consumed by the send; free it on every path
+	 * (previously leaked on both the success and error returns)
+	 */
+	rte_free(queue_select);
+	return err;
+}
+
+/* Disable all configured Rx and Tx queues of a large VF with a single
+ * VIRTCHNL_OP_DISABLE_QUEUES_V2 message carrying one chunk per queue
+ * type. Returns 0 on success, negative errno on failure.
+ */
+int
+iavf_disable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* one chunk is embedded in the struct; add room for the second */
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+		sizeof(struct virtchnl_queue_chunk) *
+		(IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
+
+	/* message payload was consumed by the send; free it on every path
+	 * (previously leaked on both the success and error returns)
+	 */
+	rte_free(queue_select);
+	return err;
+}
+
+/* Switch a single Rx (rx == true) or Tx queue of a large VF on or off
+ * via VIRTCHNL_OP_ENABLE_QUEUES_V2 / VIRTCHNL_OP_DISABLE_QUEUES_V2,
+ * using one single-queue chunk. Returns 0 on success, negative on
+ * failure.
+ */
+int
+iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+		 bool rx, bool on)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl_del_ena_dis_queues);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = 1;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk->type = rx ? VIRTCHNL_QUEUE_TYPE_RX :
+			    VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk->start_queue_id = qid;
+	queue_chunk->num_queues = 1;
+
+	if (on)
+		args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	else
+		args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+
+	/* message payload was consumed by the send; free it on every path
+	 * (previously leaked on both the success and error returns)
+	 */
+	rte_free(queue_select);
+	return err;
+}
+
int
iavf_configure_rss_lut(struct iavf_adapter *adapter)
{
@@ -664,32 +797,26 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
return err;
}
-int
-iavf_configure_queues(struct iavf_adapter *adapter)
+static int
+iavf_exec_queue_cfg(struct iavf_adapter *adapter,
+ struct virtchnl_vsi_queue_config_info *vc_config, uint16_t count)
{
struct iavf_rx_queue **rxq =
(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
struct iavf_tx_queue **txq =
(struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
- struct virtchnl_vsi_queue_config_info *vc_config;
struct virtchnl_queue_pair_info *vc_qp;
struct iavf_cmd_info args;
uint16_t i, size;
- int err;
+ int err = 0;
size = sizeof(*vc_config) +
- sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
- vc_config = rte_zmalloc("cfg_queue", size, 0);
- if (!vc_config)
- return -ENOMEM;
+ sizeof(vc_config->qpair[0]) * vc_config->num_queue_pairs;
- vc_config->vsi_id = vf->vsi_res->vsi_id;
- vc_config->num_queue_pairs = vf->num_queue_pairs;
-
- for (i = 0, vc_qp = vc_config->qpair;
- i < vf->num_queue_pairs;
- i++, vc_qp++) {
+ for (i = count * IAVF_CFG_Q_NUM_PER_BUF, vc_qp = vc_config->qpair;
+ i < count * IAVF_CFG_Q_NUM_PER_BUF + vc_config->num_queue_pairs;
+ i++, vc_qp++) {
vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
vc_qp->txq.queue_id = i;
/* Virtchnnl configure queues by pairs */
@@ -745,8 +872,71 @@ iavf_configure_queues(struct iavf_adapter *adapter)
err = iavf_execute_vf_cmd(adapter, &args);
if (err)
PMD_DRV_LOG(ERR, "Failed to execute command of"
- " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+ " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+ return err;
+}
+
+/* Configure VSI queues. Max VF queue pairs number is 256, may
+ * send this command multiple times to configure all queues.
+ */
+int
+iavf_configure_queues(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_vsi_queue_config_info *vc_config = NULL;
+	uint16_t i, size_full, size_inc;
+	uint16_t nb_cmd_full, nbq_inc;
+	int err = 0;
+
+	/* Compute how many times should the command to be sent,
+	 * including the commands with full buffer and incomplete
+	 * buffer.
+	 */
+	/* nb_cmd_full full-size messages of IAVF_CFG_Q_NUM_PER_BUF pairs,
+	 * plus one trailing message of nbq_inc pairs when the queue count
+	 * is not an exact multiple.
+	 */
+	nbq_inc = vf->num_queue_pairs % IAVF_CFG_Q_NUM_PER_BUF;
+	nb_cmd_full = vf->num_queue_pairs / IAVF_CFG_Q_NUM_PER_BUF;
+
+	/* buffer sizes: one virtchnl header plus per-pair entries */
+	size_full = sizeof(*vc_config) +
+		sizeof(vc_config->qpair[0]) * IAVF_CFG_Q_NUM_PER_BUF;
+	size_inc = sizeof(*vc_config) +
+		sizeof(vc_config->qpair[0]) * nbq_inc;
+
+	/* small VF: everything fits in a single (partial) message */
+	if (!nb_cmd_full) {
+		vc_config = rte_zmalloc("cfg_queue", size_inc, 0);
+		if (!vc_config)
+			return -ENOMEM;
+
+		vc_config->vsi_id = vf->vsi_res->vsi_id;
+		vc_config->num_queue_pairs = nbq_inc;
+		err = iavf_exec_queue_cfg(adapter, vc_config, 0);
+		goto free;
+	}
+
+	vc_config = rte_zmalloc("cfg_queue", size_full, 0);
+	if (!vc_config)
+		return -ENOMEM;
+	vc_config->vsi_id = vf->vsi_res->vsi_id;
+	vc_config->num_queue_pairs = IAVF_CFG_Q_NUM_PER_BUF;
+
+	/* send nb_cmd_full full messages, then one more iteration with a
+	 * smaller buffer when nbq_inc pairs remain; the count argument i
+	 * tells iavf_exec_queue_cfg() the starting queue offset.
+	 */
+	for (i = 0; i < nb_cmd_full + (nbq_inc ? 1 : 0); i++) {
+		if (i >= nb_cmd_full) {
+			/* re-allocate virtchnl msg for less queues */
+			rte_free(vc_config);
+			vc_config = rte_zmalloc("cfg_queue", size_inc, 0);
+			if (!vc_config)
+				return -ENOMEM;
+
+			vc_config->vsi_id = vf->vsi_res->vsi_id;
+			vc_config->num_queue_pairs = nbq_inc;
+		}
+
+		err = iavf_exec_queue_cfg(adapter, vc_config, i);
+		if (err)
+			break;
+	}
+
+free:
 	rte_free(vc_config);
 	return err;
 }
@@ -768,13 +958,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
return -ENOMEM;
map_info->num_vectors = vf->nb_msix;
- for (i = 0; i < vf->nb_msix; i++) {
- vecmap = &map_info->vecmap[i];
+ for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+ vecmap =
+ &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
vecmap->vsi_id = vf->vsi_res->vsi_id;
vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
- vecmap->vector_id = vf->msix_base + i;
+ vecmap->vector_id = vf->qv_map[i].vector_id;
vecmap->txq_map = 0;
- vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
}
args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -790,6 +981,46 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
return err;
}
+/* Large-VF counterpart of iavf_config_irq_map(): map every Rx queue to
+ * its MSI-X vector with a single VIRTCHNL_OP_MAP_QUEUE_VECTOR message
+ * built from the vf->qv_map table filled by iavf_config_rx_queues_irqs().
+ * Returns 0 on success, negative on failure.
+ */
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct iavf_cmd_info args;
+	int len, i, err;
+
+	/* struct embeds one map entry; add one per extra Rx queue */
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+		sizeof(struct virtchnl_queue_vector) *
+		(adapter->eth_dev->data->nb_rx_queues - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vf->vsi_res->vsi_id;
+	map_info->num_qv_maps = adapter->eth_dev->data->nb_rx_queues;
+	for (i = 0; i < map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[i];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = vf->qv_map[i].queue_id;
+		qv_maps->vector_id = vf->qv_map[i].vector_id;
+	}
+
+	args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
void
iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
{
@@ -1257,3 +1488,33 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
return -1;
}
+
+/* Query the PF for the supported RSS queue region width via
+ * VIRTCHNL_OP_GET_MAX_RSS_QREGION and cache 2^qregion_width into
+ * vf->max_rss_qregion. Returns 0 on success, negative on failure.
+ */
+int
+iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	uint16_t qregion_width;
+	int err;
+
+	/* no input payload; the answer comes back in the AQ response buffer */
+	args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of "
+			    "VIRTCHNL_OP_GET_MAX_RSS_QREGION");
+		return err;
+	}
+
+	qregion_width =
+	((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width;
+
+	/* width is a power-of-two exponent of the max queue count */
+	vf->max_rss_qregion = (uint16_t)(1 << qregion_width);
+
+	return 0;
+}