@@ -90,7 +90,6 @@ ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
*rsp_msglen = event.msg_len;
return rte_le_to_cpu_32(event.desc.cookie_low);
-
again:
rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
@@ -897,7 +896,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
{
struct rte_eth_dev *dev = hw->eth_dev;
struct rte_eth_rss_conf *rss_conf;
- uint8_t i, j, nb_q;
+ uint16_t i, j, nb_q;
int ret;
rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
@@ -1076,6 +1075,12 @@ ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num)
return err;
}
+ /* request queues succeeded, vf is resetting */
+ if (hw->resetting) {
+ PMD_DRV_LOG(INFO, "vf is resetting");
+ return 0;
+ }
+
/* request additional queues failed, return available number */
num_queue_pairs = ((struct virtchnl_vf_res_request *)
args.rsp_msgbuf)->num_queue_pairs;
@@ -1186,7 +1191,8 @@ ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
args.req_msg = (u8 *)map_info;
args.req_msglen = len;
args.rsp_msgbuf = hw->arq_buf;
- args.req_msglen = ICE_DCF_AQ_BUF_SZ;
+ args.rsp_msglen = ICE_DCF_AQ_BUF_SZ;
+ args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
err = ice_dcf_execute_virtchnl_cmd(hw, &args);
if (err)
PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
@@ -1226,6 +1232,50 @@ ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
return err;
}
+/*
+ * Switch one queue on or off through the large-VF (v2) virtchnl path,
+ * using VIRTCHNL_OP_ENABLE_QUEUES_V2 / VIRTCHNL_OP_DISABLE_QUEUES_V2.
+ *
+ * @hw:  DCF hardware context (supplies vsi_id and the admin-queue buffer)
+ * @qid: id of the queue to switch
+ * @rx:  true to act on the Rx queue with this id, false for the Tx queue
+ * @on:  true to enable the queue, false to disable it
+ *
+ * Return: 0 on success, -ENOMEM if the request buffer cannot be
+ * allocated, otherwise the error code from ice_dcf_execute_virtchnl_cmd().
+ */
+int
+ice_dcf_switch_queue_lv(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
+{
+ struct virtchnl_del_ena_dis_queues *queue_select;
+ struct virtchnl_queue_chunk *queue_chunk;
+ struct dcf_virtchnl_cmd args;
+ int err, len;
+
+ /* Single chunk describing exactly one queue (num_chunks = 1). */
+ len = sizeof(struct virtchnl_del_ena_dis_queues);
+ queue_select = rte_zmalloc("queue_select", len, 0);
+ if (!queue_select)
+ return -ENOMEM;
+
+ queue_chunk = queue_select->chunks.chunks;
+ queue_select->chunks.num_chunks = 1;
+ queue_select->vport_id = hw->vsi_res->vsi_id;
+
+ if (rx) {
+ queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX;
+ queue_chunk->start_queue_id = qid;
+ queue_chunk->num_queues = 1;
+ } else {
+ queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX;
+ queue_chunk->start_queue_id = qid;
+ queue_chunk->num_queues = 1;
+ }
+
+ /*
+ * NOTE(review): args is stack-allocated and not zeroed; this assumes
+ * ice_dcf_execute_virtchnl_cmd() only reads the fields set below —
+ * confirm against its definition.
+ */
+ if (on)
+ args.v_op = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+ else
+ args.v_op = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+ args.req_msg = (u8 *)queue_select;
+ args.req_msglen = len;
+ args.rsp_msgbuf = hw->arq_buf;
+ args.rsp_msglen = ICE_DCF_AQ_BUF_SZ;
+ args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+ on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+ /* Request buffer is copied into the virtchnl message; safe to free. */
+ rte_free(queue_select);
+ return err;
+}
+
int
ice_dcf_disable_queues(struct ice_dcf_hw *hw)
{
@@ -1255,6 +1305,49 @@ ice_dcf_disable_queues(struct ice_dcf_hw *hw)
return err;
}
+/*
+ * Disable all Rx and Tx queues of the device through the large-VF (v2)
+ * virtchnl path (VIRTCHNL_OP_DISABLE_QUEUES_V2), using one chunk per
+ * queue type covering queue ids [0, nb_rx/tx_queues).
+ *
+ * @hw: DCF hardware context (supplies vsi_id, eth_dev queue counts and
+ *      the admin-queue response buffer)
+ *
+ * Return: 0 on success, -ENOMEM if the request buffer cannot be
+ * allocated, otherwise the error code from ice_dcf_execute_virtchnl_cmd().
+ */
+int
+ice_dcf_disable_queues_lv(struct ice_dcf_hw *hw)
+{
+ struct virtchnl_del_ena_dis_queues *queue_select;
+ struct virtchnl_queue_chunk *queue_chunk;
+ struct dcf_virtchnl_cmd args;
+ int err, len;
+
+ /*
+ * virtchnl_del_ena_dis_queues already embeds one chunk; add space for
+ * the remaining (ICE_DCF_RXTX_QUEUE_CHUNKS_NUM - 1) chunk(s).
+ */
+ len = sizeof(struct virtchnl_del_ena_dis_queues) +
+ sizeof(struct virtchnl_queue_chunk) *
+ (ICE_DCF_RXTX_QUEUE_CHUNKS_NUM - 1);
+ queue_select = rte_zmalloc("queue_select", len, 0);
+ if (!queue_select)
+ return -ENOMEM;
+
+ queue_chunk = queue_select->chunks.chunks;
+ queue_select->chunks.num_chunks = ICE_DCF_RXTX_QUEUE_CHUNKS_NUM;
+ queue_select->vport_id = hw->vsi_res->vsi_id;
+
+ /*
+ * NOTE(review): the chunk array is indexed by the queue-type enum
+ * values themselves; this assumes VIRTCHNL_QUEUE_TYPE_TX and
+ * VIRTCHNL_QUEUE_TYPE_RX are both < ICE_DCF_RXTX_QUEUE_CHUNKS_NUM (2)
+ * — confirm against the virtchnl header.
+ */
+ queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+ queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+ queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+ hw->eth_dev->data->nb_tx_queues;
+
+ queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+ queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+ queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+ hw->eth_dev->data->nb_rx_queues;
+
+ /*
+ * NOTE(review): args is stack-allocated and not zeroed; assumes
+ * ice_dcf_execute_virtchnl_cmd() only reads the fields set here.
+ */
+ args.v_op = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+ args.req_msg = (u8 *)queue_select;
+ args.req_msglen = len;
+ args.rsp_msgbuf = hw->arq_buf;
+ args.rsp_msglen = ICE_DCF_AQ_BUF_SZ;
+ args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_DISABLE_QUEUES_V2");
+ /* Request buffer is copied into the virtchnl message; safe to free. */
+ rte_free(queue_select);
+ return err;
+}
+
int
ice_dcf_query_stats(struct ice_dcf_hw *hw,
struct virtchnl_eth_stats *pstats)
@@ -15,6 +15,8 @@
#include "base/ice_type.h"
#include "ice_logs.h"
+#define ICE_DCF_RXTX_QUEUE_CHUNKS_NUM 2
+
struct dcf_virtchnl_cmd {
TAILQ_ENTRY(dcf_virtchnl_cmd) next;
@@ -145,7 +147,10 @@ int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
uint16_t num, uint16_t index);
int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
+int ice_dcf_switch_queue_lv(struct ice_dcf_hw *hw,
+ uint16_t qid, bool rx, bool on);
int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
+int ice_dcf_disable_queues_lv(struct ice_dcf_hw *hw);
int ice_dcf_query_stats(struct ice_dcf_hw *hw,
struct virtchnl_eth_stats *pstats);
int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw,
@@ -318,6 +318,7 @@ static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct ice_dcf_hw *dcf_hw = &ad->real_hw;
struct iavf_hw *hw = &ad->real_hw.avf;
struct ice_rx_queue *rxq;
int err = 0;
@@ -340,7 +341,11 @@ ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IAVF_WRITE_FLUSH(hw);
/* Ready to switch the queue on */
- err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
+ if (!dcf_hw->lv_enabled)
+ err = ice_dcf_switch_queue(dcf_hw, rx_queue_id, true, true);
+ else
+ err = ice_dcf_switch_queue_lv(dcf_hw, rx_queue_id, true, true);
+
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
@@ -449,6 +454,7 @@ static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct ice_dcf_hw *dcf_hw = &ad->real_hw;
struct iavf_hw *hw = &ad->real_hw.avf;
struct ice_tx_queue *txq;
int err = 0;
@@ -464,7 +470,10 @@ ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IAVF_WRITE_FLUSH(hw);
/* Ready to switch the queue on */
- err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
+ if (!dcf_hw->lv_enabled)
+ err = ice_dcf_switch_queue(dcf_hw, tx_queue_id, false, true);
+ else
+ err = ice_dcf_switch_queue_lv(dcf_hw, tx_queue_id, false, true);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -651,12 +660,17 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
struct ice_dcf_hw *hw = &ad->real_hw;
struct ice_rx_queue *rxq;
struct ice_tx_queue *txq;
- int ret, i;
+ int i;
/* Stop All queues */
- ret = ice_dcf_disable_queues(hw);
- if (ret)
- PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ if (!hw->lv_enabled) {
+ if (ice_dcf_disable_queues(hw))
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ } else {
+ if (ice_dcf_disable_queues_lv(hw))
+ PMD_DRV_LOG(WARNING,
+ "Fail to stop queues for large VF");
+ }
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
@@ -20,10 +20,10 @@
#define ICE_DCF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_DCF_VLAN_TAG_SIZE * 2)
#define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD)
-#define ICE_DCF_MAX_NUM_QUEUES_DFLT 16
-#define ICE_DCF_MAX_NUM_QUEUES_LV 256
-#define ICE_DCF_CFG_Q_NUM_PER_BUF 32
-#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128
+#define ICE_DCF_MAX_NUM_QUEUES_DFLT 16
+#define ICE_DCF_MAX_NUM_QUEUES_LV 256
+#define ICE_DCF_CFG_Q_NUM_PER_BUF 32
+#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128
struct ice_dcf_queue {
uint64_t dummy;