@@ -247,8 +247,21 @@ idpf_vport_init(struct idpf_vport *vport,
goto err_rss_lut;
}
+ /* recv_vectors holds the VIRTCHNL2_OP_ALLOC_VECTORS response;
+ * reserve the maximum size for it now. May need optimization in future.
+ */
+ vport->recv_vectors = rte_zmalloc("recv_vectors", IDPF_DFLT_MBX_BUF_SIZE, 0);
+ if (vport->recv_vectors == NULL) {
+ DRV_LOG(ERR, "Failed to allocate recv_vectors");
+ ret = -ENOMEM;
+ goto err_recv_vec;
+ }
+
return 0;
+err_recv_vec:
+ rte_free(vport->rss_lut);
+ vport->rss_lut = NULL;
err_rss_lut:
vport->dev_data = NULL;
rte_free(vport->rss_key);
@@ -261,6 +274,8 @@ idpf_vport_init(struct idpf_vport *vport,
int
idpf_vport_deinit(struct idpf_vport *vport)
{
+ rte_free(vport->recv_vectors);
+ vport->recv_vectors = NULL;
rte_free(vport->rss_lut);
vport->rss_lut = NULL;
@@ -298,4 +313,91 @@ idpf_config_rss(struct idpf_vport *vport)
return ret;
}
+
+int
+idpf_config_irq_map(struct idpf_vport *vport, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled; map interrupt only for write-back */
+
+ /* The capability flag adapter->caps.other_caps should be
+ * checked against bit VIRTCHNL2_CAP_WB_ON_ITR here. This
+ * condition should be updated once the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting the WB_ON_ITR bit means Tx and Rx descriptors
+ * are written back based on ITR expiration irrespective
+ * of the INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_config_irq_map_unmap(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
+int
+idpf_config_irq_unmap(struct idpf_vport *vport, uint16_t nb_rx_queues)
+{
+ idpf_vc_config_irq_map_unmap(vport, nb_rx_queues, false);
+
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+ return 0;
+}
+
RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);
@@ -17,6 +17,8 @@
#define IDPF_MAX_PKT_TYPE 1024
+#define IDPF_DFLT_INTERVAL 16
+
struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
@@ -155,5 +157,9 @@ __rte_internal
int idpf_vport_deinit(struct idpf_vport *vport);
__rte_internal
int idpf_config_rss(struct idpf_vport *vport);
+__rte_internal
+int idpf_config_irq_map(struct idpf_vport *vport, uint16_t nb_rx_queues);
+__rte_internal
+int idpf_config_irq_unmap(struct idpf_vport *vport, uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
@@ -573,14 +573,6 @@ idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
if (err != 0)
DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
- if (vport->recv_vectors == NULL) {
- vport->recv_vectors = rte_zmalloc("recv_vectors", len, 0);
- if (vport->recv_vectors == NULL) {
- rte_free(alloc_vec);
- return -ENOMEM;
- }
- }
-
rte_memcpy(vport->recv_vectors, args.out_buffer, len);
rte_free(alloc_vec);
return err;
@@ -23,6 +23,9 @@ int idpf_vc_set_rss_lut(struct idpf_vport *vport);
__rte_internal
int idpf_vc_set_rss_hash(struct idpf_vport *vport);
__rte_internal
+int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
+ uint16_t nb_rxq, bool map);
+__rte_internal
int idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
bool rx, bool on);
__rte_internal
@@ -30,9 +33,6 @@ int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
__rte_internal
int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
__rte_internal
-int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
- uint16_t nb_rxq, bool map);
-__rte_internal
int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
__rte_internal
int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
@@ -3,6 +3,8 @@ INTERNAL {
idpf_adapter_deinit;
idpf_adapter_init;
+ idpf_config_irq_map;
+ idpf_config_irq_unmap;
idpf_config_rss;
idpf_execute_vc_cmd;
idpf_vc_alloc_vectors;
@@ -281,84 +281,9 @@ static int
idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector *qv_map;
- struct idpf_hw *hw = &adapter->hw;
- uint32_t dynctl_reg_start;
- uint32_t itrn_reg_start;
- uint32_t dynctl_val, itrn_val;
- uint16_t i;
-
- qv_map = rte_zmalloc("qv_map",
- dev->data->nb_rx_queues *
- sizeof(struct virtchnl2_queue_vector), 0);
- if (qv_map == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
- dev->data->nb_rx_queues);
- goto qv_map_alloc_err;
- }
-
- /* Rx interrupt disabled, Map interrupt only for writeback */
-
- /* The capability flags adapter->caps.other_caps should be
- * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
- * condition should be updated when the FW can return the
- * correct flag bits.
- */
- dynctl_reg_start =
- vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
- itrn_reg_start =
- vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
- dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
- PMD_DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x",
- dynctl_val);
- itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
- PMD_DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
- /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
- * register. WB_ON_ITR and INTENA are mutually exclusive
- * bits. Setting WB_ON_ITR bits means TX and RX Descs
- * are written back based on ITR expiration irrespective
- * of INTENA setting.
- */
- /* TBD: need to tune INTERVAL value for better performance. */
- if (itrn_val != 0)
- IDPF_WRITE_REG(hw,
- dynctl_reg_start,
- VIRTCHNL2_ITR_IDX_0 <<
- PF_GLINT_DYN_CTL_ITR_INDX_S |
- PF_GLINT_DYN_CTL_WB_ON_ITR_M |
- itrn_val <<
- PF_GLINT_DYN_CTL_INTERVAL_S);
- else
- IDPF_WRITE_REG(hw,
- dynctl_reg_start,
- VIRTCHNL2_ITR_IDX_0 <<
- PF_GLINT_DYN_CTL_ITR_INDX_S |
- PF_GLINT_DYN_CTL_WB_ON_ITR_M |
- IDPF_DFLT_INTERVAL <<
- PF_GLINT_DYN_CTL_INTERVAL_S);
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- /* map all queues to the same vector */
- qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
- qv_map[i].vector_id =
- vport->recv_vectors->vchunks.vchunks->start_vector_id;
- }
- vport->qv_map = qv_map;
-
- if (idpf_vc_config_irq_map_unmap(vport, dev->data->nb_rx_queues, true) != 0) {
- PMD_DRV_LOG(ERR, "config interrupt mapping failed");
- goto config_irq_map_err;
- }
-
- return 0;
-
-config_irq_map_err:
- rte_free(vport->qv_map);
- vport->qv_map = NULL;
+ uint16_t nb_rx_queues = dev->data->nb_rx_queues;
-qv_map_alloc_err:
- return -1;
+ return idpf_config_irq_map(vport, nb_rx_queues);
}
static int
@@ -404,8 +329,6 @@ idpf_dev_start(struct rte_eth_dev *dev)
uint16_t req_vecs_num;
int ret;
- vport->stopped = 0;
-
req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
if (req_vecs_num + adapter->used_vecs_num > num_allocated_vectors) {
PMD_DRV_LOG(ERR, "The accumulated request vectors' number should be less than %d",
@@ -424,13 +347,13 @@ idpf_dev_start(struct rte_eth_dev *dev)
ret = idpf_config_rx_queues_irqs(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to configure irqs");
- goto err_vec;
+ goto err_irq;
}
ret = idpf_start_queues(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to start queues");
- goto err_vec;
+ goto err_startq;
}
idpf_set_rx_function(dev);
@@ -442,10 +365,16 @@ idpf_dev_start(struct rte_eth_dev *dev)
goto err_vport;
}
+ vport->stopped = 0;
+
return 0;
err_vport:
idpf_stop_queues(dev);
+err_startq:
+ idpf_config_irq_unmap(vport, dev->data->nb_rx_queues);
+err_irq:
+ idpf_vc_dealloc_vectors(vport);
err_vec:
return ret;
}
@@ -462,10 +391,9 @@ idpf_dev_stop(struct rte_eth_dev *dev)
idpf_stop_queues(dev);
- idpf_vc_config_irq_map_unmap(vport, dev->data->nb_rx_queues, false);
+ idpf_config_irq_unmap(vport, dev->data->nb_rx_queues);
- if (vport->recv_vectors != NULL)
- idpf_vc_dealloc_vectors(vport);
+ idpf_vc_dealloc_vectors(vport);
vport->stopped = 1;
@@ -482,12 +410,6 @@ idpf_dev_close(struct rte_eth_dev *dev)
idpf_vport_deinit(vport);
- rte_free(vport->recv_vectors);
- vport->recv_vectors = NULL;
-
- rte_free(vport->qv_map);
- vport->qv_map = NULL;
-
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
@@ -32,7 +32,6 @@
#define IDPF_RX_BUFQ_PER_GRP 2
#define IDPF_DFLT_Q_VEC_NUM 1
-#define IDPF_DFLT_INTERVAL 16
#define IDPF_MIN_BUF_SIZE 1024
#define IDPF_MAX_FRAME_SIZE 9728