[v4,02/21] net/cpfl: add Tx queue setup

Message ID 20230118075738.904616-3-mingxia.liu@intel.com
State Superseded, archived
Delegated to: Ferruh Yigit
Series add support for cpfl PMD in DPDK

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Liu, Mingxia Jan. 18, 2023, 7:57 a.m. UTC
  Add support for tx_queue_setup ops.

In the single queue model, the same descriptor queue is used by SW to
post buffer descriptors to HW and by HW to post completed descriptors
to SW.

In the split queue model, "Rx buffer queues" are used to pass
descriptor buffers from SW to HW, while Rx queues are used only to
pass descriptor completions, that is, descriptors that point to
completed buffers, from HW to SW. This is unlike the single queue
model, in which Rx queues are used for both purposes.
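
The same split applies on the Tx side this patch implements: in split
queue mode each Tx descriptor queue is paired with a Tx completion
queue (see cpfl_tx_complq_setup() and the txq->complq assignment in
the diff below). A minimal sketch of that pairing; the structure and
field names here are illustrative assumptions, not the real idpf/cpfl
definitions:

    /* Hedged sketch, not the driver's actual layout. */
    struct sketch_tx_queue {
            void *desc_ring;                /* SW -> HW: Tx descriptors */
            struct sketch_tx_queue *complq; /* HW -> SW: completions.
                                             * Split model only; NULL in the
                                             * single queue model, where
                                             * desc_ring carries traffic in
                                             * both directions. */
    };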

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c   |  8 ++++----
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 18 insertions(+), 4 deletions(-)
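
For reviewers trying the patch: once the op is wired up, a Tx queue is
configured through the standard ethdev call. A hedged usage sketch,
where the port id, queue id, and descriptor count are illustrative and
the txconf defaults come from the dev_info values this patch
advertises:

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    uint16_t port_id = 0;           /* illustrative port */
    struct rte_eth_dev_info info;
    int ret;

    ret = rte_eth_dev_info_get(port_id, &info); /* fills default_txconf
                                                 * and tx_desc_lim */
    if (ret != 0)
            return ret;             /* assumes enclosing init function */
    /* Passing NULL txconf also works: the PMD then applies the same
     * advertised defaults. */
    ret = rte_eth_tx_queue_setup(port_id, 0 /* queue id */,
                                 info.tx_desc_lim.nb_min,
                                 rte_socket_id(), &info.default_txconf);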
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2ac53bc5b0..4a569c2f7e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -12,6 +12,7 @@ 
 #include <rte_alarm.h>
 
 #include "cpfl_ethdev.h"
+#include "cpfl_rxtx.h"
 
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
@@ -96,6 +97,17 @@  cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = vport->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
 	return 0;
 }
 
@@ -513,6 +525,7 @@  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure			= cpfl_dev_configure,
 	.dev_close			= cpfl_dev_close,
+	.tx_queue_setup			= cpfl_tx_queue_setup,
 	.dev_infos_get			= cpfl_dev_info_get,
 	.link_update			= cpfl_dev_link_update,
 	.dev_supported_ptypes_get	= cpfl_dev_supported_ptypes_get,
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index ea4a2002bf..a9742379db 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -130,7 +130,7 @@  cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 	cq->tx_ring_phys_addr = mz->iova;
 	cq->compl_ring = mz->addr;
 	cq->mz = mz;
-	reset_split_tx_complq(cq);
+	idpf_reset_split_tx_complq(cq);
 
 	txq->complq = cq;
 
@@ -164,7 +164,7 @@  cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		tx_conf->tx_rs_thresh : CPFL_DEFAULT_TX_RS_THRESH);
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?
 		tx_conf->tx_free_thresh : CPFL_DEFAULT_TX_FREE_THRESH);
-	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
+	if (idpf_check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
 	/* Allocate the TX queue data structure. */
@@ -215,10 +215,10 @@  cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	if (!is_splitq) {
 		txq->tx_ring = mz->addr;
-		reset_single_tx_queue(txq);
+		idpf_reset_single_tx_queue(txq);
 	} else {
 		txq->desc_ring = mz->addr;
-		reset_split_tx_descq(txq);
+		idpf_reset_split_tx_descq(txq);
 
 		/* Setup tx completion queue if split model */
 		ret = cpfl_tx_complq_setup(dev, txq, queue_idx,
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 106cc97e60..3ccee15703 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,4 +11,5 @@  deps += ['common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
+        'cpfl_rxtx.c',
 )
\ No newline at end of file