[v7,02/21] net/cpfl: add Tx queue setup

Message ID: 20230216003010.3439881-3-mingxia.liu@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: add support for cpfl PMD in DPDK

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Liu, Mingxia Feb. 16, 2023, 12:29 a.m. UTC
  Add support for tx_queue_setup ops.

In the single queue model, the same descriptor queue is used by SW to
post buffer descriptors to HW and by HW to post completed descriptors
to SW.

In the split queue model, "RX buffer queues" are used to pass
descriptor buffers from SW to HW while Rx queues are used only to
pass the descriptor completions, that is, descriptors that point
to completed buffers, from HW to SW. This is contrary to the single
queue model in which Rx queues are used for both purposes.

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c   |  8 ++++----
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 18 insertions(+), 4 deletions(-)
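
For context, the new op is exercised through the standard ethdev API. Below is a
minimal application-side sketch (it assumes the port has already been configured
with rte_eth_dev_configure(); the port id, queue index 0 and the 1024-descriptor
ring size are arbitrary placeholders):

#include <rte_ethdev.h>

/* Set up Tx queue 0 on an already-configured cpfl port, starting from the
 * defaults that cpfl_dev_info_get() now advertises (tx_rs_thresh /
 * tx_free_thresh). The 1024-entry ring size is only an example and must fall
 * within the tx_desc_lim range reported by the driver. */
static int
setup_one_tx_queue(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	txconf = dev_info.default_txconf;

	return rte_eth_tx_queue_setup(port_id, 0 /* queue id */, 1024,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}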
  

Comments

Ferruh Yigit Feb. 27, 2023, 9:44 p.m. UTC | #1
On 2/16/2023 12:29 AM, Mingxia Liu wrote:
> Add support for tx_queue_setup ops.
> 
> In the single queue model, the same descriptor queue is used by SW to
> post buffer descriptors to HW and by HW to post completed descriptors
> to SW.
> 
> In the split queue model, "RX buffer queues" are used to pass
> descriptor buffers from SW to HW while Rx queues are used only to
> pass the descriptor completions, that is, descriptors that point
> to completed buffers, from HW to SW. This is contrary to the single
> queue model in which Rx queues are used for both purposes.
> 

This patch is related to Tx, and the above description seems to be about Rx;
would the next patch be a better place for that paragraph? Or please revise it
for Tx if it applies to this patch too.
  
Liu, Mingxia Feb. 28, 2023, 2:40 a.m. UTC | #2
Ok, thanks!

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Tuesday, February 28, 2023 5:45 AM
> To: Liu, Mingxia <mingxia.liu@intel.com>; dev@dpdk.org; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Yuying <yuying.zhang@intel.com>
> Subject: Re: [PATCH v7 02/21] net/cpfl: add Tx queue setup
> 
> On 2/16/2023 12:29 AM, Mingxia Liu wrote:
> > Add support for tx_queue_setup ops.
> >
> > In the single queue model, the same descriptor queue is used by SW to
> > post buffer descriptors to HW and by HW to post completed descriptors
> > to SW.
> >
> > In the split queue model, "RX buffer queues" are used to pass
> > descriptor buffers from SW to HW while Rx queues are used only to pass
> > the descriptor completions, that is, descriptors that point to
> > completed buffers, from HW to SW. This is contrary to the single queue
> > model in which Rx queues are used for both purposes.
> >
> 
> This patch is related to Tx, and the above description seems to be about Rx;
> would the next patch be a better place for that paragraph? Or please revise it
> for Tx if it applies to this patch too.
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index fe0061133c..5ca21c9772 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -12,6 +12,7 @@ 
 #include <rte_alarm.h>
 
 #include "cpfl_ethdev.h"
+#include "cpfl_rxtx.h"
 
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
@@ -96,6 +97,17 @@  cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = vport->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
 	return 0;
 }
 
@@ -513,6 +525,7 @@  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure			= cpfl_dev_configure,
 	.dev_close			= cpfl_dev_close,
+	.tx_queue_setup			= cpfl_tx_queue_setup,
 	.dev_infos_get			= cpfl_dev_info_get,
 	.link_update			= cpfl_dev_link_update,
 	.dev_supported_ptypes_get	= cpfl_dev_supported_ptypes_get,
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 2b9c20928b..5b69ac0009 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -130,7 +130,7 @@  cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 	cq->tx_ring_phys_addr = mz->iova;
 	cq->compl_ring = mz->addr;
 	cq->mz = mz;
-	reset_split_tx_complq(cq);
+	idpf_qc_split_tx_complq_reset(cq);
 
 	txq->complq = cq;
 
@@ -164,7 +164,7 @@  cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		tx_conf->tx_rs_thresh : CPFL_DEFAULT_TX_RS_THRESH);
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?
 		tx_conf->tx_free_thresh : CPFL_DEFAULT_TX_FREE_THRESH);
-	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
+	if (idpf_qc_tx_thresh_check(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
 	/* Allocate the TX queue data structure. */
@@ -215,10 +215,10 @@  cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	if (!is_splitq) {
 		txq->tx_ring = mz->addr;
-		reset_single_tx_queue(txq);
+		idpf_qc_single_tx_queue_reset(txq);
 	} else {
 		txq->desc_ring = mz->addr;
-		reset_split_tx_descq(txq);
+		idpf_qc_split_tx_descq_reset(txq);
 
 		/* Setup tx completion queue if split model */
 		ret = cpfl_tx_complq_setup(dev, txq, queue_idx,
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index c721732b50..1894423689 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,4 +11,5 @@  deps += ['common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
+        'cpfl_rxtx.c',
 )
\ No newline at end of file
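
Since explanatory comments cannot be added inside the diff without breaking the
hunk line counts, here is the single/split branch from cpfl_tx_queue_setup()
above, copied out of the post-patch code with comments tying it back to the
queue-model description in the commit message; the final call is truncated with
"..." exactly where the hunk ends:

	if (!is_splitq) {
		/* Single queue model: the one Tx ring is used both by SW to
		 * post descriptors and by HW to post completions back. */
		txq->tx_ring = mz->addr;
		idpf_qc_single_tx_queue_reset(txq);
	} else {
		/* Split queue model: SW posts descriptors on desc_ring, while
		 * HW reports completions on a separate completion queue. */
		txq->desc_ring = mz->addr;
		idpf_qc_split_tx_descq_reset(txq);

		/* Setup tx completion queue if split model */
		ret = cpfl_tx_complq_setup(dev, txq, queue_idx, ...);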