[v2,02/21] net/cpfl: add Tx queue setup
Checks
Commit Message
Add support for tx_queue_setup ops.
In the single queue model, the same descriptor queue is used by SW to
post buffer descriptors to HW and by HW to post completed descriptors
to SW.
In the split queue model, Tx queues are used only to pass buffer
descriptors from SW to HW, while separate Tx completion queues are used
by HW to report completed descriptors back to SW. The Rx side is
analogous: "RX buffer queues" are used to pass descriptor buffers from
SW to HW while Rx queues are used only to pass the descriptor
completions, that is, descriptors that point to completed buffers, from
HW to SW. This is contrary to the single queue model in which one queue
is used for both purposes.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
drivers/net/cpfl/meson.build | 1 +
2 files changed, 14 insertions(+)
@@ -12,6 +12,7 @@
#include <rte_alarm.h>
#include "cpfl_ethdev.h"
+#include "cpfl_rxtx.h"
#define CPFL_TX_SINGLE_Q "tx_single"
#define CPFL_RX_SINGLE_Q "rx_single"
@@ -96,6 +97,17 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mtu = vport->max_mtu;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = CPFL_MAX_RING_DESC,
+ .nb_min = CPFL_MIN_RING_DESC,
+ .nb_align = CPFL_ALIGN_RING_DESC,
+ };
+
return 0;
}
@@ -514,6 +526,7 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
+ .tx_queue_setup = cpfl_tx_queue_setup,
.dev_infos_get = cpfl_dev_info_get,
.link_update = cpfl_dev_link_update,
.dev_supported_ptypes_get = cpfl_dev_supported_ptypes_get,
@@ -11,4 +11,5 @@ deps += ['common_idpf']
sources = files(
'cpfl_ethdev.c',
+ 'cpfl_rxtx.c',
)
\ No newline at end of file