From patchwork Tue Feb 14 11:38:48 2023
X-Patchwork-Submitter: "Liu, Mingxia"
X-Patchwork-Id: 123882
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Mingxia Liu
To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com
Cc: Mingxia Liu
Subject: [PATCH v2 1/5] net/cpfl: add some structure for hairpin queue
Date: Tue, 14 Feb 2023 11:38:48 +0000
Message-Id: <20230214113852.3341607-2-mingxia.liu@intel.com>
In-Reply-To: <20230214113852.3341607-1-mingxia.liu@intel.com>
References: <20230118130659.976873-1-mingxia.liu@intel.com> <20230214113852.3341607-1-mingxia.liu@intel.com>

This patch adds the structures needed for hairpin queue support: cpfl_rx_queue, cpfl_tx_queue, and cpfl_vport.
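The core idea of the patch is a wrapper layout: the PMD now stores cpfl_* wrappers in dev->data->rx_queues[]/tx_queues[]/dev_private, each embedding the common idpf structure as a "base" member, so cpfl-specific hairpin metadata can be attached while the shared idpf library keeps operating on the embedded base. The following minimal sketch illustrates the pattern only; it is not the driver code, and the hairpin flag is a stand-in for the fields the later patches add.

#include <stdbool.h>
#include <stdint.h>

/* Common queue structure owned by the shared idpf base library. */
struct idpf_rx_queue {
	uint16_t nb_rx_desc;
	/* ... other common fields ... */
};

/* cpfl wrapper: the base is embedded so &wrapper->base can be handed
 * to any idpf_* helper unchanged. */
struct cpfl_rx_queue {
	struct idpf_rx_queue base;
	bool hairpin_q;		/* stand-in for cpfl-only hairpin metadata */
};

/* ethdev callbacks now cast the opaque queue pointer to the wrapper
 * and pass only the embedded base down to the common code. */
uint16_t
cpfl_rxq_desc_count(void *queue)
{
	struct cpfl_rx_queue *cpfl_rxq = queue;

	return cpfl_rxq->base.nb_rx_desc;
}

This is why nearly every hunk below is a mechanical "cast dev_private/queue pointer to the cpfl wrapper, then take &(...->base)" change, plus sizeof() updates at allocation sites.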
Signed-off-by: Mingxia Liu --- drivers/net/cpfl/cpfl_ethdev.c | 102 +++++++----- drivers/net/cpfl/cpfl_ethdev.h | 8 +- drivers/net/cpfl/cpfl_rxtx.c | 196 +++++++++++++++++------- drivers/net/cpfl/cpfl_rxtx.h | 28 ++++ drivers/net/cpfl/cpfl_rxtx_vec_common.h | 18 ++- 5 files changed, 255 insertions(+), 97 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 543dbd60f0..f799707ea7 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -108,7 +108,9 @@ static int cpfl_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct rte_eth_link new_link; memset(&new_link, 0, sizeof(new_link)); @@ -160,7 +162,9 @@ cpfl_dev_link_update(struct rte_eth_dev *dev, static int cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; dev_info->max_rx_queues = adapter->caps.max_rx_q; @@ -220,7 +224,9 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) static int cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); /* mtu setting is forbidden if port is start */ if (dev->data->dev_started) { @@ -260,12 +266,12 @@ static uint64_t cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev) { uint64_t mbuf_alloc_failed = 0; - struct idpf_rx_queue *rxq; + struct cpfl_rx_queue *cpfl_rxq; int i = 0; for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed, + cpfl_rxq = dev->data->rx_queues[i]; + mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, __ATOMIC_RELAXED); } @@ -275,8 +281,9 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev) static int cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { - struct idpf_vport *vport = - (struct idpf_vport *)dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct virtchnl2_vport_stats *pstats = NULL; int ret; @@ -308,20 +315,21 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) static void cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev) { - struct idpf_rx_queue *rxq; + struct cpfl_rx_queue *cpfl_rxq; int i; for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED); + cpfl_rxq = dev->data->rx_queues[i]; + __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED); } } static int cpfl_dev_stats_reset(struct rte_eth_dev *dev) { - struct idpf_vport *vport = - (struct idpf_vport *)dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct virtchnl2_vport_stats *pstats = NULL; int ret; @@ -346,8 +354,9 @@ static int 
cpfl_dev_xstats_reset(struct rte_eth_dev *dev) static int cpfl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n) { - struct idpf_vport *vport = - (struct idpf_vport *)dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct virtchnl2_vport_stats *pstats = NULL; unsigned int i; int ret; @@ -461,7 +470,9 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; uint16_t idx, shift; int ret = 0; @@ -500,7 +511,9 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; uint16_t idx, shift; int ret = 0; @@ -538,7 +551,9 @@ static int cpfl_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; int ret = 0; @@ -603,7 +618,9 @@ static int cpfl_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; int ret = 0; @@ -640,7 +657,9 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev, static int cpfl_dev_configure(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct rte_eth_conf *conf = &dev->data->dev_conf; struct idpf_adapter *adapter = vport->adapter; int ret; @@ -703,7 +722,9 @@ cpfl_dev_configure(struct rte_eth_dev *dev) static int cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); uint16_t nb_rx_queues = dev->data->nb_rx_queues; return idpf_vport_irq_map_config(vport, nb_rx_queues); @@ -712,15 +733,16 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev) static int cpfl_start_queues(struct rte_eth_dev *dev) { - struct idpf_rx_queue *rxq; - struct idpf_tx_queue *txq; + struct cpfl_rx_queue *cpfl_rxq; + struct cpfl_tx_queue *cpfl_txq; int err = 0; int i; for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = dev->data->tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) + cpfl_txq = dev->data->tx_queues[i]; + if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start) continue; + err = cpfl_tx_queue_start(dev, i); if (err != 0) { PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i); @@ -729,8 +751,8 @@ cpfl_start_queues(struct rte_eth_dev *dev) } for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - if (rxq == NULL || 
rxq->rx_deferred_start) + cpfl_rxq = dev->data->rx_queues[i]; + if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start) continue; err = cpfl_rx_queue_start(dev, i); if (err != 0) { @@ -745,7 +767,9 @@ cpfl_start_queues(struct rte_eth_dev *dev) static int cpfl_dev_start(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *base = vport->adapter; struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base); uint16_t num_allocated_vectors = base->caps.num_allocated_vectors; @@ -808,7 +832,9 @@ cpfl_dev_start(struct rte_eth_dev *dev) static int cpfl_dev_stop(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); if (vport->stopped == 1) return 0; @@ -829,7 +855,9 @@ cpfl_dev_stop(struct rte_eth_dev *dev) static int cpfl_dev_close(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter); cpfl_dev_stop(dev); @@ -839,7 +867,7 @@ cpfl_dev_close(struct rte_eth_dev *dev) adapter->cur_vport_nb--; dev->data->dev_private = NULL; adapter->vports[vport->sw_idx] = NULL; - rte_free(vport); + rte_free(cpfl_vport); return 0; } @@ -1012,7 +1040,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id) int i; for (i = 0; i < adapter->cur_vport_nb; i++) { - vport = adapter->vports[i]; + vport = &(adapter->vports[i]->base); if (vport->vport_id != vport_id) continue; else @@ -1225,7 +1253,9 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *ad) static int cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct cpfl_vport_param *param = init_params; struct cpfl_adapter_ext *adapter = param->adapter; /* for sending create vport virtchnl msg prepare */ @@ -1251,7 +1281,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) goto err; } - adapter->vports[param->idx] = vport; + adapter->vports[param->idx] = cpfl_vport; adapter->cur_vports |= RTE_BIT32(param->devarg_id); adapter->cur_vport_nb++; @@ -1369,7 +1399,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, snprintf(name, sizeof(name), "cpfl_%s_vport_0", pci_dev->device.name); retval = rte_eth_dev_create(&pci_dev->device, name, - sizeof(struct idpf_vport), + sizeof(struct cpfl_vport), NULL, NULL, cpfl_dev_vport_init, &vport_param); if (retval != 0) @@ -1387,7 +1417,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, pci_dev->device.name, devargs.req_vports[i]); retval = rte_eth_dev_create(&pci_dev->device, name, - sizeof(struct idpf_vport), + sizeof(struct cpfl_vport), NULL, NULL, cpfl_dev_vport_init, &vport_param); if (retval != 0) diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index e00dff4bf0..ef3225878b 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -70,13 +70,19 @@ struct cpfl_devargs { uint16_t req_vport_nb; }; +struct cpfl_vport { + /* p2p mbuf pool */ + struct rte_mempool *p2p_mp; + struct 
idpf_vport base; +}; + struct cpfl_adapter_ext { TAILQ_ENTRY(cpfl_adapter_ext) next; struct idpf_adapter base; char name[CPFL_ADAPTER_NAME_LEN]; - struct idpf_vport **vports; + struct cpfl_vport **vports; uint16_t max_vport_nb; uint16_t cur_vports; /* bit mask of created vport */ diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index 6226b02301..c7e5487366 100644 --- a/drivers/net/cpfl/cpfl_rxtx.c +++ b/drivers/net/cpfl/cpfl_rxtx.c @@ -10,6 +10,11 @@ #include "cpfl_rxtx.h" #include "cpfl_rxtx_vec_common.h" +static void +cpfl_tx_queue_release(void *txq); +static void +cpfl_rx_queue_release(void *txq); + static uint64_t cpfl_rx_offload_convert(uint64_t offload) { @@ -128,7 +133,9 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq, uint16_t nb_desc, unsigned int socket_id, struct rte_mempool *mp, uint8_t bufq_id) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; struct idpf_hw *hw = &adapter->hw; const struct rte_memzone *mz; @@ -225,9 +232,12 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; struct idpf_hw *hw = &adapter->hw; + struct cpfl_rx_queue *cpfl_rxq; const struct rte_memzone *mz; struct idpf_rx_queue *rxq; uint16_t rx_free_thresh; @@ -247,21 +257,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, /* Free memory if needed */ if (dev->data->rx_queues[queue_idx] != NULL) { - idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]); + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]); dev->data->rx_queues[queue_idx] = NULL; } /* Setup Rx queue */ - rxq = rte_zmalloc_socket("cpfl rxq", - sizeof(struct idpf_rx_queue), + cpfl_rxq = rte_zmalloc_socket("cpfl rxq", + sizeof(struct cpfl_rx_queue), RTE_CACHE_LINE_SIZE, socket_id); - if (rxq == NULL) { + if (cpfl_rxq == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure"); ret = -ENOMEM; goto err_rxq_alloc; } + rxq = &(cpfl_rxq->base); + is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT); rxq->mp = mp; @@ -328,7 +340,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, } rxq->q_set = true; - dev->data->rx_queues[queue_idx] = rxq; + dev->data->rx_queues[queue_idx] = cpfl_rxq; return 0; @@ -348,7 +360,9 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); const struct rte_memzone *mz; struct idpf_tx_queue *cq; int ret; @@ -396,15 +410,18 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct idpf_adapter *adapter = vport->adapter; uint16_t tx_rs_thresh, tx_free_thresh; struct 
idpf_hw *hw = &adapter->hw; + struct cpfl_tx_queue *cpfl_txq; const struct rte_memzone *mz; struct idpf_tx_queue *txq; uint64_t offloads; - uint16_t len; bool is_splitq; + uint16_t len; int ret; offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; @@ -418,21 +435,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, /* Free memory if needed. */ if (dev->data->tx_queues[queue_idx] != NULL) { - idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]); + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]); dev->data->tx_queues[queue_idx] = NULL; } /* Allocate the TX queue data structure. */ - txq = rte_zmalloc_socket("cpfl txq", - sizeof(struct idpf_tx_queue), + cpfl_txq = rte_zmalloc_socket("cpfl txq", + sizeof(struct cpfl_tx_queue), RTE_CACHE_LINE_SIZE, socket_id); - if (txq == NULL) { + if (cpfl_txq == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure"); ret = -ENOMEM; goto err_txq_alloc; } + txq = &(cpfl_txq->base); + is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT); txq->nb_tx_desc = nb_desc; @@ -486,7 +505,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, queue_idx * vport->chunks_info.tx_qtail_spacing); txq->ops = &def_txq_ops; txq->q_set = true; - dev->data->tx_queues[queue_idx] = txq; + dev->data->tx_queues[queue_idx] = cpfl_txq; return 0; @@ -502,6 +521,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, int cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) { + struct cpfl_rx_queue *cpfl_rxq; struct idpf_rx_queue *rxq; uint16_t max_pkt_len; uint32_t frame_size; @@ -510,7 +530,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (rx_queue_id >= dev->data->nb_rx_queues) return -EINVAL; - rxq = dev->data->rx_queues[rx_queue_id]; + cpfl_rxq = dev->data->rx_queues[rx_queue_id]; + rxq = &(cpfl_rxq->base); if (rxq == NULL || !rxq->q_set) { PMD_DRV_LOG(ERR, "RX queue %u not available or setup", @@ -574,9 +595,11 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) int cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct idpf_vport *vport = dev->data->dev_private; - struct idpf_rx_queue *rxq = - dev->data->rx_queues[rx_queue_id]; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id]; + struct idpf_rx_queue *rxq = &(cpfl_rxq->base); int err = 0; err = idpf_vc_rxq_config(vport, rxq); @@ -609,15 +632,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) int cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - struct idpf_tx_queue *txq; + struct cpfl_tx_queue *cpfl_txq; if (tx_queue_id >= dev->data->nb_tx_queues) return -EINVAL; - txq = dev->data->tx_queues[tx_queue_id]; + cpfl_txq = dev->data->tx_queues[tx_queue_id]; /* Init the RX tail register. 
*/ - IDPF_PCI_REG_WRITE(txq->qtx_tail, 0); + IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0); return 0; } @@ -625,12 +648,14 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id) int cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - struct idpf_vport *vport = dev->data->dev_private; - struct idpf_tx_queue *txq = - dev->data->tx_queues[tx_queue_id]; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct cpfl_tx_queue *cpfl_txq = + dev->data->tx_queues[tx_queue_id]; int err = 0; - err = idpf_vc_txq_config(vport, txq); + err = idpf_vc_txq_config(vport, &(cpfl_txq->base)); if (err != 0) { PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id); return err; @@ -649,7 +674,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", tx_queue_id); } else { - txq->q_started = true; + cpfl_txq->base.q_started = true; dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; } @@ -660,13 +685,17 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) int cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct cpfl_rx_queue *cpfl_rxq; struct idpf_rx_queue *rxq; int err; if (rx_queue_id >= dev->data->nb_rx_queues) return -EINVAL; + cpfl_rxq = dev->data->rx_queues[rx_queue_id]; err = idpf_vc_queue_switch(vport, rx_queue_id, true, false); if (err != 0) { PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", @@ -674,7 +703,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) return err; } - rxq = dev->data->rx_queues[rx_queue_id]; + rxq = &(cpfl_rxq->base); if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { rxq->ops->release_mbufs(rxq); idpf_qc_single_rx_queue_reset(rxq); @@ -691,13 +720,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) int cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct cpfl_tx_queue *cpfl_txq; struct idpf_tx_queue *txq; int err; if (tx_queue_id >= dev->data->nb_tx_queues) return -EINVAL; + cpfl_txq = dev->data->tx_queues[tx_queue_id]; err = idpf_vc_queue_switch(vport, tx_queue_id, false, false); if (err != 0) { PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", @@ -705,7 +738,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return err; } - txq = dev->data->tx_queues[tx_queue_id]; + txq = &(cpfl_txq->base); txq->ops->release_mbufs(txq); if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { idpf_qc_single_tx_queue_reset(txq); @@ -718,28 +751,83 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return 0; } +static void +cpfl_rx_queue_release(void *rxq) +{ + struct cpfl_rx_queue *cpfl_rxq = rxq; + struct idpf_rx_queue *q = NULL; + + if (cpfl_rxq == NULL) + return; + + q = &(cpfl_rxq->base); + + /* Split queue */ + if (q->bufq1 != NULL && q->bufq2 != NULL) { + q->bufq1->ops->release_mbufs(q->bufq1); + rte_free(q->bufq1->sw_ring); + rte_memzone_free(q->bufq1->mz); + rte_free(q->bufq1); + q->bufq2->ops->release_mbufs(q->bufq2); + rte_free(q->bufq2->sw_ring); + 
rte_memzone_free(q->bufq2->mz); + rte_free(q->bufq2); + rte_memzone_free(q->mz); + rte_free(cpfl_rxq); + return; + } + + /* Single queue */ + q->ops->release_mbufs(q); + rte_free(q->sw_ring); + rte_memzone_free(q->mz); + rte_free(cpfl_rxq); +} + void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - idpf_qc_rx_queue_release(dev->data->rx_queues[qid]); + cpfl_rx_queue_release(dev->data->rx_queues[qid]); +} + +static void +cpfl_tx_queue_release(void *txq) +{ + struct cpfl_tx_queue *cpfl_txq = txq; + struct idpf_tx_queue *q = NULL; + + if (cpfl_txq == NULL) + return; + + q = &(cpfl_txq->base); + + if (q->complq) { + rte_memzone_free(q->complq->mz); + rte_free(q->complq); + } + + q->ops->release_mbufs(q); + rte_free(q->sw_ring); + rte_memzone_free(q->mz); + rte_free(cpfl_txq); } void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - idpf_qc_tx_queue_release(dev->data->tx_queues[qid]); + cpfl_tx_queue_release(dev->data->tx_queues[qid]); } void cpfl_stop_queues(struct rte_eth_dev *dev) { - struct idpf_rx_queue *rxq; - struct idpf_tx_queue *txq; + struct cpfl_rx_queue *cpfl_rxq; + struct cpfl_tx_queue *cpfl_txq; int i; for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - if (rxq == NULL) + cpfl_rxq = dev->data->rx_queues[i]; + if (cpfl_rxq == NULL) continue; if (cpfl_rx_queue_stop(dev, i) != 0) @@ -747,8 +835,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev) } for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = dev->data->tx_queues[i]; - if (txq == NULL) + cpfl_txq = dev->data->tx_queues[i]; + if (cpfl_txq == NULL) continue; if (cpfl_tx_queue_stop(dev, i) != 0) @@ -760,9 +848,11 @@ cpfl_stop_queues(struct rte_eth_dev *dev) void cpfl_set_rx_function(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); #ifdef RTE_ARCH_X86 - struct idpf_rx_queue *rxq; + struct cpfl_rx_queue *cpfl_rxq; int i; if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH && @@ -788,8 +878,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev) if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { if (vport->rx_vec_allowed) { for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - (void)idpf_qc_splitq_rx_vec_setup(rxq); + cpfl_rxq = dev->data->rx_queues[i]; + (void)idpf_qc_splitq_rx_vec_setup(&(cpfl_rxq->base)); } #ifdef CC_AVX512_SUPPORT if (vport->rx_use_avx512) { @@ -808,8 +898,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev) } else { if (vport->rx_vec_allowed) { for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - (void)idpf_qc_singleq_rx_vec_setup(rxq); + cpfl_rxq = dev->data->rx_queues[i]; + (void)idpf_qc_singleq_rx_vec_setup(&(cpfl_rxq->base)); } #ifdef CC_AVX512_SUPPORT if (vport->rx_use_avx512) { @@ -858,10 +948,12 @@ cpfl_set_rx_function(struct rte_eth_dev *dev) void cpfl_set_tx_function(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); #ifdef RTE_ARCH_X86 #ifdef CC_AVX512_SUPPORT - struct idpf_tx_queue *txq; + struct cpfl_tx_queue *cpfl_txq; int i; #endif /* CC_AVX512_SUPPORT */ @@ -876,8 +968,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev) vport->tx_use_avx512 = true; if (vport->tx_use_avx512) { for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = 
dev->data->tx_queues[i]; - idpf_qc_tx_vec_avx512_setup(txq); + cpfl_txq = dev->data->tx_queues[i]; + idpf_qc_tx_vec_avx512_setup(&(cpfl_txq->base)); } } } @@ -914,10 +1006,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev) #ifdef CC_AVX512_SUPPORT if (vport->tx_use_avx512) { for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = dev->data->tx_queues[i]; - if (txq == NULL) + cpfl_txq = dev->data->tx_queues[i]; + if (cpfl_txq == NULL) continue; - idpf_qc_tx_vec_avx512_setup(txq); + idpf_qc_tx_vec_avx512_setup(&(cpfl_txq->base)); } PMD_DRV_LOG(NOTICE, "Using Single AVX512 Vector Tx (port %d).", diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index fb267d38c8..e241afece9 100644 --- a/drivers/net/cpfl/cpfl_rxtx.h +++ b/drivers/net/cpfl/cpfl_rxtx.h @@ -23,6 +23,34 @@ #define CPFL_SUPPORT_CHAIN_NUM 5 +struct cpfl_rxq_hairpin_info { + bool hairpin_q; /* if rx queue is a hairpin queue */ + /* only valid if the hairpin queue pair crosses vport */ + bool hairpin_cv; + uint16_t peer_txp; +}; + +struct cpfl_rx_queue { + struct idpf_rx_queue base; + struct cpfl_rxq_hairpin_info hairpin_info; +}; + +struct cpfl_txq_hairpin_info { + /* only valid for hairpin queue */ + bool hairpin_q; + /* only valid if the hairpin queue pair crosses vport */ + bool hairpin_cv; + uint16_t peer_rxq_id; + uint16_t peer_rxp; + bool bound; + uint16_t complq_peer_rxq_id; +}; + +struct cpfl_tx_queue { + struct idpf_tx_queue base; + struct cpfl_txq_hairpin_info hairpin_info; +}; + int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h index 665418d27d..8d0b825f95 100644 --- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h +++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h @@ -76,15 +76,17 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq) static inline int cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev) { - struct idpf_vport *vport = dev->data->dev_private; - struct idpf_rx_queue *rxq; + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct cpfl_rx_queue *cpfl_rxq; int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH; for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - default_ret = cpfl_rx_vec_queue_default(rxq); + cpfl_rxq = dev->data->rx_queues[i]; + default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base); if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { - splitq_ret = cpfl_rx_splitq_vec_default(rxq); + splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base); ret = splitq_ret && default_ret; } else { ret = default_ret; @@ -100,12 +102,12 @@ static inline int cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev) { int i; - struct idpf_tx_queue *txq; + struct cpfl_tx_queue *cpfl_txq; int ret = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = dev->data->tx_queues[i]; - ret = cpfl_tx_vec_queue_default(txq); + cpfl_txq = dev->data->tx_queues[i]; + ret = cpfl_tx_vec_queue_default(&cpfl_txq->base); if (ret == CPFL_SCALAR_PATH) return CPFL_SCALAR_PATH; } From patchwork Tue Feb 14 11:38:49 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Liu, Mingxia" X-Patchwork-Id: 123883 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: 
from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 69C5541C49; Tue, 14 Feb 2023 13:36:45 +0100 (CET)
From: Mingxia Liu
To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com
Cc: Mingxia Liu, Xiao Wang, Junfeng Guo
Subject: [PATCH v2 2/5] net/cpfl: update device initialization for hairpin queue
Date: Tue, 14 Feb 2023 11:38:49 +0000
Message-Id: <20230214113852.3341607-3-mingxia.liu@intel.com>
In-Reply-To: <20230214113852.3341607-1-mingxia.liu@intel.com>
References: <20230118130659.976873-1-mingxia.liu@intel.com> <20230214113852.3341607-1-mingxia.liu@intel.com>

This patch updates device initialization to support hairpin queues.
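Concretely, the patch splits adapter bring-up in two: idpf_adapter_common_init() performs the generic steps (PF reset, mailbox init, API version check, ptype table), and the cpfl driver then negotiates its own capability set through the new idpf_vc_caps_get_by_info(), which sends a caller-built capabilities message instead of the idpf default. Below is a condensed sketch of that flow, mirroring the cpfl_adapter_init()/cpfl_get_caps() functions added later in this patch; error paths and most capability flags are trimmed, and it assumes the idpf common headers.

#include <idpf_common_device.h>
#include <idpf_common_virtchnl.h>

static int
cpfl_adapter_init_sketch(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps_msg = {0};
	int ret;

	/* generic part: reset PF, init mailbox, check API version,
	 * load the ptype table */
	ret = idpf_adapter_common_init(adapter);
	if (ret != 0)
		return ret;

	/* driver-specific part: request cpfl's own capability set
	 * (only one flag shown here for brevity) */
	caps_msg.csum_caps = VIRTCHNL2_CAP_TX_CSUM_L3_IPV4;

	/* sends VIRTCHNL2_OP_GET_CAPS with the caller-built message and
	 * copies the firmware response into adapter->caps */
	return idpf_vc_caps_get_by_info(adapter, &caps_msg);
}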
Signed-off-by: Beilei Xing Signed-off-by: Xiao Wang Signed-off-by: Junfeng Guo Signed-off-by: Mingxia Liu --- drivers/common/idpf/idpf_common_device.c | 51 ++++++++++++++++ drivers/common/idpf/idpf_common_device.h | 2 + drivers/common/idpf/idpf_common_virtchnl.c | 28 +++++++++ drivers/common/idpf/idpf_common_virtchnl.h | 3 + drivers/common/idpf/version.map | 2 + drivers/net/cpfl/cpfl_ethdev.c | 68 +++++++++++++++++++++- 6 files changed, 153 insertions(+), 1 deletion(-) diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c index 5475a3e52c..2d968884c6 100644 --- a/drivers/common/idpf/idpf_common_device.c +++ b/drivers/common/idpf/idpf_common_device.c @@ -362,6 +362,57 @@ idpf_adapter_init(struct idpf_adapter *adapter) return ret; } +int +idpf_adapter_common_init(struct idpf_adapter *adapter) +{ + struct idpf_hw *hw = &adapter->hw; + int ret; + + idpf_reset_pf(hw); + ret = idpf_check_pf_reset_done(hw); + if (ret != 0) { + DRV_LOG(ERR, "IDPF is still resetting"); + goto err_check_reset; + } + + ret = idpf_init_mbx(hw); + if (ret != 0) { + DRV_LOG(ERR, "Failed to init mailbox"); + goto err_check_reset; + } + + adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp", + IDPF_DFLT_MBX_BUF_SIZE, 0); + if (adapter->mbx_resp == NULL) { + DRV_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory"); + ret = -ENOMEM; + goto err_mbx_resp; + } + + ret = idpf_vc_api_version_check(adapter); + if (ret != 0) { + DRV_LOG(ERR, "Failed to check api version"); + goto err_check_api; + } + + ret = idpf_get_pkt_type(adapter); + if (ret != 0) { + DRV_LOG(ERR, "Failed to set ptype table"); + goto err_check_api; + } + + return 0; + +err_check_api: + rte_free(adapter->mbx_resp); + adapter->mbx_resp = NULL; +err_mbx_resp: + idpf_ctlq_deinit(hw); +err_check_reset: + return ret; +} + + int idpf_adapter_deinit(struct idpf_adapter *adapter) { diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h index 364a60221a..185c88fcd2 100644 --- a/drivers/common/idpf/idpf_common_device.h +++ b/drivers/common/idpf/idpf_common_device.h @@ -183,6 +183,8 @@ atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops) __rte_internal int idpf_adapter_init(struct idpf_adapter *adapter); __rte_internal +int idpf_adapter_common_init(struct idpf_adapter *adapter); +__rte_internal int idpf_adapter_deinit(struct idpf_adapter *adapter); __rte_internal int idpf_vport_init(struct idpf_vport *vport, diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c index 99d9efbb7c..7fa0074293 100644 --- a/drivers/common/idpf/idpf_common_virtchnl.c +++ b/drivers/common/idpf/idpf_common_virtchnl.c @@ -338,6 +338,34 @@ idpf_vc_caps_get(struct idpf_adapter *adapter) return 0; } +int idpf_vc_caps_get_by_info(struct idpf_adapter *adapter, + struct virtchnl2_get_capabilities *caps_info) +{ + struct virtchnl2_get_capabilities caps_msg; + struct idpf_cmd_info args; + int err; + + memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities)); + rte_memcpy(&caps_msg, caps_info, sizeof(caps_msg)); + + args.ops = VIRTCHNL2_OP_GET_CAPS; + args.in_args = (uint8_t *)&caps_msg; + args.in_args_size = sizeof(caps_msg); + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(adapter, &args); + if (err != 0) { + DRV_LOG(ERR, + "Failed to execute command of VIRTCHNL2_OP_GET_CAPS"); + return err; + } + + rte_memcpy(&adapter->caps, args.out_buffer, sizeof(caps_msg)); + + return 0; +} + int 
idpf_vc_vport_create(struct idpf_vport *vport, struct virtchnl2_create_vport *create_vport_info) diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h index d479d93c8e..6f46bef673 100644 --- a/drivers/common/idpf/idpf_common_virtchnl.h +++ b/drivers/common/idpf/idpf_common_virtchnl.h @@ -64,4 +64,7 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, __rte_internal int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 *buff_count, struct idpf_dma_mem **buffs); +__rte_internal +int idpf_vc_caps_get_by_info(struct idpf_adapter *adapter, + struct virtchnl2_get_capabilities *caps_info); #endif /* _IDPF_COMMON_VIRTCHNL_H_ */ diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map index 70334a1b03..c021669fd2 100644 --- a/drivers/common/idpf/version.map +++ b/drivers/common/idpf/version.map @@ -2,6 +2,7 @@ INTERNAL { global: idpf_adapter_deinit; + idpf_adapter_common_init; idpf_adapter_init; idpf_dp_prep_pkts; @@ -37,6 +38,7 @@ INTERNAL { idpf_vc_api_version_check; idpf_vc_caps_get; + idpf_vc_caps_get_by_info; idpf_vc_cmd_execute; idpf_vc_ctlq_post_rx_buffs; idpf_vc_ctlq_recv; diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index f799707ea7..acc6180ca4 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1154,6 +1154,72 @@ cpfl_dev_alarm_handler(void *param) rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter); } +static int +cpfl_get_caps(struct idpf_adapter *adapter) +{ + struct virtchnl2_get_capabilities caps_msg = {0}; + + caps_msg.csum_caps = + VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | + VIRTCHNL2_CAP_TX_CSUM_GENERIC | + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | + VIRTCHNL2_CAP_RX_CSUM_GENERIC; + + caps_msg.rss_caps = + VIRTCHNL2_CAP_RSS_IPV4_TCP | + VIRTCHNL2_CAP_RSS_IPV4_UDP | + VIRTCHNL2_CAP_RSS_IPV4_SCTP | + VIRTCHNL2_CAP_RSS_IPV4_OTHER | + VIRTCHNL2_CAP_RSS_IPV6_TCP | + VIRTCHNL2_CAP_RSS_IPV6_UDP | + VIRTCHNL2_CAP_RSS_IPV6_SCTP | + VIRTCHNL2_CAP_RSS_IPV6_OTHER | + VIRTCHNL2_CAP_RSS_IPV4_AH | + VIRTCHNL2_CAP_RSS_IPV4_ESP | + VIRTCHNL2_CAP_RSS_IPV4_AH_ESP | + VIRTCHNL2_CAP_RSS_IPV6_AH | + VIRTCHNL2_CAP_RSS_IPV6_ESP | + VIRTCHNL2_CAP_RSS_IPV6_AH_ESP; + + caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR | + VIRTCHNL2_CAP_PTP | + VIRTCHNL2_CAP_RX_FLEX_DESC; + + return idpf_vc_caps_get_by_info(adapter, &caps_msg); +} + +static int +cpfl_adapter_init(struct idpf_adapter *adapter) +{ + int ret = 0; + + ret = idpf_adapter_common_init(adapter); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to init idpf common adapter"); + return ret; + } + + ret = cpfl_get_caps(adapter); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to get capabilities"); + return ret; + } + + return ret; +} + static int cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -1170,7 +1236,7 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE); - ret = idpf_adapter_init(base); + ret = 
cpfl_adapter_init(base);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter");
 		goto err_adapter_init;

From patchwork Tue Feb 14 11:38:50 2023
X-Patchwork-Submitter: "Liu, Mingxia"
X-Patchwork-Id: 123884
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Mingxia Liu
To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com
Cc: Mingxia Liu
Subject: [PATCH v2 3/5] net/cpfl: add hairpin queue enable and setup
Date: Tue, 14 Feb 2023 11:38:50 +0000
Message-Id: <20230214113852.3341607-4-mingxia.liu@intel.com>
In-Reply-To: <20230214113852.3341607-1-mingxia.liu@intel.com>
References: <20230118130659.976873-1-mingxia.liu@intel.com> <20230214113852.3341607-1-mingxia.liu@intel.com>

This patch adds the hairpin queue ops:
- hairpin_cap_get
- rx_hairpin_queue_setup
- tx_hairpin_queue_setup

Signed-off-by: Mingxia Liu
---
 drivers/net/cpfl/cpfl_ethdev.c |  15 ++
 drivers/net/cpfl/cpfl_rxtx.c   | 443 ++++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_rxtx.h   |  22 +-
 3 files changed, 468 insertions(+), 12 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index acc6180ca4..ebee21a82a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -159,6 +159,18 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
 
+static int
+cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
+		     struct rte_eth_hairpin_cap *cap)
+{
+
cap->max_nb_queues = CPFL_MAX_NB_QUEUES; + cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX; + cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX; + cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC; + + return 0; +} + static int cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { @@ -1295,6 +1307,9 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = { .xstats_get = cpfl_dev_xstats_get, .xstats_get_names = cpfl_dev_xstats_get_names, .xstats_reset = cpfl_dev_xstats_reset, + .hairpin_cap_get = cpfl_hairpin_cap_get, + .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup, + .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup, }; static uint16_t diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index c7e5487366..e59cabe893 100644 --- a/drivers/net/cpfl/cpfl_rxtx.c +++ b/drivers/net/cpfl/cpfl_rxtx.c @@ -10,11 +10,77 @@ #include "cpfl_rxtx.h" #include "cpfl_rxtx_vec_common.h" +#define CPFL_NB_MBUF 4096 +#define CPFL_CACHE_SIZE 250 +#define CPFL_MBUF_SIZE 2048 +#define CPFL_P2P_RING_BUF 128 + static void cpfl_tx_queue_release(void *txq); static void cpfl_rx_queue_release(void *txq); +static inline void +reset_tx_hairpin_descq(struct idpf_tx_queue *txq) +{ + uint32_t i, size; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN; + for (i = 0; i < size; i++) + ((volatile char *)txq->desc_ring)[i] = 0; +} + +static inline void +reset_tx_hairpin_complq(struct idpf_tx_queue *cq) +{ + uint32_t i, size; + + if (!cq) { + PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL"); + return; + } + + size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN; + for (i = 0; i < size; i++) + ((volatile char *)cq->compl_ring)[i] = 0; +} + +static inline void +reset_rx_hairpin_descq(struct idpf_rx_queue *rxq) +{ + uint16_t len; + uint32_t i; + + if (!rxq) + return; + + len = rxq->nb_rx_desc; + for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++) + ((volatile char *)rxq->rx_ring)[i] = 0; +} + +static inline void +reset_rx_hairpin_bufq(struct idpf_rx_queue *rxbq) +{ + uint16_t len; + uint32_t i; + + if (!rxbq) + return; + + len = rxbq->nb_rx_desc; + for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++) + ((volatile char *)rxbq->rx_ring)[i] = 0; + + rxbq->bufq1 = NULL; + rxbq->bufq2 = NULL; +} + static uint64_t cpfl_rx_offload_convert(uint64_t offload) { @@ -763,16 +829,25 @@ cpfl_rx_queue_release(void *rxq) q = &(cpfl_rxq->base); /* Split queue */ - if (q->bufq1 != NULL && q->bufq2 != NULL) { + if (q->bufq1 != NULL) { + /* the mz is shared between Tx/Rx hairpin, let Tx_release + * free the buf. 
+ */ + if (!cpfl_rxq->hairpin_info.hairpin_q) { + rte_memzone_free(q->bufq1->mz); + if (q->bufq2 != NULL) + rte_memzone_free(q->bufq2->mz); + rte_memzone_free(q->mz); + } q->bufq1->ops->release_mbufs(q->bufq1); rte_free(q->bufq1->sw_ring); - rte_memzone_free(q->bufq1->mz); rte_free(q->bufq1); - q->bufq2->ops->release_mbufs(q->bufq2); - rte_free(q->bufq2->sw_ring); - rte_memzone_free(q->bufq2->mz); - rte_free(q->bufq2); - rte_memzone_free(q->mz); + + if (q->bufq2 != NULL) { + q->bufq2->ops->release_mbufs(q->bufq2); + rte_free(q->bufq2->sw_ring); + rte_free(q->bufq2); + } rte_free(cpfl_rxq); return; } @@ -1042,3 +1117,357 @@ cpfl_set_tx_function(struct rte_eth_dev *dev) } #endif /* RTE_ARCH_X86 */ } + +static int +cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq, + uint16_t queue_idx, uint16_t nb_desc, + struct idpf_tx_queue *peer_txq) +{ + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct idpf_adapter *adapter = vport->adapter; + struct idpf_hw *hw = &adapter->hw; + const struct rte_memzone *mz; + struct rte_mempool *mp; + uint32_t ring_size; + char pool_name[RTE_MEMPOOL_NAMESIZE]; + + mp = cpfl_vport->p2p_mp; + if (!mp) { + snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u", + dev->data->port_id); + mp = rte_pktmbuf_pool_create(pool_name, CPFL_NB_MBUF, CPFL_CACHE_SIZE, + 0, CPFL_MBUF_SIZE, dev->device->numa_node); + if (!mp) { + PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p"); + return -ENOMEM; + } + cpfl_vport->p2p_mp = mp; + } + + bufq->mp = mp; + bufq->nb_rx_desc = nb_desc; + bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx; + bufq->port_id = dev->data->port_id; + bufq->adapter = adapter; + bufq->rx_buf_len = CPFL_MBUF_SIZE - RTE_PKTMBUF_HEADROOM; + + bufq->sw_ring = rte_zmalloc("sw ring", + sizeof(struct rte_mbuf *) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (!bufq->sw_ring) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring"); + return -ENOMEM; + } + + /* Hairpin Rx buffer queue and Tx completion queue share the same HW ring */ + if (peer_txq && peer_txq->complq->mz) { + mz = peer_txq->complq->mz; + bufq->rx_ring_phys_addr = mz->iova; + bufq->rx_ring = mz->addr; + bufq->mz = mz; + } else { + ring_size = RTE_ALIGN(bufq->nb_rx_desc * CPFL_P2P_DESC_LEN, + CPFL_DMA_MEM_ALIGN); + mz = rte_eth_dma_zone_reserve(dev, "hairpin_rx_buf_ring", queue_idx, + ring_size + CPFL_P2P_RING_BUF, + CPFL_RING_BASE_ALIGN, + dev->device->numa_node); + if (!mz) { + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory" + "for hairpin RX buffer queue."); + rte_free(bufq->sw_ring); + return -ENOMEM; + } + + bufq->rx_ring_phys_addr = mz->iova; + bufq->rx_ring = mz->addr; + bufq->mz = mz; + } + reset_rx_hairpin_bufq(bufq); + bufq->q_set = true; + bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start + + queue_idx * vport->chunks_info.rx_buf_qtail_spacing); + bufq->ops = &def_rxq_ops; + + return 0; +} + +int +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, + const struct rte_eth_hairpin_conf *conf) +{ + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); + struct idpf_adapter *adapter_base = vport->adapter; + struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(adapter_base); + struct cpfl_rxq_hairpin_info *hairpin_info; + struct cpfl_vport *peer_cpfl_vport; + struct rte_eth_dev_data *dev_data; + struct cpfl_rx_queue *cpfl_rxq; + 
struct cpfl_tx_queue *peer_txq = NULL; + struct idpf_vport *peer_vport; + struct idpf_rx_queue *bufq1 = NULL; + struct idpf_rx_queue *rxq; + uint16_t peer_port = conf->peers[0].port; + uint16_t peer_q = conf->peers[0].queue; + const struct rte_memzone *mz; + uint32_t ring_size; + uint16_t qid; + int ret; + + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue."); + return -EINVAL; + } + + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 || + nb_desc > CPFL_MAX_RING_DESC || + nb_desc < CPFL_MIN_RING_DESC) { + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc); + return -EINVAL; + } + + /* Free memory if needed */ + if (dev->data->rx_queues[queue_idx]) { + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Setup Rx description queue */ + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq", + sizeof(struct cpfl_rx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!cpfl_rxq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure"); + return -ENOMEM; + } + + rxq = &(cpfl_rxq->base); + hairpin_info = &(cpfl_rxq->hairpin_info); + rxq->nb_rx_desc = nb_desc * 2; + rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx; + rxq->port_id = dev->data->port_id; + rxq->adapter = adapter_base; + hairpin_info->hairpin_q = true; + rxq->rx_buf_len = CPFL_MBUF_SIZE - RTE_PKTMBUF_HEADROOM; + + if (peer_port != dev->data->port_id) + hairpin_info->hairpin_cv = true; + hairpin_info->peer_txp = peer_port; + peer_cpfl_vport = adapter->vports[peer_port]; + peer_vport = &(peer_cpfl_vport->base); + dev_data = peer_vport->dev_data; + if (peer_q < dev_data->nb_tx_queues) + peer_txq = dev_data->tx_queues[peer_q]; + + /* Hairpin Rxq and Txq share the same HW ring */ + if (peer_txq && peer_txq->base.mz) { + mz = peer_txq->base.mz; + rxq->rx_ring_phys_addr = mz->iova; + rxq->rx_ring = mz->addr; + rxq->mz = mz; + } else { + ring_size = RTE_ALIGN(rxq->nb_rx_desc * CPFL_P2P_DESC_LEN, + CPFL_DMA_MEM_ALIGN); + mz = rte_eth_dma_zone_reserve(dev, "hairpin_rx_ring", queue_idx, + ring_size + CPFL_P2P_RING_BUF, + CPFL_RING_BASE_ALIGN, + dev->device->numa_node); + if (!mz) { + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX"); + ret = -ENOMEM; + goto free_rxq; + } + + rxq->rx_ring_phys_addr = mz->iova; + rxq->rx_ring = mz->addr; + rxq->mz = mz; + } + reset_rx_hairpin_descq(rxq); + + /* setup 1 Rx buffer queue for 1 hairpin rxq */ + bufq1 = rte_zmalloc_socket("hairpin rx bufq1", + sizeof(struct idpf_rx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!bufq1) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1."); + ret = -ENOMEM; + goto free_mz; + } + qid = 2 * queue_idx; + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc, &(peer_txq->base)); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1"); + ret = -EINVAL; + goto free_bufq1; + } + rxq->bufq1 = bufq1; + rxq->bufq2 = NULL; + rxq->q_set = true; + dev->data->rx_queues[queue_idx] = cpfl_rxq; + + return 0; +free_bufq1: + rte_free(bufq1); +free_mz: + rte_memzone_free(mz); +free_rxq: + rte_free(rxq); + + return ret; +} + +int +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, + const struct rte_eth_hairpin_conf *conf) +{ + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + + struct idpf_vport *vport = &(cpfl_vport->base); + struct idpf_adapter *adapter_base = 
vport->adapter; + struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(adapter_base); + struct cpfl_txq_hairpin_info *hairpin_info; + struct cpfl_vport *peer_cpfl_vport; + struct rte_eth_dev_data *dev_data; + struct idpf_vport *peer_vport; + struct idpf_hw *hw = &adapter_base->hw; + struct cpfl_tx_queue *cpfl_txq; + struct idpf_tx_queue *txq, *cq; + struct idpf_rx_queue *peer_rxq = NULL; + const struct rte_memzone *mz; + uint32_t ring_size; + uint16_t peer_port = conf->peers[0].port; + uint16_t peer_q = conf->peers[0].queue; + + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue."); + return -EINVAL; + } + + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 || + nb_desc > CPFL_MAX_RING_DESC || + nb_desc < CPFL_MIN_RING_DESC) { + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid", + nb_desc); + return -EINVAL; + } + + /* Free memory if needed. */ + if (dev->data->tx_queues[queue_idx]) { + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocate the TX queue data structure. */ + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq", + sizeof(struct cpfl_tx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!cpfl_txq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure"); + return -ENOMEM; + } + + txq = &(cpfl_txq->base); + hairpin_info = &(cpfl_txq->hairpin_info); + /* Txq ring length should be 2 times of Tx completion queue size. */ + txq->nb_tx_desc = nb_desc * 2; + txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx; + txq->port_id = dev->data->port_id; + hairpin_info->hairpin_q = true; + + if (peer_port != dev->data->port_id) + cpfl_txq->hairpin_info.hairpin_cv = true; + hairpin_info->peer_rxp = peer_port; + peer_cpfl_vport = adapter->vports[peer_port]; + peer_vport = &(peer_cpfl_vport->base); + hairpin_info->peer_rxq_id = peer_vport->chunks_info.rx_start_qid + conf->peers[0].queue; + dev_data = peer_vport->dev_data; + if (peer_q < dev_data->nb_rx_queues) + peer_rxq = dev_data->rx_queues[peer_q]; + + /* Hairpin Rxq and Txq share the same HW ring */ + if (peer_rxq && peer_rxq->mz) { + mz = peer_rxq->mz; + txq->tx_ring_phys_addr = mz->iova; + txq->desc_ring = mz->addr; + txq->mz = mz; + } else { + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN, + CPFL_DMA_MEM_ALIGN); + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", queue_idx, + ring_size + CPFL_P2P_RING_BUF, + CPFL_RING_BASE_ALIGN, + dev->device->numa_node); + if (!mz) { + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX"); + rte_free(txq->sw_ring); + rte_free(txq); + return -ENOMEM; + } + + txq->tx_ring_phys_addr = mz->iova; + txq->desc_ring = mz->addr; + txq->mz = mz; + } + + reset_tx_hairpin_descq(txq); + txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start + + queue_idx * vport->chunks_info.tx_qtail_spacing); + txq->ops = &def_txq_ops; + + /* Allocate the TX completion queue data structure. 
*/ + txq->complq = rte_zmalloc_socket("cpfl hairpin cq", + sizeof(struct idpf_tx_queue), + RTE_CACHE_LINE_SIZE, + dev->device->numa_node); + cq = txq->complq; + if (!cq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure"); + return -ENOMEM; + } + + cq->nb_tx_desc = nb_desc; + cq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx; + cq->port_id = dev->data->port_id; + hairpin_info->complq_peer_rxq_id = + peer_vport->chunks_info.rx_buf_start_qid + conf->peers[0].queue * 2; + + /* Hairpin Rx buffer queue and Tx completion queue share the same HW ring */ + if (peer_rxq && peer_rxq->bufq1->mz) { + mz = peer_rxq->bufq1->mz; + cq->tx_ring_phys_addr = mz->iova; + cq->compl_ring = mz->addr; + cq->mz = mz; + } else { + ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN, + CPFL_DMA_MEM_ALIGN); + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", queue_idx, + ring_size + CPFL_P2P_RING_BUF, + CPFL_RING_BASE_ALIGN, + dev->device->numa_node); + if (!mz) { + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue"); + rte_free(txq->sw_ring); + rte_free(txq); + return -ENOMEM; + } + cq->tx_ring_phys_addr = mz->iova; + cq->compl_ring = mz->addr; + cq->mz = mz; + } + + reset_tx_hairpin_complq(cq); + + txq->q_set = true; + dev->data->tx_queues[queue_idx] = cpfl_txq; + + return 0; +} diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index e241afece9..d4790d60ae 100644 --- a/drivers/net/cpfl/cpfl_rxtx.h +++ b/drivers/net/cpfl/cpfl_rxtx.h @@ -9,12 +9,17 @@ #include "cpfl_ethdev.h" /* In QLEN must be whole number of 32 descriptors. */ -#define CPFL_ALIGN_RING_DESC 32 -#define CPFL_MIN_RING_DESC 32 -#define CPFL_MAX_RING_DESC 4096 -#define CPFL_DMA_MEM_ALIGN 4096 +#define CPFL_ALIGN_RING_DESC 32 +#define CPFL_MIN_RING_DESC 32 +#define CPFL_MAX_RING_DESC 4096 +#define CPFL_DMA_MEM_ALIGN 4096 +#define CPFL_P2P_DESC_LEN 16 +#define CPFL_MAX_HAIRPINQ_RX_2_TX 1 +#define CPFL_MAX_HAIRPINQ_TX_2_RX 1 +#define CPFL_MAX_HAIRPINQ_NB_DESC 1024 +#define CPFL_MAX_NB_QUEUES 16 /* Base address of the HW descriptor ring should be 128B aligned. 
*/ -#define CPFL_RING_BASE_ALIGN 128 +#define CPFL_RING_BASE_ALIGN 128 #define CPFL_DEFAULT_RX_FREE_THRESH 32 @@ -69,4 +74,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid); void cpfl_set_rx_function(struct rte_eth_dev *dev); void cpfl_set_tx_function(struct rte_eth_dev *dev); +int +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf); +int +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, + const struct rte_eth_hairpin_conf *conf); #endif /* _CPFL_RXTX_H_ */ From patchwork Tue Feb 14 11:38:51 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Liu, Mingxia" X-Patchwork-Id: 123885 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id F10D841C49; Tue, 14 Feb 2023 13:36:57 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2805042D79; Tue, 14 Feb 2023 13:36:37 +0100 (CET) Received: from mga18.intel.com (mga18.intel.com [134.134.136.126]) by mails.dpdk.org (Postfix) with ESMTP id 7407C42D79 for ; Tue, 14 Feb 2023 13:36:35 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1676378195; x=1707914195; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=9vN95RaAtp+CfXU/5Bp9IsmUk1Pg1WhlNAMqm5YMXd4=; b=cZm1ZuHqyld9/busHgIW1QKWjEM1igkNrc359MrPOMrCSSmrr5MY1XLc arRzN8NzbT5KsJxKEXyvjw8yEr9uZQrSsPyqPELNFR1F+q8F/ptfm73ZQ UvRynKWmQ8CKW5ut4ds4ppLPrMy2hVX/N2NotrdKuKrQpnuYSbpBNR6EN Al+uETB6OTFpu3dLHKms6kLSc0J3n8nbn62Zavm1nyk4YVcJhYiH+KrE7 52A+2/ABb3Bsj9uMa4wgoRoWPwpB9910u/6SsawwTZDApraEF+6E8wY/2 QtC6G9A4aUQ4fRTStx2efo7cakAwf3FLWsbZKxOG5fMMqCnuSEZMiFaSr g==; X-IronPort-AV: E=McAfee;i="6500,9779,10620"; a="314793197" X-IronPort-AV: E=Sophos;i="5.97,296,1669104000"; d="scan'208";a="314793197" Received: from orsmga008.jf.intel.com ([10.7.209.65]) by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 14 Feb 2023 04:36:34 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6500,9779,10620"; a="699528526" X-IronPort-AV: E=Sophos;i="5.97,296,1669104000"; d="scan'208";a="699528526" Received: from dpdk-mingxial-01.sh.intel.com ([10.67.119.167]) by orsmga008.jf.intel.com with ESMTP; 14 Feb 2023 04:36:32 -0800 From: Mingxia Liu To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com Cc: Mingxia Liu , Xiao Wang , Junfeng Guo Subject: [PATCH v2 4/5] net/cpfl: support hairpin queue start and stop Date: Tue, 14 Feb 2023 11:38:51 +0000 Message-Id: <20230214113852.3341607-5-mingxia.liu@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230214113852.3341607-1-mingxia.liu@intel.com> References: <20230118130659.976873-1-mingxia.liu@intel.com> <20230214113852.3341607-1-mingxia.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org This patch adds hairpin queue start and stop.
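For reference, the ethdev-level flow that exercises this start/stop path is roughly the following minimal sketch; it is not part of the patch, and it assumes a single cpfl port whose hairpin Rx/Tx pair sits at the hypothetical queue index 1, right after one data queue pair, with error handling trimmed to bare return codes:

#include <rte_ethdev.h>

static int
hairpin_setup_and_start(uint16_t port_id, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	/* Rx hairpin queue 1 peers with Tx queue 1 on the same port
	 * (the non-cross-vport case handled in cpfl_start_queues()). */
	conf.peers[0].port = port_id;
	conf.peers[0].queue = 1;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, nb_desc, &conf);
	if (ret != 0)
		return ret;

	/* Tx hairpin queue 1 peers back with Rx queue 1. */
	ret = rte_eth_tx_hairpin_queue_setup(port_id, 1, nb_desc, &conf);
	if (ret != 0)
		return ret;

	/* rte_eth_dev_start() walks cpfl_start_queues(), which configures
	 * the hairpin queues over virtchnl and then switches them on;
	 * rte_eth_dev_stop() takes the queue stop path added below. */
	return rte_eth_dev_start(port_id);
}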
Signed-off-by: Beilei Xing Signed-off-by: Xiao Wang Signed-off-by: Junfeng Guo Signed-off-by: Mingxia Liu --- drivers/common/idpf/idpf_common_device.c | 1 + drivers/common/idpf/idpf_common_virtchnl.c | 72 ++++++- drivers/common/idpf/idpf_common_virtchnl.h | 9 + drivers/common/idpf/version.map | 3 + drivers/net/cpfl/cpfl_ethdev.c | 69 +++++- drivers/net/cpfl/cpfl_rxtx.c | 232 +++++++++++++++++++-- drivers/net/cpfl/cpfl_rxtx.h | 21 ++ drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 + 8 files changed, 381 insertions(+), 30 deletions(-) diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c index 2d968884c6..d0156eabd6 100644 --- a/drivers/common/idpf/idpf_common_device.c +++ b/drivers/common/idpf/idpf_common_device.c @@ -538,6 +538,7 @@ idpf_vport_init(struct idpf_vport *vport, err_create_vport: return ret; } + int idpf_vport_deinit(struct idpf_vport *vport) { diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c index 7fa0074293..c75f4ac68c 100644 --- a/drivers/common/idpf/idpf_common_virtchnl.c +++ b/drivers/common/idpf/idpf_common_virtchnl.c @@ -734,7 +734,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport) return err; } -static int +int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid, uint32_t type, bool on) { @@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq) return err; } +int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info, + uint16_t num_qs) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_config_rx_queues *vc_rxqs = NULL; + struct idpf_cmd_info args; + int size, err; + + size = sizeof(*vc_rxqs) + (num_qs - 1) * + sizeof(struct virtchnl2_rxq_info); + vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0); + if (vc_rxqs == NULL) { + DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues"); + err = -ENOMEM; + return err; + } + vc_rxqs->vport_id = vport->vport_id; + vc_rxqs->num_qinfo = num_qs; + memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info)); + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES; + args.in_args = (uint8_t *)vc_rxqs; + args.in_args_size = size; + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(adapter, &args); + rte_free(vc_rxqs); + if (err != 0) + DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES"); + + return err; +} + int idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq) { @@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq) return err; } +int +idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info, + uint16_t num_qs) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_config_tx_queues *vc_txqs = NULL; + struct idpf_cmd_info args; + int size, err; + + size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info); + vc_txqs = rte_zmalloc("cfg_txqs", size, 0); + if (vc_txqs == NULL) { + DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues"); + err = -ENOMEM; + return err; + } + vc_txqs->vport_id = vport->vport_id; + vc_txqs->num_qinfo = num_qs; + memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info)); + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES; + args.in_args = (uint8_t *)vc_txqs; + args.in_args_size = size; + args.out_buffer =
adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(adapter, &args); + rte_free(vc_txqs); + if (err != 0) + DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES"); + + return err; +} + int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, struct idpf_ctlq_msg *q_msg) diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h index 6f46bef673..900d79c3c7 100644 --- a/drivers/common/idpf/idpf_common_virtchnl.h +++ b/drivers/common/idpf/idpf_common_virtchnl.h @@ -67,4 +67,13 @@ int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, __rte_internal int idpf_vc_caps_get_by_info(struct idpf_adapter *adapter, struct virtchnl2_get_capabilities *caps_info); +__rte_internal +int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info, + uint16_t num_qs); +__rte_internal +int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info, + uint16_t num_qs); +__rte_internal +int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid, + uint32_t type, bool on); #endif /* _IDPF_COMMON_VIRTCHNL_H_ */ diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map index c021669fd2..764e603dfb 100644 --- a/drivers/common/idpf/version.map +++ b/drivers/common/idpf/version.map @@ -42,6 +42,7 @@ INTERNAL { idpf_vc_cmd_execute; idpf_vc_ctlq_post_rx_buffs; idpf_vc_ctlq_recv; + idpf_vc_ena_dis_one_queue; idpf_vc_irq_map_unmap_config; idpf_vc_one_msg_read; idpf_vc_ptype_info_query; @@ -54,8 +55,10 @@ INTERNAL { idpf_vc_rss_lut_get; idpf_vc_rss_lut_set; idpf_vc_rxq_config; + idpf_vc_rxq_config_by_info; idpf_vc_stats_query; idpf_vc_txq_config; + idpf_vc_txq_config_by_info; idpf_vc_vectors_alloc; idpf_vc_vectors_dealloc; idpf_vc_vport_create; diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index ebee21a82a..f08b7beb13 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -745,6 +745,9 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev) static int cpfl_start_queues(struct rte_eth_dev *dev) { + struct cpfl_vport *cpfl_vport = + (struct cpfl_vport *)dev->data->dev_private; + struct idpf_vport *vport = &(cpfl_vport->base); struct cpfl_rx_queue *cpfl_rxq; struct cpfl_tx_queue *cpfl_txq; int err = 0; @@ -755,10 +758,18 @@ cpfl_start_queues(struct rte_eth_dev *dev) if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start) continue; - err = cpfl_tx_queue_start(dev, i); - if (err != 0) { - PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i); - return err; + if (!cpfl_txq->hairpin_info.hairpin_q) { + err = cpfl_tx_queue_start(dev, i); + if (err != 0) { + PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i); + return err; + } + } else if (!cpfl_txq->hairpin_info.hairpin_cv) { + err = cpfl_set_hairpin_txqinfo(vport, cpfl_txq); + if (err) { + PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i); + return err; + } } } @@ -766,10 +777,48 @@ cpfl_start_queues(struct rte_eth_dev *dev) cpfl_rxq = dev->data->rx_queues[i]; if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start) continue; - err = cpfl_rx_queue_start(dev, i); - if (err != 0) { - PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i); - return err; + if (!cpfl_rxq->hairpin_info.hairpin_q) { + err = cpfl_rx_queue_start(dev, i); + if (err != 0) { + PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i); + return err; + } + } else if (!cpfl_rxq->hairpin_info.hairpin_cv) { + err = 
cpfl_set_hairpin_rxqinfo(vport, cpfl_rxq); + if (err) { + PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i); + return err; + } + err = cpfl_rx_queue_init(dev, i); + if (err) { + PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i); + return err; + } + } + } + + /* For non-cross-vport hairpin queues, enable the Txq and Rxq last. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + cpfl_txq = dev->data->tx_queues[i]; + if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_txq->hairpin_info.hairpin_cv) { + err = cpfl_switch_hairpin_queue(vport, i, false, true); + if (err) + PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on", + i); + else + cpfl_txq->base.q_started = true; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + cpfl_rxq = dev->data->rx_queues[i]; + if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_rxq->hairpin_info.hairpin_cv) { + err = cpfl_switch_hairpin_queue(vport, i, true, true); + if (err) + PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on", + i); + else + cpfl_rxq->base.q_started = true; } } @@ -873,6 +922,10 @@ cpfl_dev_close(struct rte_eth_dev *dev) struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter); cpfl_dev_stop(dev); + if (cpfl_vport->p2p_mp) { + rte_mempool_free(cpfl_vport->p2p_mp); + cpfl_vport->p2p_mp = NULL; + } idpf_vport_deinit(vport); adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id); diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index e59cabe893..519c0d5fe5 100644 --- a/drivers/net/cpfl/cpfl_rxtx.c +++ b/drivers/net/cpfl/cpfl_rxtx.c @@ -19,6 +19,8 @@ static void cpfl_tx_queue_release(void *txq); static void cpfl_rx_queue_release(void *txq); +static int +cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq); static inline void reset_tx_hairpin_descq(struct idpf_tx_queue *txq) @@ -637,27 +639,81 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); } else { /* Split queue */ - err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1); - if (err != 0) { - PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf"); - return err; - } - err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2); - if (err != 0) { - PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf"); - return err; + if (cpfl_rxq->hairpin_info.hairpin_q) { + err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf"); + return err; + } + } else { + err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf"); + return err; + } + err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf"); + return err; + } } rte_wmb(); /* Init the RX tail register.
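* For hairpin Rx queues only bufq1 is used and bufq2 stays NULL, hence the NULL check added below before writing the second tail.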
*/ IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail); - IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail); + if (rxq->bufq2) + IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail); } return err; } +static bool cpfl_is_hairpin_txq(u32 txq_model, bool hairpin_txq) +{ + return (txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) && hairpin_txq; +} + +static bool cpfl_is_hairpin_rxq(u32 rxq_model, bool hairpin_rxq) +{ + return (rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) && hairpin_rxq; +} + +int +cpfl_set_hairpin_rxqinfo(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq) +{ + struct virtchnl2_rxq_info rxq_info[IDPF_RXQ_PER_GRP + 1] = {0}; + struct idpf_rx_queue *rxq = &(cpfl_rxq->base); + struct idpf_rx_queue *bufq = rxq->bufq1; + + rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_P2P_RX; + rxq_info[0].queue_id = rxq->queue_id; + rxq_info[0].ring_len = rxq->nb_rx_desc; + rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr; + rxq_info[0].rx_bufq1_id = bufq->queue_id; + rxq_info[0].max_pkt_size = vport->max_pkt_len; + rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; + rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE; + + rxq_info[0].data_buffer_size = rxq->rx_buf_len; + rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT; + rxq_info[0].rx_buffer_low_watermark = 64; + + /* Buffer queue */ + rxq_info[1].type = VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER; + rxq_info[1].queue_id = bufq->queue_id; + rxq_info[1].ring_len = bufq->nb_rx_desc; + rxq_info[1].dma_ring_addr = bufq->rx_ring_phys_addr; + rxq_info[1].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; + rxq_info[1].rx_buffer_low_watermark = 64; + rxq_info[1].model = VIRTCHNL2_QUEUE_MODEL_SPLIT; + rxq_info[1].data_buffer_size = bufq->rx_buf_len; + rxq_info[1].buffer_notif_stride = CPFL_RX_BUF_STRIDE; + PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x", + vport->vport_id, rxq_info[0].queue_id); + return idpf_vc_rxq_config_by_info(vport, rxq_info, CPFL_RXQ_PER_GRP + 1); +} + int cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { @@ -668,7 +724,10 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct idpf_rx_queue *rxq = &(cpfl_rxq->base); int err = 0; - err = idpf_vc_rxq_config(vport, rxq); + if (cpfl_is_hairpin_rxq(vport->rxq_model, cpfl_rxq->hairpin_info.hairpin_q)) + err = cpfl_set_hairpin_rxqinfo(vport, cpfl_rxq); + else + err = idpf_vc_rxq_config(vport, rxq); if (err != 0) { PMD_DRV_LOG(ERR, "Fail to configure Rx queue %u", rx_queue_id); return err; @@ -711,6 +770,35 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id) return 0; } +int +cpfl_set_hairpin_txqinfo(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq) +{ + struct idpf_tx_queue *txq = &(cpfl_txq->base); + struct virtchnl2_txq_info txq_info[CPFL_RXQ_PER_GRP + 1] = {0}; + + /* txq info */ + txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr; + txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_P2P_TX; + txq_info[0].queue_id = txq->queue_id; + txq_info[0].ring_len = txq->nb_tx_desc; + txq_info[0].tx_compl_queue_id = txq->complq->queue_id; + txq_info[0].relative_queue_id = txq->queue_id; + txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id; + txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT; + txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; + + /* tx completion queue info */ + txq_info[1].dma_ring_addr = txq->complq->tx_ring_phys_addr; + txq_info[1].type = VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION; + txq_info[1].queue_id = txq->complq->queue_id; + txq_info[1].ring_len = txq->complq->nb_tx_desc; + 
txq_info[1].peer_rx_queue_id = cpfl_txq->hairpin_info.complq_peer_rxq_id; + txq_info[1].model = VIRTCHNL2_QUEUE_MODEL_SPLIT; + txq_info[1].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; + + return idpf_vc_txq_config_by_info(vport, txq_info, CPFL_RXQ_PER_GRP + 1); +} + int cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { @@ -721,7 +809,10 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) dev->data->tx_queues[tx_queue_id]; int err = 0; - err = idpf_vc_txq_config(vport, &(cpfl_txq->base)); + if (cpfl_is_hairpin_txq(vport->txq_model, cpfl_txq->hairpin_info.hairpin_q)) + err = cpfl_set_hairpin_txqinfo(vport, cpfl_txq); + else + err = idpf_vc_txq_config(vport, &(cpfl_txq->base)); if (err != 0) { PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id); return err; @@ -762,7 +853,11 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -EINVAL; cpfl_rxq = dev->data->rx_queues[rx_queue_id]; - err = idpf_vc_queue_switch(vport, rx_queue_id, true, false); + if (cpfl_rxq->hairpin_info.hairpin_q) + err = cpfl_switch_hairpin_queue(vport, rx_queue_id, true, false); + else + err = idpf_vc_queue_switch(vport, rx_queue_id, true, false); + if (err != 0) { PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", rx_queue_id); @@ -775,10 +870,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) idpf_qc_single_rx_queue_reset(rxq); } else { rxq->bufq1->ops->release_mbufs(rxq->bufq1); - rxq->bufq2->ops->release_mbufs(rxq->bufq2); - idpf_qc_split_rx_queue_reset(rxq); + if (rxq->bufq2) + rxq->bufq2->ops->release_mbufs(rxq->bufq2); + if (cpfl_rxq->hairpin_info.hairpin_q) { + reset_rx_hairpin_descq(rxq); + reset_rx_hairpin_bufq(rxq->bufq1); + } else { + idpf_qc_split_rx_queue_reset(rxq); + } } - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + if (!cpfl_rxq->hairpin_info.hairpin_q) + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -797,7 +899,11 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -EINVAL; cpfl_txq = dev->data->tx_queues[tx_queue_id]; - err = idpf_vc_queue_switch(vport, tx_queue_id, false, false); + + if (cpfl_txq->hairpin_info.hairpin_q) + err = cpfl_switch_hairpin_queue(vport, tx_queue_id, false, false); + else + err = idpf_vc_queue_switch(vport, tx_queue_id, false, false); if (err != 0) { PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", tx_queue_id); @@ -809,10 +915,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { idpf_qc_single_tx_queue_reset(txq); } else { - idpf_qc_split_tx_descq_reset(txq); - idpf_qc_split_tx_complq_reset(txq->complq); + if (cpfl_txq->hairpin_info.hairpin_q) { + reset_tx_hairpin_descq(txq); + reset_tx_hairpin_complq(txq->complq); + } else { + idpf_qc_split_tx_descq_reset(txq); + idpf_qc_split_tx_complq_reset(txq->complq); + } } - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + if (!cpfl_txq->hairpin_info.hairpin_q) + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -904,7 +1017,7 @@ cpfl_stop_queues(struct rte_eth_dev *dev) cpfl_rxq = dev->data->rx_queues[i]; if (cpfl_rxq == NULL) continue; - + /* hairpin queue is also stopped here. 
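* cpfl_rx_queue_stop() routes a hairpin queue through cpfl_switch_hairpin_queue() instead of idpf_vc_queue_switch().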
*/ if (cpfl_rx_queue_stop(dev, i) != 0) PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i); } @@ -954,6 +1067,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev) if (vport->rx_vec_allowed) { for (i = 0; i < dev->data->nb_rx_queues; i++) { cpfl_rxq = dev->data->rx_queues[i]; + if (cpfl_rxq->hairpin_info.hairpin_q) + continue; (void)idpf_qc_splitq_rx_vec_setup(&(cpfl_rxq->base)); } #ifdef CC_AVX512_SUPPORT @@ -1118,6 +1233,45 @@ cpfl_set_tx_function(struct rte_eth_dev *dev) #endif /* RTE_ARCH_X86 */ } +int +cpfl_switch_hairpin_queue(struct idpf_vport *vport, uint16_t qid, + bool rx, bool on) +{ + uint32_t type; + int err, queue_id; + + type = rx ? VIRTCHNL2_QUEUE_TYPE_P2P_RX : VIRTCHNL2_QUEUE_TYPE_P2P_TX; + + /* switch p2p txq/rxq */ + if (type == VIRTCHNL2_QUEUE_TYPE_P2P_RX) + queue_id = vport->chunks_info.rx_start_qid + qid; + else + queue_id = vport->chunks_info.tx_start_qid + qid; + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); + if (err) + return err; + + /* switch p2p tx completion queue */ + if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + type = VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION; + queue_id = vport->chunks_info.tx_compl_start_qid + qid; + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); + if (err) + return err; + } + + /* switch p2p rx buffer queue */ + if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + type = VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER; + queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid; + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); + if (err) + return err; + } + + return err; +} + static int cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq, uint16_t queue_idx, uint16_t nb_desc, @@ -1471,3 +1625,39 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return 0; } + +static int +cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq) +{ + volatile struct virtchnl2_p2p_rx_buf_desc *rxd; + struct rte_mbuf *mbuf = NULL; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!mbuf)) { + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i]; + rxd->reserve0 = 0; + rxd->pkt_addr = dma_addr; + + rxq->sw_ring[i] = mbuf; + } + + rxq->nb_rx_hold = 0; + /* The value written in the RX buffer queue tail register, must be a multiple of 8.*/ + rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE; + + return 0; +} diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index d4790d60ae..4803ad555b 100644 --- a/drivers/net/cpfl/cpfl_rxtx.h +++ b/drivers/net/cpfl/cpfl_rxtx.h @@ -28,6 +28,21 @@ #define CPFL_SUPPORT_CHAIN_NUM 5 +#define CPFL_RX_BUF_STRIDE 64 + +#define CPFL_RXQ_PER_GRP 1 + +/* The value written in the RX buffer queue tail register, + * and in WritePTR field in the TX completion queue context, + * must be a multiple of 8. 
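+ * Ring lengths are multiples of CPFL_ALIGN_RING_DESC (32), so an initial tail of nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE remains 8-aligned.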
+ */ +#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8 + +struct virtchnl2_p2p_rx_buf_desc { + __le64 reserve0; + __le64 pkt_addr; /* Packet buffer address */ +}; + struct cpfl_rxq_hairpin_info { bool hairpin_q; /* if rx queue is a hairpin queue */ /* only valid if the hairpin queue pair crosses vport */ @@ -74,6 +89,12 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid); void cpfl_set_rx_function(struct rte_eth_dev *dev); void cpfl_set_tx_function(struct rte_eth_dev *dev); +int cpfl_switch_hairpin_queue(struct idpf_vport *vport, uint16_t qid, + bool rx, bool on); +int +cpfl_set_hairpin_txqinfo(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq); +int +cpfl_set_hairpin_rxqinfo(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq); int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf); diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h index 8d0b825f95..7d4d46b833 100644 --- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h +++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h @@ -86,6 +86,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev) cpfl_rxq = dev->data->rx_queues[i]; default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base); if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (cpfl_rxq->hairpin_info.hairpin_q) + continue; splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base); ret = splitq_ret && default_ret; } else { @@ -107,6 +109,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { cpfl_txq = dev->data->tx_queues[i]; + if (cpfl_txq->hairpin_info.hairpin_q) + continue; ret = cpfl_tx_vec_queue_default(&cpfl_txq->base); if (ret == CPFL_SCALAR_PATH) return CPFL_SCALAR_PATH; From patchwork Tue Feb 14 11:38:52 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Liu, Mingxia" X-Patchwork-Id: 123886 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4B17541C49; Tue, 14 Feb 2023 13:37:06 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A2AA842DB8; Tue, 14 Feb 2023 13:36:39 +0100 (CET) Received: from mga18.intel.com (mga18.intel.com [134.134.136.126]) by mails.dpdk.org (Postfix) with ESMTP id 43F1042DA5 for ; Tue, 14 Feb 2023 13:36:37 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1676378197; x=1707914197; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=RJNJuqZzfgx1t432HkK4BQ9sVn19wjVgqD+koZA42TY=; b=eB3oba3xCyKIMJLi9olbIQss/RTZUvNeRLBFmhkPk5pBGGwSpjCUUtZj AaJBoBs7LuIThuPY7e2ZpaTn5uM7tWGiGQF4oQg3qYuok1bUskLpvpXVs uDLfYltTEp/K4z6sMlBzY23bDk/WrPRuoB2qsomMbgg/9U2/h1+5uFFWh H2N0sB+XFp+bGyCHwmhZfhU4ojSCJ4Z60MdiD7NxMMex3zvrONeMcYIzx cJk0PqjvYQwdGtfHOsnlSH+i3PLvpbzfBkGxUSw55BK11rgLzGUXQUUo7 0kcOqYHNH0KnFzW+4+8g9Y8JgPCLlFR8bt1ZMFdw/P4aIvO5mUKszy0jy w==; X-IronPort-AV: E=McAfee;i="6500,9779,10620"; a="314793207" X-IronPort-AV: E=Sophos;i="5.97,296,1669104000"; d="scan'208";a="314793207" Received: from orsmga008.jf.intel.com ([10.7.209.65]) by orsmga106.jf.intel.com with 
ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 14 Feb 2023 04:36:36 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6500,9779,10620"; a="699528535" X-IronPort-AV: E=Sophos;i="5.97,296,1669104000"; d="scan'208";a="699528535" Received: from dpdk-mingxial-01.sh.intel.com ([10.67.119.167]) by orsmga008.jf.intel.com with ESMTP; 14 Feb 2023 04:36:35 -0800 From: Mingxia Liu To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com Cc: Mingxia Liu , Xiao Wang , Junfeng Guo Subject: [PATCH v2 5/5] net/cpfl: adjust RSS LUT to exclude hairpin queue Date: Tue, 14 Feb 2023 11:38:52 +0000 Message-Id: <20230214113852.3341607-6-mingxia.liu@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230214113852.3341607-1-mingxia.liu@intel.com> References: <20230118130659.976873-1-mingxia.liu@intel.com> <20230214113852.3341607-1-mingxia.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org RSS should direct traffic only to the normal data Rx queues, so when a hairpin queue is configured, the RSS LUT should be adjusted to exclude it. Signed-off-by: Xiao Wang Signed-off-by: Junfeng Guo Signed-off-by: Mingxia Liu --- drivers/net/cpfl/cpfl_ethdev.c | 38 ++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index f08b7beb13..014735b2b0 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -446,7 +446,8 @@ cpfl_init_rss(struct idpf_vport *vport) { struct rte_eth_rss_conf *rss_conf; struct rte_eth_dev_data *dev_data; - uint16_t i, nb_q; + struct cpfl_rx_queue *cpfl_rxq; + uint16_t i, nb_q, max_nb_data_q; int ret = 0; dev_data = vport->dev_data; @@ -465,8 +466,16 @@ cpfl_init_rss(struct idpf_vport *vport) vport->rss_key_size); } + /* RSS only to the data queues */ + max_nb_data_q = nb_q; + if (nb_q > 1) { + cpfl_rxq = dev_data->rx_queues[nb_q - 1]; + if (cpfl_rxq && cpfl_rxq->hairpin_info.hairpin_q) + max_nb_data_q = nb_q - 1; + } + for (i = 0; i < vport->rss_lut_size; i++) - vport->rss_lut[i] = i % nb_q; + vport->rss_lut[i] = i % max_nb_data_q; vport->rss_hf = IDPF_DEFAULT_RSS_HASH_EXPANDED; @@ -673,8 +682,6 @@ cpfl_dev_configure(struct rte_eth_dev *dev) (struct cpfl_vport *)dev->data->dev_private; struct idpf_vport *vport = &(cpfl_vport->base); struct rte_eth_conf *conf = &dev->data->dev_conf; - struct idpf_adapter *adapter = vport->adapter; - int ret; if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) { PMD_INIT_LOG(ERR, "Setting link speed is not supported"); @@ -713,17 +720,6 @@ cpfl_dev_configure(struct rte_eth_dev *dev) return -ENOTSUP; } - if (adapter->caps.rss_caps != 0 && dev->data->nb_rx_queues != 0) { - ret = cpfl_init_rss(vport); - if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to init rss"); - return ret; - } - } else { - PMD_INIT_LOG(ERR, "RSS is not supported."); - return -1; - } - vport->max_pkt_len = (dev->data->mtu == 0) ?
CPFL_DEFAULT_MTU : dev->data->mtu + CPFL_ETH_OVERHEAD; @@ -748,11 +744,23 @@ cpfl_start_queues(struct rte_eth_dev *dev) struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private; struct idpf_vport *vport = &(cpfl_vport->base); + struct idpf_adapter *adapter = vport->adapter; struct cpfl_rx_queue *cpfl_rxq; struct cpfl_tx_queue *cpfl_txq; int err = 0; int i; + if (adapter->caps.rss_caps != 0 && dev->data->nb_rx_queues != 0) { + err = cpfl_init_rss(vport); + if (err != 0) { + PMD_INIT_LOG(ERR, "Failed to init rss"); + return err; + } + } else { + PMD_INIT_LOG(ERR, "RSS is not supported."); + return -1; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { cpfl_txq = dev->data->tx_queues[i]; if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)