From patchwork Fri Oct 30 14:16:38 2015
X-Patchwork-Submitter: Zhe Tao
X-Patchwork-Id: 8401
From: Zhe Tao
To: dev@dpdk.org
Date: Fri, 30 Oct 2015 22:16:38 +0800
Message-Id: <1446214599-14669-5-git-send-email-zhe.tao@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1446214599-14669-1-git-send-email-zhe.tao@intel.com>
References: <1446210115-13927-1-git-send-email-zhe.tao@intel.com>
 <1446214599-14669-1-git-send-email-zhe.tao@intel.com>
Subject: [dpdk-dev] [PATCH 4/4 v4] add RX and TX selection function for FVL

To support FVL, the PMD can now select which RX and TX functions should be
used according to the queue configuration.

Signed-off-by: Zhe Tao
---
 doc/guides/rel_notes/release_2_2.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c       |  20 +++-
 drivers/net/i40e/i40e_ethdev.h       |   6 ++
 drivers/net/i40e/i40e_ethdev_vf.c    |  28 ++++--
 drivers/net/i40e/i40e_rxtx.c         | 182 ++++++++++++++++++++++++++++++++---
 drivers/net/i40e/i40e_rxtx.h         |   6 ++
 drivers/net/i40e/i40e_rxtx_vec.c     |  32 ++++++
 7 files changed, 258 insertions(+), 20 deletions(-)
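
For review convenience, here is an illustrative sketch (not driver code) of the
selection policy that i40e_set_rx_function() in this patch implements: the burst
callback is chosen once, at configure/init time, from the device and queue
state, and the datapath afterwards only calls through the selected pointer. All
type and function names in the sketch are simplified placeholders; only the
decision order mirrors the patch.

/* Illustrative sketch, not driver code: how a single Rx burst callback is
 * picked once per device from the configured capabilities. */
#include <stdbool.h>
#include <stdint.h>

struct pkt;                     /* placeholder for the mbuf type */
typedef uint16_t (*rx_burst_fn)(void *rxq, struct pkt **pkts, uint16_t n);

/* Stand-ins for the real burst implementations. */
static uint16_t rx_scalar(void *q, struct pkt **p, uint16_t n)        { (void)q; (void)p; return n; }
static uint16_t rx_bulk_alloc(void *q, struct pkt **p, uint16_t n)    { (void)q; (void)p; return n; }
static uint16_t rx_vec(void *q, struct pkt **p, uint16_t n)           { (void)q; (void)p; return n; }
static uint16_t rx_scattered(void *q, struct pkt **p, uint16_t n)     { (void)q; (void)p; return n; }
static uint16_t rx_scattered_vec(void *q, struct pkt **p, uint16_t n) { (void)q; (void)p; return n; }

struct rx_caps {
	bool scattered_rx;          /* packets may span several buffers      */
	bool rx_vec_allowed;        /* every queue passed the vector checks  */
	bool rx_bulk_alloc_allowed; /* bulk-allocation preconditions hold    */
};

/* Chosen once at configure/init time; the hot path is then only an indirect
 * call through the selected pointer, with no per-packet branching. */
static rx_burst_fn
select_rx_burst(const struct rx_caps *c)
{
	if (c->scattered_rx)
		return c->rx_vec_allowed ? rx_scattered_vec : rx_scattered;
	if (c->rx_vec_allowed)
		return rx_vec;
	if (c->rx_bulk_alloc_allowed)
		return rx_bulk_alloc;
	return rx_scalar;
}

The Tx side follows the same idea: i40e_set_tx_function_flag() narrows the
allowed paths per queue (simple/vector), and i40e_set_tx_function() then
installs a single callback for the device.
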
diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
index bc9b00f..c78ceca 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -39,6 +39,10 @@ Drivers
   Fixed i40e issue that occurred when a DPDK application didn't initialize
   ports if memory wasn't available on socket 0.
 
+* **i40e: Add vector PMD support for FVL.**
+
+  Add vector RX/TX support for FVL
+
 * **vhost: Fixed Qemu shutdown.**
 
   Fixed issue with libvirt ``virsh destroy`` not killing the VM.
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..153be45 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -403,8 +403,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	 * has already done this work. Only check we don't need a different
 	 * RX function */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		if (dev->data->scattered_rx)
-			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		i40e_set_rx_function(dev);
+		i40e_set_tx_function(dev);
 		return 0;
 	}
 	pci_dev = dev->pci_dev;
@@ -670,10 +670,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 static int
 i40e_dev_configure(struct rte_eth_dev *dev)
 {
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
 	int ret;
 
+	/* Initialize to TRUE. If any of Rx queues doesn't meet the
+	 * bulk allocation or vector Rx preconditions we will reset it.
+	 */
+	ad->rx_bulk_alloc_allowed = true;
+	ad->rx_vec_allowed = true;
+	ad->tx_simple_allowed = true;
+	ad->tx_vec_allowed = true;
+
 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
 		ret = i40e_fdir_setup(pf);
 		if (ret != I40E_SUCCESS) {
@@ -3707,6 +3717,9 @@ i40e_dev_tx_init(struct i40e_pf *pf)
 		if (ret != I40E_SUCCESS)
 			break;
 	}
+	if (ret == I40E_SUCCESS)
+		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+				     ->eth_dev);
 
 	return ret;
 }
@@ -3733,6 +3746,9 @@ i40e_dev_rx_init(struct i40e_pf *pf)
 			break;
 		}
 	}
+	if (ret == I40E_SUCCESS)
+		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+				     ->eth_dev);
 
 	return ret;
 }
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..ab78c54 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -462,6 +462,12 @@ struct i40e_adapter {
 		struct i40e_pf pf;
 		struct i40e_vf vf;
 	};
+
+	/* for vector PMD */
+	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
+	bool tx_simple_allowed;
+	bool tx_vec_allowed;
 };
 
 int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..b8ebacb 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1182,8 +1182,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 	 * has already done this work.
 	 */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		if (eth_dev->data->scattered_rx)
-			eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		i40e_set_rx_function(eth_dev);
+		i40e_set_tx_function(eth_dev);
 		return 0;
 	}
 
@@ -1277,6 +1277,17 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
 static int
 i40evf_dev_configure(struct rte_eth_dev *dev)
 {
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+	 * allocation or vector Rx preconditions we will reset it.
+	 */
+	ad->rx_bulk_alloc_allowed = true;
+	ad->rx_vec_allowed = true;
+	ad->tx_simple_allowed = true;
+	ad->tx_vec_allowed = true;
+
 	return i40evf_init_vlan(dev);
 }
 
@@ -1508,7 +1519,6 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 	if (dev_data->dev_conf.rxmode.enable_scatter ||
 	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
-		dev->rx_pkt_burst = i40e_recv_scattered_pkts;
 	}
 
 	return 0;
@@ -1519,6 +1529,7 @@ i40evf_rx_init(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	uint16_t i;
+	int ret = I40E_SUCCESS;
 	struct i40e_rx_queue **rxq =
 		(struct i40e_rx_queue **)dev->data->rx_queues;
 
@@ -1526,11 +1537,14 @@ i40evf_rx_init(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (!rxq[i] || !rxq[i]->q_set)
 			continue;
-		if (i40evf_rxq_init(dev, rxq[i]) < 0)
-			return -EFAULT;
+		ret = i40evf_rxq_init(dev, rxq[i]);
+		if (ret != I40E_SUCCESS)
+			break;
 	}
+	if (ret == I40E_SUCCESS)
+		i40e_set_rx_function(dev);
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -1543,6 +1557,8 @@ i40evf_tx_init(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+	i40e_set_tx_function(dev);
 }
 
 static inline void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d37cbde..8731712 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct i40e_vsi *vsi;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct i40e_rx_queue *rxq;
 	const struct rte_memzone *rz;
 	uint32_t ring_size;
@@ -2213,13 +2215,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
 
-	if (!use_def_burst_func && !dev->data->scattered_rx) {
+	if (!use_def_burst_func) {
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
 			     "used on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
-		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -2227,6 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
 			     "not enabled on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
+		ad->rx_bulk_alloc_allowed = false;
 	}
 
 	return 0;
@@ -2488,14 +2490,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->data->tx_queues[queue_idx] = txq;
 
 	/* Use a simple TX queue without offloads or multi segs if possible */
-	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
-	    (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx path");
-		dev->tx_pkt_burst = i40e_xmit_pkts_simple;
-	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx path");
-		dev->tx_pkt_burst = i40e_xmit_pkts;
-	}
+	i40e_set_tx_function_flag(dev, txq);
 
 	return 0;
 }
@@ -2564,6 +2559,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
 {
 	uint16_t i;
 
+	/* SSE Vector driver has a different way of releasing mbufs. */
+	if (rxq->rx_using_sse) {
+		i40e_rx_queue_release_mbufs_vec(rxq);
+		return;
+	}
+
 	if (!rxq || !rxq->sw_ring) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
 		return;
@@ -2837,7 +2838,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 	int err = I40E_SUCCESS;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
-	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
 	uint16_t pf_q = rxq->reg_idx;
 	uint16_t buf_size;
 	struct i40e_hmc_obj_rxq rx_ctx;
@@ -2893,7 +2893,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 	/* Check if scattered RX needs to be used. */
 	if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
-		dev->rx_pkt_burst = i40e_recv_scattered_pkts;
 	}
 
 	/* Init the RX tail regieter. */
@@ -3064,7 +3063,159 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 	return I40E_SUCCESS;
 }
 
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint16_t rx_using_sse, i;
+	/* In order to allow Vector Rx there are a few configuration
+	 * conditions to be met and Rx Bulk Allocation should be allowed.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+		    !ad->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+				     " Vector Rx preconditions",
+				     dev->data->port_id);
+
+			ad->rx_vec_allowed = false;
+		}
+		if (ad->rx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				struct i40e_rx_queue *rxq =
+					dev->data->rx_queues[i];
+
+				if (i40e_rxq_vec_setup(rxq)) {
+					ad->rx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (dev->data->scattered_rx) {
+		/* Set the non-LRO scattered callback: there are Vector and
+		 * single allocation versions.
+		 */
+		if (ad->rx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+				     "callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+				     "allocation callback (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		}
+	/* If parameters allow we are going to choose between the following
+	 * callbacks:
+	 *    - Vector
+	 *    - Bulk Allocation
+	 *    - Single buffer allocation (the simplest one)
+	 */
+	} else if (ad->rx_vec_allowed) {
+		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+			     "burst size no less than %d (port=%d).",
+			     RTE_I40E_DESCS_PER_LOOP,
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts_vec;
+	} else if (ad->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+			     "satisfied. Rx Burst Bulk Alloc function "
+			     "will be used on port=%d.",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+			     "satisfied, or Scattered Rx is requested "
+			     "(port=%d).",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts;
+	}
+
+	/* Propagate information about RX function choice through all queues. */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rx_using_sse =
+			(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+			 dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+			rxq->rx_using_sse = rx_using_sse;
+		}
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
+	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+	    && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+		if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+			PMD_INIT_LOG(DEBUG, "Vector tx"
+				     " can be enabled on this txq.");
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	} else {
+		ad->tx_simple_allowed = false;
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int i;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (ad->tx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				struct i40e_tx_queue *txq =
+					dev->data->tx_queues[i];
+
+				if (i40e_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (ad->tx_simple_allowed) {
+		if (ad->tx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+		}
+	} else {
+		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+		dev->tx_pkt_burst = i40e_xmit_pkts;
+	}
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+	return -1;
+}
+
 uint16_t __attribute__((weak))
 i40e_recv_pkts_vec(
 	void __rte_unused *rx_queue,
@@ -3089,6 +3240,12 @@ i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 	return -1;
 }
 
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+	return -1;
+}
+
 void __attribute__((weak))
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
@@ -3102,3 +3259,4 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
 {
 	return 0;
 }
+
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 98d6e90..39cb95a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -132,6 +132,7 @@ struct i40e_rx_queue {
 	uint8_t hs_mode; /* Header Split mode */
 	bool q_set; /**< indicate if rx queue has been configured */
 	bool rx_deferred_start; /**< don't start this queue in dev start */
+	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+	/* whithout rx ol_flags, no VP flag report */
+	if (rxmode->hw_vlan_strip != 0 ||
+	    rxmode->hw_vlan_extend != 0)
+		return -1;
+#endif
+
+	/* no fdir support */
+	if (fconf->mode != RTE_FDIR_MODE_NONE)
+		return -1;
+
+	/* - no csum error report support
+	 * - no header split support
+	 */
+	if (rxmode->hw_ip_checksum == 1 ||
+	    rxmode->header_split == 1)
+		return -1;
+
+	return 0;
+#else
+	RTE_SET_USED(dev);
+	return -1;
+#endif
+}
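
One implementation note on the stubs at the end of i40e_rxtx.c: they rely on
weak symbols, so the driver still links when CONFIG_RTE_I40E_INC_VECTOR is set
to 'n'. When i40e_rxtx_vec.c is compiled, its strong definitions override the
weak ones at link time; otherwise the weak stubs report the vector path as
unavailable and i40e_set_rx_function()/i40e_set_tx_function() fall back to the
scalar callbacks. A minimal sketch of the same pattern, using made-up names
(fast_path_setup(), vec_stub.c, vec_impl.c, caller.c), not the driver's:

/* vec_stub.c: a weak fallback that is always compiled in. */
int __attribute__((weak))
fast_path_setup(void)
{
	return -1;              /* "vector path not available" */
}

/*
 * vec_impl.c (built only when the optimized path is enabled) would hold the
 * strong definition, which silently replaces the weak stub at link time:
 *
 *	int fast_path_setup(void) { return 0; }
 */

/* caller.c: decides which datapath to install based on the stub's answer. */
#include <stdio.h>

int fast_path_setup(void);

int
main(void)
{
	if (fast_path_setup() < 0)
		printf("using the scalar fallback path\n");
	else
		printf("using the vector fast path\n");
	return 0;
}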