From patchwork Tue Jun 7 05:45:17 2016 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Zhe Tao X-Patchwork-Id: 13283 Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id 823408D91; Tue, 7 Jun 2016 07:47:37 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by dpdk.org (Postfix) with ESMTP id 69B6C8D91 for ; Tue, 7 Jun 2016 07:47:35 +0200 (CEST) Received: from orsmga001.jf.intel.com ([10.7.209.18]) by fmsmga101.fm.intel.com with ESMTP; 06 Jun 2016 22:47:17 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.26,431,1459839600"; d="scan'208";a="970340070" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by orsmga001.jf.intel.com with ESMTP; 06 Jun 2016 22:47:16 -0700 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id u575lDeg007878; Tue, 7 Jun 2016 13:47:13 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id u575lAbH005079; Tue, 7 Jun 2016 13:47:12 +0800 Received: (from zhetao@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id u575lAlx005075; Tue, 7 Jun 2016 13:47:10 +0800 From: Zhe Tao To: dev@dpdk.org Cc: wenzhuo.lu@intel.com, zhe.tao@intel.com, konstantin.ananyev@intel.com, bruce.richardson@intel.com, jing.d.chen@intel.com, cunming.liang@intel.com, jingjing.wu@intel.com, helin.zhang@intel.com Date: Tue, 7 Jun 2016 13:45:17 +0800 Message-Id: <1465278318-4949-8-git-send-email-zhe.tao@intel.com> X-Mailer: git-send-email 1.7.4.1 In-Reply-To: <1465278318-4949-1-git-send-email-zhe.tao@intel.com> References: <1465278318-4949-1-git-send-email-zhe.tao@intel.com> Subject: [dpdk-dev] [PATCH v2 7/8] i40e: RX/TX with lock on VF X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list 
List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: "zhe.tao" Add RX/TX paths with lock for VF. It's used when the function of link reset on VF is needed. When the lock for RX/TX is added, the RX/TX can be stopped. Then we have a chance to reset the VF link. Please be aware there's a performance drop if the lock path is chosen. Signed-off-by: zhe.tao --- drivers/net/i40e/i40e_ethdev.c | 4 ++-- drivers/net/i40e/i40e_ethdev.h | 4 ++++ drivers/net/i40e/i40e_ethdev_vf.c | 4 ++-- drivers/net/i40e/i40e_rxtx.c | 45 +++++++++++++++++++++++++-------------- drivers/net/i40e/i40e_rxtx.h | 30 ++++++++++++++++++++++++++ 5 files changed, 67 insertions(+), 20 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 24777d5..1380330 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -764,8 +764,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); dev->dev_ops = &i40e_eth_dev_ops; - dev->rx_pkt_burst = i40e_recv_pkts; - dev->tx_pkt_burst = i40e_xmit_pkts; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts); + dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts); /* for secondary processes, we don't initialise any further as primary * has already done this work. 
Only check we don't need a different diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index cfd2399..672d920 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -540,6 +540,10 @@ struct i40e_adapter { struct rte_timecounter systime_tc; struct rte_timecounter rx_tstamp_tc; struct rte_timecounter tx_tstamp_tc; + + /* For VF reset backup */ + eth_rx_burst_t rx_backup; + eth_tx_burst_t tx_backup; }; int i40e_dev_switch_queues(struct i40e_pf *pf, bool on); diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 90682ac..46d8a7c 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1451,8 +1451,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) /* assign ops func pointer */ eth_dev->dev_ops = &i40evf_eth_dev_ops; - eth_dev->rx_pkt_burst = &i40e_recv_pkts; - eth_dev->tx_pkt_burst = &i40e_xmit_pkts; + eth_dev->rx_pkt_burst = RX_LOCK_FUNCTION(eth_dev, i40e_recv_pkts); + eth_dev->tx_pkt_burst = TX_LOCK_FUNCTION(eth_dev, i40e_xmit_pkts); /* * For secondary processes, we don't initialise any further as primary diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index c833aa3..0a6dcfb 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -79,10 +79,6 @@ PKT_TX_TCP_SEG | \ PKT_TX_OUTER_IP_CKSUM) -static uint16_t i40e_xmit_pkts_simple(void *tx_queue, - struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); - static inline void i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) { @@ -1144,7 +1140,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return 0; } -static uint16_t +uint16_t i40e_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1169,7 +1165,7 @@ i40e_recv_pkts_bulk_alloc(void *rx_queue, return nb_rx; } #else -static uint16_t +uint16_t i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue, struct rte_mbuf 
__rte_unused **rx_pkts, uint16_t __rte_unused nb_pkts) @@ -1892,7 +1888,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq, return nb_pkts; } -static uint16_t +uint16_t i40e_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -2121,10 +2117,13 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) }; if (dev->rx_pkt_burst == i40e_recv_pkts || + dev->rx_pkt_burst == i40e_recv_pkts_lock || #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || + dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc_lock || #endif - dev->rx_pkt_burst == i40e_recv_scattered_pkts) + dev->rx_pkt_burst == i40e_recv_scattered_pkts || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_lock) return ptypes; return NULL; } @@ -2648,6 +2647,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq) rxq->rxrearm_start = 0; rxq->rxrearm_nb = 0; + rte_spinlock_init(&rxq->rx_lock); } void @@ -2704,6 +2704,7 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq) txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + rte_spinlock_init(&txq->tx_lock); } /* Init the TX queue in hardware */ @@ -3155,12 +3156,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev) "callback (port=%d).", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_scattered_pkts_vec); } else { PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " "allocation callback (port=%d).", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_scattered_pkts; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_scattered_pkts); } /* If parameters allow we are going to choose between the following * callbacks: @@ -3174,27 +3175,29 @@ i40e_set_rx_function(struct rte_eth_dev *dev) RTE_I40E_DESCS_PER_LOOP, dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_pkts_vec; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts_vec); } else if (ad->rx_bulk_alloc_allowed) { 
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. Rx Burst Bulk Alloc function " "will be used on port=%d.", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts_bulk_alloc); } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " "satisfied, or Scattered Rx is requested " "(port=%d).", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_pkts; + dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, i40e_recv_pkts); } /* Propagate information about RX function choice through all queues. */ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { rx_using_sse = (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_lock || + dev->rx_pkt_burst == i40e_recv_pkts_vec_lock || dev->rx_pkt_burst == i40e_recv_pkts_vec); for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -3250,14 +3253,14 @@ i40e_set_tx_function(struct rte_eth_dev *dev) if (ad->tx_simple_allowed) { if (ad->tx_vec_allowed) { PMD_INIT_LOG(DEBUG, "Vector tx finally be used."); - dev->tx_pkt_burst = i40e_xmit_pkts_vec; + dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts_vec); } else { PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); - dev->tx_pkt_burst = i40e_xmit_pkts_simple; + dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts_simple); } } else { PMD_INIT_LOG(DEBUG, "Xmit tx finally be used."); - dev->tx_pkt_burst = i40e_xmit_pkts; + dev->tx_pkt_burst = TX_LOCK_FUNCTION(dev, i40e_xmit_pkts); } } @@ -3311,3 +3314,13 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue, { return 0; } + +GENERATE_RX_LOCK(i40e_recv_pkts, i40e) +GENERATE_RX_LOCK(i40e_recv_pkts_vec, i40e) +GENERATE_RX_LOCK(i40e_recv_pkts_bulk_alloc, i40e) +GENERATE_RX_LOCK(i40e_recv_scattered_pkts, i40e) +GENERATE_RX_LOCK(i40e_recv_scattered_pkts_vec, i40e) + +GENERATE_TX_LOCK(i40e_xmit_pkts, i40e) +GENERATE_TX_LOCK(i40e_xmit_pkts_vec, i40e) +GENERATE_TX_LOCK(i40e_xmit_pkts_simple, i40e) 
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 98179f0..a1c13b8 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -140,6 +140,7 @@ struct i40e_rx_queue { bool rx_deferred_start; /**< don't start this queue in dev start */ uint16_t rx_using_sse; /**