From patchwork Fri Oct 30 10:52:15 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Zhe Tao X-Patchwork-Id: 8351 Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id EB4508E8E; Fri, 30 Oct 2015 11:52:46 +0100 (CET) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id CA2148E72 for ; Fri, 30 Oct 2015 11:52:41 +0100 (CET) Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by orsmga102.jf.intel.com with ESMTP; 30 Oct 2015 03:52:40 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.20,217,1444719600"; d="scan'208";a="590946596" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by FMSMGA003.fm.intel.com with ESMTP; 30 Oct 2015 03:52:40 -0700 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id t9UAqb2v012299; Fri, 30 Oct 2015 18:52:37 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id t9UAqYvx008866; Fri, 30 Oct 2015 18:52:36 +0800 Received: (from zhetao@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id t9UAqY17008862; Fri, 30 Oct 2015 18:52:34 +0800 From: Zhe Tao To: dev@dpdk.org Date: Fri, 30 Oct 2015 18:52:15 +0800 Message-Id: <1446202336-8723-8-git-send-email-zhe.tao@intel.com> X-Mailer: git-send-email 1.7.4.1 In-Reply-To: <1446202336-8723-1-git-send-email-zhe.tao@intel.com> References: <1443373527-28948-1-git-send-email-zhe.tao@intel.com> <1446202336-8723-1-git-send-email-zhe.tao@intel.com> Subject: [dpdk-dev] [PATCH 7/8 v2] move all the extra definition out of share code X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: 
, Errors-To: dev-bounces@dpdk.org Sender: "dev" move all the extra definitions out of shared code Signed-off-by: Zhe Tao --- drivers/net/i40e/base/i40e_type.h | 13 ------------- drivers/net/i40e/i40e_ethdev.c | 11 ++++++----- drivers/net/i40e/i40e_ethdev.h | 6 ++++++ drivers/net/i40e/i40e_ethdev_vf.c | 11 ++++++----- drivers/net/i40e/i40e_rxtx.c | 39 ++++++++++++++++++++++----------------- drivers/net/i40e/i40e_rxtx.h | 8 ++++++++ 6 files changed, 48 insertions(+), 40 deletions(-) diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h index 9491f94..6ee398e 100644 --- a/drivers/net/i40e/base/i40e_type.h +++ b/drivers/net/i40e/base/i40e_type.h @@ -113,11 +113,6 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) #define I40E_LO_BYTE(x) ((u8)((x) & 0xFF)) -#undef container_of -#define container_of(ptr, type, member) ({ \ - typeof(((type *)0)->member)(*__mptr) = (ptr); \ - (type *)((char *)__mptr - offsetof(type, member)); }) - /* Number of Transmit Descriptors must be a multiple of 8. 
*/ #define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8 /* Number of Receive Descriptors must be a multiple of 32 if @@ -568,11 +563,6 @@ struct i40e_hw { /* debug mask */ u32 debug_mask; - - bool rx_bulk_alloc_allowed; - bool rx_vec_allowed; - bool tx_simple_allowed; - bool tx_vec_allowed; }; static inline bool i40e_is_vf(struct i40e_hw *hw) @@ -979,9 +969,6 @@ enum i40e_tx_desc_cmd_bits { #define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ I40E_TXD_QW1_OFFSET_SHIFT) -#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\ - I40E_TX_DESC_CMD_EOP) - enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index e241f66..153be45 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -670,18 +670,19 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) static int i40e_dev_configure(struct rte_eth_dev *dev) { + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; int ret; /* Initialize to TRUE. If any of Rx queues doesn't meet the * bulk allocation or vector Rx preconditions we will reset it. 
*/ - hw->rx_bulk_alloc_allowed = true; - hw->rx_vec_allowed = true; - hw->tx_simple_allowed = true; - hw->tx_vec_allowed = true; + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + ad->tx_simple_allowed = true; + ad->tx_vec_allowed = true; if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) { ret = i40e_fdir_setup(pf); diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 6185657..ab78c54 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -462,6 +462,12 @@ struct i40e_adapter { struct i40e_pf pf; struct i40e_vf vf; }; + + /* for vector PMD */ + bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + bool tx_simple_allowed; + bool tx_vec_allowed; }; int i40e_dev_switch_queues(struct i40e_pf *pf, bool on); diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index bdea8f0..b8ebacb 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1277,15 +1277,16 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver); static int i40evf_dev_configure(struct rte_eth_dev *dev) { - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk * allocation or vector Rx preconditions we will reset it. 
*/ - hw->rx_bulk_alloc_allowed = true; - hw->rx_vec_allowed = true; - hw->tx_simple_allowed = true; - hw->tx_vec_allowed = true; + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + ad->tx_simple_allowed = true; + ad->tx_vec_allowed = true; return i40evf_init_vlan(dev); } diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 71f601f..8731712 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, struct i40e_vsi *vsi; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct i40e_rx_queue *rxq; const struct rte_memzone *rz; uint32_t ring_size; @@ -2226,7 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " "not enabled on port=%d, queue=%d.", rxq->port_id, rxq->queue_id); - hw->rx_bulk_alloc_allowed = false; + ad->rx_bulk_alloc_allowed = false; } return 0; @@ -3064,27 +3066,28 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf) void __attribute__((cold)) i40e_set_rx_function(struct rte_eth_dev *dev) { - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); uint16_t rx_using_sse, i; /* In order to allow Vector Rx there are a few configuration * conditions to be met and Rx Bulk Allocation should be allowed. 
*/ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { if (i40e_rx_vec_dev_conf_condition_check(dev) || - !hw->rx_bulk_alloc_allowed) { + !ad->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet" " Vector Rx preconditions", dev->data->port_id); - hw->rx_vec_allowed = false; + ad->rx_vec_allowed = false; } - if (hw->rx_vec_allowed) { + if (ad->rx_vec_allowed) { for (i = 0; i < dev->data->nb_rx_queues; i++) { struct i40e_rx_queue *rxq = dev->data->rx_queues[i]; if (i40e_rxq_vec_setup(rxq)) { - hw->rx_vec_allowed = false; + ad->rx_vec_allowed = false; break; } } @@ -3095,7 +3098,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev) /* Set the non-LRO scattered callback: there are Vector and * single allocation versions. */ - if (hw->rx_vec_allowed) { + if (ad->rx_vec_allowed) { PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx " "callback (port=%d).", dev->data->port_id); @@ -3113,14 +3116,14 @@ i40e_set_rx_function(struct rte_eth_dev *dev) * - Bulk Allocation * - Single buffer allocation (the simplest one) */ - } else if (hw->rx_vec_allowed) { + } else if (ad->rx_vec_allowed) { PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX " "burst size no less than %d (port=%d).", RTE_I40E_DESCS_PER_LOOP, dev->data->port_id); dev->rx_pkt_burst = i40e_recv_pkts_vec; - } else if (hw->rx_bulk_alloc_allowed) { + } else if (ad->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. 
Rx Burst Bulk Alloc function " "will be used on port=%d.", @@ -3153,7 +3156,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev) void __attribute__((cold)) i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq) { - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) @@ -3163,35 +3167,36 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq) " can be enabled on this txq."); } else { - hw->tx_vec_allowed = false; + ad->tx_vec_allowed = false; } } else { - hw->tx_simple_allowed = false; + ad->tx_simple_allowed = false; } } void __attribute__((cold)) i40e_set_tx_function(struct rte_eth_dev *dev) { - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); int i; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - if (hw->tx_vec_allowed) { + if (ad->tx_vec_allowed) { for (i = 0; i < dev->data->nb_tx_queues; i++) { struct i40e_tx_queue *txq = dev->data->tx_queues[i]; if (i40e_txq_vec_setup(txq)) { - hw->tx_vec_allowed = false; + ad->tx_vec_allowed = false; break; } } } } - if (hw->tx_simple_allowed) { - if (hw->tx_vec_allowed) { + if (ad->tx_simple_allowed) { + if (ad->tx_vec_allowed) { PMD_INIT_LOG(DEBUG, "Vector tx finally be used."); dev->tx_pkt_burst = i40e_xmit_pkts_vec; } else { diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index dc575fd..39cb95a 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -57,6 +57,14 @@ #define I40E_RXBUF_SZ_1024 1024 #define I40E_RXBUF_SZ_2048 2048 +#undef container_of +#define container_of(ptr, type, member) ({ \ + typeof(((type *)0)->member)(*__mptr) = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); 
}) + +#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\ + I40E_TX_DESC_CMD_EOP) + enum i40e_header_split_mode { i40e_header_split_none = 0, i40e_header_split_enabled = 1,