From patchwork Thu Mar 5 15:24:25 2015
X-Patchwork-Submitter: Vladislav Zolotarov
X-Patchwork-Id: 3881
From: Vlad Zolotarov
To: dev@dpdk.org
Date: Thu, 5 Mar 2015 17:24:25 +0200
Message-Id: <1425569067-4958-5-git-send-email-vladz@cloudius-systems.com>
In-Reply-To: <1425569067-4958-1-git-send-email-vladz@cloudius-systems.com>
References: <1425569067-4958-1-git-send-email-vladz@cloudius-systems.com>
Subject: [dpdk-dev] [PATCH v3 4/6] ixgbe: Unify the rx_pkt_bulk callback initialization

- Set the callback in a single function that is called from
  ixgbe_dev_rx_init() for a primary process and from eth_ixgbe_dev_init()
  for secondary processes, instead of in multiple, hard-to-track places.
- Removed rte_eth_dev_data.lro_bulk_alloc and added
  ixgbe_hw.rx_bulk_alloc_allowed instead.
- Bug fix: the vector scattered packets callback was called regardless of
  the appropriate preconditions.
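In outline, the selection logic that the new set_rx_function() implements
reduces to the following (a simplified sketch of the code added below, with
the PMD_INIT_LOG() calls elided):

	void set_rx_function(struct rte_eth_dev *dev)
	{
		struct ixgbe_hw *hw =
			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		/* ixgbe_rx_vec_condition_check() returns 0 when vector Rx
		 * may be used; with RTE_IXGBE_INC_VECTOR=n a stub returning
		 * -1 is linked in instead. */
		bool vec_is_allowed = !ixgbe_rx_vec_condition_check(dev);

		/* Bulk alloc requires every Rx queue on the port to meet
		 * the preconditions, and no scattered Rx. */
		if (hw->rx_bulk_alloc_allowed && !dev->data->scattered_rx)
			dev->rx_pkt_burst = vec_is_allowed ?
				ixgbe_recv_pkts_vec :
				ixgbe_recv_pkts_bulk_alloc;
		else
			dev->rx_pkt_burst = ixgbe_recv_pkts;

		/* Scattered Rx overrides the choice above; the vector
		 * flavour is used only when its preconditions hold
		 * (the bug fix). */
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = vec_is_allowed ?
				ixgbe_recv_scattered_pkts_vec :
				ixgbe_recv_scattered_pkts;
	}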
Signed-off-by: Vlad Zolotarov
---
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h |   1 +
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c     |  14 ++-
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c       | 158 ++++++++++++++++++++------------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h       |  20 +++-
 4 files changed, 128 insertions(+), 65 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
index c67d462..c60081c 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -3657,6 +3657,7 @@ struct ixgbe_hw {
 	bool force_full_reset;
 	bool allow_unsupported_sfp;
 	bool wol_enabled;
+	bool rx_bulk_alloc_allowed;
 };
 
 #define ixgbe_call_func(hw, func, params, error) \
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 9bdc046..f93dcfc 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -760,8 +760,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 				     "Using default TX function.");
 		}
 
-		if (eth_dev->data->scattered_rx)
-			eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+		set_rx_function(eth_dev);
+
 		return 0;
 	}
 	pci_dev = eth_dev->pci_dev;
@@ -771,6 +771,15 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	hw->vendor_id = pci_dev->id.vendor_id;
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 	hw->allow_unsupported_sfp = 1;
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+	/*
+	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
+	 * allocation preconditions, we will reset it.
+	 */
+	hw->rx_bulk_alloc_allowed = true;
+#else
+	hw->rx_bulk_alloc_allowed = false;
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 
 	/* Initialize the shared code (base driver) */
 #ifdef RTE_NIC_BYPASS
@@ -1641,6 +1650,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear stored conf */
 	dev->data->scattered_rx = 0;
+	hw->rx_bulk_alloc_allowed = false;
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index c1a2823..d912fa8 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2096,12 +2096,12 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
 
 /* Reset dynamic igb_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct igb_rx_queue *rxq)
 {
 	static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
 			.pkt_addr = 0}};
 	unsigned i;
-	uint16_t len;
+	uint16_t len = rxq->nb_rx_desc;
 
 	/*
 	 * By default, the Rx queue setup function allocates enough memory for
@@ -2113,14 +2113,9 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
 	 * constraints here to see if we need to zero out memory after the end
 	 * of the H/W descriptor ring.
 	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+	if (hw->rx_bulk_alloc_allowed)
 		/* zero out extra memory */
-		len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-	else
-#endif
-		/* do not zero out extra memory */
-		len = rxq->nb_rx_desc;
+		len += RTE_PMD_IXGBE_RX_MAX_BURST;
 
 	/*
 	 * Zero out HW ring memory. Zero out extra memory at the end of
@@ -2162,7 +2157,6 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *rz;
 	struct igb_rx_queue *rxq;
 	struct ixgbe_hw *hw;
-	int use_def_burst_func = 1;
 	uint16_t len;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2247,11 +2241,10 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
 	 * function does not access an invalid memory region.
 	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-#else
 	len = nb_desc;
-#endif
+	if (hw->rx_bulk_alloc_allowed)
+		len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
 					  sizeof(struct igb_rx_entry) * len,
 					  RTE_CACHE_LINE_SIZE, socket_id);
@@ -2264,45 +2257,24 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/*
 	 * Certain constraints must be met in order to use the bulk buffer
-	 * allocation Rx burst function.
+	 * allocation Rx burst function. If any of the Rx queues doesn't meet
+	 * them, the feature should be disabled for the whole port.
 	 */
-	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+				    "preconditions - canceling the feature for "
+				    "the whole port[%d]",
+			     rxq->queue_id, rxq->port_id);
+		hw->rx_bulk_alloc_allowed = false;
+	}
 
 #ifdef RTE_IXGBE_INC_VECTOR
 	ixgbe_rxq_vec_setup(rxq);
 #endif
-	/*
-	 * TODO: This must be moved to ixgbe_dev_rx_init() since rx_pkt_burst
-	 * is a global per-device callback thus bulk allocation may be used
-	 * only if all queues meet the above preconditions.
-	 */
-	/* Check if pre-conditions are satisfied, and no Scattered Rx */
-	if (!use_def_burst_func && !dev->data->scattered_rx) {
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-#ifdef RTE_IXGBE_INC_VECTOR
-		if (!ixgbe_rx_vec_condition_check(dev)) {
-			PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
-				     "sure RX burst size no less than 32.");
-			dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
-		}
-#endif
-#endif
-	} else {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
-			     "are not satisfied, Scattered Rx is requested, "
-			     "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
-			     "enabled (port=%d, queue=%d).",
-			     rxq->port_id, rxq->queue_id);
-	}
 	dev->data->rx_queues[queue_idx] = rxq;
 
-	ixgbe_reset_rx_queue(rxq);
+	ixgbe_reset_rx_queue(hw, rxq);
 
 	return 0;
 }
@@ -2356,6 +2328,7 @@ void
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
 	unsigned i;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
@@ -2371,7 +2344,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 		struct igb_rx_queue *rxq = dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			ixgbe_rx_queue_release_mbufs(rxq);
-			ixgbe_reset_rx_queue(rxq);
+			ixgbe_reset_rx_queue(hw, rxq);
 		}
 	}
 }
@@ -3533,6 +3506,57 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 	return 0;
 }
 
+void set_rx_function(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	bool vec_is_allowed = !ixgbe_rx_vec_condition_check(dev);
+
+	if (!vec_is_allowed)
+		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+				    "preconditions or RTE_IXGBE_INC_VECTOR is "
+				    "not enabled",
+			     dev->data->port_id);
+
+	/* Check if bulk alloc is allowed and no Scattered Rx */
+	if (hw->rx_bulk_alloc_allowed && !dev->data->scattered_rx) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+			     dev->data->port_id);
+		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+
+		if (vec_is_allowed) {
+			PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
+					   "sure RX burst size no less "
+					   "than 32.");
+			dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+		}
+	} else {
+		dev->rx_pkt_burst = ixgbe_recv_pkts;
+
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+				    "satisfied, or Scattered Rx is requested, "
+				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
+				    "is not enabled (port=%d).",
+			     dev->data->port_id);
+	}
+
+	if (dev->data->scattered_rx) {
+		if (vec_is_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+					    "callback (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+		}
+	}
+}
+
 /*
  * Initializes Receive Unit.
  */
@@ -3673,24 +3697,13 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
-			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
 		}
 	}
 
-	if (rx_conf->enable_scatter) {
-		if (!dev->data->scattered_rx)
-			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
-		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
-		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
+	if (rx_conf->enable_scatter)
 		dev->data->scattered_rx = 1;
-	}
+
+	set_rx_function(dev);
 
 	/*
 	 * Device configured with multiple RX queues.
@@ -3967,7 +3980,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
 		ixgbe_rx_queue_release_mbufs(rxq);
-		ixgbe_reset_rx_queue(rxq);
+		ixgbe_reset_rx_queue(hw, rxq);
 	} else
 		return -1;
 
@@ -4331,3 +4344,26 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 		}
 	}
 }
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+#ifndef RTE_IXGBE_INC_VECTOR
+int ixgbe_rx_vec_condition_check(
+	struct rte_eth_dev __rte_unused *dev)
+{
+	return -1;
+}
+
+uint16_t
+ixgbe_recv_pkts_vec(void __rte_unused *rx_queue,
+		    struct rte_mbuf __rte_unused **rx_pkts,
+		    uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t ixgbe_recv_scattered_pkts_vec(void __rte_unused *rx_queue,
+	struct rte_mbuf __rte_unused **rx_pkts, uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+#endif
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 329007c..18c9154 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -255,16 +255,32 @@ struct ixgbe_txq_ops {
  */
 void set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
 
-#ifdef RTE_IXGBE_INC_VECTOR
+/**
+ * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
+ *
+ * Sets the callback based on the device parameters:
+ *  - ixgbe_hw.rx_bulk_alloc_allowed
+ *  - rte_eth_dev_data.scattered_rx
+ *  - rte_eth_dev_data.lro
+ *  - conditions checked in ixgbe_rx_vec_condition_check()
+ *
+ * This means that the parameters above have to be configured prior to
+ * calling this function.
+ *
+ * @dev rte_eth_dev handle
+ */
+void set_rx_function(struct rte_eth_dev *dev);
+
 uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
+#ifdef RTE_IXGBE_INC_VECTOR
 uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
 int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
-int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
 #endif
 
 #endif
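--

A note on why both call sites are needed: dev->rx_pkt_burst lives in struct
rte_eth_dev, which is per-process state, while rte_eth_dev_data sits in
shared memory. A secondary process therefore cannot reuse the primary
process's function pointer and must re-derive it from the shared state,
which is what the eth_ixgbe_dev_init() hunk above does. A minimal sketch of
that path, using the standard DPDK process-type check:

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Rx/Tx queues were already configured by the primary
		 * process; only this process's burst callbacks need to be
		 * chosen from the shared device state. */
		set_rx_function(eth_dev);
		return 0;
	}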