From patchwork Mon Nov 21 15:01:09 2016
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 17157
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Andrew Rybchenko
To: dev@dpdk.org
CC: Ivan Malov
Date: Mon, 21 Nov 2016 15:01:09 +0000
Message-ID: <1479740470-6723-56-git-send-email-arybchenko@solarflare.com>
X-Mailer: git-send-email 1.8.2.3
In-Reply-To: <1479740470-6723-1-git-send-email-arybchenko@solarflare.com>
References: <1479740470-6723-1-git-send-email-arybchenko@solarflare.com>
Subject: [dpdk-dev] [PATCH 55/56] net/sfc: implement transmit path start / stop

From: Ivan Malov

Reviewed-by: Andy Moreton
Signed-off-by: Ivan Malov
Signed-off-by: Andrew Rybchenko
---
 drivers/net/sfc/efx/sfc.c    |   8 ++
 drivers/net/sfc/efx/sfc_ev.c |  12 ++-
 drivers/net/sfc/efx/sfc_tx.c | 237 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/sfc/efx/sfc_tx.h |  17 ++++
 4 files changed, 271 insertions(+), 3 deletions(-)

diff --git a/drivers/net/sfc/efx/sfc.c b/drivers/net/sfc/efx/sfc.c
index 6990ccd..ef9e0d4 100644
--- a/drivers/net/sfc/efx/sfc.c
+++ b/drivers/net/sfc/efx/sfc.c
@@ -276,10 +276,17 @@ sfc_start(struct sfc_adapter *sa)
 	if (rc != 0)
 		goto fail_rx_start;
 
+	rc = sfc_tx_start(sa);
+	if (rc != 0)
+		goto fail_tx_start;
+
 	sa->state = SFC_ADAPTER_STARTED;
 	sfc_log_init(sa, "done");
 	return 0;
 
+fail_tx_start:
+	sfc_rx_stop(sa);
+
 fail_rx_start:
 	sfc_port_stop(sa);
 
@@ -321,6 +328,7 @@ sfc_stop(struct sfc_adapter *sa)
 
 	sa->state = SFC_ADAPTER_STOPPING;
 
+	sfc_tx_stop(sa);
 	sfc_rx_stop(sa);
 	sfc_port_stop(sa);
 	sfc_ev_stop(sa);
diff --git a/drivers/net/sfc/efx/sfc_ev.c b/drivers/net/sfc/efx/sfc_ev.c
index 8f260e7..b3cecbb 100644
--- a/drivers/net/sfc/efx/sfc_ev.c
+++ b/drivers/net/sfc/efx/sfc_ev.c
@@ -38,6 +38,7 @@
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_rx.h"
+#include "sfc_tx.h"
 
 
 /* Initial delay when waiting for event queue init complete event */
@@ -206,10 +207,15 @@ static boolean_t
 sfc_ev_txq_flush_done(void *arg, uint32_t txq_hw_index)
 {
 	struct sfc_evq *evq = arg;
+	struct sfc_txq *txq;
 
-	sfc_err(evq->sa, "EVQ %u unexpected Tx flush done event",
-		evq->evq_index);
-	return B_TRUE;
+	txq = evq->txq;
+	SFC_ASSERT(txq != NULL);
+	SFC_ASSERT(txq->hw_index == txq_hw_index);
+	SFC_ASSERT(txq->evq == evq);
+	sfc_tx_qflush_done(txq);
+
+	return B_FALSE;
 }
 
 static boolean_t
diff --git a/drivers/net/sfc/efx/sfc_tx.c b/drivers/net/sfc/efx/sfc_tx.c
index a4ffe9c..99d845a 100644
--- a/drivers/net/sfc/efx/sfc_tx.c
+++ b/drivers/net/sfc/efx/sfc_tx.c
@@ -28,10 +28,30 @@
  */
 
 #include "sfc.h"
+#include "sfc_debug.h"
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_tx.h"
 
+/*
+ * Maximum number of TX queue flush attempts in case of
+ * failure or flush timeout
+ */
+#define SFC_TX_QFLUSH_ATTEMPTS		(3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for TX
+ * queue flush done or flush failed events
+ */
+#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for TX queue
+ * flush done or flush failed events; it defines TX queue flush attempt timeout
+ * together with SFC_TX_QFLUSH_POLL_WAIT_MS
+ */
+#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
+
 static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa,
 		   const struct rte_eth_txconf *tx_conf)
@@ -83,6 +103,36 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa,
 	return rc;
 }
 
+void
+sfc_tx_qflush_done(struct sfc_txq *txq)
+{
+	txq->state |= SFC_TXQ_FLUSHED;
+	txq->state &= ~SFC_TXQ_FLUSHING;
+}
+
+static void
+sfc_tx_reap(struct sfc_txq *txq)
+{
+	unsigned int completed;
+
+
+	sfc_ev_qpoll(txq->evq);
+
+	for (completed = txq->completed;
+	     completed != txq->pending; completed++) {
+		struct sfc_tx_sw_desc *txd;
+
+		txd = &txq->sw_ring[completed & txq->ptr_mask];
+
+		if (txd->mbuf != NULL) {
+			rte_pktmbuf_free(txd->mbuf);
+			txd->mbuf = NULL;
+		}
+	}
+
+	txq->completed = completed;
+}
+
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
@@ -292,3 +342,190 @@ sfc_tx_fini(struct sfc_adapter *sa)
 	sa->txq_info = NULL;
 	sa->txq_count = 0;
 }
+
+int
+sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+	struct rte_eth_dev_data *dev_data;
+	struct sfc_txq_info *txq_info;
+	struct sfc_txq *txq;
+	struct sfc_evq *evq;
+	uint16_t flags;
+	unsigned int desc_index;
+	int rc = 0;
+
+	sfc_log_init(sa, "TxQ = %u", sw_index);
+
+	SFC_ASSERT(sw_index < sa->txq_count);
+	txq_info = &sa->txq_info[sw_index];
+
+	txq = txq_info->txq;
+
+	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+	evq = txq->evq;
+
+	rc = sfc_ev_qstart(sa, evq->evq_index);
+	if (rc != 0)
+		goto fail_ev_qstart;
+
+	/*
+	 * It seems that DPDK has no controls regarding IPv4 offloads,
+	 * hence, we always enable it here
+	 */
+	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
+	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP))
+		flags = EFX_TXQ_CKSUM_IPV4;
+	else
+		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
+
+	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+			    txq_info->entries, 0 /* not used on EF10 */,
+			    flags, evq->common,
+			    &txq->common, &desc_index);
+	if (rc != 0)
+		goto fail_tx_qcreate;
+
+	txq->added = txq->pending = txq->completed = desc_index;
+
+	efx_tx_qenable(txq->common);
+
+	txq->state |= (SFC_TXQ_STARTED | SFC_TXQ_RUNNING);
+
+	/*
+	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
+	 */
+	dev_data = sa->eth_dev->data;
+	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+
+fail_tx_qcreate:
+	sfc_ev_qstop(sa, evq->evq_index);
+
+fail_ev_qstart:
+	return rc;
+}
+
+void
+sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+	struct rte_eth_dev_data *dev_data;
+	struct sfc_txq_info *txq_info;
+	struct sfc_txq *txq;
+	unsigned int retry_count;
+	unsigned int wait_count;
+	unsigned int txds;
+
+	sfc_log_init(sa, "TxQ = %u", sw_index);
+
+	SFC_ASSERT(sw_index < sa->txq_count);
+	txq_info = &sa->txq_info[sw_index];
+
+	txq = txq_info->txq;
+
+	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
+
+	txq->state &= ~SFC_TXQ_RUNNING;
+
+	/*
+	 * Retry TX queue flushing in case of flush failure or
+	 * timeout; in the worst case it can delay for 6 seconds
+	 */
+	for (retry_count = 0;
+	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
+	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
+	     ++retry_count) {
+		if (efx_tx_qflush(txq->common) != 0) {
+			txq->state |= SFC_TXQ_FLUSHING;
+			break;
+		}
+
+		/*
+		 * Wait for TX queue flush done or flush failed event at least
+		 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
+		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
+		 */
+		wait_count = 0;
+		do {
+			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
+			sfc_ev_qpoll(txq->evq);
+		} while ((txq->state & SFC_TXQ_FLUSHING) &&
+			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
+
+		if (txq->state & SFC_TXQ_FLUSHING)
+			sfc_err(sa, "TxQ %u flush timed out", sw_index);
+
+		if (txq->state & SFC_TXQ_FLUSHED)
+			sfc_info(sa, "TxQ %u flushed", sw_index);
+	}
+
+	sfc_tx_reap(txq);
+
+	for (txds = 0; txds < txq_info->entries; txds++) {
+		if (txq->sw_ring[txds].mbuf != NULL) {
+			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+			txq->sw_ring[txds].mbuf = NULL;
+		}
+	}
+
+	txq->state = SFC_TXQ_INITIALIZED;
+
+	efx_tx_qdestroy(txq->common);
+
+	sfc_ev_qstop(sa, txq->evq->evq_index);
+
+	/*
+	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
+	 */
+	dev_data = sa->eth_dev->data;
+	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+int
+sfc_tx_start(struct sfc_adapter *sa)
+{
+	int sw_index;
+	int rc = 0;
+
+	sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+	rc = efx_tx_init(sa->nic);
+	if (rc != 0)
+		goto fail_efx_tx_init;
+
+	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
+		rc = sfc_tx_qstart(sa, sw_index);
+		if (rc != 0)
+			goto fail_tx_qstart;
+	}
+
+	return 0;
+
+fail_tx_qstart:
+	while (--sw_index >= 0)
+		sfc_tx_qstop(sa, sw_index);
+
+	efx_tx_fini(sa->nic);
+
+fail_efx_tx_init:
+	sfc_log_init(sa, "failed (rc = %d)", rc);
+	return rc;
+}
+
+void
+sfc_tx_stop(struct sfc_adapter *sa)
+{
+	int sw_index;
+
+	sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+	sw_index = sa->txq_count;
+	while (--sw_index >= 0) {
+		if (sa->txq_info[sw_index].txq != NULL)
+			sfc_tx_qstop(sa, sw_index);
+	}
+
+	efx_tx_fini(sa->nic);
+}
diff --git a/drivers/net/sfc/efx/sfc_tx.h b/drivers/net/sfc/efx/sfc_tx.h
index 3278797..47970f9 100644
--- a/drivers/net/sfc/efx/sfc_tx.h
+++ b/drivers/net/sfc/efx/sfc_tx.h
@@ -49,6 +49,14 @@ struct sfc_tx_sw_desc {
 enum sfc_txq_state_bit {
 	SFC_TXQ_INITIALIZED_BIT = 0,
 #define SFC_TXQ_INITIALIZED	(1 << SFC_TXQ_INITIALIZED_BIT)
+	SFC_TXQ_STARTED_BIT,
+#define SFC_TXQ_STARTED		(1 << SFC_TXQ_STARTED_BIT)
+	SFC_TXQ_RUNNING_BIT,
+#define SFC_TXQ_RUNNING		(1 << SFC_TXQ_RUNNING_BIT)
+	SFC_TXQ_FLUSHING_BIT,
+#define SFC_TXQ_FLUSHING	(1 << SFC_TXQ_FLUSHING_BIT)
+	SFC_TXQ_FLUSHED_BIT,
+#define SFC_TXQ_FLUSHED		(1 << SFC_TXQ_FLUSHED_BIT)
 };
 
 struct sfc_txq {
@@ -59,6 +67,9 @@ struct sfc_txq {
 	efx_desc_t		*pend_desc;
 	efx_txq_t		*common;
 	efsys_mem_t		mem;
+	unsigned int		added;
+	unsigned int		pending;
+	unsigned int		completed;
 	unsigned int		hw_index;
 	unsigned int		flags;
 
@@ -83,6 +94,12 @@ int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 const struct rte_eth_txconf *tx_conf);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
+void sfc_tx_qflush_done(struct sfc_txq *txq);
+int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_tx_start(struct sfc_adapter *sa);
+void sfc_tx_stop(struct sfc_adapter *sa);
+
 #ifdef __cplusplus
 }
 #endif
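
The 6 second worst case mentioned in the sfc_tx_qstop() comment follows directly from the three constants added at the top of sfc_tx.c: each flush attempt polls the event queue up to SFC_TX_QFLUSH_POLL_ATTEMPTS times with SFC_TX_QFLUSH_POLL_WAIT_MS between polls (2 seconds per attempt), and up to SFC_TX_QFLUSH_ATTEMPTS attempts are made. The standalone sketch below only re-states that arithmetic for illustration; it is not part of the patch, and everything outside the three copied #define values is hypothetical.

	/* Worked example: worst-case Tx queue flush wait implied by the patch. */
	#include <stdio.h>

	#define SFC_TX_QFLUSH_ATTEMPTS		(3)	/* flush retries (copied from sfc_tx.c) */
	#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)	/* delay between event queue polls */
	#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)	/* polls per flush attempt */

	int
	main(void)
	{
		/* One attempt waits at most 1 ms * 2000 = 2000 ms for the
		 * flush done/failed event. */
		unsigned int attempt_ms =
			SFC_TX_QFLUSH_POLL_WAIT_MS * SFC_TX_QFLUSH_POLL_ATTEMPTS;
		/* Up to 3 attempts are retried, i.e. 6000 ms in the worst case,
		 * matching the comment in sfc_tx_qstop(). */
		unsigned int worst_case_ms = SFC_TX_QFLUSH_ATTEMPTS * attempt_ms;

		printf("per attempt: %u ms, worst case: %u ms\n",
		       attempt_ms, worst_case_ms);
		return 0;
	}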