From patchwork Fri Jul 2 08:39:40 2021
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 95195
X-Patchwork-Delegate: david.marchand@redhat.com
From: Andrew Rybchenko
To: dev@dpdk.org
Cc: David Marchand, Igor Romanov, Andy Moreton, Ivan Malov
Date: Fri, 2 Jul 2021 11:39:40 +0300
Message-Id: <20210702083948.546667-13-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20210702083948.546667-1-andrew.rybchenko@oktetlabs.ru>
References: <20210527152510.1551026-1-andrew.rybchenko@oktetlabs.ru>
 <20210702083948.546667-1-andrew.rybchenko@oktetlabs.ru>
Subject: [dpdk-dev] [PATCH v4 12/20] net/sfc: reserve RxQ for counters

From: Igor Romanov

MAE delivers counter data as special packets via a dedicated Rx queue.
Reserve an RxQ so that it does not interfere with ethdev Rx queues;
a routine to handle these packets will be added later. There is no
point in reserving the queue if no service cores are available, since
counters cannot be used in that case.
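To illustrate (not part of the patch itself), the SW index layout
produced by the mapping helpers below, assuming the counters RxQ is
reserved and the application configures two Rx and two Tx queues:

    RxQ SW index: 0    - counters RxQ (sfc_counters_rxq_sw_index())
                  1..2 - ethdev RxQ 0..1
    EvQ SW index: 0    - management EvQ
                  1    - EvQ of the counters RxQ
                  2..3 - EvQs of ethdev RxQ 0..1
                  4..5 - EvQs of ethdev TxQ 0..1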
Signed-off-by: Igor Romanov
Signed-off-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
Reviewed-by: Ivan Malov
---
 drivers/net/sfc/meson.build       |   1 +
 drivers/net/sfc/sfc.c             |  68 ++++++++--
 drivers/net/sfc/sfc.h             |  19 +++
 drivers/net/sfc/sfc_dp.h          |   2 +
 drivers/net/sfc/sfc_ev.h          |  72 ++++++++--
 drivers/net/sfc/sfc_mae.c         |   1 +
 drivers/net/sfc/sfc_mae_counter.c | 217 ++++++++++++++++++++++++++++++
 drivers/net/sfc/sfc_mae_counter.h |  44 ++++++
 drivers/net/sfc/sfc_rx.c          |  43 ++++--
 9 files changed, 438 insertions(+), 29 deletions(-)
 create mode 100644 drivers/net/sfc/sfc_mae_counter.c
 create mode 100644 drivers/net/sfc/sfc_mae_counter.h

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 4ac97e8d43..f8880f740a 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -55,6 +55,7 @@ sources = files(
         'sfc_filter.c',
         'sfc_switch.c',
         'sfc_mae.c',
+        'sfc_mae_counter.c',
         'sfc_flow.c',
         'sfc_dp.c',
         'sfc_ef10_rx.c',
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 3477c7530b..4097cf39de 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -20,6 +20,7 @@
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_rx.h"
+#include "sfc_mae_counter.h"
 #include "sfc_tx.h"
 #include "sfc_kvargs.h"
 #include "sfc_tweak.h"
@@ -174,6 +175,7 @@ static int
 sfc_estimate_resource_limits(struct sfc_adapter *sa)
 {
         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
         efx_drv_limits_t limits;
         int rc;
         uint32_t evq_allocated;
@@ -235,17 +237,53 @@ sfc_estimate_resource_limits(struct sfc_adapter *sa)
         rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
         txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

-        /* Subtract management EVQ not used for traffic */
-        SFC_ASSERT(evq_allocated > 0);
+        /*
+         * Subtract management EVQ not used for traffic.
+         * The resource allocation strategy is as follows:
+         * - one EVQ for management
+         * - one EVQ for each ethdev RXQ
+         * - one EVQ for each ethdev TXQ
+         * - one EVQ and one RXQ for optional MAE counters.
+         */
+        if (evq_allocated == 0) {
+                sfc_err(sa, "count of allocated EvQ is 0");
+                rc = ENOMEM;
+                goto fail_allocate_evq;
+        }
+
         evq_allocated--;

-        /* Right now we use separate EVQ for Rx and Tx */
-        sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
-        sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+        /*
+         * Reserve absolutely required minimum.
+         * Right now we use separate EVQ for Rx and Tx.
+         */
+        if (rxq_allocated > 0 && evq_allocated > 0) {
+                sa->rxq_max = 1;
+                rxq_allocated--;
+                evq_allocated--;
+        }
+        if (txq_allocated > 0 && evq_allocated > 0) {
+                sa->txq_max = 1;
+                txq_allocated--;
+                evq_allocated--;
+        }
+
+        if (sfc_mae_counter_rxq_required(sa) &&
+            rxq_allocated > 0 && evq_allocated > 0) {
+                rxq_allocated--;
+                evq_allocated--;
+                sas->counters_rxq_allocated = true;
+        } else {
+                sas->counters_rxq_allocated = false;
+        }
+
+        /* Add remaining allocated queues */
+        sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
+        sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);

         /* Keep NIC initialized */
         return 0;

+fail_allocate_evq:
 fail_get_vi_pool:
         efx_nic_fini(sa->nic);
 fail_nic_init:
@@ -256,14 +294,20 @@ static int
 sfc_set_drv_limits(struct sfc_adapter *sa)
 {
         const struct rte_eth_dev_data *data = sa->eth_dev->data;
+        uint32_t rxq_reserved = sfc_nb_reserved_rxq(sfc_sa2shared(sa));
         efx_drv_limits_t lim;

         memset(&lim, 0, sizeof(lim));

-        /* Limits are strict since take into account initial estimation */
+        /*
+         * Limits are strict since they take the initial estimation
+         * into account. The resource allocation strategy is described in
+         * sfc_estimate_resource_limits().
+         */
         lim.edl_min_evq_count = lim.edl_max_evq_count =
-                1 + data->nb_rx_queues + data->nb_tx_queues;
-        lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+                1 + data->nb_rx_queues + data->nb_tx_queues + rxq_reserved;
+        lim.edl_min_rxq_count = lim.edl_max_rxq_count =
+                data->nb_rx_queues + rxq_reserved;
         lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

         return efx_nic_set_drv_limits(sa->nic, &lim);
@@ -834,6 +878,10 @@ sfc_attach(struct sfc_adapter *sa)
         if (rc != 0)
                 goto fail_filter_attach;

+        rc = sfc_mae_counter_rxq_attach(sa);
+        if (rc != 0)
+                goto fail_mae_counter_rxq_attach;
+
         rc = sfc_mae_attach(sa);
         if (rc != 0)
                 goto fail_mae_attach;
@@ -862,6 +910,9 @@
         sfc_mae_detach(sa);

 fail_mae_attach:
+        sfc_mae_counter_rxq_detach(sa);
+
+fail_mae_counter_rxq_attach:
         sfc_filter_detach(sa);

 fail_filter_attach:
@@ -903,6 +954,7 @@ sfc_detach(struct sfc_adapter *sa)
         sfc_flow_fini(sa);

         sfc_mae_detach(sa);
+        sfc_mae_counter_rxq_detach(sa);
         sfc_filter_detach(sa);
         sfc_rss_detach(sa);
         sfc_port_detach(sa);
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 00fc26cf0e..546739bd4a 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -186,6 +186,8 @@ struct sfc_adapter_shared {

         char                    *dp_rx_name;
         char                    *dp_tx_name;
+
+        bool                    counters_rxq_allocated;
 };

 /* Adapter process private data */
@@ -205,6 +207,15 @@ sfc_adapter_priv_by_eth_dev(struct rte_eth_dev *eth_dev)
         return sap;
 }

+/* RxQ dedicated for counters (counter only RxQ) data */
+struct sfc_counter_rxq {
+        unsigned int            state;
+#define SFC_COUNTER_RXQ_ATTACHED        0x1
+#define SFC_COUNTER_RXQ_INITIALIZED     0x2
+        sfc_sw_index_t          sw_index;
+        struct rte_mempool      *mp;
+};
+
 /* Adapter private data */
 struct sfc_adapter {
         /*
@@ -283,6 +294,8 @@ struct sfc_adapter {
         bool                            mgmt_evq_running;
         struct sfc_evq                  *mgmt_evq;

+        struct sfc_counter_rxq          counter_rxq;
+
         struct sfc_rxq                  *rxq_ctrl;
         struct sfc_txq                  *txq_ctrl;

@@ -357,6 +370,12 @@ sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
         /* Just for symmetry of the API */
 }

+static inline unsigned int
+sfc_nb_counter_rxq(const struct sfc_adapter_shared *sas)
+{
+        return sas->counters_rxq_allocated ? 1 : 0;
+}
+
 /** Get the number of milliseconds since boot from the default timer */
 static inline uint64_t
 sfc_get_system_msecs(void)
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index 76065483d4..61c1a3fbac 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -97,6 +97,8 @@ struct sfc_dp {
 TAILQ_HEAD(sfc_dp_list, sfc_dp);

 typedef unsigned int sfc_sw_index_t;
+#define SFC_SW_INDEX_INVALID    ((sfc_sw_index_t)(UINT_MAX))
+
 typedef int32_t sfc_ethdev_qid_t;
 #define SFC_ETHDEV_QID_INVALID  ((sfc_ethdev_qid_t)(-1))
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
index 3f3c4b5b9a..b2a0380205 100644
--- a/drivers/net/sfc/sfc_ev.h
+++ b/drivers/net/sfc/sfc_ev.h
@@ -66,36 +66,87 @@ sfc_mgmt_evq_sw_index(__rte_unused const struct sfc_adapter_shared *sas)
         return 0;
 }

+/* Return the number of Rx queues reserved for driver's internal use */
+static inline unsigned int
+sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
+{
+        return sfc_nb_counter_rxq(sas);
+}
+
+static inline unsigned int
+sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
+{
+        /* An EvQ is required for each reserved RxQ */
+        return 1 + sfc_nb_reserved_rxq(sas);
+}
+
+/*
+ * The mapping functions that return SW index of a specific reserved
+ * queue rely on the relative order of reserved queues. Some reserved
+ * queues are optional, and if they are disabled or not supported, then
+ * the function for that specific reserved queue will return the previous
+ * valid index of a reserved queue in the dependency chain, or
+ * SFC_SW_INDEX_INVALID if it is the first reserved queue in the chain.
+ * If at least one of the reserved queues in the chain is enabled, then
+ * the corresponding function will give a valid SW index, even if previous
+ * functions in the chain returned SFC_SW_INDEX_INVALID, since this value
+ * is one less than the first valid SW index.
+ *
+ * The dependency mechanism is utilized to avoid rigid defines for SW indices
+ * of reserved queues and to allow these indices to shrink and make space
+ * for ethdev queue indices when some of the reserved queues are disabled.
+ */
+
+static inline sfc_sw_index_t
+sfc_counters_rxq_sw_index(const struct sfc_adapter_shared *sas)
+{
+        return sas->counters_rxq_allocated ? 0 : SFC_SW_INDEX_INVALID;
+}
+
 /*
  * Functions below define event queue to transmit/receive queue and vice
  * versa mapping.
+ * SFC_ETHDEV_QID_INVALID is returned when sw_index is converted to
+ * ethdev_qid, but sw_index represents a reserved queue for driver's
+ * internal use.
  * Own event queue is allocated for management, each Rx and each Tx queue.
  * Zero event queue is used for management events.
- * Rx event queues from 1 to RxQ number follow management event queue.
+ * When counters are supported, one Rx event queue is reserved.
+ * Rx event queues follow reserved event queues.
  * Tx event queues follow Rx event queues.
  */
 static inline sfc_ethdev_qid_t
-sfc_ethdev_rx_qid_by_rxq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
+sfc_ethdev_rx_qid_by_rxq_sw_index(struct sfc_adapter_shared *sas,
                                   sfc_sw_index_t rxq_sw_index)
 {
-        /* Only ethdev queues are present for now */
-        return rxq_sw_index;
+        if (rxq_sw_index < sfc_nb_reserved_rxq(sas))
+                return SFC_ETHDEV_QID_INVALID;
+
+        return rxq_sw_index - sfc_nb_reserved_rxq(sas);
 }

 static inline sfc_sw_index_t
-sfc_rxq_sw_index_by_ethdev_rx_qid(__rte_unused struct sfc_adapter_shared *sas,
+sfc_rxq_sw_index_by_ethdev_rx_qid(struct sfc_adapter_shared *sas,
                                   sfc_ethdev_qid_t ethdev_qid)
 {
-        /* Only ethdev queues are present for now */
-        return ethdev_qid;
+        return sfc_nb_reserved_rxq(sas) + ethdev_qid;
 }

 static inline sfc_sw_index_t
-sfc_evq_sw_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
                                  sfc_sw_index_t rxq_sw_index)
 {
-        return 1 + rxq_sw_index;
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_ethdev_qid_t ethdev_qid;
+
+        ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, rxq_sw_index);
+        if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
+                /* One EvQ is reserved for management */
+                return 1 + rxq_sw_index;
+        }
+
+        return sfc_nb_reserved_evq(sas) + ethdev_qid;
 }

 static inline sfc_ethdev_qid_t
@@ -118,7 +169,8 @@ static inline sfc_sw_index_t
 sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
                                  sfc_sw_index_t txq_sw_index)
 {
-        return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+        return sfc_nb_reserved_evq(sfc_sa2shared(sa)) +
+                sa->eth_dev->data->nb_rx_queues + txq_sw_index;
 }

 int sfc_ev_attach(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index a2c0aa1436..8ffcf72d88 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -16,6 +16,7 @@
 #include "efx.h"

 #include "sfc.h"
+#include "sfc_mae_counter.h"
 #include "sfc_log.h"
 #include "sfc_switch.h"
diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c
new file mode 100644
index 0000000000..c7646cf7b1
--- /dev/null
+++ b/drivers/net/sfc/sfc_mae_counter.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2020-2021 Xilinx, Inc.
+ */
+
+#include <rte_common.h>
+
+#include "efx.h"
+
+#include "sfc_ev.h"
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_mae_counter.h"
+#include "sfc_service.h"
+
+static uint32_t
+sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
+{
+        uint32_t cid;
+
+        cid = sfc_get_service_lcore(sa->socket_id);
+        if (cid != RTE_MAX_LCORE)
+                return cid;
+
+        if (sa->socket_id != SOCKET_ID_ANY)
+                cid = sfc_get_service_lcore(SOCKET_ID_ANY);
+
+        if (cid == RTE_MAX_LCORE) {
+                sfc_warn(sa, "failed to get service lcore for counter service");
+        } else if (sa->socket_id != SOCKET_ID_ANY) {
+                sfc_warn(sa,
+                        "failed to get service lcore for counter service at socket %d, but got at socket %u",
+                        sa->socket_id, rte_lcore_to_socket_id(cid));
+        }
+        return cid;
+}
+
+bool
+sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
+{
+        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+        if (encp->enc_mae_supported == B_FALSE)
+                return false;
+
+        if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
+                return false;
+
+        return true;
+}
+
+int
+sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
+{
+        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+        char name[RTE_MEMPOOL_NAMESIZE];
+        struct rte_mempool *mp;
+        unsigned int n_elements;
+        unsigned int cache_size;
+        /* The mempool is internal and private area is not required */
+        const uint16_t priv_size = 0;
+        const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
+                SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
+        int rc;
+
+        sfc_log_init(sa, "entry");
+
+        if (!sas->counters_rxq_allocated) {
+                sfc_log_init(sa, "counter queue is not supported - skip");
+                return 0;
+        }
+
+        /*
+         * At least one element in the ring is always unused to distinguish
+         * between empty and full ring cases.
+         */
+        n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;
+
+        /*
+         * The cache must have sufficient space to put received buckets
+         * before they're reused on refill.
+         */
+        cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
+                                     SFC_MAE_COUNTER_RX_BURST - 1);
+
+        if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
+            (int)sizeof(name)) {
+                sfc_err(sa, "failed: counter RxQ mempool name is too long");
+                rc = ENAMETOOLONG;
+                goto fail_long_name;
+        }
+
+        /*
+         * It could be a single-producer single-consumer ring mempool which
+         * requires minimal barriers. However, cache size and refill/burst
+         * policy are aligned, therefore it does not matter which
+         * mempool backend is chosen since the backend is unused.
+         */
+        mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
+                priv_size, data_room_size, sa->socket_id);
+        if (mp == NULL) {
+                sfc_err(sa, "failed to create counter RxQ mempool");
+                rc = rte_errno;
+                goto fail_mp_create;
+        }
+
+        sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
+        sa->counter_rxq.mp = mp;
+        sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;
+
+        sfc_log_init(sa, "done");
+
+        return 0;
+
+fail_mp_create:
+fail_long_name:
+        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+        return rc;
+}
+
+void
+sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
+{
+        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+
+        sfc_log_init(sa, "entry");
+
+        if (!sas->counters_rxq_allocated) {
+                sfc_log_init(sa, "counter queue is not supported - skip");
+                return;
+        }
+
+        if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
+                sfc_log_init(sa, "counter queue is not attached - skip");
+                return;
+        }
+
+        rte_mempool_free(sa->counter_rxq.mp);
+        sa->counter_rxq.mp = NULL;
+        sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;
+
+        sfc_log_init(sa, "done");
+}
+
+int
+sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
+{
+        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+        const struct rte_eth_rxconf rxconf = {
+                .rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
+                .rx_drop_en = 1,
+        };
+        uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
+        int rc;
+
+        sfc_log_init(sa, "entry");
+
+        if (!sas->counters_rxq_allocated) {
+                sfc_log_init(sa, "counter queue is not supported - skip");
+                return 0;
+        }
+
+        if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
+                sfc_log_init(sa, "counter queue is not attached - skip");
+                return 0;
+        }
+
+        nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
+        nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);
+
+        rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
+                               EFX_RXQ_FLAG_USER_MARK);
+        if (rc != 0)
+                goto fail_counter_rxq_init_info;
+
+        rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
+                          sa->socket_id, &rxconf, sa->counter_rxq.mp);
+        if (rc != 0) {
+                sfc_err(sa, "failed to init counter RxQ");
+                goto fail_counter_rxq_init;
+        }
+
+        sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;
+
+        sfc_log_init(sa, "done");
+
+        return 0;
+
+fail_counter_rxq_init:
+fail_counter_rxq_init_info:
+        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+        return rc;
+}
+
+void
+sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
+{
+        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+
+        sfc_log_init(sa, "entry");
+
+        if (!sas->counters_rxq_allocated) {
+                sfc_log_init(sa, "counter queue is not supported - skip");
+                return;
+        }
+
+        if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
+                sfc_log_init(sa, "counter queue is not initialized - skip");
+                return;
+        }
+
+        sfc_rx_qfini(sa, sa->counter_rxq.sw_index);
+
+        sfc_log_init(sa, "done");
+}
diff --git a/drivers/net/sfc/sfc_mae_counter.h b/drivers/net/sfc/sfc_mae_counter.h
new file mode 100644
index 0000000000..f16d64a999
--- /dev/null
+++ b/drivers/net/sfc/sfc_mae_counter.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2020-2021 Xilinx, Inc.
+ */
+
+#ifndef _SFC_MAE_COUNTER_H
+#define _SFC_MAE_COUNTER_H
+
+#include "sfc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Default values for a user of counter RxQ */
+#define SFC_MAE_COUNTER_RX_BURST 32
+#define SFC_COUNTER_RXQ_RX_DESC_COUNT 256
+
+/*
+ * The refill level is chosen based on the requirement to keep the
+ * number of "give credits" operations low.
+ */
+#define SFC_COUNTER_RXQ_REFILL_LEVEL (SFC_COUNTER_RXQ_RX_DESC_COUNT / 4)
+
+/*
+ * SF-122415-TC states that the packetiser that generates packets for
+ * counter stream must support 9k frames. Set it to the maximum supported
+ * size since, in case of a huge flow of counters, having fewer packets in
+ * counter updates is better.
+ */
+#define SFC_MAE_COUNTER_STREAM_PACKET_SIZE 9216
+
+bool sfc_mae_counter_rxq_required(struct sfc_adapter *sa);
+
+int sfc_mae_counter_rxq_attach(struct sfc_adapter *sa);
+void sfc_mae_counter_rxq_detach(struct sfc_adapter *sa);
+
+int sfc_mae_counter_rxq_init(struct sfc_adapter *sa);
+void sfc_mae_counter_rxq_fini(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_MAE_COUNTER_H */
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c7a7bd66ef..0532f77082 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -16,6 +16,7 @@
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_rx.h"
+#include "sfc_mae_counter.h"
 #include "sfc_kvargs.h"
 #include "sfc_tweak.h"
@@ -1705,6 +1706,9 @@ sfc_rx_configure(struct sfc_adapter *sa)
         struct sfc_rss *rss = &sas->rss;
         struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
         const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+        const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
+        const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
+        bool reconfigure;
         int rc;

         sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
@@ -1714,12 +1718,15 @@ sfc_rx_configure(struct sfc_adapter *sa)
         if (rc != 0)
                 goto fail_check_mode;

-        if (nb_rx_queues == sas->rxq_count)
+        if (nb_rxq_total == sas->rxq_count) {
+                reconfigure = true;
                 goto configure_rss;
+        }

         if (sas->rxq_info == NULL) {
+                reconfigure = false;
                 rc = ENOMEM;
-                sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+                sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
                                                   sizeof(sas->rxq_info[0]), 0,
                                                   sa->socket_id);
                 if (sas->rxq_info == NULL)
@@ -1730,39 +1737,42 @@ sfc_rx_configure(struct sfc_adapter *sa)
                  * since it should not be shared.
                  */
                 rc = ENOMEM;
-                sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0]));
+                sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
                 if (sa->rxq_ctrl == NULL)
                         goto fail_rxqs_ctrl_alloc;
         } else {
                 struct sfc_rxq_info *new_rxq_info;
                 struct sfc_rxq *new_rxq_ctrl;

+                reconfigure = true;
+
+                /* Do not uninitialize reserved queues */
                 if (nb_rx_queues < sas->ethdev_rxq_count)
                         sfc_rx_fini_queues(sa, nb_rx_queues);

                 rc = ENOMEM;
                 new_rxq_info = rte_realloc(sas->rxq_info,
-                        nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
-                if (new_rxq_info == NULL && nb_rx_queues > 0)
+                        nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
+                if (new_rxq_info == NULL && nb_rxq_total > 0)
                         goto fail_rxqs_realloc;

                 rc = ENOMEM;
                 new_rxq_ctrl = realloc(sa->rxq_ctrl,
-                        nb_rx_queues * sizeof(sa->rxq_ctrl[0]));
-                if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
+                        nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
+                if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
                         goto fail_rxqs_ctrl_realloc;

                 sas->rxq_info = new_rxq_info;
                 sa->rxq_ctrl = new_rxq_ctrl;

-                if (nb_rx_queues > sas->rxq_count) {
+                if (nb_rxq_total > sas->rxq_count) {
                         unsigned int rxq_count = sas->rxq_count;

                         memset(&sas->rxq_info[rxq_count], 0,
-                               (nb_rx_queues - rxq_count) *
+                               (nb_rxq_total - rxq_count) *
                                sizeof(sas->rxq_info[0]));
                         memset(&sa->rxq_ctrl[rxq_count], 0,
-                               (nb_rx_queues - rxq_count) *
+                               (nb_rxq_total - rxq_count) *
                                sizeof(sa->rxq_ctrl[0]));
                 }
         }
@@ -1779,7 +1789,13 @@ sfc_rx_configure(struct sfc_adapter *sa)
                 sas->ethdev_rxq_count++;
         }

-        sas->rxq_count = sas->ethdev_rxq_count;
+        sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;
+
+        if (!reconfigure) {
+                rc = sfc_mae_counter_rxq_init(sa);
+                if (rc != 0)
+                        goto fail_count_rxq_init;
+        }

 configure_rss:
         rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
@@ -1801,6 +1817,10 @@ sfc_rx_configure(struct sfc_adapter *sa)
         return 0;

 fail_rx_process_adv_conf_rss:
+        if (!reconfigure)
+                sfc_mae_counter_rxq_fini(sa);
+
+fail_count_rxq_init:
 fail_rx_qinit_info:
 fail_rxqs_ctrl_realloc:
 fail_rxqs_realloc:
@@ -1824,6 +1844,7 @@ sfc_rx_close(struct sfc_adapter *sa)
         struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

         sfc_rx_fini_queues(sa, 0);
+        sfc_mae_counter_rxq_fini(sa);

         rss->channels = 0;
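
For reference, the counter RxQ mempool sizing in sfc_mae_counter_rxq_attach()
works out as below. This is a standalone illustration, not part of the patch:
the constants are copied from sfc_mae_counter.h above, and align32pow2()
mimics the rounding done by rte_align32pow2() (next power of two >= its
argument).

    #include <stdint.h>
    #include <stdio.h>

    /* Constants mirrored from sfc_mae_counter.h */
    #define SFC_MAE_COUNTER_RX_BURST 32
    #define SFC_COUNTER_RXQ_RX_DESC_COUNT 256
    #define SFC_COUNTER_RXQ_REFILL_LEVEL (SFC_COUNTER_RXQ_RX_DESC_COUNT / 4)

    /* Round up to the next power of two, as rte_align32pow2() does */
    static uint32_t
    align32pow2(uint32_t x)
    {
            x--;
            x |= x >> 1;
            x |= x >> 2;
            x |= x >> 4;
            x |= x >> 8;
            x |= x >> 16;
            return x + 1;
    }

    int
    main(void)
    {
            /* One ring element is always unused: 256 - 1 = 255 mbufs */
            unsigned int n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;
            /* align32pow2(64 + 32 - 1) = align32pow2(95) = 128 */
            unsigned int cache_size = align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
                                                  SFC_MAE_COUNTER_RX_BURST - 1);

            printf("n_elements=%u cache_size=%u\n", n_elements, cache_size);
            return 0;
    }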