From patchwork Tue Jun 1 12:00:35 2021
X-Patchwork-Submitter: "Burakov, Anatoly" <anatoly.burakov@intel.com>
X-Patchwork-Id: 93725
X-Patchwork-Delegate: thomas@monjalon.net
From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org, David Hunt <david.hunt@intel.com>
Cc: ciara.loftus@intel.com
Date: Tue, 1 Jun 2021 12:00:35 +0000
Subject: [dpdk-dev] [PATCH v1 6/7] power: support monitoring multiple Rx queues

Use the new multi-monitor intrinsic to allow monitoring multiple ethdev
Rx queues while entering the energy-efficient power state. The
multi-monitor version is used unconditionally whenever the hardware
supports it; the single-address UMWAIT version is used only when
multi-monitor support is not available.
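For context (illustration only, not part of this patch): a minimal
sketch of how an application might opt its Rx queues into MONITOR-mode
power management. The helper name and queue count below are made up;
the API is the existing rte_power_ethdev_pmgmt_queue_enable(). With
this patch, enabling more than one queue on the same lcore succeeds
when the CPU supports multi-monitor; without it, the second call keeps
failing with -ENOTSUP as before.

#include <rte_power_pmd_mgmt.h>

/* hypothetical helper, for illustration only */
static int
enable_monitor_on_queues(unsigned int lcore_id, uint16_t port_id,
		uint16_t nb_queues)
{
	uint16_t qid;
	int ret;

	for (qid = 0; qid < nb_queues; qid++) {
		/*
		 * request MONITOR mode; the library now picks the
		 * multi-queue callback automatically when the hardware
		 * supports it
		 */
		ret = rte_power_ethdev_pmgmt_queue_enable(lcore_id,
				port_id, qid, RTE_POWER_MGMT_TYPE_MONITOR);
		if (ret < 0)
			return ret;
	}
	return 0;
}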
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/power/rte_power_pmd_mgmt.c | 75 +++++++++++++++++++++++++++++++++-
 1 file changed, 73 insertions(+), 2 deletions(-)

diff --git a/lib/power/rte_power_pmd_mgmt.c b/lib/power/rte_power_pmd_mgmt.c
index 60dd21a19c..9e0b8bdfaf 100644
--- a/lib/power/rte_power_pmd_mgmt.c
+++ b/lib/power/rte_power_pmd_mgmt.c
@@ -147,6 +147,23 @@ queue_list_remove(struct pmd_core_cfg *cfg, const struct queue *q)
 	return 0;
 }
 
+static inline int
+get_monitor_addresses(struct pmd_core_cfg *cfg,
+		struct rte_power_monitor_cond *pmc)
+{
+	size_t i;
+	int ret;
+
+	for (i = 0; i < cfg->n_queues; i++) {
+		struct rte_power_monitor_cond *cur = &pmc[i];
+		struct queue *q = &cfg->queues[i];
+		ret = rte_eth_get_monitor_addr(q->portid, q->qid, cur);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
 static void
 calc_tsc(void)
 {
@@ -175,6 +192,48 @@ calc_tsc(void)
 	}
 }
 
+static uint16_t
+clb_multiwait(uint16_t port_id, uint16_t qidx,
+		struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
+		uint16_t max_pkts __rte_unused, void *addr __rte_unused)
+{
+	const unsigned int lcore = rte_lcore_id();
+	const struct queue q = {port_id, qidx};
+	const bool empty = nb_rx == 0;
+	struct pmd_core_cfg *q_conf;
+
+	q_conf = &lcore_cfg[lcore];
+
+	/* early exit */
+	if (likely(!empty)) {
+		q_conf->empty_poll_stats = 0;
+	} else {
+		/* do we care about this particular queue? */
+		if (!queue_is_power_save(q_conf, &q))
+			return nb_rx;
+
+		/*
+		 * we can increment unconditionally here because if there were
+		 * non-empty polls in other queues assigned to this core, we
+		 * dropped the counter to zero anyway.
+		 */
+		q_conf->empty_poll_stats++;
+		if (unlikely(q_conf->empty_poll_stats > EMPTYPOLL_MAX)) {
+			struct rte_power_monitor_cond pmc[RTE_MAX_ETHPORTS];
+			uint16_t ret;
+
+			/* gather all monitoring conditions */
+			ret = get_monitor_addresses(q_conf, pmc);
+
+			if (ret == 0)
+				rte_power_monitor_multi(pmc,
+						q_conf->n_queues, UINT64_MAX);
+		}
+	}
+
+	return nb_rx;
+}
+
 static uint16_t
 clb_umwait(uint16_t port_id, uint16_t qidx, struct rte_mbuf **pkts __rte_unused,
 		uint16_t nb_rx, uint16_t max_pkts __rte_unused,
@@ -315,14 +374,19 @@ static int
 check_monitor(struct pmd_core_cfg *cfg, const struct queue *qdata)
 {
 	struct rte_power_monitor_cond dummy;
+	bool multimonitor_supported;
 
 	/* check if rte_power_monitor is supported */
 	if (!global_data.intrinsics_support.power_monitor) {
 		RTE_LOG(DEBUG, POWER, "Monitoring intrinsics are not supported\n");
 		return -ENOTSUP;
 	}
+	/* check if multi-monitor is supported */
+	multimonitor_supported =
+			global_data.intrinsics_support.power_monitor_multi;
 
-	if (cfg->n_queues > 0) {
+	/* if we're adding a new queue, do we support multiple queues? */
+	if (cfg->n_queues > 0 && !multimonitor_supported) {
 		RTE_LOG(DEBUG, POWER, "Monitoring multiple queues is not supported\n");
 		return -ENOTSUP;
 	}
@@ -338,6 +402,13 @@ check_monitor(struct pmd_core_cfg *cfg, const struct queue *qdata)
 	return 0;
 }
 
+static inline rte_rx_callback_fn
+get_monitor_callback(void)
+{
+	return global_data.intrinsics_support.power_monitor_multi ?
+			clb_multiwait : clb_umwait;
+}
+
 int
 rte_power_ethdev_pmgmt_queue_enable(unsigned int lcore_id, uint16_t port_id,
 		uint16_t queue_id, enum rte_power_pmd_mgmt_type mode)
@@ -385,7 +456,7 @@ rte_power_ethdev_pmgmt_queue_enable(unsigned int lcore_id, uint16_t port_id,
 		if (ret < 0)
 			goto end;
 
-		clb = clb_umwait;
+		clb = get_monitor_callback();
 		break;
 	case RTE_POWER_MGMT_TYPE_SCALE:
 		/* check if we can add a new queue */
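Side note for reviewers: the capability bit this patch keys off of
(power_monitor_multi, added to struct rte_cpu_intrinsics earlier in
this series) is also visible to applications via
rte_cpu_get_intrinsics_support(), so support can be checked up front.
A rough sketch, with a made-up helper name:

#include <stdbool.h>

#include <rte_cpuflags.h>

/* hypothetical helper, for illustration only */
static bool
have_multi_monitor(void)
{
	struct rte_cpu_intrinsics intrinsics;

	/* fills in a bitfield of supported power management intrinsics */
	rte_cpu_get_intrinsics_support(&intrinsics);
	return intrinsics.power_monitor_multi != 0;
}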