From patchwork Fri Oct 7 19:03:21 2022
X-Patchwork-Submitter: "Chautru, Nicolas"
X-Patchwork-Id: 117621
X-Patchwork-Delegate: gakhil@marvell.com
From: Nicolas Chautru
To: dev@dpdk.org, gakhil@marvell.com, maxime.coquelin@redhat.com
Cc: trix@redhat.com, mdr@ashroe.eu, bruce.richardson@intel.com,
 hemant.agrawal@nxp.com, david.marchand@redhat.com,
 stephen@networkplumber.org, hernan.vargas@intel.com, Nic Chautru
Subject: [PATCH v8 06/14] baseband/acc: add info get function for ACC200
Date: Fri, 7 Oct 2022 12:03:21 -0700
Message-Id: <20221007190329.25381-7-nicolas.chautru@intel.com>
X-Mailer: git-send-email 2.37.1
In-Reply-To: <20221007190329.25381-1-nicolas.chautru@intel.com>
References: <20221007190329.25381-1-nicolas.chautru@intel.com>

From: Nic Chautru

Add support for the info_get function so that the device can be
queried. A null capability list is exposed at this point.

Signed-off-by: Nic Chautru
Reviewed-by: Maxime Coquelin
---
 drivers/baseband/acc/acc200_pmd.h     |   1 +
 drivers/baseband/acc/rte_acc200_pmd.c | 239 ++++++++++++++++++++++++++
 2 files changed, 240 insertions(+)
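Note: once the PMD wires up this info_get hook, applications retrieve the
populated fields through the public bbdev API. A minimal usage sketch, not
part of this patch; the fields printed are just a sample from
struct rte_bbdev_driver_info, reached via the drv member of
struct rte_bbdev_info:

    #include <stdio.h>
    #include <rte_bbdev.h>

    /* Query a bbdev device and print a few of its driver limits. */
    static void
    print_bbdev_limits(uint16_t dev_id)
    {
            struct rte_bbdev_info info;

            if (rte_bbdev_info_get(dev_id, &info) != 0) {
                    printf("dev %u: info_get failed\n", dev_id);
                    return;
            }
            printf("%s: max queues %u, queue depth limit %u, hw accel %d\n",
                            info.dev_name, info.drv.max_num_queues,
                            info.drv.queue_size_lim,
                            info.drv.hardware_accelerated);
    }

With the null capability list exposed by this patch, info.drv.capabilities
holds only the RTE_BBDEV_END_OF_CAPABILITIES_LIST() terminator.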
diff --git a/drivers/baseband/acc/acc200_pmd.h b/drivers/baseband/acc/acc200_pmd.h
index 9df1f506ad..0a0f6dc4fe 100644
--- a/drivers/baseband/acc/acc200_pmd.h
+++ b/drivers/baseband/acc/acc200_pmd.h
@@ -8,6 +8,7 @@
 #include "acc_common.h"
 #include "acc200_pf_enum.h"
 #include "acc200_vf_enum.h"
+#include "rte_acc200_cfg.h"
 
 /* Helper macro for logging */
 #define rte_bbdev_log(level, fmt, ...) \
diff --git a/drivers/baseband/acc/rte_acc200_pmd.c b/drivers/baseband/acc/rte_acc200_pmd.c
index c59cad1d26..8d0d63f5c9 100644
--- a/drivers/baseband/acc/rte_acc200_pmd.c
+++ b/drivers/baseband/acc/rte_acc200_pmd.c
@@ -29,6 +29,197 @@ RTE_LOG_REGISTER_DEFAULT(acc200_logtype, DEBUG);
 RTE_LOG_REGISTER_DEFAULT(acc200_logtype, NOTICE);
 #endif
 
+/* Calculate the offset of the enqueue register. */
+static inline uint32_t
+queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
+{
+	if (pf_device)
+		return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
+				HWPfQmgrIngressAq);
+	else
+		return ((qgrp_id << 7) + (aq_id << 3) +
+				HWVfQmgrIngressAq);
+}
+
+enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, FFT, NUM_ACC};
+
+/* Return the queue topology for a Queue Group Index. */
+static inline void
+qtopFromAcc(struct rte_acc_queue_topology **qtop, int acc_enum,
+		struct rte_acc_conf *acc_conf)
+{
+	struct rte_acc_queue_topology *p_qtop;
+	p_qtop = NULL;
+	switch (acc_enum) {
+	case UL_4G:
+		p_qtop = &(acc_conf->q_ul_4g);
+		break;
+	case UL_5G:
+		p_qtop = &(acc_conf->q_ul_5g);
+		break;
+	case DL_4G:
+		p_qtop = &(acc_conf->q_dl_4g);
+		break;
+	case DL_5G:
+		p_qtop = &(acc_conf->q_dl_5g);
+		break;
+	case FFT:
+		p_qtop = &(acc_conf->q_fft);
+		break;
+	default:
+		/* NOTREACHED */
+		rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc %d",
+				acc_enum);
+		break;
+	}
+	*qtop = p_qtop;
+}
+
+static void
+initQTop(struct rte_acc_conf *acc_conf)
+{
+	acc_conf->q_ul_4g.num_aqs_per_groups = 0;
+	acc_conf->q_ul_4g.num_qgroups = 0;
+	acc_conf->q_ul_4g.first_qgroup_index = -1;
+	acc_conf->q_ul_5g.num_aqs_per_groups = 0;
+	acc_conf->q_ul_5g.num_qgroups = 0;
+	acc_conf->q_ul_5g.first_qgroup_index = -1;
+	acc_conf->q_dl_4g.num_aqs_per_groups = 0;
+	acc_conf->q_dl_4g.num_qgroups = 0;
+	acc_conf->q_dl_4g.first_qgroup_index = -1;
+	acc_conf->q_dl_5g.num_aqs_per_groups = 0;
+	acc_conf->q_dl_5g.num_qgroups = 0;
+	acc_conf->q_dl_5g.first_qgroup_index = -1;
+	acc_conf->q_fft.num_aqs_per_groups = 0;
+	acc_conf->q_fft.num_qgroups = 0;
+	acc_conf->q_fft.first_qgroup_index = -1;
+}
+
+static inline void
+updateQtop(uint8_t acc, uint8_t qg, struct rte_acc_conf *acc_conf, struct acc_device *d) {
+	uint32_t reg;
+	struct rte_acc_queue_topology *q_top = NULL;
+	uint16_t aq;
+
+	qtopFromAcc(&q_top, acc, acc_conf);
+	if (unlikely(q_top == NULL))
+		return;
+	q_top->num_qgroups++;
+	if (q_top->first_qgroup_index == -1) {
+		q_top->first_qgroup_index = qg;
+		/* Can be optimized to assume all are enabled by default. */
+		reg = acc_reg_read(d, queue_offset(d->pf_device,
+				0, qg, ACC200_NUM_AQS - 1));
+		if (reg & ACC_QUEUE_ENABLE) {
+			q_top->num_aqs_per_groups = ACC200_NUM_AQS;
+			return;
+		}
+		q_top->num_aqs_per_groups = 0;
+		for (aq = 0; aq < ACC200_NUM_AQS; aq++) {
+			reg = acc_reg_read(d, queue_offset(d->pf_device,
+					0, qg, aq));
+			if (reg & ACC_QUEUE_ENABLE)
+				q_top->num_aqs_per_groups++;
+		}
+	}
+}
+
+/* Fetch configuration enabled for the PF/VF using MMIO Read (slow). */
+static inline void
+fetch_acc200_config(struct rte_bbdev *dev)
+{
+	struct acc_device *d = dev->data->dev_private;
+	struct rte_acc_conf *acc_conf = &d->acc_conf;
+	const struct acc200_registry_addr *reg_addr;
+	uint8_t acc, qg;
+	uint32_t reg_aq, reg_len0, reg_len1, reg0, reg1;
+	uint32_t reg_mode, idx;
+	struct rte_acc_queue_topology *q_top = NULL;
+	int qman_func_id[ACC200_NUM_ACCS] = {ACC_ACCMAP_0, ACC_ACCMAP_1,
+			ACC_ACCMAP_2, ACC_ACCMAP_3, ACC_ACCMAP_4};
+
+	/* No need to retrieve the configuration if it is already done. */
+	if (d->configured)
+		return;
+
+	/* Choose correct registry addresses for the device type. */
+	if (d->pf_device)
+		reg_addr = &pf_reg_addr;
+	else
+		reg_addr = &vf_reg_addr;
+
+	d->ddr_size = 0;
+
+	/* Single VF Bundle by VF. */
+	acc_conf->num_vf_bundles = 1;
+	initQTop(acc_conf);
+
+	reg0 = acc_reg_read(d, reg_addr->qman_group_func);
+	reg1 = acc_reg_read(d, reg_addr->qman_group_func + 4);
+	for (qg = 0; qg < ACC200_NUM_QGRPS; qg++) {
+		reg_aq = acc_reg_read(d,
+				queue_offset(d->pf_device, 0, qg, 0));
+		if (reg_aq & ACC_QUEUE_ENABLE) {
+			if (qg < ACC_NUM_QGRPS_PER_WORD)
+				idx = (reg0 >> (qg * 4)) & 0x7;
+			else
+				idx = (reg1 >> ((qg -
+						ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;
+			if (idx < ACC200_NUM_ACCS) {
+				acc = qman_func_id[idx];
+				updateQtop(acc, qg, acc_conf, d);
+			}
+		}
+	}
+
+	/* Check the depth of the AQs. */
+	reg_len0 = acc_reg_read(d, reg_addr->depth_log0_offset);
+	reg_len1 = acc_reg_read(d, reg_addr->depth_log1_offset);
+	for (acc = 0; acc < NUM_ACC; acc++) {
+		qtopFromAcc(&q_top, acc, acc_conf);
+		if (q_top->first_qgroup_index < ACC_NUM_QGRPS_PER_WORD)
+			q_top->aq_depth_log2 = (reg_len0 >>
+					(q_top->first_qgroup_index * 4))
+					& 0xF;
+		else
+			q_top->aq_depth_log2 = (reg_len1 >>
+					((q_top->first_qgroup_index -
+					ACC_NUM_QGRPS_PER_WORD) * 4))
+					& 0xF;
+	}
+
+	/* Read PF mode. */
+	if (d->pf_device) {
+		reg_mode = acc_reg_read(d, HWPfHiPfMode);
+		acc_conf->pf_mode_en = (reg_mode == ACC_PF_VAL) ? 1 : 0;
+	} else {
+		reg_mode = acc_reg_read(d, reg_addr->hi_mode);
+		acc_conf->pf_mode_en = reg_mode & 1;
+	}
+
+	rte_bbdev_log_debug(
+			"%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u AQ %u %u %u %u %u Len %u %u %u %u %u\n",
+			(d->pf_device) ? "PF" : "VF",
+			(acc_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
+			(acc_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
+			acc_conf->q_ul_4g.num_qgroups,
+			acc_conf->q_dl_4g.num_qgroups,
+			acc_conf->q_ul_5g.num_qgroups,
+			acc_conf->q_dl_5g.num_qgroups,
+			acc_conf->q_fft.num_qgroups,
+			acc_conf->q_ul_4g.num_aqs_per_groups,
+			acc_conf->q_dl_4g.num_aqs_per_groups,
+			acc_conf->q_ul_5g.num_aqs_per_groups,
+			acc_conf->q_dl_5g.num_aqs_per_groups,
+			acc_conf->q_fft.num_aqs_per_groups,
+			acc_conf->q_ul_4g.aq_depth_log2,
+			acc_conf->q_dl_4g.aq_depth_log2,
+			acc_conf->q_ul_5g.aq_depth_log2,
+			acc_conf->q_dl_5g.aq_depth_log2,
+			acc_conf->q_fft.aq_depth_log2);
+}
+
+/* Free memory used for software rings. */
 static int
 acc200_dev_close(struct rte_bbdev *dev)
 {
@@ -38,9 +229,57 @@ acc200_dev_close(struct rte_bbdev *dev)
 	return 0;
 }
 
+/* Get ACC200 device info. */
+static void
+acc200_dev_info_get(struct rte_bbdev *dev,
+		struct rte_bbdev_driver_info *dev_info)
+{
+	struct acc_device *d = dev->data->dev_private;
+	int i;
+	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
+	};
+
+	static struct rte_bbdev_queue_conf default_queue_conf;
+	default_queue_conf.socket = dev->data->socket_id;
+	default_queue_conf.queue_size = ACC_MAX_QUEUE_DEPTH;
+
+	dev_info->driver_name = dev->device->driver->name;
+
+	/* Read and save the populated config from ACC200 registers. */
+	fetch_acc200_config(dev);
+
+	/* Exposed number of queues. */
+	dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
+	dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;
+	dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;
+	dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = 0;
+	dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = 0;
+	dev_info->num_queues[RTE_BBDEV_OP_FFT] = 0;
+	dev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = 0;
+	dev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = 0;
+	dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 0;
+	dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 0;
+	dev_info->queue_priority[RTE_BBDEV_OP_FFT] = 0;
+	dev_info->max_num_queues = 0;
+	for (i = RTE_BBDEV_OP_NONE; i <= RTE_BBDEV_OP_FFT; i++)
+		dev_info->max_num_queues += dev_info->num_queues[i];
+	dev_info->queue_size_lim = ACC_MAX_QUEUE_DEPTH;
+	dev_info->hardware_accelerated = true;
+	dev_info->max_dl_queue_priority =
+			d->acc_conf.q_dl_4g.num_qgroups - 1;
+	dev_info->max_ul_queue_priority =
+			d->acc_conf.q_ul_4g.num_qgroups - 1;
+	dev_info->default_queue_conf = default_queue_conf;
+	dev_info->cpu_flag_reqs = NULL;
+	dev_info->min_alignment = 1;
+	dev_info->capabilities = bbdev_capabilities;
+	dev_info->harq_buffer_size = 0;
+}
 
 static const struct rte_bbdev_ops acc200_bbdev_ops = {
 	.close = acc200_dev_close,
+	.info_get = acc200_dev_info_get,
 };
 
 /* ACC200 PCI PF address map. */
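
A side note on the register parsing in fetch_acc200_config() above: each
queue group owns a 4-bit field spread across two 32-bit registers
(ACC_NUM_QGRPS_PER_WORD groups per word), read once for the accelerator
index (further masked to 3 bits) and once for the AQ depth. A standalone
sketch of that extraction, assuming ACC_NUM_QGRPS_PER_WORD is 8:

    #include <stdint.h>

    #define QGRPS_PER_WORD 8 /* assumed value of ACC_NUM_QGRPS_PER_WORD */

    /* Extract the 4-bit field of queue group qg from two 32-bit
     * registers that each hold the fields of eight queue groups. */
    static inline uint32_t
    qgrp_field(uint32_t reg0, uint32_t reg1, unsigned int qg)
    {
            if (qg < QGRPS_PER_WORD)
                    return (reg0 >> (qg * 4)) & 0xF;
            return (reg1 >> ((qg - QGRPS_PER_WORD) * 4)) & 0xF;
    }

For example, queue group 10 maps to bits 8..11 of the second register.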