From patchwork Thu Oct 14 16:11:31 2021
X-Patchwork-Submitter: Fan Zhang
X-Patchwork-Id: 101651
X-Patchwork-Delegate: gakhil@marvell.com
From: Fan Zhang
To: dev@dpdk.org
Cc: gakhil@marvell.com, Fan Zhang, Arek Kusztal, Kai Ji
Date: Thu, 14 Oct 2021 17:11:31 +0100
Message-Id: <20211014161137.1405168-5-roy.fan.zhang@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20211014161137.1405168-1-roy.fan.zhang@intel.com>
References: <20211001165954.717846-1-roy.fan.zhang@intel.com>
 <20211014161137.1405168-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [dpdk-dev v3 04/10] common/qat: add gen specific queue
 implementation

This patch replaces the mixed QAT queue pair configuration
implementation with separate per-generation files, each providing
either a shared or a generation-specific queue pair implementation
for the given QAT generation.
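For context, the split follows the same ops-table pattern already used
for qat_dev_hw_spec: each generation registers a struct
qat_qp_hw_spec_funcs instance from a constructor at init time, and the
generic qat_qp.c code dispatches through the table. Below is a minimal,
self-contained sketch of that dispatch pattern. The names are
simplified stand-ins (the real driver uses RTE_INIT() and
RTE_FUNC_PTR_OR_ERR_RET(); here a plain GCC/Clang constructor attribute
and a NULL check are used instead), so this is illustrative only, not
the driver's actual definitions:

    #include <stdio.h>

    enum qat_device_gen { QAT_GEN1, QAT_GEN2, QAT_GEN3, QAT_GEN4,
                          QAT_N_GENS };

    struct qat_qp; /* opaque for this sketch */

    /* Per-generation ops table: one pointer per CSR operation. */
    struct qp_hw_spec_funcs {
        void (*csr_write_tail)(struct qat_qp *qp);
    };

    static struct qp_hw_spec_funcs *qp_hw_spec[QAT_N_GENS];

    static void csr_write_tail_gen1(struct qat_qp *qp)
    {
        (void)qp;
        printf("gen1-style tail write\n");
    }

    static struct qp_hw_spec_funcs qp_hw_spec_gen1 = {
        .csr_write_tail = csr_write_tail_gen1,
    };

    /* Constructor-time registration, mirroring RTE_INIT(). */
    __attribute__((constructor)) static void gen_init(void)
    {
        qp_hw_spec[QAT_GEN1] = &qp_hw_spec_gen1;
        qp_hw_spec[QAT_GEN2] = &qp_hw_spec_gen1; /* gen2 reuses gen1 ops */
    }

    /* Generic code dispatches through the table with a NULL check. */
    static int txq_write_tail(enum qat_device_gen gen, struct qat_qp *qp)
    {
        if (qp_hw_spec[gen] == NULL ||
            qp_hw_spec[gen]->csr_write_tail == NULL)
            return -1; /* the driver returns -ENOTSUP here */
        qp_hw_spec[gen]->csr_write_tail(qp);
        return 0;
    }

    int main(void)
    {
        return txq_write_tail(QAT_GEN2, NULL);
    }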
Signed-off-by: Arek Kusztal
Signed-off-by: Fan Zhang
Signed-off-by: Kai Ji
---
 drivers/common/qat/dev/qat_dev_gen1.c         | 193 ++++-
 drivers/common/qat/dev/qat_dev_gen2.c         |  14 +
 drivers/common/qat/dev/qat_dev_gen3.c         |  60 ++
 drivers/common/qat/dev/qat_dev_gen4.c         | 161 ++++-
 drivers/common/qat/dev/qat_dev_gens.h         |  30 +-
 .../qat/qat_adf/adf_transport_access_macros.h |   2 +
 drivers/common/qat/qat_device.h               |   3 -
 drivers/common/qat/qat_qp.c                   | 667 +++++++-----------
 drivers/common/qat/qat_qp.h                   |  24 +-
 drivers/crypto/qat/qat_sym_pmd.c              |  32 +-
 10 files changed, 710 insertions(+), 476 deletions(-)

diff --git a/drivers/common/qat/dev/qat_dev_gen1.c b/drivers/common/qat/dev/qat_dev_gen1.c
index d9e75fe9e2..f1f43c17b1 100644
--- a/drivers/common/qat/dev/qat_dev_gen1.c
+++ b/drivers/common/qat/dev/qat_dev_gen1.c
@@ -3,6 +3,7 @@
  */

 #include "qat_device.h"
+#include "qat_qp.h"
 #include "adf_transport_access_macros.h"
 #include "qat_dev_gens.h"

@@ -10,6 +11,195 @@

 #define ADF_ARB_REG_SLOT    0x1000

+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+    ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+    (ADF_ARB_REG_SLOT * index), value)
+
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+                    [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+    /* queue pairs which provide an asymmetric crypto service */
+    [QAT_SERVICE_ASYMMETRIC] = {
+        {
+            .service_type = QAT_SERVICE_ASYMMETRIC,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 0,
+            .rx_ring_num = 8,
+            .tx_msg_size = 64,
+            .rx_msg_size = 32,
+
+        }, {
+            .service_type = QAT_SERVICE_ASYMMETRIC,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 1,
+            .rx_ring_num = 9,
+            .tx_msg_size = 64,
+            .rx_msg_size = 32,
+        }
+    },
+    /* queue pairs which provide a symmetric crypto service */
+    [QAT_SERVICE_SYMMETRIC] = {
+        {
+            .service_type = QAT_SERVICE_SYMMETRIC,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 2,
+            .rx_ring_num = 10,
+            .tx_msg_size = 128,
+            .rx_msg_size = 32,
+        },
+        {
+            .service_type = QAT_SERVICE_SYMMETRIC,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 3,
+            .rx_ring_num = 11,
+            .tx_msg_size = 128,
+            .rx_msg_size = 32,
+        }
+    },
+    /* queue pairs which provide a compression service */
+    [QAT_SERVICE_COMPRESSION] = {
+        {
+            .service_type = QAT_SERVICE_COMPRESSION,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 6,
+            .rx_ring_num = 14,
+            .tx_msg_size = 128,
+            .rx_msg_size = 32,
+        }, {
+            .service_type = QAT_SERVICE_COMPRESSION,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 7,
+            .rx_ring_num = 15,
+            .tx_msg_size = 128,
+            .rx_msg_size = 32,
+        }
+    }
+};
+
+const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen1(struct qat_pci_device *dev __rte_unused,
+        enum qat_service_type service_type, uint16_t qp_id)
+{
+    return qat_gen1_qps[service_type] + qp_id;
+}
+
+int
+qat_qp_rings_per_service_gen1(struct qat_pci_device *qat_dev,
+        enum qat_service_type service)
+{
+    int i = 0, count = 0;
+
+    for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+        const struct qat_qp_hw_data *hw_qps =
+                qat_qp_get_hw_data(qat_dev, service, i);
+        if (hw_qps->service_type == service)
+            count++;
+    }
+
+    return count;
+}
+
+void
+qat_qp_csr_build_ring_base_gen1(void *io_addr,
+        struct qat_queue *queue)
+{
+    uint64_t queue_base;
+
+    queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+            queue->queue_size);
+    WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+            queue->hw_queue_number, queue_base);
+}
+
+void
+qat_qp_adf_arb_enable_gen1(const struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock)
+{
+    uint32_t arb_csr_offset = 0, value;
+
+    rte_spinlock_lock(lock);
+    arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+            (ADF_ARB_REG_SLOT *
+            txq->hw_bundle_number);
+    value = ADF_CSR_RD(base_addr,
+            arb_csr_offset);
+    value |= (0x01 << txq->hw_queue_number);
+    ADF_CSR_WR(base_addr, arb_csr_offset, value);
+    rte_spinlock_unlock(lock);
+}
+
+void
+qat_qp_adf_arb_disable_gen1(const struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock)
+{
+    uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+            (ADF_ARB_REG_SLOT *
+            txq->hw_bundle_number);
+    uint32_t value;
+
+    rte_spinlock_lock(lock);
+    value = ADF_CSR_RD(base_addr, arb_csr_offset);
+    value &= ~(0x01 << txq->hw_queue_number);
+    ADF_CSR_WR(base_addr, arb_csr_offset, value);
+    rte_spinlock_unlock(lock);
+}
+
+void
+qat_qp_adf_configure_queues_gen1(struct qat_qp *qp)
+{
+    uint32_t q_tx_config, q_resp_config;
+    struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+    q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+    q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+            ADF_RING_NEAR_WATERMARK_512,
+            ADF_RING_NEAR_WATERMARK_0);
+    WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
+            q_tx->hw_bundle_number, q_tx->hw_queue_number,
+            q_tx_config);
+    WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
+            q_rx->hw_bundle_number, q_rx->hw_queue_number,
+            q_resp_config);
+}
+
+void
+qat_qp_csr_write_tail_gen1(struct qat_qp *qp, struct qat_queue *q)
+{
+    WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+            q->hw_queue_number, q->tail);
+}
+
+void
+qat_qp_csr_write_head_gen1(struct qat_qp *qp, struct qat_queue *q,
+        uint32_t new_head)
+{
+    WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+            q->hw_queue_number, new_head);
+}
+
+void
+qat_qp_csr_setup_gen1(struct qat_pci_device *qat_dev,
+        void *io_addr, struct qat_qp *qp)
+{
+    qat_qp_csr_build_ring_base_gen1(io_addr, &qp->tx_q);
+    qat_qp_csr_build_ring_base_gen1(io_addr, &qp->rx_q);
+    qat_qp_adf_configure_queues_gen1(qp);
+    qat_qp_adf_arb_enable_gen1(&qp->tx_q, qp->mmap_bar_addr,
+            &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen1 = {
+    .qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
+    .qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
+    .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
+    .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
+    .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
+    .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
+    .qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
+    .qat_qp_csr_setup = qat_qp_csr_setup_gen1,
+    .qat_qp_get_hw_data = qat_qp_get_hw_data_gen1,
+};
+
 int
 qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev __rte_unused)
 {
@@ -26,7 +216,7 @@ qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev)
 }

 int
-qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource __rte_unused,
+qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource __rte_unused,
     struct rte_pci_device *pci_dev __rte_unused)
 {
     return -1;
@@ -59,6 +249,7 @@ static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen1 = {

 RTE_INIT(qat_dev_gen_gen1_init)
 {
+    qat_qp_hw_spec[QAT_GEN1] = &qat_qp_hw_spec_gen1;
     qat_dev_hw_spec[QAT_GEN1] = &qat_dev_hw_spec_gen1;
     qat_gen_config[QAT_GEN1].dev_gen = QAT_GEN1;
     qat_gen_config[QAT_GEN1].comp_num_im_bufs_required =
diff --git a/drivers/common/qat/dev/qat_dev_gen2.c b/drivers/common/qat/dev/qat_dev_gen2.c
index d3470ed6b8..f077fe9eef 100644
--- a/drivers/common/qat/dev/qat_dev_gen2.c
+++ b/drivers/common/qat/dev/qat_dev_gen2.c
@@ -3,11 +3,24 @@
  */

 #include "qat_device.h"
+#include "qat_qp.h"
 #include "adf_transport_access_macros.h"
 #include "qat_dev_gens.h"
 #include <stdint.h>

+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen2 = {
+    .qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
+    .qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
+    .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
+    .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
+    .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
+    .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
+    .qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
+    .qat_qp_csr_setup = qat_qp_csr_setup_gen1,
+    .qat_qp_get_hw_data = qat_qp_get_hw_data_gen1,
+};
+
 static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen2 = {
     .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
     .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
@@ -18,6 +31,7 @@ static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen2 = {

 RTE_INIT(qat_dev_gen_gen2_init)
 {
+    qat_qp_hw_spec[QAT_GEN2] = &qat_qp_hw_spec_gen2;
     qat_dev_hw_spec[QAT_GEN2] = &qat_dev_hw_spec_gen2;
     qat_gen_config[QAT_GEN2].dev_gen = QAT_GEN2;
 }
diff --git a/drivers/common/qat/dev/qat_dev_gen3.c b/drivers/common/qat/dev/qat_dev_gen3.c
index e4a66869d2..de3fa17fa9 100644
--- a/drivers/common/qat/dev/qat_dev_gen3.c
+++ b/drivers/common/qat/dev/qat_dev_gen3.c
@@ -3,11 +3,70 @@
  */

 #include "qat_device.h"
+#include "qat_qp.h"
 #include "adf_transport_access_macros.h"
 #include "qat_dev_gens.h"

 #include <stdint.h>

+__extension__
+const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
+                    [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+    /* queue pairs which provide an asymmetric crypto service */
+    [QAT_SERVICE_ASYMMETRIC] = {
+        {
+            .service_type = QAT_SERVICE_ASYMMETRIC,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 0,
+            .rx_ring_num = 4,
+            .tx_msg_size = 64,
+            .rx_msg_size = 32,
+        }
+    },
+    /* queue pairs which provide a symmetric crypto service */
+    [QAT_SERVICE_SYMMETRIC] = {
+        {
+            .service_type = QAT_SERVICE_SYMMETRIC,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 1,
+            .rx_ring_num = 5,
+            .tx_msg_size = 128,
+            .rx_msg_size = 32,
+        }
+    },
+    /* queue pairs which provide a compression service */
+    [QAT_SERVICE_COMPRESSION] = {
+        {
+            .service_type = QAT_SERVICE_COMPRESSION,
+            .hw_bundle_num = 0,
+            .tx_ring_num = 3,
+            .rx_ring_num = 7,
+            .tx_msg_size = 128,
+            .rx_msg_size = 32,
+        }
+    }
+};
+
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen3(struct qat_pci_device *dev __rte_unused,
+        enum qat_service_type service_type, uint16_t qp_id)
+{
+    return qat_gen3_qps[service_type] + qp_id;
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen3 = {
+    .qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
+    .qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
+    .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
+    .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
+    .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
+    .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
+    .qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
+    .qat_qp_csr_setup = qat_qp_csr_setup_gen1,
+    .qat_qp_get_hw_data = qat_qp_get_hw_data_gen3
+};
+
 static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen3 = {
     .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
     .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
@@ -18,6 +77,7 @@ static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen3 = {

 RTE_INIT(qat_dev_gen_gen3_init)
 {
+    qat_qp_hw_spec[QAT_GEN3] = &qat_qp_hw_spec_gen3;
     qat_dev_hw_spec[QAT_GEN3] = &qat_dev_hw_spec_gen3;
     qat_gen_config[QAT_GEN3].dev_gen = QAT_GEN3;
 }
diff --git a/drivers/common/qat/dev/qat_dev_gen4.c b/drivers/common/qat/dev/qat_dev_gen4.c
index 5e5423ebfa..7ffde5f4c8 100644
--- a/drivers/common/qat/dev/qat_dev_gen4.c
+++ b/drivers/common/qat/dev/qat_dev_gen4.c
@@ -10,10 +10,13 @@
 #include "adf_transport_access_macros_gen4vf.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
-#include "qat_dev_gens.h"

 #include <stdint.h>

+/* QAT GEN 4 specific macros */
+#define QAT_GEN4_BUNDLE_NUM        4
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM    1
+
 struct qat_dev_gen4_extra {
     struct qat_qp_hw_data qp_gen4_data[QAT_GEN4_BUNDLE_NUM]
         [QAT_GEN4_QPS_PER_BUNDLE_NUM];
@@ -28,7 +31,7 @@ static struct qat_pf2vf_dev qat_pf2vf_gen4 = {
     .pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
 };

-int
+static int
 qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val)
 {
     struct qat_pf2vf_msg pf2vf_msg;
@@ -39,6 +42,52 @@ qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val)
     return qat_pf2vf_exch_msg(qat_dev, pf2vf_msg, 2, val);
 }

+static int
+qat_select_valid_queue_gen4(struct qat_pci_device *qat_dev, int qp_id,
+        enum qat_service_type service_type)
+{
+    int i = 0, valid_qps = 0;
+    struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
+
+    for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
+        if (dev_extra->qp_gen4_data[i][0].service_type ==
+            service_type) {
+            if (valid_qps == qp_id)
+                return i;
+            ++valid_qps;
+        }
+    }
+    return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen4(struct qat_pci_device *qat_dev,
+        enum qat_service_type service_type, uint16_t qp_id)
+{
+    struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
+    int ring_pair = qat_select_valid_queue_gen4(qat_dev, qp_id,
+            service_type);
+
+    if (ring_pair < 0)
+        return NULL;
+
+    return &dev_extra->qp_gen4_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen4(struct qat_pci_device *qat_dev,
+        enum qat_service_type service)
+{
+    int i = 0, count = 0, max_ops_per_srv = 0;
+    struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
+
+    max_ops_per_srv = QAT_GEN4_BUNDLE_NUM;
+    for (i = 0, count = 0; i < max_ops_per_srv; i++)
+        if (dev_extra->qp_gen4_data[i][0].service_type == service)
+            count++;
+    return count;
+}
+
 static enum qat_service_type
 gen4_pick_service(uint8_t hw_service)
 {
@@ -94,6 +143,109 @@ qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
     return 0;
 }

+static void
+qat_qp_build_ring_base_gen4(void *io_addr,
+        struct qat_queue *queue)
+{
+    uint64_t queue_base;
+
+    queue_base = BUILD_RING_BASE_ADDR_GEN4(queue->base_phys_addr,
+            queue->queue_size);
+    WRITE_CSR_RING_BASE_GEN4VF(io_addr, queue->hw_bundle_number,
+        queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen4(const struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock)
+{
+    uint32_t arb_csr_offset = 0, value;
+
+    rte_spinlock_lock(lock);
+    arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+            (ADF_RING_BUNDLE_SIZE_GEN4 *
+            txq->hw_bundle_number);
+    value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
+            arb_csr_offset);
+    value |= (0x01 << txq->hw_queue_number);
+    ADF_CSR_WR(base_addr, arb_csr_offset, value);
+    rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen4(const struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock)
+{
+    uint32_t arb_csr_offset = 0, value;
+
+    rte_spinlock_lock(lock);
+    arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+            (ADF_RING_BUNDLE_SIZE_GEN4 *
+            txq->hw_bundle_number);
+    value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
+            arb_csr_offset);
+    value &= ~(0x01 << txq->hw_queue_number);
+    ADF_CSR_WR(base_addr, arb_csr_offset, value);
+    rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen4(struct qat_qp *qp)
+{
+    uint32_t q_tx_config, q_resp_config;
+    struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+    q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+    q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+            ADF_RING_NEAR_WATERMARK_512,
+            ADF_RING_NEAR_WATERMARK_0);
+
+    WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
+        q_tx->hw_bundle_number, q_tx->hw_queue_number,
+        q_tx_config);
+    WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
+        q_rx->hw_bundle_number, q_rx->hw_queue_number,
+        q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen4(struct qat_qp *qp, struct qat_queue *q)
+{
+    WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
+        q->hw_bundle_number, q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen4(struct qat_qp *qp, struct qat_queue *q,
+        uint32_t new_head)
+{
+    WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
+        q->hw_bundle_number, q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen4(struct qat_pci_device *qat_dev,
+        void *io_addr, struct qat_qp *qp)
+{
+    qat_qp_build_ring_base_gen4(io_addr, &qp->tx_q);
+    qat_qp_build_ring_base_gen4(io_addr, &qp->rx_q);
+    qat_qp_adf_configure_queues_gen4(qp);
+    qat_qp_adf_arb_enable_gen4(&qp->tx_q, qp->mmap_bar_addr,
+            &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen4 = {
+    .qat_qp_rings_per_service = qat_qp_rings_per_service_gen4,
+    .qat_qp_build_ring_base = qat_qp_build_ring_base_gen4,
+    .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen4,
+    .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen4,
+    .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen4,
+    .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen4,
+    .qat_qp_csr_write_head = qat_qp_csr_write_head_gen4,
+    .qat_qp_csr_setup = qat_qp_csr_setup_gen4,
+    .qat_qp_get_hw_data = qat_qp_get_hw_data_gen4,
+};
+
 static int
 qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
 {
@@ -116,8 +268,8 @@ qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
     return 0;
 }

-static const struct
-rte_mem_resource *qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
+static const struct rte_mem_resource *
+qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
 {
     return &pci_dev->mem_resource[0];
 }
@@ -146,6 +298,7 @@ static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen4 = {

 RTE_INIT(qat_dev_gen_4_init)
 {
+    qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
     qat_dev_hw_spec[QAT_GEN4] = &qat_dev_hw_spec_gen4;
     qat_gen_config[QAT_GEN4].dev_gen = QAT_GEN4;
     qat_gen_config[QAT_GEN4].pf2vf_dev = &qat_pf2vf_gen4;
diff --git a/drivers/common/qat/dev/qat_dev_gens.h b/drivers/common/qat/dev/qat_dev_gens.h
index fc069d8867..0a86b3e933 100644
--- a/drivers/common/qat/dev/qat_dev_gens.h
+++ b/drivers/common/qat/dev/qat_dev_gens.h
@@ -16,6 +16,33 @@ extern const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
 int
 qat_dev_get_extra_size_gen1(void);

+const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen1(struct qat_pci_device *dev,
+        enum qat_service_type service_type, uint16_t qp_id);
+
+int
+qat_qp_rings_per_service_gen1(struct qat_pci_device *qat_dev,
+        enum qat_service_type service);
+void
+qat_qp_csr_build_ring_base_gen1(void *io_addr,
+        struct qat_queue *queue);
+void
+qat_qp_adf_arb_enable_gen1(const struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock);
+void
+qat_qp_adf_arb_disable_gen1(const struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock);
+void
+qat_qp_adf_configure_queues_gen1(struct qat_qp *qp);
+void
+qat_qp_csr_write_tail_gen1(struct qat_qp *qp, struct qat_queue *q);
+void
+qat_qp_csr_write_head_gen1(struct qat_qp *qp, struct qat_queue *q,
+        uint32_t new_head);
+void
+qat_qp_csr_setup_gen1(struct qat_pci_device *qat_dev,
+        void *io_addr, struct qat_qp *qp);
+
 int
 qat_reset_ring_pairs_gen1(
         struct qat_pci_device *qat_pci_dev);
@@ -28,7 +55,4 @@ qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource,
 int
 qat_dev_read_config_gen1(struct qat_pci_device *qat_dev);

-int
-qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val);
-
 #endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 504ffb7236..f98bbb5001 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -51,6 +51,8 @@
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
 #define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+/* ARB CSR offset */
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

 /* Maximum number of qps on a device for any service type */
 #define ADF_MAX_QPS_ON_ANY_SERVICE 2
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
index 8b69206df5..8233cc045d 100644
--- a/drivers/common/qat/qat_device.h
+++ b/drivers/common/qat/qat_device.h
@@ -128,9 +128,6 @@ struct qat_pci_device {
     /* Data relating to compression service */
     struct qat_comp_dev_private *comp_dev;
     /**< link back to compressdev private data */
-    struct qat_qp_hw_data qp_gen4_data[QAT_GEN4_BUNDLE_NUM]
-        [QAT_GEN4_QPS_PER_BUNDLE_NUM];
-    /**< Data of ring configuration on gen4 */
     void *misc_bar_io_addr;
     /**< Address of misc bar */
     void *dev_private;
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 27994036b8..39a329d5d8 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -18,124 +18,15 @@
 #include "qat_sym.h"
 #include "qat_asym.h"
 #include "qat_comp.h"
-#include "adf_transport_access_macros.h"
-#include "adf_transport_access_macros_gen4vf.h"
-#include "dev/qat_dev_gens.h"

 #define QAT_CQ_MAX_DEQ_RETRIES 10

 #define ADF_MAX_DESC        4096
 #define ADF_MIN_DESC        128

-#define ADF_ARB_REG_SLOT            0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET        0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
-    ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
-    (ADF_ARB_REG_SLOT * index), value)
-
 struct qat_qp_hw_spec_funcs*
     qat_qp_hw_spec[QAT_N_GENS];

-__extension__
-const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
-                    [ADF_MAX_QPS_ON_ANY_SERVICE] = {
-    /* queue pairs which provide an asymmetric crypto service */
-    [QAT_SERVICE_ASYMMETRIC] = {
-        {
-            .service_type = QAT_SERVICE_ASYMMETRIC,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 0,
-            .rx_ring_num = 8,
-            .tx_msg_size = 64,
-            .rx_msg_size = 32,
-
-        }, {
-            .service_type = QAT_SERVICE_ASYMMETRIC,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 1,
-            .rx_ring_num = 9,
-            .tx_msg_size = 64,
-            .rx_msg_size = 32,
-        }
-    },
-    /* queue pairs which provide a symmetric crypto service */
-    [QAT_SERVICE_SYMMETRIC] = {
-        {
-            .service_type = QAT_SERVICE_SYMMETRIC,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 2,
-            .rx_ring_num = 10,
-            .tx_msg_size = 128,
-            .rx_msg_size = 32,
-        },
-        {
-            .service_type = QAT_SERVICE_SYMMETRIC,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 3,
-            .rx_ring_num = 11,
-            .tx_msg_size = 128,
-            .rx_msg_size = 32,
-        }
-    },
-    /* queue pairs which provide a compression service */
-    [QAT_SERVICE_COMPRESSION] = {
-        {
-            .service_type = QAT_SERVICE_COMPRESSION,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 6,
-            .rx_ring_num = 14,
-            .tx_msg_size = 128,
-            .rx_msg_size = 32,
-        }, {
-            .service_type = QAT_SERVICE_COMPRESSION,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 7,
-            .rx_ring_num = 15,
-            .tx_msg_size = 128,
-            .rx_msg_size = 32,
-        }
-    }
-};
-
-__extension__
-const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
-                    [ADF_MAX_QPS_ON_ANY_SERVICE] = {
-    /* queue pairs which provide an asymmetric crypto service */
-    [QAT_SERVICE_ASYMMETRIC] = {
-        {
-            .service_type = QAT_SERVICE_ASYMMETRIC,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 0,
-            .rx_ring_num = 4,
-            .tx_msg_size = 64,
-            .rx_msg_size = 32,
-        }
-    },
-    /* queue pairs which provide a symmetric crypto service */
-    [QAT_SERVICE_SYMMETRIC] = {
-        {
-            .service_type = QAT_SERVICE_SYMMETRIC,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 1,
-            .rx_ring_num = 5,
-            .tx_msg_size = 128,
-            .rx_msg_size = 32,
-        }
-    },
-    /* queue pairs which provide a compression service */
-    [QAT_SERVICE_COMPRESSION] = {
-        {
-            .service_type = QAT_SERVICE_COMPRESSION,
-            .hw_bundle_num = 0,
-            .tx_ring_num = 3,
-            .rx_ring_num = 7,
-            .tx_msg_size = 128,
-            .rx_msg_size = 32,
-        }
-    }
-};
-
 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
     uint32_t queue_size_bytes);
 static void qat_queue_delete(struct qat_queue *queue);
@@ -143,68 +34,21 @@ static int qat_queue_create(struct qat_pci_device *qat_dev,
     struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
     uint32_t *queue_size_for_csr);
-static void adf_configure_queues(struct qat_qp *queue,
+static int adf_configure_queues(struct qat_qp *queue,
     enum qat_device_gen qat_dev_gen);
-static void adf_queue_arb_enable(enum qat_device_gen qat_dev_gen,
+static int adf_queue_arb_enable(struct qat_pci_device *qat_dev,
     struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock);
-static void adf_queue_arb_disable(enum qat_device_gen qat_dev_gen,
+static int adf_queue_arb_disable(enum qat_device_gen qat_dev_gen,
     struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock);
+static int qat_qp_build_ring_base(struct qat_pci_device *qat_dev,
+    void *io_addr, struct qat_queue *queue);
+static const struct rte_memzone *queue_dma_zone_reserve(const char *queue_name,
+    uint32_t queue_size, int socket_id);
+static int qat_qp_csr_setup(struct qat_pci_device *qat_dev, void *io_addr,
+    struct qat_qp *qp);

-int qat_qps_per_service(struct qat_pci_device *qat_dev,
-        enum qat_service_type service)
-{
-    int i = 0, count = 0, max_ops_per_srv = 0;
-
-    if (qat_dev->qat_dev_gen == QAT_GEN4) {
-        max_ops_per_srv = QAT_GEN4_BUNDLE_NUM;
-        for (i = 0, count = 0; i < max_ops_per_srv; i++)
-            if (qat_dev->qp_gen4_data[i][0].service_type == service)
-                count++;
-    } else {
-        const struct qat_qp_hw_data *sym_hw_qps =
-            qat_gen_config[qat_dev->qat_dev_gen]
-                .qp_hw_data[service];
-
-        max_ops_per_srv = ADF_MAX_QPS_ON_ANY_SERVICE;
-        for (i = 0, count = 0; i < max_ops_per_srv; i++)
-            if (sym_hw_qps[i].service_type == service)
-                count++;
-    }
-
-    return count;
-}
-
-static const struct rte_memzone *
-queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
-        int socket_id)
-{
-    const struct rte_memzone *mz;
-
-    mz = rte_memzone_lookup(queue_name);
-    if (mz != 0) {
-        if (((size_t)queue_size <= mz->len) &&
-                ((socket_id == SOCKET_ID_ANY) ||
-                    (socket_id == mz->socket_id))) {
-            QAT_LOG(DEBUG, "re-use memzone already "
-                    "allocated for %s", queue_name);
-            return mz;
-        }
-
QAT_LOG(ERR, "Incompatible memzone already " - "allocated %s, size %u, socket %d. " - "Requested size %u, socket %u", - queue_name, (uint32_t)mz->len, - mz->socket_id, queue_size, socket_id); - return NULL; - } - - QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", - queue_name, queue_size, socket_id); - return rte_memzone_reserve_aligned(queue_name, queue_size, - socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); -} - -int qat_qp_setup(struct qat_pci_device *qat_dev, +int +qat_qp_setup(struct qat_pci_device *qat_dev, struct qat_qp **qp_addr, uint16_t queue_pair_id, struct qat_qp_config *qat_qp_conf) @@ -213,7 +57,9 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, struct rte_pci_device *pci_dev = qat_pci_devs[qat_dev->qat_dev_id].pci_dev; char op_cookie_pool_name[RTE_RING_NAMESIZE]; - enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen; + struct qat_dev_hw_spec_funcs *ops_hw = + qat_dev_hw_spec[qat_dev->qat_dev_gen]; + void *io_addr; uint32_t i; QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d", @@ -226,7 +72,15 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, return -EINVAL; } - if (pci_dev->mem_resource[0].addr == NULL) { + if (ops_hw->qat_dev_get_transport_bar == NULL) { + QAT_LOG(ERR, + "QAT Internal Error: qat_dev_get_transport_bar not set for gen %d", + qat_dev->qat_dev_gen); + goto create_err; + } + + io_addr = ops_hw->qat_dev_get_transport_bar(pci_dev)->addr; + if (io_addr == NULL) { QAT_LOG(ERR, "Could not find VF config space " "(UIO driver attached?)."); return -EINVAL; @@ -250,7 +104,7 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, return -ENOMEM; } - qp->mmap_bar_addr = pci_dev->mem_resource[0].addr; + qp->mmap_bar_addr = io_addr; qp->enqueued = qp->dequeued = 0; if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf, @@ -277,10 +131,6 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, goto create_err; } - adf_configure_queues(qp, qat_dev_gen); - adf_queue_arb_enable(qat_dev_gen, &qp->tx_q, qp->mmap_bar_addr, - &qat_dev->arb_csr_lock); - snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s%d_cookies_%s_qp%hu", pci_dev->driver->driver.name, qat_dev->qat_dev_id, @@ -298,6 +148,8 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, if (!qp->op_cookie_pool) { QAT_LOG(ERR, "QAT PMD Cannot create" " op mempool"); + qat_queue_delete(&(qp->tx_q)); + qat_queue_delete(&(qp->rx_q)); goto create_err; } @@ -316,6 +168,8 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s", queue_pair_id, op_cookie_pool_name); + qat_qp_csr_setup(qat_dev, io_addr, qp); + *qp_addr = qp; return 0; @@ -327,80 +181,13 @@ int qat_qp_setup(struct qat_pci_device *qat_dev, return -EFAULT; } - -int qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr) -{ - struct qat_qp *qp = *qp_addr; - uint32_t i; - - if (qp == NULL) { - QAT_LOG(DEBUG, "qp already freed"); - return 0; - } - - QAT_LOG(DEBUG, "Free qp on qat_pci device %d", - qp->qat_dev->qat_dev_id); - - /* Don't free memory if there are still responses to be processed */ - if ((qp->enqueued - qp->dequeued) == 0) { - qat_queue_delete(&(qp->tx_q)); - qat_queue_delete(&(qp->rx_q)); - } else { - return -EAGAIN; - } - - adf_queue_arb_disable(qat_dev_gen, &(qp->tx_q), qp->mmap_bar_addr, - &qp->qat_dev->arb_csr_lock); - - for (i = 0; i < qp->nb_descriptors; i++) - rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); - - if (qp->op_cookie_pool) - rte_mempool_free(qp->op_cookie_pool); - - rte_free(qp->op_cookies); - rte_free(qp); - *qp_addr = NULL; - return 0; -} 
-
-
-static void qat_queue_delete(struct qat_queue *queue)
-{
-    const struct rte_memzone *mz;
-    int status = 0;
-
-    if (queue == NULL) {
-        QAT_LOG(DEBUG, "Invalid queue");
-        return;
-    }
-    QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
-            queue->hw_queue_number, queue->memz_name);
-
-    mz = rte_memzone_lookup(queue->memz_name);
-    if (mz != NULL) {
-        /* Write an unused pattern to the queue memory. */
-        memset(queue->base_addr, 0x7F, queue->queue_size);
-        status = rte_memzone_free(mz);
-        if (status != 0)
-            QAT_LOG(ERR, "Error %d on freeing queue %s",
-                    status, queue->memz_name);
-    } else {
-        QAT_LOG(DEBUG, "queue %s doesn't exist",
-                queue->memz_name);
-    }
-}
-
 static int
 qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
         struct qat_qp_config *qp_conf, uint8_t dir)
 {
-    uint64_t queue_base;
-    void *io_addr;
     const struct rte_memzone *qp_mz;
     struct rte_pci_device *pci_dev =
             qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
-    enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
     int ret = 0;
     uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
             qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
@@ -460,19 +247,6 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
      * Write an unused pattern to the queue memory.
      */
     memset(queue->base_addr, 0x7F, queue_size_bytes);
-    io_addr = pci_dev->mem_resource[0].addr;
-
-    if (qat_dev_gen == QAT_GEN4) {
-        queue_base = BUILD_RING_BASE_ADDR_GEN4(queue->base_phys_addr,
-                    queue->queue_size);
-        WRITE_CSR_RING_BASE_GEN4VF(io_addr, queue->hw_bundle_number,
-            queue->hw_queue_number, queue_base);
-    } else {
-        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
-                queue->queue_size);
-        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
-                queue->hw_queue_number, queue_base);
-    }

     QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
         " nb msgs %u, msg_size %u, modulo mask %u",
@@ -488,202 +262,231 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
     return ret;
 }

-int
-qat_select_valid_queue(struct qat_pci_device *qat_dev, int qp_id,
-            enum qat_service_type service_type)
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+        int socket_id)
 {
-    if (qat_dev->qat_dev_gen == QAT_GEN4) {
-        int i = 0, valid_qps = 0;
-
-        for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
-            if (qat_dev->qp_gen4_data[i][0].service_type ==
-                service_type) {
-                if (valid_qps == qp_id)
-                    return i;
-                ++valid_qps;
-            }
+    const struct rte_memzone *mz;
+
+    mz = rte_memzone_lookup(queue_name);
+    if (mz != 0) {
+        if (((size_t)queue_size <= mz->len) &&
+                ((socket_id == SOCKET_ID_ANY) ||
+                    (socket_id == mz->socket_id))) {
+            QAT_LOG(DEBUG, "re-use memzone already "
+                    "allocated for %s", queue_name);
+            return mz;
         }
+
+        QAT_LOG(ERR, "Incompatible memzone already "
+                "allocated %s, size %u, socket %d. "
" + "Requested size %u, socket %u", + queue_name, (uint32_t)mz->len, + mz->socket_id, queue_size, socket_id); + return NULL; } - return -1; + + QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", + queue_name, queue_size, socket_id); + return rte_memzone_reserve_aligned(queue_name, queue_size, + socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); } int -qat_read_qp_config(struct qat_pci_device *qat_dev) +qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr) { - int i = 0; - enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen; - - if (qat_dev_gen == QAT_GEN4) { - uint16_t svc = 0; - - if (qat_query_svc_gen4(qat_dev, (uint8_t *)&svc)) - return -(EFAULT); - for (; i < QAT_GEN4_BUNDLE_NUM; i++) { - struct qat_qp_hw_data *hw_data = - &qat_dev->qp_gen4_data[i][0]; - uint8_t svc1 = (svc >> (3 * i)) & 0x7; - enum qat_service_type service_type = QAT_SERVICE_INVALID; - - if (svc1 == QAT_SVC_SYM) { - service_type = QAT_SERVICE_SYMMETRIC; - QAT_LOG(DEBUG, - "Discovered SYMMETRIC service on bundle %d", - i); - } else if (svc1 == QAT_SVC_COMPRESSION) { - service_type = QAT_SERVICE_COMPRESSION; - QAT_LOG(DEBUG, - "Discovered COPRESSION service on bundle %d", - i); - } else if (svc1 == QAT_SVC_ASYM) { - service_type = QAT_SERVICE_ASYMMETRIC; - QAT_LOG(DEBUG, - "Discovered ASYMMETRIC service on bundle %d", - i); - } else { - QAT_LOG(ERR, - "Unrecognized service on bundle %d", - i); - return -(EFAULT); - } + int ret; + struct qat_qp *qp = *qp_addr; + uint32_t i; - memset(hw_data, 0, sizeof(*hw_data)); - hw_data->service_type = service_type; - if (service_type == QAT_SERVICE_ASYMMETRIC) { - hw_data->tx_msg_size = 64; - hw_data->rx_msg_size = 32; - } else if (service_type == QAT_SERVICE_SYMMETRIC || - service_type == - QAT_SERVICE_COMPRESSION) { - hw_data->tx_msg_size = 128; - hw_data->rx_msg_size = 32; - } - hw_data->tx_ring_num = 0; - hw_data->rx_ring_num = 1; - hw_data->hw_bundle_num = i; - } + if (qp == NULL) { + QAT_LOG(DEBUG, "qp already freed"); return 0; } - return -(EINVAL); + + QAT_LOG(DEBUG, "Free qp on qat_pci device %d", + qp->qat_dev->qat_dev_id); + + /* Don't free memory if there are still responses to be processed */ + if ((qp->enqueued - qp->dequeued) == 0) { + qat_queue_delete(&(qp->tx_q)); + qat_queue_delete(&(qp->rx_q)); + } else { + return -EAGAIN; + } + + ret = adf_queue_arb_disable(qat_dev_gen, &(qp->tx_q), + qp->mmap_bar_addr, &qp->qat_dev->arb_csr_lock); + if (ret) + return ret; + + for (i = 0; i < qp->nb_descriptors; i++) + rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); + + if (qp->op_cookie_pool) + rte_mempool_free(qp->op_cookie_pool); + + rte_free(qp->op_cookies); + rte_free(qp); + *qp_addr = NULL; + return 0; } -static int qat_qp_check_queue_alignment(uint64_t phys_addr, - uint32_t queue_size_bytes) + +static void +qat_queue_delete(struct qat_queue *queue) { - if (((queue_size_bytes - 1) & phys_addr) != 0) - return -EINVAL; + const struct rte_memzone *mz; + int status = 0; + + if (queue == NULL) { + QAT_LOG(DEBUG, "Invalid queue"); + return; + } + QAT_LOG(DEBUG, "Free ring %d, memzone: %s", + queue->hw_queue_number, queue->memz_name); + + mz = rte_memzone_lookup(queue->memz_name); + if (mz != NULL) { + /* Write an unused pattern to the queue memory. 
+        memset(queue->base_addr, 0x7F, queue->queue_size);
+        status = rte_memzone_free(mz);
+        if (status != 0)
+            QAT_LOG(ERR, "Error %d on freeing queue %s",
+                    status, queue->memz_name);
+    } else {
+        QAT_LOG(DEBUG, "queue %s doesn't exist",
+                queue->memz_name);
+    }
+}
+
+static int __rte_unused
+adf_queue_arb_enable(struct qat_pci_device *qat_dev, struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock)
+{
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev->qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_adf_arb_enable,
+            -ENOTSUP);
+    ops->qat_qp_adf_arb_enable(txq, base_addr, lock);
     return 0;
 }

-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
-    uint32_t *p_queue_size_for_csr)
+static int
+adf_queue_arb_disable(enum qat_device_gen qat_dev_gen, struct qat_queue *txq,
+        void *base_addr, rte_spinlock_t *lock)
 {
-    uint8_t i = ADF_MIN_RING_SIZE;
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev_gen];

-    for (; i <= ADF_MAX_RING_SIZE; i++)
-        if ((msg_size * msg_num) ==
-                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
-            *p_queue_size_for_csr = i;
-            return 0;
-        }
-    QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
-    return -EINVAL;
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_adf_arb_disable,
+            -ENOTSUP);
+    ops->qat_qp_adf_arb_disable(txq, base_addr, lock);
+    return 0;
 }

-static void
-adf_queue_arb_enable(enum qat_device_gen qat_dev_gen, struct qat_queue *txq,
-            void *base_addr, rte_spinlock_t *lock)
+static int __rte_unused
+qat_qp_build_ring_base(struct qat_pci_device *qat_dev, void *io_addr,
+        struct qat_queue *queue)
 {
-    uint32_t arb_csr_offset = 0, value;
-
-    rte_spinlock_lock(lock);
-    if (qat_dev_gen == QAT_GEN4) {
-        arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-                (ADF_RING_BUNDLE_SIZE_GEN4 *
-                txq->hw_bundle_number);
-        value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
-                arb_csr_offset);
-    } else {
-        arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-                (ADF_ARB_REG_SLOT *
-                txq->hw_bundle_number);
-        value = ADF_CSR_RD(base_addr,
-                arb_csr_offset);
-    }
-    value |= (0x01 << txq->hw_queue_number);
-    ADF_CSR_WR(base_addr, arb_csr_offset, value);
-    rte_spinlock_unlock(lock);
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev->qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_build_ring_base,
+            -ENOTSUP);
+    ops->qat_qp_build_ring_base(io_addr, queue);
+    return 0;
 }

-static void adf_queue_arb_disable(enum qat_device_gen qat_dev_gen,
-    struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock)
+int
+qat_qps_per_service(struct qat_pci_device *qat_dev,
+        enum qat_service_type service)
 {
-    uint32_t arb_csr_offset = 0, value;
-
-    rte_spinlock_lock(lock);
-    if (qat_dev_gen == QAT_GEN4) {
-        arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-                (ADF_RING_BUNDLE_SIZE_GEN4 *
-                txq->hw_bundle_number);
-        value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
-                arb_csr_offset);
-    } else {
-        arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-                (ADF_ARB_REG_SLOT *
-                txq->hw_bundle_number);
-        value = ADF_CSR_RD(base_addr,
-                arb_csr_offset);
-    }
-    value &= ~(0x01 << txq->hw_queue_number);
-    ADF_CSR_WR(base_addr, arb_csr_offset, value);
-    rte_spinlock_unlock(lock);
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev->qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_rings_per_service,
+            -ENOTSUP);
+    return ops->qat_qp_rings_per_service(qat_dev, service);
 }

-static void adf_configure_queues(struct qat_qp *qp,
-    enum qat_device_gen qat_dev_gen)
+const struct qat_qp_hw_data *
+qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
+        enum qat_service_type service, uint16_t qp_id)
 {
-    uint32_t q_tx_config, q_resp_config;
-    struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
-
-    q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
-    q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
-            ADF_RING_NEAR_WATERMARK_512,
-            ADF_RING_NEAR_WATERMARK_0);
-
-    if (qat_dev_gen == QAT_GEN4) {
-        WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
-            q_tx->hw_bundle_number, q_tx->hw_queue_number,
-            q_tx_config);
-        WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
-            q_rx->hw_bundle_number, q_rx->hw_queue_number,
-            q_resp_config);
-    } else {
-        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
-            q_tx->hw_bundle_number, q_tx->hw_queue_number,
-            q_tx_config);
-        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
-            q_rx->hw_bundle_number, q_rx->hw_queue_number,
-            q_resp_config);
-    }
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev->qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_get_hw_data, NULL);
+    return ops->qat_qp_get_hw_data(qat_dev, service, qp_id);
 }

-static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+int
+qat_read_qp_config(struct qat_pci_device *qat_dev)
 {
-    return data & modulo_mask;
+    struct qat_dev_hw_spec_funcs *ops_hw =
+        qat_dev_hw_spec[qat_dev->qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_read_config,
+            -ENOTSUP);
+    return ops_hw->qat_dev_read_config(qat_dev);
+}
+
+static int __rte_unused
+adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
+{
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_adf_configure_queues,
+            -ENOTSUP);
+    ops->qat_qp_adf_configure_queues(qp);
+    return 0;
 }

 static inline void
 txq_write_tail(enum qat_device_gen qat_dev_gen,
-        struct qat_qp *qp, struct qat_queue *q) {
+        struct qat_qp *qp, struct qat_queue *q)
+{
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev_gen];

-    if (qat_dev_gen == QAT_GEN4) {
-        WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
-            q->hw_bundle_number, q->hw_queue_number, q->tail);
-    } else {
-        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
-            q->hw_queue_number, q->tail);
-    }
+    /*
+     * Pointer check should be done during
+     * initialization
+     */
+    ops->qat_qp_csr_write_tail(qp, q);
+}
+
+static inline void
+qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
+        struct qat_queue *q, uint32_t new_head)
+{
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev_gen];
+
+    /*
+     * Pointer check should be done during
+     * initialization
+     */
+    ops->qat_qp_csr_write_head(qp, q, new_head);
+}
+
+static int
+qat_qp_csr_setup(struct qat_pci_device *qat_dev,
+        void *io_addr, struct qat_qp *qp)
+{
+    struct qat_qp_hw_spec_funcs *ops =
+        qat_qp_hw_spec[qat_dev->qat_dev_gen];
+
+    RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_csr_setup,
+            -ENOTSUP);
+    ops->qat_qp_csr_setup(qat_dev, io_addr, qp);
+    return 0;
 }

+
 static inline
 void rxq_free_desc(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
         struct qat_queue *q)
@@ -707,15 +510,37 @@ void rxq_free_desc(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
     q->nb_processed_responses = 0;
     q->csr_head = new_head;

-    /* write current head to CSR */
-    if (qat_dev_gen == QAT_GEN4) {
-        WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
-            q->hw_bundle_number, q->hw_queue_number, new_head);
-    } else {
-        WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
-            q->hw_queue_number, new_head);
-    }
+    qat_qp_csr_write_head(qat_dev_gen, qp, q, new_head);
+}
+
+static int
+qat_qp_check_queue_alignment(uint64_t phys_addr, uint32_t queue_size_bytes)
+{
+    if (((queue_size_bytes - 1) & phys_addr) != 0)
+        return -EINVAL;
+    return 0;
+}
+
+static int
+adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+        uint32_t *p_queue_size_for_csr)
+{
+    uint8_t i = ADF_MIN_RING_SIZE;
+
+    for (; i <= ADF_MAX_RING_SIZE; i++)
+        if ((msg_size * msg_num) ==
+                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+            *p_queue_size_for_csr = i;
+            return 0;
+        }
+    QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+    return -EINVAL;
+}
+
+static inline uint32_t
+adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+    return data & modulo_mask;
 }

 uint16_t
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 726cd2ef61..deafb407b3 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -12,16 +12,6 @@

 #define QAT_QP_MIN_INFL_THRESHOLD    256

-/* Default qp configuration for GEN4 devices */
-#define QAT_GEN4_QP_DEFCON    (QAT_SERVICE_SYMMETRIC |    \
-                QAT_SERVICE_SYMMETRIC << 8 |    \
-                QAT_SERVICE_SYMMETRIC << 16 |    \
-                QAT_SERVICE_SYMMETRIC << 24)
-
-/* QAT GEN 4 specific macros */
-#define QAT_GEN4_BUNDLE_NUM        4
-#define QAT_GEN4_QPS_PER_BUNDLE_NUM    1
-
 struct qat_pci_device;

 /**
@@ -106,7 +96,11 @@ qat_qp_setup(struct qat_pci_device *qat_dev,

 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-        enum qat_service_type service);
+        enum qat_service_type service);
+
+const struct qat_qp_hw_data *
+qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
+        enum qat_service_type service, uint16_t qp_id);

 int
 qat_cq_get_fw_version(struct qat_qp *qp);
@@ -116,11 +110,6 @@ int
 qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
               void *op_cookie __rte_unused,
               uint64_t *dequeue_err_count __rte_unused);
-
-int
-qat_select_valid_queue(struct qat_pci_device *qat_dev, int qp_id,
-            enum qat_service_type service_type);
-
 int
 qat_read_qp_config(struct qat_pci_device *qat_dev);

@@ -166,7 +155,4 @@ struct qat_qp_hw_spec_funcs {

 extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[];

-extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
-extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
-
 #endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index efda921c05..71907a606d 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -164,35 +164,11 @@ static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
     int ret = 0;
     uint32_t i;
     struct qat_qp_config qat_qp_conf;
-    const struct qat_qp_hw_data *sym_hw_qps = NULL;
-    const struct qat_qp_hw_data *qp_hw_data = NULL;
-
     struct qat_qp **qp_addr =
             (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
     struct qat_sym_dev_private *qat_private = dev->data->dev_private;
     struct qat_pci_device *qat_dev = qat_private->qat_dev;

-    if (qat_dev->qat_dev_gen == QAT_GEN4) {
-        int ring_pair =
-            qat_select_valid_queue(qat_dev, qp_id,
-                QAT_SERVICE_SYMMETRIC);
-
-        if (ring_pair < 0) {
-            QAT_LOG(ERR,
-                "qp_id %u invalid for this device, no enough services allocated for GEN4 device",
-                qp_id);
-            return -EINVAL;
-        }
-        sym_hw_qps =
-            &qat_dev->qp_gen4_data[0][0];
-        qp_hw_data =
-            &qat_dev->qp_gen4_data[ring_pair][0];
-    } else {
-        sym_hw_qps = qat_gen_config[qat_dev->qat_dev_gen]
-                .qp_hw_data[QAT_SERVICE_SYMMETRIC];
-        qp_hw_data = sym_hw_qps + qp_id;
-    }
-
     /* If qp is already in use free ring memory and qp metadata. */
     if (*qp_addr != NULL) {
         ret = qat_sym_qp_release(dev, qp_id);
@@ -204,7 +180,13 @@ static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
         return -EINVAL;
     }

-    qat_qp_conf.hw = qp_hw_data;
+    qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, QAT_SERVICE_SYMMETRIC,
+            qp_id);
+    if (qat_qp_conf.hw == NULL) {
+        QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+        return -EINVAL;
+    }
+
     qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
     qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
     qat_qp_conf.socket_id = socket_id;