From patchwork Tue Jul 3 05:58:01 2018
From: Pablo de Lara
To: declan.doherty@intel.com
Cc: dev@dpdk.org, Pablo de Lara
Date: Tue, 3 Jul 2018 06:58:01 +0100
Message-Id: <20180703055801.45087-1-pablo.de.lara.guarch@intel.com>
Subject: [dpdk-dev] [PATCH] crypto/aesni_mb: call buffer manager allocation
X-Patchwork-Id: 42156

Instead of embedding the multi-buffer manager MB_MGR as a static field
in the queue pair structure, use the API provided by the library to
allocate memory for it and store a pointer to the allocated instance.
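
For reference, below is a minimal sketch of the allocate/initialise/free
lifecycle this change moves to. It is not part of the patch: the header
name intel-ipsec-mb.h and the direct call to init_mb_mgr_sse() are
assumptions for illustration, whereas the PMD selects the init function
for the detected vector mode through qp->op_fns->job.init_mgr.

	#include <stddef.h>
	#include <intel-ipsec-mb.h>	/* MB_MGR, alloc_mb_mgr(), free_mb_mgr() */

	static int
	mb_mgr_lifecycle_sketch(void)
	{
		/* Allocate the manager on the heap instead of embedding it
		 * in the queue pair structure; 0 = default allocation flags.
		 */
		MB_MGR *mgr = alloc_mb_mgr(0);

		if (mgr == NULL)
			return -1;	/* allocation failed */

		/* Initialise for one vector mode (SSE here for illustration) */
		init_mb_mgr_sse(mgr);

		/* ... get/submit/flush jobs through mgr ... */

		/* Release the manager, as the queue pair release path now does */
		free_mb_mgr(mgr);
		return 0;
	}

Allocating the manager through the library keeps its size and alignment
requirements internal to the library, and lets the queue pair setup path
report -ENOMEM when the allocation fails instead of always returning -1.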
Signed-off-by: Pablo de Lara
---
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 10 +++++-----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     | 22 ++++++++++++++++++----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |  2 +-
 3 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 9ae2b54f4..cebb5318d 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -721,7 +721,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
 		if (processed_jobs == nb_ops)
 			break;
 
-		job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+		job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr);
 	}
 
 	return processed_jobs;
@@ -734,7 +734,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
 	int processed_ops = 0;
 
 	/* Flush the remaining jobs */
-	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr);
 
 	if (job)
 		processed_ops += handle_completed_jobs(qp, job,
@@ -779,14 +779,14 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			break;
 
 		/* Get next free mb job struct from mb manager */
-		job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+		job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
 		if (unlikely(job == NULL)) {
 			/* if no free mb job structs we need to flush mb_mgr */
 			processed_jobs += flush_mb_mgr(qp,
 					&ops[processed_jobs],
 					(nb_ops - processed_jobs) - 1);
 
-			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+			job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
 		}
 
 		retval = set_mb_job_params(job, qp, op, &digest_idx);
@@ -796,7 +796,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		}
 
 		/* Submit job to multi-buffer for processing */
-		job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+		job = (*qp->op_fns->job.submit)(qp->mb_mgr);
 
 		/*
 		 * If submit returns a processed job then handle it,
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 6a96d5077..f0087aeb1 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -402,6 +402,8 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 		r = rte_ring_lookup(qp->name);
 		if (r)
 			rte_ring_free(r);
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
 		rte_free(qp);
 		dev->data->queue_pairs[qp_id] = NULL;
 	}
@@ -463,6 +465,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 {
 	struct aesni_mb_qp *qp = NULL;
 	struct aesni_mb_private *internals = dev->data->dev_private;
+	int ret = -1;
 
 	/* Free memory prior to re-allocation if needed. */
 	if (dev->data->queue_pairs[qp_id] != NULL)
@@ -481,12 +484,20 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		goto qp_setup_cleanup;
 
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
 	qp->op_fns = &job_ops[internals->vector_mode];
 
 	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
 			"ingress", qp_conf->nb_descriptors, socket_id);
-	if (qp->ingress_queue == NULL)
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
 		goto qp_setup_cleanup;
+	}
 
 	qp->sess_mp = session_pool;
@@ -498,14 +509,17 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 			"digest_mp_%u_%u", dev->data->dev_id, qp_id);
 
 	/* Initialise multi-buffer manager */
-	(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+	(*qp->op_fns->job.init_mgr)(qp->mb_mgr);
 	return 0;
 
 qp_setup_cleanup:
-	if (qp)
+	if (qp) {
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
 		rte_free(qp);
+	}
 
-	return -1;
+	return ret;
 }
 
 /** Start queue pair */
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index bad2a8743..9fb9f934b 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -125,7 +125,7 @@ struct aesni_mb_qp {
 	/**< Unique Queue Pair Name */
 	const struct aesni_mb_op_fns *op_fns;
 	/**< Vector mode dependent pointer table of the multi-buffer APIs */
-	MB_MGR mb_mgr;
+	MB_MGR *mb_mgr;
 	/**< Multi-buffer instance */
 	struct rte_ring *ingress_queue;
 	/**< Ring for placing operations ready for processing */