From patchwork Mon Sep 30 14:41:03 2019
X-Patchwork-Submitter: Akhil Goyal <akhil.goyal@nxp.com>
X-Patchwork-Id: 60230
X-Patchwork-Delegate: gakhil@marvell.com
From: Akhil Goyal <akhil.goyal@nxp.com>
To: dev@dpdk.org
Cc: aconole@redhat.com, anoobj@marvell.com, Akhil Goyal <akhil.goyal@nxp.com>
Date: Mon, 30 Sep 2019 20:11:03 +0530
Message-Id: <20190930144104.12742-24-akhil.goyal@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190930144104.12742-1-akhil.goyal@nxp.com>
References: <20190930115237.5073-1-akhil.goyal@nxp.com>
 <20190930144104.12742-1-akhil.goyal@nxp.com>
Subject: [dpdk-dev] [PATCH v3 23/24] crypto/dpaa_sec: change per cryptodev
 pool to per qp

When a single cryptodev is used by multiple cores through multiple queues,
the cores contend for the shared per-device mempool, which may eventually
be exhausted. The mempool should instead be defined per core. Since each
qp is used by a single core, the mempools are now created in the qp setup.
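The idea, as a standalone sketch (illustration only, not the driver code;
MAX_CORES, NUM_BUFS, BUF_SIZE and the qp struct below are made-up
placeholders): each queue pair creates its own mempool at setup time, and
the data path allocates from the pool of the qp owned by the current
lcore, so cores no longer share one per-device pool.

    /* Illustrative sketch only -- not dpaa_sec code. */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #include <rte_lcore.h>
    #include <rte_memory.h>
    #include <rte_mempool.h>

    #define MAX_CORES 4     /* placeholder for MAX_DPAA_CORES */
    #define NUM_BUFS  1024  /* placeholder pool depth */
    #define BUF_SIZE  256   /* placeholder element size */

    struct qp {
    	struct rte_mempool *ctx_pool; /* per-qp pool for op contexts */
    };

    static struct qp qps[MAX_CORES];

    /* Called once per queue pair (i.e. per core) at setup time. */
    static int
    qp_setup(uint16_t qp_id)
    {
    	char name[32];

    	snprintf(name, sizeof(name), "ctx_pool_qp%u", qp_id);
    	qps[qp_id].ctx_pool = rte_mempool_create(name, NUM_BUFS, BUF_SIZE,
    						 0, 0, NULL, NULL, NULL, NULL,
    						 SOCKET_ID_ANY, 0);
    	return qps[qp_id].ctx_pool ? 0 : -ENOMEM;
    }

    /* Data path: each lcore allocates only from its own qp's pool. */
    static void *
    alloc_ctx(void)
    {
    	struct qp *qp = &qps[rte_lcore_id() % MAX_CORES];
    	void *ctx = NULL;

    	if (rte_mempool_get(qp->ctx_pool, &ctx) != 0)
    		return NULL;
    	return ctx;
    }

In the patch itself this corresponds to moving rte_mempool_create() from
dpaa_sec_dev_configure() into dpaa_sec_queue_pair_setup(), and indexing
ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool in dpaa_sec_alloc_ctx().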
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 58 ++++++++++++------------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  3 +-
 2 files changed, 24 insertions(+), 37 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index fa9d03adc..32c7392d8 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -70,7 +70,9 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
 	struct dpaa_sec_op_ctx *ctx;
 	int i, retval;
 
-	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
 	if (!ctx || retval) {
 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
 		return NULL;
@@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
 		dcbz_64(&ctx->job.sg[i]);
 
-	ctx->ctx_pool = ses->ctx_pool;
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
 
 	return ctx;
@@ -1939,6 +1941,7 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
 	}
 
 	qp = &internals->qps[qp_id];
+	rte_mempool_free(qp->ctx_pool);
 	qp->internals = NULL;
 	dev->data->queue_pairs[qp_id] = NULL;
 
@@ -1953,6 +1956,7 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 {
 	struct dpaa_sec_dev_private *internals;
 	struct dpaa_sec_qp *qp = NULL;
+	char str[20];
 
 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
 
@@ -1965,6 +1969,22 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp = &internals->qps[qp_id];
 	qp->internals = internals;
+	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
+			dev->data->dev_id, qp_id);
+	if (!qp->ctx_pool) {
+		qp->ctx_pool = rte_mempool_create((const char *)str,
+			CTX_POOL_NUM_BUFS,
+			CTX_POOL_BUF_SIZE,
+			CTX_POOL_CACHE_SIZE, 0,
+			NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+		if (!qp->ctx_pool) {
+			DPAA_SEC_ERR("%s create failed\n", str);
+			return -ENOMEM;
+		}
+	} else
+		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
+				dev->data->dev_id, qp_id);
 	dev->data->queue_pairs[qp_id] = qp;
 
 	return 0;
@@ -2181,7 +2201,6 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 		DPAA_SEC_ERR("Invalid crypto type");
 		return -EINVAL;
 	}
-	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
 	for (i = 0; i < MAX_DPAA_CORES; i++) {
 		session->inq[i] = dpaa_sec_attach_rxq(internals);
@@ -2436,7 +2455,6 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 		session->dir = DIR_DEC;
 	} else
 		goto out;
-	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
 	for (i = 0; i < MAX_DPAA_CORES; i++) {
 		session->inq[i] = dpaa_sec_attach_rxq(internals);
@@ -2547,7 +2565,6 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
 
-	session->ctx_pool = dev_priv->ctx_pool;
 	rte_spinlock_lock(&dev_priv->lock);
 	for (i = 0; i < MAX_DPAA_CORES; i++) {
 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
@@ -2624,32 +2641,11 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused,
 }
 
 static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 		       struct rte_cryptodev_config *config __rte_unused)
 {
-
-	char str[20];
-	struct dpaa_sec_dev_private *internals;
-
 	PMD_INIT_FUNC_TRACE();
 
-	internals = dev->data->dev_private;
-
-	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
-	if (!internals->ctx_pool) {
-		internals->ctx_pool = rte_mempool_create((const char *)str,
-			CTX_POOL_NUM_BUFS,
-			CTX_POOL_BUF_SIZE,
-			CTX_POOL_CACHE_SIZE, 0,
-			NULL, NULL, NULL, NULL,
-			SOCKET_ID_ANY, 0);
-		if (!internals->ctx_pool) {
-			DPAA_SEC_ERR("%s create failed\n", str);
-			return -ENOMEM;
-		}
-	} else
-		DPAA_SEC_INFO("mempool already created for dev_id : %d",
-				dev->data->dev_id);
-
 	return 0;
 }
 
@@ -2669,17 +2665,11 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
 static int
 dpaa_sec_dev_close(struct rte_cryptodev *dev)
 {
-	struct dpaa_sec_dev_private *internals;
-
 	PMD_INIT_FUNC_TRACE();
 
 	if (dev == NULL)
 		return -ENOMEM;
 
-	internals = dev->data->dev_private;
-	rte_mempool_free(internals->ctx_pool);
-	internals->ctx_pool = NULL;
-
 	return 0;
 }
 
@@ -2919,8 +2909,6 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	internals = dev->data->dev_private;
 	rte_free(dev->security_ctx);
 
-	/* In case close has been called, internals->ctx_pool would be NULL */
-	rte_mempool_free(internals->ctx_pool);
 	rte_free(internals);
 
 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 2a6a3fad7..009ab7536 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -154,11 +154,11 @@ typedef struct dpaa_sec_session_entry {
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
 	struct qman_fq *inq[MAX_DPAA_CORES];
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
-	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
 } dpaa_sec_session;
 
 struct dpaa_sec_qp {
 	struct dpaa_sec_dev_private *internals;
+	struct rte_mempool *ctx_pool; /* mempool for dpaa_sec_op_ctx */
 	struct qman_fq outq;
 	int rx_pkts;
 	int rx_errs;
@@ -173,7 +173,6 @@ struct dpaa_sec_qp {
 /* internal sec queue interface */
 struct dpaa_sec_dev_private {
 	void *sec_hw;
-	struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
 	struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
 	struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
 	unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];