From patchwork Fri Aug 13 04:45:53 2021
X-Patchwork-Submitter: Kiran Kumar Kokkilagadda
X-Patchwork-Id: 96877
X-Patchwork-Delegate: gakhil@marvell.com
From: Kiran Kumar K
To: Declan Doherty, Ciara Power
CC: Kiran Kumar K
Date: Fri, 13 Aug 2021 10:15:53 +0530
Message-ID: <20210813044553.3321871-1-kirankumark@marvell.com>
X-Mailer: git-send-email 2.25.1
Subject: [dpdk-dev] [PATCH] app/crypto-perf: add throughput test for asymmetric crypto

From: Kiran Kumar K

Add support for an asymmetric crypto throughput test. Only modular
exponentiation (modex) is supported for now.
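A note for reviewers less familiar with the asymmetric API: the sketch below
condenses the flow this patch wires into the throughput test. It is
illustrative only and not part of the patch (the helper name is made up);
perf_base, perf_mod_p and perf_mod_e are the test vectors added in
cperf_test_vectors.c/.h, and error handling is omitted. With the new op type
in place the test is driven through the existing harness options, e.g.
something like "--ptest throughput --optype modex".

    #include <rte_cryptodev.h>
    #include <rte_crypto_asym.h>
    #include "cperf_test_vectors.h"	/* perf_base, perf_mod_p, perf_mod_e */

    /* Illustrative sketch (not part of the patch) of what
     * cperf_create_session() and cperf_set_ops_asym() below do for the
     * new CPERF_ASYM_MODEX op type.
     */
    static void
    modex_flow_sketch(uint8_t dev_id, struct rte_mempool *sess_mp,
    		  struct rte_mempool *priv_mp, struct rte_crypto_op *op)
    {
    	static uint8_t result[sizeof(perf_mod_p)];
    	struct rte_crypto_asym_xform xform = {0};
    	struct rte_cryptodev_asym_session *sess;

    	/* Modulus and exponent are fixed once per session. */
    	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
    	xform.modex.modulus.data = perf_mod_p;
    	xform.modex.modulus.length = sizeof(perf_mod_p);
    	xform.modex.exponent.data = perf_mod_e;
    	xform.modex.exponent.length = sizeof(perf_mod_e);
    	sess = rte_cryptodev_asym_session_create(sess_mp);
    	rte_cryptodev_asym_session_init(dev_id, sess, &xform, priv_mp);

    	/* Each enqueued op only carries the base and a result buffer;
    	 * cperf_set_ops_asym() does this per burst.
    	 */
    	op->asym->modex.base.data = perf_base;
    	op->asym->modex.base.length = sizeof(perf_base);
    	op->asym->modex.result.data = result;
    	op->asym->modex.result.length = sizeof(result);
    	rte_crypto_op_attach_asym_session(op, sess);
    }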
Signed-off-by: Kiran Kumar K
---
 app/test-crypto-perf/cperf_ops.c             |  59 ++++++++++-
 app/test-crypto-perf/cperf_options.h         |   3 +-
 app/test-crypto-perf/cperf_options_parsing.c |   4 +
 app/test-crypto-perf/cperf_test_common.c     |  16 +++
 app/test-crypto-perf/cperf_test_throughput.c |  24 +++--
 app/test-crypto-perf/cperf_test_vectors.c    |  36 +++++++
 app/test-crypto-perf/cperf_test_vectors.h    |  11 ++
 app/test-crypto-perf/main.c                  | 105 +++++++++++++++++--
 8 files changed, 242 insertions(+), 16 deletions(-)

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 0d7baf214b..4b7d66edb2 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -8,6 +8,33 @@
 #include "cperf_ops.h"
 #include "cperf_test_vectors.h"
 
+static int
+cperf_set_ops_asym(struct rte_crypto_op **ops,
+		   uint32_t src_buf_offset __rte_unused,
+		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
+		   struct rte_cryptodev_sym_session *sess,
+		   const struct cperf_options *options __rte_unused,
+		   const struct cperf_test_vector *test_vector __rte_unused,
+		   uint16_t iv_offset __rte_unused,
+		   uint32_t *imix_idx __rte_unused)
+{
+	uint16_t i;
+	uint8_t result[sizeof(perf_mod_p)] = { 0 };
+	struct rte_cryptodev_asym_session *asym_sess = (void *)sess;
+
+	for (i = 0; i < nb_ops; i++) {
+		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
+
+		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		asym_op->modex.base.data = perf_base;
+		asym_op->modex.base.length = sizeof(perf_base);
+		asym_op->modex.result.data = result;
+		asym_op->modex.result.length = sizeof(result);
+		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
+	}
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 static int
 cperf_set_ops_security(struct rte_crypto_op **ops,
@@ -550,7 +577,32 @@ cperf_create_session(struct rte_mempool *sess_mp,
 	struct rte_crypto_sym_xform auth_xform;
 	struct rte_crypto_sym_xform aead_xform;
 	struct rte_cryptodev_sym_session *sess = NULL;
-
+	struct rte_crypto_asym_xform xform = {0};
+	int rc;
+
+	if (options->op_type == CPERF_ASYM_MODEX) {
+		xform.next = NULL;
+		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+		xform.modex.modulus.data = perf_mod_p;
+		xform.modex.modulus.length = sizeof(perf_mod_p);
+		xform.modex.exponent.data = perf_mod_e;
+		xform.modex.exponent.length = sizeof(perf_mod_e);
+
+		sess = (void *)rte_cryptodev_asym_session_create(sess_mp);
+		if (sess == NULL)
+			return NULL;
+		rc = rte_cryptodev_asym_session_init(dev_id, (void *)sess,
+						     &xform, priv_mp);
+		if (rc < 0) {
+			if (sess != NULL) {
+				rte_cryptodev_asym_session_clear(dev_id,
+								 (void *)sess);
+				rte_cryptodev_asym_session_free((void *)sess);
+			}
+			return NULL;
+		}
+		return sess;
+	}
 #ifdef RTE_LIB_SECURITY
 	/*
 	 * security only
@@ -820,6 +872,11 @@ cperf_get_op_functions(const struct cperf_options *options,
 
 	op_fns->sess_create = cperf_create_session;
 
+	if (options->op_type == CPERF_ASYM_MODEX) {
+		op_fns->populate_ops = cperf_set_ops_asym;
+		return 0;
+	}
+
 	if (options->op_type == CPERF_AEAD) {
 		op_fns->populate_ops = cperf_set_ops_aead;
 		return 0;
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 9664a4b343..f5ea2b90a5 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -79,7 +79,8 @@ enum cperf_op_type {
 	CPERF_AUTH_THEN_CIPHER,
 	CPERF_AEAD,
 	CPERF_PDCP,
-	CPERF_DOCSIS
+	CPERF_DOCSIS,
+	CPERF_ASYM_MODEX
 };
 
 extern const char *cperf_op_type_strs[];
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index e84f56cfaa..fcff187257 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -457,6 +457,10 @@ parse_op_type(struct cperf_options *opts, const char *arg)
 		{
 			cperf_op_type_strs[CPERF_DOCSIS],
 			CPERF_DOCSIS
+		},
+		{
+			cperf_op_type_strs[CPERF_ASYM_MODEX],
+			CPERF_ASYM_MODEX
 		}
 	};
 
diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
index 12925c7f22..89f13fdebd 100644
--- a/app/test-crypto-perf/cperf_test_common.c
+++ b/app/test-crypto-perf/cperf_test_common.c
@@ -140,6 +140,22 @@ cperf_alloc_common_memory(const struct cperf_options *options,
 	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
 		sizeof(struct rte_crypto_sym_op);
 	uint16_t crypto_op_private_size;
+
+	if (options->op_type == CPERF_ASYM_MODEX) {
+		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
+			 rte_socket_id());
+		*pool = rte_crypto_op_pool_create(
+			pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+			options->pool_sz, 0, 0, rte_socket_id());
+		if (*pool == NULL) {
+			RTE_LOG(ERR, USER1,
+				"Cannot allocate mempool for device %u\n",
+				dev_id);
+			return -1;
+		}
+		return 0;
+	}
+
 	/*
 	 * If doing AES-CCM, IV field needs to be 16 bytes long,
 	 * and AAD field needs to be long enough to have 18 bytes,
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index f6eb8cf259..888ac988a4 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -35,17 +35,23 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx)
 	if (!ctx)
 		return;
 	if (ctx->sess) {
+		if (ctx->options->op_type == CPERF_ASYM_MODEX) {
+			rte_cryptodev_asym_session_clear(ctx->dev_id,
+							 (void *)ctx->sess);
+			rte_cryptodev_asym_session_free((void *)ctx->sess);
+		}
 #ifdef RTE_LIB_SECURITY
-		if (ctx->options->op_type == CPERF_PDCP ||
-				ctx->options->op_type == CPERF_DOCSIS) {
+		else if (ctx->options->op_type == CPERF_PDCP ||
+			 ctx->options->op_type == CPERF_DOCSIS) {
 			struct rte_security_ctx *sec_ctx =
 				(struct rte_security_ctx *)
-				rte_cryptodev_get_sec_ctx(ctx->dev_id);
-			rte_security_session_destroy(sec_ctx,
+					rte_cryptodev_get_sec_ctx(ctx->dev_id);
+			rte_security_session_destroy(
+				sec_ctx,
 				(struct rte_security_session *)ctx->sess);
-		} else
+		}
 #endif
-		{
+		else {
 			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
 			rte_cryptodev_sym_session_free(ctx->sess);
 		}
@@ -119,7 +125,8 @@ cperf_throughput_test_runner(void *test_ctx)
 	int linearize = 0;
 
 	/* Check if source mbufs require coalescing */
-	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
+	if ((ctx->options->op_type != CPERF_ASYM_MODEX) &&
+	    (ctx->options->segment_sz < ctx->options->max_buffer_size)) {
 		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
 		if ((dev_info.feature_flags &
 				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -194,7 +201,8 @@ cperf_throughput_test_runner(void *test_ctx)
 	}
 
 #ifdef CPERF_LINEARIZATION_ENABLE
-		if (linearize) {
+		if ((ctx->options->op_type != CPERF_ASYM_MODEX) &&
+		    linearize) {
 			/* PMD doesn't support scatter-gather and source buffer
 			 * is segmented.
 			 * We need to linearize it before enqueuing.
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 0af01ff911..4bba405961 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -7,6 +7,35 @@
 
 #include "cperf_test_vectors.h"
 
+/* modular operation test data */
+uint8_t perf_base[20] = {
+	0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
+	0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
+	0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
+};
+
+uint8_t perf_mod_p[129] = {
+	0x00, 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00,
+	0x0a, 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5,
+	0xce, 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a,
+	0xa2, 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde,
+	0x0a, 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a,
+	0x3d, 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63,
+	0x6a, 0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27,
+	0x6e, 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa,
+	0x72, 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53,
+	0x87, 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a,
+	0x62, 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63,
+	0x18, 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33,
+	0x4e, 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3,
+	0x03, 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e,
+	0xee, 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb,
+	0xa6, 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde,
+	0x55
+};
+
+uint8_t perf_mod_e[3] = {0x01, 0x00, 0x01};
+
 uint8_t plaintext[2048] = {
 	0x71, 0x75, 0x83, 0x98, 0x75, 0x42, 0x51, 0x09, 0x94, 0x02, 0x13, 0x20,
 	0x15, 0x64, 0x46, 0x32, 0x08, 0x18, 0x91, 0x82, 0x86, 0x52, 0x23, 0x93,
@@ -412,6 +441,13 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 	t_vec->plaintext.data = plaintext;
 	t_vec->plaintext.length = options->max_buffer_size;
 
+	if (options->op_type == CPERF_ASYM_MODEX) {
+		t_vec->modex.mod = perf_mod_p;
+		t_vec->modex.exp = perf_mod_e;
+		t_vec->modex.mlen = sizeof(perf_mod_p);
+		t_vec->modex.elen = sizeof(perf_mod_e);
+	}
+
 	if (options->op_type == CPERF_PDCP) {
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
 			t_vec->cipher_key.length = 0;
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index 6f10823ef3..92818c22b7 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -68,6 +68,13 @@ struct cperf_test_vector {
 		uint32_t aead_offset;
 		uint32_t aead_length;
 	} data;
+
+	struct {
+		uint8_t *mod;
+		uint8_t *exp;
+		uint32_t mlen;
+		uint32_t elen;
+	} modex;
 };
 
 struct cperf_test_vector*
@@ -83,4 +90,8 @@ extern uint8_t aad[];
 
 extern uint8_t digest[2048];
 
+extern uint8_t perf_base[20];
+extern uint8_t perf_mod_p[129];
+extern uint8_t perf_mod_e[3];
+
 #endif
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 70cb04a214..390380898e 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -40,7 +40,8 @@ const char *cperf_op_type_strs[] = {
 	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
 	[CPERF_AEAD] = "aead",
 	[CPERF_PDCP] = "pdcp",
-	[CPERF_DOCSIS] = "docsis"
+	[CPERF_DOCSIS] = "docsis",
+	[CPERF_ASYM_MODEX] = "modex"
 };
 
 const struct cperf_test cperf_testmap[] = {
@@ -66,6 +67,50 @@ const struct cperf_test cperf_testmap[] = {
 	}
 };
 
+static int
+create_asym_op_pool_socket(uint8_t dev_id, int32_t socket_id,
+			   uint32_t nb_sessions)
+{
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mpool = NULL;
+	unsigned int session_size =
+		RTE_MAX(rte_cryptodev_asym_get_private_session_size(dev_id),
+			rte_cryptodev_asym_get_header_session_size());
+
+	if (session_pool_socket[socket_id].priv_mp == NULL) {
+		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_priv_pool%u",
+			 socket_id);
+
+		mpool = rte_mempool_create(mp_name, nb_sessions, session_size,
+					   0, 0, NULL, NULL, NULL, NULL,
+					   socket_id, 0);
+		if (mpool == NULL) {
+			printf("Cannot create pool \"%s\" on socket %d\n",
+			       mp_name, socket_id);
+			return -ENOMEM;
+		}
+		printf("Allocated pool \"%s\" on socket %d\n", mp_name,
+		       socket_id);
+		session_pool_socket[socket_id].priv_mp = mpool;
+	}
+
+	if (session_pool_socket[socket_id].sess_mp == NULL) {
+
+		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%u",
+			 socket_id);
+		mpool = rte_mempool_create(mp_name, nb_sessions,
+					   session_size, 0, 0, NULL, NULL, NULL,
+					   NULL, socket_id, 0);
+		if (mpool == NULL) {
+			printf("Cannot create pool \"%s\" on socket %d\n",
+			       mp_name, socket_id);
+			return -ENOMEM;
+		}
+		session_pool_socket[socket_id].sess_mp = mpool;
+	}
+	return 0;
+}
+
 static int
 fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
 		uint32_t nb_sessions)
@@ -199,6 +244,13 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
 			socket_id = 0;
 
 		rte_cryptodev_info_get(cdev_id, &cdev_info);
+
+		if (opts->op_type == CPERF_ASYM_MODEX) {
+			if ((cdev_info.feature_flags &
+			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
+				continue;
+		}
+
 		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
 			printf("Number of needed queue pairs is higher "
 				"than the maximum number of queue pairs "
@@ -210,12 +262,27 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
 		struct rte_cryptodev_config conf = {
 			.nb_queue_pairs = opts->nb_qps,
 			.socket_id = socket_id,
-			.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO,
 		};
 
-		if (opts->op_type != CPERF_PDCP &&
-				opts->op_type != CPERF_DOCSIS)
+		switch (opts->op_type) {
+		case CPERF_ASYM_MODEX:
+			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
+					    RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
+			break;
+		case CPERF_CIPHER_ONLY:
+		case CPERF_AUTH_ONLY:
+		case CPERF_CIPHER_THEN_AUTH:
+		case CPERF_AUTH_THEN_CIPHER:
+		case CPERF_AEAD:
 			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
+			/* Fall through */
+		case CPERF_PDCP:
+		case CPERF_DOCSIS:
+			/* Fall through */
+		default:
+
+			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
+		}
 
 		struct rte_cryptodev_qp_conf qp_conf = {
 			.nb_descriptors = opts->nb_descriptors
@@ -267,8 +334,12 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
 			return -ENOTSUP;
 		}
 
-		ret = fill_session_pool_socket(socket_id, max_sess_size,
-			sessions_needed);
+		if (opts->op_type == CPERF_ASYM_MODEX)
+			ret = create_asym_op_pool_socket(cdev_id, socket_id,
+							 sessions_needed);
+		else
+			ret = fill_session_pool_socket(socket_id, max_sess_size,
+						       sessions_needed);
 		if (ret < 0)
 			return ret;
 
@@ -276,6 +347,11 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
 		qp_conf.mp_session_private =
 				session_pool_socket[socket_id].priv_mp;
 
+		if (opts->op_type == CPERF_ASYM_MODEX) {
+			qp_conf.mp_session = NULL;
+			qp_conf.mp_session_private = NULL;
+		}
+
 		ret = rte_cryptodev_configure(cdev_id, &conf);
 		if (ret < 0) {
 			printf("Failed to configure cryptodev %u", cdev_id);
@@ -309,6 +385,9 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
 {
 	struct rte_cryptodev_sym_capability_idx cap_idx;
 	const struct rte_cryptodev_symmetric_capability *capability;
+	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
+	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;
+
 	uint8_t i, cdev_id;
 	int ret;
 
@@ -317,6 +396,20 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
 
 		cdev_id = enabled_cdevs[i];
 
+		if (opts->op_type == CPERF_ASYM_MODEX) {
+			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+			asym_capability = rte_cryptodev_asym_capability_get(
+						cdev_id, &asym_cap_idx);
+			if (asym_capability == NULL)
+				return -1;
+
+			ret = rte_cryptodev_asym_xform_capability_check_modlen(
+					asym_capability, sizeof(perf_mod_p));
+			if (ret != 0)
+				return ret;
+
+		}
+
 		if (opts->op_type == CPERF_AUTH_ONLY ||
 				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
 				opts->op_type == CPERF_AUTH_THEN_CIPHER) {