@@ -182,6 +182,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_blockcipher.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_asym.c
+SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_fips.c
ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y)
SRCS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += test_compressdev.c
new file mode 100644
@@ -0,0 +1,771 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <sys/stat.h>
+#include <dirent.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_pause.h>
+#include <rte_bus_vdev.h>
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_string_fns.h>
+
+#include "test.h"
+#include "test_cryptodev.h"
+#include "test_cryptodev_fips_parse.h"
+
+#define AES_IV_LENGTH 16
+#define AES_BLOCK_LENGTH 16
+#define AES_MCT_NUM_INTERNAL_ITER 1000
+#define AES_MCT_NUM_EXTERNAL_ITER 100
+
+
+
+/*
+ * Runtime state shared by the whole FIPS test suite: the mempools used
+ * for packet buffers, crypto operations and sessions, the device/queue
+ * configuration, and the list of crypto devices that support at least
+ * one of the algorithms under test.
+ */
+struct crypto_testsuite_params {
+	struct rte_mempool *mbuf_pool;
+	struct rte_mempool *op_mpool;
+	struct rte_mempool *session_mpool;
+	struct rte_cryptodev_config conf;
+	struct rte_cryptodev_qp_conf qp_conf;
+	/* Device IDs enabled for testing; first nb_enabled_devs are valid */
+	uint8_t enabled_devs[RTE_CRYPTO_MAX_DEVS];
+	uint8_t nb_enabled_devs;
+};
+
+static struct crypto_testsuite_params testsuite_params = { NULL };
+
+/*
+ * Suite setup: create the mbuf/op/session mempools, pick the devices that
+ * support AES-CBC, AES-GCM and the full HMAC family, then configure,
+ * set up one queue pair on, and start each enabled device.
+ *
+ * Returns TEST_SUCCESS, or TEST_FAILED when no usable device or a pool
+ * cannot be created.
+ */
+static int
+fips_testsuite_setup(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	uint8_t nb_devs;
+	uint8_t dev_id, i;
+	uint32_t max_session_size = 0;
+
+	memset(ts_params, 0, sizeof(*ts_params));
+
+	/*
+	 * Reuse the mbuf pool from a previous run if it exists. The lookup
+	 * name must match the creation name below ("Mbuf pool"), otherwise
+	 * the pool is re-created on every run.
+	 */
+	ts_params->mbuf_pool = rte_mempool_lookup("Mbuf pool");
+	if (ts_params->mbuf_pool == NULL) {
+		/* Not already created so create */
+		ts_params->mbuf_pool = rte_pktmbuf_pool_create(
+				"Mbuf pool",
+				1, 0, 0, UINT16_MAX,
+				rte_socket_id());
+		if (ts_params->mbuf_pool == NULL) {
+			RTE_LOG(ERR, USER1, "Can't create CRYPTO_MBUFPOOL\n");
+			return TEST_FAILED;
+		}
+	}
+
+	ts_params->op_mpool = rte_crypto_op_pool_create(
+			"Op pool",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			1, 0,
+			MAXIMUM_IV_LENGTH,
+			rte_socket_id());
+	if (ts_params->op_mpool == NULL) {
+		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
+		return TEST_FAILED;
+	}
+
+	nb_devs = rte_cryptodev_count();
+	if (nb_devs < 1) {
+		RTE_LOG(ERR, USER1, "No crypto devices found?\n");
+		return TEST_FAILED;
+	}
+
+	ts_params->conf.nb_queue_pairs = 1;
+	ts_params->conf.socket_id = SOCKET_ID_ANY;
+
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+
+	/* Enable the first device capable of encrypting with AES-CBC */
+	for (dev_id = 0; dev_id < nb_devs; dev_id++) {
+		cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+		cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC;
+		if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) != NULL) {
+			ts_params->enabled_devs[ts_params->nb_enabled_devs] = dev_id;
+			(ts_params->nb_enabled_devs)++;
+			break;
+		}
+	}
+
+	if (dev_id == nb_devs)
+		RTE_LOG(WARNING, USER1, "There is no device that supports AES-CBC\n");
+
+	/*
+	 * Enable the first device capable of encrypting with AES-GCM,
+	 * unless it is already on the enabled list.
+	 */
+	for (dev_id = 0; dev_id < nb_devs; dev_id++) {
+		cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+		cap_idx.algo.aead = RTE_CRYPTO_AEAD_AES_GCM;
+		if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) != NULL) {
+			for (i = 0; i < ts_params->nb_enabled_devs; i++)
+				if (ts_params->enabled_devs[i] == dev_id)
+					break;
+			if (i == ts_params->nb_enabled_devs) {
+				ts_params->enabled_devs[ts_params->nb_enabled_devs] = dev_id;
+				(ts_params->nb_enabled_devs)++;
+			}
+
+			break;
+		}
+	}
+
+	if (dev_id == nb_devs)
+		RTE_LOG(WARNING, USER1, "There is no device that supports AES-GCM\n");
+
+	/*
+	 * Enable the first device that supports the whole HMAC family
+	 * (SHA1/224/256/384/512), unless it is already on the enabled list.
+	 */
+	for (dev_id = 0; dev_id < nb_devs; dev_id++) {
+		cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+		cap_idx.algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) != NULL) {
+			/*
+			 * Check if device supports other HMAC algorithms,
+			 * apart from SHA1
+			 */
+			cap_idx.algo.auth = RTE_CRYPTO_AUTH_SHA224_HMAC;
+			if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) == NULL)
+				continue;
+
+			cap_idx.algo.auth = RTE_CRYPTO_AUTH_SHA256_HMAC;
+			if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) == NULL)
+				continue;
+
+			cap_idx.algo.auth = RTE_CRYPTO_AUTH_SHA384_HMAC;
+			if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) == NULL)
+				continue;
+
+			cap_idx.algo.auth = RTE_CRYPTO_AUTH_SHA512_HMAC;
+			if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) == NULL)
+				continue;
+
+			for (i = 0; i < ts_params->nb_enabled_devs; i++)
+				if (ts_params->enabled_devs[i] == dev_id)
+					break;
+			if (i == ts_params->nb_enabled_devs) {
+				ts_params->enabled_devs[ts_params->nb_enabled_devs] = dev_id;
+				(ts_params->nb_enabled_devs)++;
+			}
+
+			break;
+		}
+	}
+
+	if (dev_id == nb_devs)
+		RTE_LOG(WARNING, USER1, "There is no device that supports all HMAC algorithms\n");
+
+	if (ts_params->nb_enabled_devs == 0) {
+		RTE_LOG(ERR, USER1,
+			"There are no crypto devices that support at least "
+			"one algorithm for the FIPS testing\n");
+		return TEST_FAILED;
+	}
+
+	/*
+	 * The session mempool must be created BEFORE the queue pairs are
+	 * set up, since rte_cryptodev_queue_pair_setup() takes it as an
+	 * argument. Size its elements for the largest private session data
+	 * among the enabled devices.
+	 */
+	for (i = 0; i < ts_params->nb_enabled_devs; i++) {
+		uint32_t session_size;
+
+		dev_id = ts_params->enabled_devs[i];
+		session_size =
+			rte_cryptodev_sym_get_private_session_size(dev_id);
+		if (session_size > max_session_size)
+			max_session_size = session_size;
+	}
+
+	ts_params->session_mpool = rte_mempool_create(
+				"Session pool",
+				2,
+				max_session_size,
+				0, 0, NULL, NULL, NULL,
+				NULL, SOCKET_ID_ANY,
+				0);
+
+	TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
+			"session mempool allocation failed");
+
+	for (i = 0; i < ts_params->nb_enabled_devs; i++) {
+		dev_id = ts_params->enabled_devs[i];
+		TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
+				&ts_params->conf),
+				"Failed to configure cryptodev %u with %u qps",
+				dev_id, ts_params->conf.nb_queue_pairs);
+
+		ts_params->qp_conf.nb_descriptors = 128;
+
+		TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			dev_id, 0, &ts_params->qp_conf,
+			rte_cryptodev_socket_id(dev_id),
+			ts_params->session_mpool),
+			"Failed to setup queue pair %u on cryptodev %u",
+			0, dev_id);
+
+		/* Start the device */
+		TEST_ASSERT_SUCCESS(rte_cryptodev_start(dev_id),
+				"Failed to start cryptodev %u",
+				dev_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Suite teardown: stop every enabled device, then release the mempools.
+ * Devices are stopped FIRST so that no queue pair still references the
+ * session mempool when it is freed.
+ */
+static void
+testsuite_teardown(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	uint16_t i;
+
+	for (i = 0; i < ts_params->nb_enabled_devs; i++)
+		rte_cryptodev_stop(ts_params->enabled_devs[i]);
+
+	if (ts_params->mbuf_pool != NULL) {
+		rte_mempool_free(ts_params->mbuf_pool);
+		ts_params->mbuf_pool = NULL;
+	}
+
+	if (ts_params->op_mpool != NULL) {
+		rte_mempool_free(ts_params->op_mpool);
+		ts_params->op_mpool = NULL;
+	}
+
+	/* Free session mempool */
+	if (ts_params->session_mpool != NULL) {
+		rte_mempool_free(ts_params->session_mpool);
+		ts_params->session_mpool = NULL;
+	}
+}
+
+/*
+ * Run one AES-CBC test vector (index vid) from aes_data on device dev_id.
+ *
+ * For AESAVS Monte Carlo (MCT) vectors the operation is iterated
+ * AES_MCT_NUM_INTERNAL_ITER times, chaining IV/input between iterations
+ * per the AESAVS algorithm; second_last_out receives the next-to-last
+ * output block, which the caller needs to derive the next MCT key.
+ *
+ * Returns 0 on success (including vectors skipped as unsupported, which
+ * are flagged in aes_data and counted), -1 on failure.
+ */
+static int
+test_aes_single_case(struct fips_aes_test_data *aes_data,
+		uint8_t *second_last_out,
+		uint16_t dev_id,
+		uint16_t vid,
+		uint32_t *num_test_vectors_run,
+		uint16_t *num_test_vectors_skipped)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	uint8_t *pt = NULL;
+	uint8_t *ct = NULL;
+	struct rte_cryptodev_sym_session *session = NULL;
+	struct rte_crypto_op *op = NULL;
+	struct rte_mbuf *mbuf = NULL;
+	int ret;
+	uint16_t i;
+	struct rte_crypto_sym_xform cipher_xform = {
+		.next = NULL,
+		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+		.cipher = {
+			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+			.op = aes_data->test_cases[vid].test_op,
+			.key = {
+				.length = aes_data->key_len,
+				.data = aes_data->test_cases[vid].key
+			},
+			.iv = {
+				.offset = IV_OFFSET,
+				.length = AES_IV_LENGTH
+			}
+		}
+	};
+
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *capability;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	/* AES-CBC is a cipher: use the .cipher member of the algo union */
+	cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC;
+	capability = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
+	if (capability == NULL) {
+		RTE_LOG(ERR, USER1, "Could not get capability\n");
+		return -1;
+	}
+	/* Unsupported key/IV sizes are skipped, not failed */
+	if (rte_cryptodev_sym_capability_check_cipher(capability,
+			cipher_xform.cipher.key.length,
+			cipher_xform.cipher.iv.length) != 0) {
+		RTE_LOG(DEBUG, USER1, "Operation not supported by device\n");
+		(*num_test_vectors_skipped)++;
+		aes_data->test_cases[vid].skip_flag = 1;
+		ret = 0;
+		goto exit;
+	}
+
+	/* Create crypto session */
+	session = rte_cryptodev_sym_session_create(ts_params->session_mpool);
+	if (session == NULL) {
+		RTE_LOG(ERR, USER1, "Session creation failed\n");
+		return -1;
+	}
+
+	ret = rte_cryptodev_sym_session_init(dev_id, session,
+			&cipher_xform, ts_params->session_mpool);
+	if (ret < 0) {
+		if (ret == -ENOTSUP) {
+			(*num_test_vectors_skipped)++;
+			aes_data->test_cases[vid].skip_flag = 1;
+			RTE_LOG(INFO, USER1, "Session not supported\n");
+			ret = 0;
+		} else {
+			RTE_LOG(ERR, USER1, "Session init failed\n");
+			ret = -1;
+		}
+		goto exit;
+	}
+
+	op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	if (op == NULL) {
+		RTE_LOG(ERR, USER1, "Operation allocation failed\n");
+		ret = -1;
+		goto exit;
+	}
+
+	mbuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	if (mbuf == NULL) {
+		RTE_LOG(ERR, USER1, "Mbuf allocation failed\n");
+		ret = -1;
+		goto exit;
+	}
+
+	/* Copy the input data (PT for encrypt, CT for decrypt) into the mbuf */
+	if (aes_data->test_cases[vid].test_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		pt = (uint8_t *)rte_pktmbuf_append(mbuf,
+				aes_data->test_cases[vid].plaintext_len);
+		if (pt == NULL) {
+			RTE_LOG(ERR, USER1, "Appending data failed\n");
+			ret = -1;
+			goto exit;
+		}
+		memcpy(pt, aes_data->test_cases[vid].plaintext,
+				aes_data->test_cases[vid].plaintext_len);
+	} else {
+		ct = (uint8_t *)rte_pktmbuf_append(mbuf,
+				aes_data->test_cases[vid].ciphertext_len);
+		if (ct == NULL) {
+			RTE_LOG(ERR, USER1, "Appending data failed\n");
+			ret = -1;
+			goto exit;
+		}
+		memcpy(ct, aes_data->test_cases[vid].ciphertext,
+				aes_data->test_cases[vid].ciphertext_len);
+	}
+
+	/* Set up crypto op */
+	uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	memcpy(iv, aes_data->test_cases[vid].iv,
+			AES_IV_LENGTH);
+	op->sym->m_src = mbuf;
+	op->sym->cipher.data.offset = 0;
+	if (aes_data->test_cases[vid].test_op ==
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		op->sym->cipher.data.length = aes_data->test_cases[vid].plaintext_len;
+	else
+		op->sym->cipher.data.length = aes_data->test_cases[vid].ciphertext_len;
+
+
+	/* Attach the crypto session to the operation */
+	rte_crypto_op_attach_sym_session(op, session);
+
+	uint16_t num_enqd = rte_cryptodev_enqueue_burst(dev_id, 0,
+			&op, 1);
+	if (num_enqd < 1) {
+		RTE_LOG(ERR, USER1, "Operation could not be enqueued\n");
+		ret = -1;
+		goto exit;
+	}
+
+	/* Busy-poll until the single op completes */
+	uint16_t num_deqd = 0;
+	do {
+		struct rte_crypto_op *deqd_op;
+		num_deqd = rte_cryptodev_dequeue_burst(dev_id, 0, &deqd_op, 1);
+
+		if (num_deqd == 0)
+			continue;
+
+		if (deqd_op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+			RTE_LOG(ERR, USER1, "Operation status was not successful\n");
+			ret = -1;
+			goto exit;
+		}
+	} while (num_deqd == 0);
+
+	/*
+	 * If Monte Carlo test, process plaintext with following algo:
+	 * For j = 0 to 999
+	 *     If ( j=0 )
+	 *         CT[j] = AES(Key[i], IV[i], PT[j])
+	 *         PT[j+1] = IV[i]
+	 *     Else
+	 *         CT[j] = AES(Key[i], PT[j])
+	 *         PT[j+1] = CT[j-1]
+	 * Output CT[j]
+	 */
+	if (aes_data->test_type == AESAVS_TYPE_MCT) {
+		uint8_t prev_out[AES_BLOCK_LENGTH];
+		uint8_t new_out[AES_BLOCK_LENGTH];
+		uint8_t prev_in[AES_BLOCK_LENGTH];
+		uint8_t *iv;
+
+		memcpy(prev_in, aes_data->test_cases[vid].ciphertext,
+				aes_data->test_cases[vid].ciphertext_len);
+
+		if (aes_data->test_cases[vid].test_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+			/* PT[1] = IV, save CT[0] */
+			pt = (uint8_t *)rte_pktmbuf_mtod(mbuf, uint8_t *);
+			memcpy(prev_out, pt, AES_BLOCK_LENGTH);
+			memcpy(pt, aes_data->test_cases[vid].iv,
+					AES_IV_LENGTH);
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+			memcpy(iv, prev_out, AES_IV_LENGTH);
+		} else {
+			/* CT[1] = IV, save PT[0] */
+			ct = (uint8_t *)rte_pktmbuf_mtod(mbuf, uint8_t *);
+			memcpy(prev_out, ct, AES_BLOCK_LENGTH);
+			memcpy(ct, aes_data->test_cases[vid].iv,
+					AES_IV_LENGTH);
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+			memcpy(iv, prev_in, AES_IV_LENGTH);
+
+		}
+
+		/* The other 999 iterations */
+		for (i = 1; i <= (AES_MCT_NUM_INTERNAL_ITER - 1); i++) {
+			memcpy(prev_in, rte_pktmbuf_mtod(mbuf, uint8_t *), AES_BLOCK_LENGTH);
+			uint16_t num_enqd = rte_cryptodev_enqueue_burst(dev_id, 0,
+					&op, 1);
+			if (num_enqd < 1) {
+				RTE_LOG(ERR, USER1, "Operation could not be enqueued\n");
+				ret = -1;
+				goto exit;
+			}
+
+			uint16_t num_deqd = 0;
+			do {
+				struct rte_crypto_op *deqd_op;
+				num_deqd = rte_cryptodev_dequeue_burst(dev_id, 0, &deqd_op, 1);
+
+				if (num_deqd == 0)
+					continue;
+
+				if (deqd_op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+					RTE_LOG(ERR, USER1, "Operation status was not successful\n");
+					ret = -1;
+					goto exit;
+				}
+			} while (num_deqd == 0);
+
+			/* Keep the next-to-last output for the caller's key derivation */
+			if (i == (AES_MCT_NUM_INTERNAL_ITER - 1)) {
+				memcpy(second_last_out, prev_out, AES_BLOCK_LENGTH);
+				break;
+			}
+
+			if (aes_data->test_cases[vid].test_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+				/* PT[j+1] = CT[j-1] (CT[j-1] stored in prev_out, store CT[j]) */
+				pt = (uint8_t *)rte_pktmbuf_mtod(mbuf, uint8_t *);
+				memcpy(new_out, pt, AES_BLOCK_LENGTH);
+				memcpy(pt, prev_out, AES_IV_LENGTH);
+				memcpy(prev_out, new_out, AES_IV_LENGTH);
+				iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+				memcpy(iv, prev_out, AES_IV_LENGTH);
+			} else {
+				/* CT[j+1] = PT[j-1] (PT[j-1] stored in prev_out, store PT[j]) */
+				ct = (uint8_t *)rte_pktmbuf_mtod(mbuf, uint8_t *);
+				memcpy(new_out, ct, AES_BLOCK_LENGTH);
+				memcpy(ct, prev_out, AES_IV_LENGTH);
+				memcpy(prev_out, new_out, AES_IV_LENGTH);
+				iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+				memcpy(iv, prev_in, AES_IV_LENGTH);
+			}
+		}
+	}
+
+	/* Output result back into the test case structure */
+	if (aes_data->test_cases[vid].test_op ==
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		ct = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+		aes_data->test_cases[vid].ciphertext_len =
+				aes_data->test_cases[vid].plaintext_len;
+		memcpy(aes_data->test_cases[vid].ciphertext, ct,
+				aes_data->test_cases[vid].ciphertext_len);
+	} else {
+		pt = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+		aes_data->test_cases[vid].plaintext_len =
+				aes_data->test_cases[vid].ciphertext_len;
+		/* Copy from the recovered plaintext pointer (pt), not ct */
+		memcpy(aes_data->test_cases[vid].plaintext, pt,
+				aes_data->test_cases[vid].plaintext_len);
+	}
+
+	ret = 0;
+	(*num_test_vectors_run)++;
+exit:
+	/* Clean up */
+	if (op != NULL) {
+		rte_pktmbuf_free(op->sym->m_src);
+		rte_crypto_op_free(op);
+	}
+
+	if (session != NULL) {
+		/*
+		 * Do not let successful cleanup overwrite an earlier error
+		 * already stored in ret; only record cleanup failures.
+		 */
+		if (rte_cryptodev_sym_session_clear(dev_id, session) != 0) {
+			RTE_LOG(ERR, USER1, "Session clear failed\n");
+			ret = -1;
+		}
+		if (rte_cryptodev_sym_session_free(session) != 0) {
+			RTE_LOG(ERR, USER1, "Session free failed\n");
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+
+
+#define MAX_NUM_FILES 32
+#define MAX_FILE_PATH 128
+
+/*
+ * AESAVS test driver: reads every *.req vector file from ./AES/req/,
+ * runs each vector on the first enabled device supporting AES-CBC and
+ * writes the responses to ./AES/resp/<name>.rsp.
+ *
+ * Monte Carlo (MCT) request files are expanded: each input vector yields
+ * AES_MCT_NUM_EXTERNAL_ITER output vectors, with the key/IV/input of
+ * vector i+1 derived from the outputs of vector i per the AESAVS spec.
+ */
+static int
+test_fips_aes(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct fips_aes_test_data *aes_data = NULL;
+	struct fips_aes_test_data *aes_mct_data = NULL;
+	uint16_t mct_vec_id = 0;
+	uint16_t num_test_vectors_skipped = 0;
+	uint32_t num_test_vectors_run = 0;
+	uint32_t num_total_run = 0;
+	uint32_t num_total_skipped = 0;
+	unsigned int num_files = 0;
+	uint16_t i, j, k, m;
+	char file_paths[MAX_NUM_FILES][MAX_FILE_PATH];
+	char file_names[MAX_NUM_FILES][MAX_FILE_PATH];
+	uint8_t dev_id;
+	int ret;
+
+	/* Find the first device which supports AES-CBC */
+	for (i = 0; i < ts_params->nb_enabled_devs; i++) {
+		struct rte_cryptodev_sym_capability_idx cap_idx;
+		dev_id = ts_params->enabled_devs[i];
+		cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+		cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC;
+		if (rte_cryptodev_sym_capability_get(dev_id, &cap_idx) != NULL) {
+			break;
+		}
+	}
+
+	if (i == ts_params->nb_enabled_devs) {
+		RTE_LOG(WARNING, USER1, "Skipping AES-CBC tests\n");
+		return -ENOTSUP;
+	}
+
+	/* Collect the request files, bounded by the file_paths[] capacity */
+	DIR *d;
+	struct dirent *dir;
+	d = opendir("./AES/req/");
+	if (d) {
+		while ((dir = readdir(d)) != NULL) {
+			if (num_files == MAX_NUM_FILES) {
+				RTE_LOG(WARNING, USER1,
+					"More than %d vector files found, ignoring the rest\n",
+					MAX_NUM_FILES);
+				break;
+			}
+			if (strstr(dir->d_name, "req") != NULL) {
+				snprintf(file_paths[num_files], MAX_FILE_PATH,
+					"%s%s", "./AES/req/", dir->d_name);
+				/* Strip the ".req" suffix for the response name */
+				strlcpy(file_names[num_files], dir->d_name,
+					strlen(dir->d_name) - 3);
+				num_files++;
+			}
+		}
+		closedir(d);
+	}
+
+	if (num_files == 0) {
+		RTE_LOG(INFO, USER1, "No AES test vector files available\n");
+		return TEST_SKIPPED;
+	}
+
+	aes_data = rte_malloc(NULL, sizeof(struct fips_aes_test_data), 0);
+	TEST_ASSERT_NOT_NULL(aes_data, "memory could not be allocated\n");
+	aes_mct_data = rte_malloc(NULL, sizeof(struct fips_aes_test_data), 0);
+	TEST_ASSERT_NOT_NULL(aes_mct_data, "memory could not be allocated\n");
+
+	for (i = 0; i < num_files; i++) {
+		printf("Testing vectors from file %s\n", file_paths[i]);
+		/* Vector parsing function */
+		parse_aes_vectors(aes_data, file_paths[i]);
+		/*
+		 * Output data will contain more test vectors than the input data,
+		 * so first thing to be done is to create the structure big enough
+		 * to hold all the vectors.
+		 */
+		if (aes_data->test_type == AESAVS_TYPE_MCT) {
+			uint8_t second_last_out[AES_BLOCK_LENGTH];
+
+			memcpy(aes_mct_data, aes_data,
+					sizeof(struct fips_aes_test_data));
+			memset(aes_mct_data->test_cases, 0,
+					sizeof(struct fips_aes_test_case) * MAX_NB_TESTS);
+			/* Each test case has 100 iterations */
+			aes_mct_data->nb_test_cases *= AES_MCT_NUM_EXTERNAL_ITER;
+
+			mct_vec_id = 0;
+			for (j = 0; j < aes_data->nb_test_cases; j++) {
+				/* Copy initial vector */
+				memcpy(&aes_mct_data->test_cases[mct_vec_id],
+						&aes_data->test_cases[j],
+						sizeof(struct fips_aes_test_case));
+
+				for (k = 0; k < AES_MCT_NUM_EXTERNAL_ITER; k++) {
+					aes_mct_data->test_cases[mct_vec_id].count =
+						(aes_data->test_cases[j].count *
+						AES_MCT_NUM_EXTERNAL_ITER) + k;
+					/*
+					 * Do not shadow the outer ret:
+					 * "goto exit" below must return
+					 * this assigned value.
+					 */
+					ret = test_aes_single_case(aes_mct_data,
+							second_last_out,
+							dev_id, mct_vec_id,
+							&num_test_vectors_run,
+							&num_test_vectors_skipped);
+					if (ret < 0) {
+						ret = TEST_FAILED;
+						goto exit;
+					}
+					mct_vec_id++;
+					/*
+					 * Prepare the next vector, except
+					 * after the last external iteration
+					 * (otherwise we would write one slot
+					 * past the final vector).
+					 */
+					if (k != AES_MCT_NUM_EXTERNAL_ITER - 1) {
+						aes_mct_data->test_cases[mct_vec_id].test_op = aes_data->test_cases[j].test_op;
+						aes_mct_data->test_cases[mct_vec_id].iv_len = aes_data->test_cases[j].iv_len;
+						aes_mct_data->test_cases[mct_vec_id].count = mct_vec_id;
+						if (aes_data->test_cases[j].test_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+							aes_mct_data->test_cases[mct_vec_id].plaintext_len = aes_data->test_cases[j].plaintext_len;
+							if (aes_data->key_len == 16) {
+								for (m = 0; m < aes_data->key_len; m++) {
+									/* Key[i+1] = Key[i] xor CT[j] */
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										aes_mct_data->test_cases[mct_vec_id - 1].ciphertext[m];
+								}
+							} else if (aes_data->key_len == 24) {
+								/* Key[i+1] = Key[i] xor (last 64-bits of CT[j-1] || CT[j]) */
+								for (m = 0; m < 8; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										second_last_out[m + 8];
+								for (m = 8; m < aes_data->key_len; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										aes_mct_data->test_cases[mct_vec_id - 1].ciphertext[m - 8];
+							} else {
+								/* Key[i+1] = Key[i] xor (CT[j-1] || CT[j]) */
+								for (m = 0; m < 16; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										second_last_out[m];
+								for (m = 16; m < aes_data->key_len; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										aes_mct_data->test_cases[mct_vec_id - 1].ciphertext[m - 16];
+							}
+
+							/* IV[i+1] = CT[j] */
+							memcpy(aes_mct_data->test_cases[mct_vec_id].iv,
+								aes_mct_data->test_cases[mct_vec_id - 1].ciphertext,
+								AES_IV_LENGTH);
+							/* PT[0] = CT[j-1] */
+							memcpy(aes_mct_data->test_cases[mct_vec_id].plaintext,
+								second_last_out, AES_BLOCK_LENGTH);
+						} else {
+							aes_mct_data->test_cases[mct_vec_id].ciphertext_len = aes_data->test_cases[j].ciphertext_len;
+							if (aes_data->key_len == 16) {
+								for (m = 0; m < aes_data->key_len; m++) {
+									/* Key[i+1] = Key[i] xor PT[j] */
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										aes_mct_data->test_cases[mct_vec_id - 1].plaintext[m];
+								}
+							} else if (aes_data->key_len == 24) {
+								/* Key[i+1] = Key[i] xor (last 64-bits of PT[j-1] || PT[j]) */
+								for (m = 0; m < 8; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										second_last_out[m + 8];
+								for (m = 8; m < aes_data->key_len; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										aes_mct_data->test_cases[mct_vec_id - 1].plaintext[m - 8];
+							} else {
+								/* Key[i+1] = Key[i] xor (PT[j-1] || PT[j]) */
+								for (m = 0; m < 16; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										second_last_out[m];
+								for (m = 16; m < aes_data->key_len; m++)
+									aes_mct_data->test_cases[mct_vec_id].key[m] =
+										aes_mct_data->test_cases[mct_vec_id - 1].key[m] ^
+										aes_mct_data->test_cases[mct_vec_id - 1].plaintext[m - 16];
+							}
+							/* IV[i+1] = PT[j] */
+							memcpy(aes_mct_data->test_cases[mct_vec_id].iv,
+								aes_mct_data->test_cases[mct_vec_id - 1].plaintext,
+								AES_IV_LENGTH);
+							/* CT[0] = PT[j-1] */
+							memcpy(aes_mct_data->test_cases[mct_vec_id].ciphertext,
+								second_last_out, AES_BLOCK_LENGTH);
+						}
+
+					}
+				}
+			}
+
+		} else {
+			for (j = 0; j < aes_data->nb_test_cases; j++) {
+				/* Use the outer ret (no shadowing) */
+				ret = test_aes_single_case(aes_data, NULL,
+						dev_id, j,
+						&num_test_vectors_run,
+						&num_test_vectors_skipped);
+				if (ret < 0) {
+					ret = TEST_FAILED;
+					goto exit;
+				}
+			}
+		}
+
+		printf("Test vectors run: %u\n", num_test_vectors_run);
+		printf("Test vectors skipped: %u\n", num_test_vectors_skipped);
+		num_total_skipped += num_test_vectors_skipped;
+		num_total_run += num_test_vectors_run;
+		num_test_vectors_skipped = 0;
+
+		/* Write the .rsp file for this request file */
+		char output_path[MAX_FILE_PATH];
+		char device_name[128];
+		struct rte_cryptodev_info info;
+		rte_cryptodev_info_get(dev_id, &info);
+		mkdir("./AES/resp/", 0700);
+		snprintf(output_path, MAX_FILE_PATH, "%s%s%s", "./AES/resp/",
+				file_names[i], ".rsp");
+		snprintf(device_name, 128, "%s%s%s", "DPDK Cryptodev ",
+				info.driver_name, " PMD");
+		if (aes_data->test_type == AESAVS_TYPE_MCT) {
+			write_aes_vectors(aes_mct_data, output_path, device_name);
+			memset(aes_mct_data, 0, sizeof(struct fips_aes_test_data));
+		} else {
+			write_aes_vectors(aes_data, output_path, device_name);
+		}
+		memset(aes_data, 0, sizeof(struct fips_aes_test_data));
+	}
+
+	printf("================================\n");
+	printf("Total test vectors run: %u\n", num_total_run);
+	printf("Total test vectors skipped: %u\n", num_total_skipped);
+	printf("================================\n");
+
+	ret = TEST_SUCCESS;
+exit:
+	rte_free(aes_data);
+	rte_free(aes_mct_data);
+
+	return ret;
+}
+
+/*
+ * FIPS test suite definition: setup configures the devices and mempools,
+ * the single test case drives all AES vector files, and teardown releases
+ * the resources.
+ */
+static struct unit_test_suite cryptodev_fips_testsuite  = {
+	.suite_name = "Crypto FIPS Test Suite",
+	.setup = fips_testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(NULL, NULL,
+				test_fips_aes),
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Entry point registered with the test framework as "cryptodev_fips". */
+static int
+test_cryptodev_fips(void)
+{
+	return unit_test_suite_runner(&cryptodev_fips_testsuite);
+}
+
+REGISTER_TEST_COMMAND(cryptodev_fips, test_cryptodev_fips);
+