@@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data, uint32_t index __rte_unused,
RTE_CRYPTO_OP_STATUS_ERROR;
}
+static struct crypto_testsuite_params testsuite_params = { NULL };
+struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
+static struct crypto_unittest_params unittest_params;
+
void
process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
@@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
struct rte_crypto_sgl sgl, dest_sgl;
uint32_t max_len;
union rte_cryptodev_session_ctx sess;
+ uint64_t auth_end_iova;
uint32_t count = 0;
struct rte_crypto_raw_dp_ctx *ctx;
uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
@@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
int ctx_service_size;
int32_t status = 0;
int enqueue_status, dequeue_status;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+ /* OOP is not supported by the raw HW DP API, so only m_src needs checking */
+ int is_sgl = sop->m_src->nb_segs > 1;
ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
if (ctx_service_size < 0) {
@@ -267,6 +275,30 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
digest.va = (void *)sop->auth.digest.data;
digest.iova = sop->auth.digest.phys_addr;
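+ /*
+  * Find the IOVA of the end of the auth data: walk the segment
+  * chain for a multi-segment mbuf, or add the offset to the base
+  * IOVA for a contiguous one.
+  */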
+ if (is_sgl) {
+ uint32_t remaining_off = auth_offset + auth_len;
+ struct rte_mbuf *sgl_buf = sop->m_src;
+
+ while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+ && sgl_buf->next != NULL) {
+ remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+ sgl_buf = sgl_buf->next;
+ }
+
+ auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+ sgl_buf, remaining_off);
+ } else {
+ /* OOP is not supported in the raw HW DP API */
+ auth_end_iova = rte_pktmbuf_iova(op->sym->m_src) +
+ auth_offset + auth_len;
+ }
+ /* Then check if the digest-encrypted conditions are met: the digest
+  * must start exactly at the end of the auth data, within the cipher
+  * range, on a multi-segment buffer.
+  */
+ if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
+ (digest.iova == auth_end_iova) && is_sgl)
+ max_len = RTE_MAX(max_len,
+ auth_offset + auth_len +
+ ut_params->auth_xform.auth.digest_length);
+
} else if (is_cipher) {
cipher_offset = sop->cipher.data.offset;
cipher_len = sop->cipher.data.length;
@@ -503,10 +535,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
return op;
}
-static struct crypto_testsuite_params testsuite_params = { NULL };
-struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
-static struct crypto_unittest_params unittest_params;
-
static int
testsuite_setup(void)
{
@@ -4077,9 +4105,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
/* Create KASUMI operation */
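+ /* Round the cipher length, given in bits, up to a full byte */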
retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
- tdata->cipher_iv.len,
- tdata->ciphertext.len,
- tdata->validCipherOffsetInBits.len);
+ tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -7310,6 +7338,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
unsigned int plaintext_len;
unsigned int ciphertext_pad_len;
unsigned int ciphertext_len;
+ unsigned int data_len;
struct rte_cryptodev_info dev_info;
struct rte_crypto_op *op;
@@ -7370,21 +7399,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
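+ /* The buffer must fit the larger of the two padded lengths */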
+ data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
if (verify) {
ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- ciphertext_pad_len);
+ data_len);
memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
if (op_mode == OUT_OF_PLACE)
- rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+ rte_pktmbuf_append(ut_params->obuf, data_len);
debug_hexdump(stdout, "ciphertext:", ciphertext,
ciphertext_len);
} else {
plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- plaintext_pad_len);
+ data_len);
memcpy(plaintext, tdata->plaintext.data, plaintext_len);
if (op_mode == OUT_OF_PLACE)
- rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+ rte_pktmbuf_append(ut_params->obuf, data_len);
debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len);
}
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
- vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
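+ /* Out-of-place operation: use the destination SGL when one is provided */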
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
- vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
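+ /* Out-of-place operation: use the destination SGL when one is provided */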
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;
enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
- vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
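+ /* Out-of-place operation: use the destination SGL when one is provided */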
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i],
- cookie, vec->src_sgl[i].vec,
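+ /* Out-of-place operation: use the destination SGL when one is provided */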
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;
enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
- vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
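+ /* Out-of-place operation: use the destination SGL when one is provided */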
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;
enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
- vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
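+ /* Out-of-place operation: use the destination SGL when one is provided */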
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
(uint8_t *)tx_queue->base_addr + tail);
rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
- vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
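+ /* Out-of-place operation: use the destination SGL when one is provided */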
+ if (vec->dest_sgl) {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+ } else {
+ data_len = qat_sym_build_req_set_data(req,
+ user_data[i], cookie,
+ vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ }
+
if (unlikely(data_len < 0))
break;