@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
}
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+ uint8_t hash_flag)
+{
+ struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+ (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+ session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+ /* Set the Use Extended Protocol Flags bit in LW 1 */
+ QAT_FIELD_SET(header->comn_req_flags,
+ QAT_COMN_EXT_FLAGS_USED,
+ QAT_COMN_EXT_FLAGS_BITPOS,
+ QAT_COMN_EXT_FLAGS_MASK);
+
+ /* Set Hash Flags in LW 28 */
+ cd_ctrl->hash_flags |= hash_flag;
+
+ /* Set proto flags in LW 1 */
+ switch (session->qat_cipher_alg) {
+ case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_SNOW_3G_PROTO);
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+ header->serv_specif_flags, 0);
+ break;
+ case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_ZUC_3G_PROTO);
+ break;
+ default:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+ header->serv_specif_flags, 0);
+ break;
+ }
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+ struct rte_cryptodev *dev = cdev;
+ struct qat_sym_session *ctx = session;
+ const struct qat_cryptodev_private *qat_private =
+ dev->data->dev_private;
+ int ret;
+
+ ret = qat_sym_crypto_set_session_gen1(cdev, session);
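+	/* Fall through only when GEN1 reports -ENOTSUP for a mixed algorithm */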
+ if (ret == 0 || ret != -ENOTSUP)
+ return ret;
+
+	/* GEN1 returns -ENOTSUP when it cannot handle certain mixed
+	 * hash-cipher algorithms; some of those are not supported by
+	 * GEN2 either, so check the capability here.
+	 */
+ if ((qat_private->internal_capabilities &
+ QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+ return -ENOTSUP;
+
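+	/* Set the extended hash flags needed for the supported mixed
+	 * hash-cipher combinations
+	 */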
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+ } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+ } else if ((ctx->aes_cmac ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+ (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+ }
+
+ return 0;
+}
+
struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
/* Device related operations */
@@ -204,6 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
qat_sym_crypto_cap_get_gen2;
+ qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+ qat_sym_crypto_set_session_gen2;
+ qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+ qat_sym_configure_raw_dp_ctx_gen1;
qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
qat_sym_crypto_feature_flags_get_gen1;
@@ -221,4 +309,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
qat_asym_crypto_cap_get_gen1;
qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
qat_asym_crypto_feature_flags_get_gen1;
+ qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+ qat_asym_crypto_set_session_gen1;
}
@@ -143,6 +143,468 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
return capa_info;
}
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single-pass mode to treat an AEAD job
+		 * as a cipher operation
+		 */
+
+ qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+
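+		/* Single pass: AAD and digest addresses are carried in
+		 * the cipher request parameters
+		 */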
+ cipher_param->spc_aad_addr = aad->iova;
+ cipher_param->spc_auth_res_addr = digest->iova;
+
+ return;
+ }
+
+ enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+ struct qat_sym_op_cookie *cookie,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t ver_key_offset;
+ uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+ ofs.ofs.auth.tail;
+
+ if (!ctx->is_single_pass_gmac ||
+ (auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+ enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+ data_len);
+ return;
+ }
+
+ cipher_cd_ctrl = (void *) &req->cd_ctrl;
+ cipher_param = (void *)&req->serv_specif_rqpars;
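+	/* Offset of the verify-direction key within the session
+	 * content descriptor
+	 */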
+ ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+ sizeof(struct icp_qat_hw_cipher_config);
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+ req);
+ }
+
+ /* Fill separate Content Descriptor for this op */
+ rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+ ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+ ctx->cd.cipher.key :
+ RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+ ctx->auth_key_length);
+ cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+ ICP_QAT_HW_CIPHER_AEAD_MODE,
+ ctx->qat_cipher_alg,
+ ICP_QAT_HW_CIPHER_NO_CONVERT,
+ (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+ ICP_QAT_HW_CIPHER_ENCRYPT :
+ ICP_QAT_HW_CIPHER_DECRYPT));
+ QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+ ctx->digest_length,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+ cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+ /* Update the request */
+ req->cd_pars.u.s.content_desc_addr =
+ cookie->opt.spc_gmac.cd_phys_addr;
+ req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+ sizeof(struct icp_qat_hw_cipher_config) +
+ ctx->auth_key_length, 8) >> 3;
+ req->comn_mid.src_length = data_len;
+ req->comn_mid.dst_length = 0;
+
+ cipher_param->spc_aad_addr = 0;
+ cipher_param->spc_auth_res_addr = digest->iova;
+ cipher_param->spc_aad_sz = auth_data_len;
+ cipher_param->reserved = 0;
+ cipher_param->spc_auth_res_sz = ctx->digest_length;
+
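+	/* Send the job as a cipher command with the single-pass
+	 * protocol flag set
+	 */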
+ req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+ ICP_QAT_FW_LA_PROTO_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl, out_sgl;
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr cipher_iv;
+ struct rte_crypto_va_iova_ptr aad;
+ struct rte_crypto_va_iova_ptr digest;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
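+	/* Copy the session's firmware request template, then fill the
+	 * per-op fields
+	 */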
+ req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+ &cipher_iv, &aad, &digest);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+ total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+ NULL, &aad, &digest);
+#endif
+
+ return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl, out_sgl;
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr auth_iv;
+ struct rte_crypto_va_iova_ptr digest;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
+ req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+ NULL, &auth_iv, &digest);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+ ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+ &auth_iv, NULL, &digest);
+#endif
+
+ return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev, void *session)
+{
+ struct qat_sym_session *ctx = session;
+ enum rte_proc_type_t proc_type = rte_eal_process_type();
+ int ret;
+
+ ret = qat_sym_crypto_set_session_gen1(cdev, session);
+ /* special single pass build request for GEN3 */
+ if (ctx->is_single_pass)
+ ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+ else if (ctx->is_single_pass_gmac)
+ ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+ if (ret == 0)
+ return ret;
+
+	/* GEN1 returns -ENOTSUP when it cannot handle certain mixed
+	 * hash-cipher algorithms; GEN3 handles them here.
+	 */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+ } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+ } else if ((ctx->aes_cmac ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+ (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+ }
+
+ return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
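+	/* Take the request slot and cookie at the current tail, then
+	 * advance the tail by one message within the ring
+	 */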
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+ (uint32_t)data_len);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+ NULL, aad, digest);
+#endif
+ return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
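+	/* Build one request per job; stop early if filling the request
+	 * data fails
+	 */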
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+
+ enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+ &vec->digest[i], &vec->aad[i], ofs,
+ (uint32_t)data_len);
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, &vec->iv[i], NULL,
+ &vec->aad[i], &vec->digest[i]);
+#endif
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+ (uint32_t)data_len);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+ return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+ enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+ &vec->auth_iv[i], ofs, (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+ struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+ struct qat_sym_session *ctx = _ctx;
+ int ret;
+
+ ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+ if (ret < 0)
+ return ret;
+
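+	/* Override only the enqueue hooks that need the GEN3
+	 * single-pass variants
+	 */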
+ if (ctx->is_single_pass) {
+ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+ } else if (ctx->is_single_pass_gmac) {
+ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+ }
+
+ return 0;
+}
+
RTE_INIT(qat_sym_crypto_gen3_init)
{
qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +612,10 @@ RTE_INIT(qat_sym_crypto_gen3_init)
qat_sym_crypto_cap_get_gen3;
qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
qat_sym_crypto_feature_flags_get_gen1;
+ qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+ qat_sym_crypto_set_session_gen3;
+ qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+ qat_sym_configure_raw_dp_ctx_gen3;
#ifdef RTE_LIB_SECURITY
qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
qat_sym_create_security_gen1;
@@ -161,4 +627,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+ qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
}
@@ -103,11 +103,253 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
return capa_info;
}
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ if (ctx->is_single_pass && ctx->is_ucs) {
+ struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+ (void *)&req->serv_specif_rqpars;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param =
+ (void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single-pass mode to treat an AEAD job
+		 * as a cipher operation
+		 */
+ qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+ req);
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = data_len -
+ ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
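+		/* With UCS, the AAD and digest addresses go into the
+		 * cipher 2.0 request parameters
+		 */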
+ cipher_param_20->spc_aad_addr = aad->iova;
+ cipher_param_20->spc_auth_res_addr = digest->iova;
+
+ return;
+ }
+
+ enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl, out_sgl;
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr cipher_iv;
+ struct rte_crypto_va_iova_ptr aad;
+ struct rte_crypto_va_iova_ptr digest;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+ &cipher_iv, &aad, &digest);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+ total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+ NULL, &aad, &digest);
+#endif
+
+ return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+ struct qat_sym_session *ctx = session;
+ enum rte_proc_type_t proc_type = rte_eal_process_type();
+ int ret;
+
+ ret = qat_sym_crypto_set_session_gen1(cdev, session);
+ /* special single pass build request for GEN4 */
+ if (ctx->is_single_pass && ctx->is_ucs)
+ ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == 0)
+ return ret;
+
+	/* GEN1 returns -ENOTSUP when it cannot handle certain mixed
+	 * hash-cipher algorithms; GEN4 handles them here.
+	 */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+ } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+ } else if ((ctx->aes_cmac ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+ (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+ }
+
+ return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+ (uint32_t)data_len);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+ NULL, aad, digest);
+#endif
+ return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+
+ enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+ &vec->digest[i], &vec->aad[i], ofs,
+ (uint32_t)data_len);
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, &vec->iv[i], NULL,
+ &vec->aad[i], &vec->digest[i]);
+#endif
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+ struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+ struct qat_sym_session *ctx = _ctx;
+ int ret;
+
+ ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+ if (ret < 0)
+ return ret;
+
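+	/* Only single-pass UCS sessions need the GEN4-specific enqueue
+	 * hooks
+	 */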
+ if (ctx->is_single_pass && ctx->is_ucs) {
+ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+ }
+
+ return 0;
+}
+
RTE_INIT(qat_sym_crypto_gen4_init)
{
qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
qat_sym_crypto_cap_get_gen4;
+ qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+ qat_sym_crypto_set_session_gen4;
+ qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+ qat_sym_configure_raw_dp_ctx_gen4;
qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
qat_sym_crypto_feature_flags_get_gen1;
#ifdef RTE_LIB_SECURITY
@@ -121,4 +363,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+ qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
}
@@ -6,6 +6,7 @@
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
+#include <cryptodev_pmd.h>
#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
@@ -154,7 +155,7 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
};
static struct qat_capabilities_info
-qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
+qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
{
struct qat_capabilities_info capa_info;
capa_info.data = qat_sym_crypto_caps_gen1;
@@ -1169,6 +1170,10 @@ RTE_INIT(qat_sym_crypto_gen1_init)
qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
qat_sym_crypto_cap_get_gen1;
+ qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+ qat_sym_crypto_set_session_gen1;
+ qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+ qat_sym_configure_raw_dp_ctx_gen1;
qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
qat_sym_crypto_feature_flags_get_gen1;
#ifdef RTE_LIB_SECURITY
@@ -2,6 +2,7 @@
* Copyright(c) 2021 Intel Corporation
*/
+#include <cryptodev_pmd.h>
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_crypto.h"
@@ -48,15 +48,24 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
typedef void * (*create_security_ctx_t)(void *cryptodev);
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
struct qat_crypto_gen_dev_ops {
get_feature_flags_t get_feature_flags;
get_capabilities_info_t get_capabilities;
struct rte_cryptodev_ops *cryptodev_ops;
+ set_session_t set_session;
+ set_raw_dp_ctx_t set_raw_dp_ctx;
#ifdef RTE_LIB_SECURITY
create_security_ctx_t create_security_ctx;
#endif
};
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
int
qat_cryptodev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config);
@@ -237,7 +237,7 @@ refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, enum qat_device_gen qat_dev_gen)
+ void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
{
int ret = 0;
struct qat_sym_session *ctx = NULL;
@@ -302,12 +302,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
return -EINVAL;
}
- if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
- QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
-
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
return 0;
}
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
- uint8_t hash_flag)
-{
- struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
- struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
- (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
- session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
- /* Set the Use Extended Protocol Flags bit in LW 1 */
- QAT_FIELD_SET(header->comn_req_flags,
- QAT_COMN_EXT_FLAGS_USED,
- QAT_COMN_EXT_FLAGS_BITPOS,
- QAT_COMN_EXT_FLAGS_MASK);
-
- /* Set Hash Flags in LW 28 */
- cd_ctrl->hash_flags |= hash_flag;
-
- /* Set proto flags in LW 1 */
- switch (session->qat_cipher_alg) {
- case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_SNOW_3G_PROTO);
- ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
- header->serv_specif_flags, 0);
- break;
- case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
- ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
- header->serv_specif_flags,
- ICP_QAT_FW_LA_ZUC_3G_PROTO);
- break;
- default:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
- ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
- header->serv_specif_flags, 0);
- break;
- }
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
- struct qat_sym_session *session)
-{
- const struct qat_cryptodev_private *qat_private =
- dev->data->dev_private;
- enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
- QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
- if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
- session->qat_cipher_alg !=
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
- session->min_qat_dev_gen = min_dev_gen;
- qat_sym_session_set_ext_hash_flags(session,
- 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
- } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
- session->qat_cipher_alg !=
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- session->min_qat_dev_gen = min_dev_gen;
- qat_sym_session_set_ext_hash_flags(session,
- 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
- } else if ((session->aes_cmac ||
- session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
- (session->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- session->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
- session->min_qat_dev_gen = min_dev_gen;
- qat_sym_session_set_ext_hash_flags(session, 0);
- }
-}
-
int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
int ret;
int qat_cmd_id;
- int handle_mixed = 0;
/* Verify the session physical address is known */
rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
session->cd_paddr = session_paddr +
offsetof(struct qat_sym_session, cd);
- session->min_qat_dev_gen = QAT_GEN1;
+ session->dev_id = internals->dev_id;
session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
session->is_ucs = 0;
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
xform, session);
if (ret < 0)
return ret;
- handle_mixed = 1;
}
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
xform, session);
if (ret < 0)
return ret;
- handle_mixed = 1;
}
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
return -ENOTSUP;
}
qat_sym_session_finalize(session);
- if (handle_mixed) {
- /* Special handling of mixed hash+cipher algorithms */
- qat_sym_session_handle_mixed(dev, session);
- }
- return 0;
+ return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+ (void *)session);
}
static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
{
session->is_single_pass = 1;
session->is_auth = 1;
- session->min_qat_dev_gen = QAT_GEN3;
session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
/* Chacha-Poly is special case that use QAT CTR mode */
if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
return 0;
}
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
- uint8_t *data_in,
- uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+ uint8_t *data_in,
+ uint8_t *data_out)
{
int digest_size;
uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
- cdesc->min_qat_dev_gen = QAT_GEN2;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
- cdesc->min_qat_dev_gen = QAT_GEN2;
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
session->cd_paddr = session_paddr +
offsetof(struct qat_sym_session, cd);
- session->min_qat_dev_gen = QAT_GEN1;
-
/* Get requested QAT command id - should be cipher */
qat_cmd_id = qat_get_cmd_id(xform);
if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
{
void *sess_private_data;
struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ struct qat_cryptodev_private *internals = cdev->data->dev_private;
+ enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+ struct qat_sym_session *sym_session = NULL;
int ret;
if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
}
set_sec_session_private_data(sess, sess_private_data);
+ sym_session = (struct qat_sym_session *)sess_private_data;
+ sym_session->dev_id = internals->dev_id;
- return ret;
+ return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+ sess_private_data);
}
int
@@ -100,7 +100,7 @@ struct qat_sym_session {
uint16_t auth_key_length;
uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
- enum qat_device_gen min_qat_dev_gen;
+ uint16_t dev_id;
uint8_t aes_cmac;
uint8_t is_single_pass;
uint8_t is_single_pass_gmac;