@@ -70,8 +70,8 @@ if qat_compress
endif
if qat_crypto
- foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
- 'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
+ foreach f: ['qat_sym.c', 'qat_sym_session.c',
+ 'qat_asym.c', 'qat_crypto.c',
'dev/qat_sym_pmd_gen1.c',
'dev/qat_asym_pmd_gen1.c',
'dev/qat_crypto_pmd_gen2.c',
@@ -8,7 +8,7 @@
#include "qat_device.h"
#include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
#include "qat_comp_pmd.h"
#include "adf_pf2vf_msg.h"
#include "qat_pf2vf.h"
@@ -15,8 +15,8 @@
#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
#include "qat_comp.h"
#define QAT_CQ_MAX_DEQ_RETRIES 10
@@ -130,7 +130,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
int
qat_qps_per_service(struct qat_pci_device *qat_dev,
- enum qat_service_type service);
+ enum qat_service_type service);
const struct qat_qp_hw_data *
qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
@@ -5,8 +5,8 @@
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
@@ -5,8 +5,8 @@
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
@@ -10,166 +10,6 @@
#include "qat_sym_session.h"
#include "qat_sym.h"
-#define QAT_BASE_GEN1_SYM_CAPABILITIES \
- QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
- CAP_RNG(digest_size, 1, 20, 1)), \
- QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
- CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
- QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
- CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
- QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
- CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
- QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
- CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
- CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
- CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
- CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_CIPHER_CAP(AES_XTS, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_CIPHER_CAP(SNOW3G_UEA2, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_CIPHER_CAP(KASUMI_F8, CAP_SET(block_size, 8), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
- QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \
- CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_CIPHER_CAP(3DES_CBC, CAP_SET(block_size, 8), \
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
- QAT_SYM_CIPHER_CAP(3DES_CTR, CAP_SET(block_size, 8), \
- CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
- QAT_SYM_CIPHER_CAP(DES_CBC, CAP_SET(block_size, 8), \
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
- QAT_SYM_CIPHER_CAP(DES_DOCSISBPI, CAP_SET(block_size, 8), \
- CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
-
-#define QAT_BASE_GEN1_ASYM_CAPABILITIES \
- QAT_ASYM_CAP(MODEX, 0, 1, 512, 1), \
- QAT_ASYM_CAP(MODINV, 0, 1, 512, 1), \
- QAT_ASYM_CAP(RSA, \
- ((1 << RTE_CRYPTO_ASYM_OP_SIGN) | \
- (1 << RTE_CRYPTO_ASYM_OP_VERIFY) | \
- (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) | \
- (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)), \
- 64, 512, 64)
-
-#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
- QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
- CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
-
-#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
- QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 32, 32, 0), \
- CAP_RNG(digest_size, 16, 16, 0), \
- CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
-
-#define QAT_BASE_GEN4_SYM_CAPABILITIES \
- QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
- CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
- CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
- CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \
- CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
- CAP_RNG(digest_size, 1, 20, 1)), \
- QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
- QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
- QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
- CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
- QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
- CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
- QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
- CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
-
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
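For orientation, each QAT_SYM_*_CAP entry in the block removed above is shorthand for one rte_cryptodev_capabilities element. A hand-expanded sketch of the SHA1_HMAC line (standard rte_cryptodev capability layout, not code from this patch) looks roughly like:

    {
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        {.sym = {
            .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
            {.auth = {
                .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                .block_size = 64,
                .key_size = {.min = 1, .max = 64, .increment = 1},
                .digest_size = {.min = 1, .max = 20, .increment = 1},
                .aad_size = {0},
                .iv_size = {0}
            }, }
        }, }
    },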
@@ -432,7 +272,7 @@ qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
cipher_len, out_sgl->vec,
QAT_SYM_SGL_MAX_NUMBER);
- if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+ if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return UINT64_MAX;
}
@@ -506,7 +346,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
auth_ofs + auth_len, out_sgl->vec,
QAT_SYM_SGL_MAX_NUMBER);
- if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+ if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return UINT64_MAX;
}
@@ -13,7 +13,7 @@
#include "icp_qat_fw_la.h"
#include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
+#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
@@ -1,70 +1,147 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
*/
#include <stdarg.h>
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
#include "icp_qat_fw_pke.h"
#include "icp_qat_fw.h"
#include "qat_pke_functionality_arrays.h"
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
- size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
{
- size_t i;
+ int j;
+ struct qat_asym_op_cookie *cookie = op_cookie;
- for (i = 0; i < arr_sz; i++) {
- if (*size <= arr[i][0]) {
- *size = arr[i][0];
- *func_id = arr[i][1];
- return 0;
- }
+ cookie->input_addr = rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ input_params_ptrs);
+
+ cookie->output_addr = rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ output_params_ptrs);
+
+ for (j = 0; j < 8; j++) {
+ cookie->input_params_ptrs[j] =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ input_array[j]);
+ cookie->output_params_ptrs[j] =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_asym_op_cookie,
+ output_array[j]);
}
- return -1;
}
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool)
{
- memset(qat_req, 0, sizeof(*qat_req));
- qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ int err = 0;
+ void *sess_private_data;
+ struct qat_asym_session *session;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ QAT_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
- qat_req->pke_hdr.hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
- (ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ session = sess_private_data;
+ if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+ if (xform->modex.exponent.length == 0 ||
+ xform->modex.modulus.length == 0) {
+ QAT_LOG(ERR, "Invalid mod exp input parameter");
+ err = -EINVAL;
+ goto error;
+ }
+ } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+ if (xform->modinv.modulus.length == 0) {
+ QAT_LOG(ERR, "Invalid mod inv input parameter");
+ err = -EINVAL;
+ goto error;
+ }
+ } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+ if (xform->rsa.n.length == 0) {
+ QAT_LOG(ERR, "Invalid rsa input parameter");
+ err = -EINVAL;
+ goto error;
+ }
+ } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+ || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+ QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+ err = -EINVAL;
+ goto error;
+ } else {
+ QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+ err = -EINVAL;
+ goto error;
+ }
+
+ session->xform = xform;
+ qat_asym_build_req_tmpl(sess_private_data);
+ set_asym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+error:
+ rte_mempool_put(mempool, sess_private_data);
+ return err;
}
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+ struct rte_cryptodev *dev __rte_unused)
{
- struct icp_qat_fw_pke_request *qat_req;
- struct qat_asym_session *session = sess_private_data;
-
- qat_req = &session->req_tmpl;
- qat_fill_req_tmpl(qat_req);
+ return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
}
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
{
- va_list args;
- size_t len = 0, num;
- int i;
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_asym_session_private_data(sess, index);
+ struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
- va_start(args, n);
- len = va_arg(args, size_t);
+ if (sess_priv) {
+ memset(s, 0, qat_asym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- for (i = 0; i < n - 1; i++) {
- num = va_arg(args, size_t);
- if (num > len)
- len = num;
+ set_asym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
}
- va_end(args);
-
- return len;
}
-static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+ .name = qat_asym_drv_name,
+ .alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
int in_count, int out_count, int in_size, int out_size)
{
int i;
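A minimal sketch of an xform that satisfies the length checks in qat_asym_session_configure() above; mod_buf and exp_buf are hypothetical caller-owned buffers and would need real values before use:

    /* Hypothetical buffers: fill mod_buf with an actual modulus before use. */
    static uint8_t mod_buf[64];
    static uint8_t exp_buf[3] = {0x01, 0x00, 0x01};

    struct rte_crypto_asym_xform modex_xform = {
        .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
        .modex = {
            .modulus  = {.data = mod_buf, .length = sizeof(mod_buf)},
            .exponent = {.data = exp_buf, .length = sizeof(exp_buf)},
        },
    };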
@@ -75,7 +152,8 @@ static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
memset(cookie->output_array[i], 0x0, out_size);
}
-static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
{
if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
@@ -88,7 +166,229 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
out_size);
}
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+ struct qat_asym_op_cookie *cookie,
+ struct rte_crypto_asym_xform *xform)
+{
+ size_t alg_size, alg_size_in_bytes = 0;
+ struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+ if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+ rte_crypto_param n = xform->modex.modulus;
+
+ alg_size = cookie->alg_size;
+ alg_size_in_bytes = alg_size >> 3;
+ uint8_t *modexp_result = asym_op->modex.result.data;
+
+ if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+ rte_memcpy(modexp_result +
+ (asym_op->modex.result.length -
+ n.length),
+ cookie->output_array[0] + alg_size_in_bytes
+ - n.length, n.length
+ );
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+ cookie->output_array[0],
+ alg_size_in_bytes);
+
+#endif
+ }
+ } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+ rte_crypto_param n = xform->modinv.modulus;
+
+ alg_size = cookie->alg_size;
+ alg_size_in_bytes = alg_size >> 3;
+ uint8_t *modinv_result = asym_op->modinv.result.data;
+
+ if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+ rte_memcpy(modinv_result +
+ (asym_op->modinv.result.length - n.length),
+ cookie->output_array[0] +
+ alg_size_in_bytes - n.length, n.length);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+ cookie->output_array[0],
+ alg_size_in_bytes);
+#endif
+ }
+ } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+ alg_size = cookie->alg_size;
+ alg_size_in_bytes = alg_size >> 3;
+ if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+ asym_op->rsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_VERIFY) {
+ if (asym_op->rsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+ uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+ rte_memcpy(rsa_result,
+ cookie->output_array[0],
+ alg_size_in_bytes);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+ cookie->output_array[0],
+ alg_size_in_bytes);
+#endif
+ } else if (asym_op->rsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_VERIFY) {
+ uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+ switch (asym_op->rsa.pad) {
+ case RTE_CRYPTO_RSA_PADDING_NONE:
+ rte_memcpy(rsa_result,
+ cookie->output_array[0],
+ alg_size_in_bytes);
+ rx_op->status =
+ RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+ cookie->output_array[0],
+ alg_size_in_bytes);
+#endif
+ break;
+ default:
+ QAT_LOG(ERR, "Padding not supported");
+ rx_op->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+ }
+ } else {
+ if (asym_op->rsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_DECRYPT) {
+ uint8_t *rsa_result = asym_op->rsa.message.data;
+
+ switch (asym_op->rsa.pad) {
+ case RTE_CRYPTO_RSA_PADDING_NONE:
+ rte_memcpy(rsa_result,
+ cookie->output_array[0],
+ alg_size_in_bytes);
+ break;
+ default:
+ QAT_LOG(ERR, "Padding not supported");
+ rx_op->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+ rsa_result, alg_size_in_bytes);
+#endif
+ } else if (asym_op->rsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_SIGN) {
+ uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+ rte_memcpy(rsa_result,
+ cookie->output_array[0],
+ alg_size_in_bytes);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+ cookie->output_array[0],
+ alg_size_in_bytes);
+#endif
+ }
+ }
+ }
+ qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+ alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+ void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+ struct qat_asym_session *ctx;
+ struct icp_qat_fw_pke_resp *resp_msg =
+ (struct icp_qat_fw_pke_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque);
+ struct qat_asym_op_cookie *cookie = op_cookie;
+
+ if (cookie->error) {
+ cookie->error = 0;
+ if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ QAT_DP_LOG(ERR, "Cookie status returned error");
+ } else {
+ if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+ if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ QAT_DP_LOG(ERR, "Asymmetric response status"
+ " returned error");
+ }
+ if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+ if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ QAT_DP_LOG(ERR, "Asymmetric common status"
+ " returned error");
+ }
+ }
+
+ if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ ctx = (struct qat_asym_session *)get_asym_session_private_data(
+ rx_op->asym->session, qat_asym_driver_id);
+ qat_asym_collect_response(rx_op, cookie, ctx->xform);
+ } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+ }
+ *op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+ sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+ return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+ size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+ size_t i;
+
+ for (i = 0; i < arr_sz; i++) {
+ if (*size <= arr[i][0]) {
+ *size = arr[i][0];
+ *func_id = arr[i][1];
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+ va_list args;
+ size_t len = 0, num;
+ int i;
+
+ va_start(args, n);
+ len = va_arg(args, size_t);
+
+ for (i = 0; i < n - 1; i++) {
+ num = va_arg(args, size_t);
+ if (num > len)
+ len = num;
+ }
+ va_end(args);
+
+ return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
{
if (n.length < 8) {
/* Not a case for any cryptograpic function except for DH
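The MODEX/MODINV branches in qat_asym_collect_response() above copy only the trailing n.length bytes because both the firmware output slot and the user result buffer are big-endian and left-padded to the PKE slot size. Restated as a standalone helper (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* dst_len: user result length, src_len: alg_size_in_bytes,
     * n_len: modulus length; mirrors the rte_memcpy calls above.
     */
    static void
    copy_pke_tail(uint8_t *dst, size_t dst_len,
            const uint8_t *src, size_t src_len, size_t n_len)
    {
        memcpy(dst + (dst_len - n_len), src + (src_len - n_len), n_len);
    }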
@@ -452,50 +752,14 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
} else {
QAT_LOG(ERR, "Invalid asymmetric crypto xform");
return -(EINVAL);
- }
- return 0;
-}
-
-static __rte_always_inline int
-refactor_qat_asym_build_request(__rte_unused void *in_op,
- __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
- __rte_unused uint64_t *opaque,
- __rte_unused enum qat_device_gen dev_gen)
-{
- return 0;
-}
-
-int
-refactor_qat_asym_process_response(__rte_unused void **op,
- __rte_unused uint8_t *resp,
- __rte_unused void *op_cookie,
- __rte_unused uint64_t *dequeue_err_count)
-{
+ }
return 0;
}
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return refactor_qat_enqueue_op_burst(qp,
- refactor_qat_asym_build_request,
- (void **)ops, nb_ops);
-}
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return refactor_qat_dequeue_op_burst(qp, (void **)ops,
- refactor_qat_asym_process_response, nb_ops);
-}
-
-int
-qat_asym_build_request(void *in_op,
- uint8_t *out_msg,
- void *op_cookie,
- __rte_unused enum qat_device_gen qat_dev_gen)
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+ __rte_unused uint64_t *opaque,
+ __rte_unused enum qat_device_gen dev_gen)
{
struct qat_asym_session *ctx;
struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -562,262 +826,161 @@ qat_asym_build_request(void *in_op,
return 0;
}
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
- struct qat_asym_op_cookie *cookie,
- struct rte_crypto_asym_xform *xform)
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- size_t alg_size, alg_size_in_bytes = 0;
- struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
- if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
- rte_crypto_param n = xform->modex.modulus;
-
- alg_size = cookie->alg_size;
- alg_size_in_bytes = alg_size >> 3;
- uint8_t *modexp_result = asym_op->modex.result.data;
-
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
- rte_memcpy(modexp_result +
- (asym_op->modex.result.length -
- n.length),
- cookie->output_array[0] + alg_size_in_bytes
- - n.length, n.length
- );
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
- cookie->output_array[0],
- alg_size_in_bytes);
-
-#endif
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
- rte_crypto_param n = xform->modinv.modulus;
-
- alg_size = cookie->alg_size;
- alg_size_in_bytes = alg_size >> 3;
- uint8_t *modinv_result = asym_op->modinv.result.data;
-
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
- rte_memcpy(modinv_result + (asym_op->modinv.result.length
- - n.length),
- cookie->output_array[0] + alg_size_in_bytes
- - n.length, n.length);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
- alg_size = cookie->alg_size;
- alg_size_in_bytes = alg_size >> 3;
- if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
- asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_VERIFY) {
- if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_ENCRYPT) {
- uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- } else if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_VERIFY) {
- uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- rx_op->status =
- RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- break;
- default:
- QAT_LOG(ERR, "Padding not supported");
- rx_op->status =
- RTE_CRYPTO_OP_STATUS_ERROR;
- break;
- }
- }
- } else {
- if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_DECRYPT) {
- uint8_t *rsa_result = asym_op->rsa.message.data;
-
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- break;
- default:
- QAT_LOG(ERR, "Padding not supported");
- rx_op->status =
- RTE_CRYPTO_OP_STATUS_ERROR;
- break;
- }
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
- rsa_result, alg_size_in_bytes);
-#endif
- } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
- uint8_t *rsa_result = asym_op->rsa.sign.data;
-
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- }
- }
- }
- qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
- alg_size_in_bytes);
+ return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+ nb_ops);
}
-void
-qat_asym_process_response(void **op, uint8_t *resp,
- void *op_cookie)
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct qat_asym_session *ctx;
- struct icp_qat_fw_pke_resp *resp_msg =
- (struct icp_qat_fw_pke_resp *)resp;
- struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque);
- struct qat_asym_op_cookie *cookie = op_cookie;
-
- if (cookie->error) {
- cookie->error = 0;
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- QAT_DP_LOG(ERR, "Cookie status returned error");
- } else {
- if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
- resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- QAT_DP_LOG(ERR, "Asymmetric response status"
- " returned error");
- }
- if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- QAT_DP_LOG(ERR, "Asymmetric common status"
- " returned error");
- }
- }
-
- if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- ctx = (struct qat_asym_session *)get_asym_session_private_data(
- rx_op->asym->session, qat_asym_driver_id);
- qat_asym_collect_response(rx_op, cookie, ctx->xform);
- } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
- }
- *op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
- sizeof(struct icp_qat_fw_pke_resp));
-#endif
+ return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+ nb_ops);
}
int
-qat_asym_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_asym_xform *xform,
- struct rte_cryptodev_asym_session *sess,
- struct rte_mempool *mempool)
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param)
{
- int err = 0;
- void *sess_private_data;
- struct qat_asym_session *session;
-
- if (rte_mempool_get(mempool, &sess_private_data)) {
- QAT_LOG(ERR,
- "Couldn't get object from session mempool");
- return -ENOMEM;
+ struct qat_cryptodev_private *internals;
+ struct rte_cryptodev *cryptodev;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_dev_instance->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_cryptodev_private)
+ };
+ struct qat_capabilities_info capa_info;
+ const struct rte_cryptodev_capabilities *capabilities;
+ const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+ &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint64_t capa_size;
+ int i = 0;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "asym");
+ QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+ if (gen_dev_ops->cryptodev_ops == NULL) {
+ QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+ name);
+ return -(EFAULT);
}
- session = sess_private_data;
- if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
- if (xform->modex.exponent.length == 0 ||
- xform->modex.modulus.length == 0) {
- QAT_LOG(ERR, "Invalid mod exp input parameter");
- err = -EINVAL;
- goto error;
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
- if (xform->modinv.modulus.length == 0) {
- QAT_LOG(ERR, "Invalid mod inv input parameter");
- err = -EINVAL;
- goto error;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ qat_pci_dev->qat_asym_driver_id =
+ qat_asym_driver_id;
+ } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ if (qat_pci_dev->qat_asym_driver_id !=
+ qat_asym_driver_id) {
+ QAT_LOG(ERR,
+ "Device %s have different driver id than corresponding device in primary process",
+ name);
+ return -(EFAULT);
}
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
- if (xform->rsa.n.length == 0) {
- QAT_LOG(ERR, "Invalid rsa input parameter");
- err = -EINVAL;
- goto error;
+ }
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+ qat_dev_instance->asym_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_dev_instance->asym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = qat_asym_driver_id;
+ cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+ cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+ cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "QAT_ASYM_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ internals->dev_id = cryptodev->data->dev_id;
+
+ capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+ capabilities = capa_info.data;
+ capa_size = capa_info.size;
+
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ capa_size, rte_socket_id(), 0);
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities, "
+ "destroying PMD for %s",
+ name);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->asym_rte_dev, 0,
+ sizeof(qat_dev_instance->asym_rte_dev));
+ return -EFAULT;
}
- } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
- || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
- QAT_LOG(ERR, "Invalid asymmetric crypto xform");
- err = -EINVAL;
- goto error;
- } else {
- QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
- err = -EINVAL;
- goto error;
}
- session->xform = xform;
- qat_asym_build_req_tmpl(sess_private_data);
- set_asym_session_private_data(sess, dev->driver_id,
- sess_private_data);
+ memcpy(internals->capa_mz->addr, capabilities, capa_size);
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
- return 0;
-error:
- rte_mempool_put(mempool, sess_private_data);
- return err;
-}
+ while (1) {
+ if (qat_dev_cmd_param[i].name == NULL)
+ break;
+ if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+ internals->min_enq_burst_threshold =
+ qat_dev_cmd_param[i].val;
+ i++;
+ }
-unsigned int qat_asym_session_get_private_size(
- struct rte_cryptodev *dev __rte_unused)
-{
- return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+ qat_pci_dev->asym_dev = internals;
+ internals->service_type = QAT_SERVICE_ASYMMETRIC;
+ QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->dev_id);
+ return 0;
}
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
- struct rte_cryptodev_asym_session *sess)
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
- uint8_t index = dev->driver_id;
- void *sess_priv = get_asym_session_private_data(sess, index);
- struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
- if (sess_priv) {
- memset(s, 0, qat_asym_session_get_private_size(dev));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->asym_dev == NULL)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(
+ qat_pci_dev->asym_dev->dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+ qat_pci_dev->asym_dev = NULL;
- set_asym_session_private_data(sess, index, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
+ return 0;
}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_asym_driver,
+ qat_asym_driver_id);
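qat_asym_dev_create() above takes its cryptodev_ops, capabilities and feature flags from qat_asym_gen_dev_ops[], so each hardware generation is expected to register its own entry. A hypothetical GEN1 registration (the QAT_GEN1 index and the gen1 callback names are illustrative; only the three fields dereferenced above are assumed):

    RTE_INIT(qat_asym_crypto_gen1_init)
    {
        qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
                &qat_asym_crypto_ops_gen1;
        qat_asym_gen_dev_ops[QAT_GEN1].get_capabilities =
                qat_asym_crypto_cap_get_gen1;
        qat_asym_gen_dev_ops[QAT_GEN1].get_feature_flags =
                qat_asym_crypto_feature_flags_get_gen1;
    }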
@@ -8,10 +8,13 @@
#include <cryptodev_pmd.h>
#include <rte_crypto_asym.h>
#include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
#include "icp_qat_fw.h"
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym
+
typedef uint64_t large_int_ptr;
#define MAX_PKE_PARAMS 8
#define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
#define QAT_ASYM_RSA_NUM_OUT_PARAMS 1
#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6
+/**
+ * Helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \
+ {.asym = { \
+ .xform_capa = { \
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+ .op_types = o, \
+ { \
+ .modlen = { \
+ .min = l, \
+ .max = r, \
+ .increment = i \
+ }, } \
+ } \
+ }, \
+ } \
+ }
+
struct qat_asym_op_cookie {
size_t alg_size;
uint64_t error;
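QAT_ASYM_CAP, moved into this header from the removed qat_asym_pmd.h, is used exactly as in the GEN1 table shown earlier in this patch, e.g.:

    QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),
    QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),
    QAT_ASYM_CAP(RSA,
            ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
             (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
             (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
             (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
            64, 512, 64)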
@@ -45,6 +70,27 @@ struct qat_asym_session {
struct rte_crypto_asym_xform *xform;
};
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+ memset(qat_req, 0, sizeof(*qat_req));
+ qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+ qat_req->pke_hdr.hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+ (ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+ struct icp_qat_fw_pke_request *qat_req;
+ struct qat_asym_session *session = sess_private_data;
+
+ qat_req = &session->req_tmpl;
+ qat_fill_req_tmpl(qat_req);
+}
+
int
qat_asym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_asym_xform *xform,
@@ -58,26 +104,6 @@ void
qat_asym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_asym_session *sess);
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param in_op Pointer to the crypto operation, for every
- * service it points to service specific struct.
- * @param out_msg Message to be returned to enqueue function
- * @param op_cookie Cookie pointer that holds private metadata
- * @param qat_dev_gen Generation of QAT hardware
- *
- * @return
- * This function always returns zero,
- * it is because of backward compatibility.
- * - 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, enum qat_device_gen qat_dev_gen);
-
/*
* Process PKE response received from outgoing queue of QAT
*
@@ -88,23 +114,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
* @param op_cookie Cookie pointer that holds private metadata
*
*/
-void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
- void *op_cookie);
-
int
-refactor_qat_asym_process_response(__rte_unused void **op,
- __rte_unused uint8_t *resp,
- __rte_unused void *op_cookie,
- __rte_unused uint64_t *dequeue_err_count);
-
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+ void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
+void
+qat_asym_init_op_cookie(void *cookie);
#endif /* _QAT_ASYM_H_ */
deleted file mode 100644
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
- int j;
- struct qat_asym_op_cookie *cookie = op_cookie;
-
- cookie->input_addr = rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- input_params_ptrs);
-
- cookie->output_addr = rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- output_params_ptrs);
-
- for (j = 0; j < 8; j++) {
- cookie->input_params_ptrs[j] =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- input_array[j]);
- cookie->output_params_ptrs[j] =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- output_array[j]);
- }
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
- /* Device related operations */
- .dev_configure = qat_cryptodev_config,
- .dev_start = qat_cryptodev_start,
- .dev_stop = qat_cryptodev_stop,
- .dev_close = qat_cryptodev_close,
- .dev_infos_get = qat_cryptodev_info_get,
-
- .stats_get = qat_cryptodev_stats_get,
- .stats_reset = qat_cryptodev_stats_reset,
- .queue_pair_setup = qat_cryptodev_qp_setup,
- .queue_pair_release = qat_cryptodev_qp_release,
-
- /* Crypto related operations */
- .asym_session_get_size = qat_asym_session_get_private_size,
- .asym_session_configure = qat_asym_session_configure,
- .asym_session_clear = qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
- .name = qat_asym_drv_name,
- .alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
- struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
- int i = 0;
- struct qat_device_info *qat_dev_instance =
- &qat_pci_devs[qat_pci_dev->qat_dev_id];
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = qat_dev_instance->pci_dev->device.numa_node,
- .private_data_size = sizeof(struct qat_cryptodev_private)
- };
- struct qat_capabilities_info capa_info;
- const struct rte_cryptodev_capabilities *capabilities;
- const struct qat_crypto_gen_dev_ops *gen_dev_ops =
- &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- struct rte_cryptodev *cryptodev;
- struct qat_cryptodev_private *internals;
- uint64_t capa_size;
-
- if (gen_dev_ops->cryptodev_ops == NULL) {
- QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
- name);
- return -EFAULT;
- }
-
- snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
- qat_pci_dev->name, "asym");
- QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- qat_pci_dev->qat_asym_driver_id =
- qat_asym_driver_id;
- } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- if (qat_pci_dev->qat_asym_driver_id !=
- qat_asym_driver_id) {
- QAT_LOG(ERR,
- "Device %s have different driver id than corresponding device in primary process",
- name);
- return -(EFAULT);
- }
- }
-
- /* Populate subset device to use in cryptodev device creation */
- qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
- qat_dev_instance->asym_rte_dev.numa_node =
- qat_dev_instance->pci_dev->device.numa_node;
- qat_dev_instance->asym_rte_dev.devargs = NULL;
-
- cryptodev = rte_cryptodev_pmd_create(name,
- &(qat_dev_instance->asym_rte_dev), &init_params);
-
- if (cryptodev == NULL)
- return -ENODEV;
-
- qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
- cryptodev->driver_id = qat_asym_driver_id;
- cryptodev->dev_ops = &crypto_qat_ops;
-
- cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
- cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
- "QAT_ASYM_CAPA_GEN_%d",
- qat_pci_dev->qat_dev_gen);
-
- internals = cryptodev->data->dev_private;
- internals->qat_dev = qat_pci_dev;
- internals->dev_id = cryptodev->data->dev_id;
- internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
- capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
- capabilities = capa_info.data;
- capa_size = capa_info.size;
-
- internals->capa_mz = rte_memzone_lookup(capa_memz_name);
- if (internals->capa_mz == NULL) {
- internals->capa_mz = rte_memzone_reserve(capa_memz_name,
- capa_size, rte_socket_id(), 0);
- if (internals->capa_mz == NULL) {
- QAT_LOG(DEBUG,
- "Error allocating memzone for capabilities, "
- "destroying PMD for %s",
- name);
- rte_cryptodev_pmd_destroy(cryptodev);
- memset(&qat_dev_instance->asym_rte_dev, 0,
- sizeof(qat_dev_instance->asym_rte_dev));
- return -EFAULT;
- }
- }
-
- memcpy(internals->capa_mz->addr, capabilities, capa_size);
- internals->qat_dev_capabilities = internals->capa_mz->addr;
-
- while (1) {
- if (qat_dev_cmd_param[i].name == NULL)
- break;
- if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
- internals->min_enq_burst_threshold =
- qat_dev_cmd_param[i].val;
- i++;
- }
-
- qat_pci_dev->asym_dev = internals;
-
- rte_cryptodev_pmd_probing_finish(cryptodev);
-
- QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
- cryptodev->data->name, internals->dev_id);
- return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
- struct rte_cryptodev *cryptodev;
-
- if (qat_pci_dev == NULL)
- return -ENODEV;
- if (qat_pci_dev->asym_dev == NULL)
- return 0;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
- /* free crypto device */
- cryptodev = rte_cryptodev_pmd_get_dev(
- qat_pci_dev->asym_dev->dev_id);
- rte_cryptodev_pmd_destroy(cryptodev);
- qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
- qat_pci_dev->asym_dev = NULL;
-
- return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
- cryptodev_qat_asym_driver,
- qat_asym_driver_id);
deleted file mode 100644
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \
- {.asym = { \
- .xform_capa = { \
- .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
- .op_types = o, \
- { \
- .modlen = { \
- .min = l, \
- .max = r, \
- .increment = i \
- }, } \
- } \
- }, \
- } \
- }
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
deleted file mode 100644
@@ -1,994 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 - 2021 Intel Corporation
- */
-
-
-#include <stdarg.h>
-
-#include <cryptodev_pmd.h>
-
-#include "icp_qat_fw_pke.h"
-#include "icp_qat_fw.h"
-#include "qat_pke_functionality_arrays.h"
-
-#include "qat_device.h"
-
-#include "qat_logs.h"
-#include "qat_asym_refactor.h"
-
-uint8_t qat_asym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
- int j;
- struct qat_asym_op_cookie *cookie = op_cookie;
-
- cookie->input_addr = rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- input_params_ptrs);
-
- cookie->output_addr = rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- output_params_ptrs);
-
- for (j = 0; j < 8; j++) {
- cookie->input_params_ptrs[j] =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- input_array[j]);
- cookie->output_params_ptrs[j] =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_asym_op_cookie,
- output_array[j]);
- }
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_asym_xform *xform,
- struct rte_cryptodev_asym_session *sess,
- struct rte_mempool *mempool)
-{
- int err = 0;
- void *sess_private_data;
- struct qat_asym_session *session;
-
- if (rte_mempool_get(mempool, &sess_private_data)) {
- QAT_LOG(ERR,
- "Couldn't get object from session mempool");
- return -ENOMEM;
- }
-
- session = sess_private_data;
- if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
- if (xform->modex.exponent.length == 0 ||
- xform->modex.modulus.length == 0) {
- QAT_LOG(ERR, "Invalid mod exp input parameter");
- err = -EINVAL;
- goto error;
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
- if (xform->modinv.modulus.length == 0) {
- QAT_LOG(ERR, "Invalid mod inv input parameter");
- err = -EINVAL;
- goto error;
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
- if (xform->rsa.n.length == 0) {
- QAT_LOG(ERR, "Invalid rsa input parameter");
- err = -EINVAL;
- goto error;
- }
- } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
- || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
- QAT_LOG(ERR, "Invalid asymmetric crypto xform");
- err = -EINVAL;
- goto error;
- } else {
- QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
- err = -EINVAL;
- goto error;
- }
-
- session->xform = xform;
- qat_asym_build_req_tmpl(sess_private_data);
- set_asym_session_private_data(sess, dev->driver_id,
- sess_private_data);
-
- return 0;
-error:
- rte_mempool_put(mempool, sess_private_data);
- return err;
-}
-
-unsigned int
-qat_asym_session_get_private_size(
- struct rte_cryptodev *dev __rte_unused)
-{
- return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
- struct rte_cryptodev_asym_session *sess)
-{
- uint8_t index = dev->driver_id;
- void *sess_priv = get_asym_session_private_data(sess, index);
- struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
- if (sess_priv) {
- memset(s, 0, qat_asym_session_get_private_size(dev));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
- set_asym_session_private_data(sess, index, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
- .name = qat_asym_drv_name,
- .alias = qat_asym_drv_name
-};
-
-
-static void
-qat_clear_arrays(struct qat_asym_op_cookie *cookie,
- int in_count, int out_count, int in_size, int out_size)
-{
- int i;
-
- for (i = 0; i < in_count; i++)
- memset(cookie->input_array[i], 0x0, in_size);
- for (i = 0; i < out_count; i++)
- memset(cookie->output_array[i], 0x0, out_size);
-}
-
-static void
-qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
- enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
-{
- if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
- qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
- QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
- out_size);
- else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
- qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
- QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
- out_size);
-}
-
-static void
-qat_asym_collect_response(struct rte_crypto_op *rx_op,
- struct qat_asym_op_cookie *cookie,
- struct rte_crypto_asym_xform *xform)
-{
- size_t alg_size, alg_size_in_bytes = 0;
- struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
- if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
- rte_crypto_param n = xform->modex.modulus;
-
- alg_size = cookie->alg_size;
- alg_size_in_bytes = alg_size >> 3;
- uint8_t *modexp_result = asym_op->modex.result.data;
-
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
- rte_memcpy(modexp_result +
- (asym_op->modex.result.length -
- n.length),
- cookie->output_array[0] + alg_size_in_bytes
- - n.length, n.length
- );
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
- cookie->output_array[0],
- alg_size_in_bytes);
-
-#endif
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
- rte_crypto_param n = xform->modinv.modulus;
-
- alg_size = cookie->alg_size;
- alg_size_in_bytes = alg_size >> 3;
- uint8_t *modinv_result = asym_op->modinv.result.data;
-
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
- rte_memcpy(modinv_result +
- (asym_op->modinv.result.length - n.length),
- cookie->output_array[0] + alg_size_in_bytes
- - n.length, n.length);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- }
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
- alg_size = cookie->alg_size;
- alg_size_in_bytes = alg_size >> 3;
- if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
- asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_VERIFY) {
- if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_ENCRYPT) {
- uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- } else if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_VERIFY) {
- uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- rx_op->status =
- RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- break;
- default:
- QAT_LOG(ERR, "Padding not supported");
- rx_op->status =
- RTE_CRYPTO_OP_STATUS_ERROR;
- break;
- }
- }
- } else {
- if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_DECRYPT) {
- uint8_t *rsa_result = asym_op->rsa.message.data;
-
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- break;
- default:
- QAT_LOG(ERR, "Padding not supported");
- rx_op->status =
- RTE_CRYPTO_OP_STATUS_ERROR;
- break;
- }
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
- rsa_result, alg_size_in_bytes);
-#endif
- } else if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_SIGN) {
- uint8_t *rsa_result = asym_op->rsa.sign.data;
-
- rte_memcpy(rsa_result,
- cookie->output_array[0],
- alg_size_in_bytes);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
- cookie->output_array[0],
- alg_size_in_bytes);
-#endif
- }
- }
- }
- qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
- alg_size_in_bytes);
-}
-
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
- void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
-{
- struct qat_asym_session *ctx;
- struct icp_qat_fw_pke_resp *resp_msg =
- (struct icp_qat_fw_pke_resp *)resp;
- struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque);
- struct qat_asym_op_cookie *cookie = op_cookie;
-
- if (cookie->error) {
- cookie->error = 0;
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- QAT_DP_LOG(ERR, "Cookie status returned error");
- } else {
- if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
- resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- QAT_DP_LOG(ERR, "Asymmetric response status"
- " returned error");
- }
- if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
- if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- QAT_DP_LOG(ERR, "Asymmetric common status"
- " returned error");
- }
- }
-
- if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- ctx = (struct qat_asym_session *)get_asym_session_private_data(
- rx_op->asym->session, qat_asym_driver_id);
- qat_asym_collect_response(rx_op, cookie, ctx->xform);
- } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
- }
- *op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
- sizeof(struct icp_qat_fw_pke_resp));
-#endif
-
- return 1;
-}
-
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int
-qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
- size_t arr_sz, size_t *size, uint32_t *func_id)
-{
- size_t i;
-
- for (i = 0; i < arr_sz; i++) {
- if (*size <= arr[i][0]) {
- *size = arr[i][0];
- *func_id = arr[i][1];
- return 0;
- }
- }
- return -1;
-}
-
-static size_t
-max_of(int n, ...)
-{
- va_list args;
- size_t len = 0, num;
- int i;
-
- va_start(args, n);
- len = va_arg(args, size_t);
-
- for (i = 0; i < n - 1; i++) {
- num = va_arg(args, size_t);
- if (num > len)
- len = num;
- }
- va_end(args);
-
- return len;
-}
-
-static int
-qat_asym_check_nonzero(rte_crypto_param n)
-{
- if (n.length < 8) {
- /* Not a case for any cryptograpic function except for DH
- * generator which very often can be of one byte length
- */
- size_t i;
-
- if (n.data[n.length - 1] == 0x0) {
- for (i = 0; i < n.length - 1; i++)
- if (n.data[i] != 0x0)
- break;
- if (i == n.length - 1)
- return -(EINVAL);
- }
- } else if (*(uint64_t *)&n.data[
- n.length - 8] == 0) {
- /* Very likely it is zeroed modulus */
- size_t i;
-
- for (i = 0; i < n.length - 8; i++)
- if (n.data[i] != 0x0)
- break;
- if (i == n.length - 8)
- return -(EINVAL);
- }
-
- return 0;
-}
-
-static int
-qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
- struct icp_qat_fw_pke_request *qat_req,
- struct qat_asym_op_cookie *cookie,
- struct rte_crypto_asym_xform *xform)
-{
- int err = 0;
- size_t alg_size;
- size_t alg_size_in_bytes;
- uint32_t func_id = 0;
-
- if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
- err = qat_asym_check_nonzero(xform->modex.modulus);
- if (err) {
- QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
- " aborting this operation");
- return err;
- }
-
- alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
- xform->modex.exponent.length,
- xform->modex.modulus.length);
- alg_size = alg_size_in_bytes << 3;
-
- if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
- sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
- &alg_size, &func_id)) {
- return -(EINVAL);
- }
-
- alg_size_in_bytes = alg_size >> 3;
- rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
- asym_op->modex.base.length
- , asym_op->modex.base.data,
- asym_op->modex.base.length);
- rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
- xform->modex.exponent.length
- , xform->modex.exponent.data,
- xform->modex.exponent.length);
- rte_memcpy(cookie->input_array[2] + alg_size_in_bytes -
- xform->modex.modulus.length,
- xform->modex.modulus.data,
- xform->modex.modulus.length);
- cookie->alg_size = alg_size;
- qat_req->pke_hdr.cd_pars.func_id = func_id;
- qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
- qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
- cookie->input_array[0],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
- cookie->input_array[1],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus",
- cookie->input_array[2],
- alg_size_in_bytes);
-#endif
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
- err = qat_asym_check_nonzero(xform->modinv.modulus);
- if (err) {
- QAT_LOG(ERR, "Empty modulus in modular multiplicative"
- " inverse, aborting this operation");
- return err;
- }
-
- alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
- xform->modinv.modulus.length);
- alg_size = alg_size_in_bytes << 3;
-
- if (xform->modinv.modulus.data[
- xform->modinv.modulus.length - 1] & 0x01) {
- if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
- sizeof(MOD_INV_IDS_ODD)/
- sizeof(*MOD_INV_IDS_ODD),
- &alg_size, &func_id)) {
- return -(EINVAL);
- }
- } else {
- if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
- sizeof(MOD_INV_IDS_EVEN)/
- sizeof(*MOD_INV_IDS_EVEN),
- &alg_size, &func_id)) {
- return -(EINVAL);
- }
- }
-
- alg_size_in_bytes = alg_size >> 3;
- rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
- asym_op->modinv.base.length
- , asym_op->modinv.base.data,
- asym_op->modinv.base.length);
- rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
- xform->modinv.modulus.length
- , xform->modinv.modulus.data,
- xform->modinv.modulus.length);
- cookie->alg_size = alg_size;
- qat_req->pke_hdr.cd_pars.func_id = func_id;
- qat_req->input_param_count =
- QAT_ASYM_MODINV_NUM_IN_PARAMS;
- qat_req->output_param_count =
- QAT_ASYM_MODINV_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
- cookie->input_array[0],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
- cookie->input_array[1],
- alg_size_in_bytes);
-#endif
- } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
- err = qat_asym_check_nonzero(xform->rsa.n);
- if (err) {
- QAT_LOG(ERR, "Empty modulus in RSA"
- " inverse, aborting this operation");
- return err;
- }
-
- alg_size_in_bytes = xform->rsa.n.length;
- alg_size = alg_size_in_bytes << 3;
-
- qat_req->input_param_count =
- QAT_ASYM_RSA_NUM_IN_PARAMS;
- qat_req->output_param_count =
- QAT_ASYM_RSA_NUM_OUT_PARAMS;
-
- if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
- asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_VERIFY) {
-
- if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
- sizeof(RSA_ENC_IDS)/
- sizeof(*RSA_ENC_IDS),
- &alg_size, &func_id)) {
- err = -(EINVAL);
- QAT_LOG(ERR,
- "Not supported RSA parameter size (key)");
- return err;
- }
- alg_size_in_bytes = alg_size >> 3;
- if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_ENCRYPT) {
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(cookie->input_array[0] +
- alg_size_in_bytes -
- asym_op->rsa.message.length
- , asym_op->rsa.message.data,
- asym_op->rsa.message.length);
- break;
- default:
- err = -(EINVAL);
- QAT_LOG(ERR,
- "Invalid RSA padding (Encryption)");
- return err;
- }
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
- cookie->input_array[0],
- alg_size_in_bytes);
-#endif
- } else {
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(cookie->input_array[0],
- asym_op->rsa.sign.data,
- alg_size_in_bytes);
- break;
- default:
- err = -(EINVAL);
- QAT_LOG(ERR,
- "Invalid RSA padding (Verify)");
- return err;
- }
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature",
- cookie->input_array[0],
- alg_size_in_bytes);
-#endif
-
- }
- rte_memcpy(cookie->input_array[1] +
- alg_size_in_bytes -
- xform->rsa.e.length
- , xform->rsa.e.data,
- xform->rsa.e.length);
- rte_memcpy(cookie->input_array[2] +
- alg_size_in_bytes -
- xform->rsa.n.length,
- xform->rsa.n.data,
- xform->rsa.n.length);
-
- cookie->alg_size = alg_size;
- qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
- cookie->input_array[1],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
- cookie->input_array[2],
- alg_size_in_bytes);
-#endif
- } else {
- if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_DECRYPT) {
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(cookie->input_array[0]
- + alg_size_in_bytes -
- asym_op->rsa.cipher.length,
- asym_op->rsa.cipher.data,
- asym_op->rsa.cipher.length);
- break;
- default:
- QAT_LOG(ERR,
- "Invalid padding of RSA (Decrypt)");
- return -(EINVAL);
- }
-
- } else if (asym_op->rsa.op_type ==
- RTE_CRYPTO_ASYM_OP_SIGN) {
- switch (asym_op->rsa.pad) {
- case RTE_CRYPTO_RSA_PADDING_NONE:
- rte_memcpy(cookie->input_array[0]
- + alg_size_in_bytes -
- asym_op->rsa.message.length,
- asym_op->rsa.message.data,
- asym_op->rsa.message.length);
- break;
- default:
- QAT_LOG(ERR,
- "Invalid padding of RSA (Signature)");
- return -(EINVAL);
- }
- }
- if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
-
- qat_req->input_param_count =
- QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
- if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
- sizeof(RSA_DEC_CRT_IDS)/
- sizeof(*RSA_DEC_CRT_IDS),
- &alg_size, &func_id)) {
- return -(EINVAL);
- }
- alg_size_in_bytes = alg_size >> 3;
-
- rte_memcpy(cookie->input_array[1] +
- (alg_size_in_bytes >> 1) -
- xform->rsa.qt.p.length
- , xform->rsa.qt.p.data,
- xform->rsa.qt.p.length);
- rte_memcpy(cookie->input_array[2] +
- (alg_size_in_bytes >> 1) -
- xform->rsa.qt.q.length
- , xform->rsa.qt.q.data,
- xform->rsa.qt.q.length);
- rte_memcpy(cookie->input_array[3] +
- (alg_size_in_bytes >> 1) -
- xform->rsa.qt.dP.length
- , xform->rsa.qt.dP.data,
- xform->rsa.qt.dP.length);
- rte_memcpy(cookie->input_array[4] +
- (alg_size_in_bytes >> 1) -
- xform->rsa.qt.dQ.length
- , xform->rsa.qt.dQ.data,
- xform->rsa.qt.dQ.length);
- rte_memcpy(cookie->input_array[5] +
- (alg_size_in_bytes >> 1) -
- xform->rsa.qt.qInv.length
- , xform->rsa.qt.qInv.data,
- xform->rsa.qt.qInv.length);
- cookie->alg_size = alg_size;
- qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "C",
- cookie->input_array[0],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "p",
- cookie->input_array[1],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "q",
- cookie->input_array[2],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG,
- "dP", cookie->input_array[3],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG,
- "dQ", cookie->input_array[4],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG,
- "qInv", cookie->input_array[5],
- alg_size_in_bytes);
-#endif
- } else if (xform->rsa.key_type ==
- RTE_RSA_KEY_TYPE_EXP) {
- if (qat_asym_get_sz_and_func_id(
- RSA_DEC_IDS,
- sizeof(RSA_DEC_IDS)/
- sizeof(*RSA_DEC_IDS),
- &alg_size, &func_id)) {
- return -(EINVAL);
- }
- alg_size_in_bytes = alg_size >> 3;
- rte_memcpy(cookie->input_array[1] +
- alg_size_in_bytes -
- xform->rsa.d.length,
- xform->rsa.d.data,
- xform->rsa.d.length);
- rte_memcpy(cookie->input_array[2] +
- alg_size_in_bytes -
- xform->rsa.n.length,
- xform->rsa.n.data,
- xform->rsa.n.length);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
- cookie->input_array[0],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
- cookie->input_array[1],
- alg_size_in_bytes);
- QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
- cookie->input_array[2],
- alg_size_in_bytes);
-#endif
-
- cookie->alg_size = alg_size;
- qat_req->pke_hdr.cd_pars.func_id = func_id;
- } else {
- QAT_LOG(ERR, "Invalid RSA key type");
- return -(EINVAL);
- }
- }
- } else {
- QAT_LOG(ERR, "Invalid asymmetric crypto xform");
- return -(EINVAL);
- }
- return 0;
-}
-
-static __rte_always_inline int
-qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
- __rte_unused uint64_t *opaque,
- __rte_unused enum qat_device_gen dev_gen)
-{
- struct qat_asym_session *ctx;
- struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
- struct rte_crypto_asym_op *asym_op = op->asym;
- struct icp_qat_fw_pke_request *qat_req =
- (struct icp_qat_fw_pke_request *)out_msg;
- struct qat_asym_op_cookie *cookie =
- (struct qat_asym_op_cookie *)op_cookie;
- int err = 0;
-
- op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- ctx = (struct qat_asym_session *)
- get_asym_session_private_data(
- op->asym->session, qat_asym_driver_id);
- if (unlikely(ctx == NULL)) {
- QAT_LOG(ERR, "Session has not been created for this device");
- goto error;
- }
- rte_mov64((uint8_t *)qat_req,
- (const uint8_t *)&(ctx->req_tmpl));
- err = qat_asym_fill_arrays(asym_op, qat_req,
- cookie, ctx->xform);
- if (err) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- goto error;
- }
- } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- qat_fill_req_tmpl(qat_req);
- err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
- op->asym->xform);
- if (err) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- goto error;
- }
- } else {
- QAT_DP_LOG(ERR, "Invalid session/xform settings");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- goto error;
- }
-
- qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
- qat_req->pke_mid.src_data_addr = cookie->input_addr;
- qat_req->pke_mid.dest_data_addr = cookie->output_addr;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_pke_request));
-#endif
-
- return 0;
-error:
-
- qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_pke_request));
-#endif
-
- qat_req->output_param_count = 0;
- qat_req->input_param_count = 0;
- qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
- cookie->error |= err;
-
- return 0;
-}
-
-static uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
- nb_ops);
-}
-
-static uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
- nb_ops);
-}
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
- struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
- struct qat_cryptodev_private *internals;
- struct rte_cryptodev *cryptodev;
- struct qat_device_info *qat_dev_instance =
- &qat_pci_devs[qat_pci_dev->qat_dev_id];
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = qat_dev_instance->pci_dev->device.numa_node,
- .private_data_size = sizeof(struct qat_cryptodev_private)
- };
- struct qat_capabilities_info capa_info;
- const struct rte_cryptodev_capabilities *capabilities;
- const struct qat_crypto_gen_dev_ops *gen_dev_ops =
- &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- uint64_t capa_size;
- int i = 0;
-
- snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
- qat_pci_dev->name, "asym");
- QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
- if (gen_dev_ops->cryptodev_ops == NULL) {
- QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
- name);
- return -(EFAULT);
- }
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- qat_pci_dev->qat_asym_driver_id =
- qat_asym_driver_id;
- } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- if (qat_pci_dev->qat_asym_driver_id !=
- qat_asym_driver_id) {
- QAT_LOG(ERR,
- "Device %s have different driver id than corresponding device in primary process",
- name);
- return -(EFAULT);
- }
- }
-
- /* Populate subset device to use in cryptodev device creation */
- qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
- qat_dev_instance->asym_rte_dev.numa_node =
- qat_dev_instance->pci_dev->device.numa_node;
- qat_dev_instance->asym_rte_dev.devargs = NULL;
-
- cryptodev = rte_cryptodev_pmd_create(name,
- &(qat_dev_instance->asym_rte_dev), &init_params);
-
- if (cryptodev == NULL)
- return -ENODEV;
-
- qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
- cryptodev->driver_id = qat_asym_driver_id;
- cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
- cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
-
- cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
- "QAT_ASYM_CAPA_GEN_%d",
- qat_pci_dev->qat_dev_gen);
-
- internals = cryptodev->data->dev_private;
- internals->qat_dev = qat_pci_dev;
- internals->dev_id = cryptodev->data->dev_id;
-
- capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
- capabilities = capa_info.data;
- capa_size = capa_info.size;
-
- internals->capa_mz = rte_memzone_lookup(capa_memz_name);
- if (internals->capa_mz == NULL) {
- internals->capa_mz = rte_memzone_reserve(capa_memz_name,
- capa_size, rte_socket_id(), 0);
- if (internals->capa_mz == NULL) {
- QAT_LOG(DEBUG,
- "Error allocating memzone for capabilities, "
- "destroying PMD for %s",
- name);
- rte_cryptodev_pmd_destroy(cryptodev);
- memset(&qat_dev_instance->asym_rte_dev, 0,
- sizeof(qat_dev_instance->asym_rte_dev));
- return -EFAULT;
- }
- }
-
- memcpy(internals->capa_mz->addr, capabilities, capa_size);
- internals->qat_dev_capabilities = internals->capa_mz->addr;
-
- while (1) {
- if (qat_dev_cmd_param[i].name == NULL)
- break;
- if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
- internals->min_enq_burst_threshold =
- qat_dev_cmd_param[i].val;
- i++;
- }
-
- qat_pci_dev->asym_dev = internals;
- internals->service_type = QAT_SERVICE_ASYMMETRIC;
- QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
- cryptodev->data->name, internals->dev_id);
- return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
- struct rte_cryptodev *cryptodev;
-
- if (qat_pci_dev == NULL)
- return -ENODEV;
- if (qat_pci_dev->asym_dev == NULL)
- return 0;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
- /* free crypto device */
- cryptodev = rte_cryptodev_pmd_get_dev(
- qat_pci_dev->asym_dev->dev_id);
- rte_cryptodev_pmd_destroy(cryptodev);
- qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
- qat_pci_dev->asym_dev = NULL;
-
- return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
- cryptodev_qat_asym_driver,
- qat_asym_driver_id);
deleted file mode 100644
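Throughout the removed qat_asym.c above, every big-endian PKE operand is copied into the tail of a fixed-width firmware buffer (dst + alg_size_in_bytes - length), and results are read back from the same right-aligned offset. A minimal standalone sketch of that left-padding convention, with hypothetical buffer and operand names (not the driver's own helpers):

#include <stdint.h>
#include <string.h>

/* One firmware "large number" slot, matching QAT_PKE_MAX_LN_SIZE in the
 * removed qat_asym.h below (sketch only).
 */
#define EXAMPLE_PKE_LN_SIZE 512

/* Right-align a big-endian operand inside a zero-padded PKE slot so its
 * least significant byte lands at the very end of the buffer.
 */
static void
pke_copy_right_aligned(uint8_t *slot, size_t slot_len,
		const uint8_t *operand, size_t operand_len)
{
	memset(slot, 0, slot_len);
	memcpy(slot + slot_len - operand_len, operand, operand_len);
}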
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _QAT_ASYM_H_
-#define _QAT_ASYM_H_
-
-#include <cryptodev_pmd.h>
-#include <rte_crypto_asym.h>
-#include "icp_qat_fw_pke.h"
-#include "qat_device.h"
-#include "qat_crypto.h"
-#include "icp_qat_fw.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym
-
-typedef uint64_t large_int_ptr;
-#define MAX_PKE_PARAMS 8
-#define QAT_PKE_MAX_LN_SIZE 512
-#define _PKE_ALIGN_ __rte_aligned(8)
-
-#define QAT_ASYM_MAX_PARAMS 8
-#define QAT_ASYM_MODINV_NUM_IN_PARAMS 2
-#define QAT_ASYM_MODINV_NUM_OUT_PARAMS 1
-#define QAT_ASYM_MODEXP_NUM_IN_PARAMS 3
-#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS 1
-#define QAT_ASYM_RSA_NUM_IN_PARAMS 3
-#define QAT_ASYM_RSA_NUM_OUT_PARAMS 1
-#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6
-
-/**
- * helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \
- {.asym = { \
- .xform_capa = { \
- .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
- .op_types = o, \
- { \
- .modlen = { \
- .min = l, \
- .max = r, \
- .increment = i \
- }, } \
- } \
- }, \
- } \
- }
-
-struct qat_asym_op_cookie {
- size_t alg_size;
- uint64_t error;
- rte_iova_t input_addr;
- rte_iova_t output_addr;
- large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
- large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
- union {
- uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
- uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
- } _PKE_ALIGN_;
- uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
-} _PKE_ALIGN_;
-
-struct qat_asym_session {
- struct icp_qat_fw_pke_request req_tmpl;
- struct rte_crypto_asym_xform *xform;
-};
-
-static inline void
-qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
- memset(qat_req, 0, sizeof(*qat_req));
- qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
- qat_req->pke_hdr.hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
- (ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void
-qat_asym_build_req_tmpl(void *sess_private_data)
-{
- struct icp_qat_fw_pke_request *qat_req;
- struct qat_asym_session *session = sess_private_data;
-
- qat_req = &session->req_tmpl;
- qat_fill_req_tmpl(qat_req);
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_asym_xform *xform,
- struct rte_cryptodev_asym_session *sess,
- struct rte_mempool *mempool);
-
-unsigned int
-qat_asym_session_get_private_size(struct rte_cryptodev *dev);
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
- struct rte_cryptodev_asym_session *sess);
-
-/*
- * Process PKE response received from outgoing queue of QAT
- *
- * @param op a ptr to the rte_crypto_op referred to by
- * the response message is returned in this param
- * @param resp icp_qat_fw_pke_resp message received from
- * outgoing fw message queue
- * @param op_cookie Cookie pointer that holds private metadata
- * @param dequeue_err_count Error count number pointer
- *
- */
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
- void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
-
-void
-qat_asym_init_op_cookie(void *cookie);
-
-#endif /* _QAT_ASYM_H_ */
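The op-cookie initialisers in this patch (qat_asym_init_op_cookie declared above, and qat_sym_init_op_cookie added further down) derive the IOVA of a field inside a mempool element as the element's own IOVA plus the field offset. A hedged sketch of that idiom with a hypothetical cookie layout:

#include <stddef.h>
#include <stdint.h>
#include <rte_mempool.h>

/* Hypothetical cookie holding a DMA-able sub-buffer whose IOVA is cached */
struct example_op_cookie {
	rte_iova_t buf_phys_addr;
	uint8_t buf[128];
};

static void
example_init_op_cookie(void *op_cookie)
{
	struct example_op_cookie *cookie = op_cookie;

	/* element IOVA + field offset == field IOVA for mempool objects */
	cookie->buf_phys_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct example_op_cookie, buf);
}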
@@ -12,7 +12,10 @@
extern uint8_t qat_sym_driver_id;
extern uint8_t qat_asym_driver_id;
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min> <r: max> <i: increment> <v: value>

+ **/
#define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
#define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
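For reference, CAP_RNG and CAP_RNG_ZERO above simply fill a {min, max, increment} range member by name. A small self-contained illustration of the expansion, using a struct that mirrors the rte_crypto_param_range layout (struct name and values here are illustrative only):

#include <stdint.h>

/* Mirrors the shape of rte_crypto_param_range (sketch only) */
struct param_range {
	uint16_t min;
	uint16_t max;
	uint16_t increment;
};

#define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
#define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}

struct key_iv_caps {
	struct param_range key_size;
	struct param_range iv_size;
};

/* e.g. a 16..32 byte key in steps of 8, and no IV range at all */
static const struct key_iv_caps example_caps = {
	CAP_RNG(key_size, 16, 32, 8),
	CAP_RNG_ZERO(iv_size),
};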
@@ -11,273 +11,107 @@
#include <rte_byteorder.h>
#include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
+uint8_t qat_sym_driver_id;
-/** Decrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_decrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_decrypt_err:
- QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
- return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
- /* Decrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely((sym_op->m_dst != NULL)
- && (sym_op->m_dst != sym_op->m_src)))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = last_block - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
- last_block, last_block_len);
- if (sym_op->m_dst != NULL)
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_decrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
- last_block, last_block_len);
- if (sym_op->m_dst != NULL)
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
- dst, last_block_len);
-#endif
- }
-
- return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- /* copy IV into request if it fits */
- if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset),
- iv_length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- rte_crypto_op_ctophys_offset(op,
- iv_offset);
- }
-}
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-/** Set IV for CCM is special case, 0th byte is set to q-1
- * where q is padding of nonce in 16 byte block
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
*/
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
{
- rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
- *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
- q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
- if (aad_len_field_sz)
- rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
-}
-
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
- struct qat_sym_op_cookie *cookie,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- static const uint32_t ver_key_offset =
- sizeof(struct icp_qat_hw_auth_setup) +
- ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
- ICP_QAT_HW_GALOIS_E_CTR0_SZ +
- sizeof(struct icp_qat_hw_cipher_config);
- struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
- (void *) &qat_req->cd_ctrl;
- struct icp_qat_fw_la_cipher_req_params *cipher_param =
- (void *) &qat_req->serv_specif_rqpars;
- uint32_t data_length = op->sym->auth.data.length;
-
- /* Fill separate Content Descriptor for this op */
- rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
- ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
- ctx->cd.cipher.key :
- RTE_PTR_ADD(&ctx->cd, ver_key_offset),
- ctx->auth_key_length);
- cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(
- ICP_QAT_HW_CIPHER_AEAD_MODE,
- ctx->qat_cipher_alg,
- ICP_QAT_HW_CIPHER_NO_CONVERT,
- (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
- ICP_QAT_HW_CIPHER_ENCRYPT :
- ICP_QAT_HW_CIPHER_DECRYPT));
- QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
- ctx->digest_length,
- QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
- QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
- cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
- /* Update the request */
- qat_req->cd_pars.u.s.content_desc_addr =
- cookie->opt.spc_gmac.cd_phys_addr;
- qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
- sizeof(struct icp_qat_hw_cipher_config) +
- ctx->auth_key_length, 8) >> 3;
- qat_req->comn_mid.src_length = data_length;
- qat_req->comn_mid.dst_length = 0;
-
- cipher_param->spc_aad_addr = 0;
- cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
- cipher_param->spc_aad_sz = data_length;
- cipher_param->reserved = 0;
- cipher_param->spc_auth_res_sz = ctx->digest_length;
-
- qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
- cipher_cd_ctrl->cipher_cfg_offset = 0;
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
- ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
- ICP_QAT_FW_LA_PROTO_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
+ struct qat_sym_op_cookie *cookie = op_cookie;
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+
+ cookie->opt.spc_gmac.cd_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ opt.spc_gmac.cd_cipher);
}
static __rte_always_inline int
-refactor_qat_sym_build_request(__rte_unused void *in_op,
- __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
- __rte_unused uint64_t *opaque,
- __rte_unused enum qat_device_gen dev_gen)
-{
- return 0;
-}
-
-uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
- (void **)ops, nb_ops);
-}
-
-uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return refactor_qat_dequeue_op_burst(qp, (void **)ops,
- refactor_qat_sym_process_response, nb_ops);
-}
-
-int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
+ void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
{
- int ret = 0;
- struct qat_sym_session *ctx = NULL;
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
- struct icp_qat_fw_la_auth_req_params *auth_param;
- register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
- uint32_t cipher_len = 0, cipher_ofs = 0;
- uint32_t auth_len = 0, auth_ofs = 0;
- uint32_t min_ofs = 0;
- uint64_t src_buf_start = 0, dst_buf_start = 0;
- uint64_t auth_data_end = 0;
- uint8_t do_sgl = 0;
- uint8_t in_place = 1;
- int alignment_adjustment = 0;
- int oop_shift = 0;
struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
- struct qat_sym_op_cookie *cookie =
- (struct qat_sym_op_cookie *)op_cookie;
+ void *sess = (void *)opaque[0];
+ qat_sym_build_request_t build_request = (void *)opaque[1];
+ struct qat_sym_session *ctx = NULL;
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- return -EINVAL;
+ if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+ ctx = get_sym_session_private_data(op->sym->session,
+ qat_sym_driver_id);
+ if (unlikely(!ctx)) {
+ QAT_DP_LOG(ERR, "No session for this device");
+ return -EINVAL;
+ }
+ if (sess != ctx) {
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
+ enum rte_proc_type_t proc_type;
+
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+ proc_type = rte_eal_process_type();
+
+ if (internals->qat_dev->qat_dev_gen != dev_gen) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->build_request[proc_type] == NULL)) {
+ int ret =
+ qat_sym_gen_dev_ops[dev_gen].set_session(
+ (void *)cdev, sess);
+ if (ret < 0) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+ }
+
+ build_request = ctx->build_request[proc_type];
+ opaque[0] = (uintptr_t)ctx;
+ opaque[1] = (uintptr_t)build_request;
+ }
}
- if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
- " requests, op (%p) is sessionless.", op);
- return -EINVAL;
- } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- ctx = (struct qat_sym_session *)get_sym_session_private_data(
- op->sym->session, qat_sym_driver_id);
#ifdef RTE_LIB_SECURITY
- } else {
- ctx = (struct qat_sym_session *)get_sec_session_private_data(
- op->sym->sec_session);
- if (likely(ctx)) {
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ if (sess != (void *)op->sym->sec_session) {
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
+ enum rte_proc_type_t proc_type;
+
+ ctx = get_sec_session_private_data(
+ op->sym->sec_session);
+ if (unlikely(!ctx)) {
+ QAT_DP_LOG(ERR, "No session for this device");
+ return -EINVAL;
+ }
if (unlikely(ctx->bpi_ctx == NULL)) {
QAT_DP_LOG(ERR, "QAT PMD only supports security"
" operation requests for"
@@ -293,463 +127,284 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
- }
-#endif
- }
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+ proc_type = rte_eal_process_type();
- if (unlikely(ctx == NULL)) {
- QAT_DP_LOG(ERR, "Session was not created for this device");
- return -EINVAL;
- }
+ if (internals->qat_dev->qat_dev_gen != dev_gen) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
- qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param +
- ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
- if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
- !ctx->is_gmac) {
- /* AES-GCM or AES-CCM */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
- (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
- && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
- && ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
- do_aead = 1;
- } else {
- do_auth = 1;
- do_cipher = 1;
+ if (unlikely(ctx->build_request[proc_type] == NULL)) {
+ int ret =
+ qat_sym_gen_dev_ops[dev_gen].set_session(
+ (void *)cdev, sess);
+ if (ret < 0) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+ }
+
+ sess = (void *)op->sym->sec_session;
+ build_request = ctx->build_request[proc_type];
+ opaque[0] = (uintptr_t)sess;
+ opaque[1] = (uintptr_t)build_request;
}
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
- do_auth = 1;
- do_cipher = 0;
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- do_auth = 0;
- do_cipher = 1;
+ }
+#endif
+ else { /* RTE_CRYPTO_OP_SESSIONLESS */
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+ return -1;
}
- if (do_cipher) {
+ return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
- if (ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
- ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, qat_sym_build_request,
+ (void **)ops, nb_ops);
+}
- if (unlikely(
- (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
- (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
- QAT_DP_LOG(ERR,
- "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- cipher_len = op->sym->cipher.data.length >> 3;
- cipher_ofs = op->sym->cipher.data.offset >> 3;
-
- } else if (ctx->bpi_ctx) {
- /* DOCSIS - only send complete blocks to device.
- * Process any partial block using CFB mode.
- * Even if 0 complete blocks, still send this to device
- * to get into rx queue for post-process and dequeuing
- */
- cipher_len = qat_bpicipher_preprocess(ctx, op);
- cipher_ofs = op->sym->cipher.data.offset;
- } else {
- cipher_len = op->sym->cipher.data.length;
- cipher_ofs = op->sym->cipher.data.offset;
- }
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops,
+ qat_sym_process_response, nb_ops);
+}
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
- min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+ int i = 0, ret = 0;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_dev_instance->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_cryptodev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_cryptodev_private *internals;
+ struct qat_capabilities_info capa_info;
+ const struct rte_cryptodev_capabilities *capabilities;
+ const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+ &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+ uint64_t capa_size;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ if (gen_dev_ops->cryptodev_ops == NULL) {
+ QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+ name);
+ return -(EFAULT);
}
- if (do_auth) {
-
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely(
- (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
- (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
- QAT_DP_LOG(ERR,
- "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_ofs = op->sym->auth.data.offset >> 3;
- auth_len = op->sym->auth.data.length >> 3;
-
- auth_param->u1.aad_adr =
- rte_crypto_op_ctophys_offset(op,
- ctx->auth_iv.offset);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /* AES-GMAC */
- set_cipher_iv(ctx->auth_iv.length,
- ctx->auth_iv.offset,
- cipher_param, op, qat_req);
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- auth_param->u1.aad_adr = 0;
- auth_param->u2.aad_sz = 0;
-
- } else {
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
+ /*
+ * All processes must use the same driver id so they can share sessions.
+ * Store driver_id so we can validate that all processes have the same
+ * value; typically they do, but they could differ if the binaries are
+ * built separately.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ qat_pci_dev->qat_sym_driver_id =
+ qat_sym_driver_id;
+ } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ if (qat_pci_dev->qat_sym_driver_id !=
+ qat_sym_driver_id) {
+ QAT_LOG(ERR,
+ "Device %s has a different driver id than the corresponding device in the primary process",
+ name);
+ return -(EFAULT);
}
- min_ofs = auth_ofs;
-
- if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
- ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
- auth_param->auth_res_addr =
- op->sym->auth.digest.phys_addr;
-
}
- if (do_aead) {
- /*
- * This address may used for setting AAD physical pointer
- * into IV offset from op
- */
- rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
- set_cipher_iv(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
- /* In case of AES-CCM this may point to user selected
- * memory or iv offset in cypto_op
- */
- uint8_t *aad_data = op->sym->aead.aad.data;
- /* This is true AAD length, it not includes 18 bytes of
- * preceding data
- */
- uint8_t aad_ccm_real_len = 0;
- uint8_t aad_len_field_sz = 0;
- uint32_t msg_len_be =
- rte_bswap32(op->sym->aead.data.length);
-
- if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
- aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
- aad_ccm_real_len = ctx->aad_len -
- ICP_QAT_HW_CCM_AAD_B0_LEN -
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- } else {
- /*
- * aad_len not greater than 18, so no actual aad
- * data, then use IV after op for B0 block
- */
- aad_data = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- aad_phys_addr_aead =
- rte_crypto_op_ctophys_offset(op,
- ctx->cipher_iv.offset);
- }
+ /* Populate subset device to use in cryptodev device creation */
+ qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_dev_instance->sym_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->sym_rte_dev.devargs = NULL;
- uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
- ctx->cipher_iv.length;
-
- aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
- aad_len_field_sz,
- ctx->digest_length, q);
-
- if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET +
- (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
- (uint8_t *)&msg_len_be,
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
- } else {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)&msg_len_be
- + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- - q), q);
- }
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_dev_instance->sym_rte_dev), &init_params);
- if (aad_len_field_sz > 0) {
- *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
- = rte_bswap16(aad_ccm_real_len);
-
- if ((aad_ccm_real_len + aad_len_field_sz)
- % ICP_QAT_HW_CCM_AAD_B0_LEN) {
- uint8_t pad_len = 0;
- uint8_t pad_idx = 0;
-
- pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
- ((aad_ccm_real_len + aad_len_field_sz) %
- ICP_QAT_HW_CCM_AAD_B0_LEN);
- pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
- aad_ccm_real_len + aad_len_field_sz;
- memset(&aad_data[pad_idx],
- 0, pad_len);
- }
+ if (cryptodev == NULL)
+ return -ENODEV;
- }
+ qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = qat_sym_driver_id;
+ cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
- set_cipher_iv_ccm(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, q,
- aad_len_field_sz);
+ cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+ cryptodev->dequeue_burst = qat_sym_dequeue_burst;
- }
+ cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
- cipher_len = op->sym->aead.data.length;
- cipher_ofs = op->sym->aead.data.offset;
- auth_len = op->sym->aead.data.length;
- auth_ofs = op->sym->aead.data.offset;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
- auth_param->u1.aad_adr = aad_phys_addr_aead;
- auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- min_ofs = op->sym->aead.data.offset;
- }
-
- if (op->sym->m_src->nb_segs > 1 ||
- (op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
- do_sgl = 1;
-
- /* adjust for chain case */
- if (do_cipher && do_auth)
- min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
- if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
- min_ofs = 0;
-
- if (unlikely((op->sym->m_dst != NULL) &&
- (op->sym->m_dst != op->sym->m_src))) {
- /* Out-of-place operation (OOP)
- * Don't align DMA start. DMA the minimum data-set
- * so as not to overwrite data in dest buffer
- */
- in_place = 0;
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- dst_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
- oop_shift = min_ofs;
-
- } else {
- /* In-place operation
- * Start DMA at nearest aligned address below min_ofs
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
- & QAT_64_BTYE_ALIGN_MASK;
-
- if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
- rte_pktmbuf_headroom(op->sym->m_src))
- > src_buf_start)) {
- /* alignment has pushed addr ahead of start of mbuf
- * so revert and take the performance hit
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src,
- min_ofs);
+#ifdef RTE_LIB_SECURITY
+ if (gen_dev_ops->create_security_ctx) {
+ cryptodev->security_ctx =
+ gen_dev_ops->create_security_ctx((void *)cryptodev);
+ if (cryptodev->security_ctx == NULL) {
+ QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+ ret = -ENOMEM;
+ goto error;
}
- dst_buf_start = src_buf_start;
-
- /* remember any adjustment for later, note, can be +/- */
- alignment_adjustment = src_buf_start -
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- }
- if (do_cipher || do_aead) {
- cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, cipher_ofs) - src_buf_start;
- cipher_param->cipher_length = cipher_len;
+ cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+ QAT_LOG(INFO, "Device %s rte_security support enabled", name);
} else {
- cipher_param->cipher_offset = 0;
- cipher_param->cipher_length = 0;
+ QAT_LOG(INFO, "Device %s rte_security support disabled", name);
}
-
- if (!ctx->is_single_pass) {
- /* Do not let to overwrite spc_aad len */
- if (do_auth || do_aead) {
- auth_param->auth_off =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, auth_ofs) - src_buf_start;
- auth_param->auth_len = auth_len;
- } else {
- auth_param->auth_off = 0;
- auth_param->auth_len = 0;
+#endif
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "QAT_SYM_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+
+ internals->dev_id = cryptodev->data->dev_id;
+
+ capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+ capabilities = capa_info.data;
+ capa_size = capa_info.size;
+
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ capa_size, rte_socket_id(), 0);
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities, "
+ "destroying PMD for %s",
+ name);
+ ret = -EFAULT;
+ goto error;
}
}
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- > (auth_param->auth_off + auth_param->auth_len) ?
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- : (auth_param->auth_off + auth_param->auth_len);
-
- if (do_auth && do_cipher) {
- /* Handle digest-encrypted cases, i.e.
- * auth-gen-then-cipher-encrypt and
- * cipher-decrypt-then-auth-verify
- */
- /* First find the end of the data */
- if (do_sgl) {
- uint32_t remaining_off = auth_param->auth_off +
- auth_param->auth_len + alignment_adjustment + oop_shift;
- struct rte_mbuf *sgl_buf =
- (in_place ?
- op->sym->m_src : op->sym->m_dst);
-
- while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
- && sgl_buf->next != NULL) {
- remaining_off -= rte_pktmbuf_data_len(sgl_buf);
- sgl_buf = sgl_buf->next;
- }
+ memcpy(internals->capa_mz->addr, capabilities, capa_size);
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
- auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
- sgl_buf, remaining_off);
- } else {
- auth_data_end = (in_place ?
- src_buf_start : dst_buf_start) +
- auth_param->auth_off + auth_param->auth_len;
- }
- /* Then check if digest-encrypted conditions are met */
- if ((auth_param->auth_off + auth_param->auth_len <
- cipher_param->cipher_offset +
- cipher_param->cipher_length) &&
- (op->sym->auth.digest.phys_addr ==
- auth_data_end)) {
- /* Handle partial digest encryption */
- if (cipher_param->cipher_offset +
- cipher_param->cipher_length <
- auth_param->auth_off +
- auth_param->auth_len +
- ctx->digest_length)
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- auth_param->auth_off +
- auth_param->auth_len +
- ctx->digest_length;
- struct icp_qat_fw_comn_req_hdr *header =
- &qat_req->comn_hdr;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
- header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- }
+ while (1) {
+ if (qat_dev_cmd_param[i].name == NULL)
+ break;
+ if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+ internals->min_enq_burst_threshold =
+ qat_dev_cmd_param[i].val;
+ i++;
}
- if (do_sgl) {
+ internals->service_type = QAT_SERVICE_SYMMETRIC;
+ qat_pci_dev->sym_dev = internals;
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->dev_id);
- ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
- QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src,
- (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
- &cookie->qat_sgl_src,
- qat_req->comn_mid.src_length,
- QAT_SYM_SGL_MAX_NUMBER);
+ return 0;
- if (unlikely(ret)) {
- QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
- return ret;
- }
+error:
+#ifdef RTE_LIB_SECURITY
+ rte_free(cryptodev->security_ctx);
+ cryptodev->security_ctx = NULL;
+#endif
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->sym_rte_dev, 0,
+ sizeof(qat_dev_instance->sym_rte_dev));
- if (in_place)
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- cookie->qat_sgl_src_phys_addr;
- else {
- ret = qat_sgl_fill_array(op->sym->m_dst,
- (int64_t)(dst_buf_start -
- rte_pktmbuf_iova(op->sym->m_dst)),
- &cookie->qat_sgl_dst,
- qat_req->comn_mid.dst_length,
- QAT_SYM_SGL_MAX_NUMBER);
-
- if (unlikely(ret)) {
- QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
- return ret;
- }
+ return ret;
+}
- qat_req->comn_mid.src_data_addr =
- cookie->qat_sgl_src_phys_addr;
- qat_req->comn_mid.dest_data_addr =
- cookie->qat_sgl_dst_phys_addr;
- }
- qat_req->comn_mid.src_length = 0;
- qat_req->comn_mid.dst_length = 0;
- } else {
- qat_req->comn_mid.src_data_addr = src_buf_start;
- qat_req->comn_mid.dest_data_addr = dst_buf_start;
- }
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
- if (ctx->is_single_pass) {
- if (ctx->is_ucs) {
- /* GEN 4 */
- cipher_param20->spc_aad_addr =
- op->sym->aead.aad.phys_addr;
- cipher_param20->spc_auth_res_addr =
- op->sym->aead.digest.phys_addr;
- } else {
- cipher_param->spc_aad_addr =
- op->sym->aead.aad.phys_addr;
- cipher_param->spc_auth_res_addr =
- op->sym->aead.digest.phys_addr;
- }
- } else if (ctx->is_single_pass_gmac &&
- op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
- /* Handle Single-Pass AES-GMAC */
- handle_spc_gmac(ctx, op, cookie, qat_req);
- }
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_la_bulk_req));
- QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
- rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
- rte_pktmbuf_data_len(op->sym->m_src));
- if (do_cipher) {
- uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
- ctx->cipher_iv.length);
- }
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+ rte_free(cryptodev->security_ctx);
+ cryptodev->security_ctx = NULL;
+#endif
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
- if (do_auth) {
- if (ctx->auth_iv.length) {
- uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->auth_iv.offset);
- QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
- ctx->auth_iv.length);
- }
- QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
- ctx->digest_length);
+ return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+ struct qat_cryptodev_private *internals = dev->data->dev_private;
+ enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+ struct qat_crypto_gen_dev_ops *gen_dev_ops =
+ &qat_sym_gen_dev_ops[qat_dev_gen];
+ struct qat_qp *qp;
+ struct qat_sym_session *ctx;
+ struct qat_sym_dp_ctx *dp_ctx;
+
+ if (!gen_dev_ops->set_raw_dp_ctx) {
+ QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+ qat_dev_gen);
+ return -ENOTSUP;
}
- if (do_aead) {
- QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
- ctx->digest_length);
- QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
- ctx->aad_len);
+ qp = dev->data->queue_pairs[qp_id];
+ dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+ if (!is_update) {
+ memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+ sizeof(struct qat_sym_dp_ctx));
+ raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+ dp_ctx->tail = qp->tx_q.tail;
+ dp_ctx->head = qp->rx_q.head;
+ dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
}
-#endif
- return 0;
+
+ if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+ return -EINVAL;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session_ctx.crypto_sess, qat_sym_driver_id);
+
+ dp_ctx->session = ctx;
+
+ return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct qat_sym_dp_ctx);
}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ qat_sym_driver_id);
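qat_sym_build_request() above keeps a small per-queue cache in the opaque[] array handed down by qat_enqueue_op_burst(): slot 0 remembers the last session pointer seen and slot 1 the build_request callback resolved for it, so back-to-back operations on the same session skip the lookup and re-resolution. A minimal sketch of that caching pattern (function and type names are illustrative, not the driver's):

#include <stdint.h>

typedef int (*build_request_t)(void *op, void *sess, uint8_t *out_msg,
		void *op_cookie);

static inline build_request_t
opaque_cache_lookup(uint64_t opaque[2], void *sess,
		build_request_t (*resolve)(void *sess))
{
	if ((void *)(uintptr_t)opaque[0] != sess) {
		/* cache miss: remember the session and its builder */
		opaque[0] = (uintptr_t)sess;
		opaque[1] = (uintptr_t)resolve(sess);
	}
	return (build_request_t)(uintptr_t)opaque[1];
}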
@@ -15,15 +15,75 @@
#include "qat_common.h"
#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
#include "qat_logs.h"
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
#define BYTE_LENGTH 8
/* bpi is only used for partial blocks of DES and AES
* so AES block len can be assumed as max len for iv, src and dst
*/
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0)
+#define QAT_SYM_CAP_VALID (1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper macro to add a sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_##n, \
+ b, d \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_##n, \
+ b, k, d, a, i \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_##n, \
+ b, k, d, a, i \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_##n, \
+ b, k, i \
+ }, } \
+ }, } \
+ }
+
/*
* Maximum number of SGL entries
*/
@@ -62,27 +122,14 @@ struct qat_sym_dp_ctx {
uint16_t cached_dequeue;
};
-static __rte_always_inline int
-refactor_qat_sym_process_response(__rte_unused void **op,
- __rte_unused uint8_t *resp, __rte_unused void *op_cookie,
- __rte_unused uint64_t *dequeue_err_count)
-{
- return 0;
-}
-
uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops);
uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops);
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
/** Encrypt a single partial block
* Depends on openssl libcrypto
* Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -237,17 +284,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
}
}
}
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
- uint16_t nb_ops __rte_unused)
-{
-}
#endif
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+ uint64_t *dequeue_err_count __rte_unused)
{
struct icp_qat_fw_comn_resp *resp_msg =
(struct icp_qat_fw_comn_resp *)resp;
@@ -306,6 +347,8 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
}
*op = (void *)rx_op;
+
+ return 1;
}
int
@@ -317,6 +360,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
int
qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+ struct qat_sym_session *ctx,
+ struct rte_crypto_vec *vec, uint32_t vec_len,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ struct rte_crypto_va_iova_ptr *aad,
+ struct rte_crypto_va_iova_ptr *digest)
+{
+ uint32_t i;
+
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ for (i = 0; i < vec_len; i++)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+ if (cipher_iv && ctx->cipher_iv.length > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+ ctx->cipher_iv.length);
+ if (auth_iv && ctx->auth_iv.length > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+ ctx->auth_iv.length);
+ if (aad && ctx->aad_len > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+ ctx->aad_len);
+ if (digest && ctx->digest_length > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+ ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+ struct qat_sym_session *ctx __rte_unused,
+ struct rte_crypto_vec *vec __rte_unused,
+ uint32_t vec_len __rte_unused,
+ struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *aad __rte_unused,
+ struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
#else
static inline void
@@ -331,5 +420,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
{
}
-#endif
+#endif /* BUILD_QAT_SYM */
#endif /* _QAT_SYM_H_ */
deleted file mode 100644
@@ -1,975 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym_refactor.h"
-#include "qat_sym_pmd.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
- struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
- struct qat_queue *tx_queue;
- struct qat_sym_op_cookie *cookie;
- struct qat_sgl *list;
- uint32_t i;
- uint32_t total_len;
-
- if (likely(n_data_vecs == 1)) {
- req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
- data[0].iova;
- req->comn_mid.src_length = req->comn_mid.dst_length =
- data[0].len;
- return data[0].len;
- }
-
- if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
- return -1;
-
- total_len = 0;
- tx_queue = &qp->tx_q;
-
- ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
- QAT_COMN_PTR_TYPE_SGL);
- cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
- list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
- for (i = 0; i < n_data_vecs; i++) {
- list->buffers[i].len = data[i].len;
- list->buffers[i].resrvd = 0;
- list->buffers[i].addr = data[i].iova;
- if (total_len + data[i].len > UINT32_MAX) {
- QAT_DP_LOG(ERR, "Message too long");
- return -1;
- }
- total_len += data[i].len;
- }
-
- list->num_bufs = i;
- req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
- cookie->qat_sgl_src_phys_addr;
- req->comn_mid.src_length = req->comn_mid.dst_length = 0;
- return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- /* copy IV into request if it fits */
- if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
- rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
- iv_len);
- else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
- }
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
- (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
- uint32_t i;
-
- for (i = 0; i < n; i++)
- sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
- RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct rte_crypto_va_iova_ptr *iv,
- union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
- cipher_param = (void *)&req->serv_specif_rqpars;
-
- /* cipher IV */
- set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
- cipher_param->cipher_offset = ofs.ofs.cipher.head;
- cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
- ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_vec *data, uint16_t n_data_vecs,
- union rte_crypto_sym_ofs ofs,
- struct rte_crypto_va_iova_ptr *iv,
- struct rte_crypto_va_iova_ptr *digest __rte_unused,
- struct rte_crypto_va_iova_ptr *aad __rte_unused,
- void *user_data)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
- uint32_t tail = dp_ctx->tail;
-
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
- data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
- if (unlikely(data_len < 0))
- return -1;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
- enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue++;
-
- return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
- void *user_data[], int *status)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- uint32_t i, n;
- uint32_t tail;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
-
- n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
- if (unlikely(n == 0)) {
- qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
- *status = 0;
- return 0;
- }
-
- tail = dp_ctx->tail;
-
- for (i = 0; i < n; i++) {
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
- data_len = qat_sym_dp_parse_data_vec(qp, req,
- vec->src_sgl[i].vec,
- vec->src_sgl[i].num);
- if (unlikely(data_len < 0))
- break;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
- enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
- (uint32_t)data_len);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- }
-
- if (unlikely(i < n))
- qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue += i;
- *status = 0;
- return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *auth_iv,
- union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_auth_req_params *auth_param;
-
- cipher_param = (void *)&req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param +
- ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
- auth_param->auth_off = ofs.ofs.auth.head;
- auth_param->auth_len = data_len - ofs.ofs.auth.head -
- ofs.ofs.auth.tail;
- auth_param->auth_res_addr = digest->iova;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
- case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
- auth_param->u1.aad_adr = auth_iv->iova;
- break;
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
- ctx->auth_iv.length);
- break;
- default:
- break;
- }
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_vec *data, uint16_t n_data_vecs,
- union rte_crypto_sym_ofs ofs,
- struct rte_crypto_va_iova_ptr *iv __rte_unused,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *auth_iv,
- void *user_data)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
- uint32_t tail = dp_ctx->tail;
-
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
- data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
- if (unlikely(data_len < 0))
- return -1;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
- enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
- (uint32_t)data_len);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue++;
-
- return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
- void *user_data[], int *status)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- uint32_t i, n;
- uint32_t tail;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
-
- n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
- if (unlikely(n == 0)) {
- qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
- *status = 0;
- return 0;
- }
-
- tail = dp_ctx->tail;
-
- for (i = 0; i < n; i++) {
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
- data_len = qat_sym_dp_parse_data_vec(qp, req,
- vec->src_sgl[i].vec,
- vec->src_sgl[i].num);
- if (unlikely(data_len < 0))
- break;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
- enqueue_one_auth_job(ctx, req, &vec->digest[i],
- &vec->auth_iv[i], ofs, (uint32_t)data_len);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- }
-
- if (unlikely(i < n))
- qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue += i;
- *status = 0;
- return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct rte_crypto_vec *data,
- uint16_t n_data_vecs,
- struct rte_crypto_va_iova_ptr *cipher_iv,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *auth_iv,
- union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_auth_req_params *auth_param;
- rte_iova_t auth_iova_end;
- int32_t cipher_len, auth_len;
-
- cipher_param = (void *)&req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param +
- ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
- cipher_len = data_len - ofs.ofs.cipher.head -
- ofs.ofs.cipher.tail;
- auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
- if (unlikely(cipher_len < 0 || auth_len < 0))
- return -1;
-
- cipher_param->cipher_offset = ofs.ofs.cipher.head;
- cipher_param->cipher_length = cipher_len;
- set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
- auth_param->auth_off = ofs.ofs.auth.head;
- auth_param->auth_len = auth_len;
- auth_param->auth_res_addr = digest->iova;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
- case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
- auth_param->u1.aad_adr = auth_iv->iova;
- break;
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- break;
- default:
- break;
- }
-
- if (unlikely(n_data_vecs > 1)) {
- int auth_end_get = 0, i = n_data_vecs - 1;
- struct rte_crypto_vec *cvec = &data[0];
- uint32_t len;
-
- len = data_len - ofs.ofs.auth.tail;
-
- while (i >= 0 && len > 0) {
- if (cvec->len >= len) {
- auth_iova_end = cvec->iova + len;
- len = 0;
- auth_end_get = 1;
- break;
- }
- len -= cvec->len;
- i--;
- cvec++;
- }
-
- if (unlikely(auth_end_get == 0))
- return -1;
- } else
- auth_iova_end = data[0].iova + auth_param->auth_off +
- auth_param->auth_len;
-
- /* Then check if digest-encrypted conditions are met */
- if ((auth_param->auth_off + auth_param->auth_len <
- cipher_param->cipher_offset +
- cipher_param->cipher_length) &&
- (digest->iova == auth_iova_end)) {
- /* Handle partial digest encryption */
- if (cipher_param->cipher_offset +
- cipher_param->cipher_length <
- auth_param->auth_off +
- auth_param->auth_len +
- ctx->digest_length)
- req->comn_mid.dst_length =
- req->comn_mid.src_length =
- auth_param->auth_off +
- auth_param->auth_len +
- ctx->digest_length;
- struct icp_qat_fw_comn_req_hdr *header =
- &req->comn_hdr;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
- header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- }
-
- return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_vec *data, uint16_t n_data_vecs,
- union rte_crypto_sym_ofs ofs,
- struct rte_crypto_va_iova_ptr *cipher_iv,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *auth_iv,
- void *user_data)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
- uint32_t tail = dp_ctx->tail;
-
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
- data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
- if (unlikely(data_len < 0))
- return -1;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
- if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
- cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
- return -1;
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue++;
-
- return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
- void *user_data[], int *status)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- uint32_t i, n;
- uint32_t tail;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
-
- n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
- if (unlikely(n == 0)) {
- qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
- *status = 0;
- return 0;
- }
-
- tail = dp_ctx->tail;
-
- for (i = 0; i < n; i++) {
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
- data_len = qat_sym_dp_parse_data_vec(qp, req,
- vec->src_sgl[i].vec,
- vec->src_sgl[i].num);
- if (unlikely(data_len < 0))
- break;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
- if (unlikely(enqueue_one_chain_job(ctx, req,
- vec->src_sgl[i].vec, vec->src_sgl[i].num,
- &vec->iv[i], &vec->digest[i],
- &vec->auth_iv[i], ofs, (uint32_t)data_len)))
- break;
-
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- }
-
- if (unlikely(i < n))
- qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue += i;
- *status = 0;
- return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct rte_crypto_va_iova_ptr *iv,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *aad,
- union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
- struct icp_qat_fw_la_cipher_req_params *cipher_param =
- (void *)&req->serv_specif_rqpars;
- struct icp_qat_fw_la_auth_req_params *auth_param =
- (void *)((uint8_t *)&req->serv_specif_rqpars +
- ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
- uint8_t *aad_data;
- uint8_t aad_ccm_real_len;
- uint8_t aad_len_field_sz;
- uint32_t msg_len_be;
- rte_iova_t aad_iova = 0;
- uint8_t q;
-
- /* CPM 1.7 uses single pass to treat AEAD as cipher operation */
- if (ctx->is_single_pass) {
- enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
- cipher_param->spc_aad_addr = aad->iova;
- cipher_param->spc_auth_res_addr = digest->iova;
- return;
- }
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
- ctx->cipher_iv.length);
- aad_iova = aad->iova;
- break;
- case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
- aad_data = aad->va;
- aad_iova = aad->iova;
- aad_ccm_real_len = 0;
- aad_len_field_sz = 0;
- msg_len_be = rte_bswap32((uint32_t)data_len -
- ofs.ofs.cipher.head);
-
- if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
- aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
- aad_ccm_real_len = ctx->aad_len -
- ICP_QAT_HW_CCM_AAD_B0_LEN -
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- } else {
- aad_data = iv->va;
- aad_iova = iv->iova;
- }
-
- q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
- aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
- aad_len_field_sz, ctx->digest_length, q);
- if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
- (uint8_t *)&msg_len_be,
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
- } else {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)&msg_len_be +
- (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- - q), q);
- }
-
- if (aad_len_field_sz > 0) {
- *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
- rte_bswap16(aad_ccm_real_len);
-
- if ((aad_ccm_real_len + aad_len_field_sz)
- % ICP_QAT_HW_CCM_AAD_B0_LEN) {
- uint8_t pad_len = 0;
- uint8_t pad_idx = 0;
-
- pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
- ((aad_ccm_real_len +
- aad_len_field_sz) %
- ICP_QAT_HW_CCM_AAD_B0_LEN);
- pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
- aad_ccm_real_len +
- aad_len_field_sz;
- memset(&aad_data[pad_idx], 0, pad_len);
- }
- }
-
- rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
- + ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)iv->va +
- ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
- *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
- q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
- rte_memcpy((uint8_t *)aad->va +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
- ctx->cipher_iv.length);
- break;
- default:
- break;
- }
-
- cipher_param->cipher_offset = ofs.ofs.cipher.head;
- cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
- ofs.ofs.cipher.tail;
- auth_param->auth_off = ofs.ofs.cipher.head;
- auth_param->auth_len = cipher_param->cipher_length;
- auth_param->auth_res_addr = digest->iova;
- auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_vec *data, uint16_t n_data_vecs,
- union rte_crypto_sym_ofs ofs,
- struct rte_crypto_va_iova_ptr *iv,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *aad,
- void *user_data)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
- uint32_t tail = dp_ctx->tail;
-
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
- rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
- data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
- if (unlikely(data_len < 0))
- return -1;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
- enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
- (uint32_t)data_len);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue++;
-
- return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
- struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
- void *user_data[], int *status)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_session *ctx = dp_ctx->session;
- uint32_t i, n;
- uint32_t tail;
- struct icp_qat_fw_la_bulk_req *req;
- int32_t data_len;
-
- n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
- if (unlikely(n == 0)) {
- qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
- *status = 0;
- return 0;
- }
-
- tail = dp_ctx->tail;
-
- for (i = 0; i < n; i++) {
- req = (struct icp_qat_fw_la_bulk_req *)(
- (uint8_t *)tx_queue->base_addr + tail);
- rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
- data_len = qat_sym_dp_parse_data_vec(qp, req,
- vec->src_sgl[i].vec,
- vec->src_sgl[i].num);
- if (unlikely(data_len < 0))
- break;
- req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
- enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
- &vec->aad[i], ofs, (uint32_t)data_len);
- tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
- }
-
- if (unlikely(i < n))
- qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
- dp_ctx->tail = tail;
- dp_ctx->cached_enqueue += i;
- *status = 0;
- return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
- rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
- uint32_t max_nb_to_dequeue,
- rte_cryptodev_raw_post_dequeue_t post_dequeue,
- void **out_user_data, uint8_t is_user_data_array,
- uint32_t *n_success_jobs, int *return_status)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *rx_queue = &qp->rx_q;
- struct icp_qat_fw_comn_resp *resp;
- void *resp_opaque;
- uint32_t i, n, inflight;
- uint32_t head;
- uint8_t status;
-
- *n_success_jobs = 0;
- *return_status = 0;
- head = dp_ctx->head;
-
- inflight = qp->enqueued - qp->dequeued;
- if (unlikely(inflight == 0))
- return 0;
-
- resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
- head);
- /* no operation ready */
- if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
- return 0;
-
- resp_opaque = (void *)(uintptr_t)resp->opaque_data;
- /* get the dequeue count */
- if (get_dequeue_count) {
- n = get_dequeue_count(resp_opaque);
- if (unlikely(n == 0))
- return 0;
- } else {
- if (unlikely(max_nb_to_dequeue == 0))
- return 0;
- n = max_nb_to_dequeue;
- }
-
- out_user_data[0] = resp_opaque;
- status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
- post_dequeue(resp_opaque, 0, status);
- *n_success_jobs += status;
-
- head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
- /* we already finished dequeue when n == 1 */
- if (unlikely(n == 1)) {
- i = 1;
- goto end_deq;
- }
-
- if (is_user_data_array) {
- for (i = 1; i < n; i++) {
- resp = (struct icp_qat_fw_comn_resp *)(
- (uint8_t *)rx_queue->base_addr + head);
- if (unlikely(*(uint32_t *)resp ==
- ADF_RING_EMPTY_SIG))
- goto end_deq;
- out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
- status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
- *n_success_jobs += status;
- post_dequeue(out_user_data[i], i, status);
- head = (head + rx_queue->msg_size) &
- rx_queue->modulo_mask;
- }
-
- goto end_deq;
- }
-
- /* opaque is not array */
- for (i = 1; i < n; i++) {
- resp = (struct icp_qat_fw_comn_resp *)(
- (uint8_t *)rx_queue->base_addr + head);
- status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
- if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
- goto end_deq;
- head = (head + rx_queue->msg_size) &
- rx_queue->modulo_mask;
- post_dequeue(resp_opaque, i, status);
- *n_success_jobs += status;
- }
-
-end_deq:
- dp_ctx->head = head;
- dp_ctx->cached_dequeue += i;
- return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
- enum rte_crypto_op_status *op_status)
-{
- struct qat_qp *qp = qp_data;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
- struct qat_queue *rx_queue = &qp->rx_q;
- register struct icp_qat_fw_comn_resp *resp;
-
- resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
- dp_ctx->head);
-
- if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
- return NULL;
-
- dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
- rx_queue->modulo_mask;
- dp_ctx->cached_dequeue++;
-
- *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
- RTE_CRYPTO_OP_STATUS_SUCCESS :
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- *dequeue_status = 0;
- return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
- struct qat_qp *qp = qp_data;
- struct qat_queue *tx_queue = &qp->tx_q;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
- if (unlikely(dp_ctx->cached_enqueue != n))
- return -1;
-
- qp->enqueued += n;
- qp->stats.enqueued_count += n;
-
- tx_queue->tail = dp_ctx->tail;
-
- WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
- tx_queue->hw_bundle_number,
- tx_queue->hw_queue_number, tx_queue->tail);
- tx_queue->csr_tail = tx_queue->tail;
- dp_ctx->cached_enqueue = 0;
-
- return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
- struct qat_qp *qp = qp_data;
- struct qat_queue *rx_queue = &qp->rx_q;
- struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
- if (unlikely(dp_ctx->cached_dequeue != n))
- return -1;
-
- rx_queue->head = dp_ctx->head;
- rx_queue->nb_processed_responses += n;
- qp->dequeued += n;
- qp->stats.dequeued_count += n;
- if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
- uint32_t old_head, new_head;
- uint32_t max_head;
-
- old_head = rx_queue->csr_head;
- new_head = rx_queue->head;
- max_head = qp->nb_descriptors * rx_queue->msg_size;
-
- /* write out free descriptors */
- void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
- if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
- max_head - old_head);
- memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
- new_head);
- } else {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
- old_head);
- }
- rx_queue->nb_processed_responses = 0;
- rx_queue->csr_head = new_head;
-
- /* write current head to CSR */
- WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
- rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
- new_head);
- }
-
- dp_ctx->cached_dequeue = 0;
- return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
- struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
- enum rte_crypto_op_sess_type sess_type,
- union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
- struct qat_qp *qp;
- struct qat_sym_session *ctx;
- struct qat_sym_dp_ctx *dp_ctx;
-
- qp = dev->data->queue_pairs[qp_id];
- dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
- if (!is_update) {
- memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
- sizeof(struct qat_sym_dp_ctx));
- raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
- dp_ctx->tail = qp->tx_q.tail;
- dp_ctx->head = qp->rx_q.head;
- dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
- }
-
- if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
- return -EINVAL;
-
- ctx = (struct qat_sym_session *)get_sym_session_private_data(
- session_ctx.crypto_sess, qat_sym_driver_id);
-
- dp_ctx->session = ctx;
-
- raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
- raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
- raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
- raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
- if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
- !ctx->is_gmac) {
- /* AES-GCM or AES-CCM */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
- (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
- && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
- && ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
- raw_dp_ctx->enqueue_burst =
- qat_sym_dp_enqueue_aead_jobs;
- raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
- } else {
- raw_dp_ctx->enqueue_burst =
- qat_sym_dp_enqueue_chain_jobs;
- raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
- }
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
- raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
- raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
- ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
- raw_dp_ctx->enqueue_burst =
- qat_sym_dp_enqueue_aead_jobs;
- raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
- } else {
- raw_dp_ctx->enqueue_burst =
- qat_sym_dp_enqueue_cipher_jobs;
- raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
- }
- } else
- return -1;
-
- return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
- return sizeof(struct qat_sym_dp_ctx);
-}
deleted file mode 100644
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
- struct qat_sym_op_cookie *cookie = op_cookie;
-
- cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_sym_op_cookie,
- qat_sgl_src);
-
- cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_sym_op_cookie,
- qat_sgl_dst);
-
- cookie->opt.spc_gmac.cd_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_sym_op_cookie,
- opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
- .name = qat_sym_drv_name,
- .alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
- struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
- int i = 0, ret = 0;
- struct qat_device_info *qat_dev_instance =
- &qat_pci_devs[qat_pci_dev->qat_dev_id];
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = qat_dev_instance->pci_dev->device.numa_node,
- .private_data_size = sizeof(struct qat_cryptodev_private)
- };
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- struct rte_cryptodev *cryptodev;
- struct qat_cryptodev_private *internals;
- struct qat_capabilities_info capa_info;
- const struct rte_cryptodev_capabilities *capabilities;
- const struct qat_crypto_gen_dev_ops *gen_dev_ops =
- &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
- uint64_t capa_size;
-
- snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
- qat_pci_dev->name, "sym");
- QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
- if (gen_dev_ops->cryptodev_ops == NULL) {
- QAT_LOG(ERR, "Device %s does not support symmetric crypto",
- name);
- return -EFAULT;
- }
-
- /*
- * All processes must use same driver id so they can share sessions.
- * Store driver_id so we can validate that all processes have the same
- * value, typically they have, but could differ if binaries built
- * separately.
- */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- qat_pci_dev->qat_sym_driver_id =
- qat_sym_driver_id;
- } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- if (qat_pci_dev->qat_sym_driver_id !=
- qat_sym_driver_id) {
- QAT_LOG(ERR,
- "Device %s have different driver id than corresponding device in primary process",
- name);
- return -(EFAULT);
- }
- }
-
- /* Populate subset device to use in cryptodev device creation */
- qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
- qat_dev_instance->sym_rte_dev.numa_node =
- qat_dev_instance->pci_dev->device.numa_node;
- qat_dev_instance->sym_rte_dev.devargs = NULL;
-
- cryptodev = rte_cryptodev_pmd_create(name,
- &(qat_dev_instance->sym_rte_dev), &init_params);
-
- if (cryptodev == NULL)
- return -ENODEV;
-
- qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
- cryptodev->driver_id = qat_sym_driver_id;
- cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
- cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
- cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
-#ifdef RTE_LIB_SECURITY
- if (gen_dev_ops->create_security_ctx) {
- cryptodev->security_ctx =
- gen_dev_ops->create_security_ctx((void *)cryptodev);
- if (cryptodev->security_ctx == NULL) {
- QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
- ret = -ENOMEM;
- goto error;
- }
-
- cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
- QAT_LOG(INFO, "Device %s rte_security support enabled", name);
- } else
- QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
- snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
- "QAT_SYM_CAPA_GEN_%d",
- qat_pci_dev->qat_dev_gen);
-
- internals = cryptodev->data->dev_private;
- internals->qat_dev = qat_pci_dev;
- internals->service_type = QAT_SERVICE_SYMMETRIC;
- internals->dev_id = cryptodev->data->dev_id;
-
- capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
- capabilities = capa_info.data;
- capa_size = capa_info.size;
-
- internals->capa_mz = rte_memzone_lookup(capa_memz_name);
- if (internals->capa_mz == NULL) {
- internals->capa_mz = rte_memzone_reserve(capa_memz_name,
- capa_size, rte_socket_id(), 0);
- if (internals->capa_mz == NULL) {
- QAT_LOG(DEBUG,
- "Error allocating capability memzon for %s",
- name);
- ret = -EFAULT;
- goto error;
- }
- }
-
- memcpy(internals->capa_mz->addr, capabilities, capa_size);
- internals->qat_dev_capabilities = internals->capa_mz->addr;
-
- while (1) {
- if (qat_dev_cmd_param[i].name == NULL)
- break;
- if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
- internals->min_enq_burst_threshold =
- qat_dev_cmd_param[i].val;
- i++;
- }
-
- qat_pci_dev->sym_dev = internals;
- QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
- cryptodev->data->name, internals->dev_id);
-
- rte_cryptodev_pmd_probing_finish(cryptodev);
-
- return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
- rte_free(cryptodev->security_ctx);
- cryptodev->security_ctx = NULL;
-#endif
- rte_cryptodev_pmd_destroy(cryptodev);
- memset(&qat_dev_instance->sym_rte_dev, 0,
- sizeof(qat_dev_instance->sym_rte_dev));
-
- return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
- struct rte_cryptodev *cryptodev;
-
- if (qat_pci_dev == NULL)
- return -ENODEV;
- if (qat_pci_dev->sym_dev == NULL)
- return 0;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
- /* free crypto device */
- cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
- rte_free(cryptodev->security_ctx);
- cryptodev->security_ctx = NULL;
-#endif
- rte_cryptodev_pmd_destroy(cryptodev);
- qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
- qat_pci_dev->sym_dev = NULL;
-
- return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
- cryptodev_qat_sym_driver,
- qat_sym_driver_id);
deleted file mode 100644
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0)
-#define QAT_SYM_CAP_VALID (1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_##n, \
- b, d \
- }, } \
- }, } \
- }
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_##n, \
- b, k, d, a, i \
- }, } \
- }, } \
- }
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
- {.aead = { \
- .algo = RTE_CRYPTO_AEAD_##n, \
- b, k, d, a, i \
- }, } \
- }, } \
- }
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
- {.cipher = { \
- .algo = RTE_CRYPTO_CIPHER_##n, \
- b, k, i \
- }, } \
- }, } \
- }
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
- struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
deleted file mode 100644
@@ -1,360 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
- */
-
-#include <openssl/evp.h>
-
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_crypto_sym.h>
-#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
-
-#include "qat_sym_refactor.h"
-#include "qat_crypto.h"
-#include "qat_qp.h"
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
- .name = qat_sym_drv_name,
- .alias = qat_sym_drv_name
-};
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
- struct qat_sym_op_cookie *cookie = op_cookie;
-
- cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_sym_op_cookie,
- qat_sgl_src);
-
- cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_sym_op_cookie,
- qat_sgl_dst);
-
- cookie->opt.spc_gmac.cd_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_sym_op_cookie,
- opt.spc_gmac.cd_cipher);
-}
-
-static __rte_always_inline int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
-{
- struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
- void *sess = (void *)opaque[0];
- qat_sym_build_request_t build_request = (void *)opaque[1];
- struct qat_sym_session *ctx = NULL;
-
- if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
- ctx = get_sym_session_private_data(op->sym->session,
- qat_sym_driver_id);
- if (unlikely(!ctx)) {
- QAT_DP_LOG(ERR, "No session for this device");
- return -EINVAL;
- }
- if (sess != ctx) {
- struct rte_cryptodev *cdev;
- struct qat_cryptodev_private *internals;
- enum rte_proc_type_t proc_type;
-
- cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
- internals = cdev->data->dev_private;
- proc_type = rte_eal_process_type();
-
- if (internals->qat_dev->qat_dev_gen != dev_gen) {
- op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
-
- if (unlikely(ctx->build_request[proc_type] == NULL)) {
- int ret =
- qat_sym_gen_dev_ops[dev_gen].set_session(
- (void *)cdev, sess);
- if (ret < 0) {
- op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
- }
-
- build_request = ctx->build_request[proc_type];
- opaque[0] = (uintptr_t)ctx;
- opaque[1] = (uintptr_t)build_request;
- }
- }
-
-#ifdef RTE_LIB_SECURITY
- else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
- if (sess != (void *)op->sym->sec_session) {
- struct rte_cryptodev *cdev;
- struct qat_cryptodev_private *internals;
- enum rte_proc_type_t proc_type;
-
- ctx = get_sec_session_private_data(
- op->sym->sec_session);
- if (unlikely(!ctx)) {
- QAT_DP_LOG(ERR, "No session for this device");
- return -EINVAL;
- }
- if (unlikely(ctx->bpi_ctx == NULL)) {
- QAT_DP_LOG(ERR, "QAT PMD only supports security"
- " operation requests for"
- " DOCSIS, op (%p) is not for"
- " DOCSIS.", op);
- return -EINVAL;
- } else if (unlikely(((op->sym->m_dst != NULL) &&
- (op->sym->m_dst != op->sym->m_src)) ||
- op->sym->m_src->nb_segs > 1)) {
- QAT_DP_LOG(ERR, "OOP and/or multi-segment"
- " buffers not supported for"
- " DOCSIS security.");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
- internals = cdev->data->dev_private;
- proc_type = rte_eal_process_type();
-
- if (internals->qat_dev->qat_dev_gen != dev_gen) {
- op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
-
- if (unlikely(ctx->build_request[proc_type] == NULL)) {
- int ret =
- qat_sym_gen_dev_ops[dev_gen].set_session(
- (void *)cdev, sess);
- if (ret < 0) {
- op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
- }
-
- sess = (void *)op->sym->sec_session;
- build_request = ctx->build_request[proc_type];
- opaque[0] = (uintptr_t)sess;
- opaque[1] = (uintptr_t)build_request;
- }
- }
-#endif
- else { /* RTE_CRYPTO_OP_SESSIONLESS */
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- QAT_LOG(DEBUG, "QAT does not support sessionless operation");
- return -1;
- }
-
- return build_request(op, (void *)ctx, out_msg, op_cookie);
-}
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_enqueue_op_burst(qp, qat_sym_build_request,
- (void **)ops, nb_ops);
-}
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_dequeue_op_burst(qp, (void **)ops,
- qat_sym_process_response, nb_ops);
-}
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
- struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
- int i = 0, ret = 0;
- struct qat_device_info *qat_dev_instance =
- &qat_pci_devs[qat_pci_dev->qat_dev_id];
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = qat_dev_instance->pci_dev->device.numa_node,
- .private_data_size = sizeof(struct qat_cryptodev_private)
- };
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- struct rte_cryptodev *cryptodev;
- struct qat_cryptodev_private *internals;
- struct qat_capabilities_info capa_info;
- const struct rte_cryptodev_capabilities *capabilities;
- const struct qat_crypto_gen_dev_ops *gen_dev_ops =
- &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
- uint64_t capa_size;
-
- snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
- qat_pci_dev->name, "sym");
- QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
- if (gen_dev_ops->cryptodev_ops == NULL) {
- QAT_LOG(ERR, "Device %s does not support symmetric crypto",
- name);
- return -(EFAULT);
- }
-
- /*
- * All processes must use same driver id so they can share sessions.
- * Store driver_id so we can validate that all processes have the same
- * value, typically they have, but could differ if binaries built
- * separately.
- */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- qat_pci_dev->qat_sym_driver_id =
- qat_sym_driver_id;
- } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- if (qat_pci_dev->qat_sym_driver_id !=
- qat_sym_driver_id) {
- QAT_LOG(ERR,
- "Device %s have different driver id than corresponding device in primary process",
- name);
- return -(EFAULT);
- }
- }
-
- /* Populate subset device to use in cryptodev device creation */
- qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
- qat_dev_instance->sym_rte_dev.numa_node =
- qat_dev_instance->pci_dev->device.numa_node;
- qat_dev_instance->sym_rte_dev.devargs = NULL;
-
- cryptodev = rte_cryptodev_pmd_create(name,
- &(qat_dev_instance->sym_rte_dev), &init_params);
-
- if (cryptodev == NULL)
- return -ENODEV;
-
- qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
- cryptodev->driver_id = qat_sym_driver_id;
- cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
- cryptodev->enqueue_burst = qat_sym_enqueue_burst;
- cryptodev->dequeue_burst = qat_sym_dequeue_burst;
-
- cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
-#ifdef RTE_LIB_SECURITY
- if (gen_dev_ops->create_security_ctx) {
- cryptodev->security_ctx =
- gen_dev_ops->create_security_ctx((void *)cryptodev);
- if (cryptodev->security_ctx == NULL) {
- QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
- ret = -ENOMEM;
- goto error;
- }
-
- cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
- QAT_LOG(INFO, "Device %s rte_security support enabled", name);
- } else {
- QAT_LOG(INFO, "Device %s rte_security support disabled", name);
- }
-#endif
- snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
- "QAT_SYM_CAPA_GEN_%d",
- qat_pci_dev->qat_dev_gen);
-
- internals = cryptodev->data->dev_private;
- internals->qat_dev = qat_pci_dev;
-
- internals->dev_id = cryptodev->data->dev_id;
-
- capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
- capabilities = capa_info.data;
- capa_size = capa_info.size;
-
- internals->capa_mz = rte_memzone_lookup(capa_memz_name);
- if (internals->capa_mz == NULL) {
- internals->capa_mz = rte_memzone_reserve(capa_memz_name,
- capa_size, rte_socket_id(), 0);
- if (internals->capa_mz == NULL) {
- QAT_LOG(DEBUG,
- "Error allocating memzone for capabilities, "
- "destroying PMD for %s",
- name);
- ret = -EFAULT;
- goto error;
- }
- }
-
- memcpy(internals->capa_mz->addr, capabilities, capa_size);
- internals->qat_dev_capabilities = internals->capa_mz->addr;
-
- while (1) {
- if (qat_dev_cmd_param[i].name == NULL)
- break;
- if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
- internals->min_enq_burst_threshold =
- qat_dev_cmd_param[i].val;
- i++;
- }
-
- internals->service_type = QAT_SERVICE_SYMMETRIC;
- qat_pci_dev->sym_dev = internals;
- QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
- cryptodev->data->name, internals->dev_id);
-
- return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
- rte_free(cryptodev->security_ctx);
- cryptodev->security_ctx = NULL;
-#endif
- rte_cryptodev_pmd_destroy(cryptodev);
- memset(&qat_dev_instance->sym_rte_dev, 0,
- sizeof(qat_dev_instance->sym_rte_dev));
-
- return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
- struct rte_cryptodev *cryptodev;
-
- if (qat_pci_dev == NULL)
- return -ENODEV;
- if (qat_pci_dev->sym_dev == NULL)
- return 0;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
- /* free crypto device */
- cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
- rte_free(cryptodev->security_ctx);
- cryptodev->security_ctx = NULL;
-#endif
- rte_cryptodev_pmd_destroy(cryptodev);
- qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
- qat_pci_dev->sym_dev = NULL;
-
- return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
- cryptodev_qat_sym_driver,
- qat_sym_driver_id);
deleted file mode 100644
@@ -1,402 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_H_
-#define _QAT_SYM_H_
-
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_net_crc.h>
-#endif
-
-#include <openssl/evp.h>
-
-#include "qat_common.h"
-#include "qat_sym_session.h"
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
-
-#define BYTE_LENGTH 8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0)
-#define QAT_SYM_CAP_VALID (1 << 31)
-
-/* Macro to add a capability */
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_##n, \
- b, d \
- }, } \
- }, } \
- }
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_##n, \
- b, k, d, a, i \
- }, } \
- }, } \
- }
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
- {.aead = { \
- .algo = RTE_CRYPTO_AEAD_##n, \
- b, k, d, a, i \
- }, } \
- }, } \
- }
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i) \
- { \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
- {.cipher = { \
- .algo = RTE_CRYPTO_CIPHER_##n, \
- b, k, i \
- }, } \
- }, } \
- }
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SYM_SGL_MAX_NUMBER 16
-
-/* Maximum data length for single pass GMAC: 2^14-1 */
-#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
-
-struct qat_sym_session;
-
-struct qat_sym_sgl {
- qat_sgl_hdr;
- struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_sym_op_cookie {
- struct qat_sym_sgl qat_sgl_src;
- struct qat_sym_sgl qat_sgl_dst;
- phys_addr_t qat_sgl_src_phys_addr;
- phys_addr_t qat_sgl_dst_phys_addr;
- union {
- /* Used for Single-Pass AES-GMAC only */
- struct {
- struct icp_qat_hw_cipher_algo_blk cd_cipher
- __rte_packed __rte_cache_aligned;
- phys_addr_t cd_phys_addr;
- } spc_gmac;
- } opt;
-};
-
-struct qat_sym_dp_ctx {
- struct qat_sym_session *session;
- uint32_t tail;
- uint32_t head;
- uint16_t cached_enqueue;
- uint16_t cached_dequeue;
-};
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-/** Encrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt the IV, then XOR this with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_encrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_encrypt_err:
- QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
- return -EINVAL;
-}
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_sym_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len > 0 &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
- /* Encrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset;
-
- last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = dst - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
- last_block, last_block_len);
- if (sym_op->m_dst != NULL)
- QAT_DP_HEXDUMP_LOG(DEBUG,
- "BPI: dst before post-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_encrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
- last_block, last_block_len);
- if (sym_op->m_dst != NULL)
- QAT_DP_HEXDUMP_LOG(DEBUG,
- "BPI: dst after post-process:",
- dst, last_block_len);
-#endif
- }
- return sym_op->cipher.data.length - last_block_len;
-}
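
A worked example of the offsets above, assuming a 16-byte block cipher and a 40-byte cipher.data.length (numbers are illustrative only):

/* last_block_len    = 40 % 16 = 8             trailing partial block
 * last_block_offset = cipher.data.offset + 32
 * iv                = dst - 16                previous ciphertext block
 * A region shorter than one block ("runt") falls back to the session IV
 * taken from ctx->cipher_iv.offset instead.
 */
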
-
-#ifdef RTE_LIB_SECURITY
-static inline void
-qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
-{
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint32_t crc_data_ofs, crc_data_len, crc;
- uint8_t *crc_data;
-
- if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
- sym_op->auth.data.length != 0) {
-
- crc_data_ofs = sym_op->auth.data.offset;
- crc_data_len = sym_op->auth.data.length;
- crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
- crc_data_ofs);
-
- crc = rte_net_crc_calc(crc_data, crc_data_len,
- RTE_NET_CRC32_ETH);
-
- if (crc != *(uint32_t *)(crc_data + crc_data_len))
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- }
-}
-
-static inline void
-qat_crc_generate(struct qat_sym_session *ctx,
- struct rte_crypto_op *op)
-{
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint32_t *crc, crc_data_len;
- uint8_t *crc_data;
-
- if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
- sym_op->auth.data.length != 0 &&
- sym_op->m_src->nb_segs == 1) {
-
- crc_data_len = sym_op->auth.data.length;
- crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
- sym_op->auth.data.offset);
- crc = (uint32_t *)(crc_data + crc_data_len);
- *crc = rte_net_crc_calc(crc_data, crc_data_len,
- RTE_NET_CRC32_ETH);
- }
-}
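
The generate/verify pair above computes an Ethernet CRC32 over the auth region and stores or checks it in the four bytes immediately following that region. The same layout can be reproduced in a few lines of application code; a minimal sketch (helper name is made up, and the buffer must have four spare bytes past len):

#include <stdint.h>
#include <string.h>
#include <rte_net_crc.h>

/* Sketch: append an Ethernet CRC32 over len bytes at data, mirroring the
 * CRC-after-auth-region layout assumed by qat_crc_generate()/qat_crc_verify().
 */
static void
example_append_crc32_eth(uint8_t *data, uint32_t len)
{
	uint32_t crc = rte_net_crc_calc(data, len, RTE_NET_CRC32_ETH);

	memcpy(data + len, &crc, sizeof(crc));
}
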
-
-static inline void
-qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
-{
- struct rte_crypto_op *op;
- struct qat_sym_session *ctx;
- uint16_t i;
-
- for (i = 0; i < nb_ops; i++) {
- op = (struct rte_crypto_op *)ops[i];
-
- if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
- ctx = (struct qat_sym_session *)
- get_sec_session_private_data(
- op->sym->sec_session);
-
- if (ctx == NULL || ctx->bpi_ctx == NULL)
- continue;
-
- qat_crc_generate(ctx, op);
- }
- }
-}
-#endif
-
-static __rte_always_inline int
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
- uint64_t *dequeue_err_count __rte_unused)
-{
- struct icp_qat_fw_comn_resp *resp_msg =
- (struct icp_qat_fw_comn_resp *)resp;
- struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
- struct qat_sym_session *sess;
- uint8_t is_docsis_sec;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
-
-#ifdef RTE_LIB_SECURITY
- if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
- /*
- * Assuming at this point that if it's a security
- * op, that this is for DOCSIS
- */
- sess = (struct qat_sym_session *)
- get_sec_session_private_data(
- rx_op->sym->sec_session);
- is_docsis_sec = 1;
- } else
-#endif
- {
- sess = (struct qat_sym_session *)
- get_sym_session_private_data(
- rx_op->sym->session,
- qat_sym_driver_id);
- is_docsis_sec = 0;
- }
-
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
-
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- if (sess->bpi_ctx) {
- qat_bpicipher_postprocess(sess, rx_op);
-#ifdef RTE_LIB_SECURITY
- if (is_docsis_sec)
- qat_crc_verify(sess, rx_op);
-#endif
- }
- }
-
- if (sess->is_single_pass_gmac) {
- struct qat_sym_op_cookie *cookie =
- (struct qat_sym_op_cookie *) op_cookie;
- memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
- sess->auth_key_length);
- }
-
- *op = (void *)rx_op;
-
- return 1;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
- struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
- enum rte_crypto_op_sess_type sess_type,
- union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
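
These two entry points back DPDK's raw data-path API; a hedged sketch of how an application reaches them through the generic rte_cryptodev calls, assuming queue pair 0 and an already-created symmetric session (helper name is made up):

#include <stdlib.h>
#include <rte_cryptodev.h>

/* Sketch: allocate and configure a raw data-path context; the cryptodev
 * layer routes these calls to the PMD's qat_sym_get_dp_ctx_size() and
 * qat_sym_configure_dp_ctx() declared above.
 */
static struct rte_crypto_raw_dp_ctx *
example_raw_dp_setup(uint8_t dev_id, void *sym_session)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sym_session };
	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	struct rte_crypto_raw_dp_ctx *ctx;

	if (size < 0)
		return NULL;
	ctx = calloc(1, size);
	if (ctx == NULL)
		return NULL;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, 0, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		free(ctx);
		return NULL;
	}
	return ctx;
}
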
-
-void
-qat_sym_init_op_cookie(void *cookie);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
- struct qat_sym_session *ctx,
- struct rte_crypto_vec *vec, uint32_t vec_len,
- struct rte_crypto_va_iova_ptr *cipher_iv,
- struct rte_crypto_va_iova_ptr *auth_iv,
- struct rte_crypto_va_iova_ptr *aad,
- struct rte_crypto_va_iova_ptr *digest)
-{
- uint32_t i;
-
- QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_la_bulk_req));
- for (i = 0; i < vec_len; i++)
- QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
- if (cipher_iv && ctx->cipher_iv.length > 0)
- QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
- ctx->cipher_iv.length);
- if (auth_iv && ctx->auth_iv.length > 0)
- QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
- ctx->auth_iv.length);
- if (aad && ctx->aad_len > 0)
- QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
- ctx->aad_len);
- if (digest && ctx->digest_length > 0)
- QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
- ctx->digest_length);
-}
-#else
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
- struct qat_sym_session *ctx __rte_unused,
- struct rte_crypto_vec *vec __rte_unused,
- uint32_t vec_len __rte_unused,
- struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
- struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
- struct rte_crypto_va_iova_ptr *aad __rte_unused,
- struct rte_crypto_va_iova_ptr *digest __rte_unused)
-{}
-#endif
-
-#endif /* _QAT_SYM_H_ */
@@ -20,7 +20,7 @@
#include "qat_logs.h"
#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
@@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
session->is_auth = 1;
session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	/* Chacha-Poly is a special case that uses QAT CTR mode */
- if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
- } else {
+ else
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- }
+
session->cipher_iv.offset = aead_xform->iv.offset;
session->cipher_iv.length = aead_xform->iv.length;
session->aad_len = aead_xform->aad_length;