@@ -550,8 +550,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
}
uint16_t
-qat_enqueue_op_burst(void *qp,
- __rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
void **ops, uint16_t nb_ops)
{
register struct qat_queue *queue;
@@ -602,29 +601,18 @@ qat_enqueue_op_burst(void *qp,
}
}
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
qat_sym_preprocess_requests(ops, nb_ops_possible);
#endif
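+	/* Reset the cached session/callback pair so the first op of each
+	 * burst performs a full session lookup; subsequent ops in the burst
+	 * that use the same session reuse the cache.
+	 */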
+ memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
while (nb_ops_sent != nb_ops_possible) {
- if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
- ret = qat_sym_build_request(*ops, base_addr + tail,
- tmp_qp->op_cookies[tail >> queue->trailz],
- tmp_qp->qat_dev_gen);
-#endif
- } else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
- ret = qat_comp_build_request(*ops, base_addr + tail,
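+		/* The caller now supplies the service-specific build routine,
+		 * so no per-service branching is needed here.
+		 */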
+ ret = op_build_request(*ops, base_addr + tail,
tmp_qp->op_cookies[tail >> queue->trailz],
- tmp_qp->qat_dev_gen);
- } else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
- ret = qat_asym_build_request(*ops, base_addr + tail,
- tmp_qp->op_cookies[tail >> queue->trailz],
- NULL, tmp_qp->qat_dev_gen);
-#endif
- }
+ tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/* This message cannot be enqueued */
@@ -820,8 +808,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
- __rte_unused qat_op_dequeue_t qat_dequeue_process_response,
- uint16_t nb_ops)
+ qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
{
struct qat_queue *rx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -839,21 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
nb_fw_responses = 1;
- if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
- qat_sym_process_response(ops, resp_msg,
- tmp_qp->op_cookies[head >> rx_queue->trailz],
- NULL);
- else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
- nb_fw_responses = qat_comp_process_response(
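+		/* Response processing is likewise delegated to the
+		 * caller-supplied callback.
+		 */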
+ nb_fw_responses = qat_dequeue_process_response(
ops, resp_msg,
tmp_qp->op_cookies[head >> rx_queue->trailz],
&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
- else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
- qat_asym_process_response(ops, resp_msg,
- tmp_qp->op_cookies[head >> rx_queue->trailz],
- NULL);
-#endif
head = adf_modulo(head + rx_queue->msg_size,
rx_queue->modulo_mask);
@@ -14,6 +14,24 @@
struct qat_pci_device;
+/* Default qp configuration for GEN4 devices: one service id per byte,
+ * all qps defaulting to the symmetric service
+ */
+#define QAT_GEN4_QP_DEFCON (QAT_SERVICE_SYMMETRIC | \
+ QAT_SERVICE_SYMMETRIC << 8 | \
+ QAT_SERVICE_SYMMETRIC << 16 | \
+ QAT_SERVICE_SYMMETRIC << 24)
+
+/* QAT GEN4 specific macros */
+#define QAT_GEN4_BUNDLE_NUM 4
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM 1
+
+/* Queue pair setup error codes */
+#define QAT_NOMEM 1
+#define QAT_QP_INVALID_DESC_NO 2
+#define QAT_QP_BUSY 3
+#define QAT_PCI_NO_RESOURCE 4
+
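+/* Ring service arbiter enable CSR offset */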
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
/**
* Structure associated with each queue.
*/
@@ -306,4 +306,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
qat_asym_crypto_cap_get_gen1;
qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
qat_asym_crypto_feature_flags_get_gen1;
+ qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+ qat_asym_crypto_set_session_gen1;
}
@@ -148,10 +148,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
.sym_session_clear = qat_sym_session_clear,
-
- /* Raw data-path API related operations */
- .sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
- .sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};
static struct qat_capabilities_info
@@ -181,6 +177,94 @@ qat_sym_crypto_feature_flags_get_gen1(
return feature_flags;
}
+#ifdef RTE_LIB_SECURITY
+
+#define QAT_SECURITY_SYM_CAPABILITIES \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 16 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SECURITY_CAPABILITIES(sym) \
+ [0] = { /* DOCSIS Uplink */ \
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
+ .protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
+ .docsis = { \
+ .direction = RTE_SECURITY_DOCSIS_UPLINK \
+ }, \
+ .crypto_capabilities = (sym) \
+ }, \
+ [1] = { /* DOCSIS Downlink */ \
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
+ .protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
+ .docsis = { \
+ .direction = RTE_SECURITY_DOCSIS_DOWNLINK \
+ }, \
+ .crypto_capabilities = (sym) \
+ }
+
+static const struct rte_cryptodev_capabilities
+ qat_security_sym_capabilities[] = {
+ QAT_SECURITY_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability qat_security_capabilities_gen1[] = {
+ QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+static const struct rte_security_capability *
+qat_security_cap_get_gen1(void *dev __rte_unused)
+{
+ return qat_security_capabilities_gen1;
+}
+
+struct rte_security_ops security_qat_ops_gen1 = {
+ .session_create = qat_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = qat_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = qat_security_cap_get_gen1
+};
+
+void *
+qat_sym_create_security_gen1(void *cryptodev)
+{
+ struct rte_security_ctx *security_instance;
+
+ security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
+ RTE_CACHE_LINE_SIZE);
+ if (security_instance == NULL)
+ return NULL;
+
+ security_instance->device = cryptodev;
+ security_instance->ops = &security_qat_ops_gen1;
+ security_instance->sess_cnt = 0;
+
+ return (void *)security_instance;
+}
+
+#endif
int
qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
@@ -367,94 +451,6 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
return 0;
}
-#ifdef RTE_LIB_SECURITY
-
-#define QAT_SECURITY_SYM_CAPABILITIES \
- { /* AES DOCSIS BPI */ \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
- {.cipher = { \
- .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
- .block_size = 16, \
- .key_size = { \
- .min = 16, \
- .max = 32, \
- .increment = 16 \
- }, \
- .iv_size = { \
- .min = 16, \
- .max = 16, \
- .increment = 0 \
- } \
- }, } \
- }, } \
- }
-
-#define QAT_SECURITY_CAPABILITIES(sym) \
- [0] = { /* DOCSIS Uplink */ \
- .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
- .protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
- .docsis = { \
- .direction = RTE_SECURITY_DOCSIS_UPLINK \
- }, \
- .crypto_capabilities = (sym) \
- }, \
- [1] = { /* DOCSIS Downlink */ \
- .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
- .protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
- .docsis = { \
- .direction = RTE_SECURITY_DOCSIS_DOWNLINK \
- }, \
- .crypto_capabilities = (sym) \
- }
-
-static const struct rte_cryptodev_capabilities
- qat_security_sym_capabilities[] = {
- QAT_SECURITY_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static const struct rte_security_capability qat_security_capabilities_gen1[] = {
- QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
- {
- .action = RTE_SECURITY_ACTION_TYPE_NONE
- }
-};
-
-static const struct rte_security_capability *
-qat_security_cap_get_gen1(void *dev __rte_unused)
-{
- return qat_security_capabilities_gen1;
-}
-
-struct rte_security_ops security_qat_ops_gen1 = {
- .session_create = qat_security_session_create,
- .session_update = NULL,
- .session_stats_get = NULL,
- .session_destroy = qat_security_session_destroy,
- .set_pkt_metadata = NULL,
- .capabilities_get = qat_security_cap_get_gen1
-};
-
-void *
-qat_sym_create_security_gen1(void *cryptodev)
-{
- struct rte_security_ctx *security_instance;
-
- security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
- RTE_CACHE_LINE_SIZE);
- if (security_instance == NULL)
- return NULL;
-
- security_instance->device = cryptodev;
- security_instance->ops = &security_qat_ops_gen1;
- security_instance->sess_cnt = 0;
-
- return (void *)security_instance;
-}
-
-#endif
int
qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
{
@@ -15,6 +15,10 @@
#include "qat_logs.h"
#include "qat_asym.h"
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
void
qat_asym_init_op_cookie(void *op_cookie)
{
@@ -769,7 +773,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
return 0;
}
-int
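+/*
+ * Build a PKE request for the firmware, partially reusing the template
+ * request generated during session creation. Always returns zero, for
+ * backward compatibility.
+ */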
+static __rte_always_inline int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
__rte_unused uint64_t *opaque,
__rte_unused enum qat_device_gen dev_gen)
@@ -29,10 +29,6 @@ typedef uint64_t large_int_ptr;
#define QAT_ASYM_RSA_NUM_OUT_PARAMS 1
#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6
-uint8_t qat_asym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
/**
* helper function to add an asym capability
* <name> <op type> <modlen (min, max, increment)>
@@ -108,28 +104,6 @@ void
qat_asym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_asym_session *sess);
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param in_op Pointer to the crypto operation, for every
- * service it points to service specific struct.
- * @param out_msg Message to be returned to enqueue function
- * @param op_cookie Cookie pointer that holds private metadata
- * @param qat_dev_gen Generation of QAT hardware
- *
- * @return
- * This function always returns zero,
- * it is because of backward compatibility.
- * - 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie,
- __rte_unused uint64_t *opaque,
- enum qat_device_gen qat_dev_gen);
-
/*
* Process PKE response received from outgoing queue of QAT
*
@@ -12,7 +12,10 @@
extern uint8_t qat_sym_driver_id;
extern uint8_t qat_asym_driver_id;
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min> <r: max> <i: increment> <v: value>
+ **/
#define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
#define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
@@ -61,6 +64,7 @@ struct qat_crypto_gen_dev_ops {
};
extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
int
qat_cryptodev_config(struct rte_cryptodev *dev,
@@ -11,11 +11,25 @@
#include <rte_byteorder.h>
#include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
uint8_t qat_sym_driver_id;
struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+/* An rte_driver is needed in the registration of both the device and the
+ * driver with cryptodev. The qat pci device's own rte_driver can't be used,
+ * as its name represents the whole pci device with all services. This one
+ * simply holds a name for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
void
qat_sym_init_op_cookie(void *op_cookie)
{
@@ -37,246 +51,67 @@ qat_sym_init_op_cookie(void *op_cookie)
opt.spc_gmac.cd_cipher);
}
-/** Decrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_decrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_decrypt_err:
- QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
- return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
- /* Decrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely((sym_op->m_dst != NULL)
- && (sym_op->m_dst != sym_op->m_src)))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = last_block - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
- last_block, last_block_len);
- if (sym_op->m_dst != NULL)
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_decrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
- last_block, last_block_len);
- if (sym_op->m_dst != NULL)
- QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
- dst, last_block_len);
-#endif
- }
-
- return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op,
- struct icp_qat_fw_la_bulk_req *qat_req)
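+/*
+ * Resolve the session and the generation-specific build callback for this
+ * op, caching both in the qp's opaque data so subsequent ops on the same
+ * session within a burst skip the lookup.
+ */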
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
{
- /* copy IV into request if it fits */
- if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset),
- iv_length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- rte_crypto_op_ctophys_offset(op,
- iv_offset);
- }
-}
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ void *sess = (void *)opaque[0];
+ qat_sym_build_request_t build_request = (void *)opaque[1];
+ struct qat_sym_session *ctx = NULL;
-/** Set IV for CCM is special case, 0th byte is set to q-1
- * where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
- rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
- *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
- q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
- if (aad_len_field_sz)
- rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
-}
+ if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+ ctx = get_sym_session_private_data(op->sym->session,
+ qat_sym_driver_id);
+ if (unlikely(!ctx)) {
+ QAT_DP_LOG(ERR, "No session for this device");
+ return -EINVAL;
+ }
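+		/* If the cached session does not match, revalidate the
+		 * device generation and refresh the cached callback.
+		 */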
+ if (sess != ctx) {
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
+ enum rte_proc_type_t proc_type;
+
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+ proc_type = rte_eal_process_type();
+
+ if (internals->qat_dev->qat_dev_gen != dev_gen) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
- struct qat_sym_op_cookie *cookie,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- static const uint32_t ver_key_offset =
- sizeof(struct icp_qat_hw_auth_setup) +
- ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
- ICP_QAT_HW_GALOIS_E_CTR0_SZ +
- sizeof(struct icp_qat_hw_cipher_config);
- struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
- (void *) &qat_req->cd_ctrl;
- struct icp_qat_fw_la_cipher_req_params *cipher_param =
- (void *) &qat_req->serv_specif_rqpars;
- uint32_t data_length = op->sym->auth.data.length;
-
- /* Fill separate Content Descriptor for this op */
- rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
- ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
- ctx->cd.cipher.key :
- RTE_PTR_ADD(&ctx->cd, ver_key_offset),
- ctx->auth_key_length);
- cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(
- ICP_QAT_HW_CIPHER_AEAD_MODE,
- ctx->qat_cipher_alg,
- ICP_QAT_HW_CIPHER_NO_CONVERT,
- (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
- ICP_QAT_HW_CIPHER_ENCRYPT :
- ICP_QAT_HW_CIPHER_DECRYPT));
- QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
- ctx->digest_length,
- QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
- QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
- cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
- /* Update the request */
- qat_req->cd_pars.u.s.content_desc_addr =
- cookie->opt.spc_gmac.cd_phys_addr;
- qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
- sizeof(struct icp_qat_hw_cipher_config) +
- ctx->auth_key_length, 8) >> 3;
- qat_req->comn_mid.src_length = data_length;
- qat_req->comn_mid.dst_length = 0;
-
- cipher_param->spc_aad_addr = 0;
- cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
- cipher_param->spc_aad_sz = data_length;
- cipher_param->reserved = 0;
- cipher_param->spc_auth_res_sz = ctx->digest_length;
-
- qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
- cipher_cd_ctrl->cipher_cfg_offset = 0;
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
- ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
- ICP_QAT_FW_LA_PROTO_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
-}
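+			/* build_request is tracked per process type; have
+			 * set_session populate it on first use in this
+			 * process.
+			 */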
+ if (unlikely(ctx->build_request[proc_type] == NULL)) {
+ int ret =
+ qat_sym_gen_dev_ops[dev_gen].set_session(
+ (void *)cdev, sess);
+ if (ret < 0) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+ }
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
- int ret = 0;
- struct qat_sym_session *ctx = NULL;
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
- struct icp_qat_fw_la_auth_req_params *auth_param;
- register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
- uint32_t cipher_len = 0, cipher_ofs = 0;
- uint32_t auth_len = 0, auth_ofs = 0;
- uint32_t min_ofs = 0;
- uint64_t src_buf_start = 0, dst_buf_start = 0;
- uint64_t auth_data_end = 0;
- uint8_t do_sgl = 0;
- uint8_t in_place = 1;
- int alignment_adjustment = 0;
- int oop_shift = 0;
- struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
- struct qat_sym_op_cookie *cookie =
- (struct qat_sym_op_cookie *)op_cookie;
-
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- return -EINVAL;
+ build_request = ctx->build_request[proc_type];
+ opaque[0] = (uintptr_t)ctx;
+ opaque[1] = (uintptr_t)build_request;
+ }
}
- if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
- " requests, op (%p) is sessionless.", op);
- return -EINVAL;
- } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- ctx = (struct qat_sym_session *)get_sym_session_private_data(
- op->sym->session, qat_sym_driver_id);
#ifdef RTE_LIB_SECURITY
- } else {
- ctx = (struct qat_sym_session *)get_sec_session_private_data(
- op->sym->sec_session);
- if (likely(ctx)) {
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
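+		/* Security sessions use the same lazy-caching scheme, keyed
+		 * on the security session pointer.
+		 */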
+ if (sess != (void *)op->sym->sec_session) {
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
+ enum rte_proc_type_t proc_type;
+
+ ctx = get_sec_session_private_data(
+ op->sym->sec_session);
+ if (unlikely(!ctx)) {
+ QAT_DP_LOG(ERR, "No session for this device");
+ return -EINVAL;
+ }
if (unlikely(ctx->bpi_ctx == NULL)) {
QAT_DP_LOG(ERR, "QAT PMD only supports security"
" operation requests for"
@@ -292,463 +127,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
- }
-#endif
- }
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+ proc_type = rte_eal_process_type();
- if (unlikely(ctx == NULL)) {
- QAT_DP_LOG(ERR, "Session was not created for this device");
- return -EINVAL;
- }
+ if (internals->qat_dev->qat_dev_gen != dev_gen) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->build_request[proc_type] == NULL)) {
+ int ret =
+ qat_sym_gen_dev_ops[dev_gen].set_session(
+ (void *)cdev, sess);
+ if (ret < 0) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+ }
- qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param +
- ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
- if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
- !ctx->is_gmac) {
- /* AES-GCM or AES-CCM */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
- (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
- && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
- && ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
- do_aead = 1;
- } else {
- do_auth = 1;
- do_cipher = 1;
+ sess = (void *)op->sym->sec_session;
+ build_request = ctx->build_request[proc_type];
+ opaque[0] = (uintptr_t)sess;
+ opaque[1] = (uintptr_t)build_request;
}
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
- do_auth = 1;
- do_cipher = 0;
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- do_auth = 0;
- do_cipher = 1;
+ }
+#endif
+ else { /* RTE_CRYPTO_OP_SESSIONLESS */
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+ return -1;
}
- if (do_cipher) {
+ return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
- if (ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
- ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, qat_sym_build_request,
+ (void **)ops, nb_ops);
+}
- if (unlikely(
- (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
- (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
- QAT_DP_LOG(ERR,
- "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- cipher_len = op->sym->cipher.data.length >> 3;
- cipher_ofs = op->sym->cipher.data.offset >> 3;
-
- } else if (ctx->bpi_ctx) {
- /* DOCSIS - only send complete blocks to device.
- * Process any partial block using CFB mode.
- * Even if 0 complete blocks, still send this to device
- * to get into rx queue for post-process and dequeuing
- */
- cipher_len = qat_bpicipher_preprocess(ctx, op);
- cipher_ofs = op->sym->cipher.data.offset;
- } else {
- cipher_len = op->sym->cipher.data.length;
- cipher_ofs = op->sym->cipher.data.offset;
- }
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops,
+ qat_sym_process_response, nb_ops);
+}
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
- min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+ int i = 0, ret = 0;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_dev_instance->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_cryptodev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_cryptodev_private *internals;
+ struct qat_capabilities_info capa_info;
+ const struct rte_cryptodev_capabilities *capabilities;
+ const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+ &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+ uint64_t capa_size;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ if (gen_dev_ops->cryptodev_ops == NULL) {
+ QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+ name);
+ return -(EFAULT);
}
- if (do_auth) {
-
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely(
- (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
- (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
- QAT_DP_LOG(ERR,
- "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_ofs = op->sym->auth.data.offset >> 3;
- auth_len = op->sym->auth.data.length >> 3;
-
- auth_param->u1.aad_adr =
- rte_crypto_op_ctophys_offset(op,
- ctx->auth_iv.offset);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /* AES-GMAC */
- set_cipher_iv(ctx->auth_iv.length,
- ctx->auth_iv.offset,
- cipher_param, op, qat_req);
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- auth_param->u1.aad_adr = 0;
- auth_param->u2.aad_sz = 0;
-
- } else {
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use the same driver id so they can share
+	 * sessions. Store the driver_id so we can validate that all
+	 * processes have the same value; they typically do, but could
+	 * differ if the binaries were built separately.
+	 */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ qat_pci_dev->qat_sym_driver_id =
+ qat_sym_driver_id;
+ } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ if (qat_pci_dev->qat_sym_driver_id !=
+ qat_sym_driver_id) {
+ QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+ name);
+ return -(EFAULT);
}
- min_ofs = auth_ofs;
-
- if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
- ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
- auth_param->auth_res_addr =
- op->sym->auth.digest.phys_addr;
-
}
- if (do_aead) {
- /*
- * This address may used for setting AAD physical pointer
- * into IV offset from op
- */
- rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
- set_cipher_iv(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
- /* In case of AES-CCM this may point to user selected
- * memory or iv offset in cypto_op
- */
- uint8_t *aad_data = op->sym->aead.aad.data;
- /* This is true AAD length, it not includes 18 bytes of
- * preceding data
- */
- uint8_t aad_ccm_real_len = 0;
- uint8_t aad_len_field_sz = 0;
- uint32_t msg_len_be =
- rte_bswap32(op->sym->aead.data.length);
-
- if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
- aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
- aad_ccm_real_len = ctx->aad_len -
- ICP_QAT_HW_CCM_AAD_B0_LEN -
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- } else {
- /*
- * aad_len not greater than 18, so no actual aad
- * data, then use IV after op for B0 block
- */
- aad_data = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- aad_phys_addr_aead =
- rte_crypto_op_ctophys_offset(op,
- ctx->cipher_iv.offset);
- }
-
- uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
- ctx->cipher_iv.length;
-
- aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
- aad_len_field_sz,
- ctx->digest_length, q);
-
- if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET +
- (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
- (uint8_t *)&msg_len_be,
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
- } else {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)&msg_len_be
- + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- - q), q);
- }
-
- if (aad_len_field_sz > 0) {
- *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
- = rte_bswap16(aad_ccm_real_len);
-
- if ((aad_ccm_real_len + aad_len_field_sz)
- % ICP_QAT_HW_CCM_AAD_B0_LEN) {
- uint8_t pad_len = 0;
- uint8_t pad_idx = 0;
-
- pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
- ((aad_ccm_real_len + aad_len_field_sz) %
- ICP_QAT_HW_CCM_AAD_B0_LEN);
- pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
- aad_ccm_real_len + aad_len_field_sz;
- memset(&aad_data[pad_idx],
- 0, pad_len);
- }
+ /* Populate subset device to use in cryptodev device creation */
+ qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_dev_instance->sym_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->sym_rte_dev.devargs = NULL;
- }
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_dev_instance->sym_rte_dev), &init_params);
- set_cipher_iv_ccm(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, q,
- aad_len_field_sz);
+ if (cryptodev == NULL)
+ return -ENODEV;
- }
+ qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = qat_sym_driver_id;
+ cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
- cipher_len = op->sym->aead.data.length;
- cipher_ofs = op->sym->aead.data.offset;
- auth_len = op->sym->aead.data.length;
- auth_ofs = op->sym->aead.data.offset;
+ cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+ cryptodev->dequeue_burst = qat_sym_dequeue_burst;
- auth_param->u1.aad_adr = aad_phys_addr_aead;
- auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- min_ofs = op->sym->aead.data.offset;
- }
+ cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
- if (op->sym->m_src->nb_segs > 1 ||
- (op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
- do_sgl = 1;
-
- /* adjust for chain case */
- if (do_cipher && do_auth)
- min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
- if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
- min_ofs = 0;
-
- if (unlikely((op->sym->m_dst != NULL) &&
- (op->sym->m_dst != op->sym->m_src))) {
- /* Out-of-place operation (OOP)
- * Don't align DMA start. DMA the minimum data-set
- * so as not to overwrite data in dest buffer
- */
- in_place = 0;
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- dst_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
- oop_shift = min_ofs;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
- } else {
- /* In-place operation
- * Start DMA at nearest aligned address below min_ofs
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
- & QAT_64_BTYE_ALIGN_MASK;
-
- if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
- rte_pktmbuf_headroom(op->sym->m_src))
- > src_buf_start)) {
- /* alignment has pushed addr ahead of start of mbuf
- * so revert and take the performance hit
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src,
- min_ofs);
+#ifdef RTE_LIB_SECURITY
+ if (gen_dev_ops->create_security_ctx) {
+ cryptodev->security_ctx =
+ gen_dev_ops->create_security_ctx((void *)cryptodev);
+ if (cryptodev->security_ctx == NULL) {
+ QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+ ret = -ENOMEM;
+ goto error;
}
- dst_buf_start = src_buf_start;
-
- /* remember any adjustment for later, note, can be +/- */
- alignment_adjustment = src_buf_start -
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- }
- if (do_cipher || do_aead) {
- cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, cipher_ofs) - src_buf_start;
- cipher_param->cipher_length = cipher_len;
+ cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
} else {
- cipher_param->cipher_offset = 0;
- cipher_param->cipher_length = 0;
+ QAT_LOG(INFO, "Device %s rte_security support disabled", name);
}
-
- if (!ctx->is_single_pass) {
- /* Do not let to overwrite spc_aad len */
- if (do_auth || do_aead) {
- auth_param->auth_off =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, auth_ofs) - src_buf_start;
- auth_param->auth_len = auth_len;
- } else {
- auth_param->auth_off = 0;
- auth_param->auth_len = 0;
+#endif
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "QAT_SYM_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+
+ internals->dev_id = cryptodev->data->dev_id;
+
+ capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+ capabilities = capa_info.data;
+ capa_size = capa_info.size;
+
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ capa_size, rte_socket_id(), 0);
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities, "
+ "destroying PMD for %s",
+ name);
+ ret = -EFAULT;
+ goto error;
}
}
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- > (auth_param->auth_off + auth_param->auth_len) ?
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- : (auth_param->auth_off + auth_param->auth_len);
-
- if (do_auth && do_cipher) {
- /* Handle digest-encrypted cases, i.e.
- * auth-gen-then-cipher-encrypt and
- * cipher-decrypt-then-auth-verify
- */
- /* First find the end of the data */
- if (do_sgl) {
- uint32_t remaining_off = auth_param->auth_off +
- auth_param->auth_len + alignment_adjustment + oop_shift;
- struct rte_mbuf *sgl_buf =
- (in_place ?
- op->sym->m_src : op->sym->m_dst);
-
- while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
- && sgl_buf->next != NULL) {
- remaining_off -= rte_pktmbuf_data_len(sgl_buf);
- sgl_buf = sgl_buf->next;
- }
+ memcpy(internals->capa_mz->addr, capabilities, capa_size);
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
- auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
- sgl_buf, remaining_off);
- } else {
- auth_data_end = (in_place ?
- src_buf_start : dst_buf_start) +
- auth_param->auth_off + auth_param->auth_len;
- }
- /* Then check if digest-encrypted conditions are met */
- if ((auth_param->auth_off + auth_param->auth_len <
- cipher_param->cipher_offset +
- cipher_param->cipher_length) &&
- (op->sym->auth.digest.phys_addr ==
- auth_data_end)) {
- /* Handle partial digest encryption */
- if (cipher_param->cipher_offset +
- cipher_param->cipher_length <
- auth_param->auth_off +
- auth_param->auth_len +
- ctx->digest_length)
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- auth_param->auth_off +
- auth_param->auth_len +
- ctx->digest_length;
- struct icp_qat_fw_comn_req_hdr *header =
- &qat_req->comn_hdr;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
- header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- }
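+	/* Scan the command line parameter list for the enqueue threshold */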
+ while (1) {
+ if (qat_dev_cmd_param[i].name == NULL)
+ break;
+ if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+ internals->min_enq_burst_threshold =
+ qat_dev_cmd_param[i].val;
+ i++;
}
- if (do_sgl) {
-
- ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
- QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src,
- (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
- &cookie->qat_sgl_src,
- qat_req->comn_mid.src_length,
- QAT_SYM_SGL_MAX_NUMBER);
-
- if (unlikely(ret)) {
- QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
- return ret;
- }
+ internals->service_type = QAT_SERVICE_SYMMETRIC;
+ qat_pci_dev->sym_dev = internals;
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->dev_id);
- if (in_place)
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- cookie->qat_sgl_src_phys_addr;
- else {
- ret = qat_sgl_fill_array(op->sym->m_dst,
- (int64_t)(dst_buf_start -
- rte_pktmbuf_iova(op->sym->m_dst)),
- &cookie->qat_sgl_dst,
- qat_req->comn_mid.dst_length,
- QAT_SYM_SGL_MAX_NUMBER);
-
- if (unlikely(ret)) {
- QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
- return ret;
- }
+ return 0;
- qat_req->comn_mid.src_data_addr =
- cookie->qat_sgl_src_phys_addr;
- qat_req->comn_mid.dest_data_addr =
- cookie->qat_sgl_dst_phys_addr;
- }
- qat_req->comn_mid.src_length = 0;
- qat_req->comn_mid.dst_length = 0;
- } else {
- qat_req->comn_mid.src_data_addr = src_buf_start;
- qat_req->comn_mid.dest_data_addr = dst_buf_start;
- }
+error:
+#ifdef RTE_LIB_SECURITY
+ rte_free(cryptodev->security_ctx);
+ cryptodev->security_ctx = NULL;
+#endif
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->sym_rte_dev, 0,
+ sizeof(qat_dev_instance->sym_rte_dev));
- if (ctx->is_single_pass) {
- if (ctx->is_ucs) {
- /* GEN 4 */
- cipher_param20->spc_aad_addr =
- op->sym->aead.aad.phys_addr;
- cipher_param20->spc_auth_res_addr =
- op->sym->aead.digest.phys_addr;
- } else {
- cipher_param->spc_aad_addr =
- op->sym->aead.aad.phys_addr;
- cipher_param->spc_auth_res_addr =
- op->sym->aead.digest.phys_addr;
- }
- } else if (ctx->is_single_pass_gmac &&
- op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
- /* Handle Single-Pass AES-GMAC */
- handle_spc_gmac(ctx, op, cookie, qat_req);
- }
+ return ret;
+}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
- QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_la_bulk_req));
- QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
- rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
- rte_pktmbuf_data_len(op->sym->m_src));
- if (do_cipher) {
- uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
- ctx->cipher_iv.length);
- }
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
- if (do_auth) {
- if (ctx->auth_iv.length) {
- uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->auth_iv.offset);
- QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
- ctx->auth_iv.length);
- }
- QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
- ctx->digest_length);
- }
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
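+	/* The capability memzone is shared across processes; only the
+	 * primary process releases it.
+	 */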
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
- if (do_aead) {
- QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
- ctx->digest_length);
- QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
- ctx->aad_len);
- }
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+ rte_free(cryptodev->security_ctx);
+ cryptodev->security_ctx = NULL;
#endif
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
return 0;
}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ qat_sym_driver_id);
@@ -130,10 +130,6 @@ uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops);
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie, enum qat_device_gen qat_dev_gen);
-
/** Encrypt a single partial block
* Depends on openssl libcrypto
* Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
session->is_auth = 1;
session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
/* Chacha-Poly is special case that use QAT CTR mode */
- if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
- } else {
+ else
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- }
+
session->cipher_iv.offset = aead_xform->iv.offset;
session->cipher_iv.length = aead_xform->iv.length;
session->aad_len = aead_xform->aad_length;
@@ -55,6 +55,11 @@
#define QAT_SESSION_IS_SLICE_SET(flags, flag) \
(!!((flags) & (flag)))
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_NONE = 0,
QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -63,11 +68,6 @@ enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_ZUC = 4
};
-struct qat_sym_session;
-
-typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
- uint8_t *out_msg, void *op_cookie);
-
/* Common content descriptor */
struct qat_sym_cd {
struct icp_qat_hw_cipher_algo_blk cipher;