@@ -8,14 +8,1082 @@
#include <rte_cryptodev.h>
#include "qat_crypto.h"
#include "qat_sym_session.h"
+#include "qat_sym.h"
+
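+/*
+ * Per-generation symmetric capability tables. Each entry expands, via the
+ * QAT_SYM_*_CAP helper macros, to one rte_cryptodev capability describing an
+ * algorithm; CAP_RNG(field, min, max, inc) gives the supported range in bytes
+ * and CAP_RNG_ZERO marks a field the algorithm does not use.
+ */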
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
+ QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+ CAP_RNG(digest_size, 1, 20, 1)), \
+ QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+ CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+ QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+ CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+ QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
+ QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+ CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+ CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+ CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_CIPHER_CAP(AES_XTS, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_CIPHER_CAP(SNOW3G_UEA2, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_CIPHER_CAP(KASUMI_F8, CAP_SET(block_size, 8), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
+ QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \
+ CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_CIPHER_CAP(3DES_CBC, CAP_SET(block_size, 8), \
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+ QAT_SYM_CIPHER_CAP(3DES_CTR, CAP_SET(block_size, 8), \
+ CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+ QAT_SYM_CIPHER_CAP(DES_CBC, CAP_SET(block_size, 8), \
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+ QAT_SYM_CIPHER_CAP(DES_DOCSISBPI, CAP_SET(block_size, 8), \
+ CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
+
+#define QAT_BASE_GEN1_ASYM_CAPABILITIES \
+ QAT_ASYM_CAP(MODEX, 0, 1, 512, 1), \
+ QAT_ASYM_CAP(MODINV, 0, 1, 512, 1), \
+ QAT_ASYM_CAP(RSA, \
+ ((1 << RTE_CRYPTO_ASYM_OP_SIGN) | \
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) | \
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) | \
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)), \
+ 64, 512, 64)
+
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
+ QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0))
+
+#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
+ QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 32, 32, 0), \
+ CAP_RNG(digest_size, 16, 16, 0), \
+ CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
+
+#define QAT_BASE_GEN4_SYM_CAPABILITIES \
+ QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+ CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+ CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+ CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \
+ CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+ CAP_RNG(digest_size, 1, 20, 1)), \
+ QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+ QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+ QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+ CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+ QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+ CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+ QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12))
+
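+/*
+ * Number of requests that can still be enqueued on queue pair q: the ring
+ * capacity minus the requests already in flight, minus the c requests cached
+ * locally but not yet submitted, capped at the n requested.
+ */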
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+ RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+ (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
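+/*
+ * Decrypt a DOCSIS BPI residual block in software: the IV is encrypted with
+ * the libcrypto ECB context held in bpi_ctx and the result is XORed with the
+ * ciphertext, which is how CFB handles a partial block.
+ */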
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt (not decrypt!) the IV, then XOR with the ciphertext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_decrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ return -EINVAL;
+}
+
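+/*
+ * For DOCSIS BPI decryption, handle any trailing partial block here in
+ * software (CFB style) before the request is sent to the device, and return
+ * the number of whole-block bytes that the device should process.
+ */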
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely((sym_op->m_dst != NULL)
+ && (sym_op->m_dst != sym_op->m_src)))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+ dst, last_block_len);
+#endif
+ op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
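+/*
+ * SNOW3G/KASUMI/ZUC hash offsets and lengths are given in bits. Return 1 for
+ * those algorithms when the values are byte aligned, -EINVAL when they are
+ * not, and 0 for every other algorithm. The cipher variant below applies the
+ * same rule to the cipher offset and length.
+ */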
+static __rte_always_inline int
+qat_chk_len_in_bits_auth(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0)))
+ return -EINVAL;
+ return 1;
+ }
+ return 0;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_cipher(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ ((op->sym->cipher.data.offset %
+ BYTE_LENGTH) != 0)))
+ return -EINVAL;
+ return 1;
+ }
+ return 0;
+}
+
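+/*
+ * Fill the source/destination addresses and lengths of a firmware request.
+ * Single-segment inputs are passed as flat buffers; multi-segment inputs are
+ * converted into the SGL stored in the op cookie. Returns the total source
+ * length on success or a negative value on error.
+ */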
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+ void *opaque, struct qat_sym_op_cookie *cookie,
+ struct rte_crypto_vec *src_vec, uint16_t n_src,
+ struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+ struct qat_sgl *list;
+ uint32_t i;
+ uint32_t tl_src = 0, total_len_src, total_len_dst;
+ uint64_t src_data_start = 0, dst_data_start = 0;
+ int is_sgl = n_src > 1 || n_dst > 1;
+
+ if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+ n_dst > QAT_SYM_SGL_MAX_NUMBER))
+ return -1;
+
+ if (likely(!is_sgl)) {
+ src_data_start = src_vec[0].iova;
+ tl_src = total_len_src =
+ src_vec[0].len;
+ if (unlikely(n_dst)) { /* oop */
+ total_len_dst = dst_vec[0].len;
+
+ dst_data_start = dst_vec[0].iova;
+ if (unlikely(total_len_src != total_len_dst))
+ return -EINVAL;
+ } else {
+ dst_data_start = src_data_start;
+ total_len_dst = tl_src;
+ }
+ } else { /* sgl */
+ total_len_dst = total_len_src = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+ for (i = 0; i < n_src; i++) {
+ list->buffers[i].len = src_vec[i].len;
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = src_vec[i].iova;
+ if ((uint64_t)tl_src + src_vec[i].len > UINT32_MAX) {
+ QAT_DP_LOG(ERR, "Message too long");
+ return -1;
+ }
+ tl_src += src_vec[i].len;
+ }
+
+ list->num_bufs = i;
+ src_data_start = cookie->qat_sgl_src_phys_addr;
+
+ if (unlikely(n_dst > 0)) { /* oop sgl */
+ uint32_t tl_dst = 0;
+
+ list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+ for (i = 0; i < n_dst; i++) {
+ list->buffers[i].len = dst_vec[i].len;
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = dst_vec[i].iova;
+ if ((uint64_t)tl_dst + dst_vec[i].len > UINT32_MAX) {
+ QAT_DP_LOG(ERR, "Message too long");
+ return -ENOTSUP;
+ }
+
+ tl_dst += dst_vec[i].len;
+ }
+
+ if (tl_src != tl_dst)
+ return -EINVAL;
+ list->num_bufs = i;
+ dst_data_start = cookie->qat_sgl_dst_phys_addr;
+ } else
+ dst_data_start = src_data_start;
+ }
+
+ req->comn_mid.src_data_addr = src_data_start;
+ req->comn_mid.dest_data_addr = dst_data_start;
+ req->comn_mid.src_length = total_len_src;
+ req->comn_mid.dst_length = total_len_dst;
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+ return tl_src;
+}
+
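+/*
+ * The qat_sym_convert_op_to_vec_* helpers translate an rte_crypto_op into the
+ * raw data-path representation: in/out SGLs plus IV, digest and AAD pointers.
+ * They return a value suitable for ofs.raw (zero for the single-service
+ * cases, packed cipher/auth offsets for chains) and UINT64_MAX on failure.
+ */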
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+ struct qat_sym_session *ctx,
+ struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+ struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ int n_src = 0;
+ int ret;
+
+ ret = qat_chk_len_in_bits_cipher(ctx, op);
+ switch (ret) {
+ case 1:
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+ break;
+ case 0:
+ if (ctx->bpi_ctx) {
+ /* DOCSIS - only send complete blocks to device.
+ * Process any partial block using CFB mode.
+ * Even if 0 complete blocks, still send this to device
+ * to get into rx queue for post-process and dequeuing
+ */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+ break;
+ default:
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return UINT64_MAX;
+ }
+
+ cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+ ctx->cipher_iv.offset);
+ cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+
+ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+ cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+ if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+
+ in_sgl->num = n_src;
+
+ /* Out-Of-Place operation */
+ if (unlikely((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src))) {
+ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+ cipher_len, out_sgl->vec,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+
+ out_sgl->num = n_dst;
+ } else
+ out_sgl->num = 0;
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+ struct qat_sym_session *ctx,
+ struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+ struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ struct rte_crypto_va_iova_ptr *digest)
+{
+ uint32_t auth_ofs = 0, auth_len = 0;
+ int n_src, ret;
+
+ ret = qat_chk_len_in_bits_auth(ctx, op);
+ switch (ret) {
+ case 1:
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+ auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+ ctx->auth_iv.offset);
+ auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+ break;
+ case 0:
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+ auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+ ctx->auth_iv.offset);
+ auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+ auth_iv->va = NULL;
+ auth_iv->iova = 0;
+ }
+ break;
+ default:
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return UINT64_MAX;
+ }
+
+ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+ auth_ofs + auth_len, in_sgl->vec,
+ QAT_SYM_SGL_MAX_NUMBER);
+ if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+
+ in_sgl->num = n_src;
+
+ /* Out-Of-Place operation */
+ if (unlikely((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src))) {
+ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+ auth_ofs + auth_len, out_sgl->vec,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+ out_sgl->num = n_dst;
+ } else
+ out_sgl->num = 0;
+
+ digest->va = (void *)op->sym->auth.digest.data;
+ digest->iova = op->sym->auth.digest.phys_addr;
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+ struct qat_sym_session *ctx,
+ struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+ struct rte_crypto_va_iova_ptr *digest)
+{
+ union rte_crypto_sym_ofs ofs;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ int is_oop = (op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src);
+ int is_sgl = op->sym->m_src->nb_segs > 1;
+ int n_src;
+ int ret;
+
+ if (unlikely(is_oop))
+ is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+ cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+ ctx->cipher_iv.offset);
+ cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+ ctx->auth_iv.offset);
+ auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+ digest->va = (void *)op->sym->auth.digest.data;
+ digest->iova = op->sym->auth.digest.phys_addr;
+
+ ret = qat_chk_len_in_bits_cipher(ctx, op);
+ switch (ret) {
+ case 1:
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+ break;
+ case 0:
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ break;
+ default:
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return UINT64_MAX;
+ }
+
+ ret = qat_chk_len_in_bits_auth(ctx, op);
+ switch (ret) {
+ case 1:
+ auth_len = op->sym->auth.data.length >> 3;
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ break;
+ case 0:
+ auth_len = op->sym->auth.data.length;
+ auth_ofs = op->sym->auth.data.offset;
+ break;
+ default:
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return UINT64_MAX;
+ }
+
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+ max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+ /* digest in buffer check. Needed only for wireless algos */
+ if (ret == 1) {
+ /* Handle digest-encrypted cases, i.e.
+ * auth-gen-then-cipher-encrypt and
+ * cipher-decrypt-then-auth-verify
+ */
+ uint64_t auth_end_iova;
+
+ if (unlikely(is_sgl)) {
+ uint32_t remaining_off = auth_ofs + auth_len;
+ struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+ op->sym->m_src);
+
+ while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+ && sgl_buf->next != NULL) {
+ remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+ sgl_buf = sgl_buf->next;
+ }
+
+ auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+ sgl_buf, remaining_off);
+ } else
+ auth_end_iova = (is_oop ?
+ rte_pktmbuf_iova(op->sym->m_dst) :
+ rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+ auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+ (digest->iova == auth_end_iova))
+ max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+ ctx->digest_length);
+ }
+
+ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+ in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+ if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+ in_sgl->num = n_src;
+
+ if (unlikely((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src))) {
+ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+ max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+ if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+ out_sgl->num = n_dst;
+ } else
+ out_sgl->num = 0;
+
+ ofs.ofs.cipher.head = cipher_ofs;
+ ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+ ofs.ofs.auth.head = auth_ofs;
+ ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+ return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+ struct qat_sym_session *ctx,
+ struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+ struct rte_crypto_va_iova_ptr *digest)
+{
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ int32_t n_src = 0;
+
+ cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+ ctx->cipher_iv.offset);
+ cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+ auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+ digest->va = (void *)op->sym->aead.digest.data;
+ digest->iova = op->sym->aead.digest.phys_addr;
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+
+ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+ in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+ if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+ in_sgl->num = n_src;
+
+ /* Out-Of-Place operation */
+ if (unlikely((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src))) {
+ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+ cipher_len, out_sgl->vec,
+ QAT_SYM_SGL_MAX_NUMBER);
+ if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return UINT64_MAX;
+ }
+
+ out_sgl->num = n_dst;
+ } else
+ out_sgl->num = 0;
+
+ return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+ rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+ iv_len);
+ else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+ }
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+ uint32_t i;
+
+ for (i = 0; i < n; i++)
+ sta[i] = status;
+}
+
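+/*
+ * The enqueue_one_*_job_gen1 helpers fill the service-specific request
+ * parameters (offsets, lengths, IVs, digest and AAD addresses) of a bulk
+ * request whose header has already been copied from the session template.
+ */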
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_va_iova_ptr *iv,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ /* cipher IV */
+ qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ auth_param->auth_off = ofs.ofs.auth.head;
+ auth_param->auth_len = data_len - ofs.ofs.auth.head -
+ ofs.ofs.auth.tail;
+ auth_param->auth_res_addr = digest->iova;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = auth_iv->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+ ctx->auth_iv.length);
+ break;
+ default:
+ break;
+ }
+}
+
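+/*
+ * Chain (cipher + hash) jobs additionally detect the digest-encrypted case:
+ * when the digest immediately follows the authenticated data and falls inside
+ * the ciphered region, the DIGEST_IN_BUFFER flag is set and, for flat
+ * buffers, the buffer length is extended to cover the digest.
+ */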
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_vec *src_vec,
+ uint16_t n_src_vecs,
+ struct rte_crypto_vec *dst_vec,
+ uint16_t n_dst_vecs,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+ dst_vec : src_vec;
+ rte_iova_t auth_iova_end;
+ int cipher_len, auth_len;
+ int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ cipher_len = data_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+ if (unlikely(cipher_len < 0 || auth_len < 0))
+ return -1;
+
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = cipher_len;
+ qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+ auth_param->auth_off = ofs.ofs.auth.head;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest->iova;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = auth_iv->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ break;
+ default:
+ break;
+ }
+
+ if (unlikely(is_sgl)) {
+ /* sgl */
+ int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+ uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+ while (remaining_off >= cvec->len && i >= 1) {
+ i--;
+ remaining_off -= cvec->len;
+ cvec++;
+ }
+
+ auth_iova_end = cvec->iova + remaining_off;
+ } else
+ auth_iova_end = cvec[0].iova + auth_param->auth_off +
+ auth_param->auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset + cipher_param->cipher_length) &&
+ (digest->iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset + cipher_param->cipher_length <
+ auth_param->auth_off + auth_param->auth_len +
+ ctx->digest_length && !is_sgl)
+ req->comn_mid.dst_length = req->comn_mid.src_length =
+ auth_param->auth_off + auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+
+ return 0;
+}
+
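+/*
+ * AEAD jobs: for GCM the 12-byte IV is copied into the request and the AAD is
+ * referenced directly; for CCM (AES_CBC_MAC hash) the B0 block and AAD length
+ * field are built in the AAD buffer and the nonce is copied into both the
+ * request IV field and the AAD buffer.
+ */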
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param =
+ (void *)&req->serv_specif_rqpars;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ uint8_t *aad_data;
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova = 0;
+ uint8_t q;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+ ctx->cipher_iv.length);
+ aad_iova = aad->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = aad->va;
+ aad_iova = aad->iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32((uint32_t)data_len -
+ ofs.ofs.cipher.head);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = iv->va;
+ aad_iova = iv->iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz, ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be +
+ (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len +
+ aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len +
+ aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)iv->va +
+ ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ rte_memcpy((uint8_t *)aad->va +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ break;
+ default:
+ break;
+ }
+
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ auth_param->auth_off = ofs.ofs.cipher.head;
+ auth_param->auth_len = cipher_param->cipher_length;
+ auth_param->auth_res_addr = digest->iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
+
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest __rte_unused,
+ struct rte_crypto_va_iova_ptr *aad __rte_unused,
+ void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+ int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+ rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+ uint32_t max_nb_to_dequeue,
+ rte_cryptodev_raw_post_dequeue_t post_dequeue,
+ void **out_user_data, uint8_t is_user_data_array,
+ uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
/* -----------------GENx control path APIs ---------------- */
uint64_t
qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
+
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
uint8_t hash_flag);
@@ -23,6 +1091,9 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
struct qat_capabilities_info
qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
+
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
uint64_t
qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
@@ -269,6 +269,901 @@ qat_sym_create_security_gen1(void *cryptodev)
#endif
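+
+/*
+ * GEN1 request builders used by the enqueue burst path: convert the crypto op
+ * into vectors, copy the session's request template into the ring slot, set
+ * the data addresses and fill the service-specific parameters.
+ */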
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl, out_sgl;
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr cipher_iv;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
+ req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+ &cipher_iv, NULL, NULL);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+ NULL, NULL, NULL);
+#endif
+
+ return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl, out_sgl;
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr auth_iv;
+ struct rte_crypto_va_iova_ptr digest;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
+ req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+ NULL, &auth_iv, &digest);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+ total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+ &auth_iv, NULL, &digest);
+#endif
+
+ return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl, out_sgl;
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr cipher_iv;
+ struct rte_crypto_va_iova_ptr aad;
+ struct rte_crypto_va_iova_ptr digest;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
+ req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+ &cipher_iv, &aad, &digest);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+ total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+ NULL, &aad, &digest);
+#endif
+
+ return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie)
+{
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct rte_crypto_op *op = in_op;
+ struct qat_sym_op_cookie *cookie = op_cookie;
+ struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+ struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+ out_vec[QAT_SYM_SGL_MAX_NUMBER];
+ struct rte_crypto_va_iova_ptr cipher_iv;
+ struct rte_crypto_va_iova_ptr auth_iv;
+ struct rte_crypto_va_iova_ptr digest;
+ union rte_crypto_sym_ofs ofs;
+ int32_t total_len;
+
+ in_sgl.vec = in_vec;
+ out_sgl.vec = out_vec;
+
+ req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+ &cipher_iv, &auth_iv, &digest);
+ if (unlikely(ofs.raw == UINT64_MAX)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+ in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+ if (unlikely(total_len < 0)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+ out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+ ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+ &auth_iv, NULL, &digest);
+#endif
+
+ return 0;
+}
+
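+/*
+ * Raw data-path enqueue: reserve the descriptor at the cached tail, copy the
+ * session template into it, set data addresses and request parameters, then
+ * advance the cached tail and enqueue counter. The hardware tail CSR is only
+ * written later by the enqueue_done callback.
+ */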
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest __rte_unused,
+ struct rte_crypto_va_iova_ptr *aad __rte_unused,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct qat_sym_op_cookie *cookie;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+ NULL, NULL, NULL);
+#endif
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+ return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i],
+ cookie, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+ enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+ (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, &vec->iv[i],
+ NULL, NULL, NULL);
+#endif
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+ (uint32_t)data_len);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+ auth_iv, NULL, digest);
+#endif
+ return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+ enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+ &vec->auth_iv[i], ofs, (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+ NULL, &vec->digest[i]);
+#endif
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+ NULL, 0, cipher_iv, digest, auth_iv, ofs,
+ (uint32_t)data_len)))
+ return -1;
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+ auth_iv, NULL, digest);
+#endif
+ return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+
+ if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num,
+ NULL, 0,
+ &vec->iv[i], &vec->digest[i],
+ &vec->auth_iv[i], ofs, (uint32_t)data_len)))
+ break;
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, &vec->iv[i],
+ &vec->auth_iv[i],
+ NULL, &vec->digest[i]);
+#endif
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_va_iova_ptr *iv,
+ struct rte_crypto_va_iova_ptr *digest,
+ struct rte_crypto_va_iova_ptr *aad,
+ void *user_data)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+
+ int32_t data_len;
+ uint32_t tail = dp_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ cookie = qp->op_cookies[tail >> tx_queue->trailz];
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+ data, n_data_vecs, NULL, 0);
+ if (unlikely(data_len < 0))
+ return -1;
+
+ enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+ (uint32_t)data_len);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+ NULL, aad, digest);
+#endif
+ return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void *user_data[], int *status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = dp_ctx->session;
+ uint32_t i, n;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+ if (unlikely(n == 0)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ *status = 0;
+ return 0;
+ }
+
+ tail = dp_ctx->tail;
+
+ for (i = 0; i < n; i++) {
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[tail >> tx_queue->trailz];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+ vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+ if (unlikely(data_len < 0))
+ break;
+
+ enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+ &vec->digest[i], &vec->aad[i], ofs,
+ (uint32_t)data_len);
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+ vec->src_sgl[i].num, &vec->iv[i], NULL,
+ &vec->aad[i], &vec->digest[i]);
+#endif
+ }
+
+ if (unlikely(i < n))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+ dp_ctx->tail = tail;
+ dp_ctx->cached_enqueue += i;
+ *status = 0;
+ return i;
+}
+
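+/*
+ * Dequeue up to max_nb_to_dequeue responses (or the count returned by
+ * get_dequeue_count for the first response), reporting each status via
+ * post_dequeue and stopping early when an empty ring signature is seen.
+ */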
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+ rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+ uint32_t max_nb_to_dequeue,
+ rte_cryptodev_raw_post_dequeue_t post_dequeue,
+ void **out_user_data, uint8_t is_user_data_array,
+ uint32_t *n_success_jobs, int *return_status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ void *resp_opaque;
+ uint32_t i, n, inflight;
+ uint32_t head;
+ uint8_t status;
+
+ *n_success_jobs = 0;
+ *return_status = 0;
+ head = dp_ctx->head;
+
+ inflight = qp->enqueued - qp->dequeued;
+ if (unlikely(inflight == 0))
+ return 0;
+
+ resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+ head);
+ /* no operation ready */
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ return 0;
+
+ resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+ /* get the dequeue count */
+ if (get_dequeue_count) {
+ n = get_dequeue_count(resp_opaque);
+ if (unlikely(n == 0))
+ return 0;
+ } else {
+ if (unlikely(max_nb_to_dequeue == 0))
+ return 0;
+ n = max_nb_to_dequeue;
+ }
+
+ out_user_data[0] = resp_opaque;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ post_dequeue(resp_opaque, 0, status);
+ *n_success_jobs += status;
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+ /* we already finished dequeue when n == 1 */
+ if (unlikely(n == 1)) {
+ i = 1;
+ goto end_deq;
+ }
+
+ if (is_user_data_array) {
+ for (i = 1; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (unlikely(*(uint32_t *)resp ==
+ ADF_RING_EMPTY_SIG))
+ goto end_deq;
+ out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ *n_success_jobs += status;
+ post_dequeue(out_user_data[i], i, status);
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ }
+
+ goto end_deq;
+ }
+
+ /* opaque is not array */
+ for (i = 1; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ goto end_deq;
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ post_dequeue(resp_opaque, i, status);
+ *n_success_jobs += status;
+ }
+
+end_deq:
+ dp_ctx->head = head;
+ dp_ctx->cached_dequeue += i;
+ return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+ int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ register struct icp_qat_fw_comn_resp *resp;
+
+ resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+ dp_ctx->head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ return NULL;
+
+ dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ dp_ctx->cached_dequeue++;
+
+ *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+ RTE_CRYPTO_OP_STATUS_SUCCESS :
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ *dequeue_status = 0;
+ return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+ if (unlikely(dp_ctx->cached_enqueue != n))
+ return -1;
+
+ qp->enqueued += n;
+ qp->stats.enqueued_count += n;
+
+ tx_queue->tail = dp_ctx->tail;
+
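+ /* The enqueue calls only stage requests; the tail doorbell is rung here,
+ * once the application signals the burst is complete.
+ */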
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number, tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ dp_ctx->cached_enqueue = 0;
+
+ return 0;
+}
+
+int
+qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+ if (unlikely(dp_ctx->cached_dequeue != n))
+ return -1;
+
+ rx_queue->head = dp_ctx->head;
+ rx_queue->nb_processed_responses += n;
+ qp->dequeued += n;
+ qp->stats.dequeued_count += n;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = rx_queue->csr_head;
+ new_head = rx_queue->head;
+ max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
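+ /* Mark the processed descriptors as empty again; when the head has
+ * wrapped around, clear them in two chunks.
+ */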
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+ max_head - old_head);
+ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+ new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+ old_head);
+ }
+ rx_queue->nb_processed_responses = 0;
+ rx_queue->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+ rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+ new_head);
+ }
+
+ dp_ctx->cached_dequeue = 0;
+ return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+ struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+ struct qat_sym_session *ctx = _ctx;
+
+ raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+ raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+ raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+ raw_dp_ctx->dequeue_done = qat_sym_dp_denqueue_done_gen1;
+
+ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+ !ctx->is_gmac) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_aead_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_aead_gen1;
+ } else {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_chain_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_chain_gen1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_aead_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_aead_gen1;
+ } else {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_cipher_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_cipher_gen1;
+ }
+ } else
+ return -1;
+
+ return 0;
+}
+
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+ struct qat_sym_session *ctx = session;
+ qat_sym_build_request_t build_request = NULL;
+ enum rte_proc_type_t proc_type = rte_eal_process_type();
+ int handle_mixed = 0;
+
+ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+ !ctx->is_gmac) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ /* do_aead = 1; */
+ build_request = qat_sym_build_op_aead_gen1;
+ } else {
+ /* do_auth = 1; do_cipher = 1; */
+ build_request = qat_sym_build_op_chain_gen1;
+ handle_mixed = 1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+ /* do_auth = 1; do_cipher = 0;*/
+ build_request = qat_sym_build_op_auth_gen1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ /* do_auth = 0; do_cipher = 1; */
+ build_request = qat_sym_build_op_cipher_gen1;
+ }
+
+ if (!build_request)
+ return 0;
+ ctx->build_request[proc_type] = build_request;
+
+ if (!handle_mixed)
+ return 0;
+
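+ /* Mixed cipher/auth wireless-algorithm combinations are not supported
+ * by this generation, so reject them here.
+ */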
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ return -ENOTSUP;
+ } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+ ctx->qat_cipher_alg !=
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ return -ENOTSUP;
+ } else if ((ctx->aes_cmac ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+ (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
RTE_INIT(qat_sym_crypto_gen1_init)
{
qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -54,6 +54,14 @@ struct qat_sym_op_cookie {
} opt;
};
+struct qat_sym_dp_ctx {
+ struct qat_sym_session *session;
+ uint32_t tail;
+ uint32_t head;
+ uint16_t cached_enqueue;
+ uint16_t cached_dequeue;
+};
+
static __rte_always_inline int
refactor_qat_sym_process_response(__rte_unused void **op,
__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
@@ -13,14 +13,6 @@
#include "qat_sym_session.h"
#include "qat_qp.h"
-struct qat_sym_dp_ctx {
- struct qat_sym_session *session;
- uint32_t tail;
- uint32_t head;
- uint16_t cached_enqueue;
- uint16_t cached_dequeue;
-};
-
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
struct rte_crypto_vec *data, uint16_t n_data_vecs)
new file mode 100644
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
+
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The qat pci device's own rte_driver can't be used, as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
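+/* Pre-compute the IOVAs of the SGL tables and of the single-pass GMAC content
+ * descriptor embedded in each op cookie, so they need not be recomputed on the
+ * data path.
+ */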
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+ struct qat_sym_op_cookie *cookie = op_cookie;
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+
+ cookie->opt.spc_gmac.cd_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ opt.spc_gmac.cd_cipher);
+}
+
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ void *sess = (void *)opaque[0];
+ qat_sym_build_request_t build_request = (void *)opaque[1];
+ struct qat_sym_session *ctx = NULL;
+
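+ /* opaque[] caches the most recently used session and its build_request
+ * handler; the per-session checks and handler selection below are only
+ * redone when the session changes.
+ */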
+ if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+ ctx = get_sym_session_private_data(op->sym->session,
+ qat_sym_driver_id);
+ if (unlikely(!ctx)) {
+ QAT_DP_LOG(ERR, "No session for this device");
+ return -EINVAL;
+ }
+ if (sess != ctx) {
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
+ enum rte_proc_type_t proc_type;
+
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+ proc_type = rte_eal_process_type();
+
+ if (internals->qat_dev->qat_dev_gen != dev_gen) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->build_request[proc_type] == NULL)) {
+ int ret =
+ qat_sym_gen_dev_ops[dev_gen].set_session(
+ (void *)cdev, sess);
+ if (ret < 0) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+ }
+
+ build_request = ctx->build_request[proc_type];
+ opaque[0] = (uintptr_t)ctx;
+ opaque[1] = (uintptr_t)build_request;
+ }
+ }
+
+#ifdef RTE_LIB_SECURITY
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ if (sess != (void *)op->sym->sec_session) {
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
+ enum rte_proc_type_t proc_type;
+
+ ctx = get_sec_session_private_data(
+ op->sym->sec_session);
+ if (unlikely(!ctx)) {
+ QAT_DP_LOG(ERR, "No session for this device");
+ return -EINVAL;
+ }
+ if (unlikely(ctx->bpi_ctx == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports security"
+ " operation requests for"
+ " DOCSIS, op (%p) is not for"
+ " DOCSIS.", op);
+ return -EINVAL;
+ } else if (unlikely(((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src)) ||
+ op->sym->m_src->nb_segs > 1)) {
+ QAT_DP_LOG(ERR, "OOP and/or multi-segment"
+ " buffers not supported for"
+ " DOCSIS security.");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+ proc_type = rte_eal_process_type();
+
+ if (internals->qat_dev->qat_dev_gen != dev_gen) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->build_request[proc_type] == NULL)) {
+ int ret =
+ qat_sym_gen_dev_ops[dev_gen].set_session(
+ (void *)cdev, sess);
+ if (ret < 0) {
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+ }
+
+ sess = (void *)op->sym->sec_session;
+ build_request = ctx->build_request[proc_type];
+ opaque[0] = (uintptr_t)sess;
+ opaque[1] = (uintptr_t)build_request;
+ }
+ }
+#endif
+ else { /* RTE_CRYPTO_OP_SESSIONLESS */
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+ return -1;
+ }
+
+ return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, qat_sym_build_request,
+ (void **)ops, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops,
+ qat_sym_process_response, nb_ops);
+}
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+ int i = 0, ret = 0;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_dev_instance->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_cryptodev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_cryptodev_private *internals;
+ struct qat_capabilities_info capa_info;
+ const struct rte_cryptodev_capabilities *capabilities;
+ const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+ &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+ uint64_t capa_size;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ if (gen_dev_ops->cryptodev_ops == NULL) {
+ QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+ name);
+ return -(EFAULT);
+ }
+
+ /*
+ * All processes must use the same driver id so they can share sessions.
+ * Store driver_id so we can validate that all processes have the same
+ * value; typically they do, but the ids could differ if the binaries were
+ * built separately.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ qat_pci_dev->qat_sym_driver_id =
+ qat_sym_driver_id;
+ } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ if (qat_pci_dev->qat_sym_driver_id !=
+ qat_sym_driver_id) {
+ QAT_LOG(ERR,
+ "Device %s have different driver id than corresponding device in primary process",
+ name);
+ return -(EFAULT);
+ }
+ }
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_dev_instance->sym_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_dev_instance->sym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = qat_sym_driver_id;
+ cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+ cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+ cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+
+ cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+#ifdef RTE_LIB_SECURITY
+ if (gen_dev_ops->create_security_ctx) {
+ cryptodev->security_ctx =
+ gen_dev_ops->create_security_ctx((void *)cryptodev);
+ if (cryptodev->security_ctx == NULL) {
+ QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+ QAT_LOG(INFO, "Device %s rte_security support enabled", name);
+ } else {
+ QAT_LOG(INFO, "Device %s rte_security support disabled", name);
+ }
+#endif
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "QAT_SYM_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+
+ internals->dev_id = cryptodev->data->dev_id;
+
+ capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+ capabilities = capa_info.data;
+ capa_size = capa_info.size;
+
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ capa_size, rte_socket_id(), 0);
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities, "
+ "destroying PMD for %s",
+ name);
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+
+ memcpy(internals->capa_mz->addr, capabilities, capa_size);
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+ while (1) {
+ if (qat_dev_cmd_param[i].name == NULL)
+ break;
+ if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+ internals->min_enq_burst_threshold =
+ qat_dev_cmd_param[i].val;
+ i++;
+ }
+
+ internals->service_type = QAT_SERVICE_SYMMETRIC;
+ qat_pci_dev->sym_dev = internals;
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->dev_id);
+
+ return 0;
+
+error:
+#ifdef RTE_LIB_SECURITY
+ rte_free(cryptodev->security_ctx);
+ cryptodev->security_ctx = NULL;
+#endif
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->sym_rte_dev, 0,
+ sizeof(qat_dev_instance->sym_rte_dev));
+
+ return ret;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+ rte_free(cryptodev->security_ctx);
+ cryptodev->security_ctx = NULL;
+#endif
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+ struct qat_cryptodev_private *internals = dev->data->dev_private;
+ enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+ struct qat_crypto_gen_dev_ops *gen_dev_ops =
+ &qat_sym_gen_dev_ops[qat_dev_gen];
+ struct qat_qp *qp;
+ struct qat_sym_session *ctx;
+ struct qat_sym_dp_ctx *dp_ctx;
+
+ if (!gen_dev_ops->set_raw_dp_ctx) {
+ QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+ qat_dev_gen);
+ return -ENOTSUP;
+ }
+
+ qp = dev->data->queue_pairs[qp_id];
+ dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
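+ /* The driver context lives in the drv_ctx_data flexible array directly
+ * after raw_dp_ctx, so the memset below clears both in one go.
+ */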
+ if (!is_update) {
+ memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+ sizeof(struct qat_sym_dp_ctx));
+ raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+ dp_ctx->tail = qp->tx_q.tail;
+ dp_ctx->head = qp->rx_q.head;
+ dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+ }
+
+ if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+ return -EINVAL;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session_ctx.crypto_sess, qat_sym_driver_id);
+
+ dp_ctx->session = ctx;
+
+ return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
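+
+/* Typical application-side flow, as a sketch (the rte_cryptodev_* calls below
+ * are the public cryptodev API, not part of this file; variable names are
+ * illustrative):
+ *   ctx_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
+ *   raw_dp_ctx = rte_zmalloc(NULL, ctx_size, 0);
+ *   rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_dp_ctx,
+ *           RTE_CRYPTO_OP_WITH_SESSION, session_ctx, 0);
+ */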
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct qat_sym_dp_ctx);
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ qat_sym_driver_id);
new file mode 100644
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_net_crc.h>
+#endif
+
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
+#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed to be the max length for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0)
+#define QAT_SYM_CAP_VALID (1 << 31)
+
+/* Macro to add a capability */
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_##n, \
+ b, d \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_##n, \
+ b, k, d, a, i \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_##n, \
+ b, k, d, a, i \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i) \
+ { \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_##n, \
+ b, k, i \
+ }, } \
+ }, } \
+ }
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
+/* Maximum data length for single pass GMAC: 2^14-1 */
+#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+ union {
+ /* Used for Single-Pass AES-GMAC only */
+ struct {
+ struct icp_qat_hw_cipher_algo_blk cd_cipher
+ __rte_packed __rte_cache_aligned;
+ phys_addr_t cd_phys_addr;
+ } spc_gmac;
+ } opt;
+};
+
+struct qat_sym_dp_ctx {
+ struct qat_sym_session *session;
+ uint32_t tail;
+ uint32_t head;
+ uint16_t cached_enqueue;
+ uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+/** Encrypt a single partial block.
+ * Depends on OpenSSL libcrypto.
+ * Uses ECB+XOR to do CFB encryption; same result, more performant.
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_encrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst after post-process:",
+ dst, last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+#ifdef RTE_LIB_SECURITY
+static inline void
+qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint32_t crc_data_ofs, crc_data_len, crc;
+ uint8_t *crc_data;
+
+ if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
+ sym_op->auth.data.length != 0) {
+
+ crc_data_ofs = sym_op->auth.data.offset;
+ crc_data_len = sym_op->auth.data.length;
+ crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ crc_data_ofs);
+
+ crc = rte_net_crc_calc(crc_data, crc_data_len,
+ RTE_NET_CRC32_ETH);
+
+ if (crc != *(uint32_t *)(crc_data + crc_data_len))
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+}
+
+static inline void
+qat_crc_generate(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint32_t *crc, crc_data_len;
+ uint8_t *crc_data;
+
+ if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
+ sym_op->auth.data.length != 0 &&
+ sym_op->m_src->nb_segs == 1) {
+
+ crc_data_len = sym_op->auth.data.length;
+ crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ sym_op->auth.data.offset);
+ crc = (uint32_t *)(crc_data + crc_data_len);
+ *crc = rte_net_crc_calc(crc_data, crc_data_len,
+ RTE_NET_CRC32_ETH);
+ }
+}
+
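+/* For DOCSIS security ops the CRC is generated over the plaintext, so it has
+ * to be computed here, before the ops are enqueued for encryption.
+ */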
+static inline void
+qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op;
+ struct qat_sym_session *ctx;
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = (struct rte_crypto_op *)ops[i];
+
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ ctx = (struct qat_sym_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+
+ if (ctx == NULL || ctx->bpi_ctx == NULL)
+ continue;
+
+ qat_crc_generate(ctx, op);
+ }
+ }
+}
+#endif
+
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+ uint64_t *dequeue_err_count __rte_unused)
+{
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_sym_session *sess;
+ uint8_t is_docsis_sec;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+#ifdef RTE_LIB_SECURITY
+ if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ /*
+ * Assuming at this point that if it's a security
+ * op, it is for DOCSIS
+ */
+ sess = (struct qat_sym_session *)
+ get_sec_session_private_data(
+ rx_op->sym->sec_session);
+ is_docsis_sec = 1;
+ } else
+#endif
+ {
+ sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ qat_sym_driver_id);
+ is_docsis_sec = 0;
+ }
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (sess->bpi_ctx) {
+ qat_bpicipher_postprocess(sess, rx_op);
+#ifdef RTE_LIB_SECURITY
+ if (is_docsis_sec)
+ qat_crc_verify(sess, rx_op);
+#endif
+ }
+ }
+
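+ /* Single-pass GMAC keeps the hash key in the cipher content descriptor
+ * held by the op cookie; wipe it once the response has been handled.
+ */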
+ if (sess->is_single_pass_gmac) {
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *) op_cookie;
+ memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
+ sess->auth_key_length);
+ }
+
+ *op = (void *)rx_op;
+
+ return 1;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+ struct qat_sym_session *ctx,
+ struct rte_crypto_vec *vec, uint32_t vec_len,
+ struct rte_crypto_va_iova_ptr *cipher_iv,
+ struct rte_crypto_va_iova_ptr *auth_iv,
+ struct rte_crypto_va_iova_ptr *aad,
+ struct rte_crypto_va_iova_ptr *digest)
+{
+ uint32_t i;
+
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ for (i = 0; i < vec_len; i++)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+ if (cipher_iv && ctx->cipher_iv.length > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+ ctx->cipher_iv.length);
+ if (auth_iv && ctx->auth_iv.length > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+ ctx->auth_iv.length);
+ if (aad && ctx->aad_len > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+ ctx->aad_len);
+ if (digest && ctx->digest_length > 0)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+ ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+ struct qat_sym_session *ctx __rte_unused,
+ struct rte_crypto_vec *vec __rte_unused,
+ uint32_t vec_len __rte_unused,
+ struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+ struct rte_crypto_va_iova_ptr *aad __rte_unused,
+ struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{}
+#endif
+
+#endif /* _QAT_SYM_H_ */