@@ -1221,6 +1221,9 @@ F: drivers/crypto/virtio/
F: doc/guides/cryptodevs/virtio.rst
F: doc/guides/cryptodevs/features/virtio.ini
+ZTE Storage Data Accelerator
+M: Hanxiao Li <li.hanxiao@zte.com.cn>
+F: drivers/crypto/zsda/
Compression Drivers
-------------------
@@ -97,17 +97,39 @@
enum zsda_service_type {
ZSDA_SERVICE_COMPRESSION = 0,
ZSDA_SERVICE_DECOMPRESSION,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT,
+ ZSDA_SERVICE_HASH_ENCODE = 6,
ZSDA_SERVICE_INVALID,
};
#define ZSDA_MAX_SERVICES (ZSDA_SERVICE_INVALID)
+#define ZSDA_OPC_EC_AES_XTS_256 0x00 /* Encrypt AES-XTS-256 */
+#define ZSDA_OPC_EC_AES_XTS_512 0x01 /* Encrypt AES-XTS-512 */
+#define ZSDA_OPC_EC_SM4_XTS_256 0x02 /* Encrypt SM4-XTS-256 */
+#define ZSDA_OPC_DC_AES_XTS_256 0x08 /* Decrypt AES-XTS-256 */
+#define ZSDA_OPC_DC_AES_XTS_512 0x09 /* Decrypt AES-XTS-512 */
+#define ZSDA_OPC_DC_SM4_XTS_256 0x0A /* Decrypt SM4-XTS-256 */
#define ZSDA_OPC_COMP_GZIP 0x10 /* Encomp deflate-Gzip */
#define ZSDA_OPC_COMP_ZLIB 0x11 /* Encomp deflate-Zlib */
#define ZSDA_OPC_DECOMP_GZIP 0x18 /* Decompinfalte-Gzip */
#define ZSDA_OPC_DECOMP_ZLIB 0x19 /* Decompinfalte-Zlib */
+#define ZSDA_OPC_HASH_SHA1 0x20 /* Hash-SHA1 */
+#define ZSDA_OPC_HASH_SHA2_224 0x21 /* Hash-SHA2-224 */
+#define ZSDA_OPC_HASH_SHA2_256 0x22 /* Hash-SHA2-256 */
+#define ZSDA_OPC_HASH_SHA2_384 0x23 /* Hash-SHA2-384 */
+#define ZSDA_OPC_HASH_SHA2_512 0x24 /* Hash-SHA2-512 */
+#define ZSDA_OPC_HASH_SM3 0x25 /* Hash-SM3 */
#define ZSDA_OPC_INVALID 0xff
+#define ZSDA_DIGEST_SIZE_SHA1 (20)
+#define ZSDA_DIGEST_SIZE_SHA2_224 (28)
+#define ZSDA_DIGEST_SIZE_SHA2_256 (32)
+#define ZSDA_DIGEST_SIZE_SHA2_384 (48)
+#define ZSDA_DIGEST_SIZE_SHA2_512 (64)
+#define ZSDA_DIGEST_SIZE_SM3 (32)
+
#define SET_CYCLE 0xff
#define SET_HEAD_INTI 0x0
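The hash opcodes and digest-size macros added above pair one to one. Purely as an illustration (this helper is not part of the patch and its name is hypothetical), a lookup from hash opcode to digest length could be written with nothing but the macros introduced in this hunk:

static inline uint32_t
zsda_digest_size_sketch(uint8_t opcode)
{
        switch (opcode) {
        case ZSDA_OPC_HASH_SHA1:
                return ZSDA_DIGEST_SIZE_SHA1;
        case ZSDA_OPC_HASH_SHA2_224:
                return ZSDA_DIGEST_SIZE_SHA2_224;
        case ZSDA_OPC_HASH_SHA2_256:
                return ZSDA_DIGEST_SIZE_SHA2_256;
        case ZSDA_OPC_HASH_SHA2_384:
                return ZSDA_DIGEST_SIZE_SHA2_384;
        case ZSDA_OPC_HASH_SHA2_512:
                return ZSDA_DIGEST_SIZE_SHA2_512;
        case ZSDA_OPC_HASH_SM3:
                return ZSDA_DIGEST_SIZE_SM3;
        default:
                return 0; /* not a hash opcode */
        }
}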
@@ -237,9 +259,34 @@ struct zsda_op_cookie {
uint8_t comp_head[COMP_REMOVE_SPACE_LEN];
} __rte_packed;
+#define ZSDA_CIPHER_KEY_MAX_LEN 64
+struct crypto_cfg {
+ uint8_t slba_L[8];
+ uint8_t key[ZSDA_CIPHER_KEY_MAX_LEN];
+ uint8_t lbads : 4;
+ uint8_t resv1 : 4;
+ uint8_t resv2[7];
+ uint8_t slba_H[8];
+ uint8_t resv3[8];
+} __rte_packed;
+
struct compress_cfg {
} __rte_packed;
+struct zsda_wqe_crpt {
+ uint8_t valid;
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t resv[3];
+ uint8_t rx_sgl_type : 4;
+ uint8_t tx_sgl_type : 4;
+ uint64_t rx_addr;
+ uint32_t rx_length;
+ uint64_t tx_addr;
+ uint32_t tx_length;
+ struct crypto_cfg cfg;
+} __rte_packed;
+
struct zsda_wqe_comp {
uint8_t valid;
uint8_t op_code;
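For orientation, here is a minimal sketch (not part of the patch) of how a symmetric-crypto work-queue entry built from struct zsda_wqe_crpt and struct crypto_cfg above might be populated. The function name is hypothetical, and the meaning of the valid marker and whether rx/tx are viewed from the host or the device side are assumptions; only the field layout comes from this hunk.

#include <stdint.h>
#include <string.h>

static void
zsda_fill_wqe_crpt_sketch(struct zsda_wqe_crpt *wqe, uint16_t sid,
                          uint8_t opcode, uint64_t src_iova, uint32_t src_len,
                          uint64_t dst_iova, uint32_t dst_len,
                          const uint8_t *key, size_t key_len)
{
        memset(wqe, 0, sizeof(*wqe));
        wqe->valid = 1;          /* placeholder; real valid/cycle handling is device-specific */
        wqe->op_code = opcode;   /* e.g. ZSDA_OPC_EC_AES_XTS_256 */
        wqe->sid = sid;
        wqe->rx_addr = src_iova; /* assumed: buffer the device reads from */
        wqe->rx_length = src_len;
        wqe->tx_addr = dst_iova; /* assumed: buffer the device writes to */
        wqe->tx_length = dst_len;
        if (key_len <= ZSDA_CIPHER_KEY_MAX_LEN)
                memcpy(wqe->cfg.key, key, key_len);
}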
@@ -281,6 +328,9 @@ struct zsda_common_stat {
enum zsda_algo_core {
ZSDA_CORE_COMP,
ZSDA_CORE_DECOMP,
+ ZSDA_CORE_ENCRY,
+ ZSDA_CORE_DECRY,
+ ZSDA_CORE_HASH,
ZSDA_CORE_INVALID,
};
@@ -7,6 +7,7 @@
#include <stdint.h>
#include "zsda_device.h"
+#include "zsda_qp.h"
/* per-process array of device data */
struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
@@ -306,7 +307,8 @@ zsda_pci_device_release(const struct rte_pci_device *pci_dev)
inst = &zsda_devs[zsda_pci_dev->zsda_dev_id];
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (zsda_pci_dev->comp_dev != NULL) {
+ if ((zsda_pci_dev->sym_dev != NULL) ||
+ (zsda_pci_dev->comp_dev != NULL)) {
ZSDA_LOG(DEBUG, "ZSDA device %s is busy", name);
return -EBUSY;
}
@@ -322,47 +324,12 @@ static int
zsda_pci_dev_destroy(struct zsda_pci_device *zsda_pci_dev,
const struct rte_pci_device *pci_dev)
{
+ zsda_sym_dev_destroy(zsda_pci_dev);
zsda_comp_dev_destroy(zsda_pci_dev);
return zsda_pci_device_release(pci_dev);
}
-int
-zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
- const uint8_t qid, struct qinfo *qcfg)
-{
- struct zsda_admin_req_qcfg req = {0};
- struct zsda_admin_resp_qcfg resp = {0};
- int ret = 0;
- struct rte_pci_device *pci_dev =
- zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
-
- if (qid >= MAX_QPS_ON_FUNCTION) {
- ZSDA_LOG(ERR, "qid beyond limit!");
- return ZSDA_FAILED;
- }
-
- zsda_admin_msg_init(pci_dev);
- req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
- req.qid = qid;
-
- ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
- if (ret) {
- ZSDA_LOG(ERR, "Failed! Send msg");
- return ret;
- }
-
- ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
- if (ret) {
- ZSDA_LOG(ERR, "Failed! Receive msg");
- return ret;
- }
-
- memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
-
- return ZSDA_SUCCESS;
-}
-
static int
zsda_unmask_flr(const struct zsda_pci_device *zsda_pci_dev)
{
@@ -432,6 +399,7 @@ zsda_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return ret;
}
+ ret |= zsda_sym_dev_create(zsda_pci_dev);
ret |= zsda_comp_dev_create(zsda_pci_dev);
if (ret) {
@@ -18,6 +18,13 @@ struct zsda_device_info {
struct rte_pci_device *pci_dev;
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto sym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
struct rte_device comp_rte_dev;
/**< This represents the compression subset of this pci device.
* Register with this rather than with the one in
@@ -27,6 +34,7 @@ struct zsda_device_info {
extern struct zsda_device_info zsda_devs[];
+struct zsda_sym_dev_private;
struct zsda_comp_dev_private;
struct zsda_qp_hw_data {
@@ -64,6 +72,10 @@ struct zsda_pci_device {
struct rte_pci_device *pci_dev;
+ /* Data relating to symmetric crypto service */
+ struct zsda_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+
/* Data relating to compression service */
struct zsda_comp_dev_private *comp_dev;
/**< link back to compressdev private data */
@@ -79,7 +91,10 @@ struct zsda_pci_device *
zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev);
__rte_weak int
-zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
__rte_weak int
zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
@@ -87,9 +102,6 @@ zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
__rte_weak int
zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
-int zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
- const uint8_t qid, struct qinfo *qcfg);
-
int zsda_queue_start(const struct rte_pci_device *pci_dev);
int zsda_queue_stop(const struct rte_pci_device *pci_dev);
int zsda_queue_clear(const struct rte_pci_device *pci_dev);
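zsda_sym_dev_create() and zsda_sym_dev_destroy() are declared __rte_weak, mirroring the existing compression hooks, so the common PCI code can call them even when a sub-driver is not linked in. As a sketch only (whether and where the driver actually supplies such defaults is an assumption), weak no-op fallbacks would look like:

#include <rte_common.h>

__rte_weak int
zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev __rte_unused)
{
        /* no symmetric-crypto sub-driver present: succeed and do nothing */
        return 0;
}

__rte_weak int
zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev __rte_unused)
{
        return 0;
}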
@@ -20,8 +20,11 @@ struct ring_size {
};
struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+ [ZSDA_SERVICE_SYMMETRIC_ENCRYPT] = {128, 16},
+ [ZSDA_SERVICE_SYMMETRIC_DECRYPT] = {128, 16},
[ZSDA_SERVICE_COMPRESSION] = {32, 16},
[ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+ [ZSDA_SERVICE_HASH_ENCODE] = {32, 16},
};
static void
@@ -36,6 +39,43 @@ zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
SET_HEAD_INTI);
}
+static int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+ const uint8_t qid, struct qinfo *qcfg)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ if (qid >= MAX_QPS_ON_FUNCTION) {
+ ZSDA_LOG(ERR, "qid beyond limit!");
+ return ZSDA_FAILED;
+ }
+
+ zsda_admin_msg_init(pci_dev);
+ req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+ req.qid = qid;
+
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ret;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+ return ret;
+ }
+
+ memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+
+ return ZSDA_SUCCESS;
+}
+
int
zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
{
@@ -43,7 +83,7 @@ zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
uint32_t index;
enum zsda_service_type type;
struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
- struct qinfo qcfg;
+ struct qinfo qcfg = {0};
int ret = 0;
for (i = 0; i < zsda_num_used_qps; i++) {
@@ -115,6 +155,30 @@ zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
return min;
}
+uint16_t
+zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev)
+{
+ uint16_t encrypt = zsda_qps_per_service(zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ uint16_t decrypt = zsda_qps_per_service(zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ uint16_t hash =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+ uint16_t min = 0;
+
+ if ((encrypt == MAX_QPS_ON_FUNCTION) ||
+ (decrypt == MAX_QPS_ON_FUNCTION) ||
+ (hash == MAX_QPS_ON_FUNCTION)) {
+ min = MAX_QPS_ON_FUNCTION;
+ } else {
+ min = (encrypt < decrypt) ? encrypt : decrypt;
+ min = (min < hash) ? min : hash;
+ }
+
+ if (min == 0)
+ return MAX_QPS_ON_FUNCTION;
+ return min;
+}
void
zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
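zsda_crypto_max_nb_qps() reports the smallest queue count among the encrypt, decrypt and hash services, presumably because one logical crypto queue pair draws on all three. As an illustration only (the helper name and the way the PMD reaches its zsda_pci_device are assumptions), the crypto PMD could surface that limit through the standard info structure:

#include <rte_cryptodev.h>

static void
zsda_sym_info_get_sketch(struct zsda_pci_device *zsda_pci_dev,
                         struct rte_cryptodev_info *info)
{
        if (info == NULL)
                return;
        /* advertise the per-function queue-pair budget computed above */
        info->max_nb_queue_pairs = zsda_crypto_max_nb_qps(zsda_pci_dev);
}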
@@ -176,8 +176,10 @@ enum rte_crypto_cipher_algorithm {
/**< ShangMi 4 (SM4) algorithm in CTR mode */
RTE_CRYPTO_CIPHER_SM4_OFB,
/**< ShangMi 4 (SM4) algorithm in OFB mode */
- RTE_CRYPTO_CIPHER_SM4_CFB
+ RTE_CRYPTO_CIPHER_SM4_CFB,
/**< ShangMi 4 (SM4) algorithm in CFB mode */
+ RTE_CRYPTO_CIPHER_SM4_XTS
+ /**< ShangMi 4 (SM4) algorithm in XTS mode */
};
/** Symmetric Cipher Direction */
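The new RTE_CRYPTO_CIPHER_SM4_XTS value plugs into the existing symmetric transform API unchanged. A hedged usage sketch (not part of the patch): an XTS key is two concatenated sub-keys, so an SM4-XTS-256 key is 32 bytes in total, and the 16-byte IV carries the XTS tweak; IV_OFFSET below follows the common DPDK convention of storing the IV right after the crypto op.

#include <string.h>
#include <rte_crypto.h>

#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static void
init_sm4_xts_xform(struct rte_crypto_sym_xform *xform,
                   const uint8_t *key, uint16_t key_len)
{
        memset(xform, 0, sizeof(*xform));
        xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        xform->next = NULL;
        xform->cipher.algo = RTE_CRYPTO_CIPHER_SM4_XTS;
        xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
        xform->cipher.key.data = key;       /* two concatenated 128-bit SM4 keys */
        xform->cipher.key.length = key_len; /* 32 for SM4-XTS-256 */
        xform->cipher.iv.offset = IV_OFFSET;
        xform->cipher.iv.length = 16;       /* XTS tweak */
}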