@@ -549,6 +549,22 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
return data & modulo_mask;
}
+uint16_t
+refactor_qat_enqueue_op_burst(__rte_unused void *qp,
+ __rte_unused qat_op_build_request_t op_build_request,
+ __rte_unused void **ops, __rte_unused uint16_t nb_ops)
+{
+ return 0;
+}
+
+uint16_t
+refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
+ __rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+ __rte_unused uint16_t nb_ops)
+{
+ return 0;
+}
+
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
@@ -36,6 +36,51 @@ struct qat_queue {
/* number of responses processed since last CSR head write */
};
+/**
+ * Type of the qat_op_build_request_t function pointer, passed as an argument
+ * to the enqueue op burst, where a build request is assigned based on the
+ * type of crypto op.
+ *
+ * @param in_op
+ * An input op pointer
+ * @param out_msg
+ * out_msg pointer
+ * @param op_cookie
+ * op cookie pointer
+ * @param opaque
+ * opaque data that may be used to store context between
+ * two enqueue operations.
+ * @param dev_gen
+ * qat device gen id
+ * @return
+ * - 0 if the crypto request is built successfully,
+ * - error code otherwise
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+ void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Type of the qat_op_dequeue_t function pointer, passed as an argument
+ * to the dequeue op burst, where a dequeue op is assigned based on the
+ * type of crypto op.
+ *
+ * @param op
+ * An input op pointer
+ * @param resp
+ * qat response msg pointer
+ * @param op_cookie
+ * op cookie pointer
+ * @param dequeue_err_count
+ * dequeue error counter
+ * @return
+ * - 0 if the dequeue op is successful
+ * - error code otherwise
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+ uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE 2
+
struct qat_qp {
void *mmap_bar_addr;
struct qat_queue tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
+ uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
enum qat_device_gen qat_dev_gen;
enum qat_service_type service_type;
struct qat_pci_device *qat_dev;
@@ -77,6 +123,14 @@ struct qat_qp_config {
const char *service_str;
};
+uint16_t
+refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+ void **ops, uint16_t nb_ops);
+
+uint16_t
+refactor_qat_dequeue_op_burst(void *qp, void **ops,
+ qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
+
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
@@ -456,6 +456,49 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
return 0;
}
+/*
+ * Asym build request refactor function template,
+ * will be removed in the later patchset.
+ */
+static __rte_always_inline int
+refactor_qat_asym_build_request(__rte_unused void *in_op,
+ __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
+ __rte_unused uint64_t *opaque,
+ __rte_unused enum qat_device_gen dev_gen)
+{
+ return 0;
+}
+
+/*
+ * Asym process response refactor function template,
+ * will be removed in the later patchset.
+ */
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+ __rte_unused uint8_t *resp,
+ __rte_unused void *op_cookie,
+ __rte_unused uint64_t *dequeue_err_count)
+{
+ return 0;
+}
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return refactor_qat_enqueue_op_burst(qp,
+ refactor_qat_asym_build_request,
+ (void **)ops, nb_ops);
+}
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+ refactor_qat_asym_process_response, nb_ops);
+}
+
int
qat_asym_build_request(void *in_op,
uint8_t *out_msg,
@@ -92,4 +92,19 @@ void
qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
void *op_cookie);
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+ __rte_unused uint8_t *resp,
+ __rte_unused void *op_cookie,
+ __rte_unused uint64_t *dequeue_err_count);
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
#endif /* _QAT_ASYM_H_ */
@@ -210,6 +210,43 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
ICP_QAT_FW_LA_NO_PROTO);
}
+/*
+ * Sym build request refactor function template,
+ * will be removed in the later patchset.
+ */
+static __rte_always_inline int
+refactor_qat_sym_build_request(__rte_unused void *in_op,
+ __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
+ __rte_unused uint64_t *opaque,
+ __rte_unused enum qat_device_gen dev_gen)
+{
+ return 0;
+}
+
+/*
+ * Sym enqueue burst refactor function template,
+ * will be removed in the later patchset.
+ */
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
+ (void **)ops, nb_ops);
+}
+
+/*
+ * Sym dequeue burst refactor function template,
+ * will be removed in the later patchset.
+ */
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+ refactor_qat_sym_process_response, nb_ops);
+}
+
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
void *op_cookie, enum qat_device_gen qat_dev_gen)
@@ -54,6 +54,22 @@ struct qat_sym_op_cookie {
} opt;
};
+static __rte_always_inline int
+refactor_qat_sym_process_response(__rte_unused void **op,
+ __rte_unused uint8_t *resp, __rte_unused void *op_cookie,
+ __rte_unused uint64_t *dequeue_err_count)
+{
+ return 0;
+}
+
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -55,6 +55,11 @@
#define QAT_SESSION_IS_SLICE_SET(flags, flag) \
(!!((flags) & (flag)))
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_NONE = 0,
QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -107,6 +112,7 @@ struct qat_sym_session {
/* Some generations need different setup of counter */
uint32_t slice_types;
enum qat_sym_proto_flag qat_proto_flag;
+ qat_sym_build_request_t build_request[2];
};
int