@@ -540,6 +540,10 @@ CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n
CONFIG_RTE_LIBRTE_COMPRESSDEV=y
CONFIG_RTE_COMPRESS_MAX_DEVS=64
+# Compile PMD for ISA-L device
+#
+CONFIG_RTE_LIBRTE_PMD_ISAL=n
+
#
# Compile generic security library
#
@@ -10,6 +10,8 @@ DIRS-y += net
DEPDIRS-net := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += bbdev
DEPDIRS-bbdev := bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress
+DEPDIRS-compress := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
DEPDIRS-crypto := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
new file mode 100644
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isa-l
+
+include $(RTE_SDK)/mk/rte.subdir.mk
+
new file mode 100644
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_isal_comp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# external library dependencies
+LDLIBS += -lisal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_isa-l_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isa-l_compress_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isa-l_compress_pmd_ops.c
+
+# export include files
+include $(RTE_SDK)/mk/rte.lib.mk
new file mode 100644
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <isa-l.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+
+#include "isa-l_compress_pmd_private.h"
+
+#define RTE_COMP_ISAL_WINDOW_SIZE 32768
+#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
+#define RTE_COMP_ISAL_LEVEL_ONE 1
+#define RTE_COMP_ISAL_LEVEL_TWO 2
+#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 only */
+
+static uint8_t compdev_driver_id;
+int isal_logtype_driver;
+
+/* Verify and set session parameters */
+int
+isal_comp_set_session_parameters(struct isal_comp_session *sess,
+ const struct rte_comp_xform *xform)
+{
+ if (xform == NULL)
+ return -EINVAL;
+
+ /* Check for chained xforms */
+ if (xform->next != NULL)
+ return -ENOTSUP;
+
+ /* Set compression session variables*/
+ if (xform->type == RTE_COMP_COMPRESS) {
+ /* Set session type */
+ sess->type = RTE_COMP_COMPRESS;
+
+ /* Set session algorithm */
+ if (xform->compress.algo != RTE_COMP_DEFLATE) {
+ if (xform->compress.algo == RTE_COMP_NULL) {
+ ISAL_PMD_LOG(ERR, "By-pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ sess->compress.algo = RTE_COMP_DEFLATE;
+
+
+ /* Set session checksum */
+ if (xform->compress.chksum == RTE_COMP_NONE)
+ sess->compress.chksum = RTE_COMP_NONE;
+ else if (xform->compress.chksum == RTE_COMP_ADLER32)
+ sess->compress.chksum = RTE_COMP_ADLER32;
+ else if (xform->compress.chksum == RTE_COMP_CRC32)
+ sess->compress.chksum = RTE_COMP_CRC32;
+ else {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set session window size, 32K supported */
+ if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ sess->compress.window_size = RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set session huffman type */
+ if (xform->compress.deflate.huffman == RTE_COMP_DEFAULT)
+ sess->compress.deflate.huffman = RTE_COMP_DEFAULT;
+ else if (xform->compress.deflate.huffman == RTE_COMP_FIXED)
+ sess->compress.deflate.huffman = RTE_COMP_FIXED;
+ else if (xform->compress.deflate.huffman == RTE_COMP_DYNAMIC)
+ sess->compress.deflate.huffman = RTE_COMP_DYNAMIC;
+ else {
+ ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set session level */
+		/* Checking compliance with compressdev API, -1 <= level <= 9 */
+ if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
+ xform->compress.level > RTE_COMP_LEVEL_MAX) {
+ ISAL_PMD_LOG(ERR, "Compression level out of range\n");
+ return -EINVAL;
+ }
+ /* Check for Compressdev API level 0, No compression
+ * not supported in ISA-L
+ */
+ else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
+ ISAL_PMD_LOG(ERR, "No Compression not supported\n");
+ return -ENOTSUP;
+ }
+ /* If using fixed huffman code level must be 0 */
+ else if (sess->compress.deflate.huffman == RTE_COMP_FIXED) {
+			ISAL_PMD_LOG(INFO, "ISA-L level 0 used due to"
+						" fixed huffman code\n");
+ sess->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
+ } else {
+ /* Mapping API levels to ISA-L levels 1,2 & 3 */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ /* Default is 1 if not using fixed huffman */
+ sess->compress.level = RTE_COMP_ISAL_LEVEL_ONE;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ sess->compress.level = RTE_COMP_ISAL_LEVEL_ONE;
+ break;
+ case RTE_COMP_ISAL_LEVEL_TWO:
+ sess->compress.level = RTE_COMP_ISAL_LEVEL_TWO;
+ break;
+ default: /* Level 3 or higher */
+ if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX512F))
+ sess->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ else{
+ ISAL_PMD_LOG(INFO, "ISA-L level 3 "
+ "optimized for AVX512 only,"
+ " level changed to 2\n");
+ sess->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ }
+ }
+ }
+ }
+
+ /* Set decompression session variables */
+ else if (xform->type == RTE_COMP_DECOMPRESS) {
+
+ /* Set session type */
+ sess->type = RTE_COMP_DECOMPRESS;
+
+ /* Set session algorithm */
+ if (xform->decompress.algo != RTE_COMP_DEFLATE) {
+ if (xform->decompress.algo == RTE_COMP_NULL) {
+ ISAL_PMD_LOG(ERR, "By pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ sess->decompress.algo = RTE_COMP_DEFLATE;
+
+ /* Set session checksum */
+ if (xform->decompress.chksum == RTE_COMP_NONE)
+ sess->decompress.chksum = RTE_COMP_NONE;
+ else if (xform->decompress.chksum == RTE_COMP_ADLER32)
+ sess->decompress.chksum = RTE_COMP_ADLER32;
+ else if (xform->decompress.chksum == RTE_COMP_CRC32)
+ sess->decompress.chksum = RTE_COMP_CRC32;
+ else {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set session window size, 32K supported */
+ if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ sess->decompress.window_size = ISAL_DEF_HIST_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
+/* Stateless Compression Function */
+static int
+process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
+ const struct isal_comp_session *sess)
+{
+ int ret = 0;
+
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Initialize compression stream */
+ isal_deflate_stateless_init(qp->stream);
+ qp->stream->flush = NO_FLUSH;
+
+ /* Set op checksum, none by default */
+ if (sess->compress.chksum == RTE_COMP_CRC32)
+ qp->stream->gzip_flag = IGZIP_GZIP;
+ else if (sess->compress.chksum == RTE_COMP_ADLER32)
+ qp->stream->gzip_flag = IGZIP_ZLIB;
+
+ /* set op level & intermediate level buffer */
+ if (sess->compress.level == RTE_COMP_ISAL_LEVEL_ZERO) {
+ qp->stream->level = ISAL_DEF_MIN_LEVEL;
+ qp->stream->level_buf_size = ISAL_DEF_LVL0_DEFAULT;
+ } else if (sess->compress.level == RTE_COMP_ISAL_LEVEL_ONE) {
+ qp->stream->level = RTE_COMP_ISAL_LEVEL_ONE;
+ qp->stream->level_buf_size = ISAL_DEF_LVL1_DEFAULT;
+ } else if (sess->compress.level == RTE_COMP_ISAL_LEVEL_TWO) {
+ qp->stream->level = RTE_COMP_ISAL_LEVEL_TWO;
+ qp->stream->level_buf_size = ISAL_DEF_LVL2_DEFAULT;
+ } else {
+ qp->stream->level = ISAL_DEF_MAX_LEVEL;
+ qp->stream->level_buf_size = ISAL_DEF_LVL3_DEFAULT;
+ }
+
+ /* Point compression stream structure to input/output buffers */
+ qp->stream->avail_in = op->src.length;
+ qp->stream->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+ qp->stream->avail_out = op->m_dst->data_len;
+ qp->stream->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+ qp->stream->end_of_stream = 1;
+
+ /* Set op huffman code */
+ if (sess->compress.deflate.huffman == RTE_COMP_FIXED)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_STATIC);
+ else if (sess->compress.deflate.huffman == RTE_COMP_DEFAULT)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+ /* Dynamically change the huffman code to suit the input data */
+ else if (sess->compress.deflate.huffman == RTE_COMP_DYNAMIC) {
+ struct isal_hufftables hufftable;
+ struct isal_huff_histogram histogram;
+ memset(&histogram, 0, sizeof(struct isal_huff_histogram));
+
+ isal_update_histogram(qp->stream->next_in, qp->stream->avail_in,
+ &histogram);
+ isal_create_hufftables(&hufftable, &histogram);
+ isal_deflate_set_hufftables(qp->stream, &hufftable,
+ IGZIP_HUFFTABLE_CUSTOM);
+ }
+
+ /* Execute compression operation */
+ ret = isal_deflate_stateless(qp->stream);
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->stream->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != COMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ op->consumed = op->src.length - qp->stream->avail_in;
+ op->produced = qp->stream->total_out;
+
+ return ret;
+}
+
+/* Stateless Decompression Function */
+static int
+process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
+ const struct isal_comp_session *sess)
+{
+ int ret = 0;
+
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Initialize decompression state */
+ isal_inflate_init(qp->state);
+
+ /* Set op checksum, none by default */
+ if (sess->decompress.chksum == RTE_COMP_CRC32)
+ qp->state->crc_flag = ISAL_GZIP;
+ else if (sess->decompress.chksum == RTE_COMP_ADLER32)
+ qp->state->crc_flag = ISAL_ZLIB;
+
+ /* Point decompression state structure to input/output buffers */
+ qp->state->avail_in = op->src.length;
+ qp->state->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+ qp->state->avail_out = op->m_dst->data_len;
+ qp->state->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+
+ /* Execute decompression operation */
+ ret = isal_inflate_stateless(qp->state);
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->state->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != ISAL_DECOMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ op->consumed = op->src.length - qp->state->avail_in;
+ op->produced = qp->state->total_out;
+
+ return ret;
+}
+
+/* Process compression operation */
+static int
+process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
+ struct isal_comp_session *sess)
+{
+ switch (sess->type) {
+ case RTE_COMP_COMPRESS:
+ process_isal_deflate(op, qp, sess);
+ break;
+ case RTE_COMP_DECOMPRESS:
+ process_isal_inflate(op, qp, sess);
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static struct isal_comp_session *
+get_session(struct rte_comp_op *op)
+{
+ struct isal_comp_session *sess = NULL;
+
+ if (likely(op->session != NULL))
+ sess = (struct isal_comp_session *)
+ get_session_private_data(op->session, compdev_driver_id);
+
+ return sess;
+}
+
+/* Enqueue burst */
+static uint16_t
+isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_session *sess;
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t i;
+ int retval;
+ uint16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
+
+ for (i = 0; i < num_enq; i++) {
+ sess = get_session(ops[i]);
+ if (unlikely(sess == NULL)) {
+ ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.enqueue_err_count++;
+ break;
+ }
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ break;
+ }
+
+ if (ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ qp->qp_stats.enqueue_err_count++;
+ break;
+ }
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops, i,
+ NULL);
+ qp->num_free_elements -= retval;
+ qp->qp_stats.enqueued_count += retval;
+
+ return retval;
+}
+
+/* Dequeue burst */
+static uint16_t
+isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
+ nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Create ISA-L compression device */
+static int
+compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+ struct isal_comp_private *internals;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
+ return -EFAULT;
+ }
+
+ dev->driver_id = compdev_driver_id;
+ dev->dev_ops = isal_compress_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
+ dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_COMP_FF_STATEFUL_COMPRESSION |
+ RTE_COMP_FF_STATEFUL_DECOMPRESSION |
+ RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMPDEV_FF_CPU_AVX512 |
+ RTE_COMPDEV_FF_CPU_AVX2 |
+ RTE_COMPDEV_FF_CPU_AVX |
+ RTE_COMPDEV_FF_CPU_SSE;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+}
+
+
+/* Initialise ISA-L compression device */
+static int
+compdev_isal_probe(struct rte_vdev_device *dev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ sizeof(struct isal_comp_private),
+ rte_socket_id(),
+ RTE_COMPRESSDEV_PMD_DEFAULT_MAX_NB_QPS
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ ISAL_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n", args);
+ return -EINVAL;
+ }
+
+ return compdev_isal_create(name, dev, &init_params);
+}
+
+static int
+compdev_isal_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compdev);
+}
+
+static struct rte_vdev_driver compdev_isal_pmd_drv = {
+ .probe = compdev_isal_probe,
+ .remove = compdev_isal_remove_dev,
+};
+
+static struct compressdev_driver isal_comp_drv;
+
+RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_COMPRESSDEV_DRIVER(isal_comp_drv, compdev_isal_pmd_drv,
+ compdev_driver_id);
+
+RTE_INIT(isal_init_log);
+
+static void
+isal_init_log(void)
+{
+ isal_logtype_driver = rte_log_register("comp_isal");
+ if (isal_logtype_driver >= 0)
+ rte_log_set_level(isal_logtype_driver, RTE_LOG_DEBUG);
+}
new file mode 100644
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_compressdev_pmd.h>
+
+#include "isa-l_compress_pmd_private.h"
+
+static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
+ {
+ .algo = RTE_COMP_DEFLATE,
+	.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
+					RTE_COMP_FF_CRC32_CHECKSUM,
+ .window_size = {
+ .min = 15,
+ .max = 15,
+ .increment = 0
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+isal_comp_pmd_config(__rte_unused struct rte_compressdev *dev,
+ __rte_unused struct rte_compressdev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+isal_comp_pmd_close(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+isal_comp_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+isal_comp_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = isal_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp == NULL)
+ return -EINVAL;
+
+	/* Free the level buffer hanging off the stream before the stream
+	 * itself; rte_free(NULL) is a no-op so no guards are needed for
+	 * the top-level allocations.
+	 */
+	if (qp->stream != NULL)
+		rte_free(qp->stream->level_buf);
+
+	rte_free(qp->stream);
+	rte_free(qp->state);
+	rte_free(qp);
+	dev->data->queue_pairs[qp_id] = NULL;
+
+	return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+ struct isal_comp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "isal_compression_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+			ISAL_PMD_LOG(INFO,
+				"Reusing existing ring %s for processed packets",
+				qp->name);
+ return r;
+ }
+
+ ISAL_PMD_LOG(ERR,
+ "Unable to reuse existing ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/* Setup a queue pair */
+static int
+isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct isal_comp_private *internals = dev->data->dev_private;
+ struct isal_comp_qp *qp = NULL;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ ISAL_PMD_LOG(ERR, "Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ isal_comp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+	/* Initialize memory for compression stream structure */
+	qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
+			sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp->stream == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate compression stream memory");
+		goto qp_setup_cleanup;
+	}
+
+	/* Initialize memory for compression level buffer */
+	qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
+			ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp->stream->level_buf == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate compression level buffer");
+		goto qp_setup_cleanup;
+	}
+
+	/* Initialize memory for decompression state structure */
+	qp->state = rte_zmalloc_socket("Isa-l decompression state",
+			sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp->state == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate decompression state memory");
+		goto qp_setup_cleanup;
+	}
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+	/* Free all qp-owned allocations, not just the qp struct itself;
+	 * rte_free(NULL) is a no-op, only the level_buf deref needs a guard.
+	 */
+	if (qp->stream != NULL)
+		rte_free(qp->stream->level_buf);
+	rte_free(qp->stream);
+	rte_free(qp->state);
+	rte_free(qp);
+	dev->data->queue_pairs[qp_id] = NULL;
+
+	return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+isal_comp_pmd_qp_count(struct rte_compressdev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the NULL comp session structure */
+static unsigned
+isal_comp_pmd_session_get_size(struct rte_compressdev *dev __rte_unused)
+{
+ return sizeof(struct isal_comp_session);
+}
+
+/** Configure an isal comp session from a comp xform chain */
+static int
+isal_comp_pmd_session_configure(struct rte_compressdev *dev,
+ struct rte_comp_xform *xform,
+ struct rte_comp_session *sess,
+ struct rte_mempool *mp)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ ISAL_PMD_LOG(ERR, "Invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ ISAL_PMD_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = isal_comp_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ ISAL_PMD_LOG(ERR, "Failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+isal_comp_pmd_session_clear(struct rte_compressdev *dev,
+ struct rte_comp_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct isal_comp_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_compressdev_ops isal_pmd_ops = {
+ .dev_configure = isal_comp_pmd_config,
+ .dev_start = isal_comp_pmd_start,
+ .dev_stop = isal_comp_pmd_stop,
+ .dev_close = isal_comp_pmd_close,
+
+ .stats_get = isal_comp_pmd_stats_get,
+ .stats_reset = isal_comp_pmd_stats_reset,
+
+ .dev_infos_get = isal_comp_pmd_info_get,
+
+ .queue_pair_setup = isal_comp_pmd_qp_setup,
+ .queue_pair_release = isal_comp_pmd_qp_release,
+ .queue_pair_count = isal_comp_pmd_qp_count,
+
+ .session_get_size = isal_comp_pmd_session_get_size,
+ .session_configure = isal_comp_pmd_session_configure,
+ .session_clear = isal_comp_pmd_session_clear
+};
+
+struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;
new file mode 100644
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ISAL_COMP_PMD_PRIVATE_H_
+#define _ISAL_COMP_PMD_PRIVATE_H_
+
+#include <isa-l.h>
+
+#define COMPDEV_NAME_ISAL_PMD compress_isal
+/**< ISA-L comp PMD device name */
+
+extern int isal_logtype_driver;
+#define ISAL_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, isal_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+/** private data structure for each isa-l comp device */
+struct isal_comp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** ISA-L comp queue pair */
+struct isal_comp_qp {
+ /* Queue Pair Identifier */
+ uint16_t id;
+ /* Unique Queue Pair Name */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /* Ring for placing process packets */
+ struct rte_ring *processed_pkts;
+ /* Queue pair statistics */
+ struct rte_compressdev_stats qp_stats;
+ /* Compression stream information*/
+ struct isal_zstream *stream;
+ /* Decompression state information*/
+ struct inflate_state *state;
+ /* Number of free elements on ring */
+ uint16_t num_free_elements;
+} __rte_cache_aligned;
+
+/** ISA-L comp private session structure */
+struct isal_comp_session {
+ enum rte_comp_xform_type type;
+ struct rte_comp_compress_xform compress;
+ struct rte_comp_decompress_xform decompress;
+} __rte_cache_aligned;
+
+/** Set and validate NULL comp session parameters */
+extern int
+isal_comp_set_session_parameters(struct isal_comp_session *sess,
+ const struct rte_comp_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_compressdev_ops *isal_compress_pmd_ops;
+
+#endif /* _ISAL_COMP_PMD_PRIVATE_H_ */
new file mode 100644
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
@@ -41,7 +41,9 @@ enum rte_comp_op_status {
/** Compression Algorithms */
enum rte_comp_algorithm {
- RTE_COMP_NULL = 0,
+ RTE_COMP_UNSPECIFIED = 0,
+ /** No Compression algorithm */
+ RTE_COMP_NULL,
/**< No compression.
* Pass-through, data is copied unchanged from source buffer to
* destination buffer.
@@ -94,7 +94,7 @@ struct rte_compressdev_capabilities {
/** Macro used at end of comp PMD list */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
- { RTE_COMP_ALGO_LIST_END }
+ { RTE_COMP_UNSPECIFIED }
/**
* compression device supported feature flags
@@ -218,6 +218,12 @@ endif # CONFIG_RTE_LIBRTE_DPAA_BUS
endif # CONFIG_RTE_LIBRTE_CRYPTODEV
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += -lrte_pmd_isal_comp
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += -lisal
+endif # CONFIG_RTE_LIBRTE_COMPRESSDEV
+
+
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += -lrte_pmd_skeleton_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += -lrte_pmd_sw_event