[dpdk-dev,v1,2/6] compress/zlib: add device setup PMD ops

Message ID 1526380346-7386-3-git-send-email-shally.verma@caviumnetworks.com (mailing list archive)
State Changes Requested, archived
Delegated to: Pablo de Lara Guarch
Headers

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Shally Verma May 15, 2018, 10:32 a.m. UTC
  Implement device configure and PMD ops

Signed-off-by: Sunila Sahu <sunila.sahu@caviumnetworks.com>
Signed-off-by: Shally Verma <shally.verma@caviumnetworks.com>
Signed-off-by: Ashish Gupta <ashish.gupta@caviumnetworks.com>
---
 drivers/compress/zlib/zlib_pmd_ops.c     | 238 +++++++++++++++++++++++++++++++
 drivers/compress/zlib/zlib_pmd_private.h |  77 ++++++++++
 2 files changed, 315 insertions(+)
  

Comments

Daly, Lee June 15, 2018, 11:08 a.m. UTC | #1
Hi Shally,
Comments inline.


> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Shally Verma
> Sent: Tuesday, May 15, 2018 11:32 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Cc: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org;
> pathreay@caviumnetworks.com; Sunila Sahu
> <sunila.sahu@caviumnetworks.com>; Ashish Gupta
> <ashish.gupta@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v1 2/6] compress/zlib: add device setup PMD
> ops
> 
>diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
>new file mode 100644
>index 0000000..0bd42f3
>--- /dev/null
>+++ b/drivers/compress/zlib/zlib_pmd_ops.c
>@@ -0,0 +1,238 @@ 
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2018 Cavium Networks
>+ */
>+
>+#include <string.h>
>+
>+#include <rte_common.h>
>+#include <rte_malloc.h>
>+#include <rte_compressdev_pmd.h>
>+
>+#include "zlib_pmd_private.h"
>+
>+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
>+	{   /* Deflate */
>+		.algo = RTE_COMP_ALGO_DEFLATE,
>+		.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
[Lee] The priv_xform structure in this case is not shareable, as it contains your zlib_stream structure, which contains zlib's own z_stream struct. This is not read-only; the contents of this z_stream will be written to, which means it is not shareable across queue pairs or devices.

>+		.window_size = {
>+			.min = 8,
>+			.max = 15,
>+			.increment = 2
>+		},
>+	},
>+
>+	RTE_COMP_END_OF_CAPABILITIES_LIST()
>+
>+};
> +/** Configure device */
> +static int
> +zlib_pmd_config(struct rte_compressdev *dev,
> +		struct rte_compressdev_config *config) {
> +	struct rte_mempool *mp;
> +
> +	struct zlib_private *internals = dev->data->dev_private;
> +	snprintf(internals->mp_name, RTE_MEMPOOL_NAMESIZE,
> +			"stream_mp_%u", dev->data->dev_id);
> +	mp = rte_mempool_create(internals->mp_name,
> +			config->max_nb_priv_xforms + config-
> >max_nb_streams,
> +			sizeof(struct zlib_priv_xform),
> +			0, 0, NULL, NULL, NULL,
> +			NULL, config->socket_id,
> +			0);
[Lee] Could you add a mempool_lookup here to ensure its not already created please.

> +	if (mp == NULL) {
> +		ZLIB_LOG_ERR("Cannot create private xform pool on socket
> %d\n",
> +				config->socket_id);
> +		return -ENOMEM;
> +	}
> +	return 0;
> +}
> +
> +/** Start device */
> +static int
> +zlib_pmd_start(__rte_unused struct rte_compressdev *dev) {
> +	return 0;
> +}
> +
> +/** Stop device */
> +static void
> +zlib_pmd_stop(struct rte_compressdev *dev) {
> +	struct zlib_private *internals = dev->data->dev_private;
> +	struct rte_mempool *mp = rte_mempool_lookup(internals-
> >mp_name);
> +	rte_mempool_free(mp);
> +}
> +
[Lee] I believe it would be better to have the freeing functionality in the pmd_close function, as a user may want to stop a device, without freeing its memory, especially since the start function does nothing here. i.e. if the user stops device then starts again, memory needed has been free'd but not realloc'ed. Hope this makes sense.

> +/** Close device */
> +static int
> +zlib_pmd_close(__rte_unused struct rte_compressdev *dev) {
> +	return 0;
> +}

<...>
> diff --git a/drivers/compress/zlib/zlib_pmd_private.h
> b/drivers/compress/zlib/zlib_pmd_private.h
> new file mode 100644
> index 0000000..d29dc59
> --- /dev/null
> +++ b/drivers/compress/zlib/zlib_pmd_private.h
> @@ -0,0 +1,77 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2017-2018 Cavium Networks  */
> +
> +#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
> +#define _RTE_ZLIB_PMD_PRIVATE_H_
> +
> +#include <zlib.h>
> +#include <rte_comp.h>
> +#include <rte_compressdev.h>
> +#include <rte_compressdev_pmd.h>
> +
> +#define COMPRESSDEV_NAME_ZLIB_PMD	compress_zlib
> +/**< ZLIB PMD device name */
> +
> +#define ZLIB_PMD_MAX_NB_QUEUE_PAIRS	1
> +/**< ZLIB PMD specified queue pairs */
[Lee] Doesn't look like this macro is being used anywhere, may be better to remove this altogether as there is no limit in software for queue pairs.

> +
> +#define DEF_MEM_LEVEL			8
> +
> +int zlib_logtype_driver;
> +#define ZLIB_LOG(level, fmt, args...) \
> +	rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
> +			__func__, ##args)
> +
> +#define ZLIB_LOG_INFO(fmt, args...) \
> +	ZLIB_LOG(INFO, fmt, ## args)
> +#define ZLIB_LOG_ERR(fmt, args...) \
> +	ZLIB_LOG(ERR, fmt, ## args)
> +#define ZLIB_LOG_WARN(fmt, args...) \
> +	ZLIB_LOG(WARNING, fmt, ## args)
[Lee] See previous comments re/ static logging.

> +
> +struct zlib_private {
> +	uint32_t max_nb_queue_pairs;
> +	char mp_name[RTE_MEMPOOL_NAMESIZE];
> +};
> +
Thanks,
Lee.
  
Verma, Shally June 22, 2018, 1:21 p.m. UTC | #2
Hi Lee


>-----Original Message-----
>From: Daly, Lee [mailto:lee.daly@intel.com]
>Sent: 15 June 2018 16:39
>To: Verma, Shally <Shally.Verma@cavium.com>
>Cc: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org; pathreay@caviumnetworks.com; Sahu, Sunila <Sunila.Sahu@cavium.com>;
>Gupta, Ashish <Ashish.Gupta@cavium.com>; De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>Subject: RE: [dpdk-dev] [PATCH v1 2/6] compress/zlib: add device setup PMD ops
>
>External Email
>
>Hi Shally,
>Comments inline.
>
>
>> -----Original Message-----
>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Shally Verma
>> Sent: Tuesday, May 15, 2018 11:32 AM
>> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>> Cc: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org;
>> pathreay@caviumnetworks.com; Sunila Sahu
>> <sunila.sahu@caviumnetworks.com>; Ashish Gupta
>> <ashish.gupta@caviumnetworks.com>
>> Subject: [dpdk-dev] [PATCH v1 2/6] compress/zlib: add device setup PMD
>> ops
>>
>>diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
>>new file mode 100644
>>index 0000000..0bd42f3
>>--- /dev/null
>>+++ b/drivers/compress/zlib/zlib_pmd_ops.c
>>@@ -0,0 +1,238 @@
>>+/* SPDX-License-Identifier: BSD-3-Clause
>>+ * Copyright(c) 2018 Cavium Networks
>>+ */
>>+
>>+#include <string.h>
>>+
>>+#include <rte_common.h>
>>+#include <rte_malloc.h>
>>+#include <rte_compressdev_pmd.h>
>>+
>>+#include "zlib_pmd_private.h"
>>+
>>+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
>>+      {   /* Deflate */
>>+              .algo = RTE_COMP_ALGO_DEFLATE,
>>+              .comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
>[Lee] The priv_xform structure in this case is not shareable, as it contains your zlib_stream structure, which contains zlibs own zstream
>struct. This is not read only, the contents of this zstream will be written to, which means it is not shareable across queue pairs or
>devices.
>
[Shally] Per my understanding, SHAREABLE_PRIV_XFORM here means xform is shareable by all ops in one single enqueue_burst() but not across devices or qps by multiple threads in parallel. Does your implementation support such usage of shareable priv_xforms?

Thanks for review.
Shally

>>+              .window_size = {
>>+                      .min = 8,
>>+                      .max = 15,
>>+                      .increment = 2
>>+              },
>>+      },
>>+
>>+      RTE_COMP_END_OF_CAPABILITIES_LIST()
>>+
>>+};
>> +/** Configure device */
>> +static int
>> +zlib_pmd_config(struct rte_compressdev *dev,
>> +             struct rte_compressdev_config *config) {
>> +     struct rte_mempool *mp;
>> +
>> +     struct zlib_private *internals = dev->data->dev_private;
>> +     snprintf(internals->mp_name, RTE_MEMPOOL_NAMESIZE,
>> +                     "stream_mp_%u", dev->data->dev_id);
>> +     mp = rte_mempool_create(internals->mp_name,
>> +                     config->max_nb_priv_xforms + config-
>> >max_nb_streams,
>> +                     sizeof(struct zlib_priv_xform),
>> +                     0, 0, NULL, NULL, NULL,
>> +                     NULL, config->socket_id,
>> +                     0);
>[Lee] Could you add a mempool_lookup here to ensure its not already created please.
>
>> +     if (mp == NULL) {
>> +             ZLIB_LOG_ERR("Cannot create private xform pool on socket
>> %d\n",
>> +                             config->socket_id);
>> +             return -ENOMEM;
>> +     }
>> +     return 0;
>> +}
>> +
>> +/** Start device */
>> +static int
>> +zlib_pmd_start(__rte_unused struct rte_compressdev *dev) {
>> +     return 0;
>> +}
>> +
>> +/** Stop device */
>> +static void
>> +zlib_pmd_stop(struct rte_compressdev *dev) {
>> +     struct zlib_private *internals = dev->data->dev_private;
>> +     struct rte_mempool *mp = rte_mempool_lookup(internals-
>> >mp_name);
>> +     rte_mempool_free(mp);
>> +}
>> +
>[Lee] I believe it would be better to have the freeing functionality in the pmd_close function, as a user may want to stop a device,
>without freeing its memory, especially since the start function does nothing here. i.e. if the user stops device then starts again,
>memory needed has been free'd but not realloc'ed. Hope this makes sense.
>
>> +/** Close device */
>> +static int
>> +zlib_pmd_close(__rte_unused struct rte_compressdev *dev) {
>> +     return 0;
>> +}
>
><...>
>> diff --git a/drivers/compress/zlib/zlib_pmd_private.h
>> b/drivers/compress/zlib/zlib_pmd_private.h
>> new file mode 100644
>> index 0000000..d29dc59
>> --- /dev/null
>> +++ b/drivers/compress/zlib/zlib_pmd_private.h
>> @@ -0,0 +1,77 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2017-2018 Cavium Networks  */
>> +
>> +#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
>> +#define _RTE_ZLIB_PMD_PRIVATE_H_
>> +
>> +#include <zlib.h>
>> +#include <rte_comp.h>
>> +#include <rte_compressdev.h>
>> +#include <rte_compressdev_pmd.h>
>> +
>> +#define COMPRESSDEV_NAME_ZLIB_PMD    compress_zlib
>> +/**< ZLIB PMD device name */
>> +
>> +#define ZLIB_PMD_MAX_NB_QUEUE_PAIRS  1
>> +/**< ZLIB PMD specified queue pairs */
>[Lee] Doesn't look like this macro is being used anywhere, may be better to remove this altogether as there is no limit in software for
>queue pairs.
>
>> +
>> +#define DEF_MEM_LEVEL                        8
>> +
>> +int zlib_logtype_driver;
>> +#define ZLIB_LOG(level, fmt, args...) \
>> +     rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
>> +                     __func__, ##args)
>> +
>> +#define ZLIB_LOG_INFO(fmt, args...) \
>> +     ZLIB_LOG(INFO, fmt, ## args)
>> +#define ZLIB_LOG_ERR(fmt, args...) \
>> +     ZLIB_LOG(ERR, fmt, ## args)
>> +#define ZLIB_LOG_WARN(fmt, args...) \
>> +     ZLIB_LOG(WARNING, fmt, ## args)
>[Lee] See previous comments re/ static logging.
>
>> +
>> +struct zlib_private {
>> +     uint32_t max_nb_queue_pairs;
>> +     char mp_name[RTE_MEMPOOL_NAMESIZE];
>> +};
>> +
>Thanks,
>Lee.
  
Daly, Lee June 25, 2018, 10:05 a.m. UTC | #3
> >
> >> -----Original Message-----
> >> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Shally Verma
> >> Sent: Tuesday, May 15, 2018 11:32 AM
> >> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> >> Cc: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org;
> >> pathreay@caviumnetworks.com; Sunila Sahu
> >> <sunila.sahu@caviumnetworks.com>; Ashish Gupta
> >> <ashish.gupta@caviumnetworks.com>
> >> Subject: [dpdk-dev] [PATCH v1 2/6] compress/zlib: add device setup
> >> PMD ops
> >>
> >>diff --git a/drivers/compress/zlib/zlib_pmd_ops.c
> >>b/drivers/compress/zlib/zlib_pmd_ops.c
> >>new file mode 100644
> >>index 0000000..0bd42f3
> >>--- /dev/null
> >>+++ b/drivers/compress/zlib/zlib_pmd_ops.c
> >>@@ -0,0 +1,238 @@
> >>+/* SPDX-License-Identifier: BSD-3-Clause
> >>+ * Copyright(c) 2018 Cavium Networks
> >>+ */
> >>+
> >>+#include <string.h>
> >>+
> >>+#include <rte_common.h>
> >>+#include <rte_malloc.h>
> >>+#include <rte_compressdev_pmd.h>
> >>+
> >>+#include "zlib_pmd_private.h"
> >>+
> >>+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[]
> = {
> >>+      {   /* Deflate */
> >>+              .algo = RTE_COMP_ALGO_DEFLATE,
> >>+              .comp_feature_flags =
> RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
> >[Lee] The priv_xform structure in this case is not shareable, as it
> >contains your zlib_stream structure, which contains zlibs own zstream
> >struct. This is not read only, the contents of this zstream will be written to,
> which means it is not shareable across queue pairs or devices.
> >
> [Shally] Per my understanding, SHAREABLE_PRIV_XFORM here means xform
> is shareable by all ops in one single enqueue_burst() but not across devices
> or qps by multiple threads in parallel. Does your implementation support
> such usage of shareable priv_xforms?
> 
> Thanks for review.
> Shally
[Lee]
Hey Shally, I have just clarified this with Fiona and Pablo and the intended use of Shareable priv xforms is to allow a xform to be shared across devices & qps not just all the ops in a burst, yes the ISA-L PMD has a shareable private xform due to all its contents being read only. 

> 
> >>+              .window_size = {
> >>+                      .min = 8,
> >>+                      .max = 15,
> >>+                      .increment = 2
> >>+              },
> >>+      },
> >>+
> >>+      RTE_COMP_END_OF_CAPABILITIES_LIST()
> >>+
  
Verma, Shally June 25, 2018, 10:07 a.m. UTC | #4
HI Lee

>-----Original Message-----
>From: Daly, Lee [mailto:lee.daly@intel.com]
>Sent: 25 June 2018 15:35
>To: Verma, Shally <Shally.Verma@cavium.com>
>Cc: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org; pathreay@caviumnetworks.com; Sahu, Sunila <Sunila.Sahu@cavium.com>;
>Gupta, Ashish <Ashish.Gupta@cavium.com>; De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>Subject: RE: [dpdk-dev] [PATCH v1 2/6] compress/zlib: add device setup PMD ops
>
//snip

>> >>+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[]
>> = {
>> >>+      {   /* Deflate */
>> >>+              .algo = RTE_COMP_ALGO_DEFLATE,
>> >>+              .comp_feature_flags =
>> RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
>> >[Lee] The priv_xform structure in this case is not shareable, as it
>> >contains your zlib_stream structure, which contains zlibs own zstream
>> >struct. This is not read only, the contents of this zstream will be written to,
>> which means it is not shareable across queue pairs or devices.
>> >
>> [Shally] Per my understanding, SHAREABLE_PRIV_XFORM here means xform
>> is shareable by all ops in one single enqueue_burst() but not across devices
>> or qps by multiple threads in parallel. Does your implementation support
>> such usage of shareable priv_xforms?
>>
>> Thanks for review.
>> Shally
>[Lee]
>Hey Shally, I have just clarified this with Fiona and Pablo and the intended use of Shareable priv xforms is to allow a xform to be shared
>across devices & qps not just all the ops in a burst, yes the ISA-L PMD has a shareable private xform due to all its contents being read
>only.
>
[Shally] Ok. Got that. Will change it accordingly.

Thanks
Shally

>>
>> >>+              .window_size = {
>> >>+                      .min = 8,
>> >>+                      .max = 15,
>> >>+                      .increment = 2
>> >>+              },
>> >>+      },
>> >>+
>> >>+      RTE_COMP_END_OF_CAPABILITIES_LIST()
>> >>+
  

Patch

diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
new file mode 100644
index 0000000..0bd42f3
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_ops.c
@@ -0,0 +1,238 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_compressdev_pmd.h>
+
+#include "zlib_pmd_private.h"
+
+/** Capabilities advertised by the ZLIB PMD. */
+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
+	{   /* Deflate */
+		.algo = RTE_COMP_ALGO_DEFLATE,
+		/* The priv_xform embeds a writable z_stream, so it is NOT
+		 * shareable across queue pairs/devices; do not advertise
+		 * RTE_COMP_FF_SHAREABLE_PRIV_XFORM (see list review).
+		 */
+		.comp_feature_flags = 0,
+		.window_size = {
+			/* zlib windowBits range is 8..15; increment must be 1,
+			 * otherwise max = 15 is unreachable from min = 8.
+			 */
+			.min = 8,
+			.max = 15,
+			.increment = 1
+		},
+	},
+
+	RTE_COMP_END_OF_CAPABILITIES_LIST()
+
+};
+
+/** Configure device.
+ *
+ * Creates (or reuses) the per-device mempool that backs private
+ * xforms and streams. Returns 0 on success, -ENOMEM on failure.
+ */
+static int
+zlib_pmd_config(struct rte_compressdev *dev,
+		struct rte_compressdev_config *config)
+{
+	struct rte_mempool *mp;
+	struct zlib_private *internals = dev->data->dev_private;
+
+	snprintf(internals->mp_name, RTE_MEMPOOL_NAMESIZE,
+			"stream_mp_%u", dev->data->dev_id);
+	/* Reuse an existing pool if the device was configured before;
+	 * rte_mempool_create() would fail on a duplicate name.
+	 */
+	mp = rte_mempool_lookup(internals->mp_name);
+	if (mp == NULL)
+		mp = rte_mempool_create(internals->mp_name,
+				config->max_nb_priv_xforms +
+				config->max_nb_streams,
+				sizeof(struct zlib_priv_xform),
+				0, 0, NULL, NULL, NULL,
+				NULL, config->socket_id,
+				0);
+	if (mp == NULL) {
+		ZLIB_LOG_ERR("Cannot create private xform pool on socket %d\n",
+				config->socket_id);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/** Start device -- nothing to do; all resources are set up in
+ * zlib_pmd_config(). Always returns 0.
+ */
+static int
+zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+	return 0;
+}
+
+/** Stop device.
+ *
+ * NOTE(review): freeing the xform pool here means a stop/start cycle
+ * leaves the device without its pool, since zlib_pmd_start() does not
+ * re-create it. Consider moving the free to dev_close -- see list
+ * discussion.
+ */
+static void
+zlib_pmd_stop(struct rte_compressdev *dev)
+{
+	struct zlib_private *internals = dev->data->dev_private;
+	struct rte_mempool *mp = rte_mempool_lookup(internals->mp_name);
+	rte_mempool_free(mp);
+}
+
+/** Close device.
+ *
+ * Releases the private xform/stream mempool, per review: memory should
+ * be freed on close rather than only on stop. The lookup makes this
+ * safe even if dev_stop already freed the pool (lookup then returns
+ * NULL and rte_mempool_free(NULL) is a no-op).
+ */
+static int
+zlib_pmd_close(struct rte_compressdev *dev)
+{
+	struct zlib_private *internals = dev->data->dev_private;
+	struct rte_mempool *mp = rte_mempool_lookup(internals->mp_name);
+
+	rte_mempool_free(mp);
+	return 0;
+}
+
+/** Get device statistics.
+ *
+ * Accumulates the per-queue-pair counters into *stats. The caller is
+ * expected to have zeroed *stats; this function only adds to it.
+ */
+static void
+zlib_pmd_stats_get(struct rte_compressdev *dev,
+		struct rte_compressdev_stats *stats)
+{
+	uint16_t qp_id;
+
+	/* Guard against a NULL stats pointer, consistent with the
+	 * dev_info check in zlib_pmd_info_get().
+	 */
+	if (stats == NULL)
+		return;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics -- zero every queue pair's counters. */
+static void
+zlib_pmd_stats_reset(struct rte_compressdev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		struct zlib_qp *qpair = dev->data->queue_pairs[i];
+
+		memset(&qpair->qp_stats, 0, sizeof(qpair->qp_stats));
+	}
+}
+
+/** Get device info -- fill *dev_info from device state; no-op on NULL. */
+static void
+zlib_pmd_info_get(struct rte_compressdev *dev,
+		struct rte_compressdev_info *dev_info)
+{
+	struct zlib_private *internals = dev->data->dev_private;
+
+	if (dev_info == NULL)
+		return;
+
+	dev_info->driver_name = dev->device->name;
+	dev_info->feature_flags = dev->feature_flags;
+	dev_info->capabilities = zlib_pmd_capabilities;
+	dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+}
+
+/** Release queue pair.
+ *
+ * Frees the queue pair's completion ring (looked up by name) and the
+ * qp structure itself, then clears the device's slot. Returns 0.
+ */
+static int
+zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+	struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp != NULL) {
+		/* rte_ring_free(NULL) is a no-op, so no NULL check on
+		 * the lookup result is needed.
+		 */
+		rte_ring_free(rte_ring_lookup(qp->name));
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id.
+ *
+ * Returns 0 on success, -1 on snprintf error or truncation.
+ */
+static int
+zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+		struct zlib_qp *qp)
+{
+	int n = snprintf(qp->name, sizeof(qp->name),
+				"zlib_pmd_%u_qp_%u",
+				dev->data->dev_id, qp->id);
+
+	/* snprintf returns a negative value on encoding error; keep the
+	 * result signed so that case is not hidden by unsigned wraparound.
+	 */
+	if (n < 0 || (size_t)n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create (or reuse) the ring on which processed packets are placed. */
+static struct rte_ring *
+zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *ring = rte_ring_lookup(qp->name);
+
+	/* No existing ring with this name: create a fresh one. */
+	if (ring == NULL)
+		return rte_ring_create(qp->name, ring_size, socket_id,
+						RING_F_EXACT_SZ);
+
+	/* A ring already exists; reuse it only if it is big enough. */
+	if (rte_ring_get_size(ring) < ring_size) {
+		ZLIB_LOG_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	ZLIB_LOG_INFO("Reusing existing ring %s for processed"
+			" packets", qp->name);
+	return ring;
+}
+
+/** Setup a queue pair.
+ *
+ * Allocates the qp structure on @socket_id, gives it a unique name and
+ * a completion ring sized for @max_inflight_ops. Any previously set-up
+ * qp in this slot is released first. Returns 0 on success, negative on
+ * failure.
+ */
+static int
+zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+		uint32_t max_inflight_ops, int socket_id)
+{
+	struct zlib_qp *qp = NULL;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		zlib_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (zlib_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
+			max_inflight_ops, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	/* Clear the slot so dev data does not keep a dangling pointer
+	 * to the freed qp; rte_free(NULL) would also be safe.
+	 */
+	dev->data->queue_pairs[qp_id] = NULL;
+	rte_free(qp);
+	return -1;
+}
+
+/** ZLIB PMD operation table registered with the compressdev framework. */
+struct rte_compressdev_ops zlib_pmd_ops = {
+		.dev_configure		= zlib_pmd_config,
+		.dev_start		= zlib_pmd_start,
+		.dev_stop		= zlib_pmd_stop,
+		.dev_close		= zlib_pmd_close,
+
+		.stats_get		= zlib_pmd_stats_get,
+		.stats_reset		= zlib_pmd_stats_reset,
+
+		.dev_infos_get		= zlib_pmd_info_get,
+
+		/* TODO:Check if no q-pair count needed*/
+		.queue_pair_setup	= zlib_pmd_qp_setup,
+		.queue_pair_release	= zlib_pmd_qp_release,
+
+		/* NOTE(review): xform/stream ops are NULL here -- presumably
+		 * populated by a later patch in this series; confirm before
+		 * merging.
+		 */
+		.private_xform_create	= NULL,
+		.private_xform_free	= NULL,
+
+		.stream_create	= NULL,
+		.stream_free	= NULL
+};
+
+/** Exported handle to the op table, declared in zlib_pmd_private.h. */
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/drivers/compress/zlib/zlib_pmd_private.h b/drivers/compress/zlib/zlib_pmd_private.h
new file mode 100644
index 0000000..d29dc59
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_private.h
@@ -0,0 +1,77 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Cavium Networks
+ */
+
+#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
+#define _RTE_ZLIB_PMD_PRIVATE_H_
+
+#include <zlib.h>
+#include <rte_comp.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#define COMPRESSDEV_NAME_ZLIB_PMD	compress_zlib
+/**< ZLIB PMD device name */
+
+/* NOTE(review): this macro is not referenced anywhere in this patch and
+ * software imposes no qp limit -- consider removing (TODO: confirm there
+ * is no user in zlib_pmd.c).
+ */
+#define ZLIB_PMD_MAX_NB_QUEUE_PAIRS	1
+/**< ZLIB PMD specified queue pairs */
+
+/* Default zlib memLevel -- presumably passed to deflateInit2(); the
+ * caller is not visible in this patch, TODO confirm.
+ */
+#define DEF_MEM_LEVEL			8
+
+/* Declaration only: a plain `int zlib_logtype_driver;` here is a
+ * (tentative) definition emitted by every .c file that includes this
+ * header, causing multiple-definition link errors with -fno-common.
+ * The single definition must live in one .c file (e.g. zlib_pmd.c).
+ */
+extern int zlib_logtype_driver;
+#define ZLIB_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
+			__func__, ##args)
+
+#define ZLIB_LOG_INFO(fmt, args...) \
+	ZLIB_LOG(INFO, fmt, ## args)
+#define ZLIB_LOG_ERR(fmt, args...) \
+	ZLIB_LOG(ERR, fmt, ## args)
+#define ZLIB_LOG_WARN(fmt, args...) \
+	ZLIB_LOG(WARNING, fmt, ## args)
+
+/** ZLIB device private data, hung off dev->data->dev_private. */
+struct zlib_private {
+	uint32_t max_nb_queue_pairs;
+	/**< Max queue pairs reported through dev_infos_get() */
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	/**< Name of this device's xform/stream mempool ("stream_mp_<id>") */
+};
+
+/** ZLIB queue pair state, one per configured qp. */
+struct zlib_qp {
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_compressdev_stats qp_stats;
+	/**< Queue pair statistics */
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name (also names the ring above) */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype: processes one op against the
+ * given zlib stream state (compression or decompression variant).
+ */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+/* Prototype for the matching zlib stream teardown function. */
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+	z_stream strm;
+	/**< zlib stream structure (mutable zlib working state) */
+	comp_func_t comp;
+	/**< Operation (compression/decompression) */
+	comp_free_t free;
+	/**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure.
+ * Embeds a writable zlib_stream, so an instance is not shareable
+ * across queue pairs/devices (see capability flags discussion).
+ */
+struct zlib_priv_xform {
+	struct zlib_stream stream;
+} __rte_cache_aligned;
+
+/** Set ZLIB compression private-xform/Stream parameters.
+ * Implemented elsewhere in the series (not in this patch); presumably
+ * returns 0 on success -- TODO confirm against the implementation.
+ */
+extern int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+	struct zlib_stream *stream);
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
+#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */