[dpdk-dev,RFC,v3,1/1] lib: add compressdev API
Checks
Commit Message
Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
---
config/common_base | 7 +
lib/Makefile | 3 +
lib/librte_compressdev/Makefile | 54 +
lib/librte_compressdev/rte_comp.h | 608 ++++++++++
lib/librte_compressdev/rte_compressdev.c | 1167 ++++++++++++++++++++
lib/librte_compressdev/rte_compressdev.h | 892 +++++++++++++++
lib/librte_compressdev/rte_compressdev_pmd.c | 194 ++++
lib/librte_compressdev/rte_compressdev_pmd.h | 533 +++++++++
lib/librte_compressdev/rte_compressdev_version.map | 50 +
lib/librte_eal/common/include/rte_log.h | 1 +
mk/rte.app.mk | 1 +
11 files changed, 3510 insertions(+), 0 deletions(-)
create mode 100644 lib/librte_compressdev/Makefile
create mode 100644 lib/librte_compressdev/rte_comp.h
create mode 100644 lib/librte_compressdev/rte_compressdev.c
create mode 100644 lib/librte_compressdev/rte_compressdev.h
create mode 100644 lib/librte_compressdev/rte_compressdev_pmd.c
create mode 100644 lib/librte_compressdev/rte_compressdev_pmd.h
create mode 100644 lib/librte_compressdev/rte_compressdev_version.map
Comments
On 12/15/2017 11:19 PM, Trahe, Fiona wrote:
.. <snip>
> +
> +/** Compression Algorithms */
> +enum rte_comp_algorithm {
> + RTE_COMP_NULL = 0,
> + /**< No compression.
> + * Pass-through, data is copied unchanged from source buffer to
> + * destination buffer.
> + */
> + RTE_COMP_DEFLATE,
> + /**< DEFLATE compression algorithm
> + * https://tools.ietf.org/html/rfc1951
> + */
> + RTE_COMP_LZS,
> + /**< LZS compression algorithm
> + * https://tools.ietf.org/html/rfc2395
> + */
> + RTE_COMP_ALGO_LIST_END
> +};
> +
> +/**< Compression Level.
> + * The number is interpreted by each PMD differently. However, lower numbers
> + * give fastest compression, at the expense of compression ratio while
> + * higher numbers may give better compression ratios but are likely slower.
> + */
> +#define RTE_COMP_LEVEL_PMD_DEFAULT (-1)
> +/** Use PMD Default */
> +#define RTE_COMP_LEVEL_NONE (0)
> +/** Output uncompressed blocks if supported by the specified algorithm */
> +#define RTE_COMP_LEVEL_MIN (1)
> +/** Use minimum compression level supported by the PMD */
> +#define RTE_COMP_LEVEL_MAX (9)
> +/** Use maximum compression level supported by the PMD */
> +
> +/** Compression checksum types */
> +enum rte_comp_checksum_type {
> + RTE_COMP_NONE,
> + /**< No checksum generated */
> + RTE_COMP_CRC32,
> + /**< Generates a CRC32 checksum, as used by gzip */
> + RTE_COMP_ADLER32,
> + /**< Generates an Adler-32 checksum, as used by zlib */
> + RTE_COMP_CRC32_ADLER32,
> + /**< Generates both Adler-32 and CRC32 checksums, concatenated.
> + * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
> + */
What would be a real life use case for returning both CRC32 and ADLER32?
Packaging the data once as Gzip and once as zlib?
> +};
> +
> +/*
> + * enum rte_comp_hash_algo {
> + * RTE_COMP_HASH_NONE,
> + * RTE_COMP_HASH_SHA1,
> + * RTE_COMP_HASH_SHA256,
> + * };
> + * Need further input from cavium on this
> + * xform will need a flag with above enum value
> + * op will need to provide a virt/phys ptr to a data buffer of appropriate size.
> + * And via capability PMD can say whether supported or not.
> + */
> +
> +/** Compression Huffman Type - used by DEFLATE algorithm */
> +enum rte_comp_huffman {
> + RTE_COMP_DEFAULT,
> + /**< PMD may choose which Huffman codes to use */
> + RTE_COMP_FIXED,
> + /**< Use Fixed Huffman codes */
> + RTE_COMP_DYNAMIC,
> + /**< Use Dynamic Huffman codes */
> +};
> +
> +
> +enum rte_comp_flush_flag {
> + RTE_COMP_FLUSH_NONE,
> + /**< Data is not flushed. Output may remain in the compressor and be
> + * processed during a following op. It may not be possible to decompress
> + * output until a later op with some other flush flag has been sent.
> + */
> + RTE_COMP_FLUSH_SYNC,
> + /**< All data should be flushed to output buffer. Output data can be
> + * decompressed. However state and history is not cleared, so future
> + * ops may use history from this op */
> + RTE_COMP_FLUSH_FULL,
> + /**< All data should be flushed to output buffer. Output data can be
> + * decompressed. State and history data is cleared, so future
> + * ops will be independent of ops processed before this.
> + */
> + RTE_COMP_FLUSH_FINAL
> + /**< Same as RTE_COMP_FLUSH_FULL but also bfinal bit is set in last block
> + */
> + /* TODO:
> + * describe flag meanings for decompression.
> + * describe behaviour in OUT_OF_SPACE case.
> + * At least the last flag is specific to deflate algo. Should this be
> + * called rte_comp_deflate_flush_flag? And should there be
> + * comp_op_deflate_params in the op? */
What about Z_BLOCK and Z_TREES? Those are needed for software zlib.net
replacements.
> +};
> +
> +/** Compression transform types */
> +enum rte_comp_xform_type {
> + RTE_COMP_COMPRESS,
> + /**< Compression service - compress */
> + RTE_COMP_DECOMPRESS,
> + /**< Compression service - decompress */
> +};
> +
... <snip>
> +struct rte_comp_session;
> +/**
> + * Compression Operation.
> + *
> + * This structure contains data relating to performing a compression
> + * operation on the referenced mbuf data buffers.
> + *
> + * All compression operations are Out-of-place (OOP) operations,
> + * as the size of the output data is different to the size of the input data.
> + *
> + * Comp operations are enqueued and dequeued in comp PMDs using the
> + * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
> + */
> +struct rte_comp_op {
> +
> + enum rte_comp_op_type op_type;
> + void * stream_private;
> + /* location where PMD maintains stream state
> + * only required if op_type is STATEFUL, else should be NULL
> + */
> + struct rte_comp_session *session;
> + /**< Handle for the initialised session context */
> + struct rte_mempool *mempool;
> + /**< mempool from which operation is allocated */
> + phys_addr_t phys_addr;
> + /**< physical address of this operation */
iova_addr?
> + struct rte_mbuf *m_src;
> + /**< source mbuf
> + * The total size of the input buffer(s) can be retrieved using
> + * rte_pktmbuf_data_len(m_src)
> + */
> + struct rte_mbuf *m_dst;
> + /**< destination mbuf
> + * The total size of the output buffer(s) can be retrieved using
> + * rte_pktmbuf_data_len(m_dst)
> + */
> +
> + struct {
> + uint32_t offset;
> + /**< Starting point for compression or decompression,
> + * specified as number of bytes from start of packet in
> + * source buffer.
> + * Starting point for checksum generation in compress direction.
> + */
Why should offset have two different meanings for compression and
decompression. It seems that the use case of offset on input is
applicable to both use modes
<snip>
...
Hi Ahmed,
> -----Original Message-----
> From: Ahmed Mansour [mailto:ahmed.mansour@nxp.com]
> Sent: Monday, December 18, 2017 9:44 PM
> To: dev@dpdk.org; Shally.Verma@cavium.com
> Cc: Mahipal.Challa@cavium.com; NarayanaPrasad.Athreya@cavium.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Trahe, Fiona <fiona.trahe@intel.com>; Roy Pledge
> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>
> Subject: Re: [RFC v3 1/1] lib: add compressdev API
>
> On 12/15/2017 11:19 PM, Trahe, Fiona wrote:
> .. <snip>
>
> > +
> > +/** Compression Algorithms */
> > +enum rte_comp_algorithm {
> > + RTE_COMP_NULL = 0,
> > + /**< No compression.
> > + * Pass-through, data is copied unchanged from source buffer to
> > + * destination buffer.
> > + */
> > + RTE_COMP_DEFLATE,
> > + /**< DEFLATE compression algorithm
> > + *
> https://tools.ietf.org/html/rfc1951
> > + */
> > + RTE_COMP_LZS,
> > + /**< LZS compression algorithm
> > + *
> https://tools.ietf.org/html/rfc2395
> > + */
> > + RTE_COMP_ALGO_LIST_END
> > +};
> > +
> > +/**< Compression Level.
> > + * The number is interpreted by each PMD differently. However, lower numbers
> > + * give fastest compression, at the expense of compression ratio while
> > + * higher numbers may give better compression ratios but are likely slower.
> > + */
> > +#define RTE_COMP_LEVEL_PMD_DEFAULT (-1)
> > +/** Use PMD Default */
> > +#define RTE_COMP_LEVEL_NONE (0)
> > +/** Output uncompressed blocks if supported by the specified algorithm */
> > +#define RTE_COMP_LEVEL_MIN (1)
> > +/** Use minimum compression level supported by the PMD */
> > +#define RTE_COMP_LEVEL_MAX (9)
> > +/** Use maximum compression level supported by the PMD */
> > +
> > +/** Compression checksum types */
> > +enum rte_comp_checksum_type {
> > + RTE_COMP_NONE,
> > + /**< No checksum generated */
> > + RTE_COMP_CRC32,
> > + /**< Generates a CRC32 checksum, as used by gzip */
> > + RTE_COMP_ADLER32,
> > + /**< Generates an Adler-32 checksum, as used by zlib */
> > + RTE_COMP_CRC32_ADLER32,
> > + /**< Generates both Adler-32 and CRC32 checksums, concatenated.
> > + * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
> > + */
>
> What would be a real life use case for returning both CRC32 and ADLER32?
> Packaging the data once as Gzip and once as zlib?
[Fiona] We've had requests for this from customers.
>
> > +};
> > +
> > +/*
> > + * enum rte_comp_hash_algo {
> > + * RTE_COMP_HASH_NONE,
> > + * RTE_COMP_HASH_SHA1,
> > + * RTE_COMP_HASH_SHA256,
> > + * };
> > + * Need further input from cavium on this
> > + * xform will need a flag with above enum value
> > + * op will need to provide a virt/phys ptr to a data buffer of appropriate size.
> > + * And via capability PMD can say whether supported or not.
> > + */
> > +
> > +/** Compression Huffman Type - used by DEFLATE algorithm */
> > +enum rte_comp_huffman {
> > + RTE_COMP_DEFAULT,
> > + /**< PMD may choose which Huffman codes to use */
> > + RTE_COMP_FIXED,
> > + /**< Use Fixed Huffman codes */
> > + RTE_COMP_DYNAMIC,
> > + /**< Use Dynamic Huffman codes */
> > +};
> > +
> > +
> > +enum rte_comp_flush_flag {
> > + RTE_COMP_FLUSH_NONE,
> > + /**< Data is not flushed. Output may remain in the compressor and be
> > + * processed during a following op. It may not be possible to decompress
> > + * output until a later op with some other flush flag has been sent.
> > + */
> > + RTE_COMP_FLUSH_SYNC,
> > + /**< All data should be flushed to output buffer. Output data can be
> > + * decompressed. However state and history is not cleared, so future
> > + * ops may use history from this op */
> > + RTE_COMP_FLUSH_FULL,
> > + /**< All data should be flushed to output buffer. Output data can be
> > + * decompressed. State and history data is cleared, so future
> > + * ops will be independent of ops processed before this.
> > + */
> > + RTE_COMP_FLUSH_FINAL
> > + /**< Same as RTE_COMP_FLUSH_FULL but also bfinal bit is set in last block
> > + */
> > + /* TODO:
> > + * describe flag meanings for decompression.
> > + * describe behaviour in OUT_OF_SPACE case.
> > + * At least the last flag is specific to deflate algo. Should this be
> > + * called rte_comp_deflate_flush_flag? And should there be
> > + * comp_op_deflate_params in the op? */
>
> What about Z_BLOCK and Z_TREES? Those are needed for software zlib.net
> replacements.
[Fiona] We haven't seen a need for those. I would suggest proposing a patch later once the initial API is stabilised.
> > +};
> > +
> > +/** Compression transform types */
> > +enum rte_comp_xform_type {
> > + RTE_COMP_COMPRESS,
> > + /**< Compression service - compress */
> > + RTE_COMP_DECOMPRESS,
> > + /**< Compression service - decompress */
> > +};
> > +
>
> ... <snip>
>
> > +struct rte_comp_session;
> > +/**
> > + * Compression Operation.
> > + *
> > + * This structure contains data relating to performing a compression
> > + * operation on the referenced mbuf data buffers.
> > + *
> > + * All compression operations are Out-of-place (OOP) operations,
> > + * as the size of the output data is different to the size of the input data.
> > + *
> > + * Comp operations are enqueued and dequeued in comp PMDs using the
> > + * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
> > + */
> > +struct rte_comp_op {
> > +
> > + enum rte_comp_op_type op_type;
> > + void * stream_private;
> > + /* location where PMD maintains stream state
> > + * only required if op_type is STATEFUL, else should be NULL
> > + */
> > + struct rte_comp_session *session;
> > + /**< Handle for the initialised session context */
> > + struct rte_mempool *mempool;
> > + /**< mempool from which operation is allocated */
> > + phys_addr_t phys_addr;
> > + /**< physical address of this operation */
>
> iova_addr?
[Fiona] Yes, this should be rte_iova_t. will fix this in the v1.
>
> > + struct rte_mbuf *m_src;
> > + /**< source mbuf
> > + * The total size of the input buffer(s) can be retrieved using
> > + * rte_pktmbuf_data_len(m_src)
> > + */
> > + struct rte_mbuf *m_dst;
> > + /**< destination mbuf
> > + * The total size of the output buffer(s) can be retrieved using
> > + * rte_pktmbuf_data_len(m_dst)
> > + */
> > +
> > + struct {
> > + uint32_t offset;
> > + /**< Starting point for compression or decompression,
> > + * specified as number of bytes from start of packet in
> > + * source buffer.
> > + * Starting point for checksum generation in compress direction.
> > + */
>
> Why should offset have two different meanings for compression and
> decompression. It seems that the use case of offset on input is
> applicable to both use modes
[Fiona] It does have the same meaning for compression/decompression?
Just for checksum it's different. Checksum is usually required on the uncompressed data.
In decompression the input buffer contains compressed data.
> <snip>
> ...
Hi Fiona
While revisiting this, we identified a few questions and additions. Please see them inline.
> -----Original Message-----
> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> Sent: 15 December 2017 23:19
> To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> Prasad <NarayanaPrasad.Athreya@cavium.com>;
> pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
> Subject: [RFC v3 1/1] lib: add compressdev API
>
> Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
> ---
//snip
> +
> +int
> +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
> queue_pair_id,
> + uint32_t max_inflight_ops, int socket_id)
[Shally] Is max_inflights_ops different from nb_streams_per_qp in struct rte_compressdev_info?
I assume they both carry same purpose. If yes, then it will be better to use single naming convention to avoid confusion.
Also, is it optional API? Like Is this a valid use case?:
dev_configure() --> dev_start() --> qp_start() --> enqueue/dequeue() --> qp_stop() --> dev_stop() --> dev_close()?
//snip
> +
> +#define RTE_COMPRESSDEV_PMD_NAME_ARG
> ("name")
> +#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG
> ("max_nb_queue_pairs")
> +#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG ("socket_id")
> +
[Shally] Need to define argument macro for max_nb_session_per_qp and max_nb_streams_per_qp as well
> +
> +static const char * const compressdev_pmd_valid_params[] = {
> + RTE_COMPRESSDEV_PMD_NAME_ARG,
> + RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
> + RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
> +};
[Shally] Likewise, array need to be updated with other mentioned two arguments
> +
> +/**
> + * @internal
> + * Initialisation parameters for comp devices
> + */
> +struct rte_compressdev_pmd_init_params {
> + char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
> + size_t private_data_size;
> + int socket_id;
> + unsigned int max_nb_queue_pairs;
[Shally] And this also need to be updated with max_nb_sessions_per_qp and max_streams_per_qp
//snip
Thanks
Shally
Hi Shally,
> -----Original Message-----
> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> Sent: Thursday, January 18, 2018 12:54 PM
> To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana Prasad
> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>; Jain, Deepak K
> <deepak.k.jain@intel.com>; Hemant Agrawal <hemant.agrawal@nxp.com>; Roy Pledge
> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>; Ahmed Mansour
> <ahmed.mansour@nxp.com>
> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>
> Hi Fiona
>
> While revisiting this, we identified few questions and additions. Please see them inline.
>
>
> > -----Original Message-----
> > From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> > Sent: 15 December 2017 23:19
> > To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
> > Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > Prasad <NarayanaPrasad.Athreya@cavium.com>;
> > pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
> > Subject: [RFC v3 1/1] lib: add compressdev API
> >
> > Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
> > ---
>
> //snip
>
> > +
> > +int
> > +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
> > queue_pair_id,
> > + uint32_t max_inflight_ops, int socket_id)
>
> [Shally] Is max_inflights_ops different from nb_streams_per_qp in struct rte_compressdev_info?
> I assume they both carry same purpose. If yes, then it will be better to use single naming convention to
> avoid confusion.
[Fiona] No, I think they have different purposes.
max_inflight_ops should be used to configure the qp with the number of ops the application expects to be able to submit to the qp before it needs to poll for a response. It can be configured differently for each qp. In the QAT case it dictates the depth of the qp created, it may have different implications on other PMDs.
nb_sessions_per_qp and nb_streams_per_qp are limitations the devices reports and are same for all qps on the device. QAT doesn't have those limitations and so would report 0, however I assumed they may be necessary for other devices.
This assumption is based on the patch submitted by NXP to cryptodev in Feb 2017
http://dpdk.org/ml/archives/dev/2017-March/060740.html
I also assume these are not necessarily the max number of sessions in ops on the qp at a given time, but the total number attached, i.e. if the device has this limitation then sessions must be attached to qps, and presumably reserve some resources. Being attached doesn't imply there is an op on the qp at that time using that session. So it's not relating to the inflight op count, but to the number of sessions attached/detached to the qp.
Including Akhil on the To list, maybe NXP can confirm if these params are needed.
> Also, is it optional API? Like Is this a valid use case?:
> dev_configure() --> dev_start() --> qp_start() --> enqueue/dequeue() --> qp_stop() --> dev_stop() -->
> dev_close()?
[Fiona] I don't think it should be optional as some PMDs need to allocate resources based on the setup data passed in on this API.
> //snip
>
> > +
> > +#define RTE_COMPRESSDEV_PMD_NAME_ARG
> > ("name")
> > +#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG
> > ("max_nb_queue_pairs")
> > +#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG ("socket_id")
> > +
>
> [Shally] Need to define argument macro for max_nb_session_per_qp and max_nb_streams_per_qp as
> well
[Fiona] ok
> > +
> > +static const char * const compressdev_pmd_valid_params[] = {
> > + RTE_COMPRESSDEV_PMD_NAME_ARG,
> > + RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
> > + RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
> > +};
>
> [Shally] Likewise, array need to be updated with other mentioned two arguments
[Fiona] ok
> > +
> > +/**
> > + * @internal
> > + * Initialisation parameters for comp devices
> > + */
> > +struct rte_compressdev_pmd_init_params {
> > + char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
> > + size_t private_data_size;
> > + int socket_id;
> > + unsigned int max_nb_queue_pairs;
>
> [Shally] And this also need to be updated with max_nb_sessions_per_qp and max_streams_per_qp
[Fiona] ok
> //snip
>
> Thanks
> Shally
Hi Fiona
> -----Original Message-----
> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> Sent: 19 January 2018 17:30
> To: Verma, Shally <Shally.Verma@cavium.com>; dev@dpdk.org;
> akhil.goyal@nxp.com
> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> Querry <youri.querry_1@nxp.com>; Ahmed Mansour
> <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>
> Hi Shally,
>
> > -----Original Message-----
> > From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> > Sent: Thursday, January 18, 2018 12:54 PM
> > To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
> > Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> Prasad
> > <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>;
> > Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
> <Sunila.Sahu@cavium.com>; Jain, Deepak K
> > <deepak.k.jain@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Roy Pledge
> > <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>;
> Ahmed Mansour
> > <ahmed.mansour@nxp.com>
> > Subject: RE: [RFC v3 1/1] lib: add compressdev API
> >
> > Hi Fiona
> >
> > While revisiting this, we identified few questions and additions. Please see
> them inline.
> >
> >
> > > -----Original Message-----
> > > From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> > > Sent: 15 December 2017 23:19
> > > To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
> > > Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > > Prasad <NarayanaPrasad.Athreya@cavium.com>;
> > > pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
> > > Subject: [RFC v3 1/1] lib: add compressdev API
> > >
> > > Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
> > > ---
> >
> > //snip
> >
> > > +
> > > +int
> > > +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
> > > queue_pair_id,
> > > + uint32_t max_inflight_ops, int socket_id)
> >
> > [Shally] Is max_inflights_ops different from nb_streams_per_qp in struct
> rte_compressdev_info?
> > I assume they both carry same purpose. If yes, then it will be better to use
> single naming convention to
> > avoid confusion.
> [Fiona] No, I think they have different purposes.
> max_inflight_ops should be used to configure the qp with the number of ops
> the application expects to be able to submit to the qp before it needs to poll
> for a response. It can be configured differently for each qp. In the QAT case it
> dictates the depth of the qp created, it may have different implications on
> other PMDs.
> nb_sessions_per_qp and nb_streams_per_qp are limitations the devices
> reports and are same for all qps on the device. QAT doesn't have those
> limitations and so would report 0, however I assumed they may be necessary
> for other devices.
> This assumption is based on the patch submitted by NXP to cryptodev in Feb
> 2017
> http://dpdk.org/ml/archives/dev/2017-March/060740.html
> I also assume these are not necessarily the max number of sessions in ops on
> the qp at a given time, but the total number attached, i.e. if the device has
> this limitation then sessions must be attached to qps, and presumably
> reserve some resources. Being attached doesn't imply there is an op on the
> qp at that time using that session. So it's not relating to the inflight op
> count, but to the number of sessions attached/detached to the qp.
> Including Akhil on the To list, maybe NXP can confirm if these params are
> needed.
[Shally] Ok. Then let's wait for NXP to confirm on this requirement as currently spec doesn't have any API to attach queue_pair_to_specific_session_or_stream as cryptodev.
But then how application could know limit on max_inflight_ops supported on a qp? As it can pass any random number during qp_setup().
Do you believe we need to add a capability field in dev_info to indicate limit on max_inflight_ops?
Thanks
Shally
>
>
> > Also, is it optional API? Like Is this a valid use case?:
> > dev_configure() --> dev_start() --> qp_start() --> enqueue/dequeue() -->
> qp_stop() --> dev_stop() -->
> > dev_close()?
> [Fiona] I don't think it should be optional as some PMDs need to allocate
> resources based on the setup data passed in on this API.
>
> > //snip
> >
> > > +
> > > +#define RTE_COMPRESSDEV_PMD_NAME_ARG
> > > ("name")
> > > +#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG
> > > ("max_nb_queue_pairs")
> > > +#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
> ("socket_id")
> > > +
> >
> > [Shally] Need to define argument macro for max_nb_session_per_qp and
> max_nb_streams_per_qp as
> > well
> [Fiona] ok
>
> > > +
> > > +static const char * const compressdev_pmd_valid_params[] = {
> > > + RTE_COMPRESSDEV_PMD_NAME_ARG,
> > > + RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
> > > + RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
> > > +};
> >
> > [Shally] Likewise, array need to be updated with other mentioned two
> arguments
> [Fiona] ok
>
>
> > > +
> > > +/**
> > > + * @internal
> > > + * Initialisation parameters for comp devices
> > > + */
> > > +struct rte_compressdev_pmd_init_params {
> > > + char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
> > > + size_t private_data_size;
> > > + int socket_id;
> > > + unsigned int max_nb_queue_pairs;
> >
> > [Shally] And this also need to be updated with max_nb_sessions_per_qp
> and max_streams_per_qp
> [Fiona] ok
>
> > //snip
> >
> > Thanks
> > Shally
Hi All,
Please see responses in line.
Thanks,
Ahmed
On 1/23/2018 6:58 AM, Verma, Shally wrote:
> Hi Fiona
>
>> -----Original Message-----
>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
>> Sent: 19 January 2018 17:30
>> To: Verma, Shally <Shally.Verma@cavium.com>; dev@dpdk.org;
>> akhil.goyal@nxp.com
>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
>> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
>> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
>> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
>> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
>> Querry <youri.querry_1@nxp.com>; Ahmed Mansour
>> <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>>
>> Hi Shally,
>>
>>> -----Original Message-----
>>> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
>>> Sent: Thursday, January 18, 2018 12:54 PM
>>> To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>> Prasad
>>> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
>> <pablo.de.lara.guarch@intel.com>;
>>> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
>> <Sunila.Sahu@cavium.com>; Jain, Deepak K
>>> <deepak.k.jain@intel.com>; Hemant Agrawal
>> <hemant.agrawal@nxp.com>; Roy Pledge
>>> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>;
>> Ahmed Mansour
>>> <ahmed.mansour@nxp.com>
>>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>>>
>>> Hi Fiona
>>>
>>> While revisiting this, we identified few questions and additions. Please see
>> them inline.
>>>
>>>> -----Original Message-----
>>>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
>>>> Sent: 15 December 2017 23:19
>>>> To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
>>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>>>> Prasad <NarayanaPrasad.Athreya@cavium.com>;
>>>> pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
>>>> Subject: [RFC v3 1/1] lib: add compressdev API
>>>>
>>>> Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
>>>> ---
>>> //snip
>>>
>>>> +
>>>> +int
>>>> +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
>>>> queue_pair_id,
>>>> + uint32_t max_inflight_ops, int socket_id)
>>> [Shally] Is max_inflights_ops different from nb_streams_per_qp in struct
>> rte_compressdev_info?
>>> I assume they both carry same purpose. If yes, then it will be better to use
>> single naming convention to
>>> avoid confusion.
>> [Fiona] No, I think they have different purposes.
>> max_inflight_ops should be used to configure the qp with the number of ops
>> the application expects to be able to submit to the qp before it needs to poll
>> for a response. It can be configured differently for each qp. In the QAT case it
>> dictates the depth of the qp created, it may have different implications on
>> other PMDs.
>> nb_sessions_per_qp and nb_streams_per_qp are limitations the devices
>> reports and are same for all qps on the device. QAT doesn't have those
>> limitations and so would report 0, however I assumed they may be necessary
>> for other devices.
>> This assumption is based on the patch submitted by NXP to cryptodev in Feb
>> 2017
>> http://dpdk.org/ml/archives/dev/2017-March/060740.html
>> I also assume these are not necessarily the max number of sessions in ops on
>> the qp at a given time, but the total number attached, i.e. if the device has
>> this limitation then sessions must be attached to qps, and presumably
>> reserve some resources. Being attached doesn't imply there is an op on the
>> qp at that time using that session. So it's not relating to the inflight op
>> count, but to the number of sessions attached/detached to the qp.
>> Including Akhil on the To list, maybe NXP can confirm if these params are
>> needed.
> [Shally] Ok. Then let's wait for NXP to confirm on this requirement as currently spec doesn't have any API to attach queue_pair_to_specific_session_or_stream as cryptodev.
>
> But then how application could know limit on max_inflight_ops supported on a qp? As it can pass any random number during qp_setup().
> Do you believe we need to add a capability field in dev_info to indicate limit on max_inflight_ops?
>
> Thanks
> Shally
[Ahmed] @Fiona This looks ok. max_inflight_ops makes sense. I understand
it as a push back mechanism per qp. We do not have physical limit for
number of streams or sessions on a qp in our hardware, so we would
return 0 here as well.
@Shally in our PMD implementation we do not attach streams or sessions
to a particular qp. Regarding max_inflight_ops. I think that limit
should be independent of hardware. Not all enqueues must succeed. The
hardware can push back against the enqueuer dynamically if the resources
needed to accommodate additional ops are not available yet. This push
back happens in the software if the user sets a max_inflight_ops that is
less than the hardware max_inflight_ops. The same return pathway can be
exercised if the user actually attempts to enqueue more than the
supported max_inflight_ops because of the hardware.
>>
>>> Also, is it optional API? Like Is this a valid use case?:
>>> dev_configure() --> dev_start() --> qp_start() --> enqueue/dequeue() -->
>> qp_stop() --> dev_stop() -->
>>> dev_close()?
>> [Fiona] I don't think it should be optional as some PMDs need to allocate
>> resources based on the setup data passed in on this API.
>>
>>> //snip
>>>
>>>> +
>>>> +#define RTE_COMPRESSDEV_PMD_NAME_ARG
>>>> ("name")
>>>> +#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG
>>>> ("max_nb_queue_pairs")
>>>> +#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
>> ("socket_id")
>>>> +
>>> [Shally] Need to define argument macro for max_nb_session_per_qp and
>> max_nb_streams_per_qp as
>>> well
>> [Fiona] ok
>>
>>>> +
>>>> +static const char * const compressdev_pmd_valid_params[] = {
>>>> + RTE_COMPRESSDEV_PMD_NAME_ARG,
>>>> + RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
>>>> + RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
>>>> +};
>>> [Shally] Likewise, array need to be updated with other mentioned two
>> arguments
>> Fiona] ok
>>
>>
>>>> +
>>>> +/**
>>>> + * @internal
>>>> + * Initialisation parameters for comp devices
>>>> + */
>>>> +struct rte_compressdev_pmd_init_params {
>>>> + char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
>>>> + size_t private_data_size;
>>>> + int socket_id;
>>>> + unsigned int max_nb_queue_pairs;
>>> [Shally] And this also need to be updated with max_nb_sessions_per_qp
>> and max_streams_per_qp
>> [Fiona] ok
>>
>>> //snip
>>>
>>> Thanks
>>> Shally
> -----Original Message-----
> From: Ahmed Mansour [mailto:ahmed.mansour@nxp.com]
> Sent: 25 January 2018 01:06
> To: Verma, Shally <Shally.Verma@cavium.com>; Trahe, Fiona
> <fiona.trahe@intel.com>; dev@dpdk.org; Akhil Goyal
> <akhil.goyal@nxp.com>
> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> Querry <youri.querry_1@nxp.com>
> Subject: Re: [RFC v3 1/1] lib: add compressdev API
>
> Hi All,
>
> Please see responses in line.
>
> Thanks,
>
> Ahmed
>
> On 1/23/2018 6:58 AM, Verma, Shally wrote:
> > Hi Fiona
> >
> >> -----Original Message-----
> >> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> >> Sent: 19 January 2018 17:30
> >> To: Verma, Shally <Shally.Verma@cavium.com>; dev@dpdk.org;
> >> akhil.goyal@nxp.com
> >> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> >> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> >> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> >> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> >> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> >> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> >> Querry <youri.querry_1@nxp.com>; Ahmed Mansour
> >> <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
> >> Subject: RE: [RFC v3 1/1] lib: add compressdev API
> >>
> >> Hi Shally,
> >>
> >>> -----Original Message-----
> >>> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> >>> Sent: Thursday, January 18, 2018 12:54 PM
> >>> To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
> >>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> >> Prasad
> >>> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> >> <pablo.de.lara.guarch@intel.com>;
> >>> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
> >> <Sunila.Sahu@cavium.com>; Jain, Deepak K
> >>> <deepak.k.jain@intel.com>; Hemant Agrawal
> >> <hemant.agrawal@nxp.com>; Roy Pledge
> >>> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>;
> >> Ahmed Mansour
> >>> <ahmed.mansour@nxp.com>
> >>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
> >>>
> >>> Hi Fiona
> >>>
> >>> While revisiting this, we identified few questions and additions. Please
> see
> >> them inline.
> >>>
> >>>> -----Original Message-----
> >>>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> >>>> Sent: 15 December 2017 23:19
> >>>> To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
> >>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> >>>> Prasad <NarayanaPrasad.Athreya@cavium.com>;
> >>>> pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
> >>>> Subject: [RFC v3 1/1] lib: add compressdev API
> >>>>
> >>>> Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
> >>>> ---
> >>> //snip
> >>>
> >>>> +
> >>>> +int
> >>>> +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
> >>>> queue_pair_id,
> >>>> + uint32_t max_inflight_ops, int socket_id)
> >>> [Shally] Is max_inflights_ops different from nb_streams_per_qp in
> struct
> >> rte_compressdev_info?
> >>> I assume they both carry same purpose. If yes, then it will be better to
> use
> >> single naming convention to
> >>> avoid confusion.
> >> [Fiona] No, I think they have different purposes.
> >> max_inflight_ops should be used to configure the qp with the number of
> ops
> >> the application expects to be able to submit to the qp before it needs to
> poll
> >> for a response. It can be configured differently for each qp. In the QAT
> case it
> >> dictates the depth of the qp created, it may have different implications on
> >> other PMDs.
> >> nb_sessions_per_qp and nb_streams_per_qp are limitations the devices
> >> reports and are same for all qps on the device. QAT doesn't have those
> >> limitations and so would report 0, however I assumed they may be
> necessary
> >> for other devices.
> >> This assumption is based on the patch submitted by NXP to cryptodev in
> Feb
> >> 2017
> >>
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdpd
> k.org%2Fml%2Farchives%2Fdev%2F2017-
> March%2F060740.html&data=02%7C01%7Cahmed.mansour%40nxp.com%7C
> b012d74d7530493b155108d56258955f%7C686ea1d3bc2b4c6fa92cd99c5c30163
> 5%7C0%7C0%7C636523054981379413&sdata=2SazlEazMxcBGS7R58CpNrX0G5
> OeWx8PLMwf%2FYzqv34%3D&reserved=0
> >> I also assume these are not necessarily the max number of sessions in ops
> on
> >> the qp at a given time, but the total number attached, i.e. if the device
> has
> >> this limitation then sessions must be attached to qps, and presumably
> >> reserve some resources. Being attached doesn't imply there is an op on
> the
> >> qp at that time using that session. So it's not to relating to the inflight op
> >> count, but to the number of sessions attached/detached to the qp.
> >> Including Akhil on the To list, maybe NXP can confirm if these params are
> >> needed.
> > [Shally] Ok. Then let's wait for NXP to confirm on this requirement as
> currently spec doesn't have any API to attach
> queue_pair_to_specific_session_or_stream as cryptodev.
> >
> > But then how application could know limit on max_inflight_ops supported
> on a qp? As it can pass any random number during qp_setup().
> > Do you believe we need to add a capability field in dev_info to indicate limit
> on max_inflight_ops?
> >
> > Thanks
> > Shally
> [Ahmed] @Fiona This looks ok. max_inflight_ops makes sense. I understand
> it as a push back mechanism per qp. We do not have physical limit for
> number of streams or sessions on a qp in our hardware, so we would
> return 0 here as well.
> @Shally in our PMD implementation we do not attach streams or sessions
> to a particular qp. Regarding max_inflight_ops. I think that limit
[Shally] Ok. We too don't have any such limit defined. So, if these are redundant fields then can be removed until requirement is identified in context of compressdev.
> should be independent of hardware. Not all enqueues must succeed. The
> hardware can push back against the enqueuer dynamically if the resources
> needed to accommodate additional ops are not available yet. This push
> back happens in the software if the user sets a max_inflight_ops that is
> less than the hardware max_inflight_ops. The same return pathway can be
> exercised if the user actually attempts to enqueue more than the
> supported max_inflight_ops because of the hardware.
[Shally] Ok. This sounds fine to me. As you mentioned, we can let application setup a queue pair with any max_inflight_ops and, during enqueue_burst(), leave it on hardware to consume as much as it can subject to the limit set in qp_setup().
So, this doesn't seem to be a hard requirement on dev_info to expose. The only knock-on effect I see is that the same testcase can then behave differently with different PMDs, as each PMD may have a different support level for the same max_inflight_ops in their qp_setup().
> >>
> >>> Also, is it optional API? Like Is this a valid use case?:
> >>> dev_configure() --> dev_start() --> qp_start() --> enqueue/dequeue() --
> >
> >> qp_stop() --> dev_stop() -->
> >>> dev_close()?
> >> [Fiona] I don't think it should be optional as some PMDs need to allocate
> >> resources based on the setup data passed in on this API.
> >>
> >>> //snip
> >>>
> >>>> +
> >>>> +#define RTE_COMPRESSDEV_PMD_NAME_ARG
> >>>> ("name")
> >>>> +#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG
> >>>> ("max_nb_queue_pairs")
> >>>> +#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
> >> ("socket_id")
> >>>> +
> >>> [Shally] Need to define argument macro for max_nb_session_per_qp
> and
> >> max_nb_streams_per_qp as
> >>> well
> >> [Fiona] ok
> >>
> >>>> +
> >>>> +static const char * const compressdev_pmd_valid_params[] = {
> >>>> + RTE_COMPRESSDEV_PMD_NAME_ARG,
> >>>> + RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
> >>>> + RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
> >>>> +};
> >>> [Shally] Likewise, array need to be updated with other mentioned two
> >> arguments
> >> Fiona] ok
> >>
> >>
> >>>> +
> >>>> +/**
> >>>> + * @internal
> >>>> + * Initialisation parameters for comp devices
> >>>> + */
> >>>> +struct rte_compressdev_pmd_init_params {
> >>>> + char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
> >>>> + size_t private_data_size;
> >>>> + int socket_id;
> >>>> + unsigned int max_nb_queue_pairs;
> >>> [Shally] And this also need to be updated with
> max_nb_sessions_per_qp
> >> and max_streams_per_qp
> >> [Fiona] ok
> >>
> >>> //snip
> >>>
> >>> Thanks
> >>> Shally
>
Hi Shally, Ahmed,
> -----Original Message-----
> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> Sent: Thursday, January 25, 2018 10:25 AM
> To: Ahmed Mansour <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>;
> dev@dpdk.org; Akhil Goyal <akhil.goyal@nxp.com>
> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana Prasad
> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>; Jain, Deepak K
> <deepak.k.jain@intel.com>; Hemant Agrawal <hemant.agrawal@nxp.com>; Roy Pledge
> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>
> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>
>
>
> > -----Original Message-----
> > From: Ahmed Mansour [mailto:ahmed.mansour@nxp.com]
> > Sent: 25 January 2018 01:06
> > To: Verma, Shally <Shally.Verma@cavium.com>; Trahe, Fiona
> > <fiona.trahe@intel.com>; dev@dpdk.org; Akhil Goyal
> > <akhil.goyal@nxp.com>
> > Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> > <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> > <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> > Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> > <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> > Querry <youri.querry_1@nxp.com>
> > Subject: Re: [RFC v3 1/1] lib: add compressdev API
> >
> > Hi All,
> >
> > Please see responses in line.
> >
> > Thanks,
> >
> > Ahmed
> >
> > On 1/23/2018 6:58 AM, Verma, Shally wrote:
> > > Hi Fiona
> > >
> > >> -----Original Message-----
> > >> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> > >> Sent: 19 January 2018 17:30
> > >> To: Verma, Shally <Shally.Verma@cavium.com>; dev@dpdk.org;
> > >> akhil.goyal@nxp.com
> > >> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > >> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> > >> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> > >> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> > >> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> > >> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> > >> Querry <youri.querry_1@nxp.com>; Ahmed Mansour
> > >> <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
> > >> Subject: RE: [RFC v3 1/1] lib: add compressdev API
> > >>
> > >> Hi Shally,
> > >>
> > >>> -----Original Message-----
> > >>> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> > >>> Sent: Thursday, January 18, 2018 12:54 PM
> > >>> To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
> > >>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > >> Prasad
> > >>> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> > >> <pablo.de.lara.guarch@intel.com>;
> > >>> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
> > >> <Sunila.Sahu@cavium.com>; Jain, Deepak K
> > >>> <deepak.k.jain@intel.com>; Hemant Agrawal
> > >> <hemant.agrawal@nxp.com>; Roy Pledge
> > >>> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>;
> > >> Ahmed Mansour
> > >>> <ahmed.mansour@nxp.com>
> > >>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
> > >>>
> > >>> Hi Fiona
> > >>>
> > >>> While revisiting this, we identified few questions and additions. Please
> > see
> > >> them inline.
> > >>>
> > >>>> -----Original Message-----
> > >>>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> > >>>> Sent: 15 December 2017 23:19
> > >>>> To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
> > >>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > >>>> Prasad <NarayanaPrasad.Athreya@cavium.com>;
> > >>>> pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
> > >>>> Subject: [RFC v3 1/1] lib: add compressdev API
> > >>>>
> > >>>> Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
> > >>>> ---
> > >>> //snip
> > >>>
> > >>>> +
> > >>>> +int
> > >>>> +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
> > >>>> queue_pair_id,
> > >>>> + uint32_t max_inflight_ops, int socket_id)
> > >>> [Shally] Is max_inflights_ops different from nb_streams_per_qp in
> > struct
> > >> rte_compressdev_info?
> > >>> I assume they both carry same purpose. If yes, then it will be better to
> > use
> > >> single naming convention to
> > >>> avoid confusion.
> > >> [Fiona] No, I think they have different purposes.
> > >> max_inflight_ops should be used to configure the qp with the number of
> > ops
> > >> the application expects to be able to submit to the qp before it needs to
> > poll
> > >> for a response. It can be configured differently for each qp. In the QAT
> > case it
> > >> dictates the depth of the qp created, it may have different implications on
> > >> other PMDs.
> > >> nb_sessions_per_qp and nb_streams_per_qp are limitations the devices
> > >> reports and are same for all qps on the device. QAT doesn't have those
> > >> limitations and so would report 0, however I assumed they may be
> > necessary
> > >> for other devices.
> > >> This assumption is based on the patch submitted by NXP to cryptodev in
> > Feb
> > >> 2017
> > >>
> > https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdpd
> > k.org%2Fml%2Farchives%2Fdev%2F2017-
> > March%2F060740.html&data=02%7C01%7Cahmed.mansour%40nxp.com%7C
> > b012d74d7530493b155108d56258955f%7C686ea1d3bc2b4c6fa92cd99c5c30163
> > 5%7C0%7C0%7C636523054981379413&sdata=2SazlEazMxcBGS7R58CpNrX0G5
> > OeWx8PLMwf%2FYzqv34%3D&reserved=0
> > >> I also assume these are not necessarily the max number of sessions in ops
> > on
> > >> the qp at a given time, but the total number attached, i.e. if the device
> > has
> > >> this limitation then sessions must be attached to qps, and presumably
> > >> reserve some resources. Being attached doesn't imply there is an op on
> > the
> > >> qp at that time using that session. So it's not to relating to the inflight op
> > >> count, but to the number of sessions attached/detached to the qp.
> > >> Including Akhil on the To list, maybe NXP can confirm if these params are
> > >> needed.
> > > [Shally] Ok. Then let's wait for NXP to confirm on this requirement as
> > currently spec doesn't have any API to attach
> > queue_pair_to_specific_session_or_stream as cryptodev.
> > >
> > > But then how application could know limit on max_inflight_ops supported
> > on a qp? As it can pass any random number during qp_setup().
> > > Do you believe we need to add a capability field in dev_info to indicate limit
> > on max_inflight_ops?
> > >
> > > Thanks
> > > Shally
> > [Ahmed] @Fiona This looks ok. max_inflight_ops makes sense. I understand
> > it as a push back mechanism per qp. We do not have physical limit for
> > number of streams or sessions on a qp in our hardware, so we would
> > return 0 here as well.
> > @Shally in our PMD implementation we do not attach streams or sessions
> > to a particular qp. Regarding max_inflight_ops. I think that limit
>
> [Shally] Ok. We too don't have any such limit defined. So, if these are redundant fields then can be
> removed until requirement is identified in context of compressdev.
[Fiona] Ok, so it seems we're all agreed to remove max_nb_sessions_per_qp and
max_nb_streams_per_qp from rte_compressdev_info.
I think we're also agreed to keep max_inflight_ops on the qp_setup.
It's not available on the info and if I understand you both correctly we don't
need to add it there as a hw limitation or capability. I'd expect the appl to set it to
some value which is probably lower than any hardware limitation. The appl may then
perform many enqueue_bursts until the qp is full and if unable to enqueue a burst
should try dequeueing to free up space on the qp for more enqueue_bursts.
I think the value it's set to can give the application some influence over latency vs throughput.
E.g. if it's set to a very large number then it allows the PMD to stockpile requests,
which can result in longer latency, but optimal throughput as easier to keep the
engines supplied with requests. If set very small, latency may be short, as requests get
to engines sooner, but there's a risk of the engines running out of requests
if the PMD manages to process everything before the application tops up the qp.
>
>
> > should be independent of hardware. Not all enqueues must succeed. The
> > hardware can push back against the enqueuer dynamically if the resources
> > needed to accommodate additional ops are not available yet. This push
> > back happens in the software if the user sets a max_inflight_ops that is
> > less than the hardware max_inflight_ops. The same return pathway can be
> > exercised if the user actually attempts to enqueue more than the
> > supported max_inflight_ops because of the hardware.
>
> [Shally] Ok. This sounds fine to me. As you mentioned, we can let application setup a queue pair with
> any max_inflight_ops and, during enqueue_burst(), leave it on hardware to consume as much as it can
> subject to the limit set in qp_setup().
> So, this doesn't seem to be a hard requirement on dev_info to expose. Only knock-on effect I see is,
> same testcase can then behave differently with different PMDs as each PMD may have different support
> level for same max_inflight_ops in their qp_setup().
Hi
> -----Original Message-----
> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> Sent: 26 January 2018 00:13
> To: Verma, Shally <Shally.Verma@cavium.com>; Ahmed Mansour
> <ahmed.mansour@nxp.com>; dev@dpdk.org; Akhil Goyal
> <akhil.goyal@nxp.com>
> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> Querry <youri.querry_1@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>
> Hi Shally, Ahmed,
>
>
> > -----Original Message-----
> > From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> > Sent: Thursday, January 25, 2018 10:25 AM
> > To: Ahmed Mansour <ahmed.mansour@nxp.com>; Trahe, Fiona
> <fiona.trahe@intel.com>;
> > dev@dpdk.org; Akhil Goyal <akhil.goyal@nxp.com>
> > Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> Prasad
> > <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>;
> > Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
> <Sunila.Sahu@cavium.com>; Jain, Deepak K
> > <deepak.k.jain@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Roy Pledge
> > <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>
> > Subject: RE: [RFC v3 1/1] lib: add compressdev API
> >
> >
> >
> > > -----Original Message-----
> > > From: Ahmed Mansour [mailto:ahmed.mansour@nxp.com]
> > > Sent: 25 January 2018 01:06
> > > To: Verma, Shally <Shally.Verma@cavium.com>; Trahe, Fiona
> > > <fiona.trahe@intel.com>; dev@dpdk.org; Akhil Goyal
> > > <akhil.goyal@nxp.com>
> > > Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > > Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> > > <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> > > <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
> > > Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> > > <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
> > > Querry <youri.querry_1@nxp.com>
> > > Subject: Re: [RFC v3 1/1] lib: add compressdev API
> > >
> > > Hi All,
> > >
> > > Please see responses in line.
> > >
> > > Thanks,
> > >
> > > Ahmed
> > >
> > > On 1/23/2018 6:58 AM, Verma, Shally wrote:
> > > > Hi Fiona
> > > >
> > > >> -----Original Message-----
> > > >> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> > > >> Sent: 19 January 2018 17:30
> > > >> To: Verma, Shally <Shally.Verma@cavium.com>; dev@dpdk.org;
> > > >> akhil.goyal@nxp.com
> > > >> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
> > > >> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch,
> Pablo
> > > >> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
> > > >> <Ashish.Gupta@cavium.com>; Sahu, Sunila
> <Sunila.Sahu@cavium.com>;
> > > >> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
> > > >> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>;
> Youri
> > > >> Querry <youri.querry_1@nxp.com>; Ahmed Mansour
> > > >> <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
> > > >> Subject: RE: [RFC v3 1/1] lib: add compressdev API
> > > >>
> > > >> Hi Shally,
> > > >>
> > > >>> -----Original Message-----
> > > >>> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
> > > >>> Sent: Thursday, January 18, 2018 12:54 PM
> > > >>> To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
> > > >>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya,
> Narayana
> > > >> Prasad
> > > >>> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
> > > >> <pablo.de.lara.guarch@intel.com>;
> > > >>> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
> > > >> <Sunila.Sahu@cavium.com>; Jain, Deepak K
> > > >>> <deepak.k.jain@intel.com>; Hemant Agrawal
> > > >> <hemant.agrawal@nxp.com>; Roy Pledge
> > > >>> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>;
> > > >> Ahmed Mansour
> > > >>> <ahmed.mansour@nxp.com>
> > > >>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
> > > >>>
> > > >>> Hi Fiona
> > > >>>
> > > >>> While revisiting this, we identified few questions and additions.
> Please
> > > see
> > > >> them inline.
> > > >>>
> > > >>>> -----Original Message-----
> > > >>>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
> > > >>>> Sent: 15 December 2017 23:19
> > > >>>> To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
> > > >>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya,
> Narayana
> > > >>>> Prasad <NarayanaPrasad.Athreya@cavium.com>;
> > > >>>> pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
> > > >>>> Subject: [RFC v3 1/1] lib: add compressdev API
> > > >>>>
> > > >>>> Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
> > > >>>> ---
> > > >>> //snip
> > > >>>
> > > >>>> +
> > > >>>> +int
> > > >>>> +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
> > > >>>> queue_pair_id,
> > > >>>> + uint32_t max_inflight_ops, int socket_id)
> > > >>> [Shally] Is max_inflights_ops different from nb_streams_per_qp in
> > > struct
> > > >> rte_compressdev_info?
> > > >>> I assume they both carry same purpose. If yes, then it will be better
> to
> > > use
> > > >> single naming convention to
> > > >>> avoid confusion.
> > > >> [Fiona] No, I think they have different purposes.
> > > >> max_inflight_ops should be used to configure the qp with the number
> of
> > > ops
> > > >> the application expects to be able to submit to the qp before it needs
> to
> > > poll
> > > >> for a response. It can be configured differently for each qp. In the QAT
> > > case it
> > > >> dictates the depth of the qp created, it may have different
> implications on
> > > >> other PMDs.
> > > >> nb_sessions_per_qp and nb_streams_per_qp are limitations the
> devices
> > > >> reports and are same for all qps on the device. QAT doesn't have
> those
> > > >> limitations and so would report 0, however I assumed they may be
> > > necessary
> > > >> for other devices.
> > > >> This assumption is based on the patch submitted by NXP to cryptodev
> in
> > > Feb
> > > >> 2017
> > > >>
> > >
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdpd
> > > k.org%2Fml%2Farchives%2Fdev%2F2017-
> > >
> March%2F060740.html&data=02%7C01%7Cahmed.mansour%40nxp.com%7C
> > >
> b012d74d7530493b155108d56258955f%7C686ea1d3bc2b4c6fa92cd99c5c30163
> > >
> 5%7C0%7C0%7C636523054981379413&sdata=2SazlEazMxcBGS7R58CpNrX0G5
> > > OeWx8PLMwf%2FYzqv34%3D&reserved=0
> > > >> I also assume these are not necessarily the max number of sessions in
> ops
> > > on
> > > >> the qp at a given time, but the total number attached, i.e. if the device
> > > has
> > > >> this limitation then sessions must be attached to qps, and presumably
> > > >> reserve some resources. Being attached doesn't imply there is an op
> on
> > > the
> > > >> qp at that time using that session. So it's not to relating to the inflight
> op
> > > >> count, but to the number of sessions attached/detached to the qp.
> > > >> Including Akhil on the To list, maybe NXP can confirm if these params
> are
> > > >> needed.
> > > > [Shally] Ok. Then let's wait for NXP to confirm on this requirement as
> > > currently spec doesn't have any API to attach
> > > queue_pair_to_specific_session_or_stream as cryptodev.
> > > >
> > > > But then how application could know limit on max_inflight_ops
> supported
> > > on a qp? As it can pass any random number during qp_setup().
> > > > Do you believe we need to add a capability field in dev_info to indicate
> limit
> > > on max_inflight_ops?
> > > >
> > > > Thanks
> > > > Shally
> > > [Ahmed] @Fiona This looks ok. max_inflight_ops makes sense. I
> understand
> > > it as a push back mechanism per qp. We do not have physical limit for
> > > number of streams or sessions on a qp in our hardware, so we would
> > > return 0 here as well.
> > > @Shally in our PMD implementation we do not attach streams or sessions
> > > to a particular qp. Regarding max_inflight_ops. I think that limit
> >
> > [Shally] Ok. We too don't have any such limit defined. So, if these are
> redundant fields then can be
> > removed until requirement is identified in context of compressdev.
> [Fiona] Ok, so it seems we're all agreed to remove max_nb_sessions_per_qp
> and
> max_nb_streams_per_qp from rte_compressdev_info.
> I think we're also agreed to keep max_inflight_ops on the qp_setup.
[Shally] yes, by me.
> It's not available on the info and if I understand you both correctly we don't
> need to add it there as a hw limitation or capability.
[Shally] I'm fine with either ways. No preferences here currently.
> I'd expect the appl to set it to
> some value which is probably lower than any hardware limitation. The appl
> may then
> perform many enqueue_bursts until the qp is full and if unable to enqueue a
> burst
> should try dequeueing to free up space on the qp for more enqueue_bursts.
[Shally] The qp does not necessarily have to be full (depending upon the PMD implementation though) to run into this condition, especially when, say, the HW limit < the application's max_inflight_ops.
Thus, would rephrase it as:
"the application may enqueue bursts up to the limit set up in qp_setup() and, if enqueue_burst() returns a number < total nb_ops, then wait on dequeue to free up space".
> I think the value it's set to can give the application some influence over
> latency vs throughput.
> E.g. if it's set to a very large number then it allows the PMD to stockpile
> requests,
> which can result in longer latency, but optimal throughput as easier to keep
> the
> engines supplied with requests. If set very small, latency may be short, as
> requests get
> to engines sooner, but there's a risk of the engines running out of requests
> if the PMD manages to process everything before the application tops up the
> qp.
[Shally] I concur with you.
>
> >
> >
> > > should be independent of hardware. Not all enqueues must succeed.
> The
> > > hardware can push back against the enqueuer dynamically if the
> resources
> > > needed to accommodate additional ops are not available yet. This push
> > > back happens in the software if the user sets a max_inflight_ops that is
> > > less that the hardware max_inflight_ops. The same return pathway can
> be
> > > exercised if the user actually attempts to enqueue more than the
> > > supported max_inflight_ops because of the hardware.
> >
> > [Shally] Ok. This sounds fine to me. As you mentioned, we can let
> application setup a queue pair with
> > any max_inflight_ops and, during enqueue_burst(), leave it on hardware
> to consume as much as it can
> > subject to the limit set in qp_setup().
> > So, this doesn't seem to be a hard requirement on dev_info to expose.
> Only knock-on effect I see is,
> > same testcase can then behave differently with different PMDs as each
> PMD may have different support
> > level for same max_inflight_ops in their qp_setup().
>
On 1/29/2018 7:26 AM, Verma, Shally wrote:
> Hi
>
>> -----Original Message-----
>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
>> Sent: 26 January 2018 00:13
>> To: Verma, Shally <Shally.Verma@cavium.com>; Ahmed Mansour
>> <ahmed.mansour@nxp.com>; dev@dpdk.org; Akhil Goyal
>> <akhil.goyal@nxp.com>
>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
>> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
>> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
>> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
>> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
>> Querry <youri.querry_1@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>>
>> Hi Shally, Ahmed,
>>
>>
>>> -----Original Message-----
>>> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
>>> Sent: Thursday, January 25, 2018 10:25 AM
>>> To: Ahmed Mansour <ahmed.mansour@nxp.com>; Trahe, Fiona
>> <fiona.trahe@intel.com>;
>>> dev@dpdk.org; Akhil Goyal <akhil.goyal@nxp.com>
>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>> Prasad
>>> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
>> <pablo.de.lara.guarch@intel.com>;
>>> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
>> <Sunila.Sahu@cavium.com>; Jain, Deepak K
>>> <deepak.k.jain@intel.com>; Hemant Agrawal
>> <hemant.agrawal@nxp.com>; Roy Pledge
>>> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>
>>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>>>
>>>
>>>
>>>> -----Original Message-----
>>>> From: Ahmed Mansour [mailto:ahmed.mansour@nxp.com]
>>>> Sent: 25 January 2018 01:06
>>>> To: Verma, Shally <Shally.Verma@cavium.com>; Trahe, Fiona
>>>> <fiona.trahe@intel.com>; dev@dpdk.org; Akhil Goyal
>>>> <akhil.goyal@nxp.com>
>>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>>>> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
>>>> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
>>>> <Ashish.Gupta@cavium.com>; Sahu, Sunila <Sunila.Sahu@cavium.com>;
>>>> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
>>>> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>; Youri
>>>> Querry <youri.querry_1@nxp.com>
>>>> Subject: Re: [RFC v3 1/1] lib: add compressdev API
>>>>
>>>> Hi All,
>>>>
>>>> Please see responses in line.
>>>>
>>>> Thanks,
>>>>
>>>> Ahmed
>>>>
>>>> On 1/23/2018 6:58 AM, Verma, Shally wrote:
>>>>> Hi Fiona
>>>>>
>>>>>> -----Original Message-----
>>>>>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
>>>>>> Sent: 19 January 2018 17:30
>>>>>> To: Verma, Shally <Shally.Verma@cavium.com>; dev@dpdk.org;
>>>>>> akhil.goyal@nxp.com
>>>>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya, Narayana
>>>>>> Prasad <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch,
>> Pablo
>>>>>> <pablo.de.lara.guarch@intel.com>; Gupta, Ashish
>>>>>> <Ashish.Gupta@cavium.com>; Sahu, Sunila
>> <Sunila.Sahu@cavium.com>;
>>>>>> Jain, Deepak K <deepak.k.jain@intel.com>; Hemant Agrawal
>>>>>> <hemant.agrawal@nxp.com>; Roy Pledge <roy.pledge@nxp.com>;
>> Youri
>>>>>> Querry <youri.querry_1@nxp.com>; Ahmed Mansour
>>>>>> <ahmed.mansour@nxp.com>; Trahe, Fiona <fiona.trahe@intel.com>
>>>>>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>>>>>>
>>>>>> Hi Shally,
>>>>>>
>>>>>>> -----Original Message-----
>>>>>>> From: Verma, Shally [mailto:Shally.Verma@cavium.com]
>>>>>>> Sent: Thursday, January 18, 2018 12:54 PM
>>>>>>> To: Trahe, Fiona <fiona.trahe@intel.com>; dev@dpdk.org
>>>>>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya,
>> Narayana
>>>>>> Prasad
>>>>>>> <NarayanaPrasad.Athreya@cavium.com>; De Lara Guarch, Pablo
>>>>>> <pablo.de.lara.guarch@intel.com>;
>>>>>>> Gupta, Ashish <Ashish.Gupta@cavium.com>; Sahu, Sunila
>>>>>> <Sunila.Sahu@cavium.com>; Jain, Deepak K
>>>>>>> <deepak.k.jain@intel.com>; Hemant Agrawal
>>>>>> <hemant.agrawal@nxp.com>; Roy Pledge
>>>>>>> <roy.pledge@nxp.com>; Youri Querry <youri.querry_1@nxp.com>;
>>>>>> Ahmed Mansour
>>>>>>> <ahmed.mansour@nxp.com>
>>>>>>> Subject: RE: [RFC v3 1/1] lib: add compressdev API
>>>>>>>
>>>>>>> Hi Fiona
>>>>>>>
>>>>>>> While revisiting this, we identified few questions and additions.
>> Please
>>>> see
>>>>>> them inline.
>>>>>>>> -----Original Message-----
>>>>>>>> From: Trahe, Fiona [mailto:fiona.trahe@intel.com]
>>>>>>>> Sent: 15 December 2017 23:19
>>>>>>>> To: dev@dpdk.org; Verma, Shally <Shally.Verma@cavium.com>
>>>>>>>> Cc: Challa, Mahipal <Mahipal.Challa@cavium.com>; Athreya,
>> Narayana
>>>>>>>> Prasad <NarayanaPrasad.Athreya@cavium.com>;
>>>>>>>> pablo.de.lara.guarch@intel.com; fiona.trahe@intel.com
>>>>>>>> Subject: [RFC v3 1/1] lib: add compressdev API
>>>>>>>>
>>>>>>>> Signed-off-by: Trahe, Fiona <fiona.trahe@intel.com>
>>>>>>>> ---
>>>>>>> //snip
>>>>>>>
>>>>>>>> +
>>>>>>>> +int
>>>>>>>> +rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t
>>>>>>>> queue_pair_id,
>>>>>>>> + uint32_t max_inflight_ops, int socket_id)
>>>>>>> [Shally] Is max_inflights_ops different from nb_streams_per_qp in
>>>> struct
>>>>>> rte_compressdev_info?
>>>>>>> I assume they both carry same purpose. If yes, then it will be better
>> to
>>>> use
>>>>>> single naming convention to
>>>>>>> avoid confusion.
>>>>>> [Fiona] No, I think they have different purposes.
>>>>>> max_inflight_ops should be used to configure the qp with the number
>> of
>>>> ops
>>>>>> the application expects to be able to submit to the qp before it needs
>> to
>>>> poll
>>>>>> for a response. It can be configured differently for each qp. In the QAT
>>>> case it
>>>>>> dictates the depth of the qp created, it may have different
>> implications on
>>>>>> other PMDs.
>>>>>> nb_sessions_per_qp and nb_streams_per_qp are limitations the
>> devices
>>>>>> reports and are same for all qps on the device. QAT doesn't have
>> those
>>>>>> limitations and so would report 0, however I assumed they may be
>>>> necessary
>>>>>> for other devices.
>>>>>> This assumption is based on the patch submitted by NXP to cryptodev
>> in
>>>> Feb
>>>>>> 2017
>>>>>>
>> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fdpd
>>>> k.org%2Fml%2Farchives%2Fdev%2F2017-
>>>>
>> March%2F060740.html&data=02%7C01%7Cahmed.mansour%40nxp.com%7C
>> b012d74d7530493b155108d56258955f%7C686ea1d3bc2b4c6fa92cd99c5c30163
>> 5%7C0%7C0%7C636523054981379413&sdata=2SazlEazMxcBGS7R58CpNrX0G5
>>>> OeWx8PLMwf%2FYzqv34%3D&reserved=0
>>>>>> I also assume these are not necessarily the max number of sessions in
>> ops
>>>> on
>>>>>> the qp at a given time, but the total number attached, i.e. if the device
>>>> has
>>>>>> this limitation then sessions must be attached to qps, and presumably
>>>>>> reserve some resources. Being attached doesn't imply there is an op
>> on
>>>> the
>>>>>> qp at that time using that session. So it's not to relating to the inflight
>> op
>>>>>> count, but to the number of sessions attached/detached to the qp.
>>>>>> Including Akhil on the To list, maybe NXP can confirm if these params
>> are
>>>>>> needed.
>>>>> [Shally] Ok. Then let's wait for NXP to confirm on this requirement as
>>>> currently spec doesn't have any API to attach
>>>> queue_pair_to_specific_session_or_stream as cryptodev.
>>>>> But then how application could know limit on max_inflight_ops
>> supported
>>>> on a qp? As it can pass any random number during qp_setup().
>>>>> Do you believe we need to add a capability field in dev_info to indicate
>> limit
>>>> on max_inflight_ops?
>>>>> Thanks
>>>>> Shally
>>>> [Ahmed] @Fiona This looks ok. max_inflight_ops makes sense. I
>> understand
>>>> it as a push back mechanism per qp. We do not have physical limit for
>>>> number of streams or sessions on a qp in our hardware, so we would
>>>> return 0 here as well.
>>>> @Shally in our PMD implementation we do not attach streams or sessions
>>>> to a particular qp. Regarding max_inflight_ops. I think that limit
>>> [Shally] Ok. We too don't have any such limit defined. So, if these are
>> redundant fields then can be
>>> removed until requirement is identified in context of compressdev.
>> [Fiona] Ok, so it seems we're all agreed to remove max_nb_sessions_per_qp
>> and
>> max_nb_streams_per_qp from rte_compressdev_info.
>> I think we're also agreed to keep max_inflight_ops on the qp_setup.
> [Shally] yes, by me.
[Ahmed] That works.
>
>> It's not available on the info and if I understand you both correctly we don't
>> need to add it there as a hw limitation or capability.
> [Shally] I'm fine with either ways. No preferences here currently.
[Ahmed] Yes.
>
>> I'd expect the appl to set it to
>> some value which is probably lower than any hardware limitation. The appl
>> may then
>> perform many enqueue_bursts until the qp is full and if unable to enqueue a
>> burst
>> should try dequeueing to free up space on the qp for more enqueue_bursts.
> [Shally] qp not necessarily has to be full (depending upon PMD implementation though) to run into this condition, especially when, say, Hw limit < application max_inflight_ops.
> Thus, would rephrase it as:
> "application may enqueue bursts up to limit setup in qp_setup and if enqueue_burst() returns with number < total nb_ops , then wait on dequeue to free-up space".
[Ahmed] Agreed. The hard limit is left to the implementation.
>
>> I think the value it's set to can give the application some influence over
>> latency vs throughput.
>> E.g. if it's set to a very large number then it allows the PMD to stockpile
>> requests,
>> which can result in longer latency, but optimal throughput as easier to keep
>> the
>> engines supplied with requests. If set very small, latency may be short, as
>> requests get
>> to engines sooner, but there's a risk of the engines running out of requests
>> if the PMD manages to process everything before the application tops up the
>> qp.
> [Shally] I concur with you.
[Ahmed] Makes sense.
>
>>>
>>>> should be independent of hardware. Not all enqueues must succeed.
>> The
>>>> hardware can push back against the enqueuer dynamically if the
>> resources
>>>> needed to accommodate additional ops are not available yet. This push
>>>> back happens in the software if the user sets a max_inflight_ops that is
>>>> less than the hardware max_inflight_ops. The same return pathway can
>> be
>>>> exercised if the user actually attempts to enqueue more than the
>>>> supported max_inflight_ops because of the hardware.
>>> [Shally] Ok. This sounds fine to me. As you mentioned, we can let
>> application setup a queue pair with
>>> any max_inflight_ops and, during enqueue_burst(), leave it on hardware
>> to consume as much as it can
>>> subject to the limit set in qp_setup().
>>> So, this doesn't seem to be a hard requirement on dev_info to expose.
>> Only knock-on effect I see is,
>>> same testcase can then behave differently with different PMDs as each
>> PMD may have different support
>>> level for same max_inflight_ops in their qp_setup().
>
@@ -563,6 +563,13 @@ CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n
#
+# Compile generic compression device library
+#
+CONFIG_RTE_LIBRTE_COMPRESSDEV=y
+CONFIG_RTE_LIBRTE_COMPRESSDEV_DEBUG=n
+CONFIG_RTE_COMPRESS_MAX_DEVS=64
+
+#
# Compile generic security library
#
CONFIG_RTE_LIBRTE_SECURITY=y
@@ -52,6 +52,9 @@ DEPDIRS-librte_ether += librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
DEPDIRS-librte_cryptodev += librte_kvargs
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += librte_compressdev
+DEPDIRS-librte_compressdev := librte_eal librte_mempool librte_ring librte_mbuf
+DEPDIRS-librte_compressdev += librte_kvargs
DIRS-$(CONFIG_RTE_LIBRTE_SECURITY) += librte_security
DEPDIRS-librte_security := librte_eal librte_mempool librte_ring librte_mbuf
DEPDIRS-librte_security += librte_ether
new file mode 100644
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_compressdev.a
+
+# library version (initial ABI version for this new library)
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library source files
+SRCS-y += rte_compressdev.c rte_compressdev_pmd.c
+
+# export include files
+# rte_comp.h / rte_compressdev.h are the application-facing API;
+# rte_compressdev_pmd.h is the driver-facing API for PMD implementations.
+SYMLINK-y-include += rte_comp.h
+SYMLINK-y-include += rte_compressdev.h
+SYMLINK-y-include += rte_compressdev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_compressdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
new file mode 100644
@@ -0,0 +1,608 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMP_H_
+#define _RTE_COMP_H_
+
+/**
+ * @file rte_comp.h
+ *
+ * RTE definitions for Data Compression Service
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+#include <rte_mempool.h>
+
+
+/** Status of comp operation.
+ * Returned to the application in the rte_comp_op status field.
+ * Ops are initialised to RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation
+ * from the mempool (see __rte_comp_op_reset()).
+ */
+enum rte_comp_op_status {
+ RTE_COMP_OP_STATUS_SUCCESS = 0,
+ /**< Operation completed successfully */
+ RTE_COMP_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by the device */
+ RTE_COMP_OP_STATUS_INVALID_SESSION,
+ /**< Operation failed due to invalid session arguments */
+ RTE_COMP_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_COMP_OP_STATUS_ERROR,
+ /**< Error handling operation */
+ RTE_COMP_OP_STATUS_INVALID_STATE,
+ /**< Operation is invoked in invalid state */
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE,
+ /**< Output buffer ran out of space before operation completed */
+
+ /* Note:
+ * QAT API has 19 error types.
+ * xISA-l has 5 inflate and 6 deflate errors.
+ * zlib has 6 errors
+ * Propose only include common subset in status - only values where appl
+ * would have different behaviour.
+ * Add separate error field on op return which a PMD could populate
+ */
+};
+
+
+/** Compression Algorithms */
+enum rte_comp_algorithm {
+ RTE_COMP_NULL = 0,
+ /**< No compression.
+ * Pass-through, data is copied unchanged from source buffer to
+ * destination buffer.
+ */
+ RTE_COMP_DEFLATE,
+ /**< DEFLATE compression algorithm
+ * https://tools.ietf.org/html/rfc1951
+ */
+ RTE_COMP_LZS,
+ /**< LZS compression algorithm
+ * https://tools.ietf.org/html/rfc2395
+ */
+ RTE_COMP_ALGO_LIST_END
+ /**< Sentinel marking the number of algorithms;
+ * not a valid algorithm value itself.
+ */
+};
+
+/** Compression Level.
+ * The number is interpreted by each PMD differently. However, lower numbers
+ * give fastest compression, at the expense of compression ratio while
+ * higher numbers may give better compression ratios but are likely slower.
+ */
+#define RTE_COMP_LEVEL_PMD_DEFAULT (-1)
+/**< Use PMD Default */
+#define RTE_COMP_LEVEL_NONE (0)
+/**< Output uncompressed blocks if supported by the specified algorithm */
+#define RTE_COMP_LEVEL_MIN (1)
+/**< Use minimum compression level supported by the PMD */
+#define RTE_COMP_LEVEL_MAX (9)
+/**< Use maximum compression level supported by the PMD */
+
+/** Compression checksum types */
+enum rte_comp_checksum_type {
+ RTE_COMP_NONE,
+ /**< No checksum generated */
+ RTE_COMP_CRC32,
+ /**< Generates a CRC32 checksum, as used by gzip */
+ RTE_COMP_ADLER32,
+ /**< Generates an Adler-32 checksum, as used by zlib */
+ RTE_COMP_CRC32_ADLER32,
+ /**< Generates both Adler-32 and CRC32 checksums, concatenated.
+ * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
+ */
+ /* NOTE(review): these names share the flat rte_comp_ namespace with
+ * algorithm values (e.g. RTE_COMP_NULL); consider a CHKSUM infix
+ * (RTE_COMP_CHECKSUM_NONE, ...) to avoid ambiguity.
+ */
+};
+
+/*
+ * enum rte_comp_hash_algo {
+ * RTE_COMP_HASH_NONE,
+ * RTE_COMP_HASH_SHA1,
+ * RTE_COMP_HASH_SHA256,
+ * };
+ * Need further input from cavium on this
+ * xform will need a flag with above enum value
+ * op will need to provide a virt/phys ptr to a data buffer of appropriate size.
+ * And via capability PMD can say whether supported or not.
+ */
+
+/** Compression Huffman Type - used by DEFLATE algorithm */
+enum rte_comp_huffman {
+ RTE_COMP_DEFAULT,
+ /**< PMD may choose which Huffman codes to use */
+ RTE_COMP_FIXED,
+ /**< Use Fixed Huffman codes */
+ RTE_COMP_DYNAMIC,
+ /**< Use Dynamic Huffman codes */
+ /* NOTE(review): enumerator names are very generic for a DEFLATE-only
+ * concept; a HUFFMAN infix (RTE_COMP_HUFFMAN_DEFAULT, ...) would make
+ * call sites self-describing and avoid clashes in this namespace.
+ */
+};
+
+
+/** Flush flags - control how much output is produced and whether
+ * state/history is retained across ops. Applicable in compress direction.
+ */
+enum rte_comp_flush_flag {
+ RTE_COMP_FLUSH_NONE,
+ /**< Data is not flushed. Output may remain in the compressor and be
+ * processed during a following op. It may not be possible to decompress
+ * output until a later op with some other flush flag has been sent.
+ */
+ RTE_COMP_FLUSH_SYNC,
+ /**< All data should be flushed to output buffer. Output data can be
+ * decompressed. However state and history is not cleared, so future
+ * ops may use history from this op.
+ */
+ RTE_COMP_FLUSH_FULL,
+ /**< All data should be flushed to output buffer. Output data can be
+ * decompressed. State and history data is cleared, so future
+ * ops will be independent of ops processed before this.
+ */
+ RTE_COMP_FLUSH_FINAL
+ /**< Same as RTE_COMP_FLUSH_FULL but also bfinal bit is set in last block
+ */
+ /* TODO:
+ * describe flag meanings for decompression.
+ * describe behaviour in OUT_OF_SPACE case.
+ * At least the last flag is specific to deflate algo. Should this be
+ * called rte_comp_deflate_flush_flag? And should there be
+ * comp_op_deflate_params in the op? */
+};
+
+/** Compression transform types - selects which member of the union in
+ * struct rte_comp_xform is valid.
+ */
+enum rte_comp_xform_type {
+ RTE_COMP_COMPRESS,
+ /**< Compression service - compress */
+ RTE_COMP_DECOMPRESS,
+ /**< Compression service - decompress */
+};
+
+/** Operation types - stateless vs stateful processing of an op */
+enum rte_comp_op_type {
+ RTE_COMP_OP_STATELESS,
+ /**< All data to be processed is submitted in the op, no state or history
+ * from previous ops is used and none will be stored for future ops.
+ * flush must be set to either FLUSH_FULL or FLUSH_FINAL
+ */
+ RTE_COMP_OP_STATEFUL
+ /**< There may be more data to be processed after this op, it's part of a
+ * stream of data. State and history from previous ops can be used
+ * and resulting state and history can be stored for future ops,
+ * depending on flush_flag.
+ */
+};
+
+
+/** Parameters specific to the deflate algorithm */
+struct rte_comp_deflate_params {
+ enum rte_comp_huffman huffman;
+ /**< Compression huffman encoding type */
+};
+
+/**
+ * Session Setup Data common to all compress transforms.
+ * Includes params common to stateless and stateful
+ */
+struct rte_comp_compress_common_params {
+ enum rte_comp_algorithm algo;
+ /**< Algorithm to use for compress operation */
+ union {
+ struct rte_comp_deflate_params deflate;
+ /**< Parameters specific to the deflate algorithm */
+ }; /**< Algorithm specific parameters */
+ int level;
+ /**< Compression level (see RTE_COMP_LEVEL_* constants) */
+ uint16_t window_size;
+ /**< depth of sliding window to be used.
+ * NOTE(review): unit is unspecified - bytes or log2 base (as in
+ * deflate's window bits)? TODO confirm and document.
+ */
+ enum rte_comp_checksum_type chksum;
+ /**< Type of checksum to generate on the uncompressed data */
+};
+
+/**
+ * Session Setup Data for stateful compress transform.
+ * Extra params for stateful transform
+ */
+struct rte_comp_compress_stateful_params {
+ /*TODO : add extra params just needed for stateful, e.g. */
+ /* history buffer size, window size, state, state buffers, etc...?*/
+};
+/** Session Setup Data for compress transform. */
+struct rte_comp_compress_xform {
+ struct rte_comp_compress_common_params cmn;
+ struct rte_comp_compress_stateful_params stateful;
+};
+
+/**
+ * Session Setup Data common to all decompress transforms.
+ * Includes params common to stateless and stateful
+ */
+struct rte_comp_decompress_common_params {
+ enum rte_comp_algorithm algo;
+ /**< Algorithm to use for decompression */
+ enum rte_comp_checksum_type chksum;
+ /**< Type of checksum to generate on the decompressed data. */
+ uint16_t window_size;
+ /**< depth of sliding window which was used on compression.
+ * NOTE(review): unit unspecified, as for the compress direction -
+ * TODO confirm.
+ */
+};
+/**
+ * Session Setup Data for decompress transform.
+ * Extra params for stateful transform
+ */
+struct rte_comp_decompress_stateful_params {
+ /*TODO : add extra params just needed for stateful, e.g.*/
+ /* history buffer size, window size, state, state buffers, etc...?*/
+};
+/** Session Setup Data for decompress transform. */
+struct rte_comp_decompress_xform {
+ struct rte_comp_decompress_common_params cmn;
+ struct rte_comp_decompress_stateful_params stateful;
+};
+
+
+/**
+ * Compression transform structure.
+ *
+ * This is used to specify the compression transforms required.
+ * Each transform structure can hold a single transform, the type field is
+ * used to specify which transform is contained within the union.
+ * There are no chain cases currently supported, just single xforms of
+ * - compress-only
+ * - decompress-only
+ *
+ */
+struct rte_comp_xform {
+ struct rte_comp_xform *next;
+ /**< next xform in chain.
+ * NOTE(review): chaining is documented above as unsupported, so this
+ * should be NULL; either drop the field or document it as reserved.
+ */
+ enum rte_comp_xform_type type;
+ /**< xform type - selects which union member below is valid */
+ union {
+ struct rte_comp_compress_xform compress;
+ /**< xform for compress operation */
+ struct rte_comp_decompress_xform decompress;
+ /**< decompress xform */
+ };
+};
+
+
+struct rte_comp_session;
+/**
+ * Compression Operation.
+ *
+ * This structure contains data relating to performing a compression
+ * operation on the referenced mbuf data buffers.
+ *
+ * All compression operations are Out-of-place (OOP) operations,
+ * as the size of the output data is different to the size of the input data.
+ *
+ * Comp operations are enqueued and dequeued in comp PMDs using the
+ * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
+ */
+struct rte_comp_op {
+ enum rte_comp_op_type op_type;
+ /**< stateless or stateful processing of this op */
+ void *stream_private;
+ /**< Location where PMD maintains stream state.
+ * Only required if op_type is STATEFUL, else should be NULL.
+ */
+ struct rte_comp_session *session;
+ /**< Handle for the initialised session context */
+ struct rte_mempool *mempool;
+ /**< mempool from which operation is allocated */
+ phys_addr_t phys_addr;
+ /**< physical address of this operation */
+ struct rte_mbuf *m_src;
+ /**< source mbuf
+ * The total size of the input buffer(s) can be retrieved using
+ * rte_pktmbuf_data_len(m_src)
+ */
+ struct rte_mbuf *m_dst;
+ /**< destination mbuf
+ * The total size of the output buffer(s) can be retrieved using
+ * rte_pktmbuf_data_len(m_dst)
+ */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for compression or decompression,
+ * specified as number of bytes from start of packet in
+ * source buffer.
+ * Starting point for checksum generation in compress direction.
+ */
+ uint32_t length;
+ /**< The length, in bytes, of the data in source buffer
+ * to be compressed or decompressed.
+ * Also the length of the data over which the checksum
+ * should be generated in compress direction
+ */
+ } src;
+ struct {
+ uint32_t offset;
+ /**< Starting point for writing output data, specified as
+ * number of bytes from start of packet in dest
+ * buffer. Starting point for checksum generation in
+ * decompress direction.
+ */
+ } dst;
+ enum rte_comp_flush_flag flush_flag;
+ /**< defines flush characteristics for the output data.
+ * Only applicable in compress direction
+ */
+ uint64_t input_chksum;
+ /**< An input checksum can be provided to generate a
+ * cumulative checksum across sequential blocks.
+ * Checksum type is as specified in xform chksum_type
+ */
+ uint64_t output_chksum;
+ /**< If a checksum is generated it will be written in here.
+ * Checksum type is as specified in xform chksum_type.
+ */
+ uint32_t consumed;
+ /**< The number of bytes from the source buffer
+ * which were compressed/decompressed.
+ */
+ uint32_t produced;
+ /**< The number of bytes written to the destination buffer
+ * which were compressed/decompressed.
+ */
+ uint64_t debug_status;
+ /**<
+ * Status of the operation is returned in the status param.
+ * This field allows the PMD to pass back extra
+ * pmd-specific debug information. Value is not defined on the API.
+ */
+ uint8_t status;
+ /**<
+ * operation status - use values from enum rte_comp_op_status.
+ * This is reset to
+ * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+ * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
+ * is successfully processed by a PMD
+ */
+
+ /*
+ * TODO - Are extra params needed on stateful op or are all in xform?
+ * rte_comp_op_common_params/_stateful_params?
+ */
+};
+
+
+/**
+ * Clear an operation back to its freshly-allocated state.
+ *
+ * Zeroes the whole mempool element, preserving the two fields that
+ * identify where the op lives (its mempool and physical address), and
+ * marks the op as not yet processed.
+ *
+ * @param op The operation to be reset.
+ */
+static inline void
+__rte_comp_op_reset(struct rte_comp_op *op)
+{
+ struct rte_mempool *saved_pool = op->mempool;
+ phys_addr_t saved_paddr = op->phys_addr;
+
+ memset(op, 0, saved_pool->elt_size);
+ op->mempool = saved_pool;
+ op->phys_addr = saved_paddr;
+ op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+}
+
+
+/**
+ * Associate a session with a compression operation.
+ *
+ * The session handle is simply recorded in the op; no validation is done.
+ *
+ * @param op operation
+ * @param sess session
+ * @return 0 always
+ */
+static inline int
+__rte_comp_op_attach_comp_session(struct rte_comp_op *op,
+ struct rte_comp_session *sess)
+{
+ op->session = sess;
+ return 0;
+}
+
+
+/**
+ * Private data structure belonging to an operation pool.
+ * Stored in the mempool private area and retrieved via
+ * rte_mempool_get_priv() (see __rte_comp_op_get_user_data_size()).
+ */
+struct rte_comp_op_pool_private {
+ uint16_t user_size;
+ /**< Size of private user data with each operation. */
+};
+
+
+/**
+ * Returns the size of private user data allocated with each object in
+ * the mempool
+ *
+ * @param mempool mempool for operations
+ *
+ * @return user data size
+ */
+static inline uint16_t
+__rte_comp_op_get_user_data_size(struct rte_mempool *mempool)
+{
+ /* rte_mempool_get_priv() returns void *; no cast needed in C */
+ struct rte_comp_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->user_size;
+}
+
+
+/**
+ * Creates an operation pool.
+ * Each element holds a struct rte_comp_op followed by user_size bytes of
+ * private user data (see __rte_comp_op_get_user_data()).
+ *
+ * @param name pool name
+ * @param nb_elts number of elements in pool
+ * @param cache_size Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about
+ * cache size
+ * @param user_size Size of private data to allocate for user with
+ * each operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_comp_op_pool_create(const char *name,
+ unsigned int nb_elts, unsigned int cache_size,
+ uint16_t user_size, int socket_id);
+
+/**
+ * Bulk allocate raw elements from the mempool, returning them as comp
+ * operations. All-or-nothing: either nb_ops elements are taken, or none.
+ *
+ * @param mempool operation mempool.
+ * @param ops Array to place allocated operations
+ * @param nb_ops Number of operations to allocate
+ *
+ * @returns
+ * - nb_ops on success, 0 if the pool cannot supply them all
+ */
+static inline int
+__rte_comp_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ int ret = rte_mempool_get_bulk(mempool, (void **)ops, nb_ops);
+
+ return (ret == 0) ? nb_ops : 0;
+}
+
+/**
+ * Allocate a single operation from a mempool and reset it to default
+ * values (status NOT_PROCESSED, all other fields zeroed).
+ *
+ * @param mempool operation mempool
+ *
+ * @returns
+ * - On success a valid rte_comp_op pointer
+ * - NULL if the pool is exhausted
+ */
+static inline struct rte_comp_op *
+rte_comp_op_alloc(struct rte_mempool *mempool)
+{
+ struct rte_comp_op *op = NULL;
+
+ if (unlikely(__rte_comp_op_raw_bulk_alloc(mempool, &op, 1) != 1))
+ return NULL;
+
+ __rte_comp_op_reset(op);
+ return op;
+}
+
+
+/**
+ * Bulk allocate operations from a mempool with default parameters set.
+ *
+ * @param mempool comp operation mempool
+ * @param ops Array to place allocated operations
+ * @param nb_ops Number of operations to allocate
+ *
+ * @returns
+ * - nb_ops if the number of operations requested were allocated.
+ * - 0 if the requested number of ops are not available.
+ * None are allocated in this case.
+ */
+
+static inline unsigned
+rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ uint16_t i; /* match nb_ops type; avoids signed/unsigned compare */
+
+ if (unlikely(__rte_comp_op_raw_bulk_alloc(mempool, ops, nb_ops)
+ != nb_ops))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++)
+ __rte_comp_op_reset(ops[i]);
+
+ return nb_ops;
+}
+
+
+
+/**
+ * Get a pointer to the private user data area that follows an operation,
+ * provided the pool was created with at least the requested capacity.
+ *
+ * @param op operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - pointer to the first byte after the op on success
+ * - NULL if the op has no mempool or the area is too small
+ */
+static inline void *
+__rte_comp_op_get_user_data(struct rte_comp_op *op, uint32_t size)
+{
+ uint32_t avail;
+
+ if (unlikely(op->mempool == NULL))
+ return NULL;
+
+ avail = __rte_comp_op_get_user_data_size(op->mempool);
+ if (unlikely(avail < size))
+ return NULL;
+
+ return (void *)(op + 1);
+}
+
+/**
+ * Return an operation to the mempool it was allocated from.
+ * Safe to call with a NULL op or an op with no mempool; both are no-ops.
+ *
+ * @param op operation
+ */
+static inline void
+rte_comp_op_free(struct rte_comp_op *op)
+{
+ if (op == NULL)
+ return;
+ if (op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Record a session handle in an operation.
+ * No validation of the session is performed.
+ *
+ * @param op operation
+ * @param sess session
+ * @return 0 always
+ */
+static inline int
+rte_comp_op_attach_session(struct rte_comp_op *op,
+ struct rte_comp_session *sess)
+{
+ op->session = sess;
+
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMP_H_ */
new file mode 100644
@@ -0,0 +1,1167 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_interrupts.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "rte_comp.h"
+#include "rte_compressdev.h"
+#include "rte_compressdev_pmd.h"
+
+/* Number of registered compressdev drivers; also the next driver id */
+static uint8_t nb_drivers;
+
+/* Static table backing all compression device slots */
+struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];
+
+/* Convenience alias to the start of the device table */
+struct rte_compressdev *rte_compressdevs = &rte_comp_devices[0];
+
+static struct rte_compressdev_global compressdev_globals = {
+	.devs = &rte_comp_devices[0],
+	.data = { NULL },
+	.nb_devs = 0,
+	.max_devs = RTE_COMPRESS_MAX_DEVS
+};
+
+struct rte_compressdev_global *rte_compressdev_globals = &compressdev_globals;
+
+/* spinlock for comp device callbacks */
+static rte_spinlock_t rte_compressdev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+
+/**
+ * The user application callback description.
+ *
+ * It contains callback address to be registered by user application,
+ * the pointer to the parameters for callback, and the event type.
+ */
+struct rte_compressdev_callback {
+	TAILQ_ENTRY(rte_compressdev_callback) next; /**< Callbacks list */
+	rte_compressdev_cb_fn cb_fn;		/**< Callback address */
+	void *cb_arg;				/**< Parameter for callback */
+	enum rte_compressdev_event_type event;	/**< Interrupt event type */
+	uint32_t active;			/**< Callback is executing */
+};
+
+/**
+ * The compression algorithm strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_comp_algorithm_strings[] = {
+	[RTE_COMP_DEFLATE]	= "deflate",
+	[RTE_COMP_LZS]		= "lzs",
+
+};
+
+
+/*
+ * Check that value x lies within range y ([min, max] inclusive) and, if
+ * the range defines a non-zero increment, that x is a multiple of it.
+ * Evaluates to non-zero when the check FAILS.
+ *
+ * Arguments are fully parenthesized so compound expressions expand
+ * correctly. NOTE: x and y are evaluated more than once; do not pass
+ * expressions with side effects.
+ */
+#define param_range_check(x, y) \
+	((((x) < (y).min) || ((x) > (y).max)) || \
+	((y).increment != 0 && ((x) % (y).increment) != 0))
+
+
+/* Map a single device feature flag to its display name; NULL if the
+ * flag is unknown. Expects exactly one flag bit, not a mask.
+ */
+const char *
+rte_compressdev_get_feature_name(uint64_t flag)
+{
+	switch (flag) {
+	case RTE_COMP_FF_HW_ACCELERATED:
+		return "HW_ACCELERATED";
+	case RTE_COMP_FF_CPU_SSE:
+		return "CPU_SSE";
+	case RTE_COMP_FF_CPU_AVX:
+		return "CPU_AVX";
+	case RTE_COMP_FF_CPU_AVX2:
+		return "CPU_AVX2";
+	case RTE_COMP_FF_CPU_AVX512:
+		return "CPU_AVX512";
+	case RTE_COMP_FF_CPU_NEON:
+		return "CPU_NEON";
+	default:
+		return NULL;
+	}
+}
+
+/* Map a single transform/service feature flag to its display name;
+ * NULL if the flag is unknown. Expects exactly one flag bit.
+ */
+const char *
+rte_comp_get_feature_name(uint64_t flag)
+{
+	switch (flag) {
+	case RTE_COMP_FF_MBUF_SCATTER_GATHER:
+		return "MBUF_SCATTER_GATHER";
+	case RTE_COMP_FF_MULTI_PKT_CHECKSUM:
+		return "MULTI_PKT_CHKSUM";
+	case RTE_COMP_FF_STATEFUL_COMPRESSION:
+		return "STATEFUL_COMPRESSION";
+	case RTE_COMP_FF_STATEFUL_DECOMPRESSION:
+		return "STATEFUL_DECOMPRESSION";
+	default:
+		return NULL;
+	}
+}
+
+/* Return the device structure for dev_id, or NULL if the id is out of
+ * range. Guarding here prevents indexing past the device table; the
+ * callers in this file already handle a NULL return.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_get_dev(uint8_t dev_id)
+{
+	if (dev_id >= rte_compressdev_globals->max_devs)
+		return NULL;
+
+	return &rte_compressdev_globals->devs[dev_id];
+}
+
+/* Look up an attached device by name; NULL if not found or name is NULL */
+struct rte_compressdev *
+rte_compressdev_pmd_get_named_dev(const char *name)
+{
+	struct rte_compressdev *dev;
+	unsigned int idx;
+
+	if (name == NULL)
+		return NULL;
+
+	/* Scan the whole table: attached devices may be sparse */
+	for (idx = 0; idx < rte_compressdev_globals->max_devs; idx++) {
+		dev = &rte_compressdev_globals->devs[idx];
+
+		if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
+			continue;
+
+		if (strcmp(dev->data->name, name) == 0)
+			return dev;
+	}
+
+	return NULL;
+}
+
+/* Return 1 if dev_id refers to an attached device, 0 otherwise.
+ * Compare against the table size rather than nb_devs: device ids are
+ * not compacted when a device is released, so a valid id can be
+ * numerically >= nb_devs.
+ */
+unsigned int
+rte_compressdev_pmd_is_valid_dev(uint8_t dev_id)
+{
+	struct rte_compressdev *dev = NULL;
+
+	if (dev_id >= rte_compressdev_globals->max_devs)
+		return 0;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+	if (dev == NULL || dev->attached != RTE_COMPRESSDEV_ATTACHED)
+		return 0;
+	else
+		return 1;
+}
+
+
+/* Find the id of the attached device named @name; -1 if none matches */
+int
+rte_compressdev_get_dev_id(const char *name)
+{
+	struct rte_compressdev *devs;
+	unsigned int i;
+
+	if (name == NULL)
+		return -1;
+
+	devs = rte_compressdev_globals->devs;
+
+	for (i = 0; i < rte_compressdev_globals->nb_devs; i++) {
+		if (devs[i].attached != RTE_COMPRESSDEV_ATTACHED)
+			continue;
+		if (strcmp(devs[i].data->name, name) == 0)
+			return i;
+	}
+
+	return -1;
+}
+
+/* Number of currently attached compression devices */
+uint8_t
+rte_compressdev_count(void)
+{
+	return rte_compressdev_globals->nb_devs;
+}
+
+/* Count attached devices bound to the given driver id */
+uint8_t
+rte_compressdev_device_count_by_driver(uint8_t driver_id)
+{
+	uint8_t slot, dev_count = 0;
+	struct rte_compressdev *devs = rte_compressdev_globals->devs;
+
+	for (slot = 0; slot < rte_compressdev_globals->max_devs; slot++) {
+		if (devs[slot].attached != RTE_COMPRESSDEV_ATTACHED)
+			continue;
+		if (devs[slot].driver_id == driver_id)
+			dev_count++;
+	}
+
+	return dev_count;
+}
+
+/* Fill @devices with the ids of up to @nb_devices attached devices
+ * whose driver name starts with @driver_name; returns the number of
+ * ids written.
+ */
+uint8_t
+rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
+	uint8_t nb_devices)
+{
+	struct rte_compressdev *devs = rte_compressdev_globals->devs;
+	uint8_t max_devs = rte_compressdev_globals->max_devs;
+	uint8_t slot, count = 0;
+
+	for (slot = 0; slot < max_devs && count < nb_devices; slot++) {
+		if (devs[slot].attached != RTE_COMPRESSDEV_ATTACHED)
+			continue;
+
+		/* Prefix match, as driver names are registered */
+		if (strncmp(devs[slot].device->driver->name, driver_name,
+				strlen(driver_name)) == 0)
+			devices[count++] = devs[slot].data->dev_id;
+	}
+
+	return count;
+}
+
+
+/* NUMA socket the device is attached to; -1 if dev_id is invalid */
+int
+rte_compressdev_socket_id(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id))
+		return -1;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	return dev->data->socket_id;
+}
+
+/* Allocate (primary process) or look up (secondary process) the shared
+ * rte_compressdev_data memzone for dev_id and return it via *data.
+ * Returns 0 on success, -EINVAL on name truncation, -ENOMEM on failure.
+ */
+static inline int
+rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
+		int socket_id)
+{
+	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	const struct rte_memzone *mz;
+	int n;
+
+	/* generate memzone name */
+	n = snprintf(mz_name, sizeof(mz_name),
+			"rte_compressdev_data_%u", dev_id);
+	if (n >= (int)sizeof(mz_name))
+		return -EINVAL;
+
+	/* Primary reserves the zone; secondaries attach to it by name */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		mz = rte_memzone_reserve(mz_name,
+				sizeof(struct rte_compressdev_data),
+				socket_id, 0);
+	} else
+		mz = rte_memzone_lookup(mz_name);
+
+	if (mz == NULL)
+		return -ENOMEM;
+
+	*data = mz->addr;
+	/* Only the primary clears the zone; it is shared with secondaries */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(*data, 0, sizeof(struct rte_compressdev_data));
+
+	return 0;
+}
+
+/* Return the first detached slot in the device table, or
+ * RTE_COMPRESS_MAX_DEVS if the table is full.
+ */
+static uint8_t
+rte_compressdev_find_free_device_index(void)
+{
+	uint8_t dev_id;
+
+	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
+		if (rte_comp_devices[dev_id].attached ==
+				RTE_COMPRESSDEV_DETACHED)
+			break;
+	}
+	return dev_id;
+}
+
+/* Allocate a device slot named @name on @socket_id and initialise its
+ * shared data area. Returns NULL if the name is already in use, the
+ * table is full, or data allocation fails.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_allocate(const char *name, int socket_id)
+{
+	struct rte_compressdev *compressdev;
+	uint8_t dev_id;
+
+	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
+		COMPDEV_LOG_ERR("comp device with name %s already allocated!",
+				name);
+		return NULL;
+	}
+
+	dev_id = rte_compressdev_find_free_device_index();
+	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
+		COMPDEV_LOG_ERR("Reached maximum number of comp devices");
+		return NULL;
+	}
+
+	compressdev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (compressdev->data == NULL) {
+		/* NOTE(review): compressdev_globals.data[dev_id] is read
+		 * here but never written back after allocation — presumably
+		 * it should cache the pointer; verify against the
+		 * secondary-process attach path.
+		 */
+		struct rte_compressdev_data *compressdev_data =
+				compressdev_globals.data[dev_id];
+
+		int retval = rte_compressdev_data_alloc(dev_id,
+				&compressdev_data, socket_id);
+
+		if (retval < 0 || compressdev_data == NULL)
+			return NULL;
+
+		compressdev->data = compressdev_data;
+
+		snprintf(compressdev->data->name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+				"%s", name);
+
+		compressdev->data->dev_id = dev_id;
+		compressdev->data->socket_id = socket_id;
+		compressdev->data->dev_started = 0;
+
+		/* init user callbacks */
+		TAILQ_INIT(&(compressdev->link_intr_cbs));
+
+		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;
+
+		compressdev_globals.nb_devs++;
+	}
+
+	return compressdev;
+}
+
+/* Release a device slot: close the device (if ops are set) and mark the
+ * slot detached. Returns 0 on success, -EINVAL on NULL input, or the
+ * negative error from rte_compressdev_close().
+ */
+int
+rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
+{
+	int ret;
+
+	if (compressdev == NULL)
+		return -EINVAL;
+
+	/* Close device only if device operations have been set */
+	if (compressdev->dev_ops) {
+		ret = rte_compressdev_close(compressdev->data->dev_id);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Slot becomes reusable; nb_devs shrinks but ids stay sparse */
+	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
+	compressdev_globals.nb_devs--;
+	return 0;
+}
+
+/* Number of queue pairs configured on the device.
+ * NOTE(review): dev_id is not validated here; an out-of-range or
+ * detached id dereferences uninitialised data — consider guarding with
+ * rte_compressdev_pmd_is_valid_dev().
+ */
+uint16_t
+rte_compressdev_queue_pair_count(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	dev = &rte_comp_devices[dev_id];
+	return dev->data->nb_queue_pairs;
+}
+
+/* (Re)size the device's queue-pair pointer array to nb_qpairs entries.
+ * First-time configuration allocates the array; reconfiguration
+ * releases surplus queue pairs, reallocs the array and zeroes any new
+ * entries. Returns 0 on success or a negative errno.
+ */
+static int
+rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
+		uint16_t nb_qpairs, int socket_id)
+{
+	struct rte_compressdev_info dev_info;
+	void **qp;
+	unsigned int i;
+
+	if ((dev == NULL) || (nb_qpairs < 1)) {
+		COMPDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
+					dev, nb_qpairs);
+		return -EINVAL;
+	}
+
+	COMPDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
+			nb_qpairs, dev->data->dev_id);
+
+	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+	/* PMD reports its hard upper bound on queue pairs */
+	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
+		COMPDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
+				nb_qpairs, dev->data->dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->queue_pairs == NULL) { /* first time configuration */
+		dev->data->queue_pairs = rte_zmalloc_socket(
+				"compressdev->queue_pairs",
+				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
+				RTE_CACHE_LINE_SIZE, socket_id);
+
+		if (dev->data->queue_pairs == NULL) {
+			dev->data->nb_queue_pairs = 0;
+			COMPDEV_LOG_ERR(
+			"failed to get memory for qp meta data, nb_queues %u",
+							nb_qpairs);
+			return -(ENOMEM);
+		}
+	} else { /* re-configure */
+		int ret;
+		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
+
+		qp = dev->data->queue_pairs;
+
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
+				-ENOTSUP);
+
+		/* Release queue pairs that fall outside the new count */
+		for (i = nb_qpairs; i < old_nb_queues; i++) {
+			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+
+		/* On realloc failure the old array is still referenced by
+		 * dev->data->queue_pairs, so nothing is leaked.
+		 */
+		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
+				RTE_CACHE_LINE_SIZE);
+		if (qp == NULL) {
+			COMPDEV_LOG_ERR(
+			"failed to realloc qp meta data, nb_queues %u",
+						nb_qpairs);
+			return -(ENOMEM);
+		}
+
+		/* Zero-init entries added by the grow case */
+		if (nb_qpairs > old_nb_queues) {
+			uint16_t new_qs = nb_qpairs - old_nb_queues;
+
+			memset(qp + old_nb_queues, 0,
+				sizeof(qp[0]) * new_qs);
+		}
+
+		dev->data->queue_pairs = qp;
+
+	}
+	dev->data->nb_queue_pairs = nb_qpairs;
+	return 0;
+}
+
+/* Start one queue pair of a device. Returns 0 on success, -EINVAL for a
+ * bad device or queue id, -ENOTSUP if the PMD lacks the op, or the
+ * PMD's own return code.
+ */
+int
+rte_compressdev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		COMPDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);
+
+	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
+
+}
+
+/* Stop one queue pair of a device. Same error contract as
+ * rte_compressdev_queue_pair_start().
+ */
+int
+rte_compressdev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		COMPDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);
+
+	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
+
+}
+
+/* Configure a stopped device: set up its queue pairs, then delegate to
+ * the PMD's dev_configure. Returns 0 on success, -EINVAL/-EBUSY/-ENOTSUP
+ * or a negative PMD error otherwise.
+ */
+int
+rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
+{
+	struct rte_compressdev *dev;
+	int diag;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* Reconfiguring a running device is not allowed */
+	if (dev->data->dev_started) {
+		COMPDEV_LOG_ERR(
+		    "device %d must be stopped to allow configuration", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+	/* Setup new number of queue pairs and reconfigure device. */
+	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
+			config->socket_id);
+	if (diag != 0) {
+		COMPDEV_LOG_ERR("dev%d rte_comp_dev_queue_pairs_config = %d",
+				dev_id, diag);
+		return diag;
+	}
+
+	return (*dev->dev_ops->dev_configure)(dev, config);
+}
+
+
+/* Start a configured device. Starting an already-started device logs an
+ * error but returns 0 (benign no-op). Returns -EINVAL for a bad id,
+ * -ENOTSUP if the PMD lacks dev_start, or the PMD's error code.
+ */
+int
+rte_compressdev_start(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+	int diag;
+
+	COMPDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+	if (dev->data->dev_started != 0) {
+		COMPDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
+			dev_id);
+		return 0;
+	}
+
+	diag = (*dev->dev_ops->dev_start)(dev);
+	if (diag != 0)
+		return diag;
+
+	/* Mark started only after the PMD reports success */
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+/* Stop a started device. Logs and returns quietly on an invalid id or
+ * if the device is already stopped (no error is reported to the caller).
+ */
+void
+rte_compressdev_stop(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+	if (dev->data->dev_started == 0) {
+		COMPDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
+			dev_id);
+		return;
+	}
+
+	(*dev->dev_ops->dev_stop)(dev);
+	dev->data->dev_started = 0;
+}
+
+/* Close a stopped device via the PMD's dev_close op.
+ * Returns 0 on success, -1 for a bad id, -EBUSY if still started,
+ * -ENOTSUP if the PMD lacks the op, or the PMD's negative error.
+ */
+int
+rte_compressdev_close(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+	int retval;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -1;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		COMPDEV_LOG_ERR("Device %u must be stopped before closing",
+				dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+
+	retval = (*dev->dev_ops->dev_close)(dev);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Configure one queue pair of a stopped device; delegates sizing to the
+ * PMD. Returns 0 or a negative errno (-EINVAL/-EBUSY/-ENOTSUP or PMD
+ * error).
+ */
+int
+rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+		uint32_t max_inflight_ops, int socket_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		COMPDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	/* Queue pairs may only be set up while the device is stopped */
+	if (dev->data->dev_started) {
+		COMPDEV_LOG_ERR(
+		    "device %d must be stopped to allow configuration", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
+
+	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
+			max_inflight_ops, socket_id);
+}
+
+
+/* Retrieve device statistics into *stats (zeroed first so fields the
+ * PMD does not fill are defined). Returns 0, -ENODEV, -EINVAL or
+ * -ENOTSUP.
+ */
+int
+rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -ENODEV;
+	}
+
+	if (stats == NULL) {
+		COMPDEV_LOG_ERR("Invalid stats ptr");
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	memset(stats, 0, sizeof(*stats));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	(*dev->dev_ops->stats_get)(dev, stats);
+	return 0;
+}
+
+/* Reset device statistics; silently returns on an invalid id or if the
+ * PMD does not implement stats_reset.
+ */
+void
+rte_compressdev_stats_reset(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+	(*dev->dev_ops->stats_reset)(dev);
+}
+
+
+/* Retrieve device capabilities/info into *dev_info.
+ * Uses the common validity check (attached state included) rather than
+ * a bare nb_devs comparison, consistent with the other APIs in this
+ * file, and rejects a NULL output pointer.
+ */
+void
+rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
+{
+	struct rte_compressdev *dev;
+
+	if (dev_info == NULL || !rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* Zero first so fields the PMD does not fill are defined */
+	memset(dev_info, 0, sizeof(struct rte_compressdev_info));
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+	dev_info->driver_name = dev->device->driver->name;
+}
+
+
+/* Register a user callback for a device event. Registering the same
+ * (fn, arg, event) triple twice is a no-op. Returns 0, -EINVAL or
+ * -ENOMEM. The callback list is protected by rte_compressdev_cb_lock.
+ */
+int
+rte_compressdev_callback_register(uint8_t dev_id,
+			enum rte_compressdev_event_type event,
+			rte_compressdev_cb_fn cb_fn, void *cb_arg)
+{
+	struct rte_compressdev *dev;
+	struct rte_compressdev_callback *user_cb;
+
+	if (!cb_fn)
+		return -EINVAL;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	rte_spinlock_lock(&rte_compressdev_cb_lock);
+
+	/* Look for an identical existing registration */
+	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
+		if (user_cb->cb_fn == cb_fn &&
+			user_cb->cb_arg == cb_arg &&
+			user_cb->event == event) {
+			break;
+		}
+	}
+
+	/* create a new callback. */
+	if (user_cb == NULL) {
+		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+				sizeof(struct rte_compressdev_callback), 0);
+		if (user_cb != NULL) {
+			user_cb->cb_fn = cb_fn;
+			user_cb->cb_arg = cb_arg;
+			user_cb->event = event;
+			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
+		}
+	}
+
+	rte_spinlock_unlock(&rte_compressdev_cb_lock);
+	return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+/* Unregister a user callback. A cb_arg of (void *)-1 acts as a
+ * wildcard matching any argument. Callbacks currently executing (see
+ * rte_compressdev_pmd_callback_process) are left in place and -EAGAIN
+ * is returned so the caller can retry. Returns 0 or a negative errno.
+ */
+int
+rte_compressdev_callback_unregister(uint8_t dev_id,
+			enum rte_compressdev_event_type event,
+			rte_compressdev_cb_fn cb_fn, void *cb_arg)
+{
+	int ret;
+	struct rte_compressdev *dev;
+	struct rte_compressdev_callback *cb, *next;
+
+	if (!cb_fn)
+		return -EINVAL;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	rte_spinlock_lock(&rte_compressdev_cb_lock);
+
+	ret = 0;
+	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
+
+		/* Take next first: cb may be freed below */
+		next = TAILQ_NEXT(cb, next);
+
+		if (cb->cb_fn != cb_fn || cb->event != event ||
+				(cb->cb_arg != (void *)-1 &&
+				cb->cb_arg != cb_arg))
+			continue;
+
+		/*
+		 * if this callback is not executing right now,
+		 * then remove it.
+		 */
+		if (cb->active == 0) {
+			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
+			rte_free(cb);
+		} else {
+			ret = -EAGAIN;
+		}
+	}
+
+	rte_spinlock_unlock(&rte_compressdev_cb_lock);
+	return ret;
+}
+
+/* Invoke every callback registered for @event on @dev.
+ * The lock is dropped around each user callback invocation; the
+ * 'active' flag marks the entry so callback_unregister() will not free
+ * it while it runs. A copy of the entry is taken before unlocking so
+ * the invocation uses a stable snapshot.
+ */
+void
+rte_compressdev_pmd_callback_process(struct rte_compressdev *dev,
+	enum rte_compressdev_event_type event)
+{
+	struct rte_compressdev_callback *cb_lst;
+	struct rte_compressdev_callback dev_cb;
+
+	rte_spinlock_lock(&rte_compressdev_cb_lock);
+	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
+		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+			continue;
+		dev_cb = *cb_lst;
+		cb_lst->active = 1;
+		rte_spinlock_unlock(&rte_compressdev_cb_lock);
+		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
+				dev_cb.cb_arg);
+		rte_spinlock_lock(&rte_compressdev_cb_lock);
+		cb_lst->active = 0;
+	}
+	rte_spinlock_unlock(&rte_compressdev_cb_lock);
+}
+
+
+/* Initialise the per-device private portion of a session from @xforms,
+ * allocating it from @mp, unless it was already configured for this
+ * device's driver. Returns 0 on success, -EINVAL on NULL args, or the
+ * PMD's negative error.
+ *
+ * NOTE(review): rte_compressdev_pmd_get_dev() as written never returns
+ * NULL, so the dev == NULL check below is ineffective for an
+ * out-of-range dev_id — confirm the intended validation.
+ */
+int
+rte_compressdev_session_init(uint8_t dev_id,
+		struct rte_comp_session *sess,
+		struct rte_comp_xform *xforms,
+		struct rte_mempool *mp)
+{
+	struct rte_compressdev *dev;
+	uint8_t index;
+	int ret;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (sess == NULL || xforms == NULL || dev == NULL)
+		return -EINVAL;
+
+	/* Private data is stored per driver id within the session */
+	index = dev->driver_id;
+
+	if (sess->sess_private_data[index] == NULL) {
+		ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
+		if (ret < 0) {
+			COMPDEV_LOG_ERR(
+				"dev_id %d failed to configure session details",
+				dev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Allocate a blank session header from @mp and clear its per-driver
+ * private-data pointers. Returns NULL if the mempool is exhausted.
+ */
+struct rte_comp_session *
+rte_compressdev_session_create(struct rte_mempool *mp)
+{
+	struct rte_comp_session *sess;
+
+	/* Allocate a session structure from the session pool */
+	if (rte_mempool_get(mp, (void *)&sess)) {
+		COMPDEV_LOG_ERR("couldn't get object from session mempool");
+		return NULL;
+	}
+
+	/* Clear device session pointer */
+	memset(sess, 0, (sizeof(void *) * nb_drivers));
+
+	return sess;
+}
+
+/* Attach a session to a specific queue pair. Returns 0 on success or if
+ * the PMD does not implement the (optional) op, -EINVAL for a bad
+ * device id, -EPERM if the PMD rejects the attach.
+ */
+int
+rte_compressdev_queue_pair_attach_session(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_session *sess)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* The API is optional, not returning error if driver does not support */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
+
+	void *sess_priv = get_session_private_data(sess, dev->driver_id);
+
+	if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
+		COMPDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
+				dev_id, qp_id);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/* Detach a session from a specific queue pair; mirror of
+ * rte_compressdev_queue_pair_attach_session() with the same contract.
+ */
+int
+rte_compressdev_queue_pair_detach_session(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_session *sess)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		COMPDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* The API is optional, not returning error if driver does not support */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
+
+	void *sess_priv = get_session_private_data(sess, dev->driver_id);
+
+	if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
+		COMPDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
+				dev_id, qp_id);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/* Free the private data this device's driver holds in @sess.
+ * NOTE(review): session_clear is called unconditionally — presumably
+ * every PMD must implement it; confirm or guard with
+ * RTE_FUNC_PTR_OR_ERR_RET like the other ops.
+ */
+int
+rte_compressdev_session_clear(uint8_t dev_id,
+		struct rte_comp_session *sess)
+{
+	struct rte_compressdev *dev;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (dev == NULL || sess == NULL)
+		return -EINVAL;
+
+	dev->dev_ops->session_clear(dev, sess);
+
+	return 0;
+}
+
+/* Return the session header to its mempool. Fails with -EBUSY if any
+ * driver still holds private data in the session (callers must run
+ * rte_compressdev_session_clear() per device first).
+ */
+int
+rte_compressdev_session_terminate(struct rte_comp_session *sess)
+{
+	uint8_t i;
+	void *sess_priv;
+	struct rte_mempool *sess_mp;
+
+	if (sess == NULL)
+		return -EINVAL;
+
+	/* Check that all device private data has been freed */
+	for (i = 0; i < nb_drivers; i++) {
+		sess_priv = get_session_private_data(sess, i);
+		if (sess_priv != NULL)
+			return -EBUSY;
+	}
+
+	/* Return session to mempool */
+	sess_mp = rte_mempool_from_obj(sess);
+	rte_mempool_put(sess_mp, sess);
+
+	return 0;
+}
+
+/* Size in bytes of the driver-independent session header */
+unsigned int
+rte_compressdev_get_header_session_size(void)
+{
+	/*
+	 * Header contains pointers to the private data
+	 * of all registered drivers
+	 */
+	return (sizeof(void *) * nb_drivers);
+}
+
+/* Size in bytes of the private session data this device's PMD needs;
+ * 0 if the device is invalid or the PMD does not report a size.
+ */
+unsigned int
+rte_compressdev_get_private_session_size(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id))
+		return 0;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (*dev->dev_ops->session_get_size == NULL)
+		return 0;
+
+	return (*dev->dev_ops->session_get_size)(dev);
+
+}
+
+/** Initialise rte_comp_op mempool element: zero the whole element
+ * (op header plus user area) and pre-fill the fields that are constant
+ * for the element's lifetime (status, IOVA, owning mempool).
+ */
+static void
+rte_comp_op_init(struct rte_mempool *mempool,
+		__rte_unused void *opaque_arg,
+		void *_op_data,
+		__rte_unused unsigned int i)
+{
+	struct rte_comp_op *op = _op_data;
+
+	memset(_op_data, 0, mempool->elt_size);
+
+	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+	op->phys_addr = rte_mem_virt2iova(_op_data);
+	op->mempool = mempool;
+}
+
+
+/* Create (or reuse) a mempool of rte_comp_op elements, each followed by
+ * user_size bytes of private user data. If a pool with this name
+ * already exists it is reused only when its geometry is compatible
+ * (same element size, at least the requested cache/pool/user sizes);
+ * otherwise NULL is returned.
+ */
+struct rte_mempool *
+rte_comp_op_pool_create(const char *name,
+		unsigned int nb_elts, unsigned int cache_size,
+		uint16_t user_size, int socket_id)
+{
+	struct rte_comp_op_pool_private *priv;
+
+	unsigned int elt_size = sizeof(struct rte_comp_op) + user_size;
+
+	/* lookup mempool in case already allocated */
+	struct rte_mempool *mp = rte_mempool_lookup(name);
+
+	if (mp != NULL) {
+		priv = (struct rte_comp_op_pool_private *)
+				rte_mempool_get_priv(mp);
+
+		if (mp->elt_size != elt_size ||
+				mp->cache_size < cache_size ||
+				mp->size < nb_elts ||
+				priv->user_size < user_size) {
+			mp = NULL;
+			COMPDEV_LOG_ERR(
+		"Mempool %s already exists but with incompatible parameters",
+					name);
+			return NULL;
+		}
+		return mp;
+	}
+
+	mp = rte_mempool_create(
+			name,
+			nb_elts,
+			elt_size,
+			cache_size,
+			sizeof(struct rte_comp_op_pool_private),
+			NULL,
+			NULL,
+			rte_comp_op_init,
+			NULL,
+			socket_id,
+			0);
+
+	if (mp == NULL) {
+		COMPDEV_LOG_ERR("Failed to create mempool %s", name);
+		return NULL;
+	}
+
+	/* Record the user area size for compatibility checks on reuse */
+	priv = (struct rte_comp_op_pool_private *)
+			rte_mempool_get_priv(mp);
+
+	priv->user_size = user_size;
+
+	return mp;
+}
+
+/* Generate a unique device name "<prefix>_<i>" into @name (which must
+ * hold at least RTE_COMPRESSDEV_NAME_MAX_LEN bytes). Probes indices
+ * until an unused name is found. Returns 0 on success, -EINVAL on NULL
+ * output or truncation, a negative snprintf error, or -1 if every
+ * candidate name is taken.
+ */
+int
+rte_compressdev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
+{
+	struct rte_compressdev *dev = NULL;
+	uint32_t i = 0;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < RTE_COMPRESS_MAX_DEVS; i++) {
+		int ret = snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+				"%s_%u", dev_name_prefix, i);
+
+		if (ret < 0)
+			return ret;
+		/* snprintf reports the untruncated length: treat a name
+		 * that did not fit as an error rather than probing with
+		 * a truncated (possibly colliding) name.
+		 */
+		if (ret >= RTE_COMPRESSDEV_NAME_MAX_LEN)
+			return -EINVAL;
+
+		dev = rte_compressdev_pmd_get_named_dev(name);
+		if (!dev)
+			return 0;
+	}
+
+	return -1;
+}
+
+/* List type and head for all registered compressdev drivers */
+TAILQ_HEAD(compressdev_driver_list, compressdev_driver);
+
+static struct compressdev_driver_list compressdev_driver_list =
+		TAILQ_HEAD_INITIALIZER(compressdev_driver_list);
+
+/* Resolve a driver name (prefix match) to its numeric id; -1 if the
+ * name is NULL or no registered driver matches.
+ */
+int
+rte_compressdev_driver_id_get(const char *name)
+{
+	struct compressdev_driver *drv;
+
+	if (name == NULL) {
+		RTE_LOG(DEBUG, COMPRESSDEV, "name pointer NULL");
+		return -1;
+	}
+
+	TAILQ_FOREACH(drv, &compressdev_driver_list, next) {
+		const char *drv_name = drv->driver->name;
+
+		if (strncmp(drv_name, name, strlen(drv_name)) == 0)
+			return drv->id;
+	}
+	return -1;
+}
+
+/* Name of the device identified by dev_id; NULL if the id is invalid */
+const char *
+rte_compressdev_name_get(uint8_t dev_id)
+{
+	struct rte_compressdev *dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (dev == NULL)
+		return NULL;
+
+	return dev->data->name;
+}
+
+/* Resolve a driver id back to its name; NULL if no such driver */
+const char *
+rte_compressdev_driver_name_get(uint8_t driver_id)
+{
+	struct compressdev_driver *drv;
+
+	TAILQ_FOREACH(drv, &compressdev_driver_list, next) {
+		if (drv->id == driver_id)
+			return drv->driver->name;
+	}
+	return NULL;
+}
+
+/* Register a compressdev driver and assign it the next free driver id.
+ * Not protected by a lock — presumably only called from driver
+ * registration (constructor) context; confirm before calling elsewhere.
+ */
+uint8_t
+rte_compressdev_allocate_driver(struct compressdev_driver *comp_drv,
+		const struct rte_driver *drv)
+{
+	comp_drv->driver = drv;
+	comp_drv->id = nb_drivers;
+
+	TAILQ_INSERT_TAIL(&compressdev_driver_list, comp_drv, next);
+
+	return nb_drivers++;
+}
new file mode 100644
@@ -0,0 +1,892 @@
+/*-
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMPRESSDEV_H_
+#define _RTE_COMPRESSDEV_H_
+
+/**
+ * @file rte_compressdev.h
+ *
+ * RTE Compression Device APIs
+ *
+ * Defines RTE comp Device APIs for the provisioning of compression operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_kvargs.h"
+#include "rte_comp.h"
+#include "rte_dev.h"
+#include <rte_common.h>
+
+extern const char **rte_cyptodev_names;
+
+/* Logging Macros */
+
+#define COMPDEV_LOG_ERR(...) \
+ RTE_LOG(ERR, COMPRESSDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__, ) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define COMPDEV_LOG_INFO(...) \
+ RTE_LOG(INFO, COMPRESSDEV, \
+ RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#ifdef RTE_LIBRTE_COMPRESSDEV_DEBUG
+#define COMPDEV_LOG_DEBUG(...) \
+ RTE_LOG(DEBUG, COMPRESSDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define COMPDEV_PMD_TRACE(...) \
+ RTE_LOG(DEBUG, COMPRESSDEV, \
+ RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#else
+#define COMPDEV_LOG_DEBUG(...) (void)0
+#define COMPDEV_PMD_TRACE(...) (void)0
+#endif
+
+
+
+/**
+ * A macro that points to an offset from the start
+ * of the comp operation structure (rte_comp_op)
+ *
+ * The returned pointer is cast to type t.
+ *
+ * @param c
+ * The comp operation.
+ * @param o
+ * The offset from the start of the comp operation.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_comp_op_ctod_offset(c, t, o) \
+ ((t)((char *)(c) + (o)))
+
+/**
+ * A macro that returns the physical address that points
+ * to an offset from the start of the comp operation
+ * (rte_comp_op)
+ *
+ * @param c
+ * The comp operation.
+ * @param o
+ * The offset from the start of the comp operation
+ * to calculate address from.
+ */
+#define rte_comp_op_ctophys_offset(c, o) \
+ (rte_iova_t)((c)->phys_addr + (o))
+
+/**
+ * comp parameters range description
+ */
+struct rte_comp_param_range {
+ uint16_t min; /**< minimum size */
+ uint16_t max; /**< maximum size */
+ uint16_t increment;
+ /**< if a range of sizes are supported,
+ * this parameter is used to indicate
+ * increments in byte size that are supported
+ * between the minimum and maximum
+ */
+};
+
+
+/** Structure used to capture a capability of a comp device */
+struct rte_compressdev_capabilities {
+ /* TODO */
+ enum rte_comp_algorithm algo;
+ uint64_t comp_feature_flags;
+ /**< bitmap of flags for compression service features*/
+ struct rte_comp_param_range window_size;
+ /**< window size range in bytes */
+};
+
+
+/** Macro used at end of comp PMD list */
+#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
+ { RTE_COMP_ALGO_LIST_END }
+
+
+/**
+ * compression device supported feature flags
+ *
+ * Note:
+ * New features flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_compressdev_get_feature_name()
+ */
+
+#define RTE_COMP_FF_HW_ACCELERATED (1ULL << 0)
+/**< Operations are off-loaded to an external hardware accelerator */
+#define RTE_COMP_FF_CPU_SSE (1ULL << 1)
+/**< Utilises CPU SIMD SSE instructions */
+#define RTE_COMP_FF_CPU_AVX (1ULL << 2)
+/**< Utilises CPU SIMD AVX instructions */
+#define RTE_COMP_FF_CPU_AVX2 (1ULL << 3)
+/**< Utilises CPU SIMD AVX2 instructions */
+#define RTE_COMP_FF_CPU_AVX512 (1ULL << 4)
+/**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_COMP_FF_CPU_NEON (1ULL << 5)
+/**< Utilises CPU NEON instructions */
+
+/**
+ * compression service feature flags
+ *
+ * Note:
+ * New features flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_comp_get_feature_name() TODO
+ */
+#define RTE_COMP_FF_MBUF_SCATTER_GATHER (1ULL << 0)
+/**< Scatter-gather mbufs are supported */
+#define RTE_COMP_FF_MULTI_PKT_CHECKSUM (1ULL << 1)
+/**< Generation of checksum across multiple stateless packets is supported */
+#define RTE_COMP_FF_STATEFUL_COMPRESSION (1ULL << 2)
+/**< Stateful compression is supported */
+#define RTE_COMP_FF_STATEFUL_DECOMPRESSION (1ULL << 3)
+/**< Stateful decompression is supported */
+
+/**
+ * Get the name of a comp device feature flag
+ *
+ * @param flag The mask describing the flag.
+ *
+ * @return
+ * The name of this flag, or NULL if it's not a valid feature flag.
+ */
+
+extern const char *
+rte_compressdev_get_feature_name(uint64_t flag);
+extern const char *
+rte_comp_get_feature_name(uint64_t flag);
+
+/** comp device information */
+struct rte_compressdev_info {
+ const char *driver_name; /**< Driver name. */
+ uint8_t driver_id; /**< Driver identifier */
+ struct rte_pci_device *pci_dev; /**< PCI information. */
+
+ uint64_t feature_flags; /**< Feature flags */
+
+ const struct rte_compressdev_capabilities *capabilities;
+ /**< Array of devices supported capabilities */
+
+ unsigned int max_nb_queue_pairs;
+ /**< Maximum number of queues pairs supported by device. */
+
+ unsigned int max_nb_sessions_per_qp;
+ /**< Maximum number of sessions per queue pair.
+ * Default 0 for infinite sessions
+ */
+ unsigned int max_nb_streams_per_qp;
+ /**< Maximum number of streams per queue pair.
+ * Default 0 for infinite streams
+ */
+
+};
+
+#define RTE_COMPRESSDEV_DETACHED (0)
+#define RTE_COMPRESSDEV_ATTACHED (1)
+
+/** Definitions of comp device event types */
+enum rte_compressdev_event_type {
+ RTE_COMPRESSDEV_EVENT_UNKNOWN, /**< unknown event type */
+ RTE_COMPRESSDEV_EVENT_ERROR, /**< error interrupt event */
+ RTE_COMPRESSDEV_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Typedef for application callback function to be registered by application
+ * software for notification of device events
+ *
+ * @param dev_id comp device identifier
+ * @param event comp device event to register for notification of.
+ * @param cb_arg User specified parameter to be passed to the
+ * user's callback function.
+ */
+typedef void (*rte_compressdev_cb_fn)(uint8_t dev_id,
+ enum rte_compressdev_event_type event, void *cb_arg);
+
+
+/** comp device statistics */
+struct rte_compressdev_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+};
+
+#define RTE_COMPRESSDEV_NAME_MAX_LEN (64)
+/**< Max length of name of comp PMD */
+
+/**
+ * Get the device identifier for the named comp device.
+ *
+ * @param name device name to select the device structure.
+ *
+ * @return
+ * - Returns comp device identifier on success.
+ * - Return -1 on failure to find named comp device.
+ */
+extern int
+rte_compressdev_get_dev_id(const char *name);
+
+/**
+ * Get the comp device name given a device identifier.
+ *
+ * @param dev_id
+ * The identifier of the device
+ *
+ * @return
+ * - Returns comp device name.
+ * - Returns NULL if comp device is not present.
+ */
+extern const char *
+rte_compressdev_name_get(uint8_t dev_id);
+
+/**
+ * Get the total number of comp devices that have been successfully
+ * initialised.
+ *
+ * @return
+ * - The total number of usable comp devices.
+ */
+extern uint8_t
+rte_compressdev_count(void);
+
+/**
+ * Get number of comp device defined type.
+ *
+ * @param driver_id driver identifier.
+ *
+ * @return
+ * Returns number of comp device.
+ */
+extern uint8_t
+rte_compressdev_device_count_by_driver(uint8_t driver_id);
+
+/**
+ * Get number and identifiers of attached comp devices that
+ * use the same comp driver.
+ *
+ * @param driver_name driver name.
+ * @param devices output devices identifiers.
+ * @param nb_devices maximal number of devices.
+ *
+ * @return
+ * Returns number of attached comp device.
+ */
+uint8_t
+rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
+ uint8_t nb_devices);
+/**
+ * Return the NUMA socket to which a device is connected
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @return
+ * The NUMA socket id to which the device is connected or
+ * a default of zero if the socket could not be determined.
+ * - -1 if the dev_id value is out of range.
+ */
+extern int
+rte_compressdev_socket_id(uint8_t dev_id);
+
+/** comp device configuration structure */
+struct rte_compressdev_config {
+ int socket_id;
+ /**< Socket on which to allocate resources */
+ uint16_t nb_queue_pairs;
+ /**< Total number of queue pairs to configure on a device */
+};
+
+/**
+ * Configure a device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id The identifier of the device to configure.
+ * @param config The comp device configuration structure.
+ *
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+extern int
+rte_compressdev_configure(uint8_t dev_id,
+ struct rte_compressdev_config *config);
+
+/**
+ * Start a device.
+ *
+ * The device start step is the last one and consists of setting the configured
+ * offload features and in starting the transmit and the receive units of the
+ * device.
+ * On success, all basic functions exported by the API (link status,
+ * receive/transmit, and so on) can be invoked.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @return
+ * - 0: Success, device started.
+ * - <0: Error code of the driver device start function.
+ */
+extern int
+rte_compressdev_start(uint8_t dev_id);
+
+/**
+ * Stop a device. The device can be restarted with a call to
+ * rte_compressdev_start()
+ *
+ * @param dev_id The identifier of the device.
+ */
+extern void
+rte_compressdev_stop(uint8_t dev_id);
+
+/**
+ * Close a device. The device cannot be restarted!
+ *
+ * @param dev_id The identifier of the device.
+ *
+ * @return
+ * - 0 on successfully closing device
+ * - <0 on failure to close device
+ */
+extern int
+rte_compressdev_close(uint8_t dev_id);
+
+/**
+ * Allocate and set up a receive queue pair for a device.
+ *
+ *
+ * @param dev_id The identifier of the device.
+ * @param queue_pair_id The index of the queue pairs to set up. The
+ * value must be in the range [0, nb_queue_pair
+ * - 1] previously supplied to
+ * rte_compressdev_configure().
+ * @param max_inflight_ops max number of ops which the qp will have to
+ * accommodate simultaneously.
+ * @param socket_id The *socket_id* argument is the socket
+ * identifier in case of NUMA. The value can be
+ * *SOCKET_ID_ANY* if there is no NUMA constraint
+ * for the DMA memory allocated for the receive
+ * queue pair.
+ * @return
+ * - 0: Success, queue pair correctly set up.
+ * - <0: Queue pair configuration failed
+ */
+extern int
+rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id);
+
+/**
+ * Start a specified queue pair of a device. It is used
+ * when deferred_start flag of the specified queue is true.
+ *
+ * @param dev_id The identifier of the device
+ * @param queue_pair_id The index of the queue pair to start. The value
+ * must be in the range [0, nb_queue_pair - 1]
+ * previously supplied to
+ * rte_comp_dev_configure().
+ * @return
+ * - 0: Success, the transmit queue is correctly set up.
+ * - -EINVAL: The dev_id or the queue_id out of range.
+ * - -ENOTSUP: The function not supported in PMD driver.
+ */
+extern int
+rte_compressdev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id);
+
+/**
+ * Stop specified queue pair of a device
+ *
+ * @param dev_id The identifier of the device
+ * @param queue_pair_id The index of the queue pair to stop. The value
+ * must be in the range [0, nb_queue_pair - 1]
+ * previously supplied to
+ * rte_compressdev_configure().
+ * @return
+ * - 0: Success, the transmit queue is correctly set up.
+ * - -EINVAL: The dev_id or the queue_id out of range.
+ * - -ENOTSUP: The function not supported in PMD driver.
+ */
+extern int
+rte_compressdev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id);
+
+/**
+ * Get the number of queue pairs on a specific comp device
+ *
+ * @param dev_id comp device identifier.
+ * @return
+ * - The number of configured queue pairs.
+ */
+extern uint16_t
+rte_compressdev_queue_pair_count(uint8_t dev_id);
+
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param dev_id The identifier of the device.
+ * @param stats A pointer to a structure of type
+ * *rte_compressdev_stats* to be filled with the
+ * values of device counters.
+ * @return
+ * - Zero if successful.
+ * - Non-zero otherwise.
+ */
+extern int
+rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
+
+/**
+ * Reset the general I/O statistics of a device.
+ *
+ * @param dev_id The identifier of the device.
+ */
+extern void
+rte_compressdev_stats_reset(uint8_t dev_id);
+
+/**
+ * Retrieve the contextual information of a device.
+ *
+ * @param dev_id The identifier of the device.
+ * @param dev_info A pointer to a structure of type
+ * *rte_compressdev_info* to be filled with the
+ * contextual information of the device.
+ *
+ * @note The capabilities field of dev_info is set to point to the first
+ * element of an array of struct rte_compressdev_capabilities. The element after
+ * the last valid element has its algo field set to
+ * RTE_COMP_ALGO_LIST_END.
+ */
+extern void
+rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
+
+
+/**
+ * Register a callback function for specific device id.
+ *
+ * @param dev_id Device id.
+ * @param event Event interested.
+ * @param cb_fn User supplied callback function to be called.
+ * @param cb_arg Pointer to the parameters for the registered
+ * callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+extern int
+rte_compressdev_callback_register(uint8_t dev_id,
+ enum rte_compressdev_event_type event,
+ rte_compressdev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Unregister a callback function for specific device id.
+ *
+ * @param dev_id The device identifier.
+ * @param event Event interested.
+ * @param cb_fn User supplied callback function to be called.
+ * @param cb_arg Pointer to the parameters for the registered
+ * callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+extern int
+rte_compressdev_callback_unregister(uint8_t dev_id,
+ enum rte_compressdev_event_type event,
+ rte_compressdev_cb_fn cb_fn, void *cb_arg);
+
+
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_comp_op **ops, uint16_t nb_ops);
+/**< Dequeue processed packets from queue pair of a device. */
+
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_comp_op **ops, uint16_t nb_ops);
+/**< Enqueue packets for processing on queue pair of a device. */
+
+
+
+
+struct rte_compressdev_callback;
+
+/** Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_compressdev_cb_list, rte_compressdev_callback);
+
+/** The data structure associated with each comp device. */
+struct rte_compressdev {
+ dequeue_pkt_burst_t dequeue_burst;
+ /**< Pointer to PMD receive function. */
+ enqueue_pkt_burst_t enqueue_burst;
+ /**< Pointer to PMD transmit function. */
+
+ struct rte_compressdev_data *data;
+ /**< Pointer to device data */
+ struct rte_compressdev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ uint64_t feature_flags;
+ /**< Supported features */
+ struct rte_device *device;
+ /**< Backing device */
+
+ uint8_t driver_id;
+ /**< comp driver identifier*/
+
+ struct rte_compressdev_cb_list link_intr_cbs;
+ /**< User application callback for interrupts if present */
+
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+
+/**
+ *
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_compressdev_data {
+ uint8_t dev_id;
+ /**< Device ID for this instance */
+ uint8_t socket_id;
+ /**< Socket ID where memory is allocated */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+
+ __extension__
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ void **queue_pairs;
+ /**< Array of pointers to queue pairs. */
+ uint16_t nb_queue_pairs;
+ /**< Number of device queue pairs. */
+
+ void *dev_private;
+ /**< PMD-specific private data */
+} __rte_cache_aligned;
+
+extern struct rte_compressdev *rte_compressdevs;
+/**
+ *
+ * Dequeue a burst of processed compression operations from a queue on the comp
+ * device. The dequeued operation are stored in *rte_comp_op* structures
+ * whose pointers are supplied in the *ops* array.
+ *
+ * The rte_compressdev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_comp_op* data structures
+ * effectively supplied into the *ops* array.
+ *
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the devices output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_compressdev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
+ *
+ * The rte_compressdev_dequeue_burst() function does not provide any error
+ * notification to avoid the corresponding overhead.
+ *
+ * @param dev_id The compression device identifier
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed operations. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_compressdev_configure().
+ * @param ops The address of an array of pointers to
+ * *rte_comp_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
+ *
+ * @return
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_comp_op* structures effectively supplied to the
+ * *ops* array.
+ */
+static inline uint16_t
+rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	struct rte_compressdev *dev = &rte_compressdevs[dev_id];
+	void *qp = dev->data->queue_pairs[qp_id];
+
+	/* Hand off directly to the PMD's dequeue handler. */
+	return (*dev->dequeue_burst)(qp, ops, nb_ops);
+}
+
+/**
+ * Enqueue a burst of operations for processing on a compression device.
+ *
+ * The rte_compressdev_enqueue_burst() function is invoked to place
+ * comp operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
+ *
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_comp_op* structures.
+ *
+ * The rte_compressdev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all packets have been enqueued.
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair on which operations
+ * are to be enqueued for processing. The value
+ * must be in the range [0, nb_queue_pairs - 1]
+ * previously supplied to
+ * *rte_compressdev_configure*.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_comp_op* structures which contain
+ * the operations to be processed.
+ * @param nb_ops The number of operations to process.
+ *
+ * @return
+ * The number of operations actually enqueued on the device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * comp devices queue is full or if invalid parameters are specified in
+ * a *rte_comp_op*.
+ */
+static inline uint16_t
+rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	struct rte_compressdev *dev = &rte_compressdevs[dev_id];
+
+	/* Hand off to the PMD's enqueue handler for the selected qp. */
+	nb_ops = (*dev->enqueue_burst)(dev->data->queue_pairs[qp_id],
+			ops, nb_ops);
+	return nb_ops;
+}
+
+
+/** compressdev session */
+struct rte_comp_session {
+ __extension__ void *sess_private_data[0];
+ /**< Private session material */
+};
+
+
+/**
+ * Create symmetric comp session header (generic with no private data)
+ *
+ * @param mempool Symmetric session mempool to allocate session
+ * objects from
+ * @return
+ * - On success return pointer to sym-session
+ * - On failure returns NULL
+ */
+struct rte_comp_session *
+rte_compressdev_session_create(struct rte_mempool *mempool);
+
+/**
+ * Frees comp session header, after checking that all
+ * the device private data has been freed, returning it
+ * to its original mempool.
+ *
+ * @param sess Session header to be freed.
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if session is NULL.
+ * - -EBUSY if not all device private data has been freed.
+ */
+int
+rte_compressdev_session_terminate(struct rte_comp_session *sess);
+
+/**
+ * Fill out private session data for the device, based on its device id.
+ * The same private session data is shared by all devices exposed by a driver
+ * so this only needs to be called for one device per driver-type.
+ * All private data stored must be shareable across devices, so read-only.
+ * A session initialised for more than one device (of different driver types)
+ * must used the same xform for each init.
+ *
+ * @param dev_id ID of device that we want the session to be used on
+ * @param sess Session where the private data will be attached to
+ * @param xforms comp transform operations to apply on flow
+ * processed with this session.
+ * @param mempool Mempool from where the private data should be allocated.
+ *
+ * @return
+ * - On success, zero.
+ * - -EINVAL if input parameters are invalid.
+ * - -ENOTSUP if comp device does not support the comp transform.
+ * - -ENOMEM if the private session could not be allocated.
+ */
+int
+rte_compressdev_session_init(uint8_t dev_id,
+ struct rte_comp_session *sess,
+ struct rte_comp_xform *xforms,
+ struct rte_mempool *mempool);
+
+/**
+ * Frees private data for the device id, based on its device type,
+ * returning it to its mempool.
+ *
+ * @param dev_id ID of device that uses the session.
+ * @param sess Session containing the reference to the private data
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if device is invalid or session is NULL.
+ */
+int
+rte_compressdev_session_clear(uint8_t dev_id,
+ struct rte_comp_session *sess);
+
+/**
+ * Get the size of the header session, for all registered drivers.
+ *
+ * @return
+ * Size of the header session.
+ */
+unsigned int
+rte_compressdev_get_header_session_size(void);
+
+/**
+ * Get the size of the private session data for a device.
+ *
+ * @param dev_id The device identifier.
+ *
+ * @return
+ * - Size of the private data, if successful
+ * - 0 if device is invalid or does not have private session
+ */
+unsigned int
+rte_compressdev_get_private_session_size(uint8_t dev_id);
+
+/**
+ * Attach queue pair with sym session.
+ *
+ * @param dev_id Device to which the session will be attached.
+ * @param qp_id Queue pair to which the session will be attached
+ * @param session Session pointer previously allocated by
+ * *rte_compressdev_session_create*.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_compressdev_queue_pair_attach_session(uint8_t dev_id, uint16_t qp_id,
+ struct rte_comp_session *session);
+
+/**
+ * Detach queue pair with comp session.
+ *
+ * @param dev_id Device to which the session is attached.
+ * @param qp_id Queue pair to which the session is attached.
+ * @param session Session pointer previously allocated by
+ * *rte_compressdev_session_create*.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+
+int
+rte_compressdev_queue_pair_detach_session(uint8_t dev_id, uint16_t qp_id,
+ struct rte_comp_session *session);
+/**
+ * This should alloc a stream from the device’s mempool and initialise it.
+ * This handle will be passed to the PMD with every op in the stream.
+ *
+ * @param dev_id The identifier of the device.
+ * @param session Session pointer previously allocated by
+ * *rte_compressdev_session_create*.
+ * @param stream ptr to where ptr to PMD's private stream data
+ * will be stored.
+ *
+ * @return
+ *
+ * TODO: Should qp_id also be added, with constraint that all ops in the same
+ * stream should be sent to the same qp?
+ *
+ */
+int
+rte_comp_stream_create(uint8_t dev_id, struct rte_comp_session *sess,
+ void ** stream);
+
+/**
+ * This should clear the stream and return it to the device’s mempool.
+ *
+ * @param dev_id The identifier of the device.
+ *
+ * @param stream ptr to PMD's private stream data
+ *
+ *
+ * @return
+ */
+int
+rte_comp_stream_free(uint8_t dev_id, void * stream);
+
+/**
+ * Provide driver identifier.
+ *
+ * @param name
+ * The pointer to a driver name.
+ * @return
+ * The driver type identifier or -1 if no driver found
+ */
+int rte_compressdev_driver_id_get(const char *name);
+
+/**
+ * Provide driver name.
+ *
+ * @param driver_id
+ * The driver identifier.
+ * @return
+ * The driver name or null if no driver found
+ */
+const char *rte_compressdev_driver_name_get(uint8_t driver_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMPRESSDEV_H_ */
new file mode 100644
@@ -0,0 +1,194 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "rte_compressdev_pmd.h"
+
+/**
+ * Parse the device name from a devargs value string into params->name.
+ *
+ * Returns 0 on success, -EINVAL if the name is too long for the
+ * RTE_COMPRESSDEV_NAME_MAX_LEN buffer or snprintf() fails.
+ */
+static int
+rte_compressdev_pmd_parse_name_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	struct rte_compressdev_pmd_init_params *params = extra_args;
+	int n;
+
+	n = snprintf(params->name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s", value);
+	/* Reject encoding errors (n < 0) as well as truncated names;
+	 * the original only checked for truncation.
+	 */
+	if (n < 0 || n >= RTE_COMPRESSDEV_NAME_MAX_LEN)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Parse an unsigned 32-bit integer from a devargs value string.
+ *
+ * Stores the result through extra_args (treated as uint32_t *).
+ * Returns 0 on success, -EINVAL on empty/malformed input or an
+ * out-of-range value.
+ */
+static int
+rte_compressdev_pmd_parse_uint_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	long n;
+	char *end;
+
+	errno = 0;
+	n = strtol(value, &end, 10);
+	/* Range-check before narrowing: the original stored strtol()'s
+	 * result straight into an int, silently truncating values that
+	 * exceed INT_MAX, and accepted an empty string (end == value).
+	 */
+	if (end == value || *end != '\0' || errno != 0 || n < 0 ||
+			(unsigned long)n > UINT32_MAX)
+		return -EINVAL;
+
+	*((uint32_t *)extra_args) = (uint32_t)n;
+	return 0;
+}
+
+/*
+ * Parse PMD init parameters (max qps, socket id, name) from a devargs
+ * string into @params. A NULL @args is valid and leaves @params untouched.
+ *
+ * Returns 0 on success or a negative errno-style code on parse failure.
+ *
+ * Fixed: the "&params->..." arguments below had been corrupted into
+ * "¶ms->..." (a mangled "&para;" HTML entity), which does not compile.
+ */
+int
+rte_compressdev_pmd_parse_input_args(
+		struct rte_compressdev_pmd_init_params *params,
+		const char *args)
+{
+	struct rte_kvargs *kvlist = NULL;
+	int ret = 0;
+
+	if (params == NULL)
+		return -EINVAL;
+
+	if (args) {
+		kvlist = rte_kvargs_parse(args, compressdev_pmd_valid_params);
+		if (kvlist == NULL)
+			return -EINVAL;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
+				&rte_compressdev_pmd_parse_uint_arg,
+				&params->max_nb_queue_pairs);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG,
+				&rte_compressdev_pmd_parse_uint_arg,
+				&params->socket_id);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_COMPRESSDEV_PMD_NAME_ARG,
+				&rte_compressdev_pmd_parse_name_arg,
+				params);
+		if (ret < 0)
+			goto free_kvlist;
+	}
+
+free_kvlist:
+	/* rte_kvargs_free(NULL) is a no-op, so the args == NULL path
+	 * falls through here safely.
+	 */
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
+/*
+ * Allocate and initialise a compressdev for a PMD: pick the device name
+ * (devargs override wins), allocate the device slot and, in the primary
+ * process, the PMD-private data area.
+ *
+ * Returns the new device, or NULL on allocation failure.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_create(const char *name,
+		struct rte_device *device,
+		struct rte_compressdev_pmd_init_params *params)
+{
+	struct rte_compressdev *compressdev;
+
+	/* A user-supplied name from devargs overrides the default. */
+	if (params->name[0] != '\0') {
+		COMPDEV_LOG_INFO("[%s] User specified device name = %s",
+				device->driver->name, params->name);
+		name = params->name;
+	}
+
+	/* COMPDEV_LOG_INFO() appends "\n" itself; the explicit "\n" the
+	 * two messages above and below used to carry produced blank lines
+	 * in the log.
+	 */
+	COMPDEV_LOG_INFO("[%s] - Creating compressdev %s",
+			device->driver->name, name);
+
+	COMPDEV_LOG_INFO(
+	"[%s] - Init parameters - name: %s, socket id: %d, max queue pairs: %u",
+			device->driver->name, name,
+			params->socket_id, params->max_nb_queue_pairs);
+
+	/* allocate device structure */
+	compressdev = rte_compressdev_pmd_allocate(name, params->socket_id);
+	if (compressdev == NULL) {
+		COMPDEV_LOG_ERR("[%s] Failed to allocate comp device for %s",
+				device->driver->name, name);
+		return NULL;
+	}
+
+	/* allocate private device structure; only the primary process
+	 * owns the PMD-private memory
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		compressdev->data->dev_private =
+				rte_zmalloc_socket("compressdev device private",
+						params->private_data_size,
+						RTE_CACHE_LINE_SIZE,
+						params->socket_id);
+
+		if (compressdev->data->dev_private == NULL) {
+			COMPDEV_LOG_ERR(
+		"[%s] Cannot allocate memory for compressdev %s private data",
+					device->driver->name, name);
+
+			rte_compressdev_pmd_release_device(compressdev);
+			return NULL;
+		}
+	}
+
+	compressdev->device = device;
+
+	/* initialise user call-back tail queue */
+	TAILQ_INIT(&(compressdev->link_intr_cbs));
+
+	return compressdev;
+}
+
+/*
+ * Tear down a compressdev created by rte_compressdev_pmd_create():
+ * release the device slot, free the PMD-private data (primary process
+ * only) and clear the device's back-pointers.
+ *
+ * Returns 0 on success, or the error from
+ * rte_compressdev_pmd_release_device().
+ *
+ * NOTE(review): compressdev->data is dereferenced (and dev_private
+ * freed) *after* rte_compressdev_pmd_release_device() has run; this is
+ * only safe if release does not invalidate the data area - confirm
+ * against its implementation, or free dev_private before releasing.
+ */
+int
+rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev)
+{
+ int retval;
+
+ COMPDEV_LOG_INFO("[%s] Closing comp device %s",
+ compressdev->device->driver->name,
+ compressdev->device->name);
+
+ /* free comp device */
+ retval = rte_compressdev_pmd_release_device(compressdev);
+ if (retval)
+ return retval;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(compressdev->data->dev_private);
+
+
+ compressdev->device = NULL;
+ compressdev->data = NULL;
+
+ return 0;
+}
+
new file mode 100644
@@ -0,0 +1,533 @@
+/*-
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMPRESSDEV_PMD_H_
+#define _RTE_COMPRESSDEV_PMD_H_
+
+/** @file
+ * RTE comp PMD APIs
+ *
+ * @note
+ * These API are from comp PMD only and user applications should not call
+ * them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_comp.h"
+#include "rte_compressdev.h"
+
+
+#define RTE_COMPRESSDEV_PMD_NAME_ARG ("name")
+#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG ("socket_id")
+
+
+/*
+ * Argument names accepted by rte_compressdev_pmd_parse_input_args().
+ * NOTE(review): defining a non-inline object in a header creates a
+ * separate copy of this array in every translation unit that includes it
+ * (and may trigger unused-variable warnings) — consider moving it to the
+ * .c file next to the parse function.
+ */
+static const char * const compressdev_pmd_valid_params[] = {
+ RTE_COMPRESSDEV_PMD_NAME_ARG,
+ RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
+ RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
+};
+
+/**
+ * @internal
+ * Initialisation parameters for comp devices
+ */
+struct rte_compressdev_pmd_init_params {
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Device name; when non-empty it overrides the name passed to
+  * rte_compressdev_pmd_create()
+  */
+ size_t private_data_size;
+ /**< Size of the PMD-private device data area to allocate */
+ int socket_id;
+ /**< NUMA socket on which device resources are allocated */
+ unsigned int max_nb_queue_pairs;
+ /**< Maximum number of queue pairs supported by the device */
+};
+
+/** Global structure used for maintaining state of allocated comp devices */
+struct rte_compressdev_global {
+ struct rte_compressdev *devs; /**< Device information array */
+ struct rte_compressdev_data *data[RTE_COMPRESS_MAX_DEVS];
+ /**< Device private data */
+ uint8_t nb_devs; /**< Number of devices found */
+ uint8_t max_devs; /**< Max number of devices */
+};
+
+/* compressdev driver, containing the driver ID */
+struct compressdev_driver {
+ TAILQ_ENTRY(compressdev_driver) next; /**< Next in list. */
+ const struct rte_driver *driver;
+ uint8_t id;
+};
+
+/** pointer to global comp devices data structure. */
+extern struct rte_compressdev_global *rte_compressdev_globals;
+
+/**
+ * Get the rte_compressdev structure device pointer for the device. Assumes a
+ * valid device index.
+ *
+ * @param dev_id Device ID value to select the device structure.
+ *
+ * @return
+ * - The rte_compressdev structure pointer for the given device ID.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_get_dev(uint8_t dev_id);
+
+/**
+ * Get the rte_compressdev structure device pointer for the named device.
+ *
+ * @param name device name to select the device structure.
+ *
+ * @return
+ * - The rte_compressdev structure pointer for the given device ID.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_get_named_dev(const char *name);
+
+/**
+ * Validate if the comp device index is valid attached comp device.
+ *
+ * @param dev_id comp device index.
+ *
+ * @return
+ * - If the device index is valid (1) or not (0).
+ */
+unsigned int
+rte_compressdev_pmd_is_valid_dev(uint8_t dev_id);
+
+/**
+ * The pool of rte_compressdev structures.
+ */
+extern struct rte_compressdev *rte_compressdevs;
+
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * the generic structure of type *comp_dev_ops* supplied in the
+ * *rte_compressdev* structure associated with a device.
+ */
+
+/**
+ * Function used to configure device.
+ *
+ * @param dev comp device pointer
+ * config comp device configurations
+ *
+ * @return Returns 0 on success
+ */
+typedef int (*compressdev_configure_t)(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config);
+
+/**
+ * Function used to start a configured device.
+ *
+ * @param dev comp device pointer
+ *
+ * @return Returns 0 on success
+ */
+typedef int (*compressdev_start_t)(struct rte_compressdev *dev);
+
+/**
+ * Function used to stop a configured device.
+ *
+ * @param dev comp device pointer
+ */
+typedef void (*compressdev_stop_t)(struct rte_compressdev *dev);
+
+/**
+ * Function used to close a configured device.
+ *
+ * @param dev comp device pointer
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*compressdev_close_t)(struct rte_compressdev *dev);
+
+
+/**
+ * Function used to get statistics of a device.
+ *
+ * @param dev comp device pointer
+ * @param stats Pointer to comp device stats structure to populate
+ */
+typedef void (*compressdev_stats_get_t)(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats);
+
+
+/**
+ * Function used to reset statistics of a device.
+ *
+ * @param dev comp device pointer
+ */
+typedef void (*compressdev_stats_reset_t)(struct rte_compressdev *dev);
+
+
+/**
+ * Function used to get specific information of a device.
+ *
+ * @param dev comp device pointer
+ */
+typedef void (*compressdev_info_get_t)(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info);
+
+/**
+ * Start queue pair of a device.
+ *
+ * @param dev comp device pointer
+ * @param qp_id Queue Pair Index
+ *
+ * @return Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_start_t)(struct rte_compressdev *dev,
+ uint16_t qp_id);
+
+/**
+ * Stop queue pair of a device.
+ *
+ * @param dev comp device pointer
+ * @param qp_id Queue Pair Index
+ *
+ * @return Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_stop_t)(struct rte_compressdev *dev,
+ uint16_t qp_id);
+
+/**
+ * Setup a queue pair for a device.
+ *
+ * @param dev comp device pointer
+ * @param qp_id Queue Pair Index
+ * @param max_inflight_ops Max inflight ops which qp must accommodate
+ * @param socket_id Socket Index
+ *
+ * @return Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_setup_t)(struct rte_compressdev *dev,
+ uint16_t qp_id, uint32_t max_inflight_ops, int socket_id);
+
+/**
+ * Release memory resources allocated by given queue pair.
+ *
+ * @param dev comp device pointer
+ * @param qp_id Queue Pair Index
+ *
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*compressdev_queue_pair_release_t)(struct rte_compressdev *dev,
+ uint16_t qp_id);
+
+/**
+ * Get number of available queue pairs of a device.
+ *
+ * @param dev comp device pointer
+ *
+ * @return Returns number of queue pairs on success.
+ */
+typedef uint32_t (*compressdev_queue_pair_count_t)(struct rte_compressdev *dev);
+
+/**
+ * Create a session mempool to allocate sessions from
+ *
+ * @param dev comp device pointer
+ * @param nb_objs number of sessions objects in mempool
+ * @param obj_cache_size l-core object cache size, see *rte_ring_create*
+ * @param socket_id Socket Id to allocate mempool on.
+ *
+ * @return
+ * - 0 on success, negative value on failure
+ *   (NOTE(review): the previous comment claimed a rte_mempool pointer is
+ *   returned, but the typedef returns int — confirm the intended contract)
+ */
+typedef int (*compressdev_create_session_pool_t)(
+ struct rte_compressdev *dev, unsigned int nb_objs,
+ unsigned int obj_cache_size, int socket_id);
+
+
+/**
+ * Get the size of a compressdev session
+ *
+ * @param dev comp device pointer
+ *
+ * @return
+ * - On success returns the size of the session structure for device
+ * - On failure returns 0
+ */
+typedef unsigned int (*compressdev_get_session_private_size_t)(
+ struct rte_compressdev *dev);
+
+/**
+ * Configure a comp session on a device.
+ *
+ * @param dev comp device pointer
+ * @param xform Single or chain of comp xforms
+ * @param session Session handle whose private data is to be set up
+ * @param mp Mempool where the private session data is allocated
+ *
+ * @return
+ * - Returns 0 if private session structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private session could not be allocated.
+ */
+typedef int (*compressdev_configure_session_t)(struct rte_compressdev *dev,
+ struct rte_comp_xform *xform,
+ struct rte_comp_session *session,
+ struct rte_mempool *mp);
+
+/**
+ * Free driver private session data.
+ *
+ * @param dev comp device pointer
+ * @param sess compressdev session structure
+ */
+typedef void (*compressdev_free_session_t)(struct rte_compressdev *dev,
+ struct rte_comp_session *sess);
+
+/**
+ * Optional API for drivers to attach sessions with queue pair.
+ * @param dev comp device pointer
+ * @param qp_id queue pair id for attaching session
+ * @param priv_sess Ptr to compressdev's private session structure
+ * @return
+ * - Return 0 on success
+ */
+typedef int (*compressdev_queue_pair_attach_session_t)(
+ struct rte_compressdev *dev,
+ uint16_t qp_id,
+ void *session_private);
+
+/**
+ * Optional API for drivers to detach sessions from queue pair.
+ * @param dev comp device pointer
+ * @param qp_id queue pair id for detaching session
+ * @param priv_sess Ptr to compressdev's private session structure
+ * @return
+ * - Return 0 on success
+ */
+typedef int (*compressdev_queue_pair_detach_session_t)(
+ struct rte_compressdev *dev,
+ uint16_t qp_id,
+ void *session_private);
+
+/** comp device operations function pointer table */
+struct rte_compressdev_ops {
+ compressdev_configure_t dev_configure; /**< Configure device. */
+ compressdev_start_t dev_start; /**< Start device. */
+ compressdev_stop_t dev_stop; /**< Stop device. */
+ compressdev_close_t dev_close; /**< Close device. */
+
+ compressdev_info_get_t dev_infos_get; /**< Get device info. */
+
+ compressdev_stats_get_t stats_get;
+ /**< Get device statistics. */
+ compressdev_stats_reset_t stats_reset;
+ /**< Reset device statistics. */
+
+ compressdev_queue_pair_setup_t queue_pair_setup;
+ /**< Set up a device queue pair. */
+ compressdev_queue_pair_release_t queue_pair_release;
+ /**< Release a queue pair. */
+ compressdev_queue_pair_start_t queue_pair_start;
+ /**< Start a queue pair. */
+ compressdev_queue_pair_stop_t queue_pair_stop;
+ /**< Stop a queue pair. */
+ compressdev_queue_pair_count_t queue_pair_count;
+ /**< Get count of the queue pairs. */
+
+ compressdev_get_session_private_size_t session_get_size;
+ /**< Return size of device-private session data. */
+ compressdev_configure_session_t session_configure;
+ /**< Configure a comp session. */
+ compressdev_free_session_t session_clear;
+ /**< Clear a comp sessions private data. */
+ compressdev_queue_pair_attach_session_t qp_attach_session;
+ /**< Attach session to queue pair. */
+ compressdev_queue_pair_detach_session_t qp_detach_session;
+ /**< Detach session from queue pair. */
+};
+
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Allocates a new compressdev slot for an comp device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name Unique identifier name for each device
+ * @param socket_id Socket to allocate resources on.
+ * @return
+ * - Slot in the rte_dev_devices array for a new device;
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Release the specified compressdev device.
+ *
+ * @param compressdev
+ * The *compressdev* pointer is the address of the *rte_compressdev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+extern int
+rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev);
+
+
+/**
+ * @internal
+ *
+ * PMD assist function to parse initialisation arguments for comp driver
+ * when creating a new comp PMD device instance.
+ *
+ * PMD driver should set default values for that PMD before calling function,
+ * these default values will be over-written with successfully parsed values
+ * from args string.
+ *
+ * @param params parsed PMD initialisation parameters
+ * @param args input argument string to parse
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int
+rte_compressdev_pmd_parse_input_args(
+ struct rte_compressdev_pmd_init_params *params,
+ const char *args);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boiler plate code for comp driver to create
+ * and allocate resources for a new comp PMD device instance.
+ *
+ * @param name comp device name.
+ * @param device base device instance
+ * @param params PMD initialisation parameters
+ *
+ * @return
+ * - comp device instance on success
+ * - NULL on creation failure
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_create(const char *name,
+ struct rte_device *device,
+ struct rte_compressdev_pmd_init_params *params);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boiler plate code for comp driver to
+ * destroy and free resources associated with a comp PMD device instance.
+ *
+ * @param compressdev comp device handle.
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int
+rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev);
+
+/**
+ * Executes all the user application registered callbacks for the specific
+ * device.
+ *
+ * @param dev Pointer to compressdev struct
+ * @param event comp device interrupt event type.
+ *
+ * @return
+ * void
+ */
+void rte_compressdev_pmd_callback_process(struct rte_compressdev *dev,
+ enum rte_compressdev_event_type event);
+
+/**
+ * @internal
+ * Create unique device name
+ */
+int
+rte_compressdev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+
+/**
+ * @internal
+ * Allocate compressdev driver.
+ *
+ * @param comp_drv
+ * Pointer to compressdev_driver.
+ * @param drv
+ * Pointer to rte_driver.
+ *
+ * @return
+ * The driver type identifier
+ */
+uint8_t rte_compressdev_allocate_driver(struct compressdev_driver *comp_drv,
+ const struct rte_driver *drv);
+
+
+/**
+ * @internal
+ * Register a comp driver: emits an RTE_INIT constructor that allocates a
+ * driver id via rte_compressdev_allocate_driver() and stores it in
+ * *driver_id*.
+ */
+#define RTE_PMD_REGISTER_COMPRESSDEV_DRIVER(comp_drv, drv, driver_id)\
+RTE_INIT(init_ ##driver_id);\
+static void init_ ##driver_id(void)\
+{\
+ driver_id = rte_compressdev_allocate_driver(&comp_drv, &(drv).driver);\
+}
+
+/**
+ * @internal
+ * Get the driver-private session data stored in *sess* for the driver
+ * identified by *driver_id*.
+ */
+static inline void *
+get_session_private_data(const struct rte_comp_session *sess,
+ uint8_t driver_id)
+{
+ return sess->sess_private_data[driver_id];
+}
+
+/**
+ * @internal
+ * Store *private_data* as the driver-private session data in *sess* for
+ * the driver identified by *driver_id*.
+ */
+static inline void
+set_session_private_data(struct rte_comp_session *sess,
+ uint8_t driver_id, void *private_data)
+{
+ sess->sess_private_data[driver_id] = private_data;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMPRESSDEV_PMD_H_ */
new file mode 100644
@@ -0,0 +1,50 @@
+EXPERIMENTAL {
+ global:
+
+ rte_comp_op_pool_create;
+ rte_compressdev_allocate_driver;
+ rte_compressdev_callback_register;
+ rte_compressdev_callback_unregister;
+ rte_compressdev_close;
+ rte_compressdev_configure;
+ rte_compressdev_count;
+ rte_compressdev_device_count_by_driver;
+ rte_compressdev_devices_get;
+ rte_compressdev_driver_id_get;
+ rte_compressdev_driver_name_get;
+ rte_compressdev_get_dev_id;
+ rte_compressdev_get_feature_name;
+ rte_compressdev_get_header_session_size;
+ rte_compressdev_get_private_session_size;
+ rte_compressdev_info_get;
+ rte_compressdev_name_get;
+ rte_compressdev_pmd_allocate;
+ rte_compressdev_pmd_callback_process;
+ rte_compressdev_pmd_create;
+ rte_compressdev_pmd_create_dev_name;
+ rte_compressdev_pmd_destroy;
+ rte_compressdev_pmd_get_dev;
+ rte_compressdev_pmd_get_named_dev;
+ rte_compressdev_pmd_is_valid_dev;
+ rte_compressdev_pmd_parse_input_args;
+ rte_compressdev_pmd_release_device;
+ rte_compressdev_queue_pair_attach_comp_session;
+ rte_compressdev_queue_pair_count;
+ rte_compressdev_queue_pair_detach_comp_session;
+ rte_compressdev_queue_pair_setup;
+ rte_compressdev_queue_pair_start;
+ rte_compressdev_queue_pair_stop;
+ rte_compressdev_session_clear;
+ rte_compressdev_session_create;
+ rte_compressdev_session_init;
+ rte_compressdev_session_terminate;
+ rte_compressdev_socket_id;
+ rte_compressdev_start;
+ rte_compressdev_stats_get;
+ rte_compressdev_stats_reset;
+ rte_compressdev_stop;
+ rte_compressdevs;
+
+ local: *;
+};
+
+
@@ -88,6 +88,7 @@ struct rte_logs {
#define RTE_LOGTYPE_EFD 18 /**< Log related to EFD. */
#define RTE_LOGTYPE_EVENTDEV 19 /**< Log related to eventdev. */
#define RTE_LOGTYPE_GSO 20 /**< Log related to GSO. */
+#define RTE_LOGTYPE_COMPRESSDEV 21 /**< Log related to compressdev. */
/* these log types can be used in an application */
#define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */
@@ -94,6 +94,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF) += -lrte_mbuf
_LDLIBS-$(CONFIG_RTE_LIBRTE_NET) += -lrte_net
_LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER) += -lrte_ethdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += -lrte_cryptodev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += -lrte_compressdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_SECURITY) += -lrte_security
_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += -lrte_eventdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += -lrte_mempool