@@ -12,7 +12,6 @@
/**
* @file rte_ring_hts.h
- * @b EXPERIMENTAL: this API may change without prior notice
* It is not recommended to include this file directly.
* Please include <rte_ring.h> instead.
*
@@ -50,7 +49,6 @@ extern "C" {
* @return
* The number of objects enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_hts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
unsigned int esize, unsigned int n, unsigned int *free_space)
@@ -78,7 +76,6 @@ rte_ring_mp_hts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_hts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
unsigned int esize, unsigned int n, unsigned int *available)
@@ -106,7 +103,6 @@ rte_ring_mc_hts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_hts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
unsigned int esize, unsigned int n, unsigned int *free_space)
@@ -136,7 +132,6 @@ rte_ring_mp_hts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_hts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
unsigned int esize, unsigned int n, unsigned int *available)
@@ -160,7 +155,6 @@ rte_ring_mc_hts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_hts_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
@@ -184,7 +178,6 @@ rte_ring_mp_hts_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_hts_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
@@ -208,7 +201,6 @@ rte_ring_mc_hts_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_hts_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
@@ -234,7 +226,6 @@ rte_ring_mp_hts_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_hts_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
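
The HTS calls above are all-or-nothing for the bulk variants and best-effort for the burst variants. A minimal usage sketch of the now-stable calls, assuming a ring created with the RING_F_MP_HTS_ENQ and RING_F_MC_HTS_DEQ flags; the ring name, size and helper names are illustrative only:

#include <rte_memory.h>
#include <rte_ring.h>

/* Illustrative helper: ring using head/tail sync (HTS) on both sides. */
static struct rte_ring *
make_hts_ring(void)
{
	return rte_ring_create("hts_ring", 1024, SOCKET_ID_ANY,
			RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
}

/* Move n object pointers through the ring; the bulk HTS calls enqueue
 * and dequeue either all n objects or none. */
static void
hts_transfer(struct rte_ring *r, void *objs[], unsigned int n)
{
	unsigned int free_space, avail;

	if (rte_ring_mp_hts_enqueue_bulk(r, objs, n, &free_space) == 0)
		return;		/* not enough room for all n objects */

	rte_ring_mc_hts_dequeue_bulk(r, objs, n, &avail);
}
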
@@ -12,7 +12,6 @@
/**
* @file
- * @b EXPERIMENTAL: this API may change without prior notice
* It is not recommended to include this file directly.
* Please include <rte_ring_elem.h> instead.
*
@@ -67,7 +66,6 @@ extern "C" {
* @return
* The number of objects that can be enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n,
unsigned int *free_space)
@@ -93,7 +91,6 @@ rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n,
* @return
* The number of objects that can be enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n,
unsigned int *free_space)
@@ -118,7 +115,6 @@ rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n,
* @return
* Actual number of objects that can be enqueued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n,
unsigned int *free_space)
@@ -144,7 +140,6 @@ rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n,
* @return
* Actual number of objects that can be enqueued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n,
unsigned int *free_space)
@@ -168,7 +163,6 @@ rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n,
* @param n
* The number of objects to add to the ring from the obj_table.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table,
unsigned int esize, unsigned int n)
@@ -208,7 +202,6 @@ rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table,
* @param n
* The number of objects to add to the ring from the obj_table.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table,
unsigned int n)
@@ -237,7 +230,6 @@ rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects dequeued, either 0 or n.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table,
unsigned int esize, unsigned int n, unsigned int *available)
@@ -263,7 +255,6 @@ rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table,
* @return
* Actual number of objects dequeued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
@@ -293,7 +284,6 @@ rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table,
* @return
* The actual number of objects dequeued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table,
unsigned int esize, unsigned int n, unsigned int *available)
@@ -319,7 +309,6 @@ rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table,
* @return
* The actual number of objects dequeued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
@@ -338,7 +327,6 @@ rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table,
* @param n
* The number of objects to remove from the ring.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n)
{
@@ -371,7 +359,6 @@ rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n)
* @param n
* The number of objects to remove from the ring.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_finish(struct rte_ring *r, unsigned int n)
{
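
These _start/_finish pairs split an enqueue or dequeue into a reservation step and a completion step, so the application can inspect or fill the objects in between. A sketch of the dequeue side, assuming a ring created with rte_ring_create_elem() for 4-byte elements and a sync mode that supports this API (e.g. single consumer or HTS); the helper name is illustrative:

#include <stdint.h>
#include <rte_ring_elem.h>

/* Peek up to n elements: copy them out, inspect, then commit the removal. */
static unsigned int
peek_and_consume(struct rte_ring *r, uint32_t vals[], unsigned int n)
{
	unsigned int avail, got;

	got = rte_ring_dequeue_burst_elem_start(r, vals, sizeof(vals[0]),
			n, &avail);
	if (got != 0) {
		/* ... examine vals[0..got-1] here ... */
		rte_ring_dequeue_elem_finish(r, got);
	}
	return got;
}
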
@@ -12,7 +12,6 @@
/**
* @file
- * @b EXPERIMENTAL: this API may change without prior notice
* It is not recommended to include this file directly.
* Please include <rte_ring_elem.h> instead.
*
@@ -177,7 +176,6 @@ __rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize,
* @return
* The number of objects that can be enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
@@ -208,7 +206,6 @@ rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
* @return
* The number of objects that can be enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n,
struct rte_ring_zc_data *zcd, unsigned int *free_space)
@@ -240,7 +237,6 @@ rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n,
* @return
* The number of objects that can be enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
@@ -271,7 +267,6 @@ rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
* @return
* The number of objects that can be enqueued, either 0 or n.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n,
struct rte_ring_zc_data *zcd, unsigned int *free_space)
@@ -290,7 +285,6 @@ rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n,
* @param n
* The number of objects to add to the ring.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n)
{
@@ -323,7 +317,6 @@ rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n)
* @param n
* The number of pointers to objects to add to the ring.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n)
{
@@ -390,7 +383,6 @@ __rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r,
* @return
* The number of objects that can be dequeued, either 0 or n.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
@@ -420,7 +412,6 @@ rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
* @return
* The number of objects that can be dequeued, either 0 or n.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n,
struct rte_ring_zc_data *zcd, unsigned int *available)
@@ -453,7 +444,6 @@ rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n,
* @return
* The number of objects that can be dequeued, either 0 or n.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
@@ -483,7 +473,6 @@ rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
* @return
* The number of objects that can be dequeued, either 0 or n.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n,
struct rte_ring_zc_data *zcd, unsigned int *available)
@@ -502,7 +491,6 @@ rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n,
* @param n
* The number of objects to remove from the ring.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n)
{
@@ -535,7 +523,6 @@ rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n)
* @param n
* The number of objects to remove from the ring.
*/
-__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n)
{
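
The zero-copy variants hand back pointers into the ring itself via struct rte_ring_zc_data, so the application writes or reads elements in place and then commits with _finish. A sketch of the producer side for 8-byte elements, assuming a ring created with rte_ring_create_elem(..., sizeof(uint64_t), ...) and a supported producer sync mode (e.g. single producer or HTS); the helper name is illustrative:

#include <stdint.h>
#include <rte_ring_elem.h>

/* Reserve slots, copy data directly into the ring, then commit. */
static unsigned int
zc_produce(struct rte_ring *r, const uint64_t *src, unsigned int n)
{
	struct rte_ring_zc_data zcd;
	unsigned int reserved, i;
	uint64_t *p1, *p2;

	reserved = rte_ring_enqueue_zc_burst_elem_start(r, sizeof(*src), n,
			&zcd, NULL);
	if (reserved == 0)
		return 0;

	/* The reserved space may wrap around the end of the ring:
	 * zcd.n1 slots at zcd.ptr1, the remainder at zcd.ptr2. */
	p1 = zcd.ptr1;
	p2 = zcd.ptr2;
	for (i = 0; i < reserved; i++) {
		if (i < zcd.n1)
			p1[i] = src[i];
		else
			p2[i - zcd.n1] = src[i];
	}

	rte_ring_enqueue_zc_elem_finish(r, reserved);
	return reserved;
}
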
@@ -12,7 +12,6 @@
/**
* @file rte_ring_rts.h
- * @b EXPERIMENTAL: this API may change without prior notice
* It is not recommended to include this file directly.
* Please include <rte_ring.h> instead.
*
@@ -77,7 +76,6 @@ extern "C" {
* @return
* The number of objects enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
unsigned int esize, unsigned int n, unsigned int *free_space)
@@ -105,7 +103,6 @@ rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
unsigned int esize, unsigned int n, unsigned int *available)
@@ -133,7 +130,6 @@ rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
unsigned int esize, unsigned int n, unsigned int *free_space)
@@ -163,7 +159,6 @@ rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
unsigned int esize, unsigned int n, unsigned int *available)
@@ -187,7 +182,6 @@ rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
@@ -211,7 +205,6 @@ rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
@@ -235,7 +228,6 @@ rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
@@ -261,7 +253,6 @@ rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
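
The RTS (relaxed tail sync) calls mirror the HTS ones: bulk is all-or-nothing, burst moves as many objects as possible. A short sketch, assuming a ring created with the RING_F_MP_RTS_ENQ and RING_F_MC_RTS_DEQ flags; name, size and payload are placeholders:

#include <rte_memory.h>
#include <rte_ring.h>

/* Round-trip a burst of dummy pointers through an RTS ring. */
static void
rts_example(void)
{
	void *objs[32];
	unsigned int i, n, avail;
	struct rte_ring *r;

	r = rte_ring_create("rts_ring", 1024, SOCKET_ID_ANY,
			RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
	if (r == NULL)
		return;

	for (i = 0; i < RTE_DIM(objs); i++)
		objs[i] = &objs[i];	/* placeholder payload */

	/* Burst calls move as many objects as there is room/data for. */
	n = rte_ring_mp_rts_enqueue_burst(r, objs, RTE_DIM(objs), NULL);
	rte_ring_mc_rts_dequeue_burst(r, objs, n, &avail);

	rte_ring_free(r);
}
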
@@ -279,7 +270,6 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
* Producer HTD value, if producer is set in appropriate sync mode,
* or UINT32_MAX otherwise.
*/
-__rte_experimental
static inline uint32_t
rte_ring_get_prod_htd_max(const struct rte_ring *r)
{
@@ -299,7 +289,6 @@ rte_ring_get_prod_htd_max(const struct rte_ring *r)
* @return
* Zero on success, or negative error code otherwise.
*/
-__rte_experimental
static inline int
rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
{
@@ -319,7 +308,6 @@ rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
* Consumer HTD value, if consumer is set in appropriate sync mode,
* or UINT32_MAX otherwise.
*/
-__rte_experimental
static inline uint32_t
rte_ring_get_cons_htd_max(const struct rte_ring *r)
{
@@ -339,7 +327,6 @@ rte_ring_get_cons_htd_max(const struct rte_ring *r)
* @return
* Zero on success, or negative error code otherwise.
*/
-__rte_experimental
static inline int
rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v)
{
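
In RTS mode the head/tail-distance (HTD) limit bounds how far the producer or consumer head may run ahead of its tail; per the comments above, the getters return UINT32_MAX when the corresponding side is not in RTS sync mode. A sketch of tightening the producer limit; the halving policy and error code are purely illustrative:

#include <errno.h>
#include <stdint.h>
#include <rte_ring.h>

/* Halve the producer HTD limit; fails if the producer is not in RTS mode. */
static int
tighten_prod_htd(struct rte_ring *r)
{
	uint32_t cur = rte_ring_get_prod_htd_max(r);

	if (cur == UINT32_MAX)
		return -ENOTSUP;	/* producer not using RTS sync */

	return rte_ring_set_prod_htd_max(r, cur / 2);
}
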