[v2] mempool/ring: add support for new ring sync modes
Checks
Commit Message
v2:
- update Release Notes (as per comments)
Two new sync modes were introduced into rte_ring:
relaxed tail sync (RTS) and head/tail sync (HTS).
This change provides user with ability to select these
modes for ring based mempool via mempool ops API.
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Gage Eads <gage.eads@intel.com>
---
doc/guides/rel_notes/release_20_08.rst | 6 ++
drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
2 files changed, 94 insertions(+), 9 deletions(-)
Comments
Hi Konstantin,
On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> v2:
> - update Release Notes (as per comments)
>
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.
>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Gage Eads <gage.eads@intel.com>
> ---
> doc/guides/rel_notes/release_20_08.rst | 6 ++
> drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> 2 files changed, 94 insertions(+), 9 deletions(-)
>
> diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> index eaaf11c37..7bdcf3aac 100644
> --- a/doc/guides/rel_notes/release_20_08.rst
> +++ b/doc/guides/rel_notes/release_20_08.rst
> @@ -84,6 +84,12 @@ New Features
> * Dump ``rte_flow`` memory consumption.
> * Measure packet per second forwarding.
>
> +* **Added support for new sync modes into mempool ring driver.**
> +
> + Added ability to select new ring synchronisation modes:
> + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> + via mempool ops API.
> +
>
> Removed Items
> -------------
> diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> index bc123fc52..15ec7dee7 100644
> --- a/drivers/mempool/ring/rte_mempool_ring.c
> +++ b/drivers/mempool/ring/rte_mempool_ring.c
> @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> }
>
> +static int
> +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> + unsigned int n)
> +{
> + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
> +static int
> +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> + unsigned int n)
> +{
> + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
> static int
> common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> {
> @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> }
>
> +static int
> +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> +{
> + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
> +static int
> +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> +{
> + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
> static unsigned
> common_ring_get_count(const struct rte_mempool *mp)
> {
> return rte_ring_count(mp->pool_data);
> }
>
> -
> static int
> -common_ring_alloc(struct rte_mempool *mp)
> +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> {
> - int rg_flags = 0, ret;
> + int ret;
> char rg_name[RTE_RING_NAMESIZE];
> struct rte_ring *r;
>
> @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> return -rte_errno;
> }
>
> - /* ring flags */
> - if (mp->flags & MEMPOOL_F_SP_PUT)
> - rg_flags |= RING_F_SP_ENQ;
> - if (mp->flags & MEMPOOL_F_SC_GET)
> - rg_flags |= RING_F_SC_DEQ;
> -
> /*
> * Allocate the ring that will be used to store objects.
> * Ring functions will return appropriate errors if we are
> @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> return 0;
> }
>
> +static int
> +common_ring_alloc(struct rte_mempool *mp)
> +{
> + uint32_t rg_flags;
> +
> + rg_flags = 0;
Maybe it could go on the same line
> +
> + /* ring flags */
Not sure we need to keep this comment
> + if (mp->flags & MEMPOOL_F_SP_PUT)
> + rg_flags |= RING_F_SP_ENQ;
> + if (mp->flags & MEMPOOL_F_SC_GET)
> + rg_flags |= RING_F_SC_DEQ;
> +
> + return ring_alloc(mp, rg_flags);
> +}
> +
> +static int
> +rts_ring_alloc(struct rte_mempool *mp)
> +{
> + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> + return -EINVAL;
Why do we need this? Is it a problem to allow sc/sp in this mode (even
if it's not optimal)?
> +
> + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> +}
> +
> +static int
> +hts_ring_alloc(struct rte_mempool *mp)
> +{
> + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> + return -EINVAL;
> +
> + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> +}
> +
> static void
> common_ring_free(struct rte_mempool *mp)
> {
> @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> .get_count = common_ring_get_count,
> };
>
> +/* ops for mempool with ring in MT_RTS sync mode */
> +static const struct rte_mempool_ops ops_mt_rts = {
> + .name = "ring_mt_rts",
> + .alloc = rts_ring_alloc,
> + .free = common_ring_free,
> + .enqueue = rts_ring_mp_enqueue,
> + .dequeue = rts_ring_mc_dequeue,
> + .get_count = common_ring_get_count,
> +};
> +
> +/* ops for mempool with ring in MT_HTS sync mode */
> +static const struct rte_mempool_ops ops_mt_hts = {
> + .name = "ring_mt_hts",
> + .alloc = hts_ring_alloc,
> + .free = common_ring_free,
> + .enqueue = hts_ring_mp_enqueue,
> + .dequeue = hts_ring_mc_dequeue,
> + .get_count = common_ring_get_count,
> +};
> +
> MEMPOOL_REGISTER_OPS(ops_mp_mc);
> MEMPOOL_REGISTER_OPS(ops_sp_sc);
> MEMPOOL_REGISTER_OPS(ops_mp_sc);
> MEMPOOL_REGISTER_OPS(ops_sp_mc);
> +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> +MEMPOOL_REGISTER_OPS(ops_mt_hts);
Not really related to your patch, but I think we need a function to
dump the name of available mempool ops. We could even add a description.
The problem we have is that a user does not know on which criteria it
should use a driver or another (except for platform drivers).
Olivier
Hi Olivier,
> Hi Konstantin,
>
> On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > v2:
> > - update Release Notes (as per comments)
> >
> > Two new sync modes were introduced into rte_ring:
> > relaxed tail sync (RTS) and head/tail sync (HTS).
> > This change provides user with ability to select these
> > modes for ring based mempool via mempool ops API.
> >
> > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > Acked-by: Gage Eads <gage.eads@intel.com>
> > ---
> > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > 2 files changed, 94 insertions(+), 9 deletions(-)
> >
> > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > index eaaf11c37..7bdcf3aac 100644
> > --- a/doc/guides/rel_notes/release_20_08.rst
> > +++ b/doc/guides/rel_notes/release_20_08.rst
> > @@ -84,6 +84,12 @@ New Features
> > * Dump ``rte_flow`` memory consumption.
> > * Measure packet per second forwarding.
> >
> > +* **Added support for new sync modes into mempool ring driver.**
> > +
> > + Added ability to select new ring synchronisation modes:
> > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > + via mempool ops API.
> > +
> >
> > Removed Items
> > -------------
> > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > index bc123fc52..15ec7dee7 100644
> > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > }
> >
> > +static int
> > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > + unsigned int n)
> > +{
> > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > +static int
> > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > + unsigned int n)
> > +{
> > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > static int
> > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > {
> > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > }
> >
> > +static int
> > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > +{
> > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > +static int
> > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > +{
> > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > static unsigned
> > common_ring_get_count(const struct rte_mempool *mp)
> > {
> > return rte_ring_count(mp->pool_data);
> > }
> >
> > -
> > static int
> > -common_ring_alloc(struct rte_mempool *mp)
> > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > {
> > - int rg_flags = 0, ret;
> > + int ret;
> > char rg_name[RTE_RING_NAMESIZE];
> > struct rte_ring *r;
> >
> > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > return -rte_errno;
> > }
> >
> > - /* ring flags */
> > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > - rg_flags |= RING_F_SP_ENQ;
> > - if (mp->flags & MEMPOOL_F_SC_GET)
> > - rg_flags |= RING_F_SC_DEQ;
> > -
> > /*
> > * Allocate the ring that will be used to store objects.
> > * Ring functions will return appropriate errors if we are
> > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > return 0;
> > }
> >
> > +static int
> > +common_ring_alloc(struct rte_mempool *mp)
> > +{
> > + uint32_t rg_flags;
> > +
> > + rg_flags = 0;
>
> Maybe it could go on the same line
>
> > +
> > + /* ring flags */
>
> Not sure we need to keep this comment
>
> > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > + rg_flags |= RING_F_SP_ENQ;
> > + if (mp->flags & MEMPOOL_F_SC_GET)
> > + rg_flags |= RING_F_SC_DEQ;
> > +
> > + return ring_alloc(mp, rg_flags);
> > +}
> > +
> > +static int
> > +rts_ring_alloc(struct rte_mempool *mp)
> > +{
> > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > + return -EINVAL;
>
> Why do we need this? Is it a problem to allow sc/sp in this mode (even
> if it's not optimal)?
These new sync modes (RTS, HTS) are for MT.
For SP/SC - there is simply no point to use MT sync modes.
I suppose there are few choices:
1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
and create actual ring with ST sync mode for prod/cons.
2. Report an error.
3. Silently ignore these flags.
As I can see for "ring_mp_mc" ops, we are doing #1,
while for "stack" we are doing #3.
For RTS/HTS I chose #2, as it seems cleaner to me.
Any thoughts from your side what preferable behaviour should be?
>
> > +
> > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > +}
> > +
> > +static int
> > +hts_ring_alloc(struct rte_mempool *mp)
> > +{
> > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > + return -EINVAL;
> > +
> > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > +}
> > +
> > static void
> > common_ring_free(struct rte_mempool *mp)
> > {
> > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> > .get_count = common_ring_get_count,
> > };
> >
> > +/* ops for mempool with ring in MT_RTS sync mode */
> > +static const struct rte_mempool_ops ops_mt_rts = {
> > + .name = "ring_mt_rts",
> > + .alloc = rts_ring_alloc,
> > + .free = common_ring_free,
> > + .enqueue = rts_ring_mp_enqueue,
> > + .dequeue = rts_ring_mc_dequeue,
> > + .get_count = common_ring_get_count,
> > +};
> > +
> > +/* ops for mempool with ring in MT_HTS sync mode */
> > +static const struct rte_mempool_ops ops_mt_hts = {
> > + .name = "ring_mt_hts",
> > + .alloc = hts_ring_alloc,
> > + .free = common_ring_free,
> > + .enqueue = hts_ring_mp_enqueue,
> > + .dequeue = hts_ring_mc_dequeue,
> > + .get_count = common_ring_get_count,
> > +};
> > +
> > MEMPOOL_REGISTER_OPS(ops_mp_mc);
> > MEMPOOL_REGISTER_OPS(ops_sp_sc);
> > MEMPOOL_REGISTER_OPS(ops_mp_sc);
> > MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
> Not really related to your patch, but I think we need a function to
> dump the name of available mempool ops. We could even add a description.
> The problem we have is that a user does not know on which criteria it
> should use a driver or another (except for platform drivers).
Agree, it will be useful.
Though it is probably a subject for a separate patch.
Hi Konstantin,
On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> Hi Olivier,
>
> > Hi Konstantin,
> >
> > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > v2:
> > > - update Release Notes (as per comments)
> > >
> > > Two new sync modes were introduced into rte_ring:
> > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > This change provides user with ability to select these
> > > modes for ring based mempool via mempool ops API.
> > >
> > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > ---
> > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > >
> > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > index eaaf11c37..7bdcf3aac 100644
> > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > @@ -84,6 +84,12 @@ New Features
> > > * Dump ``rte_flow`` memory consumption.
> > > * Measure packet per second forwarding.
> > >
> > > +* **Added support for new sync modes into mempool ring driver.**
> > > +
> > > + Added ability to select new ring synchronisation modes:
> > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > + via mempool ops API.
> > > +
> > >
> > > Removed Items
> > > -------------
> > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > index bc123fc52..15ec7dee7 100644
> > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > }
> > >
> > > +static int
> > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > + unsigned int n)
> > > +{
> > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > > +static int
> > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > + unsigned int n)
> > > +{
> > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > > static int
> > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > {
> > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > }
> > >
> > > +static int
> > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > +{
> > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > > +static int
> > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > +{
> > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > > static unsigned
> > > common_ring_get_count(const struct rte_mempool *mp)
> > > {
> > > return rte_ring_count(mp->pool_data);
> > > }
> > >
> > > -
> > > static int
> > > -common_ring_alloc(struct rte_mempool *mp)
> > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > {
> > > - int rg_flags = 0, ret;
> > > + int ret;
> > > char rg_name[RTE_RING_NAMESIZE];
> > > struct rte_ring *r;
> > >
> > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > return -rte_errno;
> > > }
> > >
> > > - /* ring flags */
> > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > - rg_flags |= RING_F_SP_ENQ;
> > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > - rg_flags |= RING_F_SC_DEQ;
> > > -
> > > /*
> > > * Allocate the ring that will be used to store objects.
> > > * Ring functions will return appropriate errors if we are
> > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > return 0;
> > > }
> > >
> > > +static int
> > > +common_ring_alloc(struct rte_mempool *mp)
> > > +{
> > > + uint32_t rg_flags;
> > > +
> > > + rg_flags = 0;
> >
> > Maybe it could go on the same line
> >
> > > +
> > > + /* ring flags */
> >
> > Not sure we need to keep this comment
> >
> > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > + rg_flags |= RING_F_SP_ENQ;
> > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > + rg_flags |= RING_F_SC_DEQ;
> > > +
> > > + return ring_alloc(mp, rg_flags);
> > > +}
> > > +
> > > +static int
> > > +rts_ring_alloc(struct rte_mempool *mp)
> > > +{
> > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > + return -EINVAL;
> >
> > Why do we need this? Is it a problem to allow sc/sp in this mode (even
> > if it's not optimal)?
>
> These new sync modes (RTS, HTS) are for MT.
> For SP/SC - there is simply no point to use MT sync modes.
> I suppose there are few choices:
> 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> and create actual ring with ST sync mode for prod/cons.
> 2. Report an error.
> 3. Silently ignore these flags.
>
> As I can see for "ring_mp_mc" ops, we are doing #1,
> while for "stack" we are doing #3.
> For RTS/HTS I chose #2, as it seems cleaner to me.
> Any thoughts from your side what preferable behaviour should be?
The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
ring_mp_mc). I don't think we should look at it when using specific ops.
So I'll tend to say 3. is the correct thing to do.
>
> >
> > > +
> > > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > > +}
> > > +
> > > +static int
> > > +hts_ring_alloc(struct rte_mempool *mp)
> > > +{
> > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > + return -EINVAL;
> > > +
> > > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > > +}
> > > +
> > > static void
> > > common_ring_free(struct rte_mempool *mp)
> > > {
> > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> > > .get_count = common_ring_get_count,
> > > };
> > >
> > > +/* ops for mempool with ring in MT_RTS sync mode */
> > > +static const struct rte_mempool_ops ops_mt_rts = {
> > > + .name = "ring_mt_rts",
> > > + .alloc = rts_ring_alloc,
> > > + .free = common_ring_free,
> > > + .enqueue = rts_ring_mp_enqueue,
> > > + .dequeue = rts_ring_mc_dequeue,
> > > + .get_count = common_ring_get_count,
> > > +};
> > > +
> > > +/* ops for mempool with ring in MT_HTS sync mode */
> > > +static const struct rte_mempool_ops ops_mt_hts = {
> > > + .name = "ring_mt_hts",
> > > + .alloc = hts_ring_alloc,
> > > + .free = common_ring_free,
> > > + .enqueue = hts_ring_mp_enqueue,
> > > + .dequeue = hts_ring_mc_dequeue,
> > > + .get_count = common_ring_get_count,
> > > +};
> > > +
> > > MEMPOOL_REGISTER_OPS(ops_mp_mc);
> > > MEMPOOL_REGISTER_OPS(ops_sp_sc);
> > > MEMPOOL_REGISTER_OPS(ops_mp_sc);
> > > MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
>
> > Not really related to your patch, but I think we need a function to
> > dump the name of available mempool ops. We could even add a description.
> > The problem we have is that a user does not know on which criteria it
> > should use a driver or another (except for platform drivers).
>
> Agree, it will be useful.
> Though it is probably a subject for a separate patch.
>
Hi Olivier,
> Hi Konstantin,
>
> On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > v2:
> > > > - update Release Notes (as per comments)
> > > >
> > > > Two new sync modes were introduced into rte_ring:
> > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > This change provides user with ability to select these
> > > > modes for ring based mempool via mempool ops API.
> > > >
> > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > ---
> > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > >
> > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > index eaaf11c37..7bdcf3aac 100644
> > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > @@ -84,6 +84,12 @@ New Features
> > > > * Dump ``rte_flow`` memory consumption.
> > > > * Measure packet per second forwarding.
> > > >
> > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > +
> > > > + Added ability to select new ring synchronisation modes:
> > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > + via mempool ops API.
> > > > +
> > > >
> > > > Removed Items
> > > > -------------
> > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > index bc123fc52..15ec7dee7 100644
> > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > }
> > > >
> > > > +static int
> > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > + unsigned int n)
> > > > +{
> > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > > +static int
> > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > + unsigned int n)
> > > > +{
> > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > > static int
> > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > {
> > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > }
> > > >
> > > > +static int
> > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > +{
> > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > > +static int
> > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > +{
> > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > > static unsigned
> > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > {
> > > > return rte_ring_count(mp->pool_data);
> > > > }
> > > >
> > > > -
> > > > static int
> > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > {
> > > > - int rg_flags = 0, ret;
> > > > + int ret;
> > > > char rg_name[RTE_RING_NAMESIZE];
> > > > struct rte_ring *r;
> > > >
> > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > return -rte_errno;
> > > > }
> > > >
> > > > - /* ring flags */
> > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > - rg_flags |= RING_F_SP_ENQ;
> > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > - rg_flags |= RING_F_SC_DEQ;
> > > > -
> > > > /*
> > > > * Allocate the ring that will be used to store objects.
> > > > * Ring functions will return appropriate errors if we are
> > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > return 0;
> > > > }
> > > >
> > > > +static int
> > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > +{
> > > > + uint32_t rg_flags;
> > > > +
> > > > + rg_flags = 0;
> > >
> > > Maybe it could go on the same line
> > >
> > > > +
> > > > + /* ring flags */
> > >
> > > Not sure we need to keep this comment
> > >
> > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > + rg_flags |= RING_F_SP_ENQ;
> > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > + rg_flags |= RING_F_SC_DEQ;
> > > > +
> > > > + return ring_alloc(mp, rg_flags);
> > > > +}
> > > > +
> > > > +static int
> > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > +{
> > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > + return -EINVAL;
> > >
> > > Why do we need this? Is it a problem to allow sc/sp in this mode (even
> > > if it's not optimal)?
> >
> > These new sync modes (RTS, HTS) are for MT.
> > For SP/SC - there is simply no point to use MT sync modes.
> > I suppose there are few choices:
> > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > and create actual ring with ST sync mode for prod/cons.
> > 2. Report an error.
> > 3. Silently ignore these flags.
> >
> > As I can see for "ring_mp_mc" ops, we are doing #1,
> > while for "stack" we are doing #3.
> > For RTS/HTS I chose #2, as it seems cleaner to me.
> > Any thoughts from your side what preferable behaviour should be?
>
> The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> ring_mp_mc).
As I understand, nothing prevents the user from doing:
mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> I don't think we should look at it when using specific ops.
>
> So I'll tend to say 3. is the correct thing to do.
Ok, will resend v3 then.
>
> >
> > >
> > > > +
> > > > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > > > +}
> > > > +
> > > > +static int
> > > > +hts_ring_alloc(struct rte_mempool *mp)
> > > > +{
> > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > + return -EINVAL;
> > > > +
> > > > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > > > +}
> > > > +
> > > > static void
> > > > common_ring_free(struct rte_mempool *mp)
> > > > {
> > > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> > > > .get_count = common_ring_get_count,
> > > > };
> > > >
> > > > +/* ops for mempool with ring in MT_RTS sync mode */
> > > > +static const struct rte_mempool_ops ops_mt_rts = {
> > > > + .name = "ring_mt_rts",
> > > > + .alloc = rts_ring_alloc,
> > > > + .free = common_ring_free,
> > > > + .enqueue = rts_ring_mp_enqueue,
> > > > + .dequeue = rts_ring_mc_dequeue,
> > > > + .get_count = common_ring_get_count,
> > > > +};
> > > > +
> > > > +/* ops for mempool with ring in MT_HTS sync mode */
> > > > +static const struct rte_mempool_ops ops_mt_hts = {
> > > > + .name = "ring_mt_hts",
> > > > + .alloc = hts_ring_alloc,
> > > > + .free = common_ring_free,
> > > > + .enqueue = hts_ring_mp_enqueue,
> > > > + .dequeue = hts_ring_mc_dequeue,
> > > > + .get_count = common_ring_get_count,
> > > > +};
> > > > +
> > > > MEMPOOL_REGISTER_OPS(ops_mp_mc);
> > > > MEMPOOL_REGISTER_OPS(ops_sp_sc);
> > > > MEMPOOL_REGISTER_OPS(ops_mp_sc);
> > > > MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > > > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > > > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
> >
> > > Not really related to your patch, but I think we need a function to
> > > dump the name of available mempool ops. We could even add a description.
> > > The problem we have is that a user does not know on which criteria it
> > > should use a driver or another (except for platform drivers).
> >
> > Agree, it will be useful.
> > Though it is probably a subject for a separate patch.
> >
>
> Hi Olivier,
>
> > Hi Konstantin,
> >
> > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > Hi Olivier,
> > >
> > > > Hi Konstantin,
> > > >
> > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > v2:
> > > > > - update Release Notes (as per comments)
> > > > >
> > > > > Two new sync modes were introduced into rte_ring:
> > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > This change provides user with ability to select these
> > > > > modes for ring based mempool via mempool ops API.
> > > > >
> > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > ---
> > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > >
> > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > @@ -84,6 +84,12 @@ New Features
> > > > > * Dump ``rte_flow`` memory consumption.
> > > > > * Measure packet per second forwarding.
> > > > >
> > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > +
> > > > > + Added ability to select new ring synchronisation modes:
> > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > + via mempool ops API.
> > > > > +
> > > > >
> > > > > Removed Items
> > > > > -------------
> > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > index bc123fc52..15ec7dee7 100644
> > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > }
> > > > >
> > > > > +static int
> > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > + unsigned int n)
> > > > > +{
> > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > > +static int
> > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > + unsigned int n)
> > > > > +{
> > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > > static int
> > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > {
> > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > }
> > > > >
> > > > > +static int
> > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > +{
> > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > > +static int
> > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > +{
> > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > > static unsigned
> > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > {
> > > > > return rte_ring_count(mp->pool_data);
> > > > > }
> > > > >
> > > > > -
> > > > > static int
> > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > {
> > > > > - int rg_flags = 0, ret;
> > > > > + int ret;
> > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > struct rte_ring *r;
> > > > >
> > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > return -rte_errno;
> > > > > }
> > > > >
> > > > > - /* ring flags */
> > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > -
> > > > > /*
> > > > > * Allocate the ring that will be used to store objects.
> > > > > * Ring functions will return appropriate errors if we are
> > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > return 0;
> > > > > }
> > > > >
> > > > > +static int
> > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > +{
> > > > > + uint32_t rg_flags;
> > > > > +
> > > > > + rg_flags = 0;
> > > >
> > > > Maybe it could go on the same line
> > > >
> > > > > +
> > > > > + /* ring flags */
> > > >
> > > > Not sure we need to keep this comment
> > > >
> > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > +
> > > > > + return ring_alloc(mp, rg_flags);
> > > > > +}
> > > > > +
> > > > > +static int
> > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > +{
> > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > + return -EINVAL;
> > > >
> > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > if it's not optimal)?
> > >
> > > These new sync modes (RTS, HTS) are for MT.
> > > For SP/SC - there is simply no point to use MT sync modes.
> > > I suppose there are few choices:
> > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > and create actual ring with ST sync mode for prod/cons.
> > > 2. Report an error.
> > > 3. Silently ignore these flags.
> > >
> > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > while for "stack" we are doing #3.
> > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > Any thoughts from your side what preferable behaviour should be?
> >
> > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > ring_mp_mc).
>
> As I understand, nothing prevents user from doing:
>
> mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
Apologies, hit send accidentally.
I meant user can do:
mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
And in that case, he'll get an SP/SC ring underneath.
>
>
> >I don't think we should look at it when using specific ops.
> >
> > So I'll tend to say 3. is the correct thing to do.
>
> Ok, will resend v3 then.
>
Hi Konstantin,
On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
>
>
> >
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > Hi Olivier,
> > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > v2:
> > > > > > - update Release Notes (as per comments)
> > > > > >
> > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > This change provides user with ability to select these
> > > > > > modes for ring based mempool via mempool ops API.
> > > > > >
> > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > ---
> > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > >
> > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > * Dump ``rte_flow`` memory consumption.
> > > > > > * Measure packet per second forwarding.
> > > > > >
> > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > +
> > > > > > + Added ability to select new ring synchronisation modes:
> > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > + via mempool ops API.
> > > > > > +
> > > > > >
> > > > > > Removed Items
> > > > > > -------------
> > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > }
> > > > > >
> > > > > > +static int
> > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > + unsigned int n)
> > > > > > +{
> > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > > +static int
> > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > + unsigned int n)
> > > > > > +{
> > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > > static int
> > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > {
> > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > }
> > > > > >
> > > > > > +static int
> > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > +{
> > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > > +static int
> > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > +{
> > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > > static unsigned
> > > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > > {
> > > > > > return rte_ring_count(mp->pool_data);
> > > > > > }
> > > > > >
> > > > > > -
> > > > > > static int
> > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > {
> > > > > > - int rg_flags = 0, ret;
> > > > > > + int ret;
> > > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > > struct rte_ring *r;
> > > > > >
> > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > return -rte_errno;
> > > > > > }
> > > > > >
> > > > > > - /* ring flags */
> > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > > -
> > > > > > /*
> > > > > > * Allocate the ring that will be used to store objects.
> > > > > > * Ring functions will return appropriate errors if we are
> > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > return 0;
> > > > > > }
> > > > > >
> > > > > > +static int
> > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > +{
> > > > > > + uint32_t rg_flags;
> > > > > > +
> > > > > > + rg_flags = 0;
> > > > >
> > > > > Maybe it could go on the same line
> > > > >
> > > > > > +
> > > > > > + /* ring flags */
> > > > >
> > > > > Not sure we need to keep this comment
> > > > >
> > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > > +
> > > > > > + return ring_alloc(mp, rg_flags);
> > > > > > +}
> > > > > > +
> > > > > > +static int
> > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > +{
> > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > + return -EINVAL;
> > > > >
> > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > if it's not optimal)?
> > > >
> > > > These new sync modes (RTS, HTS) are for MT.
> > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > I suppose there are few choices:
> > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > and create actual ring with ST sync mode for prod/cons.
> > > > 2. Report an error.
> > > > 3. Silently ignore these flags.
> > > >
> > > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > > while for "stack" we are doing #3.
> > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > Any thoughts from your side what preferable behaviour should be?
> > >
> > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > ring_mp_mc).
> >
> > As I understand, nothing prevents user from doing:
> >
> > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
>
> Apologies, hit send accidently.
> I meant user can do:
>
> mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
>
> An in that case, he'll get SP/SC ring underneath.
It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
handler operations"), the flags SP_PUT/SC_GET are converted into a call
to rte_mempool_set_ops_byname() in rte_mempool_create() only.
In rte_mempool_create_empty(), these flags are ignored. It is expected
that the user calls rte_mempool_set_ops_byname() by itself.
I don't think it is a good behavior:
1/ The documentation of rte_mempool_create_empty() does not say that the
flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
sets the default ops like rte_mempool_create().
2/ If rte_mempool_set_ops_byname() is not called after
rte_mempool_create_empty() (and it looks it happens in dpdk's code),
the default ops are the ones registered at index 0. This depends on
the link order.
So I propose to move the following code in
rte_mempool_create_empty().
if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
else if (flags & MEMPOOL_F_SP_PUT)
ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
else if (flags & MEMPOOL_F_SC_GET)
ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
else
ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
What do you think?
>
> >
> >
> > >I don't think we should look at it when using specific ops.
> > >
> > > So I'll tend to say 3. is the correct thing to do.
> >
> > Ok, will resend v3 then.
> >
Hi Olivier,
> Hi Konstantin,
>
> On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> >
> >
> > >
> > > Hi Olivier,
> > >
> > > > Hi Konstantin,
> > > >
> > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > Hi Olivier,
> > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > v2:
> > > > > > > - update Release Notes (as per comments)
> > > > > > >
> > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > This change provides user with ability to select these
> > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > >
> > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > ---
> > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > >
> > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > * Dump ``rte_flow`` memory consumption.
> > > > > > > * Measure packet per second forwarding.
> > > > > > >
> > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > +
> > > > > > > + Added ability to select new ring synchronisation modes:
> > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > + via mempool ops API.
> > > > > > > +
> > > > > > >
> > > > > > > Removed Items
> > > > > > > -------------
> > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > }
> > > > > > >
> > > > > > > +static int
> > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > + unsigned int n)
> > > > > > > +{
> > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > > +static int
> > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > + unsigned int n)
> > > > > > > +{
> > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > > static int
> > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > {
> > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > }
> > > > > > >
> > > > > > > +static int
> > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > +{
> > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > > +static int
> > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > +{
> > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > > static unsigned
> > > > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > {
> > > > > > > return rte_ring_count(mp->pool_data);
> > > > > > > }
> > > > > > >
> > > > > > > -
> > > > > > > static int
> > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > {
> > > > > > > - int rg_flags = 0, ret;
> > > > > > > + int ret;
> > > > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > > > struct rte_ring *r;
> > > > > > >
> > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > return -rte_errno;
> > > > > > > }
> > > > > > >
> > > > > > > - /* ring flags */
> > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > > > -
> > > > > > > /*
> > > > > > > * Allocate the ring that will be used to store objects.
> > > > > > > * Ring functions will return appropriate errors if we are
> > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > return 0;
> > > > > > > }
> > > > > > >
> > > > > > > +static int
> > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > +{
> > > > > > > + uint32_t rg_flags;
> > > > > > > +
> > > > > > > + rg_flags = 0;
> > > > > >
> > > > > > Maybe it could go on the same line
> > > > > >
> > > > > > > +
> > > > > > > + /* ring flags */
> > > > > >
> > > > > > Not sure we need to keep this comment
> > > > > >
> > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > > > +
> > > > > > > + return ring_alloc(mp, rg_flags);
> > > > > > > +}
> > > > > > > +
> > > > > > > +static int
> > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > +{
> > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > + return -EINVAL;
> > > > > >
> > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > if it's not optimal)?
> > > > >
> > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > I suppose there are few choices:
> > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > and create actual ring with ST sync mode for prod/cons.
> > > > > 2. Report an error.
> > > > > 3. Silently ignore these flags.
> > > > >
> > > > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > > > while for "stack" we are doing #3.
> > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > > Any thoughts from your side what preferable behaviour should be?
> > > >
> > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > ring_mp_mc).
> > >
> > > As I understand, nothing prevents user from doing:
> > >
> > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> >
> > Apologies, hit send accidently.
> > I meant user can do:
> >
> > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> >
> > An in that case, he'll get SP/SC ring underneath.
>
> It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> handler operations"), the flags SP_PUT/SC_GET are converted into a call
> to rte_mempool_set_ops_byname() in rte_mempool_create() only.
>
> In rte_mempool_create_empty(), these flags are ignored. It is expected
> that the user calls rte_mempool_set_ops_byname() by itself.
As I understand the code - not exactly.
rte_mempool_create_empty() doesn't take any specific actions based on the 'flags' value,
but it does store its value inside mp->flags.
Later, when mempool_ops_alloc_once() is called these flags will be used by
common_ring_alloc() and might override selected by ops ring behaviour.
>
> I don't think it is a good behavior:
>
> 1/ The documentation of rte_mempool_create_empty() does not say that the
> flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> sets the default ops like rte_mempool_create().
>
> 2/ If rte_mempool_set_ops_byname() is not called after
> rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> the default ops are the ones registered at index 0. This depends on
> the link order.
>
> So I propose to move the following code in
> rte_mempool_create_empty().
>
> if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> else if (flags & MEMPOOL_F_SP_PUT)
> ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> else if (flags & MEMPOOL_F_SC_GET)
> ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> else
> ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
>
> What do you think?
I think it will be a good thing - as in that case we'll always have
"ring_mp_mc" selected as default one.
As another thought, it probably would be good to deprecate and later remove
MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
These days user can select this behaviour via mempool ops and such dualism
just makes things more error-prone and harder to maintain.
Especially as we don't have clear policy what should be the higher priority
for sync mode selection: mempool ops or flags.
On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> Hi Olivier,
>
> > Hi Konstantin,
> >
> > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > >
> > >
> > > >
> > > > Hi Olivier,
> > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > Hi Olivier,
> > > > > >
> > > > > > > Hi Konstantin,
> > > > > > >
> > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > v2:
> > > > > > > > - update Release Notes (as per comments)
> > > > > > > >
> > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > This change provides user with ability to select these
> > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > >
> > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > > ---
> > > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > > * Dump ``rte_flow`` memory consumption.
> > > > > > > > * Measure packet per second forwarding.
> > > > > > > >
> > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > +
> > > > > > > > + Added ability to select new ring synchronisation modes:
> > > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > + via mempool ops API.
> > > > > > > > +
> > > > > > > >
> > > > > > > > Removed Items
> > > > > > > > -------------
> > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > + unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > + unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > static int
> > > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > {
> > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > static unsigned
> > > > > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > > {
> > > > > > > > return rte_ring_count(mp->pool_data);
> > > > > > > > }
> > > > > > > >
> > > > > > > > -
> > > > > > > > static int
> > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > > {
> > > > > > > > - int rg_flags = 0, ret;
> > > > > > > > + int ret;
> > > > > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > > > > struct rte_ring *r;
> > > > > > > >
> > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > return -rte_errno;
> > > > > > > > }
> > > > > > > >
> > > > > > > > - /* ring flags */
> > > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > > > > -
> > > > > > > > /*
> > > > > > > > * Allocate the ring that will be used to store objects.
> > > > > > > > * Ring functions will return appropriate errors if we are
> > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > return 0;
> > > > > > > > }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +{
> > > > > > > > + uint32_t rg_flags;
> > > > > > > > +
> > > > > > > > + rg_flags = 0;
> > > > > > >
> > > > > > > Maybe it could go on the same line
> > > > > > >
> > > > > > > > +
> > > > > > > > + /* ring flags */
> > > > > > >
> > > > > > > Not sure we need to keep this comment
> > > > > > >
> > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > > > > +
> > > > > > > > + return ring_alloc(mp, rg_flags);
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +{
> > > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > + return -EINVAL;
> > > > > > >
> > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > > if it's not optimal)?
> > > > > >
> > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > > I suppose there are few choices:
> > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > > and create actual ring with ST sync mode for prod/cons.
> > > > > > 2. Report an error.
> > > > > > 3. Silently ignore these flags.
> > > > > >
> > > > > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > > > > while for "stack" we are doing #3.
> > > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > > > Any thoughts from your side what preferable behaviour should be?
> > > > >
> > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > > ring_mp_mc).
> > > >
> > > > As I understand, nothing prevents user from doing:
> > > >
> > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > >
> > > Apologies, hit send accidently.
> > > I meant user can do:
> > >
> > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > An in that case, he'll get SP/SC ring underneath.
> >
> > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> >
> > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > that the user calls rte_mempool_set_ops_byname() by itself.
>
> As I understand the code - not exactly.
> rte_mempool_create_empty() doesn't make any specific actions based on 'flags' value,
> but it does store it's value inside mp->flags.
> Later, when mempool_ops_alloc_once() is called these flags will be used by
> common_ring_alloc() and might override selected by ops ring behaviour.
>
> >
> > I don't think it is a good behavior:
> >
> > 1/ The documentation of rte_mempool_create_empty() does not say that the
> > flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> > sets the default ops like rte_mempool_create().
> >
> > 2/ If rte_mempool_set_ops_byname() is not called after
> > rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> > the default ops are the ones registered at index 0. This depends on
> > the link order.
> >
> > So I propose to move the following code in
> > rte_mempool_create_empty().
> >
> > if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > else if (flags & MEMPOOL_F_SP_PUT)
> > ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > else if (flags & MEMPOOL_F_SC_GET)
> > ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > else
> > ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> >
> > What do you think?
>
> I think it will be a good thing - as in that case we'll always have
> "ring_mp_mc" selected as default one.
> As another thought, it porbably would be good to deprecate and later remove
> MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> These days user can select this behaviour via mempool ops and such dualism
> just makes things more error-prone and harder to maintain.
> Especially as we don't have clear policy what should be the higher priority
> for sync mode selection: mempool ops or flags.
>
I'll tend to agree, however it would mean deprecating rte_mempool_create()
too, because we wouldn't be able to set ops with it. Or we would have to
add a 12th (!) argument to the function, to set the ops name.
I don't like having that many arguments to this function, but it seems
it is widely used, probably because it is just one function call (vs
create_empty + set_ops + populate). So adding a "ops_name" argument is
maybe the right thing to do, given we can keep abi compat.
> On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > > >
> > > >
> > > > >
> > > > > Hi Olivier,
> > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > > Hi Olivier,
> > > > > > >
> > > > > > > > Hi Konstantin,
> > > > > > > >
> > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > > v2:
> > > > > > > > > - update Release Notes (as per comments)
> > > > > > > > >
> > > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > > This change provides user with ability to select these
> > > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > > >
> > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > > > ---
> > > > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > > > * Dump ``rte_flow`` memory consumption.
> > > > > > > > > * Measure packet per second forwarding.
> > > > > > > > >
> > > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > > +
> > > > > > > > > + Added ability to select new ring synchronisation modes:
> > > > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > > + via mempool ops API.
> > > > > > > > > +
> > > > > > > > >
> > > > > > > > > Removed Items
> > > > > > > > > -------------
> > > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > + unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > + unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > static int
> > > > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > > {
> > > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > static unsigned
> > > > > > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > > > {
> > > > > > > > > return rte_ring_count(mp->pool_data);
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > -
> > > > > > > > > static int
> > > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > > > {
> > > > > > > > > - int rg_flags = 0, ret;
> > > > > > > > > + int ret;
> > > > > > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > > > > > struct rte_ring *r;
> > > > > > > > >
> > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > return -rte_errno;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > - /* ring flags */
> > > > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > -
> > > > > > > > > /*
> > > > > > > > > * Allocate the ring that will be used to store objects.
> > > > > > > > > * Ring functions will return appropriate errors if we are
> > > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > return 0;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > + uint32_t rg_flags;
> > > > > > > > > +
> > > > > > > > > + rg_flags = 0;
> > > > > > > >
> > > > > > > > Maybe it could go on the same line
> > > > > > > >
> > > > > > > > > +
> > > > > > > > > + /* ring flags */
> > > > > > > >
> > > > > > > > Not sure we need to keep this comment
> > > > > > > >
> > > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > +
> > > > > > > > > + return ring_alloc(mp, rg_flags);
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > > + return -EINVAL;
> > > > > > > >
> > > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > > > if it's not optimal)?
> > > > > > >
> > > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > > > I suppose there are few choices:
> > > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > > > and create actual ring with ST sync mode for prod/cons.
> > > > > > > 2. Report an error.
> > > > > > > 3. Silently ignore these flags.
> > > > > > >
> > > > > > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > > > > > while for "stack" we are doing #3.
> > > > > > > For RTS/HTS I chose #2, as it seems cleaner to me.
> > > > > > > Any thoughts from your side what preferable behaviour should be?
> > > > > >
> > > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > > > ring_mp_mc).
> > > > >
> > > > > As I understand, nothing prevents user from doing:
> > > > >
> > > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > > >
> > > > Apologies, hit send accidently.
> > > > I meant user can do:
> > > >
> > > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > > >
> > > > An in that case, he'll get SP/SC ring underneath.
> > >
> > > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> > > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> > >
> > > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > > that the user calls rte_mempool_set_ops_byname() by itself.
> >
> > As I understand the code - not exactly.
> > rte_mempool_create_empty() doesn't make any specific actions based on 'flags' value,
> > but it does store its value inside mp->flags.
> > Later, when mempool_ops_alloc_once() is called these flags will be used by
> > common_ring_alloc() and might override selected by ops ring behaviour.
> >
> > >
> > > I don't think it is a good behavior:
> > >
> > > 1/ The documentation of rte_mempool_create_empty() does not say that the
> > > flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> > > sets the default ops like rte_mempool_create().
> > >
> > > 2/ If rte_mempool_set_ops_byname() is not called after
> > > rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> > > the default ops are the ones registered at index 0. This depends on
> > > the link order.
> > >
> > > So I propose to move the following code in
> > > rte_mempool_create_empty().
> > >
> > > if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > > ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > > else if (flags & MEMPOOL_F_SP_PUT)
> > > ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > > else if (flags & MEMPOOL_F_SC_GET)
> > > ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > > else
> > > ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > What do you think?
> >
> > I think it will be a good thing - as in that case we'll always have
> > "ring_mp_mc" selected as default one.
> > As another thought, it probably would be good to deprecate and later remove
> > MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> > These days user can select this behaviour via mempool ops and such dualism
> > just makes things more error-prone and harder to maintain.
> > Especially as we don't have clear policy what should be the higher priority
> > for sync mode selection: mempool ops or flags.
> >
>
> I'll tend to agree, however it would mean deprecate rte_mempool_create()
> too, because we wouldn't be able to set ops with it. Or we would have to
> add a 12th (!) argument to the function, to set the ops name.
>
> I don't like having that many arguments to this function, but it seems
> it is widely used, probably because it is just one function call (vs
> create_empty + set_ops + populate). So adding a "ops_name" argument is
> maybe the right thing to do, given we can keep abi compat.
My thought was - just keep rte_mempool_create()
parameter list as it is, and always set ops to "ring_mp_mc" for it.
Users who'd like some other ops would force to use
create_empty+set_ops+populate.
That's pretty much the same what we have right now,
the only diff will be ring with SP/SC mode.
@@ -84,6 +84,12 @@ New Features
* Dump ``rte_flow`` memory consumption.
* Measure packet per second forwarding.
+* **Added support for new sync modes into mempool ring driver.**
+
+ Added ability to select new ring synchronisation modes:
+ ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
+ via mempool ops API.
+
Removed Items
-------------
@@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
+static int
+rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
@@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
+static int
+rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
static unsigned
common_ring_get_count(const struct rte_mempool *mp)
{
return rte_ring_count(mp->pool_data);
}
-
static int
-common_ring_alloc(struct rte_mempool *mp)
+ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
{
- int rg_flags = 0, ret;
+ int ret;
char rg_name[RTE_RING_NAMESIZE];
struct rte_ring *r;
@@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
return -rte_errno;
}
- /* ring flags */
- if (mp->flags & MEMPOOL_F_SP_PUT)
- rg_flags |= RING_F_SP_ENQ;
- if (mp->flags & MEMPOOL_F_SC_GET)
- rg_flags |= RING_F_SC_DEQ;
-
/*
* Allocate the ring that will be used to store objects.
* Ring functions will return appropriate errors if we are
@@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
return 0;
}
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+ uint32_t rg_flags;
+
+ rg_flags = 0;
+
+ /* ring flags */
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ return ring_alloc(mp, rg_flags);
+}
+
+static int
+rts_ring_alloc(struct rte_mempool *mp)
+{
+ if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
+ return -EINVAL;
+
+ return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
+}
+
+static int
+hts_ring_alloc(struct rte_mempool *mp)
+{
+ if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
+ return -EINVAL;
+
+ return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
+}
+
static void
common_ring_free(struct rte_mempool *mp)
{
@@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
.get_count = common_ring_get_count,
};
+/* ops for mempool with ring in MT_RTS sync mode */
+static const struct rte_mempool_ops ops_mt_rts = {
+ .name = "ring_mt_rts",
+ .alloc = rts_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = rts_ring_mp_enqueue,
+ .dequeue = rts_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+/* ops for mempool with ring in MT_HTS sync mode */
+static const struct rte_mempool_ops ops_mt_hts = {
+ .name = "ring_mt_hts",
+ .alloc = hts_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = hts_ring_mp_enqueue,
+ .dequeue = hts_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
MEMPOOL_REGISTER_OPS(ops_mp_mc);
MEMPOOL_REGISTER_OPS(ops_sp_sc);
MEMPOOL_REGISTER_OPS(ops_mp_sc);
MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mt_rts);
+MEMPOOL_REGISTER_OPS(ops_mt_hts);