mbuf: implement generic format for sched field
Commit Message
This patch implements the changes proposed in the deprecation
notes [1][2].
The opaque mbuf->hash.sched field is updated to support generic
definition in line with the ethdev TM and MTR APIs. The new generic
format contains: queue ID, traffic class, color.
In addition, the following API functions of the sched library have
been modified with an additional parameter of type struct
rte_sched_port to accommodate the changes made to the mbuf sched field.
(i) rte_sched_port_pkt_write()
(ii) rte_sched_port_pkt_read()
The other libraries, sample applications and tests which use mbuf
sched field have been updated as well.
[1] http://mails.dpdk.org/archives/dev/2018-February/090651.html
[2] https://mails.dpdk.org/archives/dev/2018-November/119051.html
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Signed-off-by: Reshma Pattan <reshma.pattan@intel.com>
---
examples/qos_sched/app_thread.c | 6 +-
examples/qos_sched/main.c | 1 +
.../rte_event_eth_tx_adapter.h | 4 +-
lib/librte_mbuf/Makefile | 2 +-
lib/librte_mbuf/rte_mbuf.h | 10 +--
lib/librte_pipeline/rte_table_action.c | 60 ++++++++-----
lib/librte_sched/Makefile | 2 +-
lib/librte_sched/rte_sched.c | 89 +++++++------------
lib/librte_sched/rte_sched.h | 10 ++-
test/test/test_sched.c | 8 +-
10 files changed, 92 insertions(+), 100 deletions(-)
Comments
Hi Jasvinder,
> -----Original Message-----
> From: Singh, Jasvinder
> Sent: Friday, November 23, 2018 4:54 PM
> To: dev@dpdk.org
> Cc: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>; Pattan, Reshma
> <reshma.pattan@intel.com>
> Subject: [PATCH] mbuf: implement generic format for sched field
>
> This patch implements the changes proposed in the deprecation
> notes [1][2].
>
> The opaque mbuf->hash.sched field is updated to support generic
> definition in line with the ethdev TM and MTR APIs. The new generic
> format contains: queue ID, traffic class, color.
>
> In addition, the following API functions of the sched library have
> been modified with an additional parameter of type struct
> rte_sched_port to accommodate the changes made to the mbuf sched field.
> (i) rte_sched_port_pkt_write()
> (ii) rte_sched_port_pkt_read()
>
> The other libraries, sample applications and tests which use mbuf
> sched field have been updated as well.
>
> [1] http://mails.dpdk.org/archives/dev/2018-February/090651.html
> [2] https://mails.dpdk.org/archives/dev/2018-November/119051.html
>
> Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> Signed-off-by: Reshma Pattan <reshma.pattan@intel.com>
> ---
> examples/qos_sched/app_thread.c | 6 +-
> examples/qos_sched/main.c | 1 +
> .../rte_event_eth_tx_adapter.h | 4 +-
> lib/librte_mbuf/Makefile | 2 +-
> lib/librte_mbuf/rte_mbuf.h | 10 +--
> lib/librte_pipeline/rte_table_action.c | 60 ++++++++-----
> lib/librte_sched/Makefile | 2 +-
> lib/librte_sched/rte_sched.c | 89 +++++++------------
> lib/librte_sched/rte_sched.h | 10 ++-
> test/test/test_sched.c | 8 +-
> 10 files changed, 92 insertions(+), 100 deletions(-)
>
<snip>
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 3dbc6695e..98428bd21 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -575,12 +575,10 @@ struct rte_mbuf {
> */
> } fdir; /**< Filter identifier if FDIR enabled */
> struct {
> - uint32_t lo;
> - uint32_t hi;
> - /**< The event eth Tx adapter uses this field
> - * to store Tx queue id.
> - * @see
> rte_event_eth_tx_adapter_txq_set()
> - */
> + uint32_t queue_id; /**< Queue ID. */
> + uint8_t traffic_class; /**< Traffic class ID. */
We should add comment here that traffic class 0 is the highest priority traffic class.
> + uint8_t color; /**< Color. */
We should create a new file rte_color.h in a common place (librte_eal/common/include) to consolidate the color definition, which is currently replicated in too many places, such as: rte_meter.h, rte_mtr.h, rte_tm.h.
We should include the rte_color.h file here (and in the above header files)
We should also document the link between this field and the color enum type (@see ...).
> + uint16_t reserved; /**< Reserved. */
> } sched; /**< Hierarchical scheduler */
> /**< User defined tags. See
> rte_distributor_process() */
> uint32_t usr;
We should also add trivial inline functions to read/write these mbuf fields as part of this header file. We want to discourage people from accessing these fields directly.
Besides the functions to read/write each field individually, we should also have a function to read all the sched fields in one operation, as well as another one to write all the sched fields in one operation.
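For example (function names below are only illustrative, to be finalized as part of the patch), the per-field accessors added to rte_mbuf.h could look along these lines:

/* Illustrative sketch only: trivial accessors for mbuf->hash.sched. */
static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
	return m->hash.sched.queue_id;
}

static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
	return m->hash.sched.traffic_class;
}

static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
	return m->hash.sched.color;
}

static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
	m->hash.sched.queue_id = queue_id;
}

static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
	m->hash.sched.traffic_class = traffic_class;
}

static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
	m->hash.sched.color = color;
}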
> diff --git a/lib/librte_pipeline/rte_table_action.c
> b/lib/librte_pipeline/rte_table_action.c
> index 7c7c8dd82..99f2d779b 100644
> --- a/lib/librte_pipeline/rte_table_action.c
> +++ b/lib/librte_pipeline/rte_table_action.c
> @@ -108,12 +108,12 @@ mtr_cfg_check(struct rte_table_action_mtr_config
> *mtr)
> }
>
> #define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
> - ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
> - ((((uint64_t)(tc)) & 0x3) << 2) | \
> - ((((uint64_t)(color)) & 0x3) << 4)))
> + ((uint64_t)((((uint64_t)(queue)) & 0xffffffff) | \
> + ((((uint64_t)(tc)) & 0xff) << 32) | \
> + ((((uint64_t)(color)) & 0xff) << 40)))
>
> #define MBUF_SCHED_COLOR(sched, color) \
> - (((sched) & (~0x30LLU)) | ((color) << 4))
> + ((uint64_t)((sched) & (~0xff000000LLU)) | (((uint64_t)(color)) << 40))
>
Given the read/write mbuf->sched field functions, the above two macros are no longer needed.
> struct mtr_trtcm_data {
> struct rte_meter_trtcm trtcm;
> @@ -176,7 +176,7 @@ mtr_data_size(struct rte_table_action_mtr_config
> *mtr)
> struct dscp_table_entry_data {
> enum rte_meter_color color;
> uint16_t tc;
> - uint16_t queue_tc_color;
> + uint32_t queue;
> };
>
> struct dscp_table_data {
> @@ -368,7 +368,6 @@ tm_cfg_check(struct rte_table_action_tm_config
> *tm)
> }
>
> struct tm_data {
> - uint16_t queue_tc_color;
> uint16_t subport;
> uint32_t pipe;
> } __attribute__((__packed__));
> @@ -397,26 +396,40 @@ tm_apply(struct tm_data *data,
> return status;
>
> /* Apply */
> - data->queue_tc_color = 0;
> data->subport = (uint16_t) p->subport_id;
> data->pipe = p->pipe_id;
>
> return 0;
> }
>
> +static uint32_t
> +tm_sched_qindex(struct tm_data *data,
> + struct dscp_table_entry_data *dscp,
> + struct rte_table_action_tm_config *cfg) {
> +
> + uint32_t result;
> +
> + result = data->subport * cfg->n_pipes_per_subport + data->pipe;
Since n_subports_per_pipe and n_pipes_per_subport are enforced to be power of 2, we should replace multiplication/division with shift left/right. We probably need to store log2 correspondents in the action context.
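For illustration only (the n_pipes_per_subport_log2 field below is hypothetical and would have to be added to the TM action configuration), the queue index computation could then avoid the run-time multiplication:

static uint32_t
tm_sched_qindex(struct tm_data *data,
	struct dscp_table_entry_data *dscp,
	struct rte_table_action_tm_config *cfg)
{
	uint32_t result;

	/* Hypothetical log2 field replaces the run-time multiplication;
	 * the remaining multipliers are compile-time constants.
	 */
	result = (data->subport << cfg->n_pipes_per_subport_log2) + data->pipe;
	result = result * RTE_TABLE_ACTION_TC_MAX + dscp->tc;
	result = result * RTE_TABLE_ACTION_TC_QUEUE_MAX + dscp->queue;

	return result;
}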
> + result = result * RTE_TABLE_ACTION_TC_MAX + dscp->tc;
> + result = result * RTE_TABLE_ACTION_TC_QUEUE_MAX + dscp-
> >queue;
> +
> + return result;
> +}
> +
> static __rte_always_inline void
> pkt_work_tm(struct rte_mbuf *mbuf,
> struct tm_data *data,
> struct dscp_table_data *dscp_table,
> - uint32_t dscp)
> + uint32_t dscp,
> + struct rte_table_action_tm_config *cfg)
> {
> struct dscp_table_entry_data *dscp_entry = &dscp_table-
> >entry[dscp];
> - struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
> - struct tm_data sched;
> + uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
> + uint32_t queue = tm_sched_qindex(data, dscp_entry, cfg);
>
> - sched = *data;
> - sched.queue_tc_color = dscp_entry->queue_tc_color;
> - *sched_ptr = sched;
> + *sched_ptr = MBUF_SCHED_QUEUE_TC_COLOR(queue,
> + dscp_entry->tc,
> + dscp_entry->color);
> }
>
> /**
> @@ -2580,17 +2593,13 @@ rte_table_action_dscp_table_update(struct
> rte_table_action *action,
> &action->dscp_table.entry[i];
> struct rte_table_action_dscp_table_entry *entry =
> &table->entry[i];
> - uint16_t queue_tc_color =
> - MBUF_SCHED_QUEUE_TC_COLOR(entry-
> >tc_queue_id,
> - entry->tc_id,
> - entry->color);
>
> if ((dscp_mask & (1LLU << i)) == 0)
> continue;
>
> data->color = entry->color;
> data->tc = entry->tc_id;
> - data->queue_tc_color = queue_tc_color;
> + data->queue = entry->tc_queue_id;
> }
>
> return 0;
> @@ -2882,7 +2891,8 @@ pkt_work(struct rte_mbuf *mbuf,
> pkt_work_tm(mbuf,
> data,
> &action->dscp_table,
> - dscp);
> + dscp,
> + &cfg->tm);
> }
>
> if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
> @@ -3108,22 +3118,26 @@ pkt4_work(struct rte_mbuf **mbufs,
> pkt_work_tm(mbuf0,
> data0,
> &action->dscp_table,
> - dscp0);
> + dscp0,
> + &cfg->tm);
>
> pkt_work_tm(mbuf1,
> data1,
> &action->dscp_table,
> - dscp1);
> + dscp1,
> + &cfg->tm);
>
> pkt_work_tm(mbuf2,
> data2,
> &action->dscp_table,
> - dscp2);
> + dscp2,
> + &cfg->tm);
>
> pkt_work_tm(mbuf3,
> data3,
> &action->dscp_table,
> - dscp3);
> + dscp3,
> + &cfg->tm);
> }
>
> if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
> diff --git a/lib/librte_sched/Makefile b/lib/librte_sched/Makefile
> index 46c53ed71..644fd9d15 100644
> --- a/lib/librte_sched/Makefile
> +++ b/lib/librte_sched/Makefile
> @@ -18,7 +18,7 @@ LDLIBS += -lrte_timer
>
> EXPORT_MAP := rte_sched_version.map
>
> -LIBABIVER := 1
> +LIBABIVER := 2
>
> #
> # all source are stored in SRCS-y
> diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
> index 587d5e602..7bf4d6400 100644
> --- a/lib/librte_sched/rte_sched.c
> +++ b/lib/librte_sched/rte_sched.c
> @@ -128,22 +128,6 @@ enum grinder_state {
> e_GRINDER_READ_MBUF
> };
>
> -/*
> - * Path through the scheduler hierarchy used by the scheduler enqueue
> - * operation to identify the destination queue for the current
> - * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
> - * each packet, typically written by the classification stage and read
> - * by scheduler enqueue.
> - */
> -struct rte_sched_port_hierarchy {
> - uint16_t queue:2; /**< Queue ID (0 .. 3) */
> - uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
> - uint32_t color:2; /**< Color */
> - uint16_t unused:10;
> - uint16_t subport; /**< Subport ID */
> - uint32_t pipe; /**< Pipe ID */
> -};
> -
> struct rte_sched_grinder {
> /* Pipe cache */
> uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
> @@ -241,16 +225,12 @@ enum rte_sched_port_array {
> e_RTE_SCHED_PORT_ARRAY_TOTAL,
> };
>
> -#ifdef RTE_SCHED_COLLECT_STATS
> -
> static inline uint32_t
> rte_sched_port_queues_per_subport(struct rte_sched_port *port)
> {
> return RTE_SCHED_QUEUES_PER_PIPE * port-
> >n_pipes_per_subport;
> }
>
> -#endif
> -
> static inline uint32_t
> rte_sched_port_queues_per_port(struct rte_sched_port *port)
> {
> @@ -1006,44 +986,50 @@ rte_sched_port_pipe_profile_add(struct
> rte_sched_port *port,
> return 0;
> }
>
> +static inline uint32_t
> +rte_sched_port_qindex(struct rte_sched_port *port,
> + uint32_t subport,
> + uint32_t pipe,
> + uint32_t traffic_class,
> + uint32_t queue)
> +{
> + uint32_t result;
> +
> + result = subport * port->n_pipes_per_subport + pipe;
Since n_subports_per_pipe and n_pipes_per_subport are enforced to be power of 2, we should replace multiplication/division with shift left/right.
> + result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
> traffic_class;
> + result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
> +
> + return result;
> +}
> +
> void
> -rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> +rte_sched_port_pkt_write(struct rte_sched_port *port,
> + struct rte_mbuf *pkt,
> uint32_t subport, uint32_t pipe, uint32_t
> traffic_class,
> uint32_t queue, enum rte_meter_color color)
> {
> - struct rte_sched_port_hierarchy *sched
> - = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
> -
> - RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
> -
> - sched->color = (uint32_t) color;
> - sched->subport = subport;
> - sched->pipe = pipe;
> - sched->traffic_class = traffic_class;
> - sched->queue = queue;
> + pkt->hash.sched.traffic_class = traffic_class;
> + pkt->hash.sched.queue_id = rte_sched_port_qindex(port, subport,
> pipe,
> + traffic_class, queue);
> + pkt->hash.sched.color = (uint8_t) color;
> }
>
> void
> -rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
> +rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
> + const struct rte_mbuf *pkt,
> uint32_t *subport, uint32_t *pipe,
> uint32_t *traffic_class, uint32_t *queue)
> {
> - const struct rte_sched_port_hierarchy *sched
> - = (const struct rte_sched_port_hierarchy *) &pkt-
> >hash.sched;
> -
> - *subport = sched->subport;
> - *pipe = sched->pipe;
> - *traffic_class = sched->traffic_class;
> - *queue = sched->queue;
> + *subport = pkt->hash.sched.queue_id /
> rte_sched_port_queues_per_subport(port);
> + *pipe = pkt->hash.sched.queue_id /
> RTE_SCHED_QUEUES_PER_PIPE;
Since n_subports_per_pipe and n_pipes_per_subport are enforced to be power of 2, we should replace multiplication/division with shift left/right.
> + *traffic_class = pkt->hash.sched.traffic_class;
> + *queue = pkt->hash.sched.queue_id %
> RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
Since RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS is enforced to be a power of 2, please replace modulo with bitwise AND of (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1).
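As a sketch only (the n_queues_per_subport_log2 field below is hypothetical), the body of rte_sched_port_pkt_read_tree_path() could then become roughly:

	uint32_t qid = pkt->hash.sched.queue_id;

	/* Hypothetical field holding
	 * log2(RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport).
	 */
	*subport = qid >> port->n_queues_per_subport_log2;
	/* RTE_SCHED_QUEUES_PER_PIPE is a compile-time power of 2,
	 * so this division compiles down to a shift.
	 */
	*pipe = qid / RTE_SCHED_QUEUES_PER_PIPE;
	*traffic_class = pkt->hash.sched.traffic_class;
	*queue = qid & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1);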
> }
>
> enum rte_meter_color
> rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
> {
> - const struct rte_sched_port_hierarchy *sched
> - = (const struct rte_sched_port_hierarchy *) &pkt-
> >hash.sched;
> -
> - return (enum rte_meter_color) sched->color;
> + return (enum rte_meter_color) pkt->hash.sched.color;
> }
Should use the mbuf->sched.color read function to be added in rte_mbuf.h.
>
> int
> @@ -1100,18 +1086,6 @@ rte_sched_queue_read_stats(struct
> rte_sched_port *port,
> return 0;
> }
>
> -static inline uint32_t
> -rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport,
> uint32_t pipe, uint32_t traffic_class, uint32_t queue)
> -{
> - uint32_t result;
> -
> - result = subport * port->n_pipes_per_subport + pipe;
> - result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
> traffic_class;
> - result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
> -
> - return result;
> -
Since n_subports_per_pipe and n_pipes_per_subport are enforced to be power of 2, we should replace multiplication/division with shift left/right.
}
> -
> #ifdef RTE_SCHED_DEBUG
>
> static inline int
> @@ -1272,11 +1246,8 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct
> rte_sched_port *port,
> #ifdef RTE_SCHED_COLLECT_STATS
> struct rte_sched_queue_extra *qe;
> #endif
> - uint32_t subport, pipe, traffic_class, queue, qindex;
> -
> - rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe,
> &traffic_class, &queue);
> + uint32_t qindex = pkt->hash.sched.queue_id;
Let's use the read function for this mbuf field.
>
> - qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class,
> queue);
> q = port->queue + qindex;
> rte_prefetch0(q);
> #ifdef RTE_SCHED_COLLECT_STATS
> diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
> index 84fa896de..4d9f869eb 100644
> --- a/lib/librte_sched/rte_sched.h
> +++ b/lib/librte_sched/rte_sched.h
> @@ -355,6 +355,8 @@ rte_sched_queue_read_stats(struct rte_sched_port
> *port,
> * Scheduler hierarchy path write to packet descriptor. Typically
> * called by the packet classification stage.
> *
> + * @param port
> + * Handle to port scheduler instance
> * @param pkt
> * Packet descriptor handle
> * @param subport
> @@ -369,7 +371,8 @@ rte_sched_queue_read_stats(struct rte_sched_port
> *port,
> * Packet color set
> */
> void
> -rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> +rte_sched_port_pkt_write(struct rte_sched_port *port,
> + struct rte_mbuf *pkt,
> uint32_t subport, uint32_t pipe, uint32_t
> traffic_class,
> uint32_t queue, enum rte_meter_color color);
>
> @@ -379,6 +382,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> * enqueue operation. The subport, pipe, traffic class and queue
> * parameters need to be pre-allocated by the caller.
> *
> + * @param port
> + * Handle to port scheduler instance
> * @param pkt
> * Packet descriptor handle
> * @param subport
> @@ -392,7 +397,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> *
> */
> void
> -rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
> +rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
> + const struct rte_mbuf *pkt,
> uint32_t *subport, uint32_t *pipe,
> uint32_t *traffic_class, uint32_t *queue);
>
<snip>
Regards,
Cristian
<snip>
>
> > diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> > index 3dbc6695e..98428bd21 100644
> > --- a/lib/librte_mbuf/rte_mbuf.h
> > +++ b/lib/librte_mbuf/rte_mbuf.h
> > @@ -575,12 +575,10 @@ struct rte_mbuf {
> > */
> > } fdir; /**< Filter identifier if FDIR enabled */
> > struct {
> > - uint32_t lo;
> > - uint32_t hi;
> > - /**< The event eth Tx adapter uses this field
> > - * to store Tx queue id.
> > - * @see
> > rte_event_eth_tx_adapter_txq_set()
> > - */
> > + uint32_t queue_id; /**< Queue ID. */
> > + uint8_t traffic_class; /**< Traffic class ID. */
>
> We should add comment here that traffic class 0 is the highest priority traffic
> class.
Will add the suggested comment.
>
> > + uint8_t color; /**< Color. */
>
> We should create a new file rte_color.h in a common place
> (librte_eal/common/include) to consolidate the color definition, which is
> currently replicated in too many places, such as: rte_meter.h, rte_mtr.h,
> rte_tm.h.
>
> We should include the rte_color.h file here (and in the above header files)
>
> We should also document the link between this field and the color enum type
> (@see ...).
Replacing the existing color definition with the above suggested one in rte_meter.h (librte_meter)
would be an ABI break. We can do it separately through a different patch.
>
> > + uint16_t reserved; /**< Reserved. */
> > } sched; /**< Hierarchical scheduler */
> > /**< User defined tags. See
> > rte_distributor_process() */
> > uint32_t usr;
>
> We should also add trivial inline functions to read/write these mbuf fields as
> part of this header file. We want to discourage people from accessing these
> fields directly.
>
> Besides the functions to read/write each field individually, we should also
> have a function to read all the sched fields in one operation, as well as another
> one to write all the sched fields in one operation.
Will create inline functions for reading and writing each field separately and jointly.
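A rough sketch of the combined helpers, assuming names along the lines of rte_mbuf_sched_get()/rte_mbuf_sched_set() (final naming still to be decided):

static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
		uint8_t *traffic_class, uint8_t *color)
{
	/* Illustrative only: read all sched fields in one call. */
	*queue_id = m->hash.sched.queue_id;
	*traffic_class = m->hash.sched.traffic_class;
	*color = m->hash.sched.color;
}

static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
		uint8_t traffic_class, uint8_t color)
{
	/* Illustrative only: write all sched fields in one call. */
	m->hash.sched.queue_id = queue_id;
	m->hash.sched.traffic_class = traffic_class;
	m->hash.sched.color = color;
	m->hash.sched.reserved = 0;
}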
>
> > diff --git a/lib/librte_pipeline/rte_table_action.c
> > b/lib/librte_pipeline/rte_table_action.c
> > index 7c7c8dd82..99f2d779b 100644
> > --- a/lib/librte_pipeline/rte_table_action.c
> > +++ b/lib/librte_pipeline/rte_table_action.c
> > @@ -108,12 +108,12 @@ mtr_cfg_check(struct rte_table_action_mtr_config
> > *mtr)
> > }
> >
> > #define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
> > - ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
> > - ((((uint64_t)(tc)) & 0x3) << 2) | \
> > - ((((uint64_t)(color)) & 0x3) << 4)))
> > + ((uint64_t)((((uint64_t)(queue)) & 0xffffffff) | \
> > + ((((uint64_t)(tc)) & 0xff) << 32) | \
> > + ((((uint64_t)(color)) & 0xff) << 40)))
> >
> > #define MBUF_SCHED_COLOR(sched, color) \
> > - (((sched) & (~0x30LLU)) | ((color) << 4))
> > + ((uint64_t)((sched) & (~0xff000000LLU)) | (((uint64_t)(color)) <<
> > +40))
> >
>
> Given the read/write mbuf->sched field functions, the above two macros are
> no longer needed.
Will remove this.
> > struct mtr_trtcm_data {
> > struct rte_meter_trtcm trtcm;
> > @@ -176,7 +176,7 @@ mtr_data_size(struct rte_table_action_mtr_config
> > *mtr)
> > struct dscp_table_entry_data {
> > enum rte_meter_color color;
> > uint16_t tc;
> > - uint16_t queue_tc_color;
> > + uint32_t queue;
> > };
> >
> > struct dscp_table_data {
> > @@ -368,7 +368,6 @@ tm_cfg_check(struct rte_table_action_tm_config
> > *tm)
> > }
> >
> > struct tm_data {
> > - uint16_t queue_tc_color;
> > uint16_t subport;
> > uint32_t pipe;
> > } __attribute__((__packed__));
> > @@ -397,26 +396,40 @@ tm_apply(struct tm_data *data,
> > return status;
> >
> > /* Apply */
> > - data->queue_tc_color = 0;
> > data->subport = (uint16_t) p->subport_id;
> > data->pipe = p->pipe_id;
> >
> > return 0;
> > }
> >
> > +static uint32_t
> > +tm_sched_qindex(struct tm_data *data,
> > + struct dscp_table_entry_data *dscp,
> > + struct rte_table_action_tm_config *cfg) {
> > +
> > + uint32_t result;
> > +
> > + result = data->subport * cfg->n_pipes_per_subport + data->pipe;
>
> Since n_subports_per_pipe and n_pipes_per_subport are enforced to be
> power of 2, we should replace multiplication/division with shift left/right. We
> probably need to store log2 correspondents in the action context.
Thanks. Will store the log2 values in the action context and use them at run time for the shift operations.
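For instance (helper and field names below are purely illustrative), the log2 values could be computed once at configuration time:

/* Illustrative helper: log2 of a value known to be a power of 2. */
static inline uint32_t
log2_pow2(uint32_t x)
{
	return (uint32_t)__builtin_ctz(x);
}

/* Then, at TM action configuration time (hypothetical field name):
 *   tm->n_pipes_per_subport_log2 = log2_pow2(cfg->n_pipes_per_subport);
 */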
> > + result = result * RTE_TABLE_ACTION_TC_MAX + dscp->tc;
> > + result = result * RTE_TABLE_ACTION_TC_QUEUE_MAX + dscp-
> > >queue;
> > +
> > + return result;
> > +}
> > +
> > static __rte_always_inline void
> > pkt_work_tm(struct rte_mbuf *mbuf,
> > struct tm_data *data,
> > struct dscp_table_data *dscp_table,
> > - uint32_t dscp)
> > + uint32_t dscp,
> > + struct rte_table_action_tm_config *cfg)
> > {
> > struct dscp_table_entry_data *dscp_entry = &dscp_table-
> > >entry[dscp];
> > - struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
> > - struct tm_data sched;
> > + uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
> > + uint32_t queue = tm_sched_qindex(data, dscp_entry, cfg);
> >
> > - sched = *data;
> > - sched.queue_tc_color = dscp_entry->queue_tc_color;
> > - *sched_ptr = sched;
> > + *sched_ptr = MBUF_SCHED_QUEUE_TC_COLOR(queue,
> > + dscp_entry->tc,
> > + dscp_entry->color);
> > }
> >
> > /**
> > @@ -2580,17 +2593,13 @@ rte_table_action_dscp_table_update(struct
> > rte_table_action *action,
> > &action->dscp_table.entry[i];
> > struct rte_table_action_dscp_table_entry *entry =
> > &table->entry[i];
> > - uint16_t queue_tc_color =
> > - MBUF_SCHED_QUEUE_TC_COLOR(entry-
> > >tc_queue_id,
> > - entry->tc_id,
> > - entry->color);
> >
> > if ((dscp_mask & (1LLU << i)) == 0)
> > continue;
> >
> > data->color = entry->color;
> > data->tc = entry->tc_id;
> > - data->queue_tc_color = queue_tc_color;
> > + data->queue = entry->tc_queue_id;
> > }
> >
> > return 0;
> > @@ -2882,7 +2891,8 @@ pkt_work(struct rte_mbuf *mbuf,
> > pkt_work_tm(mbuf,
> > data,
> > &action->dscp_table,
> > - dscp);
> > + dscp,
> > + &cfg->tm);
> > }
> >
> > if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) { @@
> > -3108,22 +3118,26 @@ pkt4_work(struct rte_mbuf **mbufs,
> > pkt_work_tm(mbuf0,
> > data0,
> > &action->dscp_table,
> > - dscp0);
> > + dscp0,
> > + &cfg->tm);
> >
> > pkt_work_tm(mbuf1,
> > data1,
> > &action->dscp_table,
> > - dscp1);
> > + dscp1,
> > + &cfg->tm);
> >
> > pkt_work_tm(mbuf2,
> > data2,
> > &action->dscp_table,
> > - dscp2);
> > + dscp2,
> > + &cfg->tm);
> >
> > pkt_work_tm(mbuf3,
> > data3,
> > &action->dscp_table,
> > - dscp3);
> > + dscp3,
> > + &cfg->tm);
> > }
> >
> > if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) { diff
> > --git a/lib/librte_sched/Makefile b/lib/librte_sched/Makefile index
> > 46c53ed71..644fd9d15 100644
> > --- a/lib/librte_sched/Makefile
> > +++ b/lib/librte_sched/Makefile
> > @@ -18,7 +18,7 @@ LDLIBS += -lrte_timer
> >
> > EXPORT_MAP := rte_sched_version.map
> >
> > -LIBABIVER := 1
> > +LIBABIVER := 2
> >
> > #
> > # all source are stored in SRCS-y
> > diff --git a/lib/librte_sched/rte_sched.c
> > b/lib/librte_sched/rte_sched.c index 587d5e602..7bf4d6400 100644
> > --- a/lib/librte_sched/rte_sched.c
> > +++ b/lib/librte_sched/rte_sched.c
> > @@ -128,22 +128,6 @@ enum grinder_state {
> > e_GRINDER_READ_MBUF
> > };
> >
> > -/*
> > - * Path through the scheduler hierarchy used by the scheduler enqueue
> > - * operation to identify the destination queue for the current
> > - * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
> > - * each packet, typically written by the classification stage and
> > read
> > - * by scheduler enqueue.
> > - */
> > -struct rte_sched_port_hierarchy {
> > - uint16_t queue:2; /**< Queue ID (0 .. 3) */
> > - uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
> > - uint32_t color:2; /**< Color */
> > - uint16_t unused:10;
> > - uint16_t subport; /**< Subport ID */
> > - uint32_t pipe; /**< Pipe ID */
> > -};
> > -
> > struct rte_sched_grinder {
> > /* Pipe cache */
> > uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
> > @@ -241,16 +225,12 @@ enum rte_sched_port_array {
> > e_RTE_SCHED_PORT_ARRAY_TOTAL,
> > };
> >
> > -#ifdef RTE_SCHED_COLLECT_STATS
> > -
> > static inline uint32_t
> > rte_sched_port_queues_per_subport(struct rte_sched_port *port) {
> > return RTE_SCHED_QUEUES_PER_PIPE * port-
> > >n_pipes_per_subport;
> > }
> >
> > -#endif
> > -
> > static inline uint32_t
> > rte_sched_port_queues_per_port(struct rte_sched_port *port) { @@
> > -1006,44 +986,50 @@ rte_sched_port_pipe_profile_add(struct
> > rte_sched_port *port,
> > return 0;
> > }
> >
> > +static inline uint32_t
> > +rte_sched_port_qindex(struct rte_sched_port *port,
> > + uint32_t subport,
> > + uint32_t pipe,
> > + uint32_t traffic_class,
> > + uint32_t queue)
> > +{
> > + uint32_t result;
> > +
> > + result = subport * port->n_pipes_per_subport + pipe;
>
> Since n_subports_per_pipe and n_pipes_per_subport are enforced to be
> power of 2, we should replace multiplication/division with shift left/right.
Will replace with shift operation using log2 of n_subports_per_pipe and n_pipes_per_subport.
> > + result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
> > traffic_class;
> > + result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
> > +
> > + return result;
> > +}
> > +
> > void
> > -rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> > +rte_sched_port_pkt_write(struct rte_sched_port *port,
> > + struct rte_mbuf *pkt,
> > uint32_t subport, uint32_t pipe, uint32_t traffic_class,
> > uint32_t queue, enum rte_meter_color color) {
> > - struct rte_sched_port_hierarchy *sched
> > - = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
> > -
> > - RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
> > -
> > - sched->color = (uint32_t) color;
> > - sched->subport = subport;
> > - sched->pipe = pipe;
> > - sched->traffic_class = traffic_class;
> > - sched->queue = queue;
> > + pkt->hash.sched.traffic_class = traffic_class;
> > + pkt->hash.sched.queue_id = rte_sched_port_qindex(port, subport,
> > pipe,
> > + traffic_class, queue);
> > + pkt->hash.sched.color = (uint8_t) color;
> > }
> >
> > void
> > -rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
> > +rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
> > + const struct rte_mbuf *pkt,
> > uint32_t *subport, uint32_t *pipe,
> > uint32_t *traffic_class, uint32_t *queue) {
> > - const struct rte_sched_port_hierarchy *sched
> > - = (const struct rte_sched_port_hierarchy *) &pkt-
> > >hash.sched;
> > -
> > - *subport = sched->subport;
> > - *pipe = sched->pipe;
> > - *traffic_class = sched->traffic_class;
> > - *queue = sched->queue;
> > + *subport = pkt->hash.sched.queue_id /
> > rte_sched_port_queues_per_subport(port);
> > + *pipe = pkt->hash.sched.queue_id /
> > RTE_SCHED_QUEUES_PER_PIPE;
>
> Since n_subports_per_pipe and n_pipes_per_subport are enforced to be
> power of 2, we should replace multiplication/division with shift left/right.
Will do that.
> > + *traffic_class = pkt->hash.sched.traffic_class;
> > + *queue = pkt->hash.sched.queue_id %
> > RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
>
> Since RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS is enforced to be a power of
> 2, please replace modulo with bitwise AND of
> (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1).
>
> > }
Will do that.
> > enum rte_meter_color
> > rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt) {
> > - const struct rte_sched_port_hierarchy *sched
> > - = (const struct rte_sched_port_hierarchy *) &pkt-
> > >hash.sched;
> > -
> > - return (enum rte_meter_color) sched->color;
> > + return (enum rte_meter_color) pkt->hash.sched.color;
> > }
>
> Should use the mbuf->sched.color read function to be added in rte_mbuf.h.
Yes. Will change this.
> > int
> > @@ -1100,18 +1086,6 @@ rte_sched_queue_read_stats(struct
> > rte_sched_port *port,
> > return 0;
> > }
> >
> > -static inline uint32_t
> > -rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport,
> > uint32_t pipe, uint32_t traffic_class, uint32_t queue) -{
> > - uint32_t result;
> > -
> > - result = subport * port->n_pipes_per_subport + pipe;
> > - result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
> > traffic_class;
> > - result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
> > -
> > - return result;
> > -
>
> Since n_subports_per_pipe and n_pipes_per_subport are enforced to be
> power of 2, we should replace multiplication/division with shift left/right.
Will change this as well.
> }
> > -
> > #ifdef RTE_SCHED_DEBUG
> >
> > static inline int
> > @@ -1272,11 +1246,8 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct
> > rte_sched_port *port,
> > #ifdef RTE_SCHED_COLLECT_STATS
> > struct rte_sched_queue_extra *qe;
> > #endif
> > - uint32_t subport, pipe, traffic_class, queue, qindex;
> > -
> > - rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe,
> > &traffic_class, &queue);
> > + uint32_t qindex = pkt->hash.sched.queue_id;
>
> Let's use the read function for this mbuf field.
Yes.
> >
> > - qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class,
> > queue);
> > q = port->queue + qindex;
> > rte_prefetch0(q);
> > #ifdef RTE_SCHED_COLLECT_STATS
> > diff --git a/lib/librte_sched/rte_sched.h
> > b/lib/librte_sched/rte_sched.h index 84fa896de..4d9f869eb 100644
> > --- a/lib/librte_sched/rte_sched.h
> > +++ b/lib/librte_sched/rte_sched.h
> > @@ -355,6 +355,8 @@ rte_sched_queue_read_stats(struct rte_sched_port
> > *port,
> > * Scheduler hierarchy path write to packet descriptor. Typically
> > * called by the packet classification stage.
> > *
> > + * @param port
> > + * Handle to port scheduler instance
> > * @param pkt
> > * Packet descriptor handle
> > * @param subport
> > @@ -369,7 +371,8 @@ rte_sched_queue_read_stats(struct rte_sched_port
> > *port,
> > * Packet color set
> > */
> > void
> > -rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> > +rte_sched_port_pkt_write(struct rte_sched_port *port,
> > + struct rte_mbuf *pkt,
> > uint32_t subport, uint32_t pipe, uint32_t traffic_class,
> > uint32_t queue, enum rte_meter_color color);
> >
> > @@ -379,6 +382,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> > * enqueue operation. The subport, pipe, traffic class and queue
> > * parameters need to be pre-allocated by the caller.
> > *
> > + * @param port
> > + * Handle to port scheduler instance
> > * @param pkt
> > * Packet descriptor handle
> > * @param subport
> > @@ -392,7 +397,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> > *
> > */
> > void
> > -rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
> > +rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
> > + const struct rte_mbuf *pkt,
> > uint32_t *subport, uint32_t *pipe,
> > uint32_t *traffic_class, uint32_t *queue);
> >
>
> <snip>
>
> Regards,
> Cristian
We will work on the suggested changes and send another version. Thank you, Cristian.
-----Original Message-----
> Date: Fri, 23 Nov 2018 16:54:23 +0000
> From: Jasvinder Singh <jasvinder.singh@intel.com>
> To: dev@dpdk.org
> CC: cristian.dumitrescu@intel.com, Reshma Pattan <reshma.pattan@intel.com>
> Subject: [dpdk-dev] [PATCH] mbuf: implement generic format for sched field
> X-Mailer: git-send-email 2.17.1
>
> This patch implements the changes proposed in the deprecation
> notes [1][2].
>
> The opaque mbuf->hash.sched field is updated to support generic
> definition in line with the ethdev TM and MTR APIs. The new generic
> format contains: queue ID, traffic class, color.
>
> > In addition, the following API functions of the sched library have
> > been modified with an additional parameter of type struct
> > rte_sched_port to accommodate the changes made to the mbuf sched field.
> (i) rte_sched_port_pkt_write()
> (ii) rte_sched_port_pkt_read()
>
> The other libraries, sample applications and tests which use mbuf
> sched field have been updated as well.
>
> [1] http://mails.dpdk.org/archives/dev/2018-February/090651.html
> [2] https://mails.dpdk.org/archives/dev/2018-November/119051.html
>
> Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> Signed-off-by: Reshma Pattan <reshma.pattan@intel.com>
> ---
> @@ -575,12 +575,10 @@ struct rte_mbuf {
> */
> } fdir; /**< Filter identifier if FDIR enabled */
> struct {
> - uint32_t lo;
> - uint32_t hi;
> - /**< The event eth Tx adapter uses this field
> - * to store Tx queue id.
> - * @see rte_event_eth_tx_adapter_txq_set()
> - */
> + uint32_t queue_id; /**< Queue ID. */
> + uint8_t traffic_class; /**< Traffic class ID. */
> + uint8_t color; /**< Color. */
> + uint16_t reserved; /**< Reserved. */
> } sched; /**< Hierarchical scheduler */
+Nikhil.
Currently rte_event_eth_tx_adapter_txq_set() and
rte_event_eth_tx_adapter_txq_get() implemented using
hash.sched.queue_id. How about moving out from "sched" to "txadapter"?
Something like below,
$ git diff
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 3dbc6695e..b73bbef93 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -575,13 +575,20 @@ struct rte_mbuf {
*/
} fdir; /**< Filter identifier if FDIR enabled
*/
struct {
- uint32_t lo;
- uint32_t hi;
+ uint32_t queue_id; /**< Queue ID. */
+ uint8_t traffic_class; /**< Traffic class ID. */
+ uint8_t color; /**< Color. */
+ uint16_t reserved; /**< Reserved. */
+ } sched; /**< Hierarchical scheduler */
+ struct {
+ uint32_t reserved1;
+ uint16_t reserved2;
+ uint16_t txq;
/**< The event eth Tx adapter uses this field
* to store Tx queue id.
* @see rte_event_eth_tx_adapter_txq_set()
*/
- } sched; /**< Hierarchical scheduler */
+ } txadapter; /**< Eventdev ethdev Tx adapter */
/**< User defined tags. See rte_distributor_process() */
uint32_t usr;
} hash; /**< hash information */
> rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
> {
> - uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
> + uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
> p[1] = queue;
> }
>
> @@ -320,7 +320,7 @@ rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
> static __rte_always_inline uint16_t __rte_experimental
> rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
> {
> - uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
> + uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
> return p[1];
> }
>
> >
> > > + uint8_t color; /**< Color. */
> >
> > We should create a new file rte_color.h in a common place
> > (librte_eal/common/include) to consolidate the color definition, which is
> > currently replicated in too many places, such as: rte_meter.h, rte_mtr.h,
> > rte_tm.h.
> >
> > We should include the rte_color.h file here (and in the above header files)
> >
> > We should also document the link between this field and the color enum
> type
> > (@see ...).
>
> Replacing the existing color definition with the above suggested one in
> rte_meter.h (librte_meter)
> would be an ABI break. We can do it separately through a different patch.
>
> >
We can avoid any such issues in a two step process:
1. Create aliases to the new color in rte_meter.h, rte_mtr.h and rte_tm.h at this point.
2. Announce deprecation of the existing color enums in favor of the new unified color enum at this point and remove during next release.
Example:
//file "rte_meter.h"
#include <rte_color.h>
#define rte_meter_color rte_color
#define RTE_METER_GREEN RTE_COLOR_GREEN
#define RTE_METER_YELLOW RTE_COLOR_YELLOW
#define RTE_METER_RED RTE_COLOR_RED
Makes sense?
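For reference, a minimal sketch of what rte_color.h could contain (illustrative only, final content to be settled in the follow-up patch):

/* rte_color.h - illustrative sketch only */
#ifndef _RTE_COLOR_H_
#define _RTE_COLOR_H_

/**
 * Packet color, shared by metering (rte_meter.h, rte_mtr.h) and
 * traffic management (rte_tm.h).
 */
enum rte_color {
	RTE_COLOR_GREEN = 0, /**< Green */
	RTE_COLOR_YELLOW,    /**< Yellow */
	RTE_COLOR_RED,       /**< Red */
	RTE_COLORS           /**< Number of colors */
};

#endif /* _RTE_COLOR_H_ */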
> -----Original Message-----
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Saturday, December 1, 2018 2:23 PM
> To: Singh, Jasvinder <jasvinder.singh@intel.com>
> Cc: dev@dpdk.org; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> Pattan, Reshma <reshma.pattan@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>
> Subject: Re: [dpdk-dev] [PATCH] mbuf: implement generic format for sched
> field
>
> -----Original Message-----
> > Date: Fri, 23 Nov 2018 16:54:23 +0000
> > From: Jasvinder Singh <jasvinder.singh@intel.com>
> > To: dev@dpdk.org
> > CC: cristian.dumitrescu@intel.com, Reshma Pattan
> > <reshma.pattan@intel.com>
> > Subject: [dpdk-dev] [PATCH] mbuf: implement generic format for sched
> > field
> > X-Mailer: git-send-email 2.17.1
> >
> > This patch implements the changes proposed in the deprecation notes
> > [1][2].
> >
> > The opaque mbuf->hash.sched field is updated to support generic
> > definition in line with the ethdev TM and MTR APIs. The new generic
> > format contains: queue ID, traffic class, color.
> >
> > In addition, the following API functions of the sched library have been
> > modified with an additional parameter of type struct rte_sched_port to
> > accommodate the changes made to the mbuf sched field.
> > (i) rte_sched_port_pkt_write()
> > (ii) rte_sched_port_pkt_read()
> >
> > The other libraries, sample applications and tests which use mbuf
> > sched field have been updated as well.
> >
> > [1] http://mails.dpdk.org/archives/dev/2018-February/090651.html
> > [2] https://mails.dpdk.org/archives/dev/2018-November/119051.html
> >
> > Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> > Signed-off-by: Reshma Pattan <reshma.pattan@intel.com>
> > ---
> > @@ -575,12 +575,10 @@ struct rte_mbuf {
> > */
> > } fdir; /**< Filter identifier if FDIR enabled */
> > struct {
> > - uint32_t lo;
> > - uint32_t hi;
> > - /**< The event eth Tx adapter uses this field
> > - * to store Tx queue id.
> > - * @see rte_event_eth_tx_adapter_txq_set()
> > - */
> > + uint32_t queue_id; /**< Queue ID. */
> > + uint8_t traffic_class; /**< Traffic class ID. */
> > + uint8_t color; /**< Color. */
> > + uint16_t reserved; /**< Reserved. */
> > } sched; /**< Hierarchical scheduler */
>
> +Nikhil.
>
> Currently rte_event_eth_tx_adapter_txq_set() and
> rte_event_eth_tx_adapter_txq_get() implemented using
> hash.sched.queue_id. How about moving out from "sched" to "txadapter"?
> Something like below,
>
> $ git diff
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index
> 3dbc6695e..b73bbef93 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -575,13 +575,20 @@ struct rte_mbuf {
> */
> } fdir; /**< Filter identifier if FDIR enabled */
> struct {
> - uint32_t lo;
> - uint32_t hi;
> + uint32_t queue_id; /**< Queue ID. */
> + uint8_t traffic_class; /**< Traffic class ID. */
> + uint8_t color; /**< Color. */
> + uint16_t reserved; /**< Reserved. */
> + } sched; /**< Hierarchical scheduler */
> + struct {
> + uint32_t reserved1;
> + uint16_t reserved2;
> + uint16_t txq;
> /**< The event eth Tx adapter uses this field
> * to store Tx queue id.
> * @see rte_event_eth_tx_adapter_txq_set()
> */
> - } sched; /**< Hierarchical scheduler */
> + } txadapter; /**< Eventdev ethdev Tx adapter */
> /**< User defined tags. See rte_distributor_process() */
> uint32_t usr;
> } hash; /**< hash information */
>
>
> > rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t
> > queue) {
> > - uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
> > + uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
> > p[1] = queue;
> > }
> >
> > @@ -320,7 +320,7 @@ rte_event_eth_tx_adapter_txq_set(struct rte_mbuf
> > *pkt, uint16_t queue) static __rte_always_inline uint16_t
> > __rte_experimental rte_event_eth_tx_adapter_txq_get(struct rte_mbuf
> > *pkt) {
> > - uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
> > + uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
> > return p[1];
> > }
> >
Will make this change in the next version. Thanks.
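With the txadapter struct sketched above, the adapter accessors would then become roughly (illustrative only, based on the proposed hash.txadapter.txq field):

static __rte_always_inline void __rte_experimental
rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
{
	/* Store the Tx queue ID in the proposed hash.txadapter.txq field. */
	pkt->hash.txadapter.txq = queue;
}

static __rte_always_inline uint16_t __rte_experimental
rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
{
	return pkt->hash.txadapter.txq;
}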
> -----Original Message-----
> From: Dumitrescu, Cristian
> Sent: Tuesday, December 4, 2018 5:39 PM
> To: Singh, Jasvinder <jasvinder.singh@intel.com>; dev@dpdk.org
> Cc: Pattan, Reshma <reshma.pattan@intel.com>
> Subject: RE: [PATCH] mbuf: implement generic format for sched field
>
> > >
> > > > + uint8_t color; /**< Color. */
> > >
> > > We should create a new file rte_color.h in a common place
> > > (librte_eal/common/include) to consolidate the color definition,
> > > which is currently replicated in too many places, such as:
> > > rte_meter.h, rte_mtr.h, rte_tm.h.
> > >
> > > We should include the rte_color.h file here (and in the above header
> > > files)
> > >
> > > We should also document the link between this field and the color
> > > enum
> > type
> > > (@see ...).
> >
> > Replacing the existing color definition with the above suggested one
> > in rte_meter.h (librte_meter) would be an ABI break. We can do it
> > separately through a different patch.
> >
> > >
>
> We can avoid any such issues in a two step process:
> 1. Create aliases to the new color in rte_meter.h, rte_mtr.h and rte_tm.h at
> this point.
> 2. Announce deprecation of the existing color enums in favor of the new
> unified color enum at this point and remove during next release.
>
> Example:
> //file "rte_meter.h"
> #include <rte_color.h>
>
> #define rte_meter_color rte_color
> #define RTE_METER_GREEN RTE_COLOR_GREEN
> #define RTE_METER_YELLOW RTE_COLOR_YELLOW #define RTE_METER_RED
> RTE_COLOR_RED
>
> Makes sense?
Looks ok. We will do this clean-up work in a separate patch instead of mixing it with the mbuf patch. Thanks.
> -----Original Message-----
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Saturday, December 1, 2018 2:23 PM
> To: Singh, Jasvinder <jasvinder.singh@intel.com>
> Cc: dev@dpdk.org; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> Pattan, Reshma <reshma.pattan@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>
> Subject: Re: [dpdk-dev] [PATCH] mbuf: implement generic format for sched
> field
>
> -----Original Message-----
> > Date: Fri, 23 Nov 2018 16:54:23 +0000
> > From: Jasvinder Singh <jasvinder.singh@intel.com>
> > To: dev@dpdk.org
> > CC: cristian.dumitrescu@intel.com, Reshma Pattan
> <reshma.pattan@intel.com>
> > Subject: [dpdk-dev] [PATCH] mbuf: implement generic format for sched
> field
> > X-Mailer: git-send-email 2.17.1
> >
> > This patch implements the changes proposed in the deprecation
> > notes [1][2].
> >
> > The opaque mbuf->hash.sched field is updated to support generic
> > definition in line with the ethdev TM and MTR APIs. The new generic
> > format contains: queue ID, traffic class, color.
> >
> > In addition, the following API functions of the sched library have
> > been modified with an additional parameter of type struct
> > rte_sched_port to accommodate the changes made to the mbuf sched field.
> > (i) rte_sched_port_pkt_write()
> > (ii) rte_sched_port_pkt_read()
> >
> > The other libraries, sample applications and tests which use mbuf
> > sched field have been updated as well.
> >
> > [1] http://mails.dpdk.org/archives/dev/2018-February/090651.html
> > [2] https://mails.dpdk.org/archives/dev/2018-November/119051.html
> >
> > Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> > Signed-off-by: Reshma Pattan <reshma.pattan@intel.com>
> > ---
> > @@ -575,12 +575,10 @@ struct rte_mbuf {
> > */
> > } fdir; /**< Filter identifier if FDIR enabled */
> > struct {
> > - uint32_t lo;
> > - uint32_t hi;
> > - /**< The event eth Tx adapter uses this field
> > - * to store Tx queue id.
> > - * @see rte_event_eth_tx_adapter_txq_set()
> > - */
> > + uint32_t queue_id; /**< Queue ID. */
> > + uint8_t traffic_class; /**< Traffic class ID. */
> > + uint8_t color; /**< Color. */
> > + uint16_t reserved; /**< Reserved. */
> > } sched; /**< Hierarchical scheduler */
>
> +Nikhil.
>
> Currently rte_event_eth_tx_adapter_txq_set() and
> rte_event_eth_tx_adapter_txq_get() implemented using
> hash.sched.queue_id. How about moving out from "sched" to "txadapter"?
> Something like below,
>
> $ git diff
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 3dbc6695e..b73bbef93 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -575,13 +575,20 @@ struct rte_mbuf {
> */
> } fdir; /**< Filter identifier if FDIR enabled
> */
> struct {
> - uint32_t lo;
> - uint32_t hi;
> + uint32_t queue_id; /**< Queue ID. */
> + uint8_t traffic_class; /**< Traffic class ID. */
> + uint8_t color; /**< Color. */
> + uint16_t reserved; /**< Reserved. */
> + } sched; /**< Hierarchical scheduler */
> + struct {
> + uint32_t reserved1;
> + uint16_t reserved2;
> + uint16_t txq;
> /**< The event eth Tx adapter uses this field
> * to store Tx queue id.
> * @see rte_event_eth_tx_adapter_txq_set()
> */
> - } sched; /**< Hierarchical scheduler */
> + } txadapter; /**< Eventdev ethdev Tx adapter */
> /**< User defined tags. See rte_distributor_process() */
> uint32_t usr;
> } hash; /**< hash information */
>
>
> > rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t
> queue)
> > {
> > - uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
> > + uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
> > p[1] = queue;
> > }
> >
> > @@ -320,7 +320,7 @@ rte_event_eth_tx_adapter_txq_set(struct
> rte_mbuf *pkt, uint16_t queue)
> > static __rte_always_inline uint16_t __rte_experimental
> > rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
> > {
> > - uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
> > + uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
> > return p[1];
> > }
> >
Hi Jerin,
Is there a reason why eventdev cannot use the generic 32-bit queue_id field in mbuf->hash.sched?
We can definitely do it the way you suggest, but want to check with you first.
Regards,
Cristian
@@ -73,8 +73,10 @@ app_rx_thread(struct thread_conf **confs)
for(i = 0; i < nb_rx; i++) {
get_pkt_sched(rx_mbufs[i],
&subport, &pipe, &traffic_class, &queue, &color);
- rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
- traffic_class, queue, (enum rte_meter_color) color);
+ rte_sched_port_pkt_write(conf->sched_port, rx_mbufs[i],
+ subport, pipe,
+ traffic_class, queue,
+ (enum rte_meter_color) color);
}
if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
@@ -55,6 +55,7 @@ app_main_loop(__attribute__((unused))void *dummy)
flow->rx_thread.rx_port = flow->rx_port;
flow->rx_thread.rx_ring = flow->rx_ring;
flow->rx_thread.rx_queue = flow->rx_queue;
+ flow->rx_thread.sched_port = flow->sched_port;
rx_confs[rx_idx++] = &flow->rx_thread;
@@ -300,7 +300,7 @@ rte_event_eth_tx_adapter_queue_del(uint8_t id,
static __rte_always_inline void __rte_experimental
rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
{
- uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
+ uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
p[1] = queue;
}
@@ -320,7 +320,7 @@ rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
static __rte_always_inline uint16_t __rte_experimental
rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
{
- uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
+ uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
return p[1];
}
@@ -11,7 +11,7 @@ LDLIBS += -lrte_eal -lrte_mempool
EXPORT_MAP := rte_mbuf_version.map
-LIBABIVER := 4
+LIBABIVER := 5
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c rte_mbuf_pool_ops.c
@@ -575,12 +575,10 @@ struct rte_mbuf {
*/
} fdir; /**< Filter identifier if FDIR enabled */
struct {
- uint32_t lo;
- uint32_t hi;
- /**< The event eth Tx adapter uses this field
- * to store Tx queue id.
- * @see rte_event_eth_tx_adapter_txq_set()
- */
+ uint32_t queue_id; /**< Queue ID. */
+ uint8_t traffic_class; /**< Traffic class ID. */
+ uint8_t color; /**< Color. */
+ uint16_t reserved; /**< Reserved. */
} sched; /**< Hierarchical scheduler */
/**< User defined tags. See rte_distributor_process() */
uint32_t usr;
@@ -108,12 +108,12 @@ mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
}
#define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
- ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
- ((((uint64_t)(tc)) & 0x3) << 2) | \
- ((((uint64_t)(color)) & 0x3) << 4)))
+ ((uint64_t)((((uint64_t)(queue)) & 0xffffffff) | \
+ ((((uint64_t)(tc)) & 0xff) << 32) | \
+ ((((uint64_t)(color)) & 0xff) << 40)))
#define MBUF_SCHED_COLOR(sched, color) \
- (((sched) & (~0x30LLU)) | ((color) << 4))
+ ((uint64_t)((sched) & (~0xff000000LLU)) | (((uint64_t)(color)) << 40))
struct mtr_trtcm_data {
struct rte_meter_trtcm trtcm;
@@ -176,7 +176,7 @@ mtr_data_size(struct rte_table_action_mtr_config *mtr)
struct dscp_table_entry_data {
enum rte_meter_color color;
uint16_t tc;
- uint16_t queue_tc_color;
+ uint32_t queue;
};
struct dscp_table_data {
@@ -368,7 +368,6 @@ tm_cfg_check(struct rte_table_action_tm_config *tm)
}
struct tm_data {
- uint16_t queue_tc_color;
uint16_t subport;
uint32_t pipe;
} __attribute__((__packed__));
@@ -397,26 +396,40 @@ tm_apply(struct tm_data *data,
return status;
/* Apply */
- data->queue_tc_color = 0;
data->subport = (uint16_t) p->subport_id;
data->pipe = p->pipe_id;
return 0;
}
+static uint32_t
+tm_sched_qindex(struct tm_data *data,
+ struct dscp_table_entry_data *dscp,
+ struct rte_table_action_tm_config *cfg) {
+
+ uint32_t result;
+
+ result = data->subport * cfg->n_pipes_per_subport + data->pipe;
+ result = result * RTE_TABLE_ACTION_TC_MAX + dscp->tc;
+ result = result * RTE_TABLE_ACTION_TC_QUEUE_MAX + dscp->queue;
+
+ return result;
+}
+
static __rte_always_inline void
pkt_work_tm(struct rte_mbuf *mbuf,
struct tm_data *data,
struct dscp_table_data *dscp_table,
- uint32_t dscp)
+ uint32_t dscp,
+ struct rte_table_action_tm_config *cfg)
{
struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
- struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
- struct tm_data sched;
+ uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
+ uint32_t queue = tm_sched_qindex(data, dscp_entry, cfg);
- sched = *data;
- sched.queue_tc_color = dscp_entry->queue_tc_color;
- *sched_ptr = sched;
+ *sched_ptr = MBUF_SCHED_QUEUE_TC_COLOR(queue,
+ dscp_entry->tc,
+ dscp_entry->color);
}
/**
@@ -2580,17 +2593,13 @@ rte_table_action_dscp_table_update(struct rte_table_action *action,
&action->dscp_table.entry[i];
struct rte_table_action_dscp_table_entry *entry =
&table->entry[i];
- uint16_t queue_tc_color =
- MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
- entry->tc_id,
- entry->color);
if ((dscp_mask & (1LLU << i)) == 0)
continue;
data->color = entry->color;
data->tc = entry->tc_id;
- data->queue_tc_color = queue_tc_color;
+ data->queue = entry->tc_queue_id;
}
return 0;
@@ -2882,7 +2891,8 @@ pkt_work(struct rte_mbuf *mbuf,
pkt_work_tm(mbuf,
data,
&action->dscp_table,
- dscp);
+ dscp,
+ &cfg->tm);
}
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
@@ -3108,22 +3118,26 @@ pkt4_work(struct rte_mbuf **mbufs,
pkt_work_tm(mbuf0,
data0,
&action->dscp_table,
- dscp0);
+ dscp0,
+ &cfg->tm);
pkt_work_tm(mbuf1,
data1,
&action->dscp_table,
- dscp1);
+ dscp1,
+ &cfg->tm);
pkt_work_tm(mbuf2,
data2,
&action->dscp_table,
- dscp2);
+ dscp2,
+ &cfg->tm);
pkt_work_tm(mbuf3,
data3,
&action->dscp_table,
- dscp3);
+ dscp3,
+ &cfg->tm);
}
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
@@ -18,7 +18,7 @@ LDLIBS += -lrte_timer
EXPORT_MAP := rte_sched_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
@@ -128,22 +128,6 @@ enum grinder_state {
e_GRINDER_READ_MBUF
};
-/*
- * Path through the scheduler hierarchy used by the scheduler enqueue
- * operation to identify the destination queue for the current
- * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
- * each packet, typically written by the classification stage and read
- * by scheduler enqueue.
- */
-struct rte_sched_port_hierarchy {
- uint16_t queue:2; /**< Queue ID (0 .. 3) */
- uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
- uint32_t color:2; /**< Color */
- uint16_t unused:10;
- uint16_t subport; /**< Subport ID */
- uint32_t pipe; /**< Pipe ID */
-};
-
struct rte_sched_grinder {
/* Pipe cache */
uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
@@ -241,16 +225,12 @@ enum rte_sched_port_array {
e_RTE_SCHED_PORT_ARRAY_TOTAL,
};
-#ifdef RTE_SCHED_COLLECT_STATS
-
static inline uint32_t
rte_sched_port_queues_per_subport(struct rte_sched_port *port)
{
return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
}
-#endif
-
static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
@@ -1006,44 +986,50 @@ rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
return 0;
}
+static inline uint32_t
+rte_sched_port_qindex(struct rte_sched_port *port,
+ uint32_t subport,
+ uint32_t pipe,
+ uint32_t traffic_class,
+ uint32_t queue)
+{
+ uint32_t result;
+
+ result = subport * port->n_pipes_per_subport + pipe;
+ result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
+ result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+
+ return result;
+}
+
void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+ struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class,
uint32_t queue, enum rte_meter_color color)
{
- struct rte_sched_port_hierarchy *sched
- = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
-
- sched->color = (uint32_t) color;
- sched->subport = subport;
- sched->pipe = pipe;
- sched->traffic_class = traffic_class;
- sched->queue = queue;
+ pkt->hash.sched.traffic_class = traffic_class;
+ pkt->hash.sched.queue_id = rte_sched_port_qindex(port, subport, pipe,
+ traffic_class, queue);
+ pkt->hash.sched.color = (uint8_t) color;
}
void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+ const struct rte_mbuf *pkt,
uint32_t *subport, uint32_t *pipe,
uint32_t *traffic_class, uint32_t *queue)
{
- const struct rte_sched_port_hierarchy *sched
- = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- *subport = sched->subport;
- *pipe = sched->pipe;
- *traffic_class = sched->traffic_class;
- *queue = sched->queue;
+ *subport = pkt->hash.sched.queue_id / rte_sched_port_queues_per_subport(port);
+ *pipe = pkt->hash.sched.queue_id / RTE_SCHED_QUEUES_PER_PIPE;
+ *traffic_class = pkt->hash.sched.traffic_class;
+ *queue = pkt->hash.sched.queue_id % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
}
enum rte_meter_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
- const struct rte_sched_port_hierarchy *sched
- = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- return (enum rte_meter_color) sched->color;
+ return (enum rte_meter_color) pkt->hash.sched.color;
}
int
@@ -1100,18 +1086,6 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
return 0;
}
-static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
-{
- uint32_t result;
-
- result = subport * port->n_pipes_per_subport + pipe;
- result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
- result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
-
- return result;
-}
-
#ifdef RTE_SCHED_DEBUG
static inline int
@@ -1272,11 +1246,8 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port,
#ifdef RTE_SCHED_COLLECT_STATS
struct rte_sched_queue_extra *qe;
#endif
- uint32_t subport, pipe, traffic_class, queue, qindex;
-
- rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
+ uint32_t qindex = pkt->hash.sched.queue_id;
- qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
q = port->queue + qindex;
rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
@@ -355,6 +355,8 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
* Scheduler hierarchy path write to packet descriptor. Typically
* called by the packet classification stage.
*
+ * @param port
+ * Handle to port scheduler instance
* @param pkt
* Packet descriptor handle
* @param subport
@@ -369,7 +371,8 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
* Packet color set
*/
void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+ struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class,
uint32_t queue, enum rte_meter_color color);
@@ -379,6 +382,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
* enqueue operation. The subport, pipe, traffic class and queue
* parameters need to be pre-allocated by the caller.
*
+ * @param port
+ * Handle to port scheduler instance
* @param pkt
* Packet descriptor handle
* @param subport
@@ -392,7 +397,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
*
*/
void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+ const struct rte_mbuf *pkt,
uint32_t *subport, uint32_t *pipe,
uint32_t *traffic_class, uint32_t *queue);
@@ -76,7 +76,7 @@ create_mempool(void)
}
static void
-prepare_pkt(struct rte_mbuf *mbuf)
+prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
{
struct ether_hdr *eth_hdr;
struct vlan_hdr *vlan1, *vlan2;
@@ -95,7 +95,7 @@ prepare_pkt(struct rte_mbuf *mbuf)
ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
- rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
+ rte_sched_port_pkt_write(port, mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
/* 64 byte packet */
mbuf->pkt_len = 60;
@@ -138,7 +138,7 @@ test_sched(void)
for (i = 0; i < 10; i++) {
in_mbufs[i] = rte_pktmbuf_alloc(mp);
TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
- prepare_pkt(in_mbufs[i]);
+ prepare_pkt(port, in_mbufs[i]);
}
@@ -155,7 +155,7 @@ test_sched(void)
color = rte_sched_port_pkt_read_color(out_mbufs[i]);
TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
- rte_sched_port_pkt_read_tree_path(out_mbufs[i],
+ rte_sched_port_pkt_read_tree_path(port, out_mbufs[i],
&subport, &pipe, &traffic_class, &queue);
TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");