[v2,2/4] sched: Always enable stats in HQoS library.
Checks
Commit Message
Removed "RTE_SCHED_COLLECT_STATS" flag from HQoS.
Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
---
lib/sched/rte_sched.c | 12 ------------
1 file changed, 12 deletions(-)
Comments
> -----Original Message-----
> From: Ajmera, Megha <megha.ajmera@intel.com>
> Sent: Friday, February 18, 2022 9:37 AM
> To: dev@dpdk.org; Singh, Jasvinder <jasvinder.singh@intel.com>;
> Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> thomas@monjalon.net; david.marchand@redhat.com
> Subject: [PATCH v2 2/4] sched: Always enable stats in HQoS library.
>
> Removed "RTE_SCHED_COLLECT_STATS" flag from HQoS.
>
> Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
> ---
> lib/sched/rte_sched.c | 12 ------------
> 1 file changed, 12 deletions(-)
>
> diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
> index 6c3e3bb0bf..6f2d85edc0 100644
> --- a/lib/sched/rte_sched.c
> +++ b/lib/sched/rte_sched.c
> @@ -1790,8 +1790,6 @@ rte_sched_port_queue_is_empty(struct
> rte_sched_subport *subport,
>
> #endif /* RTE_SCHED_DEBUG */
>
> -#ifdef RTE_SCHED_COLLECT_STATS
> -
> static inline void
> rte_sched_port_update_subport_stats(struct rte_sched_port *port,
> struct rte_sched_subport *subport,
> @@ -1849,8 +1847,6 @@
> rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport
> *subport,
> #endif
> }
>
> -#endif /* RTE_SCHED_COLLECT_STATS */
> -
> #ifdef RTE_SCHED_CMAN
>
> static inline int
> @@ -1989,18 +1985,14 @@
> rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport
> *subport,
> struct rte_mbuf *pkt, uint32_t subport_qmask)
> {
> struct rte_sched_queue *q;
> -#ifdef RTE_SCHED_COLLECT_STATS
> struct rte_sched_queue_extra *qe;
> -#endif
> uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
> uint32_t subport_queue_id = subport_qmask & qindex;
>
> q = subport->queue + subport_queue_id;
> rte_prefetch0(q);
> -#ifdef RTE_SCHED_COLLECT_STATS
> qe = subport->queue_extra + subport_queue_id;
> rte_prefetch0(qe);
> -#endif
>
> return subport_queue_id;
> }
> @@ -2042,12 +2034,10 @@ rte_sched_port_enqueue_qwa(struct
> rte_sched_port *port,
> if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex,
> qlen) ||
> (qlen >= qsize))) {
> rte_pktmbuf_free(pkt);
> -#ifdef RTE_SCHED_COLLECT_STATS
> rte_sched_port_update_subport_stats_on_drop(port,
> subport,
> qindex, pkt, qlen < qsize);
> rte_sched_port_update_queue_stats_on_drop(subport,
> qindex, pkt,
> qlen < qsize);
> -#endif
> return 0;
> }
>
> @@ -2059,10 +2049,8 @@ rte_sched_port_enqueue_qwa(struct
> rte_sched_port *port,
> rte_bitmap_set(subport->bmp, qindex);
>
> /* Statistics */
> -#ifdef RTE_SCHED_COLLECT_STATS
> rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
> rte_sched_port_update_queue_stats(subport, qindex, pkt);
> -#endif
>
> return 1;
> }
> --
> 2.25.1
Please adjust the patch title to meet the requirements:
- start with a verb
- do not start with an uppercase letter
Also, please do not mention HQoS anywhere (title, body, ...); the name of the library is sched, not HQoS.
Title proposal:
sched: enable statistics unconditionally
@@ -1790,8 +1790,6 @@ rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
#endif /* RTE_SCHED_DEBUG */
-#ifdef RTE_SCHED_COLLECT_STATS
-
static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port,
struct rte_sched_subport *subport,
@@ -1849,8 +1847,6 @@ rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
#endif
}
-#endif /* RTE_SCHED_COLLECT_STATS */
-
#ifdef RTE_SCHED_CMAN
static inline int
@@ -1989,18 +1985,14 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
struct rte_mbuf *pkt, uint32_t subport_qmask)
{
struct rte_sched_queue *q;
-#ifdef RTE_SCHED_COLLECT_STATS
struct rte_sched_queue_extra *qe;
-#endif
uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
uint32_t subport_queue_id = subport_qmask & qindex;
q = subport->queue + subport_queue_id;
rte_prefetch0(q);
-#ifdef RTE_SCHED_COLLECT_STATS
qe = subport->queue_extra + subport_queue_id;
rte_prefetch0(qe);
-#endif
return subport_queue_id;
}
@@ -2042,12 +2034,10 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
(qlen >= qsize))) {
rte_pktmbuf_free(pkt);
-#ifdef RTE_SCHED_COLLECT_STATS
rte_sched_port_update_subport_stats_on_drop(port, subport,
qindex, pkt, qlen < qsize);
rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
qlen < qsize);
-#endif
return 0;
}
@@ -2059,10 +2049,8 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
rte_bitmap_set(subport->bmp, qindex);
/* Statistics */
-#ifdef RTE_SCHED_COLLECT_STATS
rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
rte_sched_port_update_queue_stats(subport, qindex, pkt);
-#endif
return 1;
}