sched: fix port time rounding error

Message ID 20200416084821.12591-1-alan.dewar@att.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: sched: fix port time rounding error

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-intel-Performance fail Performance Testing issues
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed
ci/iol-testing success Testing PASS
ci/Intel-compilation success Compilation OK

Commit Message

Alan Dewar April 16, 2020, 8:48 a.m. UTC
  From: Alan Dewar <alan.dewar@att.com>

The QoS scheduler works off a port time that is computed from the number
of CPU cycles that have elapsed since the last time the port was
polled. It divides the number of elapsed cycles to calculate how
many bytes can be sent; however, this division can generate rounding
errors, in which some fraction of a byte may be lost.

Lose enough of these fractional bytes and the QoS scheduler
underperforms.  The problem is worse at low bandwidths.

To compensate for this rounding error, this fix does not advance the
port's time_cpu_cycles by the number of cycles that have elapsed,
but instead by the computed number of bytes that can be sent (which
has been rounded down) multiplied by the number of cycles per byte.
This means that the port's time_cpu_cycles will momentarily lag
behind the CPU cycle counter; at the next poll, the lag is taken
into account.

Fixes: de3cfa2c98 ("sched: initial import")

Signed-off-by: Alan Dewar <alan.dewar@att.com>
---
 lib/librte_sched/rte_sched.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
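
In effect, the patch changes the time-resync arithmetic as sketched below. This is a minimal standalone illustration of the before/after logic with simplified names, a plain division standing in for rte_reciprocal_divide(), and an assumed fixed-point shift of 8 (RTE_SCHED_TIME_SHIFT); it is not the actual DPDK code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TIME_SHIFT 8	/* fixed-point fraction bits; assumed to match
			 * RTE_SCHED_TIME_SHIFT in rte_sched.c */

/* Old behaviour: port time jumps to "now", discarding the fractional
 * remainder of the bytes division. */
static uint64_t
resync_old(uint64_t *time_cpu_cycles, uint64_t now, uint64_t cycles_per_byte)
{
	uint64_t cycles_diff = now - *time_cpu_cycles;
	uint64_t bytes_diff = (cycles_diff << TIME_SHIFT) / cycles_per_byte;

	*time_cpu_cycles = now;
	return bytes_diff;
}

/* New behaviour: port time advances only by the cycles actually
 * converted into whole bytes; the remainder carries into the next poll. */
static uint64_t
resync_new(uint64_t *time_cpu_cycles, uint64_t now, uint64_t cycles_per_byte)
{
	uint64_t cycles_diff = now - *time_cpu_cycles;
	uint64_t bytes_diff = (cycles_diff << TIME_SHIFT) / cycles_per_byte;

	*time_cpu_cycles += (bytes_diff * cycles_per_byte) >> TIME_SHIFT;
	return bytes_diff;
}

int main(void)
{
	uint64_t cycles_per_byte = (3 << TIME_SHIFT) + 128; /* 3.5 cycles/byte */
	uint64_t t_old = 0, t_new = 0;
	uint64_t bytes;

	bytes = resync_old(&t_old, 10, cycles_per_byte);
	printf("old: %" PRIu64 " bytes, port clock %" PRIu64 "\n", bytes, t_old);
	bytes = resync_new(&t_new, 10, cycles_per_byte);
	printf("new: %" PRIu64 " bytes, port clock %" PRIu64 "\n", bytes, t_new);
	return 0;
}

At 3.5 cycles per byte, 10 elapsed cycles round down to 2 bytes. The old code consumes all 10 cycles for those 2 bytes, silently dropping 3 cycles of credit; the new code advances the port clock by only (2 * 896) >> 8 = 7 cycles and leaves the remaining 3 for the next poll.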
  

Comments

Cristian Dumitrescu April 17, 2020, 9:19 p.m. UTC | #1
> -----Original Message-----
> From: alangordondewar@gmail.com <alangordondewar@gmail.com>
> Sent: Thursday, April 16, 2020 9:48 AM
> To: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>
> Cc: dev@dpdk.org; Alan Dewar <alan.dewar@att.com>
> Subject: [PATCH] sched: fix port time rounding error
>
> [... full patch quoted; identical to the commit message and diff shown above ...]

Adding Jasvinder.
  
Jasvinder Singh April 20, 2020, 11:23 a.m. UTC | #2
> -----Original Message-----
> From: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>
> Sent: Friday, April 17, 2020 10:19 PM
> To: alangordondewar@gmail.com
> Cc: dev@dpdk.org; Alan Dewar <alan.dewar@att.com>; Singh, Jasvinder
> <jasvinder.singh@intel.com>
> Subject: RE: [PATCH] sched: fix port time rounding error
>
> [... quoted patch snipped down to the hunk under discussion ...]
>
> > @@ -2673,20 +2675,26 @@ static inline void
> > rte_sched_port_time_resync(struct rte_sched_port *port)  {
> >  	uint64_t cycles = rte_get_tsc_cycles();
> > -	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
> > +	uint64_t cycles_diff;
> >  	uint64_t bytes_diff;
> >  	uint32_t i;
> >
> > +	if (cycles < port->time_cpu_cycles)
> > +		goto end;

The above check seems redundant, as port->time_cpu_cycles will always be less than the current cycles due to the round-off in the previous iteration.


> [... remainder of quoted patch snipped ...]
  
Dewar, Alan April 21, 2020, 8:21 a.m. UTC | #3
> -----Original Message-----
> From: Singh, Jasvinder <jasvinder.singh@intel.com> 
> Sent: Monday, April 20, 2020 12:23 PM
> To: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>; alangordondewar@gmail.com
> Cc: dev@dpdk.org; Alan Dewar <alan.dewar@att.com>
> Subject: RE: [PATCH] sched: fix port time rounding error
>
> [... quoted thread snipped down to the check under discussion ...]
>
> > > +	if (cycles < port->time_cpu_cycles)
> > > +		goto end;
> 
> The above check seems redundant, as port->time_cpu_cycles will always be less than the current cycles due to the round-off in the previous iteration.
> 

This was to catch the condition where the cycle count wraps back to zero (after 100+ years, depending on clock speed).
Rather than just going to the end: label, the conditional should at least reset port->time_cpu_cycles back to zero.
So there would be a very temporary glitch in accuracy once every 100+ years.
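
A sketch of one way to implement that suggestion (illustrative only, not the resubmitted patch; unlikely() is DPDK's branch-prediction hint from rte_branch_prediction.h):

	if (unlikely(cycles < port->time_cpu_cycles)) {
		/* TSC wrapped around zero: resynchronise rather than
		 * stall, accepting a one-off loss of the fractional-byte
		 * carry accumulated so far. */
		port->time_cpu_cycles = 0;
	}
	cycles_diff = cycles - port->time_cpu_cycles;

With the reset in place, the next cycles_diff is simply the post-wrap counter value, so scheduling resumes immediately instead of stalling until the counter catches up with the stale port time.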

> [... remainder of quoted patch snipped ...]
  
Thomas Monjalon June 24, 2020, 10:50 p.m. UTC | #4
Jasvinder, what is the conclusion of this patch?

21/04/2020 10:21, Dewar, Alan:
> From: Singh, Jasvinder <jasvinder.singh@intel.com> 
> [... quoted patch snipped down to the check under discussion ...]
> > > > +	if (cycles < port->time_cpu_cycles)
> > > > +		goto end;
> > 
> > Above check seems redundant as port->time_cpu_cycles will always be less than the current cycles due to roundoff in previous iteration.
> > 
> 
> This was to catch the condition where the cycles wraps back to zero (after 100+ years?? depending on clock speed).  
> Rather than just going to end: the conditional should at least reset port->time_cpu_cycles back to zero.
> So there would be a very temporary glitch in accuracy once every 100+ years.
  
Jasvinder Singh June 25, 2020, 8:32 a.m. UTC | #5
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Wednesday, June 24, 2020 11:50 PM
> To: Singh, Jasvinder <jasvinder.singh@intel.com>
> Cc: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>;
> 'alangordondewar@gmail.com' <alangordondewar@gmail.com>;
> dev@dpdk.org; 'Alan Dewar' <alan.dewar@att.com>; Dewar, Alan
> <alan.dewar@intl.att.com>
> Subject: Re: [dpdk-dev] [PATCH] sched: fix port time rounding error
> 
> Jasvinder, what is the conclusion of this patch?
> 
> [... quoted thread snipped; see the wrap-around discussion above ...]
>

Alan, could you please resubmit the patch with the above change? Other than that, the patch looks good to me.
  
Alan Dewar June 25, 2020, 8:40 a.m. UTC | #6
Okay, will do.

On Thu, Jun 25, 2020 at 9:32 AM Singh, Jasvinder
<jasvinder.singh@intel.com> wrote:
>
> [... quoted thread snipped ...]
>
> Alan, could you please resubmit the patch with the above change? Other than that, the patch looks good to me.
>
  

Patch

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index c0983ddda..c656dba2d 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -222,6 +222,7 @@  struct rte_sched_port {
 	uint64_t time_cpu_bytes;      /* Current CPU time measured in bytes */
 	uint64_t time;                /* Current NIC TX time measured in bytes */
 	struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
+	uint64_t cycles_per_byte;
 
 	/* Grinders */
 	struct rte_mbuf **pkts_out;
@@ -852,6 +853,7 @@  rte_sched_port_config(struct rte_sched_port_params *params)
 	cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
 		/ params->rate;
 	port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
+	port->cycles_per_byte = cycles_per_byte;
 
 	/* Grinders */
 	port->pkts_out = NULL;
@@ -2673,20 +2675,26 @@  static inline void
 rte_sched_port_time_resync(struct rte_sched_port *port)
 {
 	uint64_t cycles = rte_get_tsc_cycles();
-	uint64_t cycles_diff = cycles - port->time_cpu_cycles;
+	uint64_t cycles_diff;
 	uint64_t bytes_diff;
 	uint32_t i;
 
+	if (cycles < port->time_cpu_cycles)
+		goto end;
+
+	cycles_diff = cycles - port->time_cpu_cycles;
 	/* Compute elapsed time in bytes */
 	bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
 					   port->inv_cycles_per_byte);
 
 	/* Advance port time */
-	port->time_cpu_cycles = cycles;
+	port->time_cpu_cycles +=
+		(bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
 	port->time_cpu_bytes += bytes_diff;
 	if (port->time < port->time_cpu_bytes)
 		port->time = port->time_cpu_bytes;
 
+end:
 	/* Reset pipe loop detection */
 	for (i = 0; i < port->n_subports_per_port; i++)
 		port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;