[v2] lpm: fix unchecked return value

Message ID 20200716154920.167185-1-ruifeng.wang@arm.com (mailing list archive)
State Accepted, archived
Delegated to: David Marchand
Series [v2] lpm: fix unchecked return value

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/travis-robot success Travis build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS

Commit Message

Ruifeng Wang July 16, 2020, 3:49 p.m. UTC
  Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
By default, the defer queue is sized to hold all tbl8 groups. When enqueue
fails, return an error to the user to indicate a system issue.

Coverity issue: 360832
Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
v2:
Converted return value to conform to LPM API convention. (Vladimir)

 lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
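
For context (not part of the patch): a minimal caller-side sketch of how an application might check the status that rte_lpm_delete() can now propagate from a failed defer-queue enqueue. The wrapper name and logging are illustrative only.

#include <rte_log.h>
#include <rte_lpm.h>

/* Illustrative wrapper: with this fix, a failed RCU defer-queue enqueue
 * in tbl8_free() surfaces through rte_lpm_delete() as a negative errno
 * instead of being silently dropped.
 */
static int
remove_route(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int ret = rte_lpm_delete(lpm, ip, depth);

	if (ret < 0) {
		/* -EINVAL covers bad parameters or a missing rule; other
		 * negative values (e.g. -ENOSPC from a full defer queue)
		 * indicate a system issue.
		 */
		RTE_LOG(ERR, LPM, "LPM delete failed: %d\n", ret);
	}
	return ret;
}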
  

Comments

Vladimir Medvedkin July 17, 2020, 5:12 p.m. UTC | #1
Hi Ruifeng,

On 16/07/2020 16:49, Ruifeng Wang wrote:
> Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
> By default, the defer queue is sized to hold all tbl8 groups. When enqueue
> fails, return an error to the user to indicate a system issue.
> 
> Coverity issue: 360832
> Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
> 
> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
> v2:
> Converted return value to conform to LPM API convention. (Vladimir)
> 
>   lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------
>   1 file changed, 13 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
> index 2db9e16a2..757436f49 100644
> --- a/lib/librte_lpm/rte_lpm.c
> +++ b/lib/librte_lpm/rte_lpm.c
> @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
>   	return group_idx;
>   }
>   
> -static void
> +static int32_t
>   tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>   {
>   	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
>   	struct __rte_lpm *internal_lpm;
> +	int status;
>   
>   	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
>   	if (internal_lpm->v == NULL) {
> @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>   				__ATOMIC_RELAXED);
>   	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
>   		/* Push into QSBR defer queue. */
> -		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> +		status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
>   				(void *)&tbl8_group_start);
> +		if (status == 1) {
> +			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
> +			return -rte_errno;
> +		}
>   	}
> +
> +	return 0;
>   }
>   
>   static __rte_noinline int32_t
> @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   #define group_idx next_hop
>   	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
>   			tbl8_range, i;
> -	int32_t tbl8_recycle_index;
> +	int32_t tbl8_recycle_index, status = 0;
>   
>   	/*
>   	 * Calculate the index into tbl24 and range. Note: All depths larger
> @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   		 */
>   		lpm->tbl24[tbl24_index].valid = 0;
>   		__atomic_thread_fence(__ATOMIC_RELEASE);
> -		tbl8_free(lpm, tbl8_group_start);
> +		status = tbl8_free(lpm, tbl8_group_start);
>   	} else if (tbl8_recycle_index > -1) {
>   		/* Update tbl24 entry. */
>   		struct rte_lpm_tbl_entry new_tbl24_entry = {
> @@ -1113,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
>   				__ATOMIC_RELAXED);
>   		__atomic_thread_fence(__ATOMIC_RELEASE);
> -		tbl8_free(lpm, tbl8_group_start);
> +		status = tbl8_free(lpm, tbl8_group_start);
>   	}
>   #undef group_idx
> -	return 0;
> +	return status;

This will change the rte_lpm_delete API. As a suggestion, you can leave it
as it was before ("return 0") and send a separate patch (with "return
status") targeted at 20.11.

>   }
>   
>   /*
>
  
Ruifeng Wang July 18, 2020, 9:22 a.m. UTC | #2
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Saturday, July 18, 2020 1:12 AM
> To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson
> <bruce.richardson@intel.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com>
> Subject: Re: [PATCH v2] lpm: fix unchecked return value
> 
> Hi Ruifeng,
> 
Hi Vladimir,

> On 16/07/2020 16:49, Ruifeng Wang wrote:
> [...]
> This will change the rte_lpm_delete API. As a suggestion, you can leave it as it
> was before ("return 0") and send a separate patch (with "return status")
> targeted at 20.11.
> 

Is the change to the API because a variable is returned instead of a constant?
The patch passed the ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html
So I didn't know there was an API/ABI issue.

Thanks.
/Ruifeng
  
Vladimir Medvedkin July 21, 2020, 4:23 p.m. UTC | #3
Hi Ruifeng,

On 18/07/2020 10:22, Ruifeng Wang wrote:
> 
>> [...]
>> This will change the rte_lpm_delete API. As a suggestion, you can leave it as it
>> was before ("return 0") and send a separate patch (with "return status")
>> targeted at 20.11.
>>
> 
> Is the change to the API because a variable is returned instead of a constant?
> The patch passed the ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html
> So I didn't know there was an API/ABI issue.


Because new error status codes are returned. At the moment
rte_lpm_delete() returns only -EINVAL. After this patch it will also
return -ENOSPC. The user's code may not handle this new error status.

On the other hand, the documentation describes the return value as
"0 on success, negative value otherwise", and given that this behavior
only occurs after calling rte_lpm_rcu_qsbr_add(), I think we can accept
this patch.
Bruce, please correct me.
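
For reference, a sketch of the rte_lpm_rcu_qsbr_add() setup that enables this defer-queue path (config field names follow the 20.08 RCU integration; the sizing shown is illustrative). Only after this call can rte_lpm_delete() hit the new failure mode:

#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>

/* Sketch: enable RCU reclamation in defer-queue (DQ) mode. Only with
 * this configured can tbl8_free() reach the enqueue path whose failure
 * rte_lpm_delete() now reports.
 */
static int
enable_rcu_dq(struct rte_lpm *lpm, struct rte_rcu_qsbr *qsv,
		uint32_t num_tbl8s)
{
	struct rte_lpm_rcu_config cfg = {
		.v = qsv,			/* QSBR variable */
		.mode = RTE_LPM_QSBR_MODE_DQ,	/* reclaim via defer queue */
		/* Leaving dq_size at 0 uses the default, which is sized to
		 * hold all tbl8 groups; an undersized queue makes enqueue
		 * failures more likely.
		 */
		.dq_size = num_tbl8s,
	};

	return rte_lpm_rcu_qsbr_add(lpm, &cfg);
}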


Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
  
Bruce Richardson July 21, 2020, 5:10 p.m. UTC | #4
On Tue, Jul 21, 2020 at 05:23:02PM +0100, Medvedkin, Vladimir wrote:
> Hi Ruifeng,
> 
> On 18/07/2020 10:22, Ruifeng Wang wrote:
> > [...]
> > 
> > Is the change to the API because a variable is returned instead of a constant?
> > The patch passed the ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html
> > So I didn't know there was an API/ABI issue.
> 
> 
> Because new error status codes are returned. At the moment rte_lpm_delete()
> returns only -EINVAL. After this patch it will also return -ENOSPC. The user's
> code may not handle this new error status.
> 
> On the other hand, the documentation describes the return value as
> "0 on success, negative value otherwise", and given that this behavior
> only occurs after calling rte_lpm_rcu_qsbr_add(), I think we can accept
> this patch.
> Bruce, please correct me.
> 
That sounds reasonable to me. No change in the committed ABI, since the
specific values are not called out.
  
David Marchand July 21, 2020, 5:33 p.m. UTC | #5
On Tue, Jul 21, 2020 at 7:16 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Tue, Jul 21, 2020 at 05:23:02PM +0100, Medvedkin, Vladimir wrote:
> > [...]
> > On the other hand, the documentation describes the return value as
> > "0 on success, negative value otherwise", and given that this behavior
> > only occurs after calling rte_lpm_rcu_qsbr_add(), I think we can accept
> > this patch.
> > Bruce, please correct me.
> >
> That sounds reasonable to me. No change in the committed ABI, since the
> specific values are not called out.
>

I will take this as a second ack and merge this fix for rc2.
Thanks.
  
David Marchand July 21, 2020, 6:49 p.m. UTC | #6
On Thu, Jul 16, 2020 at 5:49 PM Ruifeng Wang <ruifeng.wang@arm.com> wrote:
>
> Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
> By default, the defer queue is sized to hold all tbl8 groups. When enqueue
> fails, return an error to the user to indicate a system issue.
>
> Coverity issue: 360832
> Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
>
> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>

Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>

Applied, thanks.
  

Patch

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 2db9e16a2..757436f49 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -532,11 +532,12 @@  tbl8_alloc(struct rte_lpm *lpm)
 	return group_idx;
 }
 
-static void
+static int32_t
 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
 	struct __rte_lpm *internal_lpm;
+	int status;
 
 	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
 	if (internal_lpm->v == NULL) {
@@ -552,9 +553,15 @@  tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 				__ATOMIC_RELAXED);
 	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
 		/* Push into QSBR defer queue. */
-		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+		status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
 				(void *)&tbl8_group_start);
+		if (status == 1) {
+			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
+			return -rte_errno;
+		}
 	}
+
+	return 0;
 }
 
 static __rte_noinline int32_t
@@ -1040,7 +1047,7 @@  delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 #define group_idx next_hop
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
 			tbl8_range, i;
-	int32_t tbl8_recycle_index;
+	int32_t tbl8_recycle_index, status = 0;
 
 	/*
 	 * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1097,7 +1104,7 @@  delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		lpm->tbl24[tbl24_index].valid = 0;
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		status = tbl8_free(lpm, tbl8_group_start);
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1113,10 +1120,10 @@  delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
 				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		status = tbl8_free(lpm, tbl8_group_start);
 	}
 #undef group_idx
-	return 0;
+	return status;
 }
 
 /*