lpm: fix unchecked return value

Message ID 20200716051903.94195-1-ruifeng.wang@arm.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series lpm: fix unchecked return value

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/travis-robot success Travis build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS

Commit Message

Ruifeng Wang July 16, 2020, 5:19 a.m. UTC
  Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
By default, the defer queue is big enough to hold all tbl8 groups, so when
enqueue fails, return an error to the user to indicate a system issue.

Coverity issue: 360832
Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/librte_lpm/rte_lpm.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
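
For context: the defer queue referred to above is created when RCU is
enabled on the LPM table. A minimal sketch of enabling the DQ reclamation
mode follows (function and field names as in the DPDK 20.08 RCU
integration; the exact config fields are an assumption, check rte_lpm.h
for your release). Passing dq_size = 0 is assumed to select the default
sizing, which per the commit message covers all tbl8 groups:

#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>

static int
enable_lpm_rcu(struct rte_lpm *lpm, struct rte_rcu_qsbr *qsbr)
{
	struct rte_lpm_rcu_config rcu_cfg = {
		.v = qsbr,                    /* initialized QSBR variable */
		.mode = RTE_LPM_QSBR_MODE_DQ, /* reclaim tbl8s via defer queue */
		.dq_size = 0,                 /* 0: default, holds all tbl8 groups */
	};

	/* 0 on success; on failure rte_errno is set. */
	return rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
}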
  

Comments

Vladimir Medvedkin July 16, 2020, 10:59 a.m. UTC | #1
Hi Ruifeng,

On 16/07/2020 06:19, Ruifeng Wang wrote:
> Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
> By default, the defer queue is big enough to hold all tbl8 groups, so when
> enqueue fails, return an error to the user to indicate a system issue.
> 
> Coverity issue: 360832
> Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
> 
> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
>   lib/librte_lpm/rte_lpm.c | 16 +++++++++++-----
>   1 file changed, 11 insertions(+), 5 deletions(-)
> 
> diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
> index 2db9e16a2..a6d3a7894 100644
> --- a/lib/librte_lpm/rte_lpm.c
> +++ b/lib/librte_lpm/rte_lpm.c
> @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
>   	return group_idx;
>   }
>   
> -static void
> +static int
>   tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>   {
>   	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
>   	struct __rte_lpm *internal_lpm;
> +	int rc = 0;
>   
>   	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
>   	if (internal_lpm->v == NULL) {
> @@ -552,9 +553,13 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>   				__ATOMIC_RELAXED);
>   	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
>   		/* Push into QSBR defer queue. */
> -		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> +		rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
>   				(void *)&tbl8_group_start);

On failure rte_rcu_qsbr_dq_enqueue() returns 1 and sets rte_errno.
Consequently, the rc value is propagated through delete_depth_big() to
rte_lpm_delete(), so on failure the latter returns 1, which conflicts
with the LPM API contract:
"0 on success, negative value otherwise"
I would suggest returning -rte_errno here when rc is equal to 1.
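
A minimal sketch of the suggested mapping, as a hypothetical variant of
the tbl8_free() hunk above (not the merged code; rte_errno requires
rte_errno.h):

	rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
			(void *)&tbl8_group_start);
	if (rc != 0) {
		RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
		rc = -rte_errno; /* map the 1 failure code to a negative errno */
	}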


> +		if (rc != 0)
> +			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
>   	}
> +
> +	return rc;
>   }
>   
>   static __rte_noinline int32_t
> @@ -1041,6 +1046,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
>   			tbl8_range, i;
>   	int32_t tbl8_recycle_index;
> +	int rc = 0;
>   
>   	/*
>   	 * Calculate the index into tbl24 and range. Note: All depths larger
> @@ -1097,7 +1103,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   		 */
>   		lpm->tbl24[tbl24_index].valid = 0;
>   		__atomic_thread_fence(__ATOMIC_RELEASE);
> -		tbl8_free(lpm, tbl8_group_start);
> +		rc = tbl8_free(lpm, tbl8_group_start);
>   	} else if (tbl8_recycle_index > -1) {
>   		/* Update tbl24 entry. */
>   		struct rte_lpm_tbl_entry new_tbl24_entry = {
> @@ -1113,10 +1119,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
>   				__ATOMIC_RELAXED);
>   		__atomic_thread_fence(__ATOMIC_RELEASE);
> -		tbl8_free(lpm, tbl8_group_start);
> +		rc = tbl8_free(lpm, tbl8_group_start);
>   	}
>   #undef group_idx
> -	return 0;
> +	return (int32_t)rc;
>   }
>   
>   /*
>
  
Ruifeng Wang July 16, 2020, 2:43 p.m. UTC | #2
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Thursday, July 16, 2020 7:00 PM
> To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson
> <bruce.richardson@intel.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com>
> Subject: Re: [PATCH] lpm: fix unchecked return value
> 
> Hi Ruifeng,
> 
> On 16/07/2020 06:19, Ruifeng Wang wrote:
> > Coverity complains about the unchecked return value of
> > rte_rcu_qsbr_dq_enqueue.
> > By default, the defer queue is big enough to hold all tbl8 groups,
> > so when enqueue fails, return an error to the user to indicate a
> > system issue.
> >
> > Coverity issue: 360832
> > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
> >
> > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > ---
> >   lib/librte_lpm/rte_lpm.c | 16 +++++++++++-----
> >   1 file changed, 11 insertions(+), 5 deletions(-)
> >
> > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index
> > 2db9e16a2..a6d3a7894 100644
> > --- a/lib/librte_lpm/rte_lpm.c
> > +++ b/lib/librte_lpm/rte_lpm.c
> > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
> >   	return group_idx;
> >   }
> >
> > -static void
> > +static int
> >   tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
> >   {
> >   	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
> >   	struct __rte_lpm *internal_lpm;
> > +	int rc = 0;
> >
> >   	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
> >   	if (internal_lpm->v == NULL) {
> > @@ -552,9 +553,13 @@ tbl8_free(struct rte_lpm *lpm, uint32_t
> tbl8_group_start)
> >   				__ATOMIC_RELAXED);
> >   	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
> >   		/* Push into QSBR defer queue. */
> > -		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> > +		rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> >   				(void *)&tbl8_group_start);
> 
> On failure rte_rcu_qsbr_dq_enqueue() returns 1 and sets rte_errno.
> Consequently, the rc value is propagated through delete_depth_big() to
> rte_lpm_delete(), so on failure the latter returns 1, which conflicts
> with the LPM API contract:
> "0 on success, negative value otherwise"
> I would suggest returning -rte_errno here when rc is equal to 1.
> 
Yes, the return value is a little different from the other LPM APIs.
I will change it in the next version to keep it consistent with the rest
of the LPM API.

Thanks.
/Ruifeng
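
For callers, the net effect of the change agreed above is that
rte_lpm_delete() can report a reclamation failure as a negative value,
per the LPM API contract quoted earlier. A minimal, hypothetical sketch
of defensive handling on the application side (remove_route() and the
log text are illustrative):

#include <rte_errno.h>
#include <rte_lpm.h>

static void
remove_route(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	/* LPM API: 0 on success, negative value otherwise. */
	int ret = rte_lpm_delete(lpm, ip, depth);

	if (ret < 0)
		RTE_LOG(ERR, LPM, "rte_lpm_delete: %d (rte_errno %d)\n",
				ret, rte_errno);
}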
> 
> > +		if (rc != 0)
> > +			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
> >   	}
> > +
> > +	return rc;
> >   }
> >
> >   static __rte_noinline int32_t
> > @@ -1041,6 +1046,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t
> ip_masked,
> >   	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start,
> tbl8_index,
> >   			tbl8_range, i;
> >   	int32_t tbl8_recycle_index;
> > +	int rc = 0;
> >
> >   	/*
> >   	 * Calculate the index into tbl24 and range. Note: All depths
> > larger @@ -1097,7 +1103,7 @@ delete_depth_big(struct rte_lpm *lpm,
> uint32_t ip_masked,
> >   		 */
> >   		lpm->tbl24[tbl24_index].valid = 0;
> >   		__atomic_thread_fence(__ATOMIC_RELEASE);
> > -		tbl8_free(lpm, tbl8_group_start);
> > +		rc = tbl8_free(lpm, tbl8_group_start);
> >   	} else if (tbl8_recycle_index > -1) {
> >   		/* Update tbl24 entry. */
> >   		struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10
> +1119,10
> > @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
> >   		__atomic_store(&lpm->tbl24[tbl24_index],
> &new_tbl24_entry,
> >   				__ATOMIC_RELAXED);
> >   		__atomic_thread_fence(__ATOMIC_RELEASE);
> > -		tbl8_free(lpm, tbl8_group_start);
> > +		rc = tbl8_free(lpm, tbl8_group_start);
> >   	}
> >   #undef group_idx
> > -	return 0;
> > +	return (int32_t)rc;
> >   }
> >
> >   /*
> >
> 
> --
> Regards,
> Vladimir
  

Patch

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 2db9e16a2..a6d3a7894 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -532,11 +532,12 @@  tbl8_alloc(struct rte_lpm *lpm)
 	return group_idx;
 }
 
-static void
+static int
 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
 	struct __rte_lpm *internal_lpm;
+	int rc = 0;
 
 	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
 	if (internal_lpm->v == NULL) {
@@ -552,9 +553,13 @@  tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 				__ATOMIC_RELAXED);
 	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
 		/* Push into QSBR defer queue. */
-		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+		rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
 				(void *)&tbl8_group_start);
+		if (rc != 0)
+			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
 	}
+
+	return rc;
 }
 
 static __rte_noinline int32_t
@@ -1041,6 +1046,7 @@  delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
 			tbl8_range, i;
 	int32_t tbl8_recycle_index;
+	int rc = 0;
 
 	/*
 	 * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1097,7 +1103,7 @@  delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		lpm->tbl24[tbl24_index].valid = 0;
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		rc = tbl8_free(lpm, tbl8_group_start);
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1113,10 +1119,10 @@  delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
 				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		rc = tbl8_free(lpm, tbl8_group_start);
 	}
 #undef group_idx
-	return 0;
+	return (int32_t)rc;
 }
 
 /*
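
One way to read the failure mode addressed here: defer queue entries can
only be reclaimed after readers report quiescence, so even a queue sized
for all tbl8 groups can fill up if data-plane threads stall. A minimal,
hypothetical reader loop showing the quiescence reporting that lets
deferred tbl8 groups be reclaimed (qsbr, lpm, thread_id and ip are
assumed to be set up elsewhere):

#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>

static void
reader_loop(struct rte_lpm *lpm, struct rte_rcu_qsbr *qsbr,
		unsigned int thread_id, uint32_t ip)
{
	uint32_t next_hop;

	(void)rte_rcu_qsbr_thread_register(qsbr, thread_id);
	rte_rcu_qsbr_thread_online(qsbr, thread_id);

	for (;;) { /* run until the application stops this lcore */
		(void)rte_lpm_lookup(lpm, ip, &next_hop);
		/* Without this, deferred tbl8 groups are never reclaimed
		 * and the defer queue eventually fills. */
		rte_rcu_qsbr_quiescent(qsbr, thread_id);
	}
}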