[15/15] net/dpaa: fix buffer free in slow path

Message ID 20220928052516.1279442-16-g.singh@nxp.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series DPAA and DPAA2 driver changes

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/intel-Testing success Testing PASS

Commit Message

Gagandeep Singh Sept. 28, 2022, 5:25 a.m. UTC
  Add a check in the slow path to free only those buffers
which are not external.

Fixes: 9124e65dd3eb ("net/dpaa: enable Tx queue taildrop")
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/net/dpaa/dpaa_rxtx.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)
  

Comments

Ferruh Yigit Oct. 5, 2022, 2:21 p.m. UTC | #1
On 9/28/2022 6:25 AM, Gagandeep Singh wrote:
> Add a check in the slow path to free only those buffers
> which are not external.
> 

Can you please explain what the error was before the fix, i.e. what was
happening when you tried to free all mbufs?

Also, it seems the previous logic was different, with 'prev_seg' etc.; can
you explain what changed there and why?

> Fixes: 9124e65dd3eb ("net/dpaa: enable Tx queue taildrop")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
>   drivers/net/dpaa/dpaa_rxtx.c | 23 ++++++++---------------
>   1 file changed, 8 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
> index 4d285b4f38..ce4f3d6c85 100644
> --- a/drivers/net/dpaa/dpaa_rxtx.c
> +++ b/drivers/net/dpaa/dpaa_rxtx.c
> @@ -455,7 +455,7 @@ dpaa_free_mbuf(const struct qm_fd *fd)
>   	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
>   	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
>   	if (unlikely(format == qm_fd_sg)) {
> -		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
> +		struct rte_mbuf *first_seg, *cur_seg;
>   		struct qm_sg_entry *sgt, *sg_temp;
>   		void *vaddr, *sg_vaddr;
>   		int i = 0;
> @@ -469,32 +469,25 @@ dpaa_free_mbuf(const struct qm_fd *fd)
>   		sgt = vaddr + fd_offset;
>   		sg_temp = &sgt[i++];
>   		hw_sg_to_cpu(sg_temp);
> -		temp = (struct rte_mbuf *)
> -			((char *)vaddr - bp_info->meta_data_size);
>   		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
>   						qm_sg_entry_get64(sg_temp));
> -
>   		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
>   						bp_info->meta_data_size);
>   		first_seg->nb_segs = 1;
> -		prev_seg = first_seg;
>   		while (i < DPAA_SGT_MAX_ENTRIES) {
>   			sg_temp = &sgt[i++];
>   			hw_sg_to_cpu(sg_temp);
> -			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> +			if (sg_temp->bpid != 0xFF) {
> +				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
> +				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
>   						qm_sg_entry_get64(sg_temp));
> -			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> +				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
>   						      bp_info->meta_data_size);
> -			first_seg->nb_segs += 1;
> -			prev_seg->next = cur_seg;
> -			if (sg_temp->final) {
> -				cur_seg->next = NULL;
> -				break;
> +				rte_pktmbuf_free_seg(cur_seg);
>   			}
> -			prev_seg = cur_seg;
> +			if (sg_temp->final)
> +				break;
>   		}
> -
> -		rte_pktmbuf_free_seg(temp);
>   		rte_pktmbuf_free_seg(first_seg);
>   		return 0;
>   	}
  
Gagandeep Singh Oct. 6, 2022, 8:51 a.m. UTC | #2
Hi,

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Wednesday, October 5, 2022 7:52 PM
> To: Gagandeep Singh <G.Singh@nxp.com>; dev@dpdk.org
> Cc: stable@dpdk.org
> Subject: Re: [PATCH 15/15] net/dpaa: fix buffer free in slow path
> 
> On 9/28/2022 6:25 AM, Gagandeep Singh wrote:
> > Add a check in the slow path to free only those buffers which are not
> > external.
> >
> 
> Can you please explain what the error was before the fix, i.e. what was
> happening when you tried to free all mbufs?
> 
> Also, it seems the previous logic was different, with 'prev_seg' etc.; can
> you explain what changed there and why?
> 
Actually, there were two issues. This function was converting all the
segments present in the HW frame descriptor (FD) into an mbuf SG list by
looping over the segments in the FD, and at the end it freed only one
segment by calling the API rte_pktmbuf_free_seg(), so the memory of the
other segments was leaked.
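
In other words, the old code built an mbuf chain but then released it with
the single-segment API. A minimal sketch of the leak, not the driver code,
assuming an already initialized pktmbuf pool:

#include <rte_mbuf.h>

/* rte_pktmbuf_free_seg() releases exactly one segment and ignores
 * m->next; rte_pktmbuf_free() is the variant that walks the chain.
 * The old slow path built a chain but called the former on its head. */
static void leak_sketch(struct rte_mempool *pool)
{
	struct rte_mbuf *m1 = rte_pktmbuf_alloc(pool);	/* segment 1 */
	struct rte_mbuf *m2 = rte_pktmbuf_alloc(pool);	/* segment 2 */

	m1->next = m2;
	m1->nb_segs = 2;

	rte_pktmbuf_free_seg(m1);	/* frees segment 1 only; m2 is leaked */
	/* rte_pktmbuf_free(m1) here instead would free both segments */
}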

Now, with this change, we loop over each segment in the FD and, if the
segment has a valid buffer pool id (HW pool id), free that segment inside
the loop itself, without converting to an mbuf list. If we freed all the
buffers, including those with an invalid HW bpid (which can only be the
external buffer case), there could be a double free, because all the
external buffer free handling is done by the Xmit function.
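
The essence of the new loop, condensed from the patch below; bpid 0xFF
marks an SG entry whose buffer is not from a HW pool (the external buffer
case), so it is deliberately skipped here:

	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		if (sg_temp->bpid != 0xFF) {
			/* HW-pool-backed segment: safe to free here */
			bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
					qm_sg_entry_get64(sg_temp));
			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
					bp_info->meta_data_size);
			rte_pktmbuf_free_seg(cur_seg);
		}
		/* bpid == 0xFF: external buffer, freed by the Xmit path */
		if (sg_temp->final)	/* last entry of the SG table */
			break;
	}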

> > Fixes: 9124e65dd3eb ("net/dpaa: enable Tx queue taildrop")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > ---
> >   drivers/net/dpaa/dpaa_rxtx.c | 23 ++++++++---------------
> >   1 file changed, 8 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
> > index 4d285b4f38..ce4f3d6c85 100644
> > --- a/drivers/net/dpaa/dpaa_rxtx.c
> > +++ b/drivers/net/dpaa/dpaa_rxtx.c
> > @@ -455,7 +455,7 @@ dpaa_free_mbuf(const struct qm_fd *fd)
> >  	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
> >  	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
> >  	if (unlikely(format == qm_fd_sg)) {
> > -		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
> > +		struct rte_mbuf *first_seg, *cur_seg;
> >  		struct qm_sg_entry *sgt, *sg_temp;
> >  		void *vaddr, *sg_vaddr;
> >  		int i = 0;
> > @@ -469,32 +469,25 @@ dpaa_free_mbuf(const struct qm_fd *fd)
> >  		sgt = vaddr + fd_offset;
> >  		sg_temp = &sgt[i++];
> >  		hw_sg_to_cpu(sg_temp);
> > -		temp = (struct rte_mbuf *)
> > -			((char *)vaddr - bp_info->meta_data_size);
> >  		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> >  						qm_sg_entry_get64(sg_temp));
> > -
> >  		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> >  						bp_info->meta_data_size);
> >  		first_seg->nb_segs = 1;
> > -		prev_seg = first_seg;
> >  		while (i < DPAA_SGT_MAX_ENTRIES) {
> >  			sg_temp = &sgt[i++];
> >  			hw_sg_to_cpu(sg_temp);
> > -			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> > +			if (sg_temp->bpid != 0xFF) {
> > +				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
> > +				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> >  						qm_sg_entry_get64(sg_temp));
> > -			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> > +				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> >  						      bp_info->meta_data_size);
> > -			first_seg->nb_segs += 1;
> > -			prev_seg->next = cur_seg;
> > -			if (sg_temp->final) {
> > -				cur_seg->next = NULL;
> > -				break;
> > +				rte_pktmbuf_free_seg(cur_seg);
> >  			}
> > -			prev_seg = cur_seg;
> > +			if (sg_temp->final)
> > +				break;
> >  		}
> > -
> > -		rte_pktmbuf_free_seg(temp);
> >  		rte_pktmbuf_free_seg(first_seg);
> >  		return 0;
> >  	}
  
Ferruh Yigit Oct. 6, 2022, 9:42 a.m. UTC | #3
On 10/6/2022 9:51 AM, Gagandeep Singh wrote:
> Hi,
> 
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@amd.com>
>> Sent: Wednesday, October 5, 2022 7:52 PM
>> To: Gagandeep Singh <G.Singh@nxp.com>; dev@dpdk.org
>> Cc: stable@dpdk.org
>> Subject: Re: [PATCH 15/15] net/dpaa: fix buffer free in slow path
>>
>> On 9/28/2022 6:25 AM, Gagandeep Singh wrote:
>>> Add a check in the slow path to free only those buffers which are not
>>> external.
>>>
>>
>> Can you please explain what the error was before the fix, i.e. what was
>> happening when you tried to free all mbufs?
>>
>> Also, it seems the previous logic was different, with 'prev_seg' etc.; can
>> you explain what changed there and why?
>>
> Actually, there were two issues. This function was converting all the
> segments present in the HW frame descriptor (FD) into an mbuf SG list by
> looping over the segments in the FD, and at the end it freed only one
> segment by calling the API rte_pktmbuf_free_seg(), so the memory of the
> other segments was leaked.
> 

ack

> Now, with this change, we loop over each segment in the FD and, if the
> segment has a valid buffer pool id (HW pool id), free that segment inside
> the loop itself, without converting to an mbuf list. If we freed all the
> buffers, including those with an invalid HW bpid (which can only be the
> external buffer case), there could be a double free, because all the
> external buffer free handling is done by the Xmit function.
> 

Got it. Can you please give more information in the commit log, as above,
and can you please elaborate on the impact of a possible double free: will
it crash, etc.?

>>> Fixes: 9124e65dd3eb ("net/dpaa: enable Tx queue taildrop")
>>> Cc: stable@dpdk.org
>>>
>>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
>>> ---
>>>    drivers/net/dpaa/dpaa_rxtx.c | 23 ++++++++---------------
>>>    1 file changed, 8 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
>>> index 4d285b4f38..ce4f3d6c85 100644
>>> --- a/drivers/net/dpaa/dpaa_rxtx.c
>>> +++ b/drivers/net/dpaa/dpaa_rxtx.c
>>> @@ -455,7 +455,7 @@ dpaa_free_mbuf(const struct qm_fd *fd)
>>>  	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
>>>  	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
>>>  	if (unlikely(format == qm_fd_sg)) {
>>> -		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
>>> +		struct rte_mbuf *first_seg, *cur_seg;
>>>  		struct qm_sg_entry *sgt, *sg_temp;
>>>  		void *vaddr, *sg_vaddr;
>>>  		int i = 0;
>>> @@ -469,32 +469,25 @@ dpaa_free_mbuf(const struct qm_fd *fd)
>>>  		sgt = vaddr + fd_offset;
>>>  		sg_temp = &sgt[i++];
>>>  		hw_sg_to_cpu(sg_temp);
>>> -		temp = (struct rte_mbuf *)
>>> -			((char *)vaddr - bp_info->meta_data_size);
>>>  		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
>>>  						qm_sg_entry_get64(sg_temp));
>>> -
>>>  		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
>>>  						bp_info->meta_data_size);
>>>  		first_seg->nb_segs = 1;
>>> -		prev_seg = first_seg;
>>>  		while (i < DPAA_SGT_MAX_ENTRIES) {
>>>  			sg_temp = &sgt[i++];
>>>  			hw_sg_to_cpu(sg_temp);
>>> -			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
>>> +			if (sg_temp->bpid != 0xFF) {
>>> +				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
>>> +				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
>>>  						qm_sg_entry_get64(sg_temp));
>>> -			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
>>> +				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
>>>  						      bp_info->meta_data_size);
>>> -			first_seg->nb_segs += 1;
>>> -			prev_seg->next = cur_seg;
>>> -			if (sg_temp->final) {
>>> -				cur_seg->next = NULL;
>>> -				break;
>>> +				rte_pktmbuf_free_seg(cur_seg);
>>>  			}
>>> -			prev_seg = cur_seg;
>>> +			if (sg_temp->final)
>>> +				break;
>>>  		}
>>> -
>>> -		rte_pktmbuf_free_seg(temp);
>>>  		rte_pktmbuf_free_seg(first_seg);
>>>  		return 0;
>>>  	}
>
  
Gagandeep Singh Oct. 6, 2022, 11:19 a.m. UTC | #4
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Thursday, October 6, 2022 3:12 PM
> To: Gagandeep Singh <G.Singh@nxp.com>; dev@dpdk.org
> Cc: stable@dpdk.org
> Subject: Re: [PATCH 15/15] net/dpaa: fix buffer free in slow path
> 
> On 10/6/2022 9:51 AM, Gagandeep Singh wrote:
> > Hi,
> >
> >> -----Original Message-----
> >> From: Ferruh Yigit <ferruh.yigit@amd.com>
> >> Sent: Wednesday, October 5, 2022 7:52 PM
> >> To: Gagandeep Singh <G.Singh@nxp.com>; dev@dpdk.org
> >> Cc: stable@dpdk.org
> >> Subject: Re: [PATCH 15/15] net/dpaa: fix buffer free in slow path
> >>
> >> On 9/28/2022 6:25 AM, Gagandeep Singh wrote:
> >>> Add a check in the slow path to free only those buffers which are not
> >>> external.
> >>>
> >>
> >> Can you please explain what the error was before the fix, i.e. what was
> >> happening when you tried to free all mbufs?
> >>
> >> Also, it seems the previous logic was different, with 'prev_seg' etc.;
> >> can you explain what changed there and why?
> >>
> > Actually, there were two issues. This function was converting all the
> > segments present in the HW frame descriptor (FD) into an mbuf SG list
> > by looping over the segments in the FD, and at the end it freed only
> > one segment by calling the API rte_pktmbuf_free_seg(), so the memory
> > of the other segments was leaked.
> >
> 
> ack
> 
> > Now, with this change, we loop over each segment in the FD and, if the
> > segment has a valid buffer pool id (HW pool id), free that segment
> > inside the loop itself, without converting to an mbuf list. If we freed
> > all the buffers, including those with an invalid HW bpid (which can
> > only be the external buffer case), there could be a double free,
> > because all the external buffer free handling is done by the Xmit
> > function.
> >
> 
> Got it. Can you please give more information in the commit log, as above,
> and can you please elaborate on the impact of a possible double free: will
> it crash, etc.?
> 
Ok. I will update the commit message.

> >>> Fixes: 9124e65dd3eb ("net/dpaa: enable Tx queue taildrop")
> >>> Cc: stable@dpdk.org
> >>>
> >>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> >>> ---
> >>>    drivers/net/dpaa/dpaa_rxtx.c | 23 ++++++++---------------
> >>>    1 file changed, 8 insertions(+), 15 deletions(-)
> >>>
> >>> diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
> >>> index 4d285b4f38..ce4f3d6c85 100644
> >>> --- a/drivers/net/dpaa/dpaa_rxtx.c
> >>> +++ b/drivers/net/dpaa/dpaa_rxtx.c
> >>> @@ -455,7 +455,7 @@ dpaa_free_mbuf(const struct qm_fd *fd)
> >>>  	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
> >>>  	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
> >>>  	if (unlikely(format == qm_fd_sg)) {
> >>> -		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
> >>> +		struct rte_mbuf *first_seg, *cur_seg;
> >>>  		struct qm_sg_entry *sgt, *sg_temp;
> >>>  		void *vaddr, *sg_vaddr;
> >>>  		int i = 0;
> >>> @@ -469,32 +469,25 @@ dpaa_free_mbuf(const struct qm_fd *fd)
> >>>  		sgt = vaddr + fd_offset;
> >>>  		sg_temp = &sgt[i++];
> >>>  		hw_sg_to_cpu(sg_temp);
> >>> -		temp = (struct rte_mbuf *)
> >>> -			((char *)vaddr - bp_info->meta_data_size);
> >>>  		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> >>>  						qm_sg_entry_get64(sg_temp));
> >>> -
> >>>  		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> >>>  						bp_info->meta_data_size);
> >>>  		first_seg->nb_segs = 1;
> >>> -		prev_seg = first_seg;
> >>>  		while (i < DPAA_SGT_MAX_ENTRIES) {
> >>>  			sg_temp = &sgt[i++];
> >>>  			hw_sg_to_cpu(sg_temp);
> >>> -			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> >>> +			if (sg_temp->bpid != 0xFF) {
> >>> +				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
> >>> +				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
> >>>  						qm_sg_entry_get64(sg_temp));
> >>> -			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> >>> +				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
> >>>  						      bp_info->meta_data_size);
> >>> -			first_seg->nb_segs += 1;
> >>> -			prev_seg->next = cur_seg;
> >>> -			if (sg_temp->final) {
> >>> -				cur_seg->next = NULL;
> >>> -				break;
> >>> +				rte_pktmbuf_free_seg(cur_seg);
> >>>  			}
> >>> -			prev_seg = cur_seg;
> >>> +			if (sg_temp->final)
> >>> +				break;
> >>>  		}
> >>> -
> >>> -		rte_pktmbuf_free_seg(temp);
> >>>  		rte_pktmbuf_free_seg(first_seg);
> >>>  		return 0;
> >>>  	}
> >
  

Patch

diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 4d285b4f38..ce4f3d6c85 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -455,7 +455,7 @@  dpaa_free_mbuf(const struct qm_fd *fd)
 	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
 	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
 	if (unlikely(format == qm_fd_sg)) {
-		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
+		struct rte_mbuf *first_seg, *cur_seg;
 		struct qm_sg_entry *sgt, *sg_temp;
 		void *vaddr, *sg_vaddr;
 		int i = 0;
@@ -469,32 +469,25 @@  dpaa_free_mbuf(const struct qm_fd *fd)
 		sgt = vaddr + fd_offset;
 		sg_temp = &sgt[i++];
 		hw_sg_to_cpu(sg_temp);
-		temp = (struct rte_mbuf *)
-			((char *)vaddr - bp_info->meta_data_size);
 		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
 						qm_sg_entry_get64(sg_temp));
-
 		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
 						bp_info->meta_data_size);
 		first_seg->nb_segs = 1;
-		prev_seg = first_seg;
 		while (i < DPAA_SGT_MAX_ENTRIES) {
 			sg_temp = &sgt[i++];
 			hw_sg_to_cpu(sg_temp);
-			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
+			if (sg_temp->bpid != 0xFF) {
+				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
+				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
 						qm_sg_entry_get64(sg_temp));
-			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
 						      bp_info->meta_data_size);
-			first_seg->nb_segs += 1;
-			prev_seg->next = cur_seg;
-			if (sg_temp->final) {
-				cur_seg->next = NULL;
-				break;
+				rte_pktmbuf_free_seg(cur_seg);
 			}
-			prev_seg = cur_seg;
+			if (sg_temp->final)
+				break;
 		}
-
-		rte_pktmbuf_free_seg(temp);
 		rte_pktmbuf_free_seg(first_seg);
 		return 0;
 	}