[dpdk-dev,3/6] fm10k: Fix data integrity issue with multi-segment frame

Message ID 1432887044-24777-4-git-send-email-jing.d.chen@intel.com
State Accepted, archived

Commit Message

Chen, Jing D May 29, 2015, 8:10 a.m. UTC
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>

On the TX side, the FM10K_TXD_FLAG_LAST bit is set only in the last
descriptor of a multi-segment packet. But the current implementation
does not set all the fields of each TX descriptor, so a descriptor
filled now re-uses field values written on the previous pass around
the ring. If FM10K_TXD_FLAG_LAST was set in that previous round, and
the descriptor is not the last one of the current multi-segment
packet, the HW will send out an incomplete packet, leading to a data
integrity issue.

Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
---
 drivers/net/fm10k/fm10k_rxtx.c |   15 +++++++++++++--
 1 files changed, 13 insertions(+), 2 deletions(-)
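
The fix is twofold: clear the flags field of every descriptor while
filling the chain, and write the last descriptor's flags (which carry
FM10K_TXD_FLAG_LAST) only after the whole chain has been written. The
sketch below illustrates the hazard and the corrected ordering on a
simplified ring; the names and types are illustrative stand-ins, not
the driver's actual structures.

    /* Simplified descriptor ring; stand-in types for illustration. */
    #include <stdint.h>

    #define NB_DESC   8
    #define FLAG_LAST 0x1

    struct desc {
        uint64_t buffer_addr;
        uint16_t buflen;
        uint16_t flags;
    };

    static struct desc ring[NB_DESC];
    static unsigned next_free;

    /* Queue one multi-segment packet that occupies nb_segs slots.
     * The buggy loop wrote buffer_addr/buflen but never touched
     * flags, so a slot reused after the ring wrapped could still
     * carry FLAG_LAST from the previous pass, and the HW would treat
     * a mid-chain descriptor as end-of-packet. */
    static void xmit(const uint64_t *addrs, const uint16_t *lens,
                     int nb_segs, uint16_t flags)
    {
        unsigned last_id = (next_free + nb_segs - 1) % NB_DESC;
        int i;

        for (i = 0; i < nb_segs; i++) {
            ring[next_free].buffer_addr = addrs[i];
            ring[next_free].buflen = lens[i];
            ring[next_free].flags = 0;  /* wipe any stale FLAG_LAST */
            if (++next_free == NB_DESC)
                next_free = 0;
        }
        /* Mark end-of-packet only after every descriptor in the
         * chain is initialized; this is why the patch moves the
         * q->hw_ring[last_id].flags assignment after the fill loop. */
        ring[last_id].flags = flags | FLAG_LAST;
    }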

Comments

Michael Qiu June 16, 2015, 12:07 p.m. UTC | #1
Tested-by: Michael Qiu <michael.qiu@intel.com>

- OS: Fedora20  3.11.10-301
- GCC: gcc version 4.8.3 20140911
- CPU: Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
- NIC: Ethernet controller: Intel Corporation Device 15a4 (rev 01)
- Default x86_64-native-linuxapp-gcc configuration

- Total 5 cases, 5 passed, 0 failed

- Case: Normal frames with no jumbo frame support
- Case: Jumbo frames with no jumbo frame support
- Case: Normal frames with jumbo frame support
- Case: Jumbo frames with jumbo frame support
- Case: Frames bigger than jumbo frames with jumbo frame support
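
The report does not list the commands behind these cases; one
plausible way to exercise the multi-segment TX path this patch fixes
is testpmd's txonly mode with several segment lengths. The session
below is an assumption for illustration, not the tested
configuration:

    # enable jumbo frames at launch for the jumbo cases
    ./testpmd -c 0x3 -n 4 -- -i --max-pkt-len=9600

    testpmd> set fwd txonly
    testpmd> set txpkts 2048,2048,2048
    testpmd> start

Each transmitted frame is then split across three descriptors, so a
leftover FM10K_TXD_FLAG_LAST on a middle descriptor would truncate it.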



Patch

diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 56df6cd..f5d1ad0 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -402,9 +402,9 @@  static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
 		q->nb_used = q->nb_used + mb->nb_segs;
 	}
 
-	q->hw_ring[last_id].flags = flags;
 	q->nb_free -= mb->nb_segs;
 
+	q->hw_ring[q->next_free].flags = 0;
 	/* set checksum flags on first descriptor of packet. SCTP checksum
 	 * offload is not supported, but we do not explicitly check for this
 	 * case in favor of greatly simplified processing. */
@@ -415,16 +415,27 @@  static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
 	if (mb->ol_flags & PKT_TX_VLAN_PKT)
 		q->hw_ring[q->next_free].vlan = mb->vlan_tci;
 
+	q->sw_ring[q->next_free] = mb;
+	q->hw_ring[q->next_free].buffer_addr =
+			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+	q->hw_ring[q->next_free].buflen =
+			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+	if (++q->next_free == q->nb_desc)
+		q->next_free = 0;
+
 	/* fill up the rings */
-	for (; mb != NULL; mb = mb->next) {
+	for (mb = mb->next; mb != NULL; mb = mb->next) {
 		q->sw_ring[q->next_free] = mb;
 		q->hw_ring[q->next_free].buffer_addr =
 				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
 		q->hw_ring[q->next_free].buflen =
 				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+		q->hw_ring[q->next_free].flags = 0;
 		if (++q->next_free == q->nb_desc)
 			q->next_free = 0;
 	}
+
+	q->hw_ring[last_id].flags = flags;
 }
 
 uint16_t