[v2,1/8] net/bonding: fix buf corruption in packets

Message ID 1535718368-15803-2-git-send-email-amo@semihalf.com (mailing list archive)
State Not Applicable, archived
Delegated to: Ferruh Yigit
Series: Add Marvell NETA PMD

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  success  Compilation OK

Commit Message

Andrzej Ostruszka Aug. 31, 2018, 12:25 p.m. UTC
  From: Jia Yu <jyu@vmware.com>

When bond slave devices cannot transmit all of the packets in the bufs
array, the tx_burst callback shall merge the un-transmitted packets back
into the bufs array. The recent merge logic introduced a bug that causes
invalid mbuf addresses to be written to the bufs array.
When the caller frees the un-transmitted packets, the invalid addresses
crash the application.

The fix avoids shifting the mbufs and instead writes the un-transmitted
packets directly back to the bufs array.

Fixes: 09150784a776 ("net/bonding: burst mode hash calculation")
Cc: stable@dpdk.org

Signed-off-by: Jia Yu <jyu@vmware.com>
Acked-by: Chas Williams <chas3@att.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 116 +++++++--------------------------
 1 file changed, 23 insertions(+), 93 deletions(-)
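
For readers following the diff: the removed logic first shifted the failed
mbufs to the front of each per-slave array (indexing from slave_tx_count - 1)
and later copied them back into bufs starting at
nb_bufs - total_tx_fail_count - 1, while the replacement copies the failed
mbufs straight from the tail of each per-slave array into the tail of bufs.
Below is a minimal, self-contained sketch of that merge-back approach; the
function name, parameter layout and the opaque declaration of struct rte_mbuf
are simplified for illustration and are not the driver's actual code.

/*
 * Sketch only: merge un-transmitted mbufs back into the caller's bufs
 * array without any intermediate shifting.  Names and signature are
 * illustrative, not taken from rte_eth_bond_pmd.c.
 */
#include <string.h>
#include <stdint.h>

struct rte_mbuf;	/* opaque here; provided by DPDK in the real driver */

static uint16_t
merge_back_failed_tx(struct rte_mbuf **bufs, uint16_t nb_bufs,
		     struct rte_mbuf **slave_bufs[],
		     const uint16_t slave_nb_bufs[],
		     const uint16_t slave_tx_count[],
		     uint16_t slave_count)
{
	uint16_t total_tx_fail_count = 0;
	uint16_t i;

	for (i = 0; i < slave_count; i++) {
		if (slave_tx_count[i] < slave_nb_bufs[i]) {
			uint16_t fail = slave_nb_bufs[i] - slave_tx_count[i];

			total_tx_fail_count += fail;
			/*
			 * Failed mbufs start at index slave_tx_count[i] of the
			 * per-slave array; copy them to the end of bufs so the
			 * PMD contract (untransmitted packets left at the tail
			 * of the input array) is preserved.
			 */
			memcpy(&bufs[nb_bufs - total_tx_fail_count],
			       &slave_bufs[i][slave_tx_count[i]],
			       fail * sizeof(bufs[0]));
		}
	}

	return nb_bufs - total_tx_fail_count;	/* packets actually sent */
}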
  

Comments

Andrzej Ostruszka Aug. 31, 2018, 12:33 p.m. UTC | #1
Please forgive this particular patch - it was my mistake when sending.
It was a leftover from a wrong format-patch run that was picked up by a
shell wildcard.

Apologies for the noise.

Best regards
Andrzej
  

Patch

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 4417422..b84f322 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -301,10 +301,10 @@  bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 	/* Mapping array generated by hash function to map mbufs to slaves */
 	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
 
-	uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+	uint16_t slave_tx_count;
 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
-	uint16_t i, j;
+	uint16_t i;
 
 	if (unlikely(nb_bufs == 0))
 		return 0;
@@ -359,34 +359,12 @@  bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 
 		/* If tx burst fails move packets to end of bufs */
 		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-			slave_tx_fail_count[i] = slave_nb_bufs[i] -
+			int slave_tx_fail_count = slave_nb_bufs[i] -
 					slave_tx_count;
-			total_tx_fail_count += slave_tx_fail_count[i];
-
-			/*
-			 * Shift bufs to beginning of array to allow reordering
-			 * later
-			 */
-			for (j = 0; j < slave_tx_fail_count[i]; j++) {
-				slave_bufs[i][j] =
-					slave_bufs[i][(slave_tx_count - 1) + j];
-			}
-		}
-	}
-
-	/*
-	 * If there are tx burst failures we move packets to end of bufs to
-	 * preserve expected PMD behaviour of all failed transmitted being
-	 * at the end of the input mbuf array
-	 */
-	if (unlikely(total_tx_fail_count > 0)) {
-		int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
-		for (i = 0; i < slave_count; i++) {
-			if (slave_tx_fail_count[i] > 0) {
-				for (j = 0; j < slave_tx_fail_count[i]; j++)
-					bufs[bufs_idx++] = slave_bufs[i][j];
-			}
+			total_tx_fail_count += slave_tx_fail_count;
+			memcpy(&bufs[nb_bufs - total_tx_fail_count],
+			       &slave_bufs[i][slave_tx_count],
+			       slave_tx_fail_count * sizeof(bufs[0]));
 		}
 	}
 
@@ -716,8 +694,8 @@  bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 				tx_fail_total += tx_fail_slave;
 
 				memcpy(&bufs[nb_pkts - tx_fail_total],
-						&slave_bufs[i][num_tx_slave],
-						tx_fail_slave * sizeof(bufs[0]));
+				       &slave_bufs[i][num_tx_slave],
+				       tx_fail_slave * sizeof(bufs[0]));
 			}
 			num_tx_total += num_tx_slave;
 		}
@@ -1222,10 +1200,10 @@  bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Mapping array generated by hash function to map mbufs to slaves */
 	uint16_t bufs_slave_port_idxs[nb_bufs];
 
-	uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+	uint16_t slave_tx_count;
 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
-	uint16_t i, j;
+	uint16_t i;
 
 	if (unlikely(nb_bufs == 0))
 		return 0;
@@ -1266,34 +1244,12 @@  bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 
 		/* If tx burst fails move packets to end of bufs */
 		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-			slave_tx_fail_count[i] = slave_nb_bufs[i] -
+			int slave_tx_fail_count = slave_nb_bufs[i] -
 					slave_tx_count;
-			total_tx_fail_count += slave_tx_fail_count[i];
-
-			/*
-			 * Shift bufs to beginning of array to allow reordering
-			 * later
-			 */
-			for (j = 0; j < slave_tx_fail_count[i]; j++) {
-				slave_bufs[i][j] =
-					slave_bufs[i][(slave_tx_count - 1) + j];
-			}
-		}
-	}
-
-	/*
-	 * If there are tx burst failures we move packets to end of bufs to
-	 * preserve expected PMD behaviour of all failed transmitted being
-	 * at the end of the input mbuf array
-	 */
-	if (unlikely(total_tx_fail_count > 0)) {
-		int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
-		for (i = 0; i < slave_count; i++) {
-			if (slave_tx_fail_count[i] > 0) {
-				for (j = 0; j < slave_tx_fail_count[i]; j++)
-					bufs[bufs_idx++] = slave_bufs[i][j];
-			}
+			total_tx_fail_count += slave_tx_fail_count;
+			memcpy(&bufs[nb_bufs - total_tx_fail_count],
+			       &slave_bufs[i][slave_tx_count],
+			       slave_tx_fail_count * sizeof(bufs[0]));
 		}
 	}
 
@@ -1320,10 +1276,10 @@  bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	/* Mapping array generated by hash function to map mbufs to slaves */
 	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
 
-	uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+	uint16_t slave_tx_count;
 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
-	uint16_t i, j;
+	uint16_t i;
 
 	if (unlikely(nb_bufs == 0))
 		return 0;
@@ -1381,39 +1337,13 @@  bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 			/* If tx burst fails move packets to end of bufs */
 			if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-				slave_tx_fail_count[i] = slave_nb_bufs[i] -
+				int slave_tx_fail_count = slave_nb_bufs[i] -
 						slave_tx_count;
-				total_tx_fail_count += slave_tx_fail_count[i];
-
-				/*
-				 * Shift bufs to beginning of array to allow
-				 * reordering later
-				 */
-				for (j = 0; j < slave_tx_fail_count[i]; j++)
-					slave_bufs[i][j] =
-						slave_bufs[i]
-							[(slave_tx_count - 1)
-							+ j];
-			}
-		}
+				total_tx_fail_count += slave_tx_fail_count;
 
-		/*
-		 * If there are tx burst failures we move packets to end of
-		 * bufs to preserve expected PMD behaviour of all failed
-		 * transmitted being at the end of the input mbuf array
-		 */
-		if (unlikely(total_tx_fail_count > 0)) {
-			int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
-			for (i = 0; i < slave_count; i++) {
-				if (slave_tx_fail_count[i] > 0) {
-					for (j = 0;
-						j < slave_tx_fail_count[i];
-						j++) {
-						bufs[bufs_idx++] =
-							slave_bufs[i][j];
-					}
-				}
+				memcpy(&bufs[nb_bufs - total_tx_fail_count],
+				       &slave_bufs[i][slave_tx_count],
+				       slave_tx_fail_count * sizeof(bufs[0]));
 			}
 		}
 	}