From patchwork Mon Sep 1 08:31:27 2014
X-Patchwork-Submitter: "Doherty, Declan"
X-Patchwork-Id: 271
From: Declan Doherty
To: dev@dpdk.org
Date: Mon, 1 Sep 2014 09:31:27 +0100
Message-Id: <1409560289-29558-5-git-send-email-declan.doherty@intel.com>
In-Reply-To: <1408456313-28812-1-git-send-email-declan.doherty@intel.com>
References: <1408456313-28812-1-git-send-email-declan.doherty@intel.com>
Subject: [dpdk-dev] [PATCH v2 4/6] bond: free mbufs if transmission fails in bonding tx_burst functions

Fixes a number of corner cases where a failed transmission on a slave device could lead to leaked mbufs.

V2 addresses behavioural issues in the first version: packets are no longer freed in the bonding layer, except in broadcast mode, where, if failures happen on more than one slave, mbufs are freed on all slaves except the one with the fewest errors.

Also adds new unit tests covering the slave transmission failure case for round-robin, balance and broadcast modes.
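For context, the convention the patch follows in round-robin and balance modes is: a bonding tx_burst function returns only the number of packets actually transmitted and moves every unsent mbuf to the tail of the caller's bufs[] array, so the application decides whether to retry or free them. Below is a minimal, DPDK-free sketch of that convention for the round-robin case; the pkt type, fake_slave_tx() and tx_burst_round_robin() are hypothetical stand-ins, not the code added by this patch.

#include <stdio.h>
#include <string.h>

#define MAX_SLAVES 4
#define MAX_BURST  64

typedef struct pkt { int id; } pkt;

/* Pretend slave that can transmit at most 'capacity' packets per call. */
static int
fake_slave_tx(int capacity, pkt **bufs, int nb_pkts)
{
        (void)bufs; /* a real slave would consume the mbufs here */
        return nb_pkts < capacity ? nb_pkts : capacity;
}

static int
tx_burst_round_robin(pkt **bufs, int nb_pkts, int num_slaves,
                const int *slave_capacity)
{
        pkt *slave_bufs[MAX_SLAVES][MAX_BURST];
        int slave_nb_pkts[MAX_SLAVES] = { 0 };
        int num_tx_total = 0, tx_fail_total = 0;
        int i;

        /* distribute the burst across the slaves round-robin */
        for (i = 0; i < nb_pkts; i++) {
                int s = i % num_slaves;

                slave_bufs[s][slave_nb_pkts[s]++] = bufs[i];
        }

        for (i = 0; i < num_slaves; i++) {
                int sent = fake_slave_tx(slave_capacity[i], slave_bufs[i],
                                slave_nb_pkts[i]);

                if (sent < slave_nb_pkts[i]) {
                        int failed = slave_nb_pkts[i] - sent;

                        /* move unsent packets to the tail of bufs[] so the
                         * caller knows exactly which ones to retry or free */
                        tx_fail_total += failed;
                        memcpy(&bufs[nb_pkts - tx_fail_total],
                                        &slave_bufs[i][sent],
                                        failed * sizeof(bufs[0]));
                }
                num_tx_total += sent;
        }

        return num_tx_total;
}

int
main(void)
{
        pkt pool[8];
        pkt *burst[8];
        int capacity[2] = { 4, 2 };     /* second slave sends only 2 of its 4 */
        int i, sent;

        for (i = 0; i < 8; i++) {
                pool[i].id = i;
                burst[i] = &pool[i];
        }

        sent = tx_burst_round_robin(burst, 8, 2, capacity);

        printf("sent %d of 8, unsent ids:", sent);
        for (i = sent; i < 8; i++)
                printf(" %d", burst[i]->id);
        printf("\n");
        return 0;
}

Running the sketch reports 6 of 8 packets sent, with the two packets the failing slave could not transmit sitting at the end of burst[], which is the ordering the new round-robin and balance unit tests assert against expected_tx_fail_pkts[].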
Signed-off-by: Declan Doherty --- app/test/test_link_bonding.c | 393 +++++++++++++++++++++++++++++++- app/test/virtual_pmd.c | 80 +++++-- app/test/virtual_pmd.h | 7 + lib/librte_pmd_bond/rte_eth_bond_pmd.c | 83 ++++++-- 4 files changed, 525 insertions(+), 38 deletions(-) diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c index cce32ed..c5aaa80 100644 --- a/app/test/test_link_bonding.c +++ b/app/test/test_link_bonding.c @@ -663,6 +663,9 @@ enable_bonded_slaves(void) int i; for (i = 0; i < test_params->bonded_slave_count; i++) { + virtual_ethdev_tx_burst_fn_set_success(test_params->slave_port_ids[i], + 1); + virtual_ethdev_simulate_link_status_interrupt( test_params->slave_port_ids[i], 1); } @@ -1413,6 +1416,135 @@ test_roundrobin_tx_burst(void) } static int +verify_mbufs_ref_count(struct rte_mbuf **mbufs, int nb_mbufs, int val) +{ + int i, refcnt; + + for (i = 0; i < nb_mbufs; i++) { + refcnt = rte_mbuf_refcnt_read(mbufs[i]); + TEST_ASSERT_EQUAL(refcnt, val, + "mbuf ref count (%d)is not the expected value (%d)", + refcnt, val); + } + return 0; +} + + +static void +free_mbufs(struct rte_mbuf **mbufs, int nb_mbufs) +{ + int i; + + for (i = 0; i < nb_mbufs; i++) + rte_pktmbuf_free(mbufs[i]); +} + +#define TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT (2) +#define TEST_RR_SLAVE_TX_FAIL_BURST_SIZE (64) +#define TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT (22) +#define TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX (1) + +static int +test_roundrobin_tx_burst_slave_tx_fail(void) +{ + struct rte_mbuf *pkt_burst[MAX_PKT_BURST]; + struct rte_mbuf *expected_tx_fail_pkts[MAX_PKT_BURST]; + + struct rte_eth_stats port_stats; + + int i, first_fail_idx, tx_count; + + TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + BONDING_MODE_ROUND_ROBIN, 0, + TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT, 1), + "Failed to intialise bonded device"); + + /* Generate test bursts of packets to transmit */ + TEST_ASSERT_EQUAL(generate_test_burst(pkt_burst, + TEST_RR_SLAVE_TX_FAIL_BURST_SIZE, 0, 1, 0, 0, 0), + TEST_RR_SLAVE_TX_FAIL_BURST_SIZE, + "Failed to generate test packet burst"); + + /* Copy references to packets which we expect not to be transmitted */ + first_fail_idx = (TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - + (TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT * + TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT)) + + TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX; + + for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + expected_tx_fail_pkts[i] = pkt_burst[first_fail_idx + + (i * TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT)]; + } + + /* Set virtual slave to only fail transmission of + * TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT packets in burst */ + virtual_ethdev_tx_burst_fn_set_success( + test_params->slave_port_ids[TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], + 0); + + virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( + test_params->slave_port_ids[TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + + tx_count = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkt_burst, + TEST_RR_SLAVE_TX_FAIL_BURST_SIZE); + + TEST_ASSERT_EQUAL(tx_count, TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, + "Transmitted (%d) an unexpected (%d) number of packets", tx_count, + TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + + /* Verify that failed packet are expected failed packets */ + for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + TEST_ASSERT_EQUAL(expected_tx_fail_pkts[i], pkt_burst[i + tx_count], + "expected mbuf (%d) pointer %p not expected pointer %p", + i, expected_tx_fail_pkts[i], 
pkt_burst[i + tx_count]); + } + + /* Verify bonded port tx stats */ + rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, + "Bonded Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_RR_SLAVE_TX_FAIL_BURST_SIZE - + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + + /* Verify slave ports tx stats */ + for (i = 0; i < test_params->bonded_slave_count; i++) { + int slave_expected_tx_count; + + rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats); + + slave_expected_tx_count = TEST_RR_SLAVE_TX_FAIL_BURST_SIZE / + test_params->bonded_slave_count; + + if (i == TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX) + slave_expected_tx_count = slave_expected_tx_count - + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)slave_expected_tx_count, + "Slave Port (%d) opackets value (%u) not as expected (%d)", + test_params->slave_port_ids[i], + (unsigned int)port_stats.opackets, slave_expected_tx_count); + } + + /* Verify that all mbufs have a ref value of zero */ + TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkt_burst[tx_count], + TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, 1), + "mbufs refcnts not as expected"); + + free_mbufs(&pkt_burst[tx_count], TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT); + + /* Clean up and remove slaves from bonded device */ + return remove_slaves_and_stop_bonded_device(); +} + +static int test_roundrobin_rx_burst_on_single_slave(void) { struct rte_mbuf *gen_pkt_burst[MAX_PKT_BURST] = { NULL }; @@ -2900,6 +3032,141 @@ test_balance_l34_tx_burst_ipv6_toggle_udp_port(void) return balance_l34_tx_burst(0, 0, 0, 0, 1); } +#define TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT (2) +#define TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 (40) +#define TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2 (20) +#define TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT (25) +#define TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX (0) + +static int +test_balance_tx_burst_slave_tx_fail(void) +{ + struct rte_mbuf *pkts_burst_1[TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1]; + struct rte_mbuf *pkts_burst_2[TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2]; + + struct rte_mbuf *expected_fail_pkts[TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT]; + + struct rte_eth_stats port_stats; + + int i, first_tx_fail_idx, tx_count_1, tx_count_2; + + TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + BONDING_MODE_BALANCE, 0, + TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT, 1), + "Failed to intialise bonded device"); + + TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set( + test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2), + "Failed to set balance xmit policy."); + + + /* Generate test bursts for transmission */ + TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst_1, + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1, 0, 0, 0, 0, 0), + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1, + "Failed to generate test packet burst 1"); + + first_tx_fail_idx = TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; + + /* copy mbuf referneces for expected transmission failures */ + for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + expected_fail_pkts[i] = pkts_burst_1[i + first_tx_fail_idx]; + } + + TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst_2, + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, 0, 0, 1, 0, 0), + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, + "Failed to generate test packet burst 2"); + + + /* Set virtual slave TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX to only fail + * 
transmission of TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT packets of burst */ + virtual_ethdev_tx_burst_fn_set_success( + test_params->slave_port_ids[TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], + 0); + + virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( + test_params->slave_port_ids[TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX], + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + + + /* Transmit burst 1 */ + tx_count_1 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_1, + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1); + + TEST_ASSERT_EQUAL(tx_count_1, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, + "Transmitted (%d) packets, expected to transmit (%d) packets", + tx_count_1, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + + /* Verify that failed packet are expected failed packets */ + for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) { + TEST_ASSERT_EQUAL(expected_fail_pkts[i], pkts_burst_1[i + tx_count_1], + "expected mbuf (%d) pointer %p not expected pointer %p", + i, expected_fail_pkts[i], pkts_burst_1[i + tx_count_1]); + } + + /* Transmit burst 2 */ + tx_count_2 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_2, + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + + TEST_ASSERT_EQUAL(tx_count_2, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, + "Transmitted (%d) packets, expected to transmit (%d) packets", + tx_count_2, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + + + /* Verify bonded port tx stats */ + rte_eth_stats_get(test_params->bonded_port_id, &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)((TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT) + + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2), + "Bonded Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + (TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT) + + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + + /* Verify slave ports tx stats */ + + rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t) + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, + "Slave Port (%d) opackets value (%u) not as expected (%d)", + test_params->slave_port_ids[0], + (unsigned int)port_stats.opackets, + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 - + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + + + + + rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, + "Slave Port (%d) opackets value (%u) not as expected (%d)", + test_params->slave_port_ids[1], + (unsigned int)port_stats.opackets, + TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2); + + /* Verify that all mbufs have a ref value of zero */ + TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkts_burst_1[tx_count_1], + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, 1), + "mbufs refcnts not as expected"); + + free_mbufs(&pkts_burst_1[tx_count_1], + TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT); + + /* Clean up and remove slaves from bonded device */ + return remove_slaves_and_stop_bonded_device(); +} + #define TEST_BALANCE_RX_BURST_SLAVE_COUNT (3) static int @@ -3412,7 +3679,7 @@ test_broadcast_tx_burst(void) /* Send burst on bonded port */ nb_tx = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst, burst_size); - if (nb_tx != burst_size * test_params->bonded_slave_count) { + if (nb_tx != burst_size) { printf("Bonded Port (%d) rx burst failed, packets transmitted value (%u) not as 
expected (%d)\n", test_params->bonded_port_id, nb_tx, burst_size); @@ -3455,6 +3722,125 @@ test_broadcast_tx_burst(void) return remove_slaves_and_stop_bonded_device(); } + +#define TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT (3) +#define TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE (40) +#define TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT (15) +#define TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT (10) + +static int +test_broadcast_tx_burst_slave_tx_fail(void) +{ + struct rte_mbuf *pkts_burst[TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE]; + struct rte_mbuf *expected_fail_pkts[TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT]; + + struct rte_eth_stats port_stats; + + int i, tx_count; + + TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves( + BONDING_MODE_BROADCAST, 0, + TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT, 1), + "Failed to intialise bonded device"); + + /* Generate test bursts for transmission */ + TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE, 0, 0, 0, 0, 0), + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE, + "Failed to generate test packet burst"); + + for (i = 0; i < TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT; i++) { + expected_fail_pkts[i] = pkts_burst[TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT + i]; + } + + /* Set virtual slave TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX to only fail + * transmission of TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT packets of burst */ + virtual_ethdev_tx_burst_fn_set_success( + test_params->slave_port_ids[0], + 0); + virtual_ethdev_tx_burst_fn_set_success( + test_params->slave_port_ids[1], + 0); + virtual_ethdev_tx_burst_fn_set_success( + test_params->slave_port_ids[2], + 0); + + virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( + test_params->slave_port_ids[0], + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + + virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( + test_params->slave_port_ids[1], + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + + virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count( + test_params->slave_port_ids[2], + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + + /* Transmit burst */ + tx_count = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE); + + TEST_ASSERT_EQUAL(tx_count, TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, + "Transmitted (%d) packets, expected to transmit (%d) packets", + tx_count, TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + + /* Verify that failed packet are expected failed packets */ + for (i = 0; i < TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT; i++) { + TEST_ASSERT_EQUAL(expected_fail_pkts[i], pkts_burst[i + tx_count], + "expected mbuf (%d) pointer %p not expected pointer %p", + i, expected_fail_pkts[i], pkts_burst[i + tx_count]); + } + + /* Verify slave ports tx stats */ + + rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT, + "Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + + + rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, + "Port (%d) opackets value (%u) not as expected (%d)", + 
test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE -s + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + + rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT, + "Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + + + /* Verify that all mbufs who transmission failed have a ref value of one */ + TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkts_burst[tx_count], + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, 1), + "mbufs refcnts not as expected"); + + free_mbufs(&pkts_burst[tx_count], + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + + /* Clean up and remove slaves from bonded device */ + return remove_slaves_and_stop_bonded_device(); +} + #define BROADCAST_RX_BURST_NUM_OF_SLAVES (3) static int @@ -3767,7 +4153,7 @@ test_broadcast_verify_slave_link_status_change_behaviour(void) } if (rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt_burst[0][0], - burst_size) != (burst_size * slave_count)) { + burst_size) != burst_size) { printf("rte_eth_tx_burst failed\n"); return -1; } @@ -3915,6 +4301,7 @@ static struct unit_test_suite link_bonding_test_suite = { TEST_CASE(test_status_interrupt), TEST_CASE(test_adding_slave_after_bonded_device_started), TEST_CASE(test_roundrobin_tx_burst), + TEST_CASE(test_roundrobin_tx_burst_slave_tx_fail), TEST_CASE(test_roundrobin_rx_burst_on_single_slave), TEST_CASE(test_roundrobin_rx_burst_on_multiple_slaves), TEST_CASE(test_roundrobin_verify_promiscuous_enable_disable), @@ -3938,11 +4325,13 @@ static struct unit_test_suite link_bonding_test_suite = { TEST_CASE(test_balance_l34_tx_burst_ipv6_toggle_ip_addr), TEST_CASE(test_balance_l34_tx_burst_vlan_ipv6_toggle_ip_addr), TEST_CASE(test_balance_l34_tx_burst_ipv6_toggle_udp_port), + TEST_CASE(test_balance_tx_burst_slave_tx_fail), TEST_CASE(test_balance_rx_burst), TEST_CASE(test_balance_verify_promiscuous_enable_disable), TEST_CASE(test_balance_verify_mac_assignment), TEST_CASE(test_balance_verify_slave_link_status_change_behaviour), TEST_CASE(test_broadcast_tx_burst), + TEST_CASE(test_broadcast_tx_burst_slave_tx_fail), TEST_CASE(test_broadcast_rx_burst), TEST_CASE(test_broadcast_verify_promiscuous_enable_disable), TEST_CASE(test_broadcast_verify_mac_assignment), diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index e861c5b..f9bd841 100644 --- a/app/test/virtual_pmd.c +++ b/app/test/virtual_pmd.c @@ -48,6 +48,8 @@ struct virtual_ethdev_private { struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST]; int rx_pkt_burst_len; + + int tx_burst_fail_count; }; struct virtual_ethdev_queue { @@ -350,42 +352,67 @@ virtual_ethdev_rx_burst_fail(void *queue __rte_unused, } static uint16_t -virtual_ethdev_tx_burst_success(void *queue, - struct rte_mbuf **bufs __rte_unused, - uint16_t nb_pkts) +virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) { + struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue; + struct rte_eth_dev *vrtl_eth_dev; - struct virtual_ethdev_queue *tx_q; struct virtual_ethdev_private *dev_private; - int i; - tx_q = (struct virtual_ethdev_queue *)queue; + int i; vrtl_eth_dev = &rte_eth_devices[tx_q->port_id]; + dev_private = vrtl_eth_dev->data->dev_private; if (vrtl_eth_dev->data->dev_link.link_status) { - 
dev_private = vrtl_eth_dev->data->dev_private; + /* increment opacket count */ dev_private->eth_stats.opackets += nb_pkts; - return nb_pkts; - } - - /* free packets in burst */ - for (i = 0; i < nb_pkts; i++) { - if (bufs[i] != NULL) + /* free packets in burst */ + for (i = 0; i < nb_pkts; i++) { rte_pktmbuf_free(bufs[i]); + bufs[i] = NULL; + } - bufs[i] = NULL; + return nb_pkts; } return 0; } - static uint16_t -virtual_ethdev_tx_burst_fail(void *queue __rte_unused, - struct rte_mbuf **bufs __rte_unused, uint16_t nb_pkts __rte_unused) +virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) { + struct rte_eth_dev *vrtl_eth_dev = NULL; + struct virtual_ethdev_queue *tx_q = NULL; + struct virtual_ethdev_private *dev_private = NULL; + + int i; + + tx_q = (struct virtual_ethdev_queue *)queue; + vrtl_eth_dev = &rte_eth_devices[tx_q->port_id]; + dev_private = vrtl_eth_dev->data->dev_private; + + if (dev_private->tx_burst_fail_count < nb_pkts) { + int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count; + + /* increment opacket count */ + dev_private->eth_stats.opackets += successfully_txd; + + /* free packets in burst */ + for (i = 0; i < successfully_txd; i++) { + /* free packets in burst */ + if (bufs[i] != NULL) + rte_pktmbuf_free(bufs[i]); + + bufs[i] = NULL; + } + + return successfully_txd; + } + return 0; } @@ -405,17 +432,34 @@ virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success) void virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success) { + struct virtual_ethdev_private *dev_private = NULL; struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id]; + dev_private = vrtl_eth_dev->data->dev_private; + if (success) vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success; else vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail; + + dev_private->tx_burst_fail_count = 0; } +void +virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id, + uint8_t packet_fail_count) +{ + struct virtual_ethdev_private *dev_private = NULL; + struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id]; + + + dev_private = vrtl_eth_dev->data->dev_private; + dev_private->tx_burst_fail_count = packet_fail_count; +} void -virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id, uint8_t link_status) +virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id, + uint8_t link_status) { struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id]; diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h index 766b6ac..3b5c911 100644 --- a/app/test/virtual_pmd.h +++ b/app/test/virtual_pmd.h @@ -67,6 +67,13 @@ void virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success); void virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success); +/* if a value greater than zero is set for packet_fail_count then virtual + * device tx burst function will fail that many packet from burst or all + * packets if packet_fail_count is greater than the number of packets in the + * burst */ +void virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id, + uint8_t packet_fail_count); + #ifdef __cplusplus } #endif diff --git a/lib/librte_pmd_bond/rte_eth_bond_pmd.c b/lib/librte_pmd_bond/rte_eth_bond_pmd.c index 70123fc..38cc1ae 100644 --- a/lib/librte_pmd_bond/rte_eth_bond_pmd.c +++ b/lib/librte_pmd_bond/rte_eth_bond_pmd.c @@ -101,10 +101,10 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs, uint8_t num_of_slaves; uint8_t slaves[RTE_MAX_ETHPORTS]; - uint16_t num_tx_total = 
0; + uint16_t num_tx_total = 0, num_tx_slave; static int slave_idx = 0; - int i, cs_idx = 0; + int i, cslave_idx = 0, tx_fail_total = 0; bd_tx_q = (struct bond_tx_queue *)queue; internals = bd_tx_q->dev_private; @@ -120,19 +120,32 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs, /* Populate slaves mbuf with which packets are to be sent on it */ for (i = 0; i < nb_pkts; i++) { - cs_idx = (slave_idx + i) % num_of_slaves; - slave_bufs[cs_idx][(slave_nb_pkts[cs_idx])++] = bufs[i]; + cslave_idx = (slave_idx + i) % num_of_slaves; + slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i]; } /* increment current slave index so the next call to tx burst starts on the * next slave */ - slave_idx = ++cs_idx; + slave_idx = ++cslave_idx; /* Send packet burst on each slave device */ - for (i = 0; i < num_of_slaves; i++) - if (slave_nb_pkts[i] > 0) - num_tx_total += rte_eth_tx_burst(slaves[i], - bd_tx_q->queue_id, slave_bufs[i], slave_nb_pkts[i]); + for (i = 0; i < num_of_slaves; i++) { + if (slave_nb_pkts[i] > 0) { + num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + slave_bufs[i], slave_nb_pkts[i]); + + /* if tx burst fails move packets to end of bufs */ + if (unlikely(num_tx_slave < slave_nb_pkts[i])) { + int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave; + + tx_fail_total += tx_fail_slave; + + memcpy(&bufs[nb_pkts - tx_fail_total], + &slave_bufs[i][num_tx_slave], tx_fail_slave * sizeof(bufs[0])); + } + num_tx_total += num_tx_slave; + } + } return num_tx_total; } @@ -283,7 +296,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint8_t num_of_slaves; uint8_t slaves[RTE_MAX_ETHPORTS]; - uint16_t num_tx_total = 0; + uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0; int i, op_slave_id; @@ -315,11 +328,23 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs, /* Send packet burst on each slave device */ for (i = 0; i < num_of_slaves; i++) { if (slave_nb_pkts[i] > 0) { - num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, slave_bufs[i], slave_nb_pkts[i]); + + /* if tx burst fails move packets to end of bufs */ + if (unlikely(num_tx_slave < slave_nb_pkts[i])) { + int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave; + + tx_fail_total += slave_tx_fail_count; + memcpy(bufs[nb_pkts - tx_fail_total], + slave_bufs[i][num_tx_slave], slave_tx_fail_count); + } + + num_tx_total += num_tx_slave; } } + return num_tx_total; } @@ -330,12 +355,13 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, struct bond_dev_private *internals; struct bond_tx_queue *bd_tx_q; - uint8_t num_of_slaves; + uint8_t tx_failed_flag = 0, num_of_slaves; uint8_t slaves[RTE_MAX_ETHPORTS]; - uint16_t num_tx_total = 0; + uint16_t max_nb_of_tx_pkts = 0; - int i; + int slave_tx_total[RTE_MAX_ETHPORTS]; + int i, most_scuccesful_tx_slave; bd_tx_q = (struct bond_tx_queue *)queue; internals = bd_tx_q->dev_private; @@ -354,11 +380,32 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1); /* Transmit burst on each active slave */ - for (i = 0; i < num_of_slaves; i++) - num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, - bufs, nb_pkts); + for (i = 0; i < num_of_slaves; i++) { + slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + bufs, nb_pkts); - return num_tx_total; + if (unlikely(slave_tx_total[i] < nb_pkts)) + tx_failed_flag = 1; + + /* record the value and slave 
index for the slave which transmits the + * maximum number of packets */ + if (slave_tx_total[i] > max_nb_of_tx_pkts) { + max_nb_of_tx_pkts = slave_tx_total[i]; + most_scuccesful_tx_slave = i; + } + } + + /* if slaves fail to transmit packets from burst, the calling application + * is not expected to know about multiple references to packets so we must + * handle failures of all packets except those of the most successful slave + */ + if (unlikely(tx_failed_flag)) + for (i = 0; i < num_of_slaves; i++) + if (i != most_scuccesful_tx_slave) + while (slave_tx_total[i] < nb_pkts) + rte_pktmbuf_free(bufs[slave_tx_total[i]++]); + + return max_nb_of_tx_pkts; } void
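
Broadcast mode needs extra bookkeeping because every mbuf is handed to several slaves: the reference count is raised by (num_of_slaves - 1) before transmission, and when any slave fails, the surplus references are dropped for every slave except the most successful one, so the caller ends up owning exactly one reference to each packet reported as unsent. The following is a minimal, DPDK-free sketch of that bookkeeping; the pkt type, pkt_ref_update(), pkt_free() and fake_slave_tx() are hypothetical stand-ins for the rte_mbuf refcount API and a slave's tx burst, not this patch's code.

#include <stdio.h>

#define MAX_SLAVES 4

typedef struct pkt { int id; int refcnt; } pkt;

static void pkt_ref_update(pkt *p, int delta) { p->refcnt += delta; }
static void pkt_free(pkt *p) { p->refcnt--; }

/* pretend slave: "transmits" (drops one reference on) at most cap packets */
static int
fake_slave_tx(int cap, pkt **bufs, int nb_pkts)
{
        int sent = nb_pkts < cap ? nb_pkts : cap;
        int i;

        for (i = 0; i < sent; i++)
                pkt_free(bufs[i]);
        return sent;
}

static int
tx_burst_broadcast(pkt **bufs, int nb_pkts, int num_slaves,
                const int *slave_cap)
{
        int slave_tx_total[MAX_SLAVES];
        int max_tx = 0, best_slave = 0, tx_failed = 0;
        int i;

        /* one extra reference for every additional slave the packet visits */
        for (i = 0; i < nb_pkts; i++)
                pkt_ref_update(bufs[i], num_slaves - 1);

        for (i = 0; i < num_slaves; i++) {
                slave_tx_total[i] = fake_slave_tx(slave_cap[i], bufs, nb_pkts);

                if (slave_tx_total[i] < nb_pkts)
                        tx_failed = 1;

                if (slave_tx_total[i] > max_tx) {
                        max_tx = slave_tx_total[i];
                        best_slave = i;
                }
        }

        /* drop the references the less successful slaves never consumed; the
         * caller only handed in one reference per packet, so it must be left
         * owning at most one reference for each packet reported as unsent */
        if (tx_failed)
                for (i = 0; i < num_slaves; i++)
                        if (i != best_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        pkt_free(bufs[slave_tx_total[i]++]);

        return max_tx;
}

int
main(void)
{
        pkt pool[4];
        pkt *burst[4];
        int cap[3] = { 3, 2, 3 };       /* every slave fails at least one packet */
        int i, sent;

        for (i = 0; i < 4; i++) {
                pool[i].id = i;
                pool[i].refcnt = 1;     /* the single reference the caller owns */
                burst[i] = &pool[i];
        }

        sent = tx_burst_broadcast(burst, 4, 3, cap);

        printf("reported %d of 4 sent\n", sent);
        for (i = 0; i < 4; i++)
                printf("pkt %d refcnt %d\n", i, pool[i].refcnt);
        return 0;
}

Running this reports 3 of 4 packets sent and leaves only pkt 3 holding a single reference, mirroring the pattern the new broadcast unit test checks with verify_mbufs_ref_count() before freeing the failed tail of the burst.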