From patchwork Thu May 5 15:14:57 2016
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Iremonger, Bernard" <bernard.iremonger@intel.com>
X-Patchwork-Id: 12401
X-Patchwork-Delegate: bruce.richardson@intel.com
From: Bernard Iremonger <bernard.iremonger@intel.com>
To: dev@dpdk.org
Cc: declan.doherty@intel.com, Bernard Iremonger <bernard.iremonger@intel.com>
Date: Thu, 5 May 2016 16:14:57 +0100
Message-Id: <1462461300-9962-3-git-send-email-bernard.iremonger@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1462461300-9962-1-git-send-email-bernard.iremonger@intel.com>
References: <1462461300-9962-1-git-send-email-bernard.iremonger@intel.com>
Subject: [dpdk-dev] [PATCH 2/5] bonding: add read/write lock to rx/tx burst functions
List-Id: patches and discussions about DPDK <dev.dpdk.org>
Sender: "dev" <dev-bounces@dpdk.org>

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 112 +++++++++++++++++++++++++--------
 1 file changed, 85 insertions(+), 27 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index ed6245b..c3e772c 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -92,7 +92,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 	internals = bd_rx_q->dev_private;
 
-
+	rte_rwlock_read_lock(&internals->rwlock);
 	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
 		/* Offset of pointer to *bufs increases as packets are received
 		 * from other slaves */
@@ -103,6 +103,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			nb_pkts -= num_rx_slave;
 		}
 	}
+	rte_rwlock_read_unlock(&internals->rwlock);
 
 	return num_rx_total;
 }
@@ -112,14 +113,20 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
 {
 	struct bond_dev_private *internals;
+	uint16_t num_rx_total;
 
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 
 	internals = bd_rx_q->dev_private;
+	rte_rwlock_read_lock(&internals->rwlock);
+
+	num_rx_total = rte_eth_rx_burst(internals->current_primary_port,
+			bd_rx_q->queue_id, bufs, nb_pkts);
 
-	return rte_eth_rx_burst(internals->current_primary_port,
-			bd_rx_q->queue_id, bufs, nb_pkts);
+	rte_rwlock_read_unlock(&internals->rwlock);
+
+	return num_rx_total;
 }
 
 static uint16_t
@@ -149,12 +156,17 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
+	rte_rwlock_read_lock(&internals->rwlock);
+	for (i = 0; i < internals->active_slave_count && num_rx_total < nb_pkts;
+			i++) {
 		j = num_rx_total;
-		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
+		collecting = ACTOR_STATE(
+				&mode_8023ad_ports[internals->active_slaves[i]],
+				COLLECTING);
 
 		/* Read packets from this slave */
-		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
+		num_rx_total += rte_eth_rx_burst(internals->active_slaves[i],
+				bd_rx_q->queue_id,
 				&bufs[num_rx_total], nb_pkts - num_rx_total);
 
 		for (k = j; k < 2 && k < num_rx_total; k++)
@@ -175,7 +187,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
 				if (hdr->ether_type == ether_type_slow_be) {
-					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
+					bond_mode_8023ad_handle_slow_pkt(
+							internals,
+							internals->active_slaves[i],
 							bufs[j]);
 				} else
 					rte_pktmbuf_free(bufs[j]);
@@ -190,6 +204,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 				j++;
 		}
 	}
+	rte_rwlock_read_unlock(&internals->rwlock);
 
 	return num_rx_total;
 }
@@ -408,12 +423,14 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
+	rte_rwlock_read_lock(&internals->rwlock);
 	num_of_slaves = internals->active_slave_count;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_rwlock_read_unlock(&internals->rwlock);
 		return num_tx_total;
+	}
 
 	/* Populate slaves mbuf with which packets are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
@@ -428,8 +445,10 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-					slave_bufs[i], slave_nb_pkts[i]);
+			num_tx_slave = rte_eth_tx_burst(
+					internals->active_slaves[i],
+					bd_tx_q->queue_id, slave_bufs[i],
+					slave_nb_pkts[i]);
 
 			/* if tx burst fails move packets to end of bufs */
 			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
@@ -444,6 +463,7 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 			num_tx_total += num_tx_slave;
 		}
 	}
+	rte_rwlock_read_unlock(&internals->rwlock);
 
 	return num_tx_total;
 }
@@ -454,15 +474,23 @@ bond_ethdev_tx_burst_active_backup(void *queue,
 {
 	struct bond_dev_private *internals;
 	struct bond_tx_queue *bd_tx_q;
+	uint16_t num_tx_total;
 
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
 
-	if (internals->active_slave_count < 1)
+	rte_rwlock_read_lock(&internals->rwlock);
+	if (internals->active_slave_count < 1) {
+		rte_rwlock_read_unlock(&internals->rwlock);
 		return 0;
+	}
+
+	num_tx_total = rte_eth_tx_burst(internals->current_primary_port,
+			bd_tx_q->queue_id, bufs, nb_pkts);
 
-	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
-			bufs, nb_pkts);
+	rte_rwlock_read_unlock(&internals->rwlock);
+
+	return num_tx_total;
 }
 
 static inline uint16_t
@@ -693,16 +721,19 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			&rte_eth_devices[internals->primary_port];
 	uint16_t num_tx_total = 0;
 	uint8_t i, j;
-
-	uint8_t num_of_slaves = internals->active_slave_count;
+	uint8_t num_of_slaves;
 	uint8_t slaves[RTE_MAX_ETHPORTS];
 
 	struct ether_hdr *ether_hdr;
 	struct ether_addr primary_slave_addr;
 	struct ether_addr active_slave_addr;
 
-	if (num_of_slaves < 1)
+	rte_rwlock_read_lock(&internals->rwlock);
+	num_of_slaves = internals->active_slave_count;
+	if (num_of_slaves < 1) {
+		rte_rwlock_read_unlock(&internals->rwlock);
 		return num_tx_total;
+	}
 
 	memcpy(slaves, internals->tlb_slaves_order,
 			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
@@ -716,7 +747,8 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 
 	for (i = 0; i < num_of_slaves; i++) {
-		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
+		rte_eth_macaddr_get(internals->tlb_slaves_order[i],
+				&active_slave_addr);
 		for (j = num_tx_total; j < nb_pkts; j++) {
 			if (j + 3 < nb_pkts)
 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
@@ -729,12 +761,15 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 			}
 
-			num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_total += rte_eth_tx_burst(
+					internals->tlb_slaves_order[i],
+					bd_tx_q->queue_id,
 					bufs + num_tx_total, nb_pkts - num_tx_total);
 
 			if (num_tx_total == nb_pkts)
 				break;
 	}
+	rte_rwlock_read_unlock(&internals->rwlock);
 
 	return num_tx_total;
 }
@@ -836,6 +871,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		internals->mode6.ntt = 0;
 	}
 
+	rte_rwlock_read_lock(&internals->rwlock);
+
 	/* Send ARP packets on proper slaves */
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
 		if (slave_bufs_pkts[i] > 0) {
@@ -876,6 +913,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 	}
 
+	rte_rwlock_read_unlock(&internals->rwlock);
+
 	/* Send non-ARP packets using tlb policy */
 	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
 		num_send = bond_ethdev_tx_burst_tlb(queue,
@@ -916,12 +955,16 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
+	rte_rwlock_read_lock(&internals->rwlock);
 	num_of_slaves = internals->active_slave_count;
+
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * num_of_slaves);
 
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_rwlock_read_unlock(&internals->rwlock);
 		return num_tx_total;
+	}
 
 	/* Populate slaves mbuf with the packets which are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
@@ -935,7 +978,9 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_slave = rte_eth_tx_burst(
+					internals->active_slaves[i],
+					bd_tx_q->queue_id,
 					slave_bufs[i], slave_nb_pkts[i]);
 
 			/* if tx burst fails move packets to end of bufs */
@@ -952,6 +997,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 		}
 	}
 
+	rte_rwlock_read_unlock(&internals->rwlock);
 	return num_tx_total;
 }
 
@@ -986,15 +1032,20 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
+	rte_rwlock_read_lock(&internals->rwlock);
 	num_of_slaves = internals->active_slave_count;
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_rwlock_read_unlock(&internals->rwlock);
 		return num_tx_total;
+	}
 
 	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
 
 	distributing_count = 0;
 	for (i = 0; i < num_of_slaves; i++) {
-		struct port *port = &mode_8023ad_ports[slaves[i]];
+		struct port *port;
+
+		port = &mode_8023ad_ports[internals->active_slaves[i]];
 		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
 				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
@@ -1026,7 +1077,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		if (slave_nb_pkts[i] == 0)
 			continue;
 
-		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+		num_tx_slave = rte_eth_tx_burst(
+				internals->active_slaves[i], bd_tx_q->queue_id,
 				slave_bufs[i], slave_nb_pkts[i]);
 
 		/* If tx burst fails drop slow packets */
@@ -1044,6 +1096,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		}
 	}
 
+	rte_rwlock_read_unlock(&internals->rwlock);
 	return num_tx_total;
 }
 
@@ -1067,12 +1120,15 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
+	rte_rwlock_read_lock(&internals->rwlock);
 	num_of_slaves = internals->active_slave_count;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * num_of_slaves);
 
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_rwlock_read_unlock(&internals->rwlock);
 		return 0;
+	}
 
 	/* Increment reference count on mbufs */
 	for (i = 0; i < nb_pkts; i++)
@@ -1080,8 +1136,9 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 
 	/* Transmit burst on each active slave */
 	for (i = 0; i < num_of_slaves; i++) {
-		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-				bufs, nb_pkts);
+		slave_tx_total[i] = rte_eth_tx_burst(
+				internals->active_slaves[i],
+				bd_tx_q->queue_id, bufs, nb_pkts);
 
 		if (unlikely(slave_tx_total[i] < nb_pkts))
 			tx_failed_flag = 1;
@@ -1104,6 +1161,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 			while (slave_tx_total[i] < nb_pkts)
 				rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
 
+	rte_rwlock_read_unlock(&internals->rwlock);
 	return max_nb_of_tx_pkts;
 }
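
For readers following the series: every hunk above applies one pattern, take
internals->rwlock for read across the whole burst and release it on every
early-return path. Below is a minimal standalone sketch of that pattern,
illustration only and not part of the patch. The demo_* names are
hypothetical, the rwlock is assumed to be initialised with rte_rwlock_init()
where the device is allocated (elsewhere in this series), and the write-side
function is an assumption about the control-path (slave add/remove) locking
the rest of the series is expected to provide.

#include <stdint.h>
#include <rte_rwlock.h>

#define DEMO_MAX_SLAVES 32

/* Hypothetical stand-in for the bond_dev_private fields the lock guards. */
struct demo_internals {
	rte_rwlock_t rwlock;		/* protects the active slave list */
	uint8_t active_slave_count;
	uint8_t active_slaves[DEMO_MAX_SLAVES];
};

/* Data path (read side): lock held for the whole burst, released on the
 * early-return path as well, mirroring the hunks above. */
static uint16_t
demo_burst(struct demo_internals *internals)
{
	uint16_t num_total = 0;
	uint8_t i;

	rte_rwlock_read_lock(&internals->rwlock);
	if (internals->active_slave_count < 1) {
		rte_rwlock_read_unlock(&internals->rwlock);
		return 0;
	}
	for (i = 0; i < internals->active_slave_count; i++) {
		/* per-slave rte_eth_rx_burst()/rte_eth_tx_burst() work
		 * would go here */
		num_total++;
	}
	rte_rwlock_read_unlock(&internals->rwlock);

	return num_total;
}

/* Control path (write side, assumed): hold the write lock while mutating
 * the list so no in-flight burst observes a half-updated slave array. */
static void
demo_deactivate_slave(struct demo_internals *internals, uint8_t idx)
{
	rte_rwlock_write_lock(&internals->rwlock);
	if (internals->active_slave_count > 0) {
		/* swap-remove: overwrite slot idx with the last active slave */
		internals->active_slaves[idx] =
			internals->active_slaves[--internals->active_slave_count];
	}
	rte_rwlock_write_unlock(&internals->rwlock);
}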