From patchwork Thu May 26 16:38:44 2016
X-Patchwork-Submitter: "Iremonger, Bernard" <bernard.iremonger@intel.com>
X-Patchwork-Id: 13032
X-Patchwork-Delegate: bruce.richardson@intel.com
From: Bernard Iremonger <bernard.iremonger@intel.com>
To: dev@dpdk.org
Cc: declan.doherty@intel.com, konstantin.ananyev@intel.com,
	Bernard Iremonger <bernard.iremonger@intel.com>
Date: Thu, 26 May 2016 17:38:44 +0100
Message-Id: <1464280727-25752-4-git-send-email-bernard.iremonger@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1464280727-25752-1-git-send-email-bernard.iremonger@intel.com>
References: <1462461300-9962-1-git-send-email-bernard.iremonger@intel.com>
	<1464280727-25752-1-git-send-email-bernard.iremonger@intel.com>
Subject: [dpdk-dev] [PATCH v2 3/6] bonding: take queue spinlock in rx/tx
	burst functions

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 116 ++++++++++++++++++++++++---------
 1 file changed, 84 insertions(+), 32 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 2e624bb..93043ef 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -92,16 +92,22 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	internals = bd_rx_q->dev_private;
-
-	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
-		/* Offset of pointer to *bufs increases as packets are received
-		 * from other slaves */
-		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
-				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
-		if (num_rx_slave) {
-			num_rx_total += num_rx_slave;
-			nb_pkts -= num_rx_slave;
+	if (rte_spinlock_trylock(&bd_rx_q->lock)) {
+		for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
+			/* Offset of pointer to *bufs increases as packets
+			 * are received from other slaves
+			 */
+			num_rx_slave = rte_eth_rx_burst(
+					internals->active_slaves[i],
+					bd_rx_q->queue_id,
+					bufs + num_rx_total,
+					nb_pkts);
+			if (num_rx_slave) {
+				num_rx_total += num_rx_slave;
+				nb_pkts -= num_rx_slave;
+			}
 		}
+		rte_spinlock_unlock(&bd_rx_q->lock);
 	}
 
 	return num_rx_total;
@@ -112,14 +118,19 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
 {
 	struct bond_dev_private *internals;
+	uint16_t ret = 0;
 
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 
 	internals = bd_rx_q->dev_private;
 
-	return rte_eth_rx_burst(internals->current_primary_port,
-			bd_rx_q->queue_id, bufs, nb_pkts);
+	if (rte_spinlock_trylock(&bd_rx_q->lock)) {
+		ret = rte_eth_rx_burst(internals->current_primary_port,
+				bd_rx_q->queue_id, bufs, nb_pkts);
+		rte_spinlock_unlock(&bd_rx_q->lock);
+	}
+	return ret;
 }
 
 static uint16_t
@@ -143,8 +154,10 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	uint8_t i, j, k;
 
 	rte_eth_macaddr_get(internals->port_id, &bond_mac);
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
+
+	if (rte_spinlock_trylock(&bd_rx_q->lock) == 0)
+		return num_rx_total;
+
 	slave_count = internals->active_slave_count;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
@@ -190,7 +203,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 			j++;
 		}
 	}
-
+	rte_spinlock_unlock(&bd_rx_q->lock);
 	return num_rx_total;
 }
 
@@ -406,14 +419,19 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
 
+	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
+		return num_tx_total;
+
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	num_of_slaves = internals->active_slave_count;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * num_of_slaves);
 
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
+	}
 
 	/* Populate slaves mbuf with which packets are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
@@ -444,7 +462,7 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 			num_tx_total += num_tx_slave;
 		}
 	}
-
+	rte_spinlock_unlock(&bd_tx_q->lock);
 	return num_tx_total;
 }
 
@@ -454,15 +472,23 @@ bond_ethdev_tx_burst_active_backup(void *queue,
 		struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	struct bond_dev_private *internals;
 	struct bond_tx_queue *bd_tx_q;
+	uint16_t ret = 0;
 
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
 
-	if (internals->active_slave_count < 1)
-		return 0;
+	if (rte_spinlock_trylock(&bd_tx_q->lock)) {
+		if (internals->active_slave_count < 1) {
+			rte_spinlock_unlock(&bd_tx_q->lock);
+			return 0;
+		}
 
-	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
-			bufs, nb_pkts);
+		ret = rte_eth_tx_burst(internals->current_primary_port,
+				bd_tx_q->queue_id,
+				bufs, nb_pkts);
+		rte_spinlock_unlock(&bd_tx_q->lock);
+	}
+	return ret;
 }
 
 static inline uint16_t
@@ -694,20 +720,25 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint16_t num_tx_total = 0;
 	uint8_t i, j;
 
-	uint8_t num_of_slaves = internals->active_slave_count;
+	uint8_t num_of_slaves;
 	uint8_t slaves[RTE_MAX_ETHPORTS];
 
 	struct ether_hdr *ether_hdr;
 	struct ether_addr primary_slave_addr;
 	struct ether_addr active_slave_addr;
 
-	if (num_of_slaves < 1)
+	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
+	num_of_slaves = internals->active_slave_count;
+	if (num_of_slaves < 1) {
+		rte_spinlock_unlock(&bd_tx_q->lock);
+		return num_tx_total;
+	}
+
 	memcpy(slaves, internals->tlb_slaves_order,
 			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
 
-
 	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
 
 	if (nb_pkts > 3) {
@@ -735,7 +766,7 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		if (num_tx_total == nb_pkts)
 			break;
 	}
-
+	rte_spinlock_unlock(&bd_tx_q->lock);
 	return num_tx_total;
 }
 
@@ -785,6 +816,9 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 	int i, j;
 
+	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
+		return num_tx_total;
+
 	/* Search tx buffer for ARP packets and forward them to alb */
 	for (i = 0; i < nb_pkts; i++) {
 		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
@@ -875,6 +909,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 		}
 	}
+	rte_spinlock_unlock(&bd_tx_q->lock);
 
 	/* Send non-ARP packets using tlb policy */
 	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
@@ -914,14 +949,19 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
 
+	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
+		return num_tx_total;
+
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	num_of_slaves = internals->active_slave_count;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * num_of_slaves);
 
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
+	}
 
 	/* Populate slaves mbuf with the packets which are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
@@ -951,7 +991,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 			num_tx_total += num_tx_slave;
 		}
 	}
-
+	rte_spinlock_unlock(&bd_tx_q->lock);
 	return num_tx_total;
 }
 
@@ -984,17 +1024,24 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
 
+	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
+		return num_tx_total;
+
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
+	}
 
 	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
 
 	distributing_count = 0;
 	for (i = 0; i < num_of_slaves; i++) {
-		struct port *port = &mode_8023ad_ports[slaves[i]];
+		struct port *port;
+
+		port = &mode_8023ad_ports[internals->active_slaves[i]];
 		slave_slow_nb_pkts[i] =
 			rte_ring_dequeue_burst(port->tx_ring, slow_pkts,
 					BOND_MODE_8023AX_SLAVE_TX_PKTS);
@@ -1043,7 +1090,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 				bufs[j] = slave_bufs[i][num_tx_slave];
 		}
 	}
-
+	rte_spinlock_unlock(&bd_tx_q->lock);
 	return num_tx_total;
 }
 
@@ -1065,14 +1112,19 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
 
+	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
+		return 0;
+
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	num_of_slaves = internals->active_slave_count;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * num_of_slaves);
 
-	if (num_of_slaves < 1)
+	if (num_of_slaves < 1) {
+		rte_spinlock_unlock(&bd_tx_q->lock);
 		return 0;
+	}
 
 	/* Increment reference count on mbufs */
 	for (i = 0; i < nb_pkts; i++)
@@ -1093,6 +1145,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 			most_successful_tx_slave = i;
 		}
 	}
+	rte_spinlock_unlock(&bd_tx_q->lock);
 
 	/* if slaves fail to transmit packets from burst, the calling application
 	 * is not expected to know about multiple references to packets so we must
@@ -1819,7 +1872,6 @@ bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
 		bonded_eth_dev->data->dev_link.link_status = link_up;
 	}
-
 	return 0;
 }
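
Note on the locking pattern (illustrative only, not part of the patch): every rx/tx burst
function above takes the per-queue lock with rte_spinlock_trylock() and simply reports zero
packets when the lock is already held, so the data path never blocks; the control path is
the side expected to take the same lock and wait. A minimal sketch of that pairing is below,
assuming a per-queue rte_spinlock_t field as added by this series; the structure and function
names here are hypothetical, and only the rte_spinlock_*() calls are existing DPDK API.

/* Sketch, not part of the patch: trylock in the data path, blocking lock in control path. */
#include <stdint.h>
#include <rte_spinlock.h>

struct bond_queue_sketch {
	rte_spinlock_t lock;	/* plays the role of bd_rx_q->lock / bd_tx_q->lock */
	uint16_t queue_id;
};

/* Data path: never block; give up this poll if the lock is contended. */
static uint16_t
burst_sketch(struct bond_queue_sketch *q)
{
	uint16_t nb = 0;

	if (rte_spinlock_trylock(&q->lock) == 0)
		return 0;	/* control path holds the lock: no packets this call */
	/* ... rx/tx on the active slaves would happen here ... */
	rte_spinlock_unlock(&q->lock);
	return nb;
}

/* Control path: block until any in-flight burst has released the lock. */
static void
reconfigure_sketch(struct bond_queue_sketch *q)
{
	rte_spinlock_lock(&q->lock);
	/* safe to modify this queue's slave state here */
	rte_spinlock_unlock(&q->lock);
}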