get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).
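
The exchange below can be reproduced with any HTTP client. As a minimal sketch only (assuming Python 3 with the third-party requests package; the API token and the field values in the commented-out update are placeholders, not part of this page), the following reads this patch and shows where an authenticated update would go; the raw GET exchange it corresponds to is reproduced after it.

# Sketch: read and (with credentials) update a patch via the Patchwork REST API.
import requests

BASE = "http://patches.dpdk.org/api"

# GET /api/patches/12401/ -- read access needs no authentication.
resp = requests.get(f"{BASE}/patches/12401/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["submitter"]["email"])

# PATCH /api/patches/12401/ -- write access is restricted; the token below is
# a placeholder and the set of writable fields depends on your permissions.
# headers = {"Authorization": "Token YOUR_API_TOKEN"}
# requests.patch(f"{BASE}/patches/12401/", headers=headers,
#                json={"state": "superseded"})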

GET /api/patches/12401/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 12401,
    "url": "http://patches.dpdk.org/api/patches/12401/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1462461300-9962-3-git-send-email-bernard.iremonger@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1462461300-9962-3-git-send-email-bernard.iremonger@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1462461300-9962-3-git-send-email-bernard.iremonger@intel.com",
    "date": "2016-05-05T15:14:57",
    "name": "[dpdk-dev,2/5] bonding: add read/write lock to rx/tx burst functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c0171d935c7124517661711dad5facdff21ba510",
    "submitter": {
        "id": 91,
        "url": "http://patches.dpdk.org/api/people/91/?format=api",
        "name": "Iremonger, Bernard",
        "email": "bernard.iremonger@intel.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1462461300-9962-3-git-send-email-bernard.iremonger@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/12401/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/12401/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id C0A545947;\n\tThu,  5 May 2016 17:38:21 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n\tby dpdk.org (Postfix) with ESMTP id 86E3758F1\n\tfor <dev@dpdk.org>; Thu,  5 May 2016 17:38:19 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n\tby orsmga103.jf.intel.com with ESMTP; 05 May 2016 08:38:18 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby orsmga003.jf.intel.com with ESMTP; 05 May 2016 08:38:10 -0700",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tu45FF7hq016508; Thu, 5 May 2016 16:15:08 +0100",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id u45FF7CO010060;\n\tThu, 5 May 2016 16:15:07 +0100",
            "(from bairemon@localhost)\n\tby sivswdev01.ir.intel.com with  id u45FF7PH010056;\n\tThu, 5 May 2016 16:15:07 +0100"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.24,582,1455004800\"; d=\"scan'208\";a=\"799609690\"",
        "From": "Bernard Iremonger <bernard.iremonger@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "declan.doherty@intel.com, Bernard Iremonger <bernard.iremonger@intel.com>",
        "Date": "Thu,  5 May 2016 16:14:57 +0100",
        "Message-Id": "<1462461300-9962-3-git-send-email-bernard.iremonger@intel.com>",
        "X-Mailer": "git-send-email 1.7.4.1",
        "In-Reply-To": "<1462461300-9962-1-git-send-email-bernard.iremonger@intel.com>",
        "References": "<1462461300-9962-1-git-send-email-bernard.iremonger@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 2/5] bonding: add read/write lock to rx/tx burst\n\tfunctions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>\n---\n drivers/net/bonding/rte_eth_bond_pmd.c | 112 +++++++++++++++++++++++++--------\n 1 file changed, 85 insertions(+), 27 deletions(-)",
    "diff": "diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c\nindex ed6245b..c3e772c 100644\n--- a/drivers/net/bonding/rte_eth_bond_pmd.c\n+++ b/drivers/net/bonding/rte_eth_bond_pmd.c\n@@ -92,7 +92,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \n \tinternals = bd_rx_q->dev_private;\n \n-\n+\trte_rwlock_read_lock(&internals->rwlock);\n \tfor (i = 0; i < internals->active_slave_count && nb_pkts; i++) {\n \t\t/* Offset of pointer to *bufs increases as packets are received\n \t\t * from other slaves */\n@@ -103,6 +103,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\tnb_pkts -= num_rx_slave;\n \t\t}\n \t}\n+\trte_rwlock_read_unlock(&internals->rwlock);\n \n \treturn num_rx_total;\n }\n@@ -112,14 +113,20 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,\n \t\tuint16_t nb_pkts)\n {\n \tstruct bond_dev_private *internals;\n+\tuint16_t num_rx_total;\n \n \t/* Cast to structure, containing bonded device's port id and queue id */\n \tstruct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;\n \n \tinternals = bd_rx_q->dev_private;\n+\trte_rwlock_read_lock(&internals->rwlock);\n+\n+\tnum_rx_total = rte_eth_rx_burst(internals->current_primary_port,\n+\t\t\t\t\tbd_rx_q->queue_id, bufs, nb_pkts);\n \n-\treturn rte_eth_rx_burst(internals->current_primary_port,\n-\t\t\tbd_rx_q->queue_id, bufs, nb_pkts);\n+\trte_rwlock_read_unlock(&internals->rwlock);\n+\n+\treturn num_rx_total;\n }\n \n static uint16_t\n@@ -149,12 +156,17 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \tmemcpy(slaves, internals->active_slaves,\n \t\t\tsizeof(internals->active_slaves[0]) * slave_count);\n \n-\tfor (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {\n+\trte_rwlock_read_lock(&internals->rwlock);\n+\tfor (i = 0; i < internals->active_slave_count && num_rx_total < nb_pkts;\n+\t\t i++) {\n \t\tj = num_rx_total;\n-\t\tcollecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);\n+\t\tcollecting = ACTOR_STATE(\n+\t\t\t\t&mode_8023ad_ports[internals->active_slaves[i]],\n+\t\t\t\tCOLLECTING);\n \n \t\t/* Read packets from this slave */\n-\t\tnum_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,\n+\t\tnum_rx_total += rte_eth_rx_burst(internals->active_slaves[i],\n+\t\t\t\tbd_rx_q->queue_id,\n \t\t\t\t&bufs[num_rx_total], nb_pkts - num_rx_total);\n \n \t\tfor (k = j; k < 2 && k < num_rx_total; k++)\n@@ -175,7 +187,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \t\t\t\t\t!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {\n \n \t\t\t\tif (hdr->ether_type == ether_type_slow_be) {\n-\t\t\t\t\tbond_mode_8023ad_handle_slow_pkt(internals, slaves[i],\n+\t\t\t\t\tbond_mode_8023ad_handle_slow_pkt(\n+\t\t\t\t\t\tinternals,\n+\t\t\t\t\t\tinternals->active_slaves[i],\n \t\t\t\t\t\tbufs[j]);\n \t\t\t\t} else\n \t\t\t\t\trte_pktmbuf_free(bufs[j]);\n@@ -190,6 +204,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \t\t\t\tj++;\n \t\t}\n \t}\n+\trte_rwlock_read_unlock(&internals->rwlock);\n \n \treturn num_rx_total;\n }\n@@ -408,12 +423,14 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,\n \n \t/* Copy slave list to protect against slave up/down changes during tx\n \t * bursting */\n+\trte_rwlock_read_lock(&internals->rwlock);\n \tnum_of_slaves = internals->active_slave_count;\n \tmemcpy(slaves, internals->active_slaves,\n \t\t\tsizeof(internals->active_slaves[0]) * 
num_of_slaves);\n-\n-\tif (num_of_slaves < 1)\n+\tif (num_of_slaves < 1) {\n+\t\trte_rwlock_read_unlock(&internals->rwlock);\n \t\treturn num_tx_total;\n+\t}\n \n \t/* Populate slaves mbuf with which packets are to be sent on it  */\n \tfor (i = 0; i < nb_pkts; i++) {\n@@ -428,8 +445,10 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,\n \t/* Send packet burst on each slave device */\n \tfor (i = 0; i < num_of_slaves; i++) {\n \t\tif (slave_nb_pkts[i] > 0) {\n-\t\t\tnum_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,\n-\t\t\t\t\tslave_bufs[i], slave_nb_pkts[i]);\n+\t\t\tnum_tx_slave = rte_eth_tx_burst(\n+\t\t\t\t\tinternals->active_slaves[i],\n+\t\t\t\t\tbd_tx_q->queue_id, slave_bufs[i],\n+\t\t\t\t\tslave_nb_pkts[i]);\n \n \t\t\t/* if tx burst fails move packets to end of bufs */\n \t\t\tif (unlikely(num_tx_slave < slave_nb_pkts[i])) {\n@@ -444,6 +463,7 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,\n \t\t\tnum_tx_total += num_tx_slave;\n \t\t}\n \t}\n+\trte_rwlock_read_unlock(&internals->rwlock);\n \n \treturn num_tx_total;\n }\n@@ -454,15 +474,23 @@ bond_ethdev_tx_burst_active_backup(void *queue,\n {\n \tstruct bond_dev_private *internals;\n \tstruct bond_tx_queue *bd_tx_q;\n+\tuint16_t num_tx_total;\n \n \tbd_tx_q = (struct bond_tx_queue *)queue;\n \tinternals = bd_tx_q->dev_private;\n \n-\tif (internals->active_slave_count < 1)\n+\trte_rwlock_read_lock(&internals->rwlock);\n+\tif (internals->active_slave_count < 1) {\n+\t\trte_rwlock_read_unlock(&internals->rwlock);\n \t\treturn 0;\n+\t}\n+\n+\tnum_tx_total = rte_eth_tx_burst(internals->current_primary_port,\n+\t\t\t\t\tbd_tx_q->queue_id, bufs, nb_pkts);\n \n-\treturn rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,\n-\t\t\tbufs, nb_pkts);\n+\trte_rwlock_read_unlock(&internals->rwlock);\n+\n+\treturn num_tx_total;\n }\n \n static inline uint16_t\n@@ -693,16 +721,19 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\t&rte_eth_devices[internals->primary_port];\n \tuint16_t num_tx_total = 0;\n \tuint8_t i, j;\n-\n-\tuint8_t num_of_slaves = internals->active_slave_count;\n+\tuint8_t num_of_slaves;\n \tuint8_t slaves[RTE_MAX_ETHPORTS];\n \n \tstruct ether_hdr *ether_hdr;\n \tstruct ether_addr primary_slave_addr;\n \tstruct ether_addr active_slave_addr;\n \n-\tif (num_of_slaves < 1)\n+\trte_rwlock_read_lock(&internals->rwlock);\n+\tnum_of_slaves = internals->active_slave_count;\n+\tif (num_of_slaves < 1) {\n+\t\trte_rwlock_read_unlock(&internals->rwlock);\n \t\treturn num_tx_total;\n+\t}\n \n \tmemcpy(slaves, internals->tlb_slaves_order,\n \t\t\t\tsizeof(internals->tlb_slaves_order[0]) * num_of_slaves);\n@@ -716,7 +747,8 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t}\n \n \tfor (i = 0; i < num_of_slaves; i++) {\n-\t\trte_eth_macaddr_get(slaves[i], &active_slave_addr);\n+\t\trte_eth_macaddr_get(internals->tlb_slaves_order[i],\n+\t\t\t\t&active_slave_addr);\n \t\tfor (j = num_tx_total; j < nb_pkts; j++) {\n \t\t\tif (j + 3 < nb_pkts)\n \t\t\t\trte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));\n@@ -729,12 +761,15 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n #endif\n \t\t}\n \n-\t\tnum_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,\n+\t\tnum_tx_total += rte_eth_tx_burst(\n+\t\t\t\tinternals->tlb_slaves_order[i],\n+\t\t\t\tbd_tx_q->queue_id,\n \t\t\t\tbufs + num_tx_total, nb_pkts - num_tx_total);\n \n \t\tif (num_tx_total == nb_pkts)\n 
\t\t\tbreak;\n \t}\n+\trte_rwlock_read_unlock(&internals->rwlock);\n \n \treturn num_tx_total;\n }\n@@ -836,6 +871,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\tinternals->mode6.ntt = 0;\n \t}\n \n+\trte_rwlock_read_lock(&internals->rwlock);\n+\n \t/* Send ARP packets on proper slaves */\n \tfor (i = 0; i < RTE_MAX_ETHPORTS; i++) {\n \t\tif (slave_bufs_pkts[i] > 0) {\n@@ -876,6 +913,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t}\n \t}\n \n+\trte_rwlock_read_unlock(&internals->rwlock);\n+\n \t/* Send non-ARP packets using tlb policy */\n \tif (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {\n \t\tnum_send = bond_ethdev_tx_burst_tlb(queue,\n@@ -916,12 +955,16 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,\n \n \t/* Copy slave list to protect against slave up/down changes during tx\n \t * bursting */\n+\trte_rwlock_read_lock(&internals->rwlock);\n \tnum_of_slaves = internals->active_slave_count;\n+\n \tmemcpy(slaves, internals->active_slaves,\n \t\t\tsizeof(internals->active_slaves[0]) * num_of_slaves);\n \n-\tif (num_of_slaves < 1)\n+\tif (num_of_slaves < 1) {\n+\t\trte_rwlock_read_unlock(&internals->rwlock);\n \t\treturn num_tx_total;\n+\t}\n \n \t/* Populate slaves mbuf with the packets which are to be sent on it  */\n \tfor (i = 0; i < nb_pkts; i++) {\n@@ -935,7 +978,9 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,\n \t/* Send packet burst on each slave device */\n \tfor (i = 0; i < num_of_slaves; i++) {\n \t\tif (slave_nb_pkts[i] > 0) {\n-\t\t\tnum_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,\n+\t\t\tnum_tx_slave = rte_eth_tx_burst(\n+\t\t\t\t\tinternals->active_slaves[i],\n+\t\t\t\t\tbd_tx_q->queue_id,\n \t\t\t\t\tslave_bufs[i], slave_nb_pkts[i]);\n \n \t\t\t/* if tx burst fails move packets to end of bufs */\n@@ -952,6 +997,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,\n \t\t}\n \t}\n \n+\trte_rwlock_read_unlock(&internals->rwlock);\n \treturn num_tx_total;\n }\n \n@@ -986,15 +1032,20 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \n \t/* Copy slave list to protect against slave up/down changes during tx\n \t * bursting */\n+\trte_rwlock_read_lock(&internals->rwlock);\n \tnum_of_slaves = internals->active_slave_count;\n-\tif (num_of_slaves < 1)\n+\tif (num_of_slaves < 1) {\n+\t\trte_rwlock_read_unlock(&internals->rwlock);\n \t\treturn num_tx_total;\n+\t}\n \n \tmemcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);\n \n \tdistributing_count = 0;\n \tfor (i = 0; i < num_of_slaves; i++) {\n-\t\tstruct port *port = &mode_8023ad_ports[slaves[i]];\n+\t\tstruct port *port;\n+\n+\t\tport = &mode_8023ad_ports[internals->active_slaves[i]];\n \n \t\tslave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,\n \t\t\t\tslow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);\n@@ -1026,7 +1077,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \t\tif (slave_nb_pkts[i] == 0)\n \t\t\tcontinue;\n \n-\t\tnum_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,\n+\t\tnum_tx_slave = rte_eth_tx_burst(\n+\t\t\t\tinternals->active_slaves[i], bd_tx_q->queue_id,\n \t\t\t\tslave_bufs[i], slave_nb_pkts[i]);\n \n \t\t/* If tx burst fails drop slow packets */\n@@ -1044,6 +1096,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,\n \t\t}\n \t}\n \n+\trte_rwlock_read_unlock(&internals->rwlock);\n \treturn num_tx_total;\n }\n \n@@ -1067,12 +1120,15 @@ 
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,\n \n \t/* Copy slave list to protect against slave up/down changes during tx\n \t * bursting */\n+\trte_rwlock_read_lock(&internals->rwlock);\n \tnum_of_slaves = internals->active_slave_count;\n \tmemcpy(slaves, internals->active_slaves,\n \t\t\tsizeof(internals->active_slaves[0]) * num_of_slaves);\n \n-\tif (num_of_slaves < 1)\n+\tif (num_of_slaves < 1) {\n+\t\trte_rwlock_read_unlock(&internals->rwlock);\n \t\treturn 0;\n+\t}\n \n \t/* Increment reference count on mbufs */\n \tfor (i = 0; i < nb_pkts; i++)\n@@ -1080,8 +1136,9 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,\n \n \t/* Transmit burst on each active slave */\n \tfor (i = 0; i < num_of_slaves; i++) {\n-\t\tslave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,\n-\t\t\t\t\tbufs, nb_pkts);\n+\t\tslave_tx_total[i] = rte_eth_tx_burst(\n+\t\t\t\tinternals->active_slaves[i],\n+\t\t\t\tbd_tx_q->queue_id, bufs, nb_pkts);\n \n \t\tif (unlikely(slave_tx_total[i] < nb_pkts))\n \t\t\ttx_failed_flag = 1;\n@@ -1104,6 +1161,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,\n \t\t\t\twhile (slave_tx_total[i] < nb_pkts)\n \t\t\t\t\trte_pktmbuf_free(bufs[slave_tx_total[i]++]);\n \n+\trte_rwlock_read_unlock(&internals->rwlock);\n \treturn max_nb_of_tx_pkts;\n }\n \n",
    "prefixes": [
        "dpdk-dev",
        "2/5"
    ]
}
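
The response also links to further per-patch resources: mbox (the raw patch email), comments, and checks. As an illustrative sketch only (again assuming Python 3 with requests; the output file name is arbitrary and chosen here just for the example), the mbox URL returned above can be downloaded and then applied to a source tree with git am:

# Sketch: download the mbox referenced by the JSON response above.
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/12401/").json()
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()

with open("dpdk-dev-2-5-bonding-rwlock.mbox", "wb") as f:
    f.write(mbox.content)

# Apply with: git am dpdk-dev-2-5-bonding-rwlock.mbox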