get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/78394/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 78394,
    "url": "https://patches.dpdk.org/api/patches/78394/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1600770572-22716-1-git-send-email-lirongqing@baidu.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1600770572-22716-1-git-send-email-lirongqing@baidu.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1600770572-22716-1-git-send-email-lirongqing@baidu.com",
    "date": "2020-09-22T10:29:31",
    "name": "[1/2] net/bonding: fix a possible unbalance packet receiving",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "c94e8a9bd8dd3db88dcbcbfbaaab4d16a23cef80",
    "submitter": {
        "id": 1979,
        "url": "https://patches.dpdk.org/api/people/1979/?format=api",
        "name": "Li RongQing",
        "email": "lirongqing@baidu.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1600770572-22716-1-git-send-email-lirongqing@baidu.com/mbox/",
    "series": [
        {
            "id": 12406,
            "url": "https://patches.dpdk.org/api/series/12406/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=12406",
            "date": "2020-09-22T10:29:32",
            "name": "[1/2] net/bonding: fix a possible unbalance packet receiving",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/12406/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/78394/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/78394/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A23DCA04B5;\n\tTue, 22 Sep 2020 12:29:42 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 464FB1DBB8;\n\tTue, 22 Sep 2020 12:29:37 +0200 (CEST)",
            "from tc-sys-mailedm04.tc.baidu.com (mx56.baidu.com [61.135.168.56])\n by dpdk.org (Postfix) with ESMTP id E385B1DB4C\n for <dev@dpdk.org>; Tue, 22 Sep 2020 12:29:34 +0200 (CEST)",
            "from localhost (cp01-cos-dev01.cp01.baidu.com [10.92.119.46])\n by tc-sys-mailedm04.tc.baidu.com (Postfix) with ESMTP id B7867236C002\n for <dev@dpdk.org>; Tue, 22 Sep 2020 18:29:29 +0800 (CST)"
        ],
        "From": "Li RongQing <lirongqing@baidu.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue, 22 Sep 2020 18:29:31 +0800",
        "Message-Id": "<1600770572-22716-1-git-send-email-lirongqing@baidu.com>",
        "X-Mailer": "git-send-email 1.7.1",
        "Subject": "[dpdk-dev] [PATCH 1/2] net/bonding: fix a possible unbalance packet\n\treceiving",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Current Rx round robin policy for the slaves has two issue:\n\n1. active_slave in bond_dev_private is shared by multiple PMDS\nwhich maybe cause some slave Rx hungry, for example, there\nis two PMD and two slave port, both PMDs start to receive, and\nsee that active_slave is 0, and receive from slave 0, after\ncomplete, they increase active_slave by one, totally active_slave\nare increased by two, next time, they will start to receive\nfrom slave 0 again, at last, slave 1 maybe drop packets during\nto not be polled by PMD\n\n2. active_slave is shared and written by multiple PMD in RX path\nfor every time RX, this is a kind of cache false share, low\nperformance.\n\nso move active_slave from bond_dev_private to bond_rx_queue\nmake it as per queue variable\n\nSigned-off-by: Li RongQing <lirongqing@baidu.com>\nSigned-off-by: Dongsheng Rong <rongdongsheng@baidu.com>\n---\n drivers/net/bonding/eth_bond_private.h |  3 ++-\n drivers/net/bonding/rte_eth_bond_api.c |  6 ------\n drivers/net/bonding/rte_eth_bond_pmd.c | 14 +++++++-------\n 3 files changed, 9 insertions(+), 14 deletions(-)",
    "diff": "diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h\nindex c9b2d0fe4..af92a4c52 100644\n--- a/drivers/net/bonding/eth_bond_private.h\n+++ b/drivers/net/bonding/eth_bond_private.h\n@@ -50,6 +50,8 @@ extern const struct rte_flow_ops bond_flow_ops;\n /** Port Queue Mapping Structure */\n struct bond_rx_queue {\n \tuint16_t queue_id;\n+\t/**< Next active_slave to poll */\n+\tuint16_t active_slave;\n \t/**< Queue Id */\n \tstruct bond_dev_private *dev_private;\n \t/**< Reference to eth_dev private structure */\n@@ -132,7 +134,6 @@ struct bond_dev_private {\n \tuint16_t nb_rx_queues;\t\t\t/**< Total number of rx queues */\n \tuint16_t nb_tx_queues;\t\t\t/**< Total number of tx queues*/\n \n-\tuint16_t active_slave;\t\t/**< Next active_slave to poll */\n \tuint16_t active_slave_count;\t\t/**< Number of active slaves */\n \tuint16_t active_slaves[RTE_MAX_ETHPORTS];    /**< Active slave list */\n \ndiff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c\nindex 97c667e00..a4007fe07 100644\n--- a/drivers/net/bonding/rte_eth_bond_api.c\n+++ b/drivers/net/bonding/rte_eth_bond_api.c\n@@ -129,12 +129,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)\n \tRTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));\n \tinternals->active_slave_count = active_count;\n \n-\t/* Resetting active_slave when reaches to max\n-\t * no of slaves in active list\n-\t */\n-\tif (internals->active_slave >= active_count)\n-\t\tinternals->active_slave = 0;\n-\n \tif (eth_dev->data->dev_started) {\n \t\tif (internals->mode == BONDING_MODE_8023AD) {\n \t\t\tbond_mode_8023ad_start(eth_dev);\ndiff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c\nindex cdbd8151e..146d4dc4a 100644\n--- a/drivers/net/bonding/rte_eth_bond_pmd.c\n+++ b/drivers/net/bonding/rte_eth_bond_pmd.c\n@@ -69,7 +69,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t 
nb_pkts)\n \tstruct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;\n \tinternals = bd_rx_q->dev_private;\n \tslave_count = internals->active_slave_count;\n-\tactive_slave = internals->active_slave;\n+\tactive_slave = bd_rx_q->active_slave;\n \n \tfor (i = 0; i < slave_count && nb_pkts; i++) {\n \t\tuint16_t num_rx_slave;\n@@ -86,8 +86,8 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t\tactive_slave = 0;\n \t}\n \n-\tif (++internals->active_slave >= slave_count)\n-\t\tinternals->active_slave = 0;\n+\tif (++bd_rx_q->active_slave >= slave_count)\n+\t\tbd_rx_q->active_slave = 0;\n \treturn num_rx_total;\n }\n \n@@ -303,9 +303,9 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,\n \tmemcpy(slaves, internals->active_slaves,\n \t\t\tsizeof(internals->active_slaves[0]) * slave_count);\n \n-\tidx = internals->active_slave;\n+\tidx = bd_rx_q->active_slave;\n \tif (idx >= slave_count) {\n-\t\tinternals->active_slave = 0;\n+\t\tbd_rx_q->active_slave = 0;\n \t\tidx = 0;\n \t}\n \tfor (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {\n@@ -367,8 +367,8 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,\n \t\t\tidx = 0;\n \t}\n \n-\tif (++internals->active_slave >= slave_count)\n-\t\tinternals->active_slave = 0;\n+\tif (++bd_rx_q->active_slave >= slave_count)\n+\t\tbd_rx_q->active_slave = 0;\n \n \treturn num_rx_total;\n }\n",
    "prefixes": [
        "1/2"
    ]
}