get:
Show a patch.

patch:
Partially update a patch; only the fields supplied are changed.

put:
Update a patch.
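
For example, a patch can be fetched anonymously, and its writable fields (such as state or delegate) changed with an authenticated PUT or PATCH request. Below is a minimal sketch using Python and the requests library; the token value is a placeholder, and writes require a Patchwork account with maintainer rights on the project:

import requests

API = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # placeholder; substitute a real Patchwork API token

# GET: show a patch (reads need no authentication).
patch = requests.get(API + "/patches/66373/").json()
print(patch["name"], patch["state"])

# PATCH: partial update -- only the fields supplied are changed.
resp = requests.patch(API + "/patches/66373/",
                      headers={"Authorization": "Token " + TOKEN},
                      json={"state": "accepted"})
resp.raise_for_status()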

GET /api/patches/66373/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 66373,
    "url": "http://patches.dpdk.org/api/patches/66373/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200309065106.23800-5-mattias.ronnblom@ericsson.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200309065106.23800-5-mattias.ronnblom@ericsson.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200309065106.23800-5-mattias.ronnblom@ericsson.com",
    "date": "2020-03-09T06:51:02",
    "name": "[4/8] event/dsw: improve migration mechanism",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ce0dc764a1b2af75a74e3b98a4b8069e745c7c41",
    "submitter": {
        "id": 1077,
        "url": "http://patches.dpdk.org/api/people/1077/?format=api",
        "name": "Mattias Rönnblom",
        "email": "mattias.ronnblom@ericsson.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200309065106.23800-5-mattias.ronnblom@ericsson.com/mbox/",
    "series": [
        {
            "id": 8828,
            "url": "http://patches.dpdk.org/api/series/8828/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8828",
            "date": "2020-03-09T06:51:04",
            "name": "DSW performance and statistics improvements",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/8828/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/66373/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/66373/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C4A3FA052E;\n\tMon,  9 Mar 2020 07:52:31 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2A1FD1C028;\n\tMon,  9 Mar 2020 07:51:56 +0100 (CET)",
            "from EUR04-HE1-obe.outbound.protection.outlook.com\n (mail-eopbgr70058.outbound.protection.outlook.com [40.107.7.58])\n by dpdk.org (Postfix) with ESMTP id 4F29A1BFF3\n for <dev@dpdk.org>; Mon,  9 Mar 2020 07:51:46 +0100 (CET)",
            "from AM6PR05CA0022.eurprd05.prod.outlook.com (2603:10a6:20b:2e::35)\n by DB7PR07MB5879.eurprd07.prod.outlook.com (2603:10a6:10:58::14) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.2814.9; Mon, 9 Mar\n 2020 06:51:44 +0000",
            "from AM5EUR02FT046.eop-EUR02.prod.protection.outlook.com\n (2603:10a6:20b:2e:cafe::e8) by AM6PR05CA0022.outlook.office365.com\n (2603:10a6:20b:2e::35) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.2793.16 via Frontend\n Transport; Mon, 9 Mar 2020 06:51:44 +0000",
            "from oa.msg.ericsson.com (192.176.1.74) by\n AM5EUR02FT046.mail.protection.outlook.com (10.152.8.221) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256) id\n 15.20.2793.11 via Frontend Transport; Mon, 9 Mar 2020 06:51:44 +0000",
            "from ESESSMB503.ericsson.se (153.88.183.164) by\n ESESBMR506.ericsson.se (153.88.183.202) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id\n 15.1.1713.5; Mon, 9 Mar 2020 07:51:43 +0100",
            "from selio1a020.lmera.ericsson.se (153.88.183.153) by\n smtp.internal.ericsson.com (153.88.183.191) with Microsoft SMTP Server id\n 15.1.1713.5 via Frontend Transport; Mon, 9 Mar 2020 07:51:43 +0100",
            "from breslau.lmera.ericsson.se (breslau.lmera.ericsson.se\n [150.132.109.241])\n by selio1a020.lmera.ericsson.se (8.15.1+Sun/8.15.1) with ESMTP id\n 0296pgIO024126; Mon, 9 Mar 2020 07:51:43 +0100 (CET)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=XKrTL193v1O4Pd0Xy8ka7HH/rqR9Pd9cGHv+JS8XtuFVNUeu7fjz5hsp992VfZLlo2xKSfCdGeBqmAYEHM32LjfZab4SGjzCQHdfffc7IA0JuQRSC9F5e3NF6N6DHlY57bHRVPZiA3wr8zbQL9JtQWvQdWHtMUHEeNPE3pT6g8xYiuvbP4ecf74NfJtSr0JIafL2D+bFhlwwtO7QCaU+zBWheZcJztlPs5dZ5UaL3rs0X9v846MNyYBTHRCqfN947kNLMdHpHJPTe+rzypBUp+PbYnTl7D7TZLG0IJyNdRMYdwbXVisa0zpiMfe72kpH7omBZQGoTd2O0fdLqg8Baw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=ukOEtM3lDK2oodX6FT7Jmx10OuT/UuARfNtrhpazi8Q=;\n b=KgGJTNvfp1ymku/XTM1e0q1JP3rwsPRkRAq1/7g4fwrmj2UIab5ZYriAou6gGpdggTGfwkj5CUAuKs46OJ2qmyTkruuEuAPTpFwypWw0t1SWJxvkj6NnzDjMZP6d+a7oXEqRZSNlALuXhlrsqsx/yRmC60i2SUGVmHRSoZkZ+U8W5lQma3y++xGcZi42qRXxLux0HYyPq4ZJRPm6z9BpQd/ZZWBqa53BmQQ8iW9F1BxOC2VOkqlDWso+WSbxdfkctpXwMnIfxlBZDO7EC8BPoe1jtPWCR5ezLuQ5uT0bMJg+euPxaMclYTPfnu/lIq7DictElgP05IUDdyRmayE5wA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 192.176.1.74) smtp.rcpttodomain=arm.com smtp.mailfrom=ericsson.com;\n dmarc=pass (p=reject sp=none pct=100) action=none header.from=ericsson.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=ericsson.com;\n s=selector1;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=ukOEtM3lDK2oodX6FT7Jmx10OuT/UuARfNtrhpazi8Q=;\n b=Z7hVB2qPvpO0CIxr88660y7jGt6cYs57qGiruKBBKFLOPjRTxMYUI7wnrNPeSHtrZpMe0fe5bzpvfoqkHUkeyrLdS2rzo2C5GMRhQIFIecILj3wodCfb7klOv4pYj9W0StRPB8gVWHYZQLNlS4GEbgZ7tIgZAohwQC+BT/I2+Jw=",
        "Authentication-Results": "spf=pass (sender IP is 192.176.1.74)\n smtp.mailfrom=ericsson.com; arm.com; dkim=none (message not signed)\n header.d=none;arm.com; dmarc=pass action=none header.from=ericsson.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of ericsson.com designates\n 192.176.1.74 as permitted sender)\n receiver=protection.outlook.com;\n client-ip=192.176.1.74; helo=oa.msg.ericsson.com;",
        "From": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>",
        "To": "<jerinj@marvell.com>",
        "CC": "<dev@dpdk.org>, <stefan.sundkvist@ericsson.com>, <Ola.Liljedahl@arm.com>,\n\t=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>",
        "Date": "Mon, 9 Mar 2020 07:51:02 +0100",
        "Message-ID": "<20200309065106.23800-5-mattias.ronnblom@ericsson.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200309065106.23800-1-mattias.ronnblom@ericsson.com>",
        "References": "<20200309065106.23800-1-mattias.ronnblom@ericsson.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "8bit",
        "X-EOPAttributedMessage": "0",
        "X-Forefront-Antispam-Report": "CIP:192.176.1.74; IPV:; CTRY:SE; EFV:NLI;\n SFV:NSPM;\n SFS:(10009020)(4636009)(396003)(39860400002)(376002)(346002)(136003)(199004)(189003)(26005)(6666004)(356004)(107886003)(186003)(8936002)(336012)(8676002)(246002)(5660300002)(86362001)(6916009)(70586007)(70206006)(2906002)(4326008)(316002)(54906003)(7636002)(36756003)(66574012)(478600001)(1076003)(2616005)(30864003)(956004);\n DIR:OUT; SFP:1101; SCL:1; SRVR:DB7PR07MB5879; H:oa.msg.ericsson.com; FPR:;\n SPF:Pass; LANG:en; PTR:office365.se.ericsson.net; A:1; MX:1;",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "6be29bab-5ef8-4944-6798-08d7c3f654da",
        "X-MS-TrafficTypeDiagnostic": "DB7PR07MB5879:",
        "X-Microsoft-Antispam-PRVS": "\n <DB7PR07MB58795FBFAD724ED18F721BC8E1FE0@DB7PR07MB5879.eurprd07.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:10000;",
        "X-Forefront-PRVS": "0337AFFE9A",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 5L33wre5FAWZ2wKOSJxoOxZcZwvjhPNdOpLDLmZPTwWauH/ziy+BHojHgFZtIEYjCtdM8UYSR4MF2sgqTD0WiXnYs6gIF4YcLRjiYW2i9pesCvioUZOY9VK+Rzk1+LMkVMm2UEXLKDUXuDsc/8wLZ7Uj7D/F/1u2rGs0xaSEYPNSi5bpMZcXWzrFa/FS/E353Hi06wTYJrO+EUx7KELNqWxIKs74CFo9rx5qzbdtHetThYCF636noAnY7IWdSW/MZbZirfGawxDHceUxuYUg6Tz91KGeekakGZL3Qr8Ky4k6L/+w9FcE932kfoi1hLF2M2OaQJJdYbqd4iYx1fKXchvbNkPZDVT5v8Zw3ti1jP30YLvYnSGqMMzfW7lHft0UZ/FQthFN5rq+S5wVyHghfOvf02snc4NDQTd0GAp2C7Tp4O3xup9yM+9kgP/BaAqS",
        "X-OriginatorOrg": "ericsson.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "09 Mar 2020 06:51:44.8624 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 6be29bab-5ef8-4944-6798-08d7c3f654da",
        "X-MS-Exchange-CrossTenant-Id": "92e84ceb-fbfd-47ab-be52-080c6b87953f",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=92e84ceb-fbfd-47ab-be52-080c6b87953f; Ip=[192.176.1.74];\n Helo=[oa.msg.ericsson.com]",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DB7PR07MB5879",
        "Subject": "[dpdk-dev] [PATCH 4/8] event/dsw: improve migration mechanism",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Allowing moving multiple flows in one migration transaction, to\nrebalance load more quickly.\n\nIntroduce a threshold to avoid migrating flows between ports with very\nsimilar load.\n\nSimplify logic for selecting which flow to migrate. The aim is now to\nmove flows in such a way that the receiving port is as lightly-loaded\nas possible (after receiving the flow), while still migrating enough\nflows from the source port to reduce its load. This is essentially how\nlegacy strategy work as well, but the code is more readable.\n\nSigned-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>\n---\n drivers/event/dsw/dsw_evdev.h |  15 +-\n drivers/event/dsw/dsw_event.c | 541 +++++++++++++++++++++-------------\n 2 files changed, 343 insertions(+), 213 deletions(-)",
    "diff": "diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h\nindex 2c7f9efa3..ced40ef8d 100644\n--- a/drivers/event/dsw/dsw_evdev.h\n+++ b/drivers/event/dsw/dsw_evdev.h\n@@ -93,11 +93,14 @@\n #define DSW_MIGRATION_INTERVAL (1000)\n #define DSW_MIN_SOURCE_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(70))\n #define DSW_MAX_TARGET_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(95))\n+#define DSW_REBALANCE_THRESHOLD (DSW_LOAD_FROM_PERCENT(3))\n \n #define DSW_MAX_EVENTS_RECORDED (128)\n \n+#define DSW_MAX_FLOWS_PER_MIGRATION (8)\n+\n /* Only one outstanding migration per port is allowed */\n-#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS)\n+#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS*DSW_MAX_FLOWS_PER_MIGRATION)\n \n /* Enough room for paus request/confirm and unpaus request/confirm for\n  * all possible senders.\n@@ -170,8 +173,10 @@ struct dsw_port {\n \tuint64_t emigrations;\n \tuint64_t emigration_latency;\n \n-\tuint8_t emigration_target_port_id;\n-\tstruct dsw_queue_flow emigration_target_qf;\n+\tuint8_t emigration_target_port_ids[DSW_MAX_FLOWS_PER_MIGRATION];\n+\tstruct dsw_queue_flow\n+\t\temigration_target_qfs[DSW_MAX_FLOWS_PER_MIGRATION];\n+\tuint8_t emigration_targets_len;\n \tuint8_t cfm_cnt;\n \n \tuint64_t immigrations;\n@@ -244,8 +249,8 @@ struct dsw_evdev {\n struct dsw_ctl_msg {\n \tuint8_t type;\n \tuint8_t originating_port_id;\n-\tuint8_t queue_id;\n-\tuint16_t flow_hash;\n+\tuint8_t qfs_len;\n+\tstruct dsw_queue_flow qfs[DSW_MAX_FLOWS_PER_MIGRATION];\n } __rte_aligned(4);\n \n uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);\ndiff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c\nindex 69cff7aa2..21c102275 100644\n--- a/drivers/event/dsw/dsw_event.c\n+++ b/drivers/event/dsw/dsw_event.c\n@@ -189,58 +189,75 @@ dsw_port_ctl_dequeue(struct dsw_port *port, struct dsw_ctl_msg *msg)\n \n static void\n dsw_port_ctl_broadcast(struct dsw_evdev *dsw, struct dsw_port *source_port,\n-\t\t       uint8_t type, uint8_t queue_id, uint16_t flow_hash)\n+\t\t       uint8_t type, struct dsw_queue_flow *qfs,\n+\t\t       uint8_t qfs_len)\n {\n \tuint16_t port_id;\n \tstruct dsw_ctl_msg msg = {\n \t\t.type = type,\n \t\t.originating_port_id = source_port->id,\n-\t\t.queue_id = queue_id,\n-\t\t.flow_hash = flow_hash\n+\t\t.qfs_len = qfs_len\n \t};\n \n+\tmemcpy(msg.qfs, qfs, sizeof(struct dsw_queue_flow) * qfs_len);\n+\n \tfor (port_id = 0; port_id < dsw->num_ports; port_id++)\n \t\tif (port_id != source_port->id)\n \t\t\tdsw_port_ctl_enqueue(&dsw->ports[port_id], &msg);\n }\n \n-static bool\n-dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id,\n-\t\t\tuint16_t flow_hash)\n+static __rte_always_inline bool\n+dsw_is_queue_flow_in_ary(const struct dsw_queue_flow *qfs, uint16_t qfs_len,\n+\t\t\t uint8_t queue_id, uint16_t flow_hash)\n {\n \tuint16_t i;\n \n-\tfor (i = 0; i < port->paused_flows_len; i++) {\n-\t\tstruct dsw_queue_flow *qf = &port->paused_flows[i];\n-\t\tif (qf->queue_id == queue_id &&\n-\t\t    qf->flow_hash == flow_hash)\n+\tfor (i = 0; i < qfs_len; i++)\n+\t\tif (qfs[i].queue_id == queue_id &&\n+\t\t    qfs[i].flow_hash == flow_hash)\n \t\t\treturn true;\n-\t}\n+\n \treturn false;\n }\n \n+static __rte_always_inline bool\n+dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id,\n+\t\t\tuint16_t flow_hash)\n+{\n+\treturn dsw_is_queue_flow_in_ary(port->paused_flows,\n+\t\t\t\t\tport->paused_flows_len,\n+\t\t\t\t\tqueue_id, flow_hash);\n+}\n+\n static void\n-dsw_port_add_paused_flow(struct dsw_port *port, 
uint8_t queue_id,\n-\t\t\t uint16_t paused_flow_hash)\n+dsw_port_add_paused_flows(struct dsw_port *port, struct dsw_queue_flow *qfs,\n+\t\t\t  uint8_t qfs_len)\n {\n-\tport->paused_flows[port->paused_flows_len] = (struct dsw_queue_flow) {\n-\t\t.queue_id = queue_id,\n-\t\t.flow_hash = paused_flow_hash\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < qfs_len; i++) {\n+\t\tstruct dsw_queue_flow *qf = &qfs[i];\n+\n+\t\tDSW_LOG_DP_PORT(DEBUG, port->id,\n+\t\t\t\t\"Pausing queue_id %d flow_hash %d.\\n\",\n+\t\t\t\tqf->queue_id, qf->flow_hash);\n+\n+\t\tport->paused_flows[port->paused_flows_len] = *qf;\n+\t\tport->paused_flows_len++;\n \t};\n-\tport->paused_flows_len++;\n }\n \n static void\n-dsw_port_remove_paused_flow(struct dsw_port *port, uint8_t queue_id,\n-\t\t\t    uint16_t paused_flow_hash)\n+dsw_port_remove_paused_flow(struct dsw_port *port,\n+\t\t\t    struct dsw_queue_flow *target_qf)\n {\n \tuint16_t i;\n \n \tfor (i = 0; i < port->paused_flows_len; i++) {\n \t\tstruct dsw_queue_flow *qf = &port->paused_flows[i];\n \n-\t\tif (qf->queue_id == queue_id &&\n-\t\t    qf->flow_hash == paused_flow_hash) {\n+\t\tif (qf->queue_id == target_qf->queue_id &&\n+\t\t    qf->flow_hash == target_qf->flow_hash) {\n \t\t\tuint16_t last_idx = port->paused_flows_len-1;\n \t\t\tif (i != last_idx)\n \t\t\t\tport->paused_flows[i] =\n@@ -251,30 +268,37 @@ dsw_port_remove_paused_flow(struct dsw_port *port, uint8_t queue_id,\n \t}\n }\n \n+static void\n+dsw_port_remove_paused_flows(struct dsw_port *port,\n+\t\t\t     struct dsw_queue_flow *qfs, uint8_t qfs_len)\n+{\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < qfs_len; i++)\n+\t\tdsw_port_remove_paused_flow(port, &qfs[i]);\n+\n+}\n+\n static void\n dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port);\n \n static void\n-dsw_port_handle_pause_flow(struct dsw_evdev *dsw, struct dsw_port *port,\n-\t\t\t   uint8_t originating_port_id, uint8_t queue_id,\n-\t\t\t   uint16_t paused_flow_hash)\n+dsw_port_handle_pause_flows(struct dsw_evdev *dsw, struct dsw_port *port,\n+\t\t\t    uint8_t originating_port_id,\n+\t\t\t    struct dsw_queue_flow *paused_qfs,\n+\t\t\t    uint8_t qfs_len)\n {\n \tstruct dsw_ctl_msg cfm = {\n \t\t.type = DSW_CTL_CFM,\n-\t\t.originating_port_id = port->id,\n-\t\t.queue_id = queue_id,\n-\t\t.flow_hash = paused_flow_hash\n+\t\t.originating_port_id = port->id\n \t};\n \n-\tDSW_LOG_DP_PORT(DEBUG, port->id, \"Pausing queue_id %d flow_hash %d.\\n\",\n-\t\t\tqueue_id, paused_flow_hash);\n-\n \t/* There might be already-scheduled events belonging to the\n \t * paused flow in the output buffers.\n \t */\n \tdsw_port_flush_out_buffers(dsw, port);\n \n-\tdsw_port_add_paused_flow(port, queue_id, paused_flow_hash);\n+\tdsw_port_add_paused_flows(port, paused_qfs, qfs_len);\n \n \t/* Make sure any stores to the original port's in_ring is seen\n \t * before the ctl message.\n@@ -284,47 +308,11 @@ dsw_port_handle_pause_flow(struct dsw_evdev *dsw, struct dsw_port *port,\n \tdsw_port_ctl_enqueue(&dsw->ports[originating_port_id], &cfm);\n }\n \n-static void\n-dsw_find_lowest_load_port(uint8_t *port_ids, uint16_t num_port_ids,\n-\t\t\t  uint8_t exclude_port_id, int16_t *port_loads,\n-\t\t\t  uint8_t *target_port_id, int16_t *target_load)\n-{\n-\tint16_t candidate_port_id = -1;\n-\tint16_t candidate_load = DSW_MAX_LOAD;\n-\tuint16_t i;\n-\n-\tfor (i = 0; i < num_port_ids; i++) {\n-\t\tuint8_t port_id = port_ids[i];\n-\t\tif (port_id != exclude_port_id) {\n-\t\t\tint16_t load = port_loads[port_id];\n-\t\t\tif (candidate_port_id == -1 ||\n-\t\t\t    load < 
candidate_load) {\n-\t\t\t\tcandidate_port_id = port_id;\n-\t\t\t\tcandidate_load = load;\n-\t\t\t}\n-\t\t}\n-\t}\n-\t*target_port_id = candidate_port_id;\n-\t*target_load = candidate_load;\n-}\n-\n struct dsw_queue_flow_burst {\n \tstruct dsw_queue_flow queue_flow;\n \tuint16_t count;\n };\n \n-static inline int\n-dsw_cmp_burst(const void *v_burst_a, const void *v_burst_b)\n-{\n-\tconst struct dsw_queue_flow_burst *burst_a = v_burst_a;\n-\tconst struct dsw_queue_flow_burst *burst_b = v_burst_b;\n-\n-\tint a_count = burst_a->count;\n-\tint b_count = burst_b->count;\n-\n-\treturn a_count - b_count;\n-}\n-\n #define DSW_QF_TO_INT(_qf)\t\t\t\t\t\\\n \t((int)((((_qf)->queue_id)<<16)|((_qf)->flow_hash)))\n \n@@ -363,8 +351,6 @@ dsw_sort_qfs_to_bursts(struct dsw_queue_flow *qfs, uint16_t qfs_len,\n \t\tcurrent_burst->count++;\n \t}\n \n-\tqsort(bursts, num_bursts, sizeof(bursts[0]), dsw_cmp_burst);\n-\n \treturn num_bursts;\n }\n \n@@ -384,44 +370,158 @@ dsw_retrieve_port_loads(struct dsw_evdev *dsw, int16_t *port_loads,\n \treturn below_limit;\n }\n \n+static int16_t\n+dsw_flow_load(uint16_t num_events, int16_t port_load)\n+{\n+\treturn ((int32_t)port_load * (int32_t)num_events) /\n+\t\tDSW_MAX_EVENTS_RECORDED;\n+}\n+\n+static int16_t\n+dsw_evaluate_migration(int16_t source_load, int16_t target_load,\n+\t\t       int16_t flow_load)\n+{\n+\tint32_t res_target_load;\n+\tint32_t imbalance;\n+\n+\tif (target_load > DSW_MAX_TARGET_LOAD_FOR_MIGRATION)\n+\t\treturn -1;\n+\n+\timbalance = source_load - target_load;\n+\n+\tif (imbalance < DSW_REBALANCE_THRESHOLD)\n+\t\treturn -1;\n+\n+\tres_target_load = target_load + flow_load;\n+\n+\t/* If the estimated load of the target port will be higher\n+\t * than the source port's load, it doesn't make sense to move\n+\t * the flow.\n+\t */\n+\tif (res_target_load > source_load)\n+\t\treturn -1;\n+\n+\t/* The more idle the target will be, the better. 
This will\n+\t * make migration prefer moving smaller flows, and flows to\n+\t * lightly loaded ports.\n+\t */\n+\treturn DSW_MAX_LOAD - res_target_load;\n+}\n+\n+static bool\n+dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id)\n+{\n+\tstruct dsw_queue *queue = &dsw->queues[queue_id];\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < queue->num_serving_ports; i++)\n+\t\tif (queue->serving_ports[i] == port_id)\n+\t\t\treturn true;\n+\n+\treturn false;\n+}\n+\n static bool\n dsw_select_emigration_target(struct dsw_evdev *dsw,\n-\t\t\t     struct dsw_port *source_port,\n-\t\t\t     struct dsw_queue_flow_burst *bursts,\n-\t\t\t     uint16_t num_bursts, int16_t *port_loads,\n-\t\t\t     int16_t max_load, struct dsw_queue_flow *target_qf,\n-\t\t\t     uint8_t *target_port_id)\n+\t\t\t    struct dsw_queue_flow_burst *bursts,\n+\t\t\t    uint16_t num_bursts, uint8_t source_port_id,\n+\t\t\t    int16_t *port_loads, uint16_t num_ports,\n+\t\t\t    uint8_t *target_port_ids,\n+\t\t\t    struct dsw_queue_flow *target_qfs,\n+\t\t\t    uint8_t *targets_len)\n {\n-\tuint16_t source_load = port_loads[source_port->id];\n+\tint16_t source_port_load = port_loads[source_port_id];\n+\tstruct dsw_queue_flow *candidate_qf;\n+\tuint8_t candidate_port_id;\n+\tint16_t candidate_weight = -1;\n+\tint16_t candidate_flow_load;\n \tuint16_t i;\n \n+\tif (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION)\n+\t\treturn false;\n+\n \tfor (i = 0; i < num_bursts; i++) {\n-\t\tstruct dsw_queue_flow *qf = &bursts[i].queue_flow;\n+\t\tstruct dsw_queue_flow_burst *burst = &bursts[i];\n+\t\tstruct dsw_queue_flow *qf = &burst->queue_flow;\n+\t\tint16_t flow_load;\n+\t\tuint16_t port_id;\n \n-\t\tif (dsw_port_is_flow_paused(source_port, qf->queue_id,\n-\t\t\t\t\t    qf->flow_hash))\n+\t\tif (dsw_is_queue_flow_in_ary(target_qfs, *targets_len,\n+\t\t\t\t\t     qf->queue_id, qf->flow_hash))\n \t\t\tcontinue;\n \n-\t\tstruct dsw_queue *queue = &dsw->queues[qf->queue_id];\n-\t\tint16_t target_load;\n+\t\tflow_load = dsw_flow_load(burst->count, source_port_load);\n \n-\t\tdsw_find_lowest_load_port(queue->serving_ports,\n-\t\t\t\t\t  queue->num_serving_ports,\n-\t\t\t\t\t  source_port->id, port_loads,\n-\t\t\t\t\t  target_port_id, &target_load);\n+\t\tfor (port_id = 0; port_id < num_ports; port_id++) {\n+\t\t\tint16_t weight;\n \n-\t\tif (target_load < source_load &&\n-\t\t    target_load < max_load) {\n-\t\t\t*target_qf = *qf;\n-\t\t\treturn true;\n+\t\t\tif (port_id == source_port_id)\n+\t\t\t\tcontinue;\n+\n+\t\t\tif (!dsw_is_serving_port(dsw, port_id, qf->queue_id))\n+\t\t\t\tcontinue;\n+\n+\t\t\tweight = dsw_evaluate_migration(source_port_load,\n+\t\t\t\t\t\t\tport_loads[port_id],\n+\t\t\t\t\t\t\tflow_load);\n+\n+\t\t\tif (weight > candidate_weight) {\n+\t\t\t\tcandidate_qf = qf;\n+\t\t\t\tcandidate_port_id = port_id;\n+\t\t\t\tcandidate_weight = weight;\n+\t\t\t\tcandidate_flow_load = flow_load;\n+\t\t\t}\n \t\t}\n \t}\n \n-\tDSW_LOG_DP_PORT(DEBUG, source_port->id, \"For the %d flows considered, \"\n-\t\t\t\"no target port found with load less than %d.\\n\",\n-\t\t\tnum_bursts, DSW_LOAD_TO_PERCENT(max_load));\n+\tif (candidate_weight < 0)\n+\t\treturn false;\n \n-\treturn false;\n+\tDSW_LOG_DP_PORT(DEBUG, source_port_id, \"Selected queue_id %d \"\n+\t\t\t\"flow_hash %d (with flow load %d) for migration \"\n+\t\t\t\"to port %d.\\n\", candidate_qf->queue_id,\n+\t\t\tcandidate_qf->flow_hash,\n+\t\t\tDSW_LOAD_TO_PERCENT(candidate_flow_load),\n+\t\t\tcandidate_port_id);\n+\n+\tport_loads[candidate_port_id] += 
candidate_flow_load;\n+\tport_loads[source_port_id] -= candidate_flow_load;\n+\n+\ttarget_port_ids[*targets_len] = candidate_port_id;\n+\ttarget_qfs[*targets_len] = *candidate_qf;\n+\t(*targets_len)++;\n+\n+\treturn true;\n+}\n+\n+static void\n+dsw_select_emigration_targets(struct dsw_evdev *dsw,\n+\t\t\t      struct dsw_port *source_port,\n+\t\t\t      struct dsw_queue_flow_burst *bursts,\n+\t\t\t      uint16_t num_bursts, int16_t *port_loads)\n+{\n+\tstruct dsw_queue_flow *target_qfs = source_port->emigration_target_qfs;\n+\tuint8_t *target_port_ids = source_port->emigration_target_port_ids;\n+\tuint8_t *targets_len = &source_port->emigration_targets_len;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < DSW_MAX_FLOWS_PER_MIGRATION; i++) {\n+\t\tbool found;\n+\n+\t\tfound = dsw_select_emigration_target(dsw, bursts, num_bursts,\n+\t\t\t\t\t\t     source_port->id,\n+\t\t\t\t\t\t     port_loads, dsw->num_ports,\n+\t\t\t\t\t\t     target_port_ids,\n+\t\t\t\t\t\t     target_qfs,\n+\t\t\t\t\t\t     targets_len);\n+\t\tif (!found)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (*targets_len == 0)\n+\t\tDSW_LOG_DP_PORT(DEBUG, source_port->id,\n+\t\t\t\t\"For the %d flows considered, no target port \"\n+\t\t\t\t\"was found.\\n\", num_bursts);\n }\n \n static uint8_t\n@@ -562,7 +662,7 @@ dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port,\n static void\n dsw_port_flush_paused_events(struct dsw_evdev *dsw,\n \t\t\t     struct dsw_port *source_port,\n-\t\t\t     uint8_t queue_id, uint16_t paused_flow_hash)\n+\t\t\t     const struct dsw_queue_flow *qf)\n {\n \tuint16_t paused_events_len = source_port->paused_events_len;\n \tstruct rte_event paused_events[paused_events_len];\n@@ -572,7 +672,7 @@ dsw_port_flush_paused_events(struct dsw_evdev *dsw,\n \tif (paused_events_len == 0)\n \t\treturn;\n \n-\tif (dsw_port_is_flow_paused(source_port, queue_id, paused_flow_hash))\n+\tif (dsw_port_is_flow_paused(source_port, qf->queue_id, qf->flow_hash))\n \t\treturn;\n \n \trte_memcpy(paused_events, source_port->paused_events,\n@@ -580,7 +680,7 @@ dsw_port_flush_paused_events(struct dsw_evdev *dsw,\n \n \tsource_port->paused_events_len = 0;\n \n-\tdest_port_id = dsw_schedule(dsw, queue_id, paused_flow_hash);\n+\tdest_port_id = dsw_schedule(dsw, qf->queue_id, qf->flow_hash);\n \n \tfor (i = 0; i < paused_events_len; i++) {\n \t\tstruct rte_event *event = &paused_events[i];\n@@ -588,8 +688,8 @@ dsw_port_flush_paused_events(struct dsw_evdev *dsw,\n \n \t\tflow_hash = dsw_flow_id_hash(event->flow_id);\n \n-\t\tif (event->queue_id == queue_id &&\n-\t\t    flow_hash == paused_flow_hash)\n+\t\tif (event->queue_id == qf->queue_id &&\n+\t\t    flow_hash == qf->flow_hash)\n \t\t\tdsw_port_buffer_non_paused(dsw, source_port,\n \t\t\t\t\t\t   dest_port_id, event);\n \t\telse\n@@ -598,33 +698,94 @@ dsw_port_flush_paused_events(struct dsw_evdev *dsw,\n }\n \n static void\n-dsw_port_emigration_stats(struct dsw_port *port)\n+dsw_port_emigration_stats(struct dsw_port *port, uint8_t finished)\n {\n-\tuint64_t emigration_latency;\n+\tuint64_t flow_migration_latency;\n \n-\temigration_latency = (rte_get_timer_cycles() - port->emigration_start);\n-\tport->emigration_latency += emigration_latency;\n-\tport->emigrations++;\n+\tflow_migration_latency =\n+\t\t(rte_get_timer_cycles() - port->emigration_start);\n+\tport->emigration_latency += (flow_migration_latency * finished);\n+\tport->emigrations += finished;\n }\n \n static void\n-dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port)\n+dsw_port_end_emigration(struct 
dsw_evdev *dsw, struct dsw_port *port,\n+\t\t\tuint8_t schedule_type)\n {\n-\tuint8_t queue_id = port->emigration_target_qf.queue_id;\n-\tuint16_t flow_hash = port->emigration_target_qf.flow_hash;\n+\tuint8_t i;\n+\tstruct dsw_queue_flow left_qfs[DSW_MAX_FLOWS_PER_MIGRATION];\n+\tuint8_t left_port_ids[DSW_MAX_FLOWS_PER_MIGRATION];\n+\tuint8_t left_qfs_len = 0;\n+\tuint8_t finished;\n+\n+\tfor (i = 0; i < port->emigration_targets_len; i++) {\n+\t\tstruct dsw_queue_flow *qf = &port->emigration_target_qfs[i];\n+\t\tuint8_t queue_id = qf->queue_id;\n+\t\tuint8_t queue_schedule_type =\n+\t\t\tdsw->queues[queue_id].schedule_type;\n+\t\tuint16_t flow_hash = qf->flow_hash;\n+\n+\t\tif (queue_schedule_type != schedule_type) {\n+\t\t\tleft_port_ids[left_qfs_len] =\n+\t\t\t\tport->emigration_target_port_ids[i];\n+\t\t\tleft_qfs[left_qfs_len] = *qf;\n+\t\t\tleft_qfs_len++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tDSW_LOG_DP_PORT(DEBUG, port->id, \"Migration completed for \"\n+\t\t\t\t\"queue_id %d flow_hash %d.\\n\", queue_id,\n+\t\t\t\tflow_hash);\n+\n+\t\tif (queue_schedule_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t\tdsw_port_remove_paused_flow(port, qf);\n+\t\t\tdsw_port_flush_paused_events(dsw, port, qf);\n+\t\t}\n+\t}\n \n-\tport->migration_state = DSW_MIGRATION_STATE_IDLE;\n-\tport->seen_events_len = 0;\n+\tfinished = port->emigration_targets_len - left_qfs_len;\n \n-\tdsw_port_emigration_stats(port);\n+\tif (finished > 0)\n+\t\tdsw_port_emigration_stats(port, finished);\n \n-\tif (dsw->queues[queue_id].schedule_type != RTE_SCHED_TYPE_PARALLEL) {\n-\t\tdsw_port_remove_paused_flow(port, queue_id, flow_hash);\n-\t\tdsw_port_flush_paused_events(dsw, port, queue_id, flow_hash);\n+\tfor (i = 0; i < left_qfs_len; i++) {\n+\t\tport->emigration_target_port_ids[i] = left_port_ids[i];\n+\t\tport->emigration_target_qfs[i] = left_qfs[i];\n \t}\n+\tport->emigration_targets_len = left_qfs_len;\n \n-\tDSW_LOG_DP_PORT(DEBUG, port->id, \"Emigration completed for queue_id \"\n-\t\t\t\"%d flow_hash %d.\\n\", queue_id, flow_hash);\n+\tif (port->emigration_targets_len == 0) {\n+\t\tport->migration_state = DSW_MIGRATION_STATE_IDLE;\n+\t\tport->seen_events_len = 0;\n+\t}\n+}\n+\n+static void\n+dsw_port_move_parallel_flows(struct dsw_evdev *dsw,\n+\t\t\t     struct dsw_port *source_port)\n+{\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < source_port->emigration_targets_len; i++) {\n+\t\tstruct dsw_queue_flow *qf =\n+\t\t\t&source_port->emigration_target_qfs[i];\n+\t\tuint8_t queue_id = qf->queue_id;\n+\n+\t\tif (dsw->queues[queue_id].schedule_type ==\n+\t\t    RTE_SCHED_TYPE_PARALLEL) {\n+\t\t\tuint8_t dest_port_id =\n+\t\t\t\tsource_port->emigration_target_port_ids[i];\n+\t\t\tuint16_t flow_hash = qf->flow_hash;\n+\n+\t\t\t/* Single byte-sized stores are always atomic. 
*/\n+\t\t\tdsw->queues[queue_id].flow_to_port_map[flow_hash] =\n+\t\t\t\tdest_port_id;\n+\t\t}\n+\t}\n+\n+\trte_smp_wmb();\n+\n+\tdsw_port_end_emigration(dsw, source_port, RTE_SCHED_TYPE_PARALLEL);\n }\n \n static void\n@@ -678,9 +839,9 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,\n \tsource_port_load = rte_atomic16_read(&source_port->load);\n \tif (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {\n \t\tDSW_LOG_DP_PORT(DEBUG, source_port->id,\n-\t\t\t\t\"Load %d is below threshold level %d.\\n\",\n-\t\t\t\tDSW_LOAD_TO_PERCENT(source_port_load),\n-\t\t       DSW_LOAD_TO_PERCENT(DSW_MIN_SOURCE_LOAD_FOR_MIGRATION));\n+\t\t      \"Load %d is below threshold level %d.\\n\",\n+\t\t      DSW_LOAD_TO_PERCENT(source_port_load),\n+\t\t      DSW_LOAD_TO_PERCENT(DSW_MIN_SOURCE_LOAD_FOR_MIGRATION));\n \t\treturn;\n \t}\n \n@@ -697,16 +858,9 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,\n \t\treturn;\n \t}\n \n-\t/* Sort flows into 'bursts' to allow attempting to migrating\n-\t * small (but still active) flows first - this it to avoid\n-\t * having large flows moving around the worker cores too much\n-\t * (to avoid cache misses, among other things). Of course, the\n-\t * number of recorded events (queue+flow ids) are limited, and\n-\t * provides only a snapshot, so only so many conclusions can\n-\t * be drawn from this data.\n-\t */\n \tnum_bursts = dsw_sort_qfs_to_bursts(seen_events, seen_events_len,\n \t\t\t\t\t    bursts);\n+\n \t/* For non-big-little systems, there's no point in moving the\n \t * only (known) flow.\n \t */\n@@ -718,33 +872,11 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,\n \t\treturn;\n \t}\n \n-\t/* The strategy is to first try to find a flow to move to a\n-\t * port with low load (below the emigration-attempt\n-\t * threshold). If that fails, we try to find a port which is\n-\t * below the max threshold, and also less loaded than this\n-\t * port is.\n-\t */\n-\tif (!dsw_select_emigration_target(dsw, source_port, bursts, num_bursts,\n-\t\t\t\t      port_loads,\n-\t\t\t\t      DSW_MIN_SOURCE_LOAD_FOR_MIGRATION,\n-\t\t\t\t      &source_port->emigration_target_qf,\n-\t\t\t\t      &source_port->emigration_target_port_id)\n-\t    &&\n-\t    !dsw_select_emigration_target(dsw, source_port, bursts, num_bursts,\n-\t\t\t\t      port_loads,\n-\t\t\t\t      DSW_MAX_TARGET_LOAD_FOR_MIGRATION,\n-\t\t\t\t      &source_port->emigration_target_qf,\n-\t\t\t\t      &source_port->emigration_target_port_id))\n-\t\treturn;\n-\n-\tDSW_LOG_DP_PORT(DEBUG, source_port->id, \"Migrating queue_id %d \"\n-\t\t\t\"flow_hash %d from port %d to port %d.\\n\",\n-\t\t\tsource_port->emigration_target_qf.queue_id,\n-\t\t\tsource_port->emigration_target_qf.flow_hash,\n-\t\t\tsource_port->id,\n-\t\t\tsource_port->emigration_target_port_id);\n+\tdsw_select_emigration_targets(dsw, source_port, bursts, num_bursts,\n+\t\t\t\t      port_loads);\n \n-\t/* We have a winner. 
*/\n+\tif (source_port->emigration_targets_len == 0)\n+\t\treturn;\n \n \tsource_port->migration_state = DSW_MIGRATION_STATE_PAUSING;\n \tsource_port->emigration_start = rte_get_timer_cycles();\n@@ -753,71 +885,58 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,\n \t * parallel queues, since atomic/ordered semantics need not to\n \t * be maintained.\n \t */\n+\tdsw_port_move_parallel_flows(dsw, source_port);\n \n-\tif (dsw->queues[source_port->emigration_target_qf.queue_id].\n-\t    schedule_type == RTE_SCHED_TYPE_PARALLEL) {\n-\t\tuint8_t queue_id =\n-\t\t\tsource_port->emigration_target_qf.queue_id;\n-\t\tuint16_t flow_hash =\n-\t\t\tsource_port->emigration_target_qf.flow_hash;\n-\t\tuint8_t dest_port_id =\n-\t\t\tsource_port->emigration_target_port_id;\n-\n-\t\t/* Single byte-sized stores are always atomic. */\n-\t\tdsw->queues[queue_id].flow_to_port_map[flow_hash] =\n-\t\t\tdest_port_id;\n-\t\trte_smp_wmb();\n-\n-\t\tdsw_port_end_emigration(dsw, source_port);\n-\n+\t/* All flows were on PARALLEL queues. */\n+\tif (source_port->migration_state == DSW_MIGRATION_STATE_IDLE)\n \t\treturn;\n-\t}\n \n \t/* There might be 'loopback' events already scheduled in the\n \t * output buffers.\n \t */\n \tdsw_port_flush_out_buffers(dsw, source_port);\n \n-\tdsw_port_add_paused_flow(source_port,\n-\t\t\t\t source_port->emigration_target_qf.queue_id,\n-\t\t\t\t source_port->emigration_target_qf.flow_hash);\n+\tdsw_port_add_paused_flows(source_port,\n+\t\t\t\t  source_port->emigration_target_qfs,\n+\t\t\t\t  source_port->emigration_targets_len);\n \n \tdsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_PAUS_REQ,\n-\t\t\t       source_port->emigration_target_qf.queue_id,\n-\t\t\t       source_port->emigration_target_qf.flow_hash);\n+\t\t\t       source_port->emigration_target_qfs,\n+\t\t\t       source_port->emigration_targets_len);\n \tsource_port->cfm_cnt = 0;\n }\n \n static void\n dsw_port_flush_paused_events(struct dsw_evdev *dsw,\n \t\t\t     struct dsw_port *source_port,\n-\t\t\t     uint8_t queue_id, uint16_t paused_flow_hash);\n+\t\t\t     const struct dsw_queue_flow *qf);\n \n static void\n-dsw_port_handle_unpause_flow(struct dsw_evdev *dsw, struct dsw_port *port,\n-\t\t\t     uint8_t originating_port_id, uint8_t queue_id,\n-\t\t\t     uint16_t paused_flow_hash)\n+dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port,\n+\t\t\t      uint8_t originating_port_id,\n+\t\t\t      struct dsw_queue_flow *paused_qfs,\n+\t\t\t      uint8_t qfs_len)\n {\n+\tuint16_t i;\n \tstruct dsw_ctl_msg cfm = {\n \t\t.type = DSW_CTL_CFM,\n-\t\t.originating_port_id = port->id,\n-\t\t.queue_id = queue_id,\n-\t\t.flow_hash = paused_flow_hash\n+\t\t.originating_port_id = port->id\n \t};\n \n-\tDSW_LOG_DP_PORT(DEBUG, port->id, \"Un-pausing queue_id %d flow_hash %d.\\n\",\n-\t\t\tqueue_id, paused_flow_hash);\n-\n-\tdsw_port_remove_paused_flow(port, queue_id, paused_flow_hash);\n+\tdsw_port_remove_paused_flows(port, paused_qfs, qfs_len);\n \n \trte_smp_rmb();\n \n-\tif (dsw_schedule(dsw, queue_id, paused_flow_hash) == port->id)\n-\t\tport->immigrations++;\n-\n \tdsw_port_ctl_enqueue(&dsw->ports[originating_port_id], &cfm);\n \n-\tdsw_port_flush_paused_events(dsw, port, queue_id, paused_flow_hash);\n+\tfor (i = 0; i < qfs_len; i++) {\n+\t\tstruct dsw_queue_flow *qf = &paused_qfs[i];\n+\n+\t\tif (dsw_schedule(dsw, qf->queue_id, qf->flow_hash) == port->id)\n+\t\t\tport->immigrations++;\n+\n+\t\tdsw_port_flush_paused_events(dsw, port, qf);\n+\t}\n }\n \n #define FORWARD_BURST_SIZE (32)\n@@ -872,31 
+991,37 @@ dsw_port_forward_emigrated_flow(struct dsw_port *source_port,\n }\n \n static void\n-dsw_port_move_migrating_flow(struct dsw_evdev *dsw,\n-\t\t\t     struct dsw_port *source_port)\n+dsw_port_move_emigrating_flows(struct dsw_evdev *dsw,\n+\t\t\t       struct dsw_port *source_port)\n {\n-\tuint8_t queue_id = source_port->emigration_target_qf.queue_id;\n-\tuint16_t flow_hash = source_port->emigration_target_qf.flow_hash;\n-\tuint8_t dest_port_id = source_port->emigration_target_port_id;\n-\tstruct dsw_port *dest_port = &dsw->ports[dest_port_id];\n+\tuint8_t i;\n \n \tdsw_port_flush_out_buffers(dsw, source_port);\n \n \trte_smp_wmb();\n \n-\tdsw->queues[queue_id].flow_to_port_map[flow_hash] =\n-\t\tdest_port_id;\n+\tfor (i = 0; i < source_port->emigration_targets_len; i++) {\n+\t\tstruct dsw_queue_flow *qf =\n+\t\t\t&source_port->emigration_target_qfs[i];\n+\t\tuint8_t dest_port_id =\n+\t\t\tsource_port->emigration_target_port_ids[i];\n+\t\tstruct dsw_port *dest_port = &dsw->ports[dest_port_id];\n+\n+\t\tdsw->queues[qf->queue_id].flow_to_port_map[qf->flow_hash] =\n+\t\t\tdest_port_id;\n \n-\tdsw_port_forward_emigrated_flow(source_port, dest_port->in_ring,\n-\t\t\t\t\tqueue_id, flow_hash);\n+\t\tdsw_port_forward_emigrated_flow(source_port, dest_port->in_ring,\n+\t\t\t\t\t\tqf->queue_id, qf->flow_hash);\n+\t}\n \n \t/* Flow table update and migration destination port's enqueues\n \t * must be seen before the control message.\n \t */\n \trte_smp_wmb();\n \n-\tdsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_UNPAUS_REQ, queue_id,\n-\t\t\t       flow_hash);\n+\tdsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_UNPAUS_REQ,\n+\t\t\t       source_port->emigration_target_qfs,\n+\t\t\t       source_port->emigration_targets_len);\n \tsource_port->cfm_cnt = 0;\n \tsource_port->migration_state = DSW_MIGRATION_STATE_UNPAUSING;\n }\n@@ -914,7 +1039,8 @@ dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)\n \t\t\tport->migration_state = DSW_MIGRATION_STATE_FORWARDING;\n \t\t\tbreak;\n \t\tcase DSW_MIGRATION_STATE_UNPAUSING:\n-\t\t\tdsw_port_end_emigration(dsw, port);\n+\t\t\tdsw_port_end_emigration(dsw, port,\n+\t\t\t\t\t\tRTE_SCHED_TYPE_ATOMIC);\n \t\t\tbreak;\n \t\tdefault:\n \t\t\tRTE_ASSERT(0);\n@@ -936,15 +1062,14 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)\n \tif (dsw_port_ctl_dequeue(port, &msg) == 0) {\n \t\tswitch (msg.type) {\n \t\tcase DSW_CTL_PAUS_REQ:\n-\t\t\tdsw_port_handle_pause_flow(dsw, port,\n-\t\t\t\t\t\t   msg.originating_port_id,\n-\t\t\t\t\t\t   msg.queue_id, msg.flow_hash);\n+\t\t\tdsw_port_handle_pause_flows(dsw, port,\n+\t\t\t\t\t\t    msg.originating_port_id,\n+\t\t\t\t\t\t    msg.qfs, msg.qfs_len);\n \t\t\tbreak;\n \t\tcase DSW_CTL_UNPAUS_REQ:\n-\t\t\tdsw_port_handle_unpause_flow(dsw, port,\n-\t\t\t\t\t\t     msg.originating_port_id,\n-\t\t\t\t\t\t     msg.queue_id,\n-\t\t\t\t\t\t     msg.flow_hash);\n+\t\t\tdsw_port_handle_unpause_flows(dsw, port,\n+\t\t\t\t\t\t      msg.originating_port_id,\n+\t\t\t\t\t\t      msg.qfs, msg.qfs_len);\n \t\t\tbreak;\n \t\tcase DSW_CTL_CFM:\n \t\t\tdsw_port_handle_confirm(dsw, port);\n@@ -967,7 +1092,7 @@ dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port)\n {\n \tif (unlikely(port->migration_state == DSW_MIGRATION_STATE_FORWARDING &&\n \t\t     port->pending_releases == 0))\n-\t\tdsw_port_move_migrating_flow(dsw, port);\n+\t\tdsw_port_move_emigrating_flows(dsw, port);\n \n \t/* Polling the control ring is relatively inexpensive, and\n \t * polling it often helps bringing down 
migration latency, so\n",
    "prefixes": [
        "4/8"
    ]
}
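
The mbox field in the response above links to the raw patch in mbox format, which git am can apply directly. A minimal sketch, again in Python; the local checkout directory name ("dpdk") and the output file name are assumptions:

import subprocess
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/66373/").json()

# Download the raw patch in mbox format...
with open("patch.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# ...and apply it to a local DPDK checkout.
subprocess.run(["git", "-C", "dpdk", "am", "patch.mbox"], check=True)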