get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
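
The exchange below demonstrates GET. PATCH and PUT go to the same URL and require an authenticated API token. A minimal sketch of a partial update using Python requests follows; the token value and the target state are placeholders, not taken from this page:

import requests

# Hypothetical token; PATCH/PUT on this endpoint require authentication.
headers = {"Authorization": "Token 0123456789abcdef"}

# Partially update the patch, e.g. change its state (placeholder value).
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/96805/",
    headers=headers,
    json={"state": "accepted"},
)
resp.raise_for_status()
print(resp.json()["state"])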

GET /api/patches/96805/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96805,
    "url": "http://patches.dpdk.org/api/patches/96805/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210811140418.393264-4-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210811140418.393264-4-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210811140418.393264-4-xuemingl@nvidia.com",
    "date": "2021-08-11T14:04:06",
    "name": "[v2,04/15] app/testpmd: make sure shared Rx queue polled on same core",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3837120a673929ac8f9e2098fdfb2a82a061d349",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210811140418.393264-4-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 18256,
            "url": "http://patches.dpdk.org/api/series/18256/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=18256",
            "date": "2021-08-11T14:04:03",
            "name": "[v2,01/15] ethdev: introduce shared Rx queue",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/18256/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/96805/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/96805/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 415C6A0C45;\n\tWed, 11 Aug 2021 16:05:16 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 472DF41200;\n\tWed, 11 Aug 2021 16:04:56 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2069.outbound.protection.outlook.com [40.107.243.69])\n by mails.dpdk.org (Postfix) with ESMTP id 917BD411F6\n for <dev@dpdk.org>; Wed, 11 Aug 2021 16:04:53 +0200 (CEST)",
            "from DM5PR19CA0021.namprd19.prod.outlook.com (2603:10b6:3:151::31)\n by DM6PR12MB3097.namprd12.prod.outlook.com (2603:10b6:5:11d::25) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.13; Wed, 11 Aug\n 2021 14:04:51 +0000",
            "from DM6NAM11FT064.eop-nam11.prod.protection.outlook.com\n (2603:10b6:3:151:cafe::9c) by DM5PR19CA0021.outlook.office365.com\n (2603:10b6:3:151::31) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.13 via Frontend\n Transport; Wed, 11 Aug 2021 14:04:51 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT064.mail.protection.outlook.com (10.13.172.234) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4415.14 via Frontend Transport; Wed, 11 Aug 2021 14:04:51 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 11 Aug\n 2021 14:04:49 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=nqSs/raatScvoD1m67cPjwCfD+GsREyC5rccj+MtD24nT9l3F8yQkaZqTPzqIp/KBt8PobmFEvz0UgWWPIZP57XODzmTJDfPr4zo8NeHMwcKvNGRiF5r0qd6EvrN1VJ/Ya8dPCqlKEFx6rLgUlocy8RG1S33YEGVbqqIE0vup7Elj2gW53BskOxbEHBM47Jj0/1UiLT9hFi0piWSBY3PLzDKTB1zOKO9Ax0a73iUZcRFDpLRdGfAwcPBnkHSbFfA4YOsL6BkB2bFmjKm/RENZxd6L8B1vH/bfDaYGwFJ4XQHpK8IqoJENLxGLyvCSQNLfPlX8PU34MHDcCo8R4qpkg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=TEJ1FRyP2sgkb45a7Zov0JpWCctTP4VtcuUPXg9k5nE=;\n b=IRN7sBV+lCudnbdbAywYk3weHO0MNnUYBAs0aTSkAyI9lFymkoKVLbhE6L1zeAGFFw94QTD5qn67f+GxlVC+WLUOwf6G1cXhaDUS0ozhFaC6DswC/CYit7QsyuiqhBxcrr3xKuPEKIwREr8EjPLdltGJAExS65PX0UUl+7cwR+UYLy/pmp2gF7QEVI17n/fxgXV9bVAdCBDzYakdKjWpx+QCnyC7EEDOhJ6LvvgyDdpyL7uUVGyevBvduUZrMDWYrM6PFPPRdPo0NYXAURe6mWISlUIEJUmL9fpYA/7Z8tlduP4Ke0DqV1rvsIDrNNtOGhGNwb8tPdyU4QtgNuYnOQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=intel.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=TEJ1FRyP2sgkb45a7Zov0JpWCctTP4VtcuUPXg9k5nE=;\n b=QJybRUy39flbDBoKGlXzOxGaCFfsVcyvYsbdbFe4eZpumf/Fw8g0hvX2+oKS9cnvo2GrbFpD6JatFqwL/8QxBk4JJPglNVRM0YHoJsiAO6mQcaUpFJNdRYQk0XEDwEG51R8CSHTSCse+0S350oyjNYVhcnAb5zjvaYyR0XXDa/dqVhaQ1qb/2vwY8zKtgKEaLQBq1Fr9pnLVv1czW+7okmaSB88/IZzQtmHnCs3dSTMQjAto4qsYWunxLP6FDl9CwWPIdaOu4ZseAVfBn2ypCzPOA77JKs/JH1pL9BeapvHE8q1hoU2aiMZI7gEAhV+jIZMySsX50U1Xa/GO6YfpWQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "",
        "CC": "<dev@dpdk.org>, <xuemingl@nvidia.com>, Xiaoyun Li <xiaoyun.li@intel.com>",
        "Date": "Wed, 11 Aug 2021 17:04:06 +0300",
        "Message-ID": "<20210811140418.393264-4-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210811140418.393264-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20210811140418.393264-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "828734ae-e175-48ef-4cbf-08d95cd0fd02",
        "X-MS-TrafficTypeDiagnostic": "DM6PR12MB3097:",
        "X-Microsoft-Antispam-PRVS": "\n <DM6PR12MB3097FF27BEC2D64429CAE7A0A1F89@DM6PR12MB3097.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:6430;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n VqyQn8pDdAjzbfjw9GABpLc0PGSILWdkgfJAVVQXCUUbYH3kv5+PE1PFH1tYML7+90Mv5N+fDV8VvglotW9sOvxFp+UyFNWwisvi4WqV75HozpNHo1/U0HemqeOjwAFUGrVGWlSa10sn8Yo3OZ+4IG6e4Hz+5m7TFN3G9Z308KPzJnUsza1lUEJIMZSCbahmblEQJcoAK8yJ5KcdrYYwqUWsnFJZaeW77U9I97P3fxJgEy8awbPamdDeM1HbsBqEDbBzdAvmMk/0aKHpkSIIZYT4WazQdmlsNR/ff4Ma4IlM2+2kYKmnN2rlznssyQjL5TaAueH06L9CeNwFlqS0KWD7xV095WeFuQ4PtbgRKqKR4FWaIbag3asilIg1PDCsrHQazfaEJMlTBdv4ApZb6Cn/t90hPmf8mm36rGTj9YaBpve0fTw8tjTyaDrC3alKhH2iDPPfuMKs0vZkA1dUb+ruvCTBYqEt5Ek9TsPRN0a7JT4Mgz1AmNNOwCRWRhcYfzQJ6PnJVsKswa8n7bagNRaAx4POjdp07ST37tXx24kH4cTM2oT56NDwXfG4mx1kPPbwQ1AydpcGRoCNSypdGmE/WZ5QK1c09L8n8lsBXPLL2lQ+mRGEu5+8zWGiG19UdaCf9ufYXRC8RuR70w49RMna4bIpyETc01tMU5lI8dLrIW+Iqu8WxMeUz0sb1XKtlqsIRLnwBwZNyfTe7ZDNReMiO/r6d8qbJ7f2fhaqCzI=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(376002)(396003)(346002)(136003)(39860400002)(46966006)(36840700001)(7696005)(8936002)(55016002)(70586007)(70206006)(8676002)(83380400001)(356005)(47076005)(336012)(82310400003)(36906005)(6666004)(54906003)(7636003)(109986005)(478600001)(36756003)(86362001)(26005)(6286002)(5660300002)(316002)(2616005)(16526019)(186003)(4326008)(426003)(1076003)(82740400003)(2906002)(36860700001)(266003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "11 Aug 2021 14:04:51.6335 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 828734ae-e175-48ef-4cbf-08d95cd0fd02",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT064.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB3097",
        "Subject": "[dpdk-dev] [PATCH v2 04/15] app/testpmd: make sure shared Rx queue\n polled on same core",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Shared rxqs uses one set rx queue internally, queues must be polled from\none core.\n\nStops forwarding if shared rxq being scheduled on multiple cores.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n app/test-pmd/config.c  | 91 ++++++++++++++++++++++++++++++++++++++++++\n app/test-pmd/testpmd.c |  4 +-\n app/test-pmd/testpmd.h |  2 +\n 3 files changed, 96 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c\nindex bb882a56a4..51f7d26045 100644\n--- a/app/test-pmd/config.c\n+++ b/app/test-pmd/config.c\n@@ -2885,6 +2885,97 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,\n \t}\n }\n \n+/*\n+ * Check whether a shared rxq scheduled on other lcores.\n+ */\n+static bool\n+fwd_stream_on_other_lcores(uint16_t domain_id, portid_t src_port,\n+\t\t\t   queueid_t src_rxq, lcoreid_t src_lc)\n+{\n+\tstreamid_t sm_id;\n+\tstreamid_t nb_fs_per_lcore;\n+\tlcoreid_t  nb_fc;\n+\tlcoreid_t  lc_id;\n+\tstruct fwd_stream *fs;\n+\tstruct rte_port *port;\n+\tstruct rte_eth_rxconf *rxq_conf;\n+\n+\tnb_fc = cur_fwd_config.nb_fwd_lcores;\n+\tfor (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {\n+\t\tsm_id = fwd_lcores[lc_id]->stream_idx;\n+\t\tnb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;\n+\t\tfor (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;\n+\t\t     sm_id++) {\n+\t\t\tfs = fwd_streams[sm_id];\n+\t\t\tport = &ports[fs->rx_port];\n+\t\t\trxq_conf = &port->rx_conf[fs->rx_queue];\n+\t\t\tif ((rxq_conf->offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ)\n+\t\t\t    == 0)\n+\t\t\t\t/* Not shared rxq. */\n+\t\t\t\tcontinue;\n+\t\t\tif (domain_id != port->dev_info.switch_info.domain_id)\n+\t\t\t\tcontinue;\n+\t\t\tif (fs->rx_queue != src_rxq)\n+\t\t\t\tcontinue;\n+\t\t\tprintf(\"Shared RX queue can't be scheduled on different cores:\\n\");\n+\t\t\tprintf(\"  lcore %hhu Port %hu queue %hu\\n\",\n+\t\t\t       src_lc, src_port, src_rxq);\n+\t\t\tprintf(\"  lcore %hhu Port %hu queue %hu\\n\",\n+\t\t\t       lc_id, fs->rx_port, fs->rx_queue);\n+\t\t\tprintf(\"  please use --nb-cores=%hu to limit forwarding cores\\n\",\n+\t\t\t       nb_rxq);\n+\t\t\treturn true;\n+\t\t}\n+\t}\n+\treturn false;\n+}\n+\n+/*\n+ * Check shared rxq configuration.\n+ *\n+ * Shared group must not being scheduled on different core.\n+ */\n+bool\n+pkt_fwd_shared_rxq_check(void)\n+{\n+\tstreamid_t sm_id;\n+\tstreamid_t nb_fs_per_lcore;\n+\tlcoreid_t  nb_fc;\n+\tlcoreid_t  lc_id;\n+\tstruct fwd_stream *fs;\n+\tuint16_t domain_id;\n+\tstruct rte_port *port;\n+\tstruct rte_eth_rxconf *rxq_conf;\n+\n+\tnb_fc = cur_fwd_config.nb_fwd_lcores;\n+\t/*\n+\t * Check streams on each core, make sure the same switch domain +\n+\t * group + queue doesn't get scheduled on other cores.\n+\t */\n+\tfor (lc_id = 0; lc_id < nb_fc; lc_id++) {\n+\t\tsm_id = fwd_lcores[lc_id]->stream_idx;\n+\t\tnb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;\n+\t\tfor (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;\n+\t\t     sm_id++) {\n+\t\t\tfs = fwd_streams[sm_id];\n+\t\t\t/* Update lcore info stream being scheduled. */\n+\t\t\tfs->lcore = fwd_lcores[lc_id];\n+\t\t\tport = &ports[fs->rx_port];\n+\t\t\trxq_conf = &port->rx_conf[fs->rx_queue];\n+\t\t\tif ((rxq_conf->offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ)\n+\t\t\t    == 0)\n+\t\t\t\t/* Not shared rxq. */\n+\t\t\t\tcontinue;\n+\t\t\t/* Check shared rxq not scheduled on remaining cores. 
*/\n+\t\t\tdomain_id = port->dev_info.switch_info.domain_id;\n+\t\t\tif (fwd_stream_on_other_lcores(domain_id, fs->rx_port,\n+\t\t\t\t\t\t       fs->rx_queue, lc_id))\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\treturn true;\n+}\n+\n /*\n  * Setup forwarding configuration for each logical core.\n  */\ndiff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c\nindex 67fd128862..d941bd982e 100644\n--- a/app/test-pmd/testpmd.c\n+++ b/app/test-pmd/testpmd.c\n@@ -2169,10 +2169,12 @@ start_packet_forwarding(int with_tx_first)\n \n \tfwd_config_setup();\n \n+\tpkt_fwd_config_display(&cur_fwd_config);\n+\tif (!pkt_fwd_shared_rxq_check())\n+\t\treturn;\n \tif(!no_flush_rx)\n \t\tflush_fwd_rx_queues();\n \n-\tpkt_fwd_config_display(&cur_fwd_config);\n \trxtx_config_display();\n \n \tfwd_stats_reset();\ndiff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h\nindex f3b1d34e28..6497c56359 100644\n--- a/app/test-pmd/testpmd.h\n+++ b/app/test-pmd/testpmd.h\n@@ -144,6 +144,7 @@ struct fwd_stream {\n \tuint64_t     core_cycles; /**< used for RX and TX processing */\n \tstruct pkt_burst_stats rx_burst_stats;\n \tstruct pkt_burst_stats tx_burst_stats;\n+\tstruct fwd_lcore *lcore; /**< Lcore being scheduled. */\n };\n \n /**\n@@ -785,6 +786,7 @@ void port_summary_header_display(void);\n void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);\n void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);\n void fwd_lcores_config_display(void);\n+bool pkt_fwd_shared_rxq_check(void);\n void pkt_fwd_config_display(struct fwd_config *cfg);\n void rxtx_config_display(void);\n void fwd_config_setup(void);\n",
    "prefixes": [
        "v2",
        "04/15"
    ]
}
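
For scripted use, the same resource and its linked sub-resources can be fetched directly. A minimal sketch using Python requests; the field names come from the response above, while the printed summary and the assumption that the comments and checks endpoints return JSON lists are illustrative only:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/96805/").json()

# Top-level metadata shown in the response above.
print(patch["name"], patch["state"], patch["check"])

# Follow linked sub-resources: comments and per-patch CI checks.
comments = requests.get(patch["comments"]).json()
checks = requests.get(patch["checks"]).json()
print(len(comments), "comments,", len(checks), "checks")

# The mbox link returns the raw patch email as plain text, not JSON.
mbox = requests.get(patch["mbox"]).text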