get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
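
These methods map to the usual REST verbs: GET reads the resource shown below, while PATCH and PUT write it (authenticated requests only, and only for users with maintainer rights on the project). A minimal sketch of driving them with Python's requests library; the API token and the state value written by PATCH are assumptions for illustration:

import requests

URL = "https://patches.dpdk.org/api/patches/45198/"
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # placeholder token (assumption)

# GET: read the patch (no authentication needed on public projects)
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# PATCH: partially update the patch, e.g. move it to a new state (assumed value)
resp = requests.patch(URL, headers=HEADERS, json={"state": "superseded"})
resp.raise_for_status()

The transcript below shows a GET against this endpoint and the JSON it returns.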

GET /api/patches/45198/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 45198,
    "url": "https://patches.dpdk.org/api/patches/45198/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20180924101208.7350-1-pbhagavatula@caviumnetworks.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180924101208.7350-1-pbhagavatula@caviumnetworks.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180924101208.7350-1-pbhagavatula@caviumnetworks.com",
    "date": "2018-09-24T10:12:08",
    "name": "[v2] examples/eventdev_pipeline: add Tx adapter support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "60c9edfdb04771c76a4807e268b8361a605373dc",
    "submitter": {
        "id": 768,
        "url": "https://patches.dpdk.org/api/people/768/?format=api",
        "name": "Pavan Nikhilesh",
        "email": "pbhagavatula@caviumnetworks.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20180924101208.7350-1-pbhagavatula@caviumnetworks.com/mbox/",
    "series": [
        {
            "id": 1464,
            "url": "https://patches.dpdk.org/api/series/1464/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=1464",
            "date": "2018-09-24T10:12:08",
            "name": "[v2] examples/eventdev_pipeline: add Tx adapter support",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/1464/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/45198/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/45198/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 583292B9D;\n\tMon, 24 Sep 2018 12:12:47 +0200 (CEST)",
            "from NAM02-CY1-obe.outbound.protection.outlook.com\n\t(mail-cys01nam02on0082.outbound.protection.outlook.com\n\t[104.47.37.82]) by dpdk.org (Postfix) with ESMTP id 0FED92B8C\n\tfor <dev@dpdk.org>; Mon, 24 Sep 2018 12:12:45 +0200 (CEST)",
            "from localhost.localdomain (115.113.156.3) by\n\tBYAPR07MB4966.namprd07.prod.outlook.com (2603:10b6:a03:5b::15) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n\t15.20.1143.18; Mon, 24 Sep 2018 10:12:39 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=CAVIUMNETWORKS.onmicrosoft.com; s=selector1-cavium-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=W/Pyzh748qSfH+Ya2G08FU9dOtRQLxl2473irJjhKnc=;\n\tb=Ac10IkmTSQk33caTIiH8a1LeZhstLk3QpNBMNtlP0iQKSimgWwmHWCQS1wJA2ZxHZ7IdrNnTn0/ouNdwfcR4LGRsEgD2ir/JpnVIHwU7iwd3lBXBBwqgX3B9A7eT4ccjJMXMCU0RWASYmHFjXkljB1kiSF8AvnRc0kq2rKu+/50=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Pavan.Bhagavatula@cavium.com; ",
        "From": "Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>",
        "To": "jerin.jacob@caviumnetworks.com, nikhil.rao@intel.com,\n\tharry.van.haaren@intel.com, anoob.joseph@caviumnetworks.com",
        "Cc": "dev@dpdk.org,\n\tPavan Nikhilesh <pbhagavatula@caviumnetworks.com>",
        "Date": "Mon, 24 Sep 2018 15:42:08 +0530",
        "Message-Id": "<20180924101208.7350-1-pbhagavatula@caviumnetworks.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<20180905134554.25243-1-pbhagavatula@caviumnetworks.com>",
        "References": "<20180905134554.25243-1-pbhagavatula@caviumnetworks.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[115.113.156.3]",
        "X-ClientProxiedBy": "PN1PR01CA0078.INDPRD01.PROD.OUTLOOK.COM\n\t(2603:1096:c00:1::18) To BYAPR07MB4966.namprd07.prod.outlook.com\n\t(2603:10b6:a03:5b::15)",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e17e3d2b-2748-4272-6757-08d62206440c",
        "X-Microsoft-Antispam": "BCL:0; PCL:0;\n\tRULEID:(7020095)(4652040)(8989299)(5600074)(711020)(2017052603328)(7153060)(7193020);\n\tSRVR:BYAPR07MB4966; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BYAPR07MB4966;\n\t3:6XtiueFoESHRqA11FwDyo+8uoS0SzPnnweq0NFw8wjwILvQAvfJ4g6Td0QoD7dGO66BgfhktWHMAZV6ipq1OUgHtcOAajvOYBdxsK/bZVEfo0Pg/FU5toZlVHZCGTw2+l31nBrMDM5xBDn/vu9zU7gTh6ETCR247n1fBVtTgOZ+X7ufUvC8v+llgBWpE6M3oYH9iNEt+mgC5jHJ+PUH8qEdw5hIIYbLqZl/RYhPSD0Tyh4AgvDjbo1BOlOL6mGuz;\n\t25:Y69QFmsgxHjy44GYBQekamxgGeynwQ39k6JNsnTjihFoVk4QWQc8vyeyeLdMwfXbCQGVpA/nCoH/vOaPSL5u47cRR6/fyccw1p2sfcccbDA41yDXmEW4Zgjl6X9Yv+GkisJyIDnS0OlOGF2xvVzLNv6lN+I3HxuUXftGL1fKgyGlc/7ywTpK9wn7auhoi/6fV39Q/DMKLmwySzhptqUhh6v9L1Nu7Jp7sTvimSvSRS3BIpFBXLXJXTd/c/FxvdJXGsjiBrMVwQYQ5Zd2jKE/KlwdgGePvCjDcY3e/xgoVGXfDqDwP+0Z0si4uJw1cIpt7acI1wPNf9f9OtmpT4ViQQ==;\n\t31:zSUG4ZjUEQrW9qx08IGT6eF7wOny4hdDzFv3UEN9rwZTU5dXbmMNsi6JGlMWwFmy5x3utvvrIuXI4np948QtKkzyRQJU5BwsnpSS1DbzdMKo/SXrdlf29QVaw5icncVQ4/66zw4lMF88UgR28NQ90V3VpIjLhknxbMN6EfrpTN+eaUyh4w6Gig5FYd40KWyoLo84R02nryP9aYnk6IYJsdroVfP0PL5j/l3Reog+2SE=",
            "1; BYAPR07MB4966;\n\t20:1IeZUhDS+C3HOIBq+zC9iPzQvKwq4HCtX4l5Yh9DttGBmhNxzqWbwkey9x5AZVO2AWQ20YGt2G8BEt57F9BopYVmfnIOhdCkMdN49kOWClazypflkGEp4l/BN/sgywPO0LSTU1ATbFpsgxHP44LMJn2FDXxvYq/mCmaSXPGviAb6r0bIdExjXllBHcdnJnEYYvX4zpeS/76gLswOmpkMmw3IofhMJotrr0aMZM1AmBaHJOY+Az4PCBlznGmNk4BRaQ9QLItDULyFLRbxq1q29gJa4/8SKc/yAjenwijOTViFJrFtKP9dWFVCh30bBREmbbRmo6WgRKZrdOdeGHvREiOf2DdxN4m/MF2HLOjUoQkw/G23QWFwgUFByYzq4aF6CVrIRvyCnz92hxYazYy+M8xiiyhGxGaA12gifknL6VaoH67hb7yZzOTB9zQzbfkdpQWeONZUxFbhnMX7Z80FC1TEtqFRwASV3uYMj+cmsI5H3Ac7hO/m+p9UOFFy8f2iyaQtc04CEpZDXcKf8lShRLmEEcymIG+Bmb3NoGwERDvvvzQ52H4pE+LFGV3Syb5heDf/yMrAJk78RKsYwbX7Tl/K5/6PjASGYOxJpsf1pHA=;\n\t4:MlUsAtvHt26CsPQsAVU/dST7TLWS/v8xLhjlxfHwSe1iAeOngwF+E0FmJyoyKSEEIoq+eJeyIg7/icCkuMSGIsmG+mIOTR2E4FxAy7kOiOplmKNmnEKQYO3K36PdRf0UERxlFoMMABkOBuc/AA4reG7+/wssuS/uo/8xDJjkUtrgzV5/+CJHp6SR2rc1uoV1PC60hCfthfx7ktivMCCgjU+96azfaigQZrlyHMnlx/0/7Y8e9TetOUhqMhuQAwVXcDgCZAlJWfTZa74niN1luw==",
            "=?us-ascii?Q?1; BYAPR07MB4966;\n\t23:MmTcpZdq5pVLB3KMB4IggbL362AXfUl1rJ8FTJRkI?=\n\tR3bK2roO5WomORjXDmu8er5SL+TynZzgXFC+wee710FJU6rwcWqdWS1tLUF00wrpTWFP/ISJED3JMA74IZi5YslWjEA8o99AvXOGtGN+lu86B3oFCyQbVQj2BTgQCTtLHZAqeXzDugiARIxe+lGzm2tzIPwLrRkT0XTx06Kxo8W+74XGAZa9HxI9PrFAZ3k2YOExXi+YbltXf6Ff6jfO75ahBvFNU4NAotyYE3SD2Ucsm1ZjoQIMZDM5ltQOlOPW7rDl/8e0aCxtxUcshPHVQvpOz+J++keaPtnaNrvBl3VQ0Ws0BhNhpcGhHnHsTbNa8GYuw5nv/7Msrv7CeJidh8y+I3Ox6NlsEq5JyFqWqtvKVSwKCFSrajnMHn+LlFmpv0Ilfe/ckA2rbFH5EPhBYJ21nd9SP9nQUZrLjmy9uH3UfJI4ydN7YpqQi0GE/TPVZivYCURgHYyE3G8yt6tG/QuaS/Wyehn7oGVRusdXA7OSRibeuFD/eT7H+5iotL/dCiO82FnWP8zmQzl04A3555BiR2nlUR41hAe1RYTX5g/ZMp/HhC9VFlDf6KZtgBChCAOMG5TARkslwQS4mQdVL2i5aE4mRLkbw8n4mDX1vadgdCas33c1rfACH4RDSqfGWZE27ALE/rSP98PdipnfJwA65M0+I9GcDoFPb7C8XY3WFomAeqTAqZfA6y7UH48wXgmUmMkvTsGbuQaeomDESsfJj1++RTDCvRF8w/0PMCigSkG/B1Tc6mJ5IJmdHs1fyG16sMK6PHG3jdNjcMuOZUQr7KHlCplmLKjsTuQLXXwN9XP860zPz2J6IitL2eXDFBfyqbL8pz6OV4ueJfBZ+GeaFPWKRWAJ6LDTy2EqOV/qPrjkulPIOLQbmXtcDhqbtUoBvpri+6uv/X+iRAXrCNeuQ8/GaoOPR7Cbl1cKkHAB3+H/dnqOof2yZ0wONWp4gu/i/i+BzuZLCKUHueyo6vKDu5aFzAs+JCCuXfUvHmt3dC7fJvuJ8shbNlaqqOo6inL4Ncle/2Lz4OEXNszk/MU32NJU0yHP9G7ECgCo0KR48NqFwViM59KAI90ExNDFcaBWoq0Ei4VDCIkxQIx/QQ6lUDQ9U+czvu5pqKN6sE/27Zw6uuYfiBFtJkqj1sstxFeFdQccJXAHJTEHLTCFEkC/Iw/TGCJBjGZwQ/T4vWqKc/Du0PUgWk3M+YQMEwgaSP1+A/Rq/EErN1TyIxIY3hmFyYORQXfgKn16ECMRx91F4bnDOLhI6v7lVg+5LWdDOZYbuAo0kievc5mrXZ65FE9QnbZK+sSGjC68A4t+sh8eNTLfNHjwfSkgxZGRR3UT7k=",
            "1; BYAPR07MB4966;\n\t6:QJEdzH6nWUTEX31IFgOQpO4cWN0webdDQ0oWv03tkc+VGnOhyW+FgdJKArZVUMbjcBVjtaG+kbUPL6wBDS6IU1z6u9XawHBj5JzZvchIBIvAYjoOLrOelav6ukDwSgPyANJP0Y+X5losNp4Rf+JHOoK/CCCr5PM2r/zKxpV3qNiIAZj6AGcyC+3K62V97uzKCio+WHpdBi3wzzCRcS5pm97Yd8xxvQ+aSf65+wP7+zNe9LTU7UIQi/lbtHNtMhmhtROZPHhT2f7o6mBsqKFGR43bKKF78tUui7qjztFDxb8DhWcuYwiOKZhgFZW11a7syMHYukDfF/uGLN4kECH2/LR1XAq1vcTma9pDb7gYTqoPMWk8Pbm31jDvX4yAybjQlVgYUjxFdSKk+RNUZ/dz6riEa9ZhK43D+WjJazXyMNZRxsPhSu0xGJp2cZyHejXRD8/Z+WJV0jqufW60VX/O8Q==;\n\t5:kYzcoMIMek5hTNH/30mjWKpLHKs9CWDE8CAkvF2AWknMxofQwg7PP7bbXC+yy8TP1zL0tojSW72BnINjEEXJ+aDv+C1tmZ3AVn8BUU79D8z89zi5ysmkvv7a3ciY+3v7nQpiehkiXsamOi+3IrcRACoRw5LGr0AsRDZ4KDfUqmk=;\n\t7:VKmjsuOKxauwP6A0c5lK87A/JNAv7L5Yssu1DRHWGbVmzfdAsxH4wCvI4YRYzhWJOVqE+l+R0lvchcqrhH7mgWKjm+/3a7LdeFx65rmnoQHghPNgooRIQB2FoyJcGtyJm5gJteAUjll8K8AD/PndcsL0C7suUkT8a8Gua9jUnqXkqlKFmS1ruNKhOngr7+C23PYTWR0cPfKPciZoTMX8ewVkW8psrnRqdGmF+VcsuLU9Tyal6XqRf5mRuGt7yO0R"
        ],
        "X-MS-TrafficTypeDiagnostic": "BYAPR07MB4966:",
        "X-Microsoft-Antispam-PRVS": "<BYAPR07MB4966A261FD42D50E6BD36FAC80170@BYAPR07MB4966.namprd07.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(8211001083)(6040522)(2401047)(8121501046)(5005006)(10201501046)(3002001)(93006095)(3231355)(944501410)(52105095)(149066)(150027)(6041310)(20161123564045)(20161123558120)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123562045)(20161123560045)(201708071742011)(7699051);\n\tSRVR:BYAPR07MB4966; BCL:0; PCL:0; RULEID:; SRVR:BYAPR07MB4966; ",
        "X-Forefront-PRVS": "0805EC9467",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(6069001)(39860400002)(346002)(396003)(366004)(136003)(376002)(189003)(199004)(68736007)(3846002)(6116002)(1076002)(478600001)(5660300001)(69590400006)(72206003)(97736004)(47776003)(66066001)(105586002)(106356001)(53946003)(6512007)(50466002)(53936002)(6486002)(4326008)(25786009)(48376002)(107886003)(1857600001)(16526019)(26005)(6506007)(386003)(6636002)(8936002)(7736002)(6666003)(305945005)(50226002)(36756003)(81156014)(81166006)(2906002)(575784001)(52116002)(51416003)(76176011)(8676002)(14444005)(16586007)(446003)(956004)(316002)(486006)(2616005)(476003)(42882007)(11346002)(42262002);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BYAPR07MB4966; H:localhost.localdomain;\n\tFPR:; \n\tSPF:None; LANG:en; PTR:InfoNoRecords; MX:1; A:1; ",
        "Received-SPF": "None (protection.outlook.com: cavium.com does not designate\n\tpermitted sender hosts)",
        "X-Microsoft-Antispam-Message-Info": "LEsMZ0ognUHHqbvwTL3sLlylynqFpn6d0nL14aRox9sr5/udtTX+HRoRQFyF2Wx7+2LDIh/7vpiNJUT04DbmQMmdYRCM01hLg5vKJl4BPLrM74lmE0WApDp5phht/yXm+OtOAx9ekZRo2sRhvV36chduIjAj+x2bETI52ZHK7ripoU2GPzfn4LE0WXZjKWLOGEZ5FTeUUaIfOd43vCHsN3N7lKcwiIt9FEUVeE43TGDrhhjT931C3iUP2javekSXb4G3J4Wtwtj3rwIz8O5uZssISh1/cITID2WBt+PCSPjtJaXneR8pLsA0a8Kh2OZWh0jEQl3Z6dGx/OSNTjHaWg229EiexRfgbFXNpvG2S20=",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "caviumnetworks.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "24 Sep 2018 10:12:39.9166\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "e17e3d2b-2748-4272-6757-08d62206440c",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-CrossTenant-Id": "711e4ccf-2e9b-4bcf-a551-4094005b6194",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR07MB4966",
        "Subject": "[dpdk-dev] [PATCH v2] examples/eventdev_pipeline: add Tx adapter\n\tsupport",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Redo the worker pipelines and offload transmission to service cores\nseamlessly through Tx adapter.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>\n---\n v2 Changes:\n - Updated enqueue,dequeue depth thresholds.\n - remove redundant capability checks.\n\n examples/eventdev_pipeline/main.c             |  88 +++---\n examples/eventdev_pipeline/pipeline_common.h  |  31 +-\n .../pipeline_worker_generic.c                 | 268 +++++-------------\n .../eventdev_pipeline/pipeline_worker_tx.c    | 156 +++++-----\n 4 files changed, 207 insertions(+), 336 deletions(-)\n\n--\n2.19.0",
    "diff": "diff --git a/examples/eventdev_pipeline/main.c b/examples/eventdev_pipeline/main.c\nindex 700bc696f..92e08bc0c 100644\n--- a/examples/eventdev_pipeline/main.c\n+++ b/examples/eventdev_pipeline/main.c\n@@ -26,20 +26,6 @@ core_in_use(unsigned int lcore_id) {\n \t\tfdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);\n }\n\n-static void\n-eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,\n-\t\t\tvoid *userdata)\n-{\n-\tint port_id = (uintptr_t) userdata;\n-\tunsigned int _sent = 0;\n-\n-\tdo {\n-\t\t/* Note: hard-coded TX queue */\n-\t\t_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],\n-\t\t\t\t\t  unsent - _sent);\n-\t} while (_sent != unsent);\n-}\n-\n /*\n  * Parse the coremask given as argument (hexadecimal string) and fill\n  * the global configuration (core role and core count) with the parsed\n@@ -263,6 +249,7 @@ parse_app_args(int argc, char **argv)\n static inline int\n port_init(uint8_t port, struct rte_mempool *mbuf_pool)\n {\n+\tstruct rte_eth_rxconf rx_conf;\n \tstatic const struct rte_eth_conf port_conf_default = {\n \t\t.rxmode = {\n \t\t\t.mq_mode = ETH_MQ_RX_RSS,\n@@ -291,6 +278,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)\n \tif (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)\n \t\tport_conf.txmode.offloads |=\n \t\t\tDEV_TX_OFFLOAD_MBUF_FAST_FREE;\n+\trx_conf = dev_info.default_rxconf;\n+\trx_conf.offloads = port_conf.rxmode.offloads;\n\n \tport_conf.rx_adv_conf.rss_conf.rss_hf &=\n \t\tdev_info.flow_type_rss_offloads;\n@@ -311,7 +300,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)\n \t/* Allocate and set up 1 RX queue per Ethernet port. */\n \tfor (q = 0; q < rx_rings; q++) {\n \t\tretval = rte_eth_rx_queue_setup(port, q, rx_ring_size,\n-\t\t\t\trte_eth_dev_socket_id(port), NULL, mbuf_pool);\n+\t\t\t\trte_eth_dev_socket_id(port), &rx_conf,\n+\t\t\t\tmbuf_pool);\n \t\tif (retval < 0)\n \t\t\treturn retval;\n \t}\n@@ -350,7 +340,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)\n static int\n init_ports(uint16_t num_ports)\n {\n-\tuint16_t portid, i;\n+\tuint16_t portid;\n\n \tif (!cdata.num_mbuf)\n \t\tcdata.num_mbuf = 16384 * num_ports;\n@@ -367,36 +357,26 @@ init_ports(uint16_t num_ports)\n \t\t\trte_exit(EXIT_FAILURE, \"Cannot init port %\"PRIu16 \"\\n\",\n \t\t\t\t\tportid);\n\n-\tRTE_ETH_FOREACH_DEV(i) {\n-\t\tvoid *userdata = (void *)(uintptr_t) i;\n-\t\tfdata->tx_buf[i] =\n-\t\t\trte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);\n-\t\tif (fdata->tx_buf[i] == NULL)\n-\t\t\trte_panic(\"Out of memory\\n\");\n-\t\trte_eth_tx_buffer_init(fdata->tx_buf[i], 32);\n-\t\trte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],\n-\t\t\t\t\t\t   eth_tx_buffer_retry,\n-\t\t\t\t\t\t   userdata);\n-\t}\n-\n \treturn 0;\n }\n\n static void\n do_capability_setup(uint8_t eventdev_id)\n {\n+\tint ret;\n \tuint16_t i;\n-\tuint8_t mt_unsafe = 0;\n+\tuint8_t generic_pipeline = 0;\n \tuint8_t burst = 0;\n\n \tRTE_ETH_FOREACH_DEV(i) {\n-\t\tstruct rte_eth_dev_info dev_info;\n-\t\tmemset(&dev_info, 0, sizeof(struct rte_eth_dev_info));\n-\n-\t\trte_eth_dev_info_get(i, &dev_info);\n-\t\t/* Check if it is safe ask worker to tx. 
*/\n-\t\tmt_unsafe |= !(dev_info.tx_offload_capa &\n-\t\t\t\tDEV_TX_OFFLOAD_MT_LOCKFREE);\n+\t\tuint32_t caps = 0;\n+\n+\t\tret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (ret)\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\"Invalid capability for Tx adptr port %d\\n\", i);\n+\t\tgeneric_pipeline |= !(caps &\n+\t\t\t\tRTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);\n \t}\n\n \tstruct rte_event_dev_info eventdev_info;\n@@ -406,21 +386,42 @@ do_capability_setup(uint8_t eventdev_id)\n \tburst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :\n \t\t0;\n\n-\tif (mt_unsafe)\n+\tif (generic_pipeline)\n \t\tset_worker_generic_setup_data(&fdata->cap, burst);\n \telse\n-\t\tset_worker_tx_setup_data(&fdata->cap, burst);\n+\t\tset_worker_tx_enq_setup_data(&fdata->cap, burst);\n }\n\n static void\n signal_handler(int signum)\n {\n+\tstatic uint8_t once;\n+\tuint16_t portid;\n+\n \tif (fdata->done)\n \t\trte_exit(1, \"Exiting on signal %d\\n\", signum);\n-\tif (signum == SIGINT || signum == SIGTERM) {\n+\tif ((signum == SIGINT || signum == SIGTERM) && !once) {\n \t\tprintf(\"\\n\\nSignal %d received, preparing to exit...\\n\",\n \t\t\t\tsignum);\n+\t\tif (cdata.dump_dev)\n+\t\t\trte_event_dev_dump(0, stdout);\n+\t\tonce = 1;\n \t\tfdata->done = 1;\n+\t\trte_smp_wmb();\n+\n+\t\tRTE_ETH_FOREACH_DEV(portid) {\n+\t\t\trte_event_eth_rx_adapter_stop(portid);\n+\t\t\trte_event_eth_tx_adapter_stop(portid);\n+\t\t\trte_eth_dev_stop(portid);\n+\t\t}\n+\n+\t\trte_eal_mp_wait_lcore();\n+\n+\t\tRTE_ETH_FOREACH_DEV(portid) {\n+\t\t\trte_eth_dev_close(portid);\n+\t\t}\n+\n+\t\trte_event_dev_close(0);\n \t}\n \tif (signum == SIGTSTP)\n \t\trte_event_dev_dump(0, stdout);\n@@ -499,7 +500,7 @@ main(int argc, char **argv)\n \tif (worker_data == NULL)\n \t\trte_panic(\"rte_calloc failed\\n\");\n\n-\tint dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);\n+\tint dev_id = fdata->cap.evdev_setup(worker_data);\n \tif (dev_id < 0)\n \t\trte_exit(EXIT_FAILURE, \"Error setting up eventdev\\n\");\n\n@@ -524,8 +525,8 @@ main(int argc, char **argv)\n\n \t\tif (fdata->tx_core[lcore_id])\n \t\t\tprintf(\n-\t\t\t\t\"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\\n\",\n-\t\t\t\t__func__, lcore_id, cons_data.port_id);\n+\t\t\t\t\"[%s()] lcore %d executing NIC Tx\\n\",\n+\t\t\t\t__func__, lcore_id);\n\n \t\tif (fdata->sched_core[lcore_id])\n \t\t\tprintf(\"[%s()] lcore %d executing scheduler\\n\",\n@@ -555,9 +556,6 @@ main(int argc, char **argv)\n\n \trte_eal_mp_wait_lcore();\n\n-\tif (cdata.dump_dev)\n-\t\trte_event_dev_dump(dev_id, stdout);\n-\n \tif (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=\n \t\t\t(uint64_t)-ENOTSUP)) {\n \t\tprintf(\"\\nPort Workload distribution:\\n\");\ndiff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h\nindex 9703396f8..a6cc912fb 100644\n--- a/examples/eventdev_pipeline/pipeline_common.h\n+++ b/examples/eventdev_pipeline/pipeline_common.h\n@@ -16,6 +16,7 @@\n #include <rte_ethdev.h>\n #include <rte_eventdev.h>\n #include <rte_event_eth_rx_adapter.h>\n+#include <rte_event_eth_tx_adapter.h>\n #include <rte_service.h>\n #include <rte_service_component.h>\n\n@@ -23,38 +24,30 @@\n #define BATCH_SIZE 16\n #define MAX_NUM_CORE 64\n\n-struct cons_data {\n-\tuint8_t dev_id;\n-\tuint8_t port_id;\n-\tuint8_t release;\n-} __rte_cache_aligned;\n-\n struct worker_data {\n \tuint8_t dev_id;\n \tuint8_t port_id;\n } __rte_cache_aligned;\n\n typedef int (*worker_loop)(void *);\n-typedef int 
(*consumer_loop)(void);\n typedef void (*schedule_loop)(unsigned int);\n-typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);\n-typedef void (*rx_adapter_setup)(uint16_t nb_ports);\n+typedef int (*eventdev_setup)(struct worker_data *);\n+typedef void (*adapter_setup)(uint16_t nb_ports);\n typedef void (*opt_check)(void);\n\n struct setup_data {\n \tworker_loop worker;\n-\tconsumer_loop consumer;\n \tschedule_loop scheduler;\n \teventdev_setup evdev_setup;\n-\trx_adapter_setup adptr_setup;\n+\tadapter_setup adptr_setup;\n \topt_check check_opt;\n };\n\n struct fastpath_data {\n \tvolatile int done;\n-\tuint32_t tx_lock;\n \tuint32_t evdev_service_id;\n \tuint32_t rxadptr_service_id;\n+\tuint32_t txadptr_service_id;\n \tbool rx_single;\n \tbool tx_single;\n \tbool sched_single;\n@@ -62,7 +55,6 @@ struct fastpath_data {\n \tunsigned int tx_core[MAX_NUM_CORE];\n \tunsigned int sched_core[MAX_NUM_CORE];\n \tunsigned int worker_core[MAX_NUM_CORE];\n-\tstruct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];\n \tstruct setup_data cap;\n } __rte_cache_aligned;\n\n@@ -88,6 +80,8 @@ struct config_data {\n \tint16_t next_qid[MAX_NUM_STAGES+2];\n \tint16_t qid[MAX_NUM_STAGES];\n \tuint8_t rx_adapter_id;\n+\tuint8_t tx_adapter_id;\n+\tuint8_t tx_queue_id;\n \tuint64_t worker_lcore_mask;\n \tuint64_t rx_lcore_mask;\n \tuint64_t tx_lcore_mask;\n@@ -99,8 +93,6 @@ struct port_link {\n \tuint8_t priority;\n };\n\n-struct cons_data cons_data;\n-\n struct fastpath_data *fdata;\n struct config_data cdata;\n\n@@ -142,12 +134,11 @@ schedule_devices(unsigned int lcore_id)\n \t\t}\n \t}\n\n-\tif (fdata->tx_core[lcore_id] && (fdata->tx_single ||\n-\t\t\t rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {\n-\t\tfdata->cap.consumer();\n-\t\trte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));\n+\tif (fdata->tx_core[lcore_id]) {\n+\t\trte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,\n+\t\t\t\t!fdata->tx_single);\n \t}\n }\n\n void set_worker_generic_setup_data(struct setup_data *caps, bool burst);\n-void set_worker_tx_setup_data(struct setup_data *caps, bool burst);\n+void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);\ndiff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c\nindex 2215e9ebe..169064949 100644\n--- a/examples/eventdev_pipeline/pipeline_worker_generic.c\n+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c\n@@ -119,153 +119,13 @@ worker_generic_burst(void *arg)\n \treturn 0;\n }\n\n-static __rte_always_inline int\n-consumer(void)\n-{\n-\tconst uint64_t freq_khz = rte_get_timer_hz() / 1000;\n-\tstruct rte_event packet;\n-\n-\tstatic uint64_t received;\n-\tstatic uint64_t last_pkts;\n-\tstatic uint64_t last_time;\n-\tstatic uint64_t start_time;\n-\tint i;\n-\tuint8_t dev_id = cons_data.dev_id;\n-\tuint8_t port_id = cons_data.port_id;\n-\n-\tdo {\n-\t\tuint16_t n = rte_event_dequeue_burst(dev_id, port_id,\n-\t\t\t\t&packet, 1, 0);\n-\n-\t\tif (n == 0) {\n-\t\t\tRTE_ETH_FOREACH_DEV(i)\n-\t\t\t\trte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);\n-\t\t\treturn 0;\n-\t\t}\n-\t\tif (start_time == 0)\n-\t\t\tlast_time = start_time = rte_get_timer_cycles();\n-\n-\t\treceived++;\n-\t\tuint8_t outport = packet.mbuf->port;\n-\n-\t\texchange_mac(packet.mbuf);\n-\t\trte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],\n-\t\t\t\tpacket.mbuf);\n-\n-\t\tif (cons_data.release)\n-\t\t\trte_event_enqueue_burst(dev_id, port_id,\n-\t\t\t\t\t\t\t\t&packet, n);\n-\n-\t\t/* Print out mpps every 1<22 packets 
*/\n-\t\tif (!cdata.quiet && received >= last_pkts + (1<<22)) {\n-\t\t\tconst uint64_t now = rte_get_timer_cycles();\n-\t\t\tconst uint64_t total_ms = (now - start_time) / freq_khz;\n-\t\t\tconst uint64_t delta_ms = (now - last_time) / freq_khz;\n-\t\t\tuint64_t delta_pkts = received - last_pkts;\n-\n-\t\t\tprintf(\"# %s RX=%\"PRIu64\", time %\"PRIu64 \"ms, \"\n-\t\t\t\t\t\"avg %.3f mpps [current %.3f mpps]\\n\",\n-\t\t\t\t\t__func__,\n-\t\t\t\t\treceived,\n-\t\t\t\t\ttotal_ms,\n-\t\t\t\t\treceived / (total_ms * 1000.0),\n-\t\t\t\t\tdelta_pkts / (delta_ms * 1000.0));\n-\t\t\tlast_pkts = received;\n-\t\t\tlast_time = now;\n-\t\t}\n-\n-\t\tcdata.num_packets--;\n-\t\tif (cdata.num_packets <= 0)\n-\t\t\tfdata->done = 1;\n-\t/* Be stuck in this loop if single. */\n-\t} while (!fdata->done && fdata->tx_single);\n-\n-\treturn 0;\n-}\n-\n-static __rte_always_inline int\n-consumer_burst(void)\n-{\n-\tconst uint64_t freq_khz = rte_get_timer_hz() / 1000;\n-\tstruct rte_event packets[BATCH_SIZE];\n-\n-\tstatic uint64_t received;\n-\tstatic uint64_t last_pkts;\n-\tstatic uint64_t last_time;\n-\tstatic uint64_t start_time;\n-\tunsigned int i, j;\n-\tuint8_t dev_id = cons_data.dev_id;\n-\tuint8_t port_id = cons_data.port_id;\n-\n-\tdo {\n-\t\tuint16_t n = rte_event_dequeue_burst(dev_id, port_id,\n-\t\t\t\tpackets, RTE_DIM(packets), 0);\n-\n-\t\tif (n == 0) {\n-\t\t\tRTE_ETH_FOREACH_DEV(j)\n-\t\t\t\trte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);\n-\t\t\treturn 0;\n-\t\t}\n-\t\tif (start_time == 0)\n-\t\t\tlast_time = start_time = rte_get_timer_cycles();\n-\n-\t\treceived += n;\n-\t\tfor (i = 0; i < n; i++) {\n-\t\t\tuint8_t outport = packets[i].mbuf->port;\n-\n-\t\t\texchange_mac(packets[i].mbuf);\n-\t\t\trte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],\n-\t\t\t\t\tpackets[i].mbuf);\n-\n-\t\t\tpackets[i].op = RTE_EVENT_OP_RELEASE;\n-\t\t}\n-\n-\t\tif (cons_data.release) {\n-\t\t\tuint16_t nb_tx;\n-\n-\t\t\tnb_tx = rte_event_enqueue_burst(dev_id, port_id,\n-\t\t\t\t\t\t\t\tpackets, n);\n-\t\t\twhile (nb_tx < n)\n-\t\t\t\tnb_tx += rte_event_enqueue_burst(dev_id,\n-\t\t\t\t\t\tport_id, packets + nb_tx,\n-\t\t\t\t\t\tn - nb_tx);\n-\t\t}\n-\n-\t\t/* Print out mpps every 1<22 packets */\n-\t\tif (!cdata.quiet && received >= last_pkts + (1<<22)) {\n-\t\t\tconst uint64_t now = rte_get_timer_cycles();\n-\t\t\tconst uint64_t total_ms = (now - start_time) / freq_khz;\n-\t\t\tconst uint64_t delta_ms = (now - last_time) / freq_khz;\n-\t\t\tuint64_t delta_pkts = received - last_pkts;\n-\n-\t\t\tprintf(\"# consumer RX=%\"PRIu64\", time %\"PRIu64 \"ms, \"\n-\t\t\t\t\t\"avg %.3f mpps [current %.3f mpps]\\n\",\n-\t\t\t\t\treceived,\n-\t\t\t\t\ttotal_ms,\n-\t\t\t\t\treceived / (total_ms * 1000.0),\n-\t\t\t\t\tdelta_pkts / (delta_ms * 1000.0));\n-\t\t\tlast_pkts = received;\n-\t\t\tlast_time = now;\n-\t\t}\n-\n-\t\tcdata.num_packets -= n;\n-\t\tif (cdata.num_packets <= 0)\n-\t\t\tfdata->done = 1;\n-\t/* Be stuck in this loop if single. 
*/\n-\t} while (!fdata->done && fdata->tx_single);\n-\n-\treturn 0;\n-}\n-\n static int\n-setup_eventdev_generic(struct cons_data *cons_data,\n-\t\tstruct worker_data *worker_data)\n+setup_eventdev_generic(struct worker_data *worker_data)\n {\n \tconst uint8_t dev_id = 0;\n \t/* +1 stages is for a SINGLE_LINK TX stage */\n \tconst uint8_t nb_queues = cdata.num_stages + 1;\n-\t/* + 1 is one port for consumer */\n-\tconst uint8_t nb_ports = cdata.num_workers + 1;\n+\tconst uint8_t nb_ports = cdata.num_workers;\n \tstruct rte_event_dev_config config = {\n \t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n@@ -285,11 +145,6 @@ setup_eventdev_generic(struct cons_data *cons_data,\n \t\t\t.nb_atomic_flows = 1024,\n \t\t.nb_atomic_order_sequences = 1024,\n \t};\n-\tstruct rte_event_port_conf tx_p_conf = {\n-\t\t\t.dequeue_depth = 128,\n-\t\t\t.enqueue_depth = 128,\n-\t\t\t.new_event_threshold = 4096,\n-\t};\n \tstruct rte_event_queue_conf tx_q_conf = {\n \t\t\t.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,\n \t\t\t.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,\n@@ -297,7 +152,6 @@ setup_eventdev_generic(struct cons_data *cons_data,\n\n \tstruct port_link worker_queues[MAX_NUM_STAGES];\n \tuint8_t disable_implicit_release;\n-\tstruct port_link tx_queue;\n \tunsigned int i;\n\n \tint ret, ndev = rte_event_dev_count();\n@@ -314,7 +168,6 @@ setup_eventdev_generic(struct cons_data *cons_data,\n \t\t\tRTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);\n\n \twkr_p_conf.disable_implicit_release = disable_implicit_release;\n-\ttx_p_conf.disable_implicit_release = disable_implicit_release;\n\n \tif (dev_info.max_event_port_dequeue_depth <\n \t\t\tconfig.nb_event_port_dequeue_depth)\n@@ -372,8 +225,7 @@ setup_eventdev_generic(struct cons_data *cons_data,\n \t\tprintf(\"%d: error creating qid %d\\n\", __LINE__, i);\n \t\treturn -1;\n \t}\n-\ttx_queue.queue_id = i;\n-\ttx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;\n+\tcdata.tx_queue_id = i;\n\n \tif (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)\n \t\twkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;\n@@ -403,26 +255,6 @@ setup_eventdev_generic(struct cons_data *cons_data,\n \t\tw->port_id = i;\n \t}\n\n-\tif (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)\n-\t\ttx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;\n-\tif (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)\n-\t\ttx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;\n-\n-\t/* port for consumer, linked to TX queue */\n-\tif (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {\n-\t\tprintf(\"Error setting up port %d\\n\", i);\n-\t\treturn -1;\n-\t}\n-\tif (rte_event_port_link(dev_id, i, &tx_queue.queue_id,\n-\t\t\t\t&tx_queue.priority, 1) != 1) {\n-\t\tprintf(\"%d: error creating link for port %d\\n\",\n-\t\t\t\t__LINE__, i);\n-\t\treturn -1;\n-\t}\n-\t*cons_data = (struct cons_data){.dev_id = dev_id,\n-\t\t\t\t\t.port_id = i,\n-\t\t\t\t\t.release = disable_implicit_release };\n-\n \tret = rte_event_dev_service_id_get(dev_id,\n \t\t\t\t&fdata->evdev_service_id);\n \tif (ret != -ESRCH && ret != 0) {\n@@ -431,76 +263,107 @@ setup_eventdev_generic(struct cons_data *cons_data,\n \t}\n \trte_service_runstate_set(fdata->evdev_service_id, 1);\n \trte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);\n-\tif (rte_event_dev_start(dev_id) < 0) {\n-\t\tprintf(\"Error starting eventdev\\n\");\n-\t\treturn -1;\n-\t}\n\n \treturn dev_id;\n }\n\n static void\n-init_rx_adapter(uint16_t 
nb_ports)\n+init_adapters(uint16_t nb_ports)\n {\n \tint i;\n \tint ret;\n+\tuint8_t tx_port_id = 0;\n \tuint8_t evdev_id = 0;\n \tstruct rte_event_dev_info dev_info;\n\n \tret = rte_event_dev_info_get(evdev_id, &dev_info);\n\n-\tstruct rte_event_port_conf rx_p_conf = {\n-\t\t.dequeue_depth = 8,\n-\t\t.enqueue_depth = 8,\n-\t\t.new_event_threshold = 1200,\n+\tstruct rte_event_port_conf adptr_p_conf = {\n+\t\t.dequeue_depth = cdata.worker_cq_depth,\n+\t\t.enqueue_depth = 64,\n+\t\t.new_event_threshold = 4096,\n \t};\n\n-\tif (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)\n-\t\trx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;\n-\tif (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)\n-\t\trx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;\n+\tif (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)\n+\t\tadptr_p_conf.dequeue_depth =\n+\t\t\tdev_info.max_event_port_dequeue_depth;\n+\tif (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)\n+\t\tadptr_p_conf.enqueue_depth =\n+\t\t\tdev_info.max_event_port_enqueue_depth;\n\n \t/* Create one adapter for all the ethernet ports. */\n \tret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,\n-\t\t\t&rx_p_conf);\n+\t\t\t&adptr_p_conf);\n \tif (ret)\n \t\trte_exit(EXIT_FAILURE, \"failed to create rx adapter[%d]\",\n \t\t\t\tcdata.rx_adapter_id);\n\n+\tret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,\n+\t\t\t&adptr_p_conf);\n+\tif (ret)\n+\t\trte_exit(EXIT_FAILURE, \"failed to create tx adapter[%d]\",\n+\t\t\t\tcdata.tx_adapter_id);\n+\n \tstruct rte_event_eth_rx_adapter_queue_conf queue_conf;\n \tmemset(&queue_conf, 0, sizeof(queue_conf));\n \tqueue_conf.ev.sched_type = cdata.queue_type;\n \tqueue_conf.ev.queue_id = cdata.qid[0];\n\n \tfor (i = 0; i < nb_ports; i++) {\n-\t\tuint32_t cap;\n-\n-\t\tret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);\n-\t\tif (ret)\n-\t\t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\t\"failed to get event rx adapter \"\n-\t\t\t\t\t\"capabilities\");\n-\n \t\tret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,\n \t\t\t\t-1, &queue_conf);\n \t\tif (ret)\n \t\t\trte_exit(EXIT_FAILURE,\n \t\t\t\t\t\"Failed to add queues to Rx adapter\");\n+\n+\t\tret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,\n+\t\t\t\t-1);\n+\t\tif (ret)\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\t\"Failed to add queues to Tx adapter\");\n \t}\n\n+\tret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,\n+\t\t\t&tx_port_id);\n+\tif (ret)\n+\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\"Failed to get Tx adapter port id\");\n+\tret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,\n+\t\t\tNULL, 1);\n+\tif (ret != 1)\n+\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\"Unable to link Tx adapter port to Tx queue\");\n+\n \tret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,\n \t\t\t\t&fdata->rxadptr_service_id);\n \tif (ret != -ESRCH && ret != 0) {\n \t\trte_exit(EXIT_FAILURE,\n-\t\t\t\"Error getting the service ID for sw eventdev\\n\");\n+\t\t\t\"Error getting the service ID for Rx adapter\\n\");\n \t}\n \trte_service_runstate_set(fdata->rxadptr_service_id, 1);\n \trte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);\n\n+\tret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,\n+\t\t\t\t&fdata->txadptr_service_id);\n+\tif (ret != -ESRCH && ret != 0) {\n+\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\"Error getting the service ID for Tx 
adapter\\n\");\n+\t}\n+\trte_service_runstate_set(fdata->txadptr_service_id, 1);\n+\trte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);\n+\n \tret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);\n \tif (ret)\n \t\trte_exit(EXIT_FAILURE, \"Rx adapter[%d] start failed\",\n \t\t\t\tcdata.rx_adapter_id);\n+\n+\tret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);\n+\tif (ret)\n+\t\trte_exit(EXIT_FAILURE, \"Tx adapter[%d] start failed\",\n+\t\t\t\tcdata.tx_adapter_id);\n+\n+\tif (rte_event_dev_start(evdev_id) < 0)\n+\t\trte_exit(EXIT_FAILURE, \"Error starting eventdev\");\n }\n\n static void\n@@ -510,6 +373,7 @@ generic_opt_check(void)\n \tint ret;\n \tuint32_t cap = 0;\n \tuint8_t rx_needed = 0;\n+\tuint8_t sched_needed = 0;\n \tstruct rte_event_dev_info eventdev_info;\n\n \tmemset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));\n@@ -519,6 +383,8 @@ generic_opt_check(void)\n \t\t\t\tRTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))\n \t\trte_exit(EXIT_FAILURE,\n \t\t\t\t\"Event dev doesn't support all type queues\\n\");\n+\tsched_needed = !(eventdev_info.event_dev_cap &\n+\t\tRTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);\n\n \tRTE_ETH_FOREACH_DEV(i) {\n \t\tret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);\n@@ -531,9 +397,8 @@ generic_opt_check(void)\n\n \tif (cdata.worker_lcore_mask == 0 ||\n \t\t\t(rx_needed && cdata.rx_lcore_mask == 0) ||\n-\t\t\tcdata.tx_lcore_mask == 0 || (cdata.sched_lcore_mask == 0\n-\t\t\t\t&& !(eventdev_info.event_dev_cap &\n-\t\t\t\t\tRTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {\n+\t\t\t(cdata.tx_lcore_mask == 0) ||\n+\t\t\t(sched_needed && cdata.sched_lcore_mask == 0)) {\n \t\tprintf(\"Core part of pipeline was not assigned any cores. \"\n \t\t\t\"This will stall the pipeline, please check core masks \"\n \t\t\t\"(use -h for details on setting core masks):\\n\"\n@@ -545,23 +410,24 @@ generic_opt_check(void)\n \t\trte_exit(-1, \"Fix core masks\\n\");\n \t}\n\n-\tif (eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)\n+\tif (!sched_needed)\n \t\tmemset(fdata->sched_core, 0,\n \t\t\t\tsizeof(unsigned int) * MAX_NUM_CORE);\n+\tif (!rx_needed)\n+\t\tmemset(fdata->rx_core, 0,\n+\t\t\t\tsizeof(unsigned int) * MAX_NUM_CORE);\n }\n\n void\n set_worker_generic_setup_data(struct setup_data *caps, bool burst)\n {\n \tif (burst) {\n-\t\tcaps->consumer = consumer_burst;\n \t\tcaps->worker = worker_generic_burst;\n \t} else {\n-\t\tcaps->consumer = consumer;\n \t\tcaps->worker = worker_generic;\n \t}\n\n-\tcaps->adptr_setup = init_rx_adapter;\n+\tcaps->adptr_setup = init_adapters;\n \tcaps->scheduler = schedule_devices;\n \tcaps->evdev_setup = setup_eventdev_generic;\n \tcaps->check_opt = generic_opt_check;\ndiff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c\nindex 3dbde92df..85eb075fc 100644\n--- a/examples/eventdev_pipeline/pipeline_worker_tx.c\n+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c\n@@ -36,10 +36,11 @@ worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,\n }\n\n static __rte_always_inline void\n-worker_tx_pkt(struct rte_mbuf *mbuf)\n+worker_tx_pkt(const uint8_t dev, const uint8_t port, struct rte_event *ev)\n {\n-\texchange_mac(mbuf);\n-\twhile (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)\n+\texchange_mac(ev->mbuf);\n+\trte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);\n+\twhile (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1))\n \t\trte_pause();\n }\n\n@@ -64,15 +65,15 @@ worker_do_tx_single(void *arg)\n \t\treceived++;\n\n \t\tif (ev.sched_type == 
RTE_SCHED_TYPE_ATOMIC) {\n-\t\t\tworker_tx_pkt(ev.mbuf);\n+\t\t\tworker_tx_pkt(dev, port, &ev);\n \t\t\ttx++;\n-\t\t\tcontinue;\n+\t\t} else {\n+\t\t\twork();\n+\t\t\tev.queue_id++;\n+\t\t\tworker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);\n+\t\t\tworker_event_enqueue(dev, port, &ev);\n+\t\t\tfwd++;\n \t\t}\n-\t\twork();\n-\t\tev.queue_id++;\n-\t\tworker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);\n-\t\tworker_event_enqueue(dev, port, &ev);\n-\t\tfwd++;\n \t}\n\n \tif (!cdata.quiet)\n@@ -100,14 +101,14 @@ worker_do_tx_single_atq(void *arg)\n \t\treceived++;\n\n \t\tif (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {\n-\t\t\tworker_tx_pkt(ev.mbuf);\n+\t\t\tworker_tx_pkt(dev, port, &ev);\n \t\t\ttx++;\n-\t\t\tcontinue;\n+\t\t} else {\n+\t\t\twork();\n+\t\t\tworker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);\n+\t\t\tworker_event_enqueue(dev, port, &ev);\n+\t\t\tfwd++;\n \t\t}\n-\t\twork();\n-\t\tworker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);\n-\t\tworker_event_enqueue(dev, port, &ev);\n-\t\tfwd++;\n \t}\n\n \tif (!cdata.quiet)\n@@ -141,7 +142,7 @@ worker_do_tx_single_burst(void *arg)\n \t\t\trte_prefetch0(ev[i + 1].mbuf);\n \t\t\tif (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {\n\n-\t\t\t\tworker_tx_pkt(ev[i].mbuf);\n+\t\t\t\tworker_tx_pkt(dev, port, &ev[i]);\n \t\t\t\tev[i].op = RTE_EVENT_OP_RELEASE;\n \t\t\t\ttx++;\n\n@@ -188,7 +189,7 @@ worker_do_tx_single_burst_atq(void *arg)\n \t\t\trte_prefetch0(ev[i + 1].mbuf);\n \t\t\tif (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {\n\n-\t\t\t\tworker_tx_pkt(ev[i].mbuf);\n+\t\t\t\tworker_tx_pkt(dev, port, &ev[i]);\n \t\t\t\tev[i].op = RTE_EVENT_OP_RELEASE;\n \t\t\t\ttx++;\n \t\t\t} else\n@@ -232,7 +233,7 @@ worker_do_tx(void *arg)\n\n \t\tif (cq_id >= lst_qid) {\n \t\t\tif (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {\n-\t\t\t\tworker_tx_pkt(ev.mbuf);\n+\t\t\t\tworker_tx_pkt(dev, port, &ev);\n \t\t\t\ttx++;\n \t\t\t\tcontinue;\n \t\t\t}\n@@ -280,7 +281,7 @@ worker_do_tx_atq(void *arg)\n\n \t\tif (cq_id == lst_qid) {\n \t\t\tif (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {\n-\t\t\t\tworker_tx_pkt(ev.mbuf);\n+\t\t\t\tworker_tx_pkt(dev, port, &ev);\n \t\t\t\ttx++;\n \t\t\t\tcontinue;\n \t\t\t}\n@@ -330,7 +331,7 @@ worker_do_tx_burst(void *arg)\n\n \t\t\tif (cq_id >= lst_qid) {\n \t\t\t\tif (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {\n-\t\t\t\t\tworker_tx_pkt(ev[i].mbuf);\n+\t\t\t\t\tworker_tx_pkt(dev, port, &ev[i]);\n \t\t\t\t\ttx++;\n \t\t\t\t\tev[i].op = RTE_EVENT_OP_RELEASE;\n \t\t\t\t\tcontinue;\n@@ -387,7 +388,7 @@ worker_do_tx_burst_atq(void *arg)\n\n \t\t\tif (cq_id == lst_qid) {\n \t\t\t\tif (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {\n-\t\t\t\t\tworker_tx_pkt(ev[i].mbuf);\n+\t\t\t\t\tworker_tx_pkt(dev, port, &ev[i]);\n \t\t\t\t\ttx++;\n \t\t\t\t\tev[i].op = RTE_EVENT_OP_RELEASE;\n \t\t\t\t\tcontinue;\n@@ -413,10 +414,8 @@ worker_do_tx_burst_atq(void *arg)\n }\n\n static int\n-setup_eventdev_worker_tx(struct cons_data *cons_data,\n-\t\tstruct worker_data *worker_data)\n+setup_eventdev_worker_tx_enq(struct worker_data *worker_data)\n {\n-\tRTE_SET_USED(cons_data);\n \tuint8_t i;\n \tconst uint8_t atq = cdata.all_type_queues ? 
1 : 0;\n \tconst uint8_t dev_id = 0;\n@@ -575,10 +574,9 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,\n \t}\n \trte_service_runstate_set(fdata->evdev_service_id, 1);\n \trte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);\n-\tif (rte_event_dev_start(dev_id) < 0) {\n-\t\tprintf(\"Error starting eventdev\\n\");\n-\t\treturn -1;\n-\t}\n+\n+\tif (rte_event_dev_start(dev_id) < 0)\n+\t\trte_exit(EXIT_FAILURE, \"Error starting eventdev\");\n\n \treturn dev_id;\n }\n@@ -602,7 +600,7 @@ service_rx_adapter(void *arg)\n }\n\n static void\n-init_rx_adapter(uint16_t nb_ports)\n+init_adapters(uint16_t nb_ports)\n {\n \tint i;\n \tint ret;\n@@ -613,17 +611,18 @@ init_rx_adapter(uint16_t nb_ports)\n \tret = rte_event_dev_info_get(evdev_id, &dev_info);\n \tadptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);\n\n-\tstruct rte_event_port_conf rx_p_conf = {\n-\t\t.dequeue_depth = 8,\n-\t\t.enqueue_depth = 8,\n-\t\t.new_event_threshold = 1200,\n+\tstruct rte_event_port_conf adptr_p_conf = {\n+\t\t.dequeue_depth = cdata.worker_cq_depth,\n+\t\t.enqueue_depth = 64,\n+\t\t.new_event_threshold = 4096,\n \t};\n\n-\tif (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)\n-\t\trx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;\n-\tif (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)\n-\t\trx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;\n-\n+\tif (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)\n+\t\tadptr_p_conf.dequeue_depth =\n+\t\t\tdev_info.max_event_port_dequeue_depth;\n+\tif (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)\n+\t\tadptr_p_conf.enqueue_depth =\n+\t\t\tdev_info.max_event_port_enqueue_depth;\n\n \tstruct rte_event_eth_rx_adapter_queue_conf queue_conf;\n \tmemset(&queue_conf, 0, sizeof(queue_conf));\n@@ -633,11 +632,11 @@ init_rx_adapter(uint16_t nb_ports)\n \t\tuint32_t cap;\n \t\tuint32_t service_id;\n\n-\t\tret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);\n+\t\tret = rte_event_eth_rx_adapter_create(i, evdev_id,\n+\t\t\t\t&adptr_p_conf);\n \t\tif (ret)\n \t\t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\t\"failed to create rx adapter[%d]\",\n-\t\t\t\t\tcdata.rx_adapter_id);\n+\t\t\t\t\t\"failed to create rx adapter[%d]\", i);\n\n \t\tret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);\n \t\tif (ret)\n@@ -654,7 +653,6 @@ init_rx_adapter(uint16_t nb_ports)\n \t\t\trte_exit(EXIT_FAILURE,\n \t\t\t\t\t\"Failed to add queues to Rx adapter\");\n\n-\n \t\t/* Producer needs to be scheduled. 
*/\n \t\tif (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {\n \t\t\tret = rte_event_eth_rx_adapter_service_id_get(i,\n@@ -680,9 +678,29 @@ init_rx_adapter(uint16_t nb_ports)\n \t\tret = rte_event_eth_rx_adapter_start(i);\n \t\tif (ret)\n \t\t\trte_exit(EXIT_FAILURE, \"Rx adapter[%d] start failed\",\n-\t\t\t\t\tcdata.rx_adapter_id);\n+\t\t\t\t\ti);\n \t}\n\n+\t/* We already know that Tx adapter has INTERNAL port cap*/\n+\tret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,\n+\t\t\t&adptr_p_conf);\n+\tif (ret)\n+\t\trte_exit(EXIT_FAILURE, \"failed to create tx adapter[%d]\",\n+\t\t\t\tcdata.tx_adapter_id);\n+\n+\tfor (i = 0; i < nb_ports; i++) {\n+\t\tret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,\n+\t\t\t\t-1);\n+\t\tif (ret)\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\t\"Failed to add queues to Tx adapter\");\n+\t}\n+\n+\tret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);\n+\tif (ret)\n+\t\trte_exit(EXIT_FAILURE, \"Tx adapter[%d] start failed\",\n+\t\t\t\tcdata.tx_adapter_id);\n+\n \tif (adptr_services->nb_rx_adptrs) {\n \t\tstruct rte_service_spec service;\n\n@@ -695,8 +713,7 @@ init_rx_adapter(uint16_t nb_ports)\n \t\t\t\t&fdata->rxadptr_service_id);\n \t\tif (ret)\n \t\t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\"Rx adapter[%d] service register failed\",\n-\t\t\t\tcdata.rx_adapter_id);\n+\t\t\t\t\"Rx adapter service register failed\");\n\n \t\trte_service_runstate_set(fdata->rxadptr_service_id, 1);\n \t\trte_service_component_runstate_set(fdata->rxadptr_service_id,\n@@ -708,23 +725,19 @@ init_rx_adapter(uint16_t nb_ports)\n \t\trte_free(adptr_services);\n \t}\n\n-\tif (!adptr_services->nb_rx_adptrs && fdata->cap.consumer == NULL &&\n-\t\t\t(dev_info.event_dev_cap &\n+\tif (!adptr_services->nb_rx_adptrs && (dev_info.event_dev_cap &\n \t\t\t RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))\n \t\tfdata->cap.scheduler = NULL;\n-\n-\tif (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)\n-\t\tmemset(fdata->sched_core, 0,\n-\t\t\t\tsizeof(unsigned int) * MAX_NUM_CORE);\n }\n\n static void\n-worker_tx_opt_check(void)\n+worker_tx_enq_opt_check(void)\n {\n \tint i;\n \tint ret;\n \tuint32_t cap = 0;\n \tuint8_t rx_needed = 0;\n+\tuint8_t sched_needed = 0;\n \tstruct rte_event_dev_info eventdev_info;\n\n \tmemset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));\n@@ -734,32 +747,38 @@ worker_tx_opt_check(void)\n \t\t\t\tRTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))\n \t\trte_exit(EXIT_FAILURE,\n \t\t\t\t\"Event dev doesn't support all type queues\\n\");\n+\tsched_needed = !(eventdev_info.event_dev_cap &\n+\t\tRTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);\n\n \tRTE_ETH_FOREACH_DEV(i) {\n \t\tret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);\n \t\tif (ret)\n \t\t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\t\"failed to get event rx adapter \"\n-\t\t\t\t\t\"capabilities\");\n+\t\t\t\t\"failed to get event rx adapter capabilities\");\n \t\trx_needed |=\n \t\t\t!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);\n \t}\n\n \tif (cdata.worker_lcore_mask == 0 ||\n \t\t\t(rx_needed && cdata.rx_lcore_mask == 0) ||\n-\t\t\t(cdata.sched_lcore_mask == 0 &&\n-\t\t\t !(eventdev_info.event_dev_cap &\n-\t\t\t\t RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {\n+\t\t\t(sched_needed && cdata.sched_lcore_mask == 0)) {\n \t\tprintf(\"Core part of pipeline was not assigned any cores. 
\"\n \t\t\t\"This will stall the pipeline, please check core masks \"\n \t\t\t\"(use -h for details on setting core masks):\\n\"\n-\t\t\t\"\\trx: %\"PRIu64\"\\n\\ttx: %\"PRIu64\"\\n\\tsched: %\"PRIu64\n-\t\t\t\"\\n\\tworkers: %\"PRIu64\"\\n\",\n-\t\t\tcdata.rx_lcore_mask, cdata.tx_lcore_mask,\n-\t\t\tcdata.sched_lcore_mask,\n-\t\t\tcdata.worker_lcore_mask);\n+\t\t\t\"\\trx: %\"PRIu64\"\\n\\tsched: %\"PRIu64\n+\t\t\t\"\\n\\tworkers: %\"PRIu64\"\\n\", cdata.rx_lcore_mask,\n+\t\t\tcdata.sched_lcore_mask, cdata.worker_lcore_mask);\n \t\trte_exit(-1, \"Fix core masks\\n\");\n \t}\n+\n+\tif (!sched_needed)\n+\t\tmemset(fdata->sched_core, 0,\n+\t\t\t\tsizeof(unsigned int) * MAX_NUM_CORE);\n+\tif (!rx_needed)\n+\t\tmemset(fdata->rx_core, 0,\n+\t\t\t\tsizeof(unsigned int) * MAX_NUM_CORE);\n+\n+\tmemset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);\n }\n\n static worker_loop\n@@ -821,18 +840,15 @@ get_worker_multi_stage(bool burst)\n }\n\n void\n-set_worker_tx_setup_data(struct setup_data *caps, bool burst)\n+set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst)\n {\n \tif (cdata.num_stages == 1)\n \t\tcaps->worker = get_worker_single_stage(burst);\n \telse\n \t\tcaps->worker = get_worker_multi_stage(burst);\n\n-\tmemset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);\n-\n-\tcaps->check_opt = worker_tx_opt_check;\n-\tcaps->consumer = NULL;\n+\tcaps->check_opt = worker_tx_enq_opt_check;\n \tcaps->scheduler = schedule_devices;\n-\tcaps->evdev_setup = setup_eventdev_worker_tx;\n-\tcaps->adptr_setup = init_rx_adapter;\n+\tcaps->evdev_setup = setup_eventdev_worker_tx_enq;\n+\tcaps->adptr_setup = init_adapters;\n }\n",
    "prefixes": [
        "v2"
    ]
}
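
The payload above embeds further GET-only links (mbox, comments, checks). A hedged sketch of following them with the same library; the output file name is an assumption:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/45198/").json()

# Save the patch in a git-am friendly mbox format
with open("45198.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# List the CI checks recorded against the patch
for check in requests.get(patch["checks"]).json():
    print(check.get("context"), check.get("state"))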