get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch, replacing all writable fields.

GET /api/patches/17081/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 17081,
    "url": "http://patches.dpdk.org/api/patches/17081/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1479447902-3700-3-git-send-email-jerin.jacob@caviumnetworks.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1479447902-3700-3-git-send-email-jerin.jacob@caviumnetworks.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1479447902-3700-3-git-send-email-jerin.jacob@caviumnetworks.com",
    "date": "2016-11-18T05:45:00",
    "name": "[dpdk-dev,2/4] eventdev: implement the northbound APIs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "506c4b7aa6b0c1447b970e2554f891f51f5ba8b5",
    "submitter": {
        "id": 305,
        "url": "http://patches.dpdk.org/api/people/305/?format=api",
        "name": "Jerin Jacob",
        "email": "jerin.jacob@caviumnetworks.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1479447902-3700-3-git-send-email-jerin.jacob@caviumnetworks.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/17081/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/17081/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id BAA555698;\n\tFri, 18 Nov 2016 06:46:22 +0100 (CET)",
            "from NAM01-BY2-obe.outbound.protection.outlook.com\n\t(mail-by2nam01on0057.outbound.protection.outlook.com [104.47.34.57])\n\tby dpdk.org (Postfix) with ESMTP id EF6295681\n\tfor <dev@dpdk.org>; Fri, 18 Nov 2016 06:45:41 +0100 (CET)",
            "from localhost.net (50.254.132.37) by\n\tBY1PR0701MB1724.namprd07.prod.outlook.com (10.162.111.143) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P384) id 15.1.693.12;\n\tFri, 18 Nov 2016 05:45:32 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=CAVIUMNETWORKS.onmicrosoft.com; s=selector1-cavium-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n\tbh=PIqiPTLcykDhS8DwGUlUW84ZZ8OtGlzmpcH5NvlVbqc=;\n\tb=DYTKFwYGlrH1ydDtt8lrZDEd0KiUAON8rG+fV4NDOLBz414DlbvPd/NuxY06mkZdajIKYsRwJcde6t480MnZIQAE3cOfbVzb1bR/J4tvjcpYSqe2OWDBgfLGcR6nvad1VN996AQ4E7r0dh0v5n/Huu/D7md/QPSv+EMsttXaVQ8=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Jerin.Jacob@cavium.com; ",
        "From": "Jerin Jacob <jerin.jacob@caviumnetworks.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<bruce.richardson@intel.com>, <harry.van.haaren@intel.com>,\n\t<hemant.agrawal@nxp.com>, <gage.eads@intel.com>, Jerin Jacob\n\t<jerin.jacob@caviumnetworks.com>",
        "Date": "Fri, 18 Nov 2016 11:15:00 +0530",
        "Message-ID": "<1479447902-3700-3-git-send-email-jerin.jacob@caviumnetworks.com>",
        "X-Mailer": "git-send-email 2.5.5",
        "In-Reply-To": "<1479447902-3700-1-git-send-email-jerin.jacob@caviumnetworks.com>",
        "References": "<1479447902-3700-1-git-send-email-jerin.jacob@caviumnetworks.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[50.254.132.37]",
        "X-ClientProxiedBy": "CY1PR1101CA0016.namprd11.prod.outlook.com (10.169.17.26)\n\tTo\n\tBY1PR0701MB1724.namprd07.prod.outlook.com (10.162.111.143)",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BY1PR0701MB1724;\n\t2:/M5ihsUghm1lEpoe8W2+PiCmEKTJx79voEmEt1aZHC32H4HuYlELW6gDacU9LWlKRJbRd18fqrr6HGo+BPdFrhD7bh609apFmI8sP9V9oSO09z/T3Qw6sC8+Ydl2Vxp1vPXgGwPF5JfdhQYpTZJtza1dsaKDCo0t9usP9/swZoA=;\n\t3:BuKCRR3oEKWRetnog/6Wm428Zk2ENXQltR+71aSYjJpTqfOhxA3LXcU064HnwrFbVSdVpidDhHNgHdMcPv0bqTu6XWHRJQKrL7cQNGvEVpqG1p6BiRnIks5AOKAr+/yfKnCQrfQVVSptadg3RAg24pxqE4RC6VK3A2tkoQL0WUU=;\n\t25:rlYFi++db2gA29iRHVzq7w5mpvvdTNpk9oKZUEgPypR2Ggg1vwi6O+XmqNA8MixikR67BbgvLZcdJkIr+H16ssi2NcdzJLcm6bocMLt9SU6yek7pzB5dvGnr4tp6i3jThjUpJoeMdT7LwqK4esdA2I4aCAZHDLNdVZt+9dz6zxRvdVfzZ7Xrx2VaWJRMFFLRx41dhXkA7I3tTVFf6cDEUGx52tnAl8Twn1cwe38mmAnEHbVY8BfMOpY68VC7FjAr1uJnZuu8hcaZfQod9qb2GWp44BW+/hvEoVSko3gD7mlE4XGh/Pxw4koqbixol6biUsBKzJMOqmZD/dyLHS+6+Zgm/1hDFlys0oe0Z7IUF0XVProWWLgVzFIsa77FFn/8XGwrGvlY0A2ZNa0u6yMyLlt2kmktNkleCSLgzabyWGtziTvQs/EdZ6xSa+dNVy6XJyy+W4a3XR7w8qeVWSZ5tw==",
            "1; BY1PR0701MB1724;\n\t31:5AJwBFmC4SbY4jpXZWw7YOxWicRCMyzzik+AjOy+cCTcvP+ir7xoC+vQa9cfihLF18Q+SxChEk0Ezksqy8Ov2iI0Xbg31Yw0vArHFhrmOmjVL/ult8HN3bMZG3zzz+aeGo4E8cKU28/z/dieDVB2v9Ufk+iZ6n6cNyWjgtI/xAi2luAep3uuBx2TI0nTSwVMVg67eeKqHiVtTrD7ePPSpBsm4f+mNnV0hpdIwMh/MSr5hBC0t9DiFLqVkp9GPd1QyGzrbesExNyP0dxItBW3Cg==;\n\t20:+wTE0TO4nxMDrPi4L00bnayxUkDV/frrGJSln1MqdPGfwacZnL8FrlvzgQAYjyoXp6ptgQK5uSZ6nXuXwbvuvA7dq3/tIfhAVJ+t+/nTV5ZVbel632O+rq/QtE4AEbccAX/LJ3xEtjp4ghRk957329/5ME3LtmUEQPMedoQB1Qbpzn7ou3qdiRGpGQH2XegSx6O7wpigzhngCYvjAqfUNZ2q6u+Wc6p0nythdaSi0H4f3gMmHUckxOVbyBWI2NAmSmliJk8qbLS3P1f72waA8e9aX0Utbu67NwKw71+rLlElFxSpMz/Eq9AbioPXo6MYZw3MFw94ew6/ym7AzMNKoHVj7+ZhUC/1JEfHUR6VoZnwtkMQl41sRFCeQePYptDdVs6Ha8GSTNRHLBNdLbsaIBGNRavGpigk4I+oqmkkAhvxbdi+OQXcYzxjQcN+8T3tkCJfM6VpcUSrnpQKBg2KxQioYYKIOdJIXr5mfBC79jr6XHcvACxoHM+MhME4gPX5vs4YI1uIuEn+nN/h7nGOZerfb+vaojZiQbwx+FzlYo/vd4OqTbWwpBnKa/lIjNo3zQaKtCKMR2aKnICcmyxdsYf8VDEOoC9Pjj97IsOk7XA=",
            "1; BY1PR0701MB1724;\n\t4:O4jjg0/A0im38hja0BA0uaJPfS7ZJTtsQ0DkaavUtXzOYx4YOF/BBVCh1PYi3NTme16kRZv9+aBfsno5XfCg2T47AWerbWhsUWBrQ5iaSjRbnFjKq7xCctYX8Hf0+mWKa9GKDQCG/q406Lu+Uz436G2JPAt2XHY3MH3mPUNbDT1zIRusgXTzjcVqE8ceDeDoBM1y2TrLjmXHaiLmaxPcNo/KiTFcLegnW75KJJHF73P6lNXxD7lRWybN1stK3K8XHzClkghpsmJnOXxGEBEsb+SB9Dta5GJolsaIcFta8lGHDli+qFytqV1k2VrCd9hAeOd/YcGOBC+S3DY9KUWy2tBOYvmg6Th7Djn4oA5KebGWi4j0ZVlNIPSo4CQ/YSDdJi+bb2nAuId+XS7t+M/0fS23KRYyMxmAzSMNReaAyARqqRaIVsA97fXOX5t6sy2DbmbVr4wS6rXPSZ73MQovXmvaH4ZSZ5HJrMHI4qnNHFZBhgfpfUQVD6tue9nrVRTA",
            "=?us-ascii?Q?1; BY1PR0701MB1724;\n\t23:f9b02Rqob3uW06NLhuZXcSyR4kx+MK/XqB8Pxoz?=\n\t=?us-ascii?Q?BzpgH4YXJI4EtSSScTL9fny4PnSrrydV5V8DmNtafA7w98NuDM35VKlAV8f+?=\n\t=?us-ascii?Q?tTNpGMGe7uscDaNzUEqk4yCrvcORgEYxy024WGfG+Enn2nROSwAiGcqIU3PJ?=\n\t=?us-ascii?Q?jB8oCJWkSM2oYClLAwm1mGacue4GSxPLfVLaHzWXBMTNzHlf+LxPtfrmmc6e?=\n\t=?us-ascii?Q?ZOKcmScycZtFQ61V7KZoCiTVbw3LpzFNltrnSLBDCh5eHxv4GA6cG2KcH6EQ?=\n\t=?us-ascii?Q?5nq0HcJVttc7KadcUVm/zPk2LaP6oQ7NHkY1P5qRb/Ni+LyYGKPuK1kbOFo5?=\n\t=?us-ascii?Q?IHxBWkUdUBaNvfI5h42Fik1HUuCEoA4AtpuQ0QsQbmuomK1VcMJze/u5MJ8l?=\n\t=?us-ascii?Q?0wFHzXs3xD0scfcwgWb0KL6/dM2VT1PDxDMyRUwS5bi18MBJBYWhsAqAn0lQ?=\n\t=?us-ascii?Q?Vl/O2cDc3lNNIfydxM0T+AvjM9PWwcx2a1N/zFDnS45yaSd/Gi42vOxX11WP?=\n\t=?us-ascii?Q?0eSSJ4ulfXID0HrpadVXm4I/BRi6Bkr+o98jdZcPMcvnBFZS9Cu55efw7s4p?=\n\t=?us-ascii?Q?lvebmUwpLePzzR7RYuScKkiR4y+ShA3DI+6dQL5EcbuYC3gVR2zY3vKJr8Ax?=\n\t=?us-ascii?Q?vxxEICWla5OAM3x1mzO044PgOqJE++h1j86xq0jaHJO0PBZaoygFlPQAmfBV?=\n\t=?us-ascii?Q?1h/AtiZTNyZlnfwqiOb7fT7FSYUV7LyIpyQvays13XAdhxRy6Zn5zWvp4hLm?=\n\t=?us-ascii?Q?GFENwXDRyc5Y6YiPT1A/Nn6DY8QM6L69Hjg2yeF4ZVdERyta82jP4OD1UxaV?=\n\t=?us-ascii?Q?4hUc1LA/GkBxjsbBLhD9MtVJpHWZN5bW8bO9BM0VajrH+KvuprqKDfScG8DN?=\n\t=?us-ascii?Q?v/ABqw2OOPSaGgIAEXig/GsAguQsfjdMwGElxzOu5ra5uRqSdIMrsHEsKyt4?=\n\t=?us-ascii?Q?/aEWKMzQHVaB8eFh1cmIYZrwex7tPHJHof6Wt6zJ5VTW6X0K7UoUqqJbDbCK?=\n\t=?us-ascii?Q?/HWArukPzS1L6/62PqQfl5Hdec0nMhmofx3RN7lSgEvHiInCSKBf7Bvu21A/?=\n\t=?us-ascii?Q?TqDT74eGLkSYQizfHd8Ne325t/yLmreXZcLU4uExKnRwplW2T9IpgCrUYCC0?=\n\t=?us-ascii?Q?Hfc+EXXMD10do+nlLfEPm0hNoQ7mbY6QiEs0G3vr17hJiBMkHz8mNCop51oj?=\n\t=?us-ascii?Q?fD4Vc6izmO4+pa7lRhX10lSLSt6WxguQH4deFoiBGaAzt3qi6MaQqHGUoyVS?=\n\t=?us-ascii?Q?WrfiCuUS6NukyHEea1BPvGNffEhdDw9a2hyrPXbbnVQI8Z/3N+9HSNNuRc3S?=\n\t=?us-ascii?Q?djm7SWtVqCC/ubIMELHzlUz4=3D?=",
            "1; BY1PR0701MB1724;\n\t6:fiA+AZhA6eQjcQd0o7fkI3AA6Fo+KYFT4ebD8dUZJ1tPlSWowIpacdXY+E9rY0BGF6+0D7DN7I+ZeUlg5dFewQJqOx7tH57QF0CjAaUJc2+upstamvOCX2OzkVV2ia1btfwFDoNMAchlkHXK46GPK5vp0qZYca/vBfwCvhKk5TUB780Dg7cVvMgDx7Ko7VTFUCkhtP9WZFTTUK85yxuMYQdiF0VoNee6FqgxYI9PWa/0dEOtQvqxJZlRwMetaV3X/NFkgw2XL3/wvb8m+g7nV0skKcNftb703HRKKBq4x19yYMVjB9QAsajpt/mLobs1JZpq8t3kuo/r/9Qww9mK0swnukp3VpJ47c40ZM9Y23o=;\n\t5:Rhxibqvy6TXZ8JaaGCnbvSvJCwX7zg5kfpvRuDL2V3AMQRt7PFdZR+FD80+WlC/oOuuhLAsVSakfmcuvqx3Bf+jKAkFOm/9ny2pJlwaVmuUvNDFrXGVt1gHgJWiw83chNXeOlSk3aUm/0DcnQWmjng==;\n\t24:oWL0eccdVdX/YeWnrk/j6HghHI9OWgn/TNXeWrvLdv8j3+02qG+LYAv2RnNPYI8RYOUnQIPct7R72xRr7yTQgbivcM8EXPpTtwf5TQj2DHM=",
            "1; BY1PR0701MB1724;\n\t7:ELR3kVW6oiXycALTdav11yJngbEOSBkBue+uyNWLj/2B2uDt4qvCF61D/AXNYV+mts05fy322j20aq8LgG/4DAWI6LxAFHPUMP4McMtfNrgacAKHtpKfzqHO07hXuRRxZoomPKRfa5x8LQnixNzUeggACwJnb6whvtQwCEIXru3Oh16UQvYOMzNxOC6ELrsSGZJiU59jMefZUuFjgJcfQUuY4jCJGrHGSEhRUtyp7RB3DEskmtz1Zx8Q1diJQCuTQcKDFUq6HwYzk2qtYgajhbflqecl/x/nPbP0Vts24j7qgkUSFgY8vgfceiuyCCOI818YRQ9kJZ+ymhKkYraMSHlKmKTdktF4COVL+9k1h9E="
        ],
        "X-MS-Office365-Filtering-Correlation-Id": "f899f9b1-6ea9-4805-1e3d-08d40f761cba",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0; RULEID:(22001);\n\tSRVR:BY1PR0701MB1724; ",
        "X-Microsoft-Antispam-PRVS": "<BY1PR0701MB17242EA9D02B4A4C57C3146F81B00@BY1PR0701MB1724.namprd07.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:(278428928389397);",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(6060326)(6040281)(601004)(2401047)(8121501046)(5005006)(10201501046)(3002001)(6061324)(6041223);\n\tSRVR:BY1PR0701MB1724; BCL:0; PCL:0; RULEID:; SRVR:BY1PR0701MB1724; ",
        "X-Forefront-PRVS": "01304918F3",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(4630300001)(6069001)(6009001)(7916002)(189002)(199003)(68736007)(66066001)(3846002)(7736002)(50226002)(69596002)(81166006)(47776003)(4326007)(4001430100002)(48376002)(6116002)(8676002)(5890100001)(81156014)(8666005)(42186005)(33026002)(105586002)(2351001)(92566002)(2906002)(7846002)(50466002)(50986999)(110136003)(5660300001)(76176999)(42882006)(33646002)(6916009)(189998001)(21086003)(107886002)(36756003)(305945005)(2950100002)(6666003)(101416001)(575784001)(97736004)(77096005)(5003940100001)(106356001)(76506005)(7059030)(579004)(559001);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BY1PR0701MB1724; H:localhost.net; FPR:;\n\tSPF:None; PTR:InfoNoRecords; MX:1; A:1; LANG:en; ",
        "Received-SPF": "None (protection.outlook.com: cavium.com does not designate\n\tpermitted sender hosts)",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "caviumnetworks.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Nov 2016 05:45:32.2624\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY1PR0701MB1724",
        "Subject": "[dpdk-dev]  [PATCH 2/4] eventdev: implement the northbound APIs",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch set defines the southbound driver interface\nand implements the common code required for northbound\neventdev API interface.\n\nSigned-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>\n---\n config/common_base                           |    6 +\n lib/Makefile                                 |    1 +\n lib/librte_eal/common/include/rte_log.h      |    1 +\n lib/librte_eventdev/Makefile                 |   57 ++\n lib/librte_eventdev/rte_eventdev.c           | 1211 ++++++++++++++++++++++++++\n lib/librte_eventdev/rte_eventdev_pmd.h       |  504 +++++++++++\n lib/librte_eventdev/rte_eventdev_version.map |   39 +\n mk/rte.app.mk                                |    1 +\n 8 files changed, 1820 insertions(+)\n create mode 100644 lib/librte_eventdev/Makefile\n create mode 100644 lib/librte_eventdev/rte_eventdev.c\n create mode 100644 lib/librte_eventdev/rte_eventdev_pmd.h\n create mode 100644 lib/librte_eventdev/rte_eventdev_version.map",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 4bff83a..7a8814e 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -411,6 +411,12 @@ CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n\n CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y\n \n #\n+# Compile generic event device library\n+#\n+CONFIG_RTE_LIBRTE_EVENTDEV=y\n+CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n\n+CONFIG_RTE_EVENT_MAX_DEVS=16\n+CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64\n # Compile librte_ring\n #\n CONFIG_RTE_LIBRTE_RING=y\ndiff --git a/lib/Makefile b/lib/Makefile\nindex 990f23a..1a067bf 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -41,6 +41,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile\n DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline\n DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether\n DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev\n+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev\n DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost\n DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash\n DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm\ndiff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h\nindex 29f7d19..9a07d92 100644\n--- a/lib/librte_eal/common/include/rte_log.h\n+++ b/lib/librte_eal/common/include/rte_log.h\n@@ -79,6 +79,7 @@ extern struct rte_logs rte_logs;\n #define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */\n #define RTE_LOGTYPE_MBUF    0x00010000 /**< Log related to mbuf. */\n #define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */\n+#define RTE_LOGTYPE_EVENTDEV 0x00040000 /**< Log related to eventdev. */\n \n /* these log types can be used in an application */\n #define RTE_LOGTYPE_USER1   0x01000000 /**< User-defined log type 1. */\ndiff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile\nnew file mode 100644\nindex 0000000..dac0663\n--- /dev/null\n+++ b/lib/librte_eventdev/Makefile\n@@ -0,0 +1,57 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2016 Cavium networks. 
All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of Cavium networks nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+# library name\n+LIB = librte_eventdev.a\n+\n+# library version\n+LIBABIVER := 1\n+\n+# build flags\n+CFLAGS += -O3\n+CFLAGS += $(WERROR_FLAGS)\n+\n+# library source files\n+SRCS-y += rte_eventdev.c\n+\n+# export include files\n+SYMLINK-y-include += rte_eventdev.h\n+SYMLINK-y-include += rte_eventdev_pmd.h\n+\n+# versioning export map\n+EXPORT_MAP := rte_eventdev_version.map\n+\n+# library dependencies\n+DEPDIRS-y += lib/librte_eal\n+DEPDIRS-y += lib/librte_mbuf\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c\nnew file mode 100644\nindex 0000000..17ce5c3\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_eventdev.c\n@@ -0,0 +1,1211 @@\n+/*\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016 Cavium networks. 
All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Cavium networks nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <ctype.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdarg.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <inttypes.h>\n+#include <sys/types.h>\n+#include <sys/queue.h>\n+\n+#include <rte_byteorder.h>\n+#include <rte_log.h>\n+#include <rte_debug.h>\n+#include <rte_dev.h>\n+#include <rte_pci.h>\n+#include <rte_memory.h>\n+#include <rte_memcpy.h>\n+#include <rte_memzone.h>\n+#include <rte_eal.h>\n+#include <rte_per_lcore.h>\n+#include <rte_lcore.h>\n+#include <rte_atomic.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_common.h>\n+#include <rte_malloc.h>\n+#include <rte_errno.h>\n+\n+#include \"rte_eventdev.h\"\n+#include \"rte_eventdev_pmd.h\"\n+\n+struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];\n+\n+struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];\n+\n+static struct rte_eventdev_global eventdev_globals = {\n+\t.nb_devs\t\t= 0\n+};\n+\n+struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;\n+\n+/* Event dev north bound API implementation */\n+\n+uint8_t\n+rte_event_dev_count(void)\n+{\n+\treturn rte_eventdev_globals->nb_devs;\n+}\n+\n+int\n+rte_event_dev_get_dev_id(const char *name)\n+{\n+\tint i;\n+\n+\tif (!name)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < rte_eventdev_globals->nb_devs; i++)\n+\t\tif ((strcmp(rte_event_devices[i].data->name, name)\n+\t\t\t\t== 0) 
&&\n+\t\t\t\t(rte_event_devices[i].attached ==\n+\t\t\t\t\t\tRTE_EVENTDEV_ATTACHED))\n+\t\t\treturn i;\n+\treturn -ENODEV;\n+}\n+\n+int\n+rte_event_dev_socket_id(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\treturn dev->data->socket_id;\n+}\n+\n+int\n+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (dev_info == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmemset(dev_info, 0, sizeof(struct rte_event_dev_info));\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);\n+\t(*dev->dev_ops->dev_infos_get)(dev, dev_info);\n+\n+\tdev_info->pci_dev = dev->pci_dev;\n+\tif (dev->driver)\n+\t\tdev_info->driver_name = dev->driver->pci_drv.driver.name;\n+\treturn 0;\n+}\n+\n+static inline int\n+rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)\n+{\n+\tuint8_t old_nb_queues = dev->data->nb_queues;\n+\tvoid **queues;\n+\tuint8_t *queues_prio;\n+\tunsigned int i;\n+\n+\tEDEV_LOG_DEBUG(\"Setup %d queues on device %u\", nb_queues,\n+\t\t\t dev->data->dev_id);\n+\n+\t/* First time configuration */\n+\tif (dev->data->queues == NULL && nb_queues != 0) {\n+\t\tdev->data->queues = rte_zmalloc_socket(\"eventdev->data->queues\",\n+\t\t\t\tsizeof(dev->data->queues[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->queues == NULL) {\n+\t\t\tdev->data->nb_queues = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to get memory for queue meta data,\"\n+\t\t\t\t\t\"nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t\t/* Allocate memory to store queue priority */\n+\t\tdev->data->queues_prio = rte_zmalloc_socket(\n+\t\t\t\t\"eventdev->data->queues_prio\",\n+\t\t\t\tsizeof(dev->data->queues_prio[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, 
dev->data->socket_id);\n+\t\tif (dev->data->queues_prio == NULL) {\n+\t\t\tdev->data->nb_queues = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to get memory for queue priority,\"\n+\t\t\t\t\t\"nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t} else if (dev->data->queues != NULL && nb_queues != 0) {/* re-config */\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);\n+\n+\t\tqueues = dev->data->queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t(*dev->dev_ops->queue_release)(queues[i]);\n+\n+\t\tqueues = rte_realloc(queues, sizeof(queues[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (queues == NULL) {\n+\t\t\tEDEV_LOG_ERR(\"failed to realloc queue meta data,\"\n+\t\t\t\t\t\t\" nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t\tdev->data->queues = queues;\n+\n+\t\t/* Re allocate memory to store queue priority */\n+\t\tqueues_prio = dev->data->queues_prio;\n+\t\tqueues_prio = rte_realloc(queues_prio,\n+\t\t\t\tsizeof(queues_prio[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (queues_prio == NULL) {\n+\t\t\tEDEV_LOG_ERR(\"failed to realloc queue priority,\"\n+\t\t\t\t\t\t\" nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t\tdev->data->queues_prio = queues_prio;\n+\n+\t\tif (nb_queues > old_nb_queues) {\n+\t\t\tuint8_t new_qs = nb_queues - old_nb_queues;\n+\n+\t\t\tmemset(queues + old_nb_queues, 0,\n+\t\t\t\tsizeof(queues[0]) * new_qs);\n+\t\t\tmemset(queues_prio + old_nb_queues, 0,\n+\t\t\t\tsizeof(queues_prio[0]) * new_qs);\n+\t\t}\n+\t} else if (dev->data->queues != NULL && nb_queues == 0) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);\n+\n+\t\tqueues = dev->data->queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t(*dev->dev_ops->queue_release)(queues[i]);\n+\t}\n+\n+\tdev->data->nb_queues = nb_queues;\n+\treturn 0;\n+}\n+\n+static inline int\n+rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)\n+{\n+\tuint8_t 
old_nb_ports = dev->data->nb_ports;\n+\tvoid **ports;\n+\tuint16_t *links_map;\n+\tuint8_t *ports_dequeue_depth;\n+\tuint8_t *ports_enqueue_depth;\n+\tunsigned int i;\n+\n+\tEDEV_LOG_DEBUG(\"Setup %d ports on device %u\", nb_ports,\n+\t\t\t dev->data->dev_id);\n+\n+\t/* First time configuration */\n+\tif (dev->data->ports == NULL && nb_ports != 0) {\n+\t\tdev->data->ports = rte_zmalloc_socket(\"eventdev->data->ports\",\n+\t\t\t\tsizeof(dev->data->ports[0]) * nb_ports,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->ports == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to get memory for port meta data,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Allocate memory to store ports dequeue depth */\n+\t\tdev->data->ports_dequeue_depth =\n+\t\t\trte_zmalloc_socket(\"eventdev->ports_dequeue_depth\",\n+\t\t\tsizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->ports_dequeue_depth == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to get memory for port deq meta,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Allocate memory to store ports enqueue depth */\n+\t\tdev->data->ports_enqueue_depth =\n+\t\t\trte_zmalloc_socket(\"eventdev->ports_enqueue_depth\",\n+\t\t\tsizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->ports_enqueue_depth == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to get memory for port enq meta,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Allocate memory to store queue to port link connection */\n+\t\tdev->data->links_map =\n+\t\t\trte_zmalloc_socket(\"eventdev->links_map\",\n+\t\t\tsizeof(dev->data->links_map[0]) * nb_ports 
*\n+\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV,\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->links_map == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to get memory for port_map area,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);\n+\n+\t\tports = dev->data->ports;\n+\t\tports_dequeue_depth = dev->data->ports_dequeue_depth;\n+\t\tports_enqueue_depth = dev->data->ports_enqueue_depth;\n+\t\tlinks_map = dev->data->links_map;\n+\n+\t\tfor (i = nb_ports; i < old_nb_ports; i++)\n+\t\t\t(*dev->dev_ops->port_release)(ports[i]);\n+\n+\t\t/* Realloc memory for ports */\n+\t\tports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (ports == NULL) {\n+\t\t\tEDEV_LOG_ERR(\"failed to realloc port meta data,\"\n+\t\t\t\t\t\t\" nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Realloc memory for ports_dequeue_depth */\n+\t\tports_dequeue_depth = rte_realloc(ports_dequeue_depth,\n+\t\t\tsizeof(ports_dequeue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (ports_dequeue_depth == NULL) {\n+\t\t\tEDEV_LOG_ERR(\"failed to realloc port deqeue meta data,\"\n+\t\t\t\t\t\t\" nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Realloc memory for ports_enqueue_depth */\n+\t\tports_enqueue_depth = rte_realloc(ports_enqueue_depth,\n+\t\t\tsizeof(ports_enqueue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (ports_enqueue_depth == NULL) {\n+\t\t\tEDEV_LOG_ERR(\"failed to realloc port enqueue meta data,\"\n+\t\t\t\t\t\t\" nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Realloc memory to store queue to port link connection */\n+\t\tlinks_map = rte_realloc(links_map,\n+\t\t\tsizeof(dev->data->links_map[0]) * nb_ports 
*\n+\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV,\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (dev->data->links_map == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tEDEV_LOG_ERR(\"failed to realloc mem for port_map area,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\tif (nb_ports > old_nb_ports) {\n+\t\t\tuint8_t new_ps = nb_ports - old_nb_ports;\n+\n+\t\t\tmemset(ports + old_nb_ports, 0,\n+\t\t\t\tsizeof(ports[0]) * new_ps);\n+\t\t\tmemset(ports_dequeue_depth + old_nb_ports, 0,\n+\t\t\t\tsizeof(ports_dequeue_depth[0]) * new_ps);\n+\t\t\tmemset(ports_enqueue_depth + old_nb_ports, 0,\n+\t\t\t\tsizeof(ports_enqueue_depth[0]) * new_ps);\n+\t\t\tmemset(links_map +\n+\t\t\t\t(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),\n+\t\t\t\t0, sizeof(ports_enqueue_depth[0]) * new_ps);\n+\t\t}\n+\n+\t\tdev->data->ports = ports;\n+\t\tdev->data->ports_dequeue_depth = ports_dequeue_depth;\n+\t\tdev->data->ports_enqueue_depth = ports_enqueue_depth;\n+\t\tdev->data->links_map = links_map;\n+\t} else if (dev->data->ports != NULL && nb_ports == 0) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);\n+\n+\t\tports = dev->data->ports;\n+\t\tfor (i = nb_ports; i < old_nb_ports; i++)\n+\t\t\t(*dev->dev_ops->port_release)(ports[i]);\n+\t}\n+\n+\tdev->data->nb_ports = nb_ports;\n+\treturn 0;\n+}\n+\n+int\n+rte_event_dev_configure(uint8_t dev_id, struct rte_event_dev_config *dev_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_dev_info info;\n+\tint diag;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);\n+\n+\tif (dev->data->dev_started) {\n+\t\tEDEV_LOG_ERR(\n+\t\t    \"device %d must be stopped to allow configuration\", dev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tif (dev_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\t(*dev->dev_ops->dev_infos_get)(dev, 
&info);\n+\n+\t/* Check dequeue_wait_ns value is in limit */\n+\tif (!dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_WAIT) {\n+\t\tif (dev_conf->dequeue_wait_ns < info.min_dequeue_wait_ns ||\n+\t\t\tdev_conf->dequeue_wait_ns > info.max_dequeue_wait_ns) {\n+\t\t\tEDEV_LOG_ERR(\"dev%d invalid dequeue_wait_ns=%d\"\n+\t\t\t\" min_dequeue_wait_ns=%d max_dequeue_wait_ns=%d\",\n+\t\t\tdev_id, dev_conf->dequeue_wait_ns,\n+\t\t\tinfo.min_dequeue_wait_ns,\n+\t\t\tinfo.max_dequeue_wait_ns);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* Check nb_events_limit is in limit */\n+\tif (dev_conf->nb_events_limit > info.max_num_events) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_events_limit=%d > max_num_events=%d\",\n+\t\tdev_id, dev_conf->nb_events_limit, info.max_num_events);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_queues is in limit */\n+\tif (!dev_conf->nb_event_queues) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_event_queues cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_queues > info.max_event_queues) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_event_queues=%d > max_event_queues=%d\",\n+\t\tdev_id, dev_conf->nb_event_queues, info.max_event_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_ports is in limit */\n+\tif (!dev_conf->nb_event_ports) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_event_ports cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_ports > info.max_event_ports) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_event_ports=%d > max_event_ports= %d\",\n+\t\tdev_id, dev_conf->nb_event_ports, info.max_event_ports);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_queue_flows is in limit */\n+\tif (!dev_conf->nb_event_queue_flows) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_flows cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_flows=%x > max_flows=%x\",\n+\t\tdev_id, 
dev_conf->nb_event_queue_flows,\n+\t\tinfo.max_event_queue_flows);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_port_dequeue_depth is in limit */\n+\tif (!dev_conf->nb_event_port_dequeue_depth) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_dequeue_depth cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_port_dequeue_depth >\n+\t\t\t info.max_event_port_dequeue_depth) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_dequeue_depth=%d > max_dequeue_depth=%d\",\n+\t\tdev_id, dev_conf->nb_event_port_dequeue_depth,\n+\t\tinfo.max_event_port_dequeue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_port_enqueue_depth is in limit */\n+\tif (!dev_conf->nb_event_port_enqueue_depth) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_enqueue_depth cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_port_enqueue_depth >\n+\t\t\t info.max_event_port_enqueue_depth) {\n+\t\tEDEV_LOG_ERR(\"dev%d nb_enqueue_depth=%d > max_enqueue_depth=%d\",\n+\t\tdev_id, dev_conf->nb_event_port_enqueue_depth,\n+\t\tinfo.max_event_port_enqueue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Copy the dev_conf parameter into the dev structure */\n+\tmemcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));\n+\n+\t/* Setup new number of queues and reconfigure device. */\n+\tdiag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);\n+\tif (diag != 0) {\n+\t\tEDEV_LOG_ERR(\"dev%d rte_event_dev_queue_config = %d\",\n+\t\t\t\tdev_id, diag);\n+\t\treturn diag;\n+\t}\n+\n+\t/* Setup new number of ports and reconfigure device. 
*/\n+\tdiag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);\n+\tif (diag != 0) {\n+\t\trte_event_dev_queue_config(dev, 0);\n+\t\tEDEV_LOG_ERR(\"dev%d rte_event_dev_port_config = %d\",\n+\t\t\t\tdev_id, diag);\n+\t\treturn diag;\n+\t}\n+\n+\t/* Configure the device */\n+\tdiag = (*dev->dev_ops->dev_configure)(dev);\n+\tif (diag != 0) {\n+\t\tEDEV_LOG_ERR(\"dev%d dev_configure = %d\", dev_id, diag);\n+\t\trte_event_dev_queue_config(dev, 0);\n+\t\trte_event_dev_port_config(dev, 0);\n+\t}\n+\n+\tdev->data->event_dev_cap = info.event_dev_cap;\n+\treturn diag;\n+}\n+\n+static inline int\n+is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)\n+{\n+\tif (queue_id < dev->data->nb_queues && queue_id <\n+\t\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV)\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+int\n+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,\n+\t\t\t\t struct rte_event_queue_conf *queue_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (queue_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (!is_valid_queue(dev, queue_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid queue_id=%\" PRIu8, queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);\n+\tmemset(queue_conf, 0, sizeof(struct rte_event_queue_conf));\n+\t(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);\n+\treturn 0;\n+}\n+\n+static inline int\n+is_valid_atomic_queue_conf(struct rte_event_queue_conf *queue_conf)\n+{\n+\tif (queue_conf && (\n+\t\t((queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_TYPE_MASK)\n+\t\t\t== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||\n+\t\t((queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_TYPE_MASK)\n+\t\t\t== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)\n+\t\t))\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+int\n+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,\n+\t\t      struct rte_event_queue_conf 
*queue_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_queue_conf def_conf;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (!is_valid_queue(dev, queue_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid queue_id=%\" PRIu8, queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_atomic_flows limit */\n+\tif (is_valid_atomic_queue_conf(queue_conf)) {\n+\t\tif (queue_conf->nb_atomic_flows == 0 ||\n+\t\t    queue_conf->nb_atomic_flows >\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows) {\n+\t\t\tEDEV_LOG_ERR(\n+\t\t\"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d\",\n+\t\t\tdev_id, queue_id, queue_conf->nb_atomic_flows,\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* Check nb_atomic_order_sequences limit */\n+\tif (is_valid_atomic_queue_conf(queue_conf)) {\n+\t\tif (queue_conf->nb_atomic_order_sequences == 0 ||\n+\t\t    queue_conf->nb_atomic_order_sequences >\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows) {\n+\t\t\tEDEV_LOG_ERR(\n+\t\t\"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d\",\n+\t\t\tdev_id, queue_id, queue_conf->nb_atomic_order_sequences,\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\tif (dev->data->dev_started) {\n+\t\tEDEV_LOG_ERR(\n+\t\t    \"device %d must be stopped to allow queue setup\", dev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);\n+\n+\tif (queue_conf == NULL) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,\n+\t\t\t\t\t-ENOTSUP);\n+\t\t(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);\n+\t\tdef_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;\n+\t\tqueue_conf = &def_conf;\n+\t}\n+\n+\tdev->data->queues_prio[queue_id] = queue_conf->priority;\n+\treturn (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);\n+}\n+\n+uint8_t\n+rte_event_queue_count(uint8_t dev_id)\n+{\n+\tstruct 
rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->nb_queues;\n+}\n+\n+uint8_t\n+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\tif (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)\n+\t\treturn dev->data->queues_prio[queue_id];\n+\telse\n+\t\treturn RTE_EVENT_QUEUE_PRIORITY_NORMAL;\n+}\n+\n+static inline int\n+is_valid_port(struct rte_eventdev *dev, uint8_t port_id)\n+{\n+\tif (port_id < dev->data->nb_ports)\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+int\n+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,\n+\t\t\t\t struct rte_event_port_conf *port_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (port_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);\n+\tmemset(port_conf, 0, sizeof(struct rte_event_port_conf));\n+\t(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);\n+\treturn 0;\n+}\n+\n+int\n+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,\n+\t\t      struct rte_event_port_conf *port_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_port_conf def_conf;\n+\tint diag;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check new_event_threshold limit */\n+\tif ((port_conf && !port_conf->new_event_threshold) ||\n+\t\t\t(port_conf && port_conf->new_event_threshold >\n+\t\t\t\t dev->data->dev_conf.nb_events_limit)) {\n+\t\tEDEV_LOG_ERR(\n+\t\t   \"dev%d port%d Invalid event_threshold=%d nb_events_limit=%d\",\n+\t\t\tdev_id, port_id, 
port_conf->new_event_threshold,\n+\t\t\tdev->data->dev_conf.nb_events_limit);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check dequeue_depth limit */\n+\tif ((port_conf && !port_conf->dequeue_depth) ||\n+\t\t\t(port_conf && port_conf->dequeue_depth >\n+\t\tdev->data->dev_conf.nb_event_port_dequeue_depth)) {\n+\t\tEDEV_LOG_ERR(\n+\t\t   \"dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d\",\n+\t\t\tdev_id, port_id, port_conf->dequeue_depth,\n+\t\t\tdev->data->dev_conf.nb_event_port_dequeue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check enqueue_depth limit */\n+\tif ((port_conf && !port_conf->enqueue_depth) ||\n+\t\t\t(port_conf && port_conf->enqueue_depth >\n+\t\tdev->data->dev_conf.nb_event_port_enqueue_depth)) {\n+\t\tEDEV_LOG_ERR(\n+\t\t   \"dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d\",\n+\t\t\tdev_id, port_id, port_conf->enqueue_depth,\n+\t\t\tdev->data->dev_conf.nb_event_port_enqueue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dev->data->dev_started) {\n+\t\tEDEV_LOG_ERR(\n+\t\t    \"device %d must be stopped to allow port setup\", dev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);\n+\n+\tif (port_conf == NULL) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,\n+\t\t\t\t\t-ENOTSUP);\n+\t\t(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);\n+\t\tport_conf = &def_conf;\n+\t}\n+\n+\tdev->data->ports_dequeue_depth[port_id] =\n+\t\t\tport_conf->dequeue_depth;\n+\tdev->data->ports_enqueue_depth[port_id] =\n+\t\t\tport_conf->enqueue_depth;\n+\n+\tdiag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);\n+\n+\t/* Unlink all the queues from this port(default state after setup) */\n+\tif (!diag)\n+\t\tdiag = rte_event_port_unlink(dev_id, port_id, NULL, 0);\n+\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\treturn 0;\n+}\n+\n+uint8_t\n+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = 
&rte_eventdevs[dev_id];\n+\treturn dev->data->ports_dequeue_depth[port_id];\n+}\n+\n+uint8_t\n+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->ports_enqueue_depth[port_id];\n+}\n+\n+uint8_t\n+rte_event_port_count(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->nb_ports;\n+}\n+\n+int\n+rte_event_port_link(uint8_t dev_id, uint8_t port_id,\n+\t\t    struct rte_event_queue_link link[], uint16_t nb_links)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_queue_link all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];\n+\tuint16_t *links_map;\n+\tint i, diag;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (link == NULL) {\n+\t\tfor (i = 0; i < dev->data->nb_queues; i++) {\n+\t\t\tall_queues[i].queue_id = i;\n+\t\t\tall_queues[i].priority =\n+\t\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL;\n+\t\t}\n+\t\tlink = all_queues;\n+\t\tnb_links = dev->data->nb_queues;\n+\t}\n+\n+\tfor (i = 0; i < nb_links; i++)\n+\t\tif (link[i].queue_id >= RTE_EVENT_MAX_QUEUES_PER_DEV)\n+\t\t\treturn -EINVAL;\n+\n+\tdiag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], link,\n+\t\t\t\t\t\t nb_links);\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\tlinks_map = dev->data->links_map;\n+\t/* Point links_map to this port specific area */\n+\tlinks_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);\n+\tfor (i = 0; i < diag; i++)\n+\t\tlinks_map[link[i].queue_id] = (uint8_t)link[i].priority;\n+\n+\treturn diag;\n+}\n+\n+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)\n+\n+int\n+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,\n+\t\t      uint8_t queues[], uint16_t 
nb_unlinks)\n+{\n+\tstruct rte_eventdev *dev;\n+\tuint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];\n+\tint i, diag;\n+\tuint16_t *links_map;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (queues == NULL) {\n+\t\tfor (i = 0; i < dev->data->nb_queues; i++)\n+\t\t\tall_queues[i] = i;\n+\t\tqueues = all_queues;\n+\t\tnb_unlinks = dev->data->nb_queues;\n+\t}\n+\n+\tfor (i = 0; i < nb_unlinks; i++)\n+\t\tif (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)\n+\t\t\treturn -EINVAL;\n+\n+\tdiag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,\n+\t\t\t\t\tnb_unlinks);\n+\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\tlinks_map = dev->data->links_map;\n+\t/* Point links_map to this port specific area */\n+\tlinks_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);\n+\tfor (i = 0; i < diag; i++)\n+\t\tlinks_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;\n+\n+\treturn diag;\n+}\n+\n+int\n+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,\n+\t\t\tstruct rte_event_queue_link link[])\n+{\n+\tstruct rte_eventdev *dev;\n+\tuint16_t *links_map;\n+\tint i, count = 0;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tEDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tlinks_map = dev->data->links_map;\n+\t/* Point links_map to this port specific area */\n+\tlinks_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);\n+\tfor (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {\n+\t\tif (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {\n+\t\t\tlink[count].queue_id = i;\n+\t\t\tlink[count].priority = (uint8_t)links_map[i];\n+\t\t\t++count;\n+\t\t}\n+\t}\n+\treturn 
count;\n+}\n+\n+int\n+rte_event_dequeue_wait_time(uint8_t dev_id, uint64_t ns, uint64_t *wait_ticks)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->wait_time, -ENOTSUP);\n+\n+\tif (wait_ticks == NULL)\n+\t\treturn -EINVAL;\n+\n+\t(*dev->dev_ops->wait_time)(dev, ns, wait_ticks);\n+\treturn 0;\n+}\n+\n+int\n+rte_event_dev_dump(uint8_t dev_id, FILE *f)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);\n+\n+\t(*dev->dev_ops->dump)(dev, f);\n+\treturn 0;\n+\n+}\n+\n+int\n+rte_event_dev_start(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\tint diag;\n+\n+\tEDEV_LOG_DEBUG(\"Start dev_id=%\" PRIu8, dev_id);\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);\n+\n+\tif (dev->data->dev_started != 0) {\n+\t\tEDEV_LOG_ERR(\"Device with dev_id=%\" PRIu8 \" already started\",\n+\t\t\tdev_id);\n+\t\treturn 0;\n+\t}\n+\n+\tdiag = (*dev->dev_ops->dev_start)(dev);\n+\tif (diag == 0)\n+\t\tdev->data->dev_started = 1;\n+\telse\n+\t\treturn diag;\n+\n+\treturn 0;\n+}\n+\n+void\n+rte_event_dev_stop(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tEDEV_LOG_DEBUG(\"Stop dev_id=%\" PRIu8, dev_id);\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);\n+\n+\tif (dev->data->dev_started == 0) {\n+\t\tEDEV_LOG_ERR(\"Device with dev_id=%\" PRIu8 \" already stopped\",\n+\t\t\tdev_id);\n+\t\treturn;\n+\t}\n+\n+\tdev->data->dev_started = 0;\n+\t(*dev->dev_ops->dev_stop)(dev);\n+}\n+\n+int\n+rte_event_dev_close(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = 
&rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);\n+\n+\t/* Device must be stopped before it can be closed */\n+\tif (dev->data->dev_started == 1) {\n+\t\tEDEV_LOG_ERR(\"Device %u must be stopped before closing\",\n+\t\t\t\tdev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\treturn (*dev->dev_ops->dev_close)(dev);\n+}\n+\n+static inline int\n+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,\n+\t\tint socket_id)\n+{\n+\tchar mz_name[RTE_EVENTDEV_NAME_MAX_LEN];\n+\tconst struct rte_memzone *mz;\n+\tint n;\n+\n+\t/* Generate memzone name */\n+\tn = snprintf(mz_name, sizeof(mz_name), \"rte_eventdev_data_%u\", dev_id);\n+\tif (n >= (int)sizeof(mz_name))\n+\t\treturn -EINVAL;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\tmz = rte_memzone_reserve(mz_name,\n+\t\t\t\tsizeof(struct rte_eventdev_data),\n+\t\t\t\tsocket_id, 0);\n+\t} else\n+\t\tmz = rte_memzone_lookup(mz_name);\n+\n+\tif (mz == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t*data = mz->addr;\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\tmemset(*data, 0, sizeof(struct rte_eventdev_data));\n+\n+\treturn 0;\n+}\n+\n+static uint8_t\n+rte_eventdev_find_free_device_index(void)\n+{\n+\tuint8_t dev_id;\n+\n+\tfor (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {\n+\t\tif (rte_eventdevs[dev_id].attached ==\n+\t\t\t\tRTE_EVENTDEV_DETACHED)\n+\t\t\treturn dev_id;\n+\t}\n+\treturn RTE_EVENT_MAX_DEVS;\n+}\n+\n+struct rte_eventdev *\n+rte_eventdev_pmd_allocate(const char *name, int socket_id)\n+{\n+\tstruct rte_eventdev *eventdev;\n+\tuint8_t dev_id;\n+\n+\tif (rte_eventdev_pmd_get_named_dev(name) != NULL) {\n+\t\tEDEV_LOG_ERR(\"Event device with name %s already \"\n+\t\t\t\t\"allocated!\", name);\n+\t\treturn NULL;\n+\t}\n+\n+\tdev_id = rte_eventdev_find_free_device_index();\n+\tif (dev_id == RTE_EVENT_MAX_DEVS) {\n+\t\tEDEV_LOG_ERR(\"Reached maximum number of event devices\");\n+\t\treturn NULL;\n+\t}\n+\n+\teventdev = 
&rte_eventdevs[dev_id];\n+\n+\tif (eventdev->data == NULL) {\n+\t\tstruct rte_eventdev_data *eventdev_data = NULL;\n+\n+\t\tint retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,\n+\t\t\t\tsocket_id);\n+\n+\t\tif (retval < 0 || eventdev_data == NULL)\n+\t\t\treturn NULL;\n+\n+\t\teventdev->data = eventdev_data;\n+\n+\t\tsnprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,\n+\t\t\t\t\"%s\", name);\n+\n+\t\teventdev->data->dev_id = dev_id;\n+\t\teventdev->data->socket_id = socket_id;\n+\t\teventdev->data->dev_started = 0;\n+\n+\t\teventdev->attached = RTE_EVENTDEV_ATTACHED;\n+\n+\t\teventdev_globals.nb_devs++;\n+\t}\n+\n+\treturn eventdev;\n+}\n+\n+int\n+rte_eventdev_pmd_release(struct rte_eventdev *eventdev)\n+{\n+\tint ret;\n+\n+\tif (eventdev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tret = rte_event_dev_close(eventdev->data->dev_id);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\teventdev->attached = RTE_EVENTDEV_DETACHED;\n+\teventdev_globals.nb_devs--;\n+\teventdev->data = NULL;\n+\n+\treturn 0;\n+}\n+\n+struct rte_eventdev *\n+rte_eventdev_pmd_vdev_init(const char *name, size_t dev_private_size,\n+\t\tint socket_id)\n+{\n+\tstruct rte_eventdev *eventdev;\n+\n+\t/* Allocate device structure */\n+\teventdev = rte_eventdev_pmd_allocate(name, socket_id);\n+\tif (eventdev == NULL)\n+\t\treturn NULL;\n+\n+\t/* Allocate private device structure */\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\teventdev->data->dev_private =\n+\t\t\t\trte_zmalloc_socket(\"eventdev device private\",\n+\t\t\t\t\t\tdev_private_size,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\tsocket_id);\n+\n+\t\tif (eventdev->data->dev_private == NULL)\n+\t\t\trte_panic(\"Cannot allocate memzone for private device\"\n+\t\t\t\t\t\" data\");\n+\t}\n+\n+\treturn eventdev;\n+}\n+\n+int\n+rte_eventdev_pmd_pci_probe(struct rte_pci_driver *pci_drv,\n+\t\t\tstruct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_eventdev_driver *eventdrv;\n+\tstruct rte_eventdev *eventdev;\n+\n+\tchar 
eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];\n+\n+\tint retval;\n+\n+\teventdrv = (struct rte_eventdev_driver *)pci_drv;\n+\tif (eventdrv == NULL)\n+\t\treturn -ENODEV;\n+\n+\trte_eal_pci_device_name(&pci_dev->addr, eventdev_name,\n+\t\t\tsizeof(eventdev_name));\n+\n+\teventdev = rte_eventdev_pmd_allocate(eventdev_name,\n+\t\t\t pci_dev->device.numa_node);\n+\tif (eventdev == NULL)\n+\t\treturn -ENOMEM;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\teventdev->data->dev_private =\n+\t\t\t\trte_zmalloc_socket(\n+\t\t\t\t\t\t\"eventdev private structure\",\n+\t\t\t\t\t\teventdrv->dev_private_size,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\trte_socket_id());\n+\n+\t\tif (eventdev->data->dev_private == NULL)\n+\t\t\trte_panic(\"Cannot allocate memzone for private \"\n+\t\t\t\t\t\"device data\");\n+\t}\n+\n+\teventdev->pci_dev = pci_dev;\n+\teventdev->driver = eventdrv;\n+\n+\t/* Invoke PMD device initialization function */\n+\tretval = (*eventdrv->eventdev_init)(eventdev);\n+\tif (retval == 0)\n+\t\treturn 0;\n+\n+\tEDEV_LOG_ERR(\"driver %s: event_dev_init(vendor_id=0x%x device_id=0x%x)\"\n+\t\t\t\" failed\", pci_drv->driver.name,\n+\t\t\t(unsigned int) pci_dev->id.vendor_id,\n+\t\t\t(unsigned int) pci_dev->id.device_id);\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\trte_free(eventdev->data->dev_private);\n+\n+\teventdev->attached = RTE_EVENTDEV_DETACHED;\n+\teventdev_globals.nb_devs--;\n+\n+\treturn -ENXIO;\n+}\n+\n+int\n+rte_eventdev_pmd_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tconst struct rte_eventdev_driver *eventdrv;\n+\tstruct rte_eventdev *eventdev;\n+\tchar eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];\n+\tint ret;\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\trte_eal_pci_device_name(&pci_dev->addr, eventdev_name,\n+\t\t\tsizeof(eventdev_name));\n+\n+\teventdev = rte_eventdev_pmd_get_named_dev(eventdev_name);\n+\tif (eventdev == NULL)\n+\t\treturn -ENODEV;\n+\n+\teventdrv = (const struct rte_eventdev_driver 
*)pci_dev->driver;\n+\tif (eventdrv == NULL)\n+\t\treturn -ENODEV;\n+\n+\t/* Invoke PMD device uninit function */\n+\tif (*eventdrv->eventdev_uninit) {\n+\t\tret = (*eventdrv->eventdev_uninit)(eventdev);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\t/* Free event device */\n+\trte_eventdev_pmd_release(eventdev);\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\trte_free(eventdev->data->dev_private);\n+\n+\teventdev->pci_dev = NULL;\n+\teventdev->driver = NULL;\n+\n+\treturn 0;\n+}\ndiff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h\nnew file mode 100644\nindex 0000000..e9d9b83\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_eventdev_pmd.h\n@@ -0,0 +1,504 @@\n+/*\n+ *\n+ *   Copyright(c) 2016 Cavium networks. All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Cavium networks nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef _RTE_EVENTDEV_PMD_H_\n+#define _RTE_EVENTDEV_PMD_H_\n+\n+/** @file\n+ * RTE Event PMD APIs\n+ *\n+ * @note\n+ * These API are from event PMD only and user applications should not call\n+ * them directly.\n+ */\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <string.h>\n+\n+#include <rte_dev.h>\n+#include <rte_pci.h>\n+#include <rte_malloc.h>\n+#include <rte_log.h>\n+#include <rte_common.h>\n+\n+#include \"rte_eventdev.h\"\n+\n+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG\n+#define RTE_PMD_DEBUG_TRACE(...) \\\n+\trte_pmd_debug_trace(__func__, __VA_ARGS__)\n+#else\n+#define RTE_PMD_DEBUG_TRACE(...)\n+#endif\n+\n+/* Logging Macros */\n+#define EDEV_LOG_ERR(fmt, args...) \\\n+\tRTE_LOG(ERR, EVENTDEV, \"%s() line %u: \" fmt \"\\n\",  \\\n+\t\t\t__func__, __LINE__, ## args)\n+\n+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG\n+#define EDEV_LOG_DEBUG(fmt, args...) \\\n+\tRTE_LOG(DEBUG, EVENTDEV, \"%s() line %u: \" fmt \"\\n\",  \\\n+\t\t\t__func__, __LINE__, ## args)\n+#else\n+#define EDEV_LOG_DEBUG(fmt, args...) 
(void)0\n+#endif\n+\n+/* Macros to check for valid device */\n+#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \\\n+\tif (!rte_eventdev_pmd_is_valid_dev((dev_id))) { \\\n+\t\tEDEV_LOG_ERR(\"Invalid dev_id=%d\\n\", dev_id); \\\n+\t\treturn retval; \\\n+\t} \\\n+} while (0)\n+\n+#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \\\n+\tif (!rte_eventdev_pmd_is_valid_dev((dev_id))) { \\\n+\t\tEDEV_LOG_ERR(\"Invalid dev_id=%d\\n\", dev_id); \\\n+\t\treturn; \\\n+\t} \\\n+} while (0)\n+\n+#define RTE_EVENTDEV_DETACHED  (0)\n+#define RTE_EVENTDEV_ATTACHED  (1)\n+\n+/**\n+ * Initialisation function of a event driver invoked for each matching\n+ * event PCI device detected during the PCI probing phase.\n+ *\n+ * @param dev\n+ *   The dev pointer is the address of the *rte_eventdev* structure associated\n+ *   with the matching device and which has been [automatically] allocated in\n+ *   the *rte_event_devices* array.\n+ *\n+ * @return\n+ *   - 0: Success, the device is properly initialised by the driver.\n+ *        In particular, the driver MUST have set up the *dev_ops* pointer\n+ *        of the *dev* structure.\n+ *   - <0: Error code of the device initialisation failure.\n+ */\n+typedef int (*eventdev_init_t)(struct rte_eventdev *dev);\n+\n+/**\n+ * Finalisation function of a driver invoked for each matching\n+ * PCI device detected during the PCI closing phase.\n+ *\n+ * @param dev\n+ *   The dev pointer is the address of the *rte_eventdev* structure associated\n+ *   with the matching device and which\thas been [automatically] allocated in\n+ *   the *rte_event_devices* array.\n+ *\n+ * @return\n+ *   - 0: Success, the device is properly finalised by the driver.\n+ *        In particular, the driver MUST free the *dev_ops* pointer\n+ *        of the *dev* structure.\n+ *   - <0: Error code of the device initialisation failure.\n+ */\n+typedef int (*eventdev_uninit_t)(struct rte_eventdev *dev);\n+\n+/**\n+ * The structure associated with a PMD 
driver.\n+ *\n+ * Each driver acts as a PCI driver and is represented by a generic\n+ * *event_driver* structure that holds:\n+ *\n+ * - An *rte_pci_driver* structure (which must be the first field).\n+ *\n+ * - The *eventdev_init* function invoked for each matching PCI device.\n+ *\n+ * - The size of the private data to allocate for each matching device.\n+ */\n+struct rte_eventdev_driver {\n+\tstruct rte_pci_driver pci_drv;\t/**< The PMD is also a PCI driver. */\n+\tunsigned int dev_private_size;\t/**< Size of device private data. */\n+\n+\teventdev_init_t eventdev_init;\t/**< Device init function. */\n+\teventdev_uninit_t eventdev_uninit; /**< Device uninit function. */\n+};\n+\n+/** Global structure used for maintaining state of allocated event devices */\n+struct rte_eventdev_global {\n+\tuint8_t nb_devs;\t/**< Number of devices found */\n+\tuint8_t max_devs;\t/**< Max number of devices */\n+};\n+\n+extern struct rte_eventdev_global *rte_eventdev_globals;\n+/** Pointer to global event devices data structure. */\n+extern struct rte_eventdev *rte_eventdevs;\n+/** The pool of rte_eventdev structures. 
*/\n+\n+/**\n+ * Get the rte_eventdev structure device pointer for the named device.\n+ *\n+ * @param name\n+ *   device name to select the device structure.\n+ *\n+ * @return\n+ *   - The rte_eventdev structure pointer for the given device ID.\n+ */\n+static inline struct rte_eventdev *\n+rte_eventdev_pmd_get_named_dev(const char *name)\n+{\n+\tstruct rte_eventdev *dev;\n+\tunsigned int i;\n+\n+\tif (name == NULL)\n+\t\treturn NULL;\n+\n+\tfor (i = 0, dev = &rte_eventdevs[i];\n+\t\t\ti < rte_eventdev_globals->max_devs; i++) {\n+\t\tif ((dev->attached == RTE_EVENTDEV_ATTACHED) &&\n+\t\t\t\t(strcmp(dev->data->name, name) == 0))\n+\t\t\treturn dev;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * Validate if the event device index is valid attached event device.\n+ *\n+ * @param dev_id\n+ *   Event device index.\n+ *\n+ * @return\n+ *   - If the device index is valid (1) or not (0).\n+ */\n+static inline unsigned\n+rte_eventdev_pmd_is_valid_dev(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tif (dev_id >= rte_eventdev_globals->nb_devs)\n+\t\treturn 0;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\tif (dev->attached != RTE_EVENTDEV_ATTACHED)\n+\t\treturn 0;\n+\telse\n+\t\treturn 1;\n+}\n+\n+/**\n+ * Definitions of all functions exported by a driver through the\n+ * the generic structure of type *event_dev_ops* supplied in the\n+ * *rte_eventdev* structure associated with a device.\n+ */\n+\n+/**\n+ * Get device information of a device.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param dev_info\n+ *   Event device information structure\n+ *\n+ * @return\n+ *   Returns 0 on success\n+ */\n+typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev,\n+\t\tstruct rte_event_dev_info *dev_info);\n+\n+/**\n+ * Configure a device.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ *\n+ * @return\n+ *   Returns 0 on success\n+ */\n+typedef int (*eventdev_configure_t)(struct rte_eventdev *dev);\n+\n+/**\n+ * Start a configured device.\n+ *\n+ * @param dev\n+ *   
Event device pointer\n+ *\n+ * @return\n+ *   Returns 0 on success\n+ */\n+typedef int (*eventdev_start_t)(struct rte_eventdev *dev);\n+\n+/**\n+ * Stop a configured device.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ */\n+typedef void (*eventdev_stop_t)(struct rte_eventdev *dev);\n+\n+/**\n+ * Close a configured device.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ *\n+ * @return\n+ * - 0 on success\n+ * - (-EAGAIN) if can't close as device is busy\n+ */\n+typedef int (*eventdev_close_t)(struct rte_eventdev *dev);\n+\n+/**\n+ * Retrieve the default event queue configuration.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param queue_id\n+ *   Event queue index\n+ * @param[out] queue_conf\n+ *   Event queue configuration structure\n+ *\n+ */\n+typedef void (*eventdev_queue_default_conf_get_t)(struct rte_eventdev *dev,\n+\t\tuint8_t queue_id, struct rte_event_queue_conf *queue_conf);\n+\n+/**\n+ * Setup an event queue.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param queue_id\n+ *   Event queue index\n+ * @param queue_conf\n+ *   Event queue configuration structure\n+ *\n+ * @return\n+ *   Returns 0 on success.\n+ */\n+typedef int (*eventdev_queue_setup_t)(struct rte_eventdev *dev,\n+\t\tuint8_t queue_id, struct rte_event_queue_conf *queue_conf);\n+\n+/**\n+ * Release memory resources allocated by given event queue.\n+ *\n+ * @param queue\n+ *   Event queue pointer\n+ *\n+ */\n+typedef void (*eventdev_queue_release_t)(void *queue);\n+\n+/**\n+ * Retrieve the default event port configuration.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param port_id\n+ *   Event port index\n+ * @param[out] port_conf\n+ *   Event port configuration structure\n+ *\n+ */\n+typedef void (*eventdev_port_default_conf_get_t)(struct rte_eventdev *dev,\n+\t\tuint8_t port_id, struct rte_event_port_conf *port_conf);\n+\n+/**\n+ * Setup an event port.\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param port_id\n+ *   Event port index\n+ * 
@param port_conf\n+ *   Event port configuration structure\n+ *\n+ * @return\n+ *   Returns 0 on success.\n+ */\n+typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,\n+\t\tuint8_t port_id, struct rte_event_port_conf *port_conf);\n+\n+/**\n+ * Release memory resources allocated by given event port.\n+ *\n+ * @param port\n+ *   Event port pointer\n+ *\n+ */\n+typedef void (*eventdev_port_release_t)(void *port);\n+\n+/**\n+ * Link multiple source event queues to destination event port.\n+ *\n+ * @param port\n+ *   Event port pointer\n+ * @param link\n+ *   An array of *nb_links* pointers to *rte_event_queue_link* structure\n+ * @param nb_links\n+ *   The number of links to establish\n+ *\n+ * @return\n+ *   Returns 0 on success.\n+ *\n+ */\n+typedef int (*eventdev_port_link_t)(void *port,\n+\t\tstruct rte_event_queue_link link[], uint16_t nb_links);\n+\n+/**\n+ * Unlink multiple source event queues from destination event port.\n+ *\n+ * @param port\n+ *   Event port pointer\n+ * @param queues\n+ *   An array of *nb_unlinks* event queues to be unlinked from the event port.\n+ * @param nb_unlinks\n+ *   The number of unlinks to establish\n+ *\n+ * @return\n+ *   Returns 0 on success.\n+ *\n+ */\n+typedef int (*eventdev_port_unlink_t)(void *port,\n+\t\tuint8_t queues[], uint16_t nb_unlinks);\n+\n+/**\n+ * Converts nanoseconds to *wait* value for rte_event_dequeue()\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param ns\n+ *   Wait time in nanosecond\n+ * @param[out] wait_ticks\n+ *   Value for the *wait* parameter in rte_event_dequeue() function\n+ *\n+ */\n+typedef void (*eventdev_dequeue_wait_time_t)(struct rte_eventdev *dev,\n+\t\tuint64_t ns, uint64_t *wait_ticks);\n+\n+/**\n+ * Dump internal information\n+ *\n+ * @param dev\n+ *   Event device pointer\n+ * @param f\n+ *   A pointer to a file for output\n+ *\n+ */\n+typedef void (*eventdev_dump_t)(struct rte_eventdev *dev, FILE *f);\n+\n+/** Event device operations function pointer table 
*/\n+struct rte_eventdev_ops {\n+\teventdev_info_get_t dev_infos_get;\t/**< Get device info. */\n+\teventdev_configure_t dev_configure;\t/**< Configure device. */\n+\teventdev_start_t dev_start;\t\t/**< Start device. */\n+\teventdev_stop_t dev_stop;\t\t/**< Stop device. */\n+\teventdev_close_t dev_close;\t\t/**< Close device. */\n+\n+\teventdev_queue_default_conf_get_t queue_def_conf;\n+\t/**< Get default queue configuration. */\n+\teventdev_queue_setup_t queue_setup;\n+\t/**< Set up an event queue. */\n+\teventdev_queue_release_t queue_release;\n+\t/**< Release an event queue. */\n+\n+\teventdev_port_default_conf_get_t port_def_conf;\n+\t/**< Get default port configuration. */\n+\teventdev_port_setup_t port_setup;\n+\t/**< Set up an event port. */\n+\teventdev_port_release_t port_release;\n+\t/**< Release an event port. */\n+\n+\teventdev_port_link_t port_link;\n+\t/**< Link event queues to an event port. */\n+\teventdev_port_unlink_t port_unlink;\n+\t/**< Unlink event queues from an event port. 
*/\n+\teventdev_dequeue_wait_time_t wait_time;\n+\t/**< Converts nanoseconds to *wait* value for rte_event_dequeue() */\n+\teventdev_dump_t dump;\n+\t/* Dump internal information */\n+};\n+\n+/**\n+ * Allocates a new eventdev slot for an event device and returns the pointer\n+ * to that slot for the driver to use.\n+ *\n+ * @param name\n+ *   Unique identifier name for each device\n+ * @param socket_id\n+ *   Socket to allocate resources on.\n+ * @return\n+ *   - Slot in the rte_dev_devices array for a new device;\n+ */\n+struct rte_eventdev *\n+rte_eventdev_pmd_allocate(const char *name, int socket_id);\n+\n+/**\n+ * Release the specified eventdev device.\n+ *\n+ * @param eventdev\n+ * The *eventdev* pointer is the address of the *rte_eventdev* structure.\n+ * @return\n+ *   - 0 on success, negative on error\n+ */\n+int\n+rte_eventdev_pmd_release(struct rte_eventdev *eventdev);\n+\n+/**\n+ * Creates a new virtual event device and returns the pointer to that device.\n+ *\n+ * @param name\n+ *   PMD type name\n+ * @param dev_private_size\n+ *   Size of event PMDs private data\n+ * @param socket_id\n+ *   Socket to allocate resources on.\n+ *\n+ * @return\n+ *   - Eventdev pointer if device is successfully created.\n+ *   - NULL if device cannot be created.\n+ */\n+struct rte_eventdev *\n+rte_eventdev_pmd_vdev_init(const char *name, size_t dev_private_size,\n+\t\tint socket_id);\n+\n+\n+/**\n+ * Wrapper for use by pci drivers as a .probe function to attach to a event\n+ * interface.\n+ */\n+int rte_eventdev_pmd_pci_probe(struct rte_pci_driver *pci_drv,\n+\t\t\t    struct rte_pci_device *pci_dev);\n+\n+/**\n+ * Wrapper for use by pci drivers as a .remove function to detach a event\n+ * interface.\n+ */\n+int rte_eventdev_pmd_pci_remove(struct rte_pci_device *pci_dev);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_EVENTDEV_PMD_H_ */\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nnew file mode 
100644\nindex 0000000..ef40aae\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -0,0 +1,39 @@\n+DPDK_17.02 {\n+\tglobal:\n+\n+\trte_eventdevs;\n+\n+\trte_event_dev_count;\n+\trte_event_dev_get_dev_id;\n+\trte_event_dev_socket_id;\n+\trte_event_dev_info_get;\n+\trte_event_dev_configure;\n+\trte_event_dev_start;\n+\trte_event_dev_stop;\n+\trte_event_dev_close;\n+\trte_event_dev_dump;\n+\n+\trte_event_port_default_conf_get;\n+\trte_event_port_setup;\n+\trte_event_port_dequeue_depth;\n+\trte_event_port_enqueue_depth;\n+\trte_event_port_count;\n+\trte_event_port_link;\n+\trte_event_port_unlink;\n+\trte_event_port_links_get;\n+\n+\trte_event_queue_default_conf_get;\n+\trte_event_queue_setup;\n+\trte_event_queue_count;\n+\trte_event_queue_priority;\n+\n+\trte_event_dequeue_wait_time;\n+\n+\trte_eventdev_pmd_allocate;\n+\trte_eventdev_pmd_release;\n+\trte_eventdev_pmd_vdev_init;\n+\trte_eventdev_pmd_pci_probe;\n+\trte_eventdev_pmd_pci_remove;\n+\n+\tlocal: *;\n+};\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex f75f0e2..716725a 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -93,6 +93,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF)           += -lrte_mbuf\n _LDLIBS-$(CONFIG_RTE_LIBRTE_NET)            += -lrte_net\n _LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER)          += -lrte_ethdev\n _LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV)      += -lrte_cryptodev\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV)       += -lrte_eventdev\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL)        += -lrte_mempool\n _LDLIBS-$(CONFIG_RTE_LIBRTE_RING)           += -lrte_ring\n _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrte_eal\n",
    "prefixes": [
        "dpdk-dev",
        "2/4"
    ]
}