get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/17689/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 17689,
    "url": "http://patches.dpdk.org/api/patches/17689/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1480996340-29871-4-git-send-email-jerin.jacob@caviumnetworks.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1480996340-29871-4-git-send-email-jerin.jacob@caviumnetworks.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1480996340-29871-4-git-send-email-jerin.jacob@caviumnetworks.com",
    "date": "2016-12-06T03:52:17",
    "name": "[dpdk-dev,v2,3/6] eventdev: implement the northbound APIs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "326ac59766e2455fa05eaeb78dbdeb59e1e58ea1",
    "submitter": {
        "id": 305,
        "url": "http://patches.dpdk.org/api/people/305/?format=api",
        "name": "Jerin Jacob",
        "email": "jerin.jacob@caviumnetworks.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1480996340-29871-4-git-send-email-jerin.jacob@caviumnetworks.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/17689/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/17689/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id CBB1047D1;\n\tTue,  6 Dec 2016 04:53:37 +0100 (CET)",
            "from NAM01-BN3-obe.outbound.protection.outlook.com\n\t(mail-bn3nam01on0042.outbound.protection.outlook.com [104.47.33.42])\n\tby dpdk.org (Postfix) with ESMTP id 76B332BC5\n\tfor <dev@dpdk.org>; Tue,  6 Dec 2016 04:52:48 +0100 (CET)",
            "from localhost.caveonetworks.com (50.233.148.156) by\n\tBY1PR0701MB1723.namprd07.prod.outlook.com (10.162.111.142) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P384) id 15.1.693.12;\n\tTue, 6 Dec 2016 03:52:44 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=CAVIUMNETWORKS.onmicrosoft.com; s=selector1-cavium-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n\tbh=3lItDHNaJhAjrQ7UUJ86IX30MuPG0nFmidhpPPte+pU=;\n\tb=aTPh/sLJMhDwyjfpqig+0fFzi3EggUpp7F6wd2VgSukiNTaHlTwyG1+7kOj5sdNUmhgUmbSqMf2xeEmLGRW5++lsv4O5bqkewOfgwfVH4fSlD2eu/EjmzvL6x67pTcdEh5mpP0DVmFH4MNUCSR8HezjZ0378VRZS5XnVODCeraM=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Jerin.Jacob@cavium.com; ",
        "From": "Jerin Jacob <jerin.jacob@caviumnetworks.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<thomas.monjalon@6wind.com>, <bruce.richardson@intel.com>,\n\t<hemant.agrawal@nxp.com>, <gage.eads@intel.com>,\n\t<harry.van.haaren@intel.com>,\n\tJerin Jacob <jerin.jacob@caviumnetworks.com>",
        "Date": "Tue, 6 Dec 2016 09:22:17 +0530",
        "Message-ID": "<1480996340-29871-4-git-send-email-jerin.jacob@caviumnetworks.com>",
        "X-Mailer": "git-send-email 2.5.5",
        "In-Reply-To": "<1480996340-29871-1-git-send-email-jerin.jacob@caviumnetworks.com>",
        "References": "<1479447902-3700-2-git-send-email-jerin.jacob@caviumnetworks.com>\n\t<1480996340-29871-1-git-send-email-jerin.jacob@caviumnetworks.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[50.233.148.156]",
        "X-ClientProxiedBy": "BLUPR13CA0014.namprd13.prod.outlook.com (10.160.191.24) To\n\tBY1PR0701MB1723.namprd07.prod.outlook.com (10.162.111.142)",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BY1PR0701MB1723;\n\t2:OTzBWspjrq6D9EbSFHo1TB+e2l+k5wRcZcjZU1T8TiV37XzezAXeyD9gN7oif67c0lqkpPiqb1dlAcPVQhkkyg+XR+z8kZrLaYArCjoz5MiA6iKxv1NOeliJEOAYXGNaE4sXlQCrub1szW/Lus7CeJt2l2pLpNtf8T+JVhPeOtM=;\n\t3:8xWJGPxCtLKjAPhKWpzpxWXko/VBxwdu3FNIaHDT3eGCyAOn1aIt9r5kp1MNB2AWEYfyxLJtMshv34A1bRFkoxjS2vX/Q6E/0RWtDX9CGJDQneYJbfo7YzTizb3aTyOE29Y+R1Le93gnaUTtqafJiPSADJJ56uiDNrrsVAGVAS8=;\n\t25:XQvV4HSiEvMmI2Wt0ODmxB3S0/FQKNosKeSFl2ik7kLKkZ5uPYdmCwmixd/B2WAlle6ABXwtKqZNSS8KpvwZ6OFr9V9Bj+3kg6EKJpUWXFnrUhXGTNaz7eeP39JzZDwMscxhlhBi2yuTX2CqTf+/gnpuKnv0l9rL589CraUZ+w+NVA2eJDVNRb/+b1QM4+wLlxCvOZvmMCIT2AK//USOX6W5Egd6cCPiUSzqM3wEoHw83kSNQb5rzG85W2dZHbhghUitvye86YYT5L3RSMEcvKZCl3pDsgUk8voyDPnjnsMNawcWTRkGIaAvdoczIRG12WR5MYwyKYSs8X4qbnAaLkSSLJZepwr/vMitA/iDJE6zT3dYaZ2YqL3LcZVR2tT9himxz6dO6imtKgEjU7QQkcRXaOm7iOeL08TeTDrpWAk8L14V2uykTl4mrCFxhbNPznYihdaXL6Xvb/zNRSXgPQ==",
            "1; BY1PR0701MB1723;\n\t31:aWv5WlslEPk9zE/HggwJGMMQXvgtA4qhnn+DMcjXQZdwGLTIAYY/cJZvkM0lISIixQ78fwo7yeNHTEWNfpQyqZC+0eiez0DgX8f40f9KlkJ08jSGueveRjz+3Hwk9t52UmTq4hFi2MsBJJ/B8vRuvH5VUoariwEP1198NmSMHUMUAXEWJsSKznGbEHXtpDuiQnBbuufRNCn2CRo9X/TKhDyzdRqK/DGVAdLX2zBobALuI8w4Oh34niLBh8ranwQG;\n\t20:YnShj5V9Dd1FzgJoWUm+U4G2M1FtuuMGE37fXpXT9BhPaWPLRnWQ6sjkGTjYR0ylZ/EovLg/PzATT8eAJzDvAraPtwjBZsmKZ7/yvHSsQYnVJ4+ieYtE/KBLHpZK8KKnBis1AJGzyRt/yh9cgx1J8T2O+zFjoBFoNGMJe2rfP2PgMrzwTeRyAJZfdM8K2B7kWNFL1BzKVXb63gtjW8iYm2soqP9njkbmJsZnQCdFwv+OfI0Q122NY/rLQDJxVRuf6pKx5i5qJynviv2nSytJPNVC9lREUX68VuW3TuTMxDJpejpkpM2h+8AwCVHFL2uZf+lLhxRE7l7W4z/QHw1cx/XnBQDH7LKHGQek8xfxjS6DYAE69YNEr7IbdoDjJMF/WJnN0vHHK6eo6M5C0US83rT8tGWD5QaAf2VlD1tCBIVH18SuhGW/IDUdnnrLL6yHruPRe/p33rVrH8mlx06DFIq46exzAjxAu8Y42X7Juo60Usz3dw2D5VRDvdpnR4pl5Ej69Psbn+mC7r6227McNOuBXNKrohItJDolALSSf4uivpA252Oijao1/RkfiZYuZnrPQoMy4CV8dyNHph03G9kcSCudJZm8zpGTkhph+k0=",
            "1; BY1PR0701MB1723;\n\t4:P76af0KQI0GZ67V01KFsM7BthI/UMDMJLng8ncFGVVBDVBFbAhZUpY+mwG7+UTMUaIjbBPF8gC/w85FO0thWD47DStkSUbaYUjqXMtWwwFukgKKd7Rvun2kqWQgzCtr4N/Ny841/p4TPG8vSwab4FrbLGIXgDfovu80sy5KMTYyMq0o0Z98HF5CPkmIoLamIkJcjDjLlzdvvCNbruWv8ruDZbDFEQgnsbQ4JIxVYmRyQy6rUMzfPw+7vsY7HqHAh6UeaHF22YyuFtaoagYFx2L+eyqSvV7PCM8bOUfJiuXVD4hdJbaPEBlLGv/kV7BIyNrkr5dMvCTrD3udLFTliV0D6+YTb+EhzgazrH6X/lU6nqDDgvBU+rG+Wu142CbqAoFFDzLrsAc/cXusR5MgaX2Qha96UhQLO28JC/H0wklRR+BRxqesGzh9KIQWG01i41nAB9aqB7XI8HaNlCGXUyj7R0PvZ5eTA2uMnuUI8fauA9Ql0AmmJMx+Thyk3e0zwjZNcJa5x9LHD0wNpurxLnfKRRmz1Ib9PH0idWax7Ecblg+yrN9QeOkqXV7f0LKCFt7SZpXDl3KH5UWywUD4ZHw==",
            "=?us-ascii?Q?1; BY1PR0701MB1723;\n\t23:2jeSakcbFbRCciP6U6kmLdbfb5CgJ3FnuYJJFnY?=\n\t=?us-ascii?Q?CI5yDsNLexF6SXQ/tpiyUz72GXOEXj7OjcfRnpNMZtJPbJ7bgM91C7ca8514?=\n\t=?us-ascii?Q?gaYflVoE/AIB58ybeMsehs3BHgYWyAhn4+8GUk6bHoovDQHVc5muhJv0y5it?=\n\t=?us-ascii?Q?TUC21KjokqPX12vtD+ju2dYlPdIzm5KA199Uq8d9RElLFx/j2u4iod2bdbNc?=\n\t=?us-ascii?Q?lBS47h/nICJqWp78bETzcykDfNZ+JWAqlW0QsuqQunwckUzfDyfGRkJ26JY+?=\n\t=?us-ascii?Q?vdR4SROGBzpxPd18ux63ePvQrUVRfEXNP2GsfB6E7L9wgaaQI1j0TUKS3P5k?=\n\t=?us-ascii?Q?SILpTYb3sgntDTLbvH19rLU0Gf/nmXlEh7MYAXGTf5B/5u0pbZudqZ6YpNy+?=\n\t=?us-ascii?Q?7gbTJxBUe+HIsd0uIf2tjzp6J5P4d21kgVPO1xe2WUWI+f5yI2FKzzi2OECZ?=\n\t=?us-ascii?Q?7CSVPq5rz0m2lZPKx7ZSax5z2cL3/whshAWrX6o/MHEybuxzM58uWtUpgHvc?=\n\t=?us-ascii?Q?aFbvbn58gvKnn6Oh6Avk9IImB/IzS6YQqznzlBGtS/bMp8SsdLKMmsUpzfFb?=\n\t=?us-ascii?Q?OpnAjFqDfKspZWye+q0Zq1pWGC1TbDnK8vf2Wi53WVQiSDmDFNOk39yMY4AJ?=\n\t=?us-ascii?Q?FfSE05WJU55Pr3YXBSkv2LLZOSOm1elLpCiNpshz2AKYYL1jBehAASarUr99?=\n\t=?us-ascii?Q?4lP7aHwWgQylC5C5TfN9lYVOZMfaCFDRcXPj1azhJT3kSEyjge1CP2ZtVaTx?=\n\t=?us-ascii?Q?eeQwPj5gb98G/PB0rFCb82UFV20G1zt9tEBnbeIIPuYRuMO9avPTgg6tISGq?=\n\t=?us-ascii?Q?J2WdiavLMZcbGtrvjjvgjH0F0pSvz8/Yu0Rm0E+BTfEHj2vgdrnnNwgtAxi3?=\n\t=?us-ascii?Q?BB1oz/2YJrSkcU/yWPO2ldOqnKJyRrq/gN6lrlwx4IdnQCD4EcNtJPO3T7pg?=\n\t=?us-ascii?Q?a6eKI3WN8WCIAHP/7iz1USLthdrs7MAqzfemKFCbTGe+yuRGJlm6uRe+q4Mm?=\n\t=?us-ascii?Q?DBMZc/v+STSWJ3W+911eT4Ve8ljZY1AtIEV3QMRKXg7dFby0fZhkef8duPjQ?=\n\t=?us-ascii?Q?v3j2ulYHZ2xJ8FSVELwhR6wKvXl3t/DMWoK/zU85aKv1+OGfNdJKveYjMpPs?=\n\t=?us-ascii?Q?BqtRGnhnY3r0soBcdqNcYHcoXSKzyLYeef4a4xDD3jiPpQUF1nW4xm5MBG66?=\n\t=?us-ascii?Q?xugL9ZbJ2aTyv4ICU3SR7YujgWN5CMHlWvNi9rG3mVvexEO6JmHvQfiNjJX8?=\n\t=?us-ascii?Q?LQ811yR9Vaz5gL61BnBTqhM5ZZFqr719knqOyNqsXtIqtMC8uWOrYBwo2fOW?=\n\t=?us-ascii?Q?Zt2x1lSisFgn2+Ih/Fi7X1lzrhM8dE+pezP6vv7kp4lkuNRxAPpnapqtZV3A?=\n\t=?us-ascii?Q?eZYEUEFgEwaxfB8byhw1DoOZiIN7n9OEhzx0MYuCQy20DdGRJq831/gGAl+9?=\n\t=?us-ascii?Q?VYAlcQmT2ETdsp0B+WYTaSQJCv7cqdNRBkCLNC+r8fdqNwjwVMgpQ?=",
            "1; BY1PR0701MB1723;\n\t6:ZyXEqMoIMS9y5uZ0a2umvra+nS34foiBaCXYBGIjcr5gsXzV/jVvpXuaLrL6LL5e2len3L76jWKkv8XELz78sJjVktINWlhaosT2miEJLYvEqdJie4R+vx14DRplgfFs8eB+SXrplW2LDztrbQK9IThL909YNTOB04v1OWK8qD5ZSGWYGOp3VD3DbhTqW6kuV6d65jn8Ni21U0rtZLz775uhDCuHEQL0AEcwV7YkoIDIEiFcckmsODlyrQHeqvwTZwFUEJWhl7bz32ZOT1pYErb0vRaSIsAVLwBpo/cOfR4CjFJ2c01J1Gs/jzU9naaEZcLaOwBd1IIgAfETWtUybLPaJJ9OqI4QagvSzYEdla0=;\n\t5:NL2IwddoKE3Nzt6BnForrDcJYuA1Rl1ezXbdsuv+uxrGW77xKs+qVEjMfUf/7YtCd8OA7opIrNPAL5Y7UenIA6m05dp4vv0wdxz3W7CfDxNX7jXJsv9uegzWdCjm0DaRmchwFLZqsTzBjlEzMew3fA==;\n\t24:uOUqtBR0D6JZ/puZVXG2AHPITg3nbXXBgDkfB4iHlPNXQ1RWNDpIy81VLzMheefXyfIKlNHYcJ0D6QH4lWPB6rUHCmJIs+2Xq+8TdoYwdQg=",
            "1; BY1PR0701MB1723;\n\t7:QlXSXOra+ywOA00bpzLrqG1oP+Oi/ji0osVBoVfKOWbRPr0iNPhCKS/ahB621NGyo5fcqqX3vBNqdxSPZZMDuA69O57721iD4Jzw1EoZXm69f3f80bH//viOfCRVtOsXvlxNPffi9zTq/psDjIYsD+tthgWqSFbCcTadQXbgVAxOnMxzuKlJT9BepK5aImrUt5+ZaiqVdjpMPJzvhKHQdRhpnVBx9r/Q4asSPCn26oR29e//lBcxRl4foD/r65Aa809837Fwb7S7XR92Lb13CAzZ3f4U//atwLg4u375ohYAdXp3NmEU/62d3hXfNqUdiuykiGCcf3NHrijlvJABKezbQZq8rnX3uWLYJTgMiqc="
        ],
        "X-MS-Office365-Filtering-Correlation-Id": "0b1a2ddd-833a-40b2-b52e-08d41d8b56b5",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0; RULEID:(22001);\n\tSRVR:BY1PR0701MB1723; ",
        "X-Microsoft-Antispam-PRVS": "<BY1PR0701MB17232615D26BB8C2E0B14BDC81820@BY1PR0701MB1723.namprd07.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:;",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(6040375)(601004)(2401047)(5005006)(8121501046)(10201501046)(3002001)(6041248)(20161123555025)(20161123564025)(20161123562025)(20161123560025)(6072148);\n\tSRVR:BY1PR0701MB1723; BCL:0; PCL:0; RULEID:; SRVR:BY1PR0701MB1723; ",
        "X-Forefront-PRVS": "01480965DA",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(4630300001)(6069001)(6009001)(7916002)(199003)(189002)(101416001)(110136003)(6916009)(69596002)(4326007)(8666005)(106356001)(76176999)(2351001)(47776003)(97736004)(2906002)(107886002)(7846002)(5660300001)(92566002)(305945005)(48376002)(733004)(42882006)(68736007)(50986999)(39850400001)(50466002)(6666003)(36756003)(2950100002)(5890100001)(575784001)(81156014)(105586002)(42186005)(3846002)(33646002)(81166006)(8676002)(5003940100001)(6512006)(6506006)(6486002)(53416004)(7736002)(189998001)(38730400001)(4001430100002)(6116002)(50226002)(39840400001)(39450400002)(39410400001)(66066001)(76506005)(7059030)(579004);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BY1PR0701MB1723;\n\tH:localhost.caveonetworks.com; \n\tFPR:; SPF:None; PTR:InfoNoRecords; A:1; MX:1; LANG:en; ",
        "Received-SPF": "None (protection.outlook.com: cavium.com does not designate\n\tpermitted sender hosts)",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "caviumnetworks.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Dec 2016 03:52:44.6350\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY1PR0701MB1723",
        "Subject": "[dpdk-dev] [PATCH v2 3/6] eventdev: implement the northbound APIs",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch implements northbound eventdev API interface using\nsouthbond driver interface\n\nSigned-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>\n---\n config/common_base                           |    6 +\n lib/Makefile                                 |    1 +\n lib/librte_eal/common/include/rte_log.h      |    1 +\n lib/librte_eventdev/Makefile                 |   57 ++\n lib/librte_eventdev/rte_eventdev.c           | 1001 ++++++++++++++++++++++++++\n lib/librte_eventdev/rte_eventdev.h           |  108 ++-\n lib/librte_eventdev/rte_eventdev_pmd.h       |  109 +++\n lib/librte_eventdev/rte_eventdev_version.map |   33 +\n mk/rte.app.mk                                |    1 +\n 9 files changed, 1311 insertions(+), 6 deletions(-)\n create mode 100644 lib/librte_eventdev/Makefile\n create mode 100644 lib/librte_eventdev/rte_eventdev.c\n create mode 100644 lib/librte_eventdev/rte_eventdev_version.map",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 4bff83a..7a8814e 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -411,6 +411,12 @@ CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n\n CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y\n \n #\n+# Compile generic event device library\n+#\n+CONFIG_RTE_LIBRTE_EVENTDEV=y\n+CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n\n+CONFIG_RTE_EVENT_MAX_DEVS=16\n+CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64\n # Compile librte_ring\n #\n CONFIG_RTE_LIBRTE_RING=y\ndiff --git a/lib/Makefile b/lib/Makefile\nindex 990f23a..1a067bf 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -41,6 +41,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile\n DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline\n DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether\n DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev\n+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev\n DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost\n DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash\n DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm\ndiff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h\nindex 29f7d19..9a07d92 100644\n--- a/lib/librte_eal/common/include/rte_log.h\n+++ b/lib/librte_eal/common/include/rte_log.h\n@@ -79,6 +79,7 @@ extern struct rte_logs rte_logs;\n #define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */\n #define RTE_LOGTYPE_MBUF    0x00010000 /**< Log related to mbuf. */\n #define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */\n+#define RTE_LOGTYPE_EVENTDEV 0x00040000 /**< Log related to eventdev. */\n \n /* these log types can be used in an application */\n #define RTE_LOGTYPE_USER1   0x01000000 /**< User-defined log type 1. */\ndiff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile\nnew file mode 100644\nindex 0000000..dac0663\n--- /dev/null\n+++ b/lib/librte_eventdev/Makefile\n@@ -0,0 +1,57 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2016 Cavium networks. 
All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of Cavium networks nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+# library name\n+LIB = librte_eventdev.a\n+\n+# library version\n+LIBABIVER := 1\n+\n+# build flags\n+CFLAGS += -O3\n+CFLAGS += $(WERROR_FLAGS)\n+\n+# library source files\n+SRCS-y += rte_eventdev.c\n+\n+# export include files\n+SYMLINK-y-include += rte_eventdev.h\n+SYMLINK-y-include += rte_eventdev_pmd.h\n+\n+# versioning export map\n+EXPORT_MAP := rte_eventdev_version.map\n+\n+# library dependencies\n+DEPDIRS-y += lib/librte_eal\n+DEPDIRS-y += lib/librte_mbuf\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c\nnew file mode 100644\nindex 0000000..0a1d2d6\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_eventdev.c\n@@ -0,0 +1,1001 @@\n+/*\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016 Cavium networks. 
All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Cavium networks nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <ctype.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdarg.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <inttypes.h>\n+#include <sys/types.h>\n+#include <sys/queue.h>\n+\n+#include <rte_byteorder.h>\n+#include <rte_log.h>\n+#include <rte_debug.h>\n+#include <rte_dev.h>\n+#include <rte_pci.h>\n+#include <rte_memory.h>\n+#include <rte_memcpy.h>\n+#include <rte_memzone.h>\n+#include <rte_eal.h>\n+#include <rte_per_lcore.h>\n+#include <rte_lcore.h>\n+#include <rte_atomic.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_common.h>\n+#include <rte_malloc.h>\n+#include <rte_errno.h>\n+\n+#include \"rte_eventdev.h\"\n+#include \"rte_eventdev_pmd.h\"\n+\n+struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];\n+\n+struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];\n+\n+static struct rte_eventdev_global eventdev_globals = {\n+\t.nb_devs\t\t= 0\n+};\n+\n+struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;\n+\n+/* Event dev north bound API implementation */\n+\n+uint8_t\n+rte_event_dev_count(void)\n+{\n+\treturn rte_eventdev_globals->nb_devs;\n+}\n+\n+int\n+rte_event_dev_get_dev_id(const char *name)\n+{\n+\tint i;\n+\n+\tif (!name)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < rte_eventdev_globals->nb_devs; i++)\n+\t\tif ((strcmp(rte_event_devices[i].data->name, name)\n+\t\t\t\t== 0) 
&&\n+\t\t\t\t(rte_event_devices[i].attached ==\n+\t\t\t\t\t\tRTE_EVENTDEV_ATTACHED))\n+\t\t\treturn i;\n+\treturn -ENODEV;\n+}\n+\n+int\n+rte_event_dev_socket_id(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\treturn dev->data->socket_id;\n+}\n+\n+int\n+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (dev_info == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmemset(dev_info, 0, sizeof(struct rte_event_dev_info));\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);\n+\t(*dev->dev_ops->dev_infos_get)(dev, dev_info);\n+\n+\tdev_info->pci_dev = dev->pci_dev;\n+\treturn 0;\n+}\n+\n+static inline int\n+rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)\n+{\n+\tuint8_t old_nb_queues = dev->data->nb_queues;\n+\tvoid **queues;\n+\tuint8_t *queues_prio;\n+\tunsigned int i;\n+\n+\tRTE_EDEV_LOG_DEBUG(\"Setup %d queues on device %u\", nb_queues,\n+\t\t\t dev->data->dev_id);\n+\n+\t/* First time configuration */\n+\tif (dev->data->queues == NULL && nb_queues != 0) {\n+\t\tdev->data->queues = rte_zmalloc_socket(\"eventdev->data->queues\",\n+\t\t\t\tsizeof(dev->data->queues[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->queues == NULL) {\n+\t\t\tdev->data->nb_queues = 0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to get memory for queue meta,\"\n+\t\t\t\t\t\"nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t\t/* Allocate memory to store queue priority */\n+\t\tdev->data->queues_prio = rte_zmalloc_socket(\n+\t\t\t\t\"eventdev->data->queues_prio\",\n+\t\t\t\tsizeof(dev->data->queues_prio[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->queues_prio == NULL) {\n+\t\t\tdev->data->nb_queues = 
0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for queue priority,\"\n+\t\t\t\t\t\"nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t} else if (dev->data->queues != NULL && nb_queues != 0) {/* re-config */\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);\n+\n+\t\tqueues = dev->data->queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t(*dev->dev_ops->queue_release)(queues[i]);\n+\n+\t\tqueues = rte_realloc(queues, sizeof(queues[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (queues == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to realloc queue meta data,\"\n+\t\t\t\t\t\t\" nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t\tdev->data->queues = queues;\n+\n+\t\t/* Re allocate memory to store queue priority */\n+\t\tqueues_prio = dev->data->queues_prio;\n+\t\tqueues_prio = rte_realloc(queues_prio,\n+\t\t\t\tsizeof(queues_prio[0]) * nb_queues,\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (queues_prio == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to realloc queue priority,\"\n+\t\t\t\t\t\t\" nb_queues %u\", nb_queues);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t\tdev->data->queues_prio = queues_prio;\n+\n+\t\tif (nb_queues > old_nb_queues) {\n+\t\t\tuint8_t new_qs = nb_queues - old_nb_queues;\n+\n+\t\t\tmemset(queues + old_nb_queues, 0,\n+\t\t\t\tsizeof(queues[0]) * new_qs);\n+\t\t\tmemset(queues_prio + old_nb_queues, 0,\n+\t\t\t\tsizeof(queues_prio[0]) * new_qs);\n+\t\t}\n+\t} else if (dev->data->queues != NULL && nb_queues == 0) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);\n+\n+\t\tqueues = dev->data->queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t(*dev->dev_ops->queue_release)(queues[i]);\n+\t}\n+\n+\tdev->data->nb_queues = nb_queues;\n+\treturn 0;\n+}\n+\n+static inline int\n+rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)\n+{\n+\tuint8_t old_nb_ports = dev->data->nb_ports;\n+\tvoid **ports;\n+\tuint16_t *links_map;\n+\tuint8_t 
*ports_dequeue_depth;\n+\tuint8_t *ports_enqueue_depth;\n+\tunsigned int i;\n+\n+\tRTE_EDEV_LOG_DEBUG(\"Setup %d ports on device %u\", nb_ports,\n+\t\t\t dev->data->dev_id);\n+\n+\t/* First time configuration */\n+\tif (dev->data->ports == NULL && nb_ports != 0) {\n+\t\tdev->data->ports = rte_zmalloc_socket(\"eventdev->data->ports\",\n+\t\t\t\tsizeof(dev->data->ports[0]) * nb_ports,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->ports == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for port meta data,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Allocate memory to store ports dequeue depth */\n+\t\tdev->data->ports_dequeue_depth =\n+\t\t\trte_zmalloc_socket(\"eventdev->ports_dequeue_depth\",\n+\t\t\tsizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->ports_dequeue_depth == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for port deq meta,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Allocate memory to store ports enqueue depth */\n+\t\tdev->data->ports_enqueue_depth =\n+\t\t\trte_zmalloc_socket(\"eventdev->ports_enqueue_depth\",\n+\t\t\tsizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->ports_enqueue_depth == NULL) {\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for port enq meta,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Allocate memory to store queue to port link connection */\n+\t\tdev->data->links_map =\n+\t\t\trte_zmalloc_socket(\"eventdev->links_map\",\n+\t\t\tsizeof(dev->data->links_map[0]) * nb_ports *\n+\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV,\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\t\tif (dev->data->links_map == NULL) 
{\n+\t\t\tdev->data->nb_ports = 0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for port_map area,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\t} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);\n+\n+\t\tports = dev->data->ports;\n+\t\tports_dequeue_depth = dev->data->ports_dequeue_depth;\n+\t\tports_enqueue_depth = dev->data->ports_enqueue_depth;\n+\t\tlinks_map = dev->data->links_map;\n+\n+\t\tfor (i = nb_ports; i < old_nb_ports; i++)\n+\t\t\t(*dev->dev_ops->port_release)(ports[i]);\n+\n+\t\t/* Realloc memory for ports */\n+\t\tports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (ports == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to realloc port meta data,\"\n+\t\t\t\t\t\t\" nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Realloc memory for ports_dequeue_depth */\n+\t\tports_dequeue_depth = rte_realloc(ports_dequeue_depth,\n+\t\t\tsizeof(ports_dequeue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (ports_dequeue_depth == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to realloc port dequeue meta,\"\n+\t\t\t\t\t\t\" nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Realloc memory for ports_enqueue_depth */\n+\t\tports_enqueue_depth = rte_realloc(ports_enqueue_depth,\n+\t\t\tsizeof(ports_enqueue_depth[0]) * nb_ports,\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (ports_enqueue_depth == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to realloc port enqueue meta,\"\n+\t\t\t\t\t\t\" nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\t/* Realloc memory to store queue to port link connection */\n+\t\tlinks_map = rte_realloc(links_map,\n+\t\t\tsizeof(dev->data->links_map[0]) * nb_ports *\n+\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV,\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (dev->data->links_map == NULL) {\n+\t\t\tdev->data->nb_ports = 
0;\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to realloc mem for port_map,\"\n+\t\t\t\t\t\"nb_ports %u\", nb_ports);\n+\t\t\treturn -(ENOMEM);\n+\t\t}\n+\n+\t\tif (nb_ports > old_nb_ports) {\n+\t\t\tuint8_t new_ps = nb_ports - old_nb_ports;\n+\n+\t\t\tmemset(ports + old_nb_ports, 0,\n+\t\t\t\tsizeof(ports[0]) * new_ps);\n+\t\t\tmemset(ports_dequeue_depth + old_nb_ports, 0,\n+\t\t\t\tsizeof(ports_dequeue_depth[0]) * new_ps);\n+\t\t\tmemset(ports_enqueue_depth + old_nb_ports, 0,\n+\t\t\t\tsizeof(ports_enqueue_depth[0]) * new_ps);\n+\t\t\tmemset(links_map +\n+\t\t\t\t(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),\n+\t\t\t\t0, sizeof(ports_enqueue_depth[0]) * new_ps);\n+\t\t}\n+\n+\t\tdev->data->ports = ports;\n+\t\tdev->data->ports_dequeue_depth = ports_dequeue_depth;\n+\t\tdev->data->ports_enqueue_depth = ports_enqueue_depth;\n+\t\tdev->data->links_map = links_map;\n+\t} else if (dev->data->ports != NULL && nb_ports == 0) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);\n+\n+\t\tports = dev->data->ports;\n+\t\tfor (i = nb_ports; i < old_nb_ports; i++)\n+\t\t\t(*dev->dev_ops->port_release)(ports[i]);\n+\t}\n+\n+\tdev->data->nb_ports = nb_ports;\n+\treturn 0;\n+}\n+\n+int\n+rte_event_dev_configure(uint8_t dev_id,\n+\t\t\tconst struct rte_event_dev_config *dev_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_dev_info info;\n+\tint diag;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);\n+\n+\tif (dev->data->dev_started) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t    \"device %d must be stopped to allow configuration\", dev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tif (dev_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\t(*dev->dev_ops->dev_infos_get)(dev, &info);\n+\n+\t/* Check dequeue_timeout_ns value is in limit */\n+\tif (!dev_conf->event_dev_cfg & 
RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {\n+\t\tif (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns\n+\t\t\t|| dev_conf->dequeue_timeout_ns >\n+\t\t\t\t info.max_dequeue_timeout_ns) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"dev%d invalid dequeue_timeout_ns=%d\"\n+\t\t\t\" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d\",\n+\t\t\tdev_id, dev_conf->dequeue_timeout_ns,\n+\t\t\tinfo.min_dequeue_timeout_ns,\n+\t\t\tinfo.max_dequeue_timeout_ns);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* Check nb_events_limit is in limit */\n+\tif (dev_conf->nb_events_limit > info.max_num_events) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_events_limit=%d > max_num_events=%d\",\n+\t\tdev_id, dev_conf->nb_events_limit, info.max_num_events);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_queues is in limit */\n+\tif (!dev_conf->nb_event_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_event_queues cannot be zero\",\n+\t\t\t\t\tdev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_queues > info.max_event_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"%d nb_event_queues=%d > max_event_queues=%d\",\n+\t\tdev_id, dev_conf->nb_event_queues, info.max_event_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_ports is in limit */\n+\tif (!dev_conf->nb_event_ports) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_event_ports cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_ports > info.max_event_ports) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d > max_event_ports= %d\",\n+\t\tdev_id, dev_conf->nb_event_ports, info.max_event_ports);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_queue_flows is in limit */\n+\tif (!dev_conf->nb_event_queue_flows) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_flows cannot be zero\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_flows=%x > max_flows=%x\",\n+\t\tdev_id, 
dev_conf->nb_event_queue_flows,\n+\t\tinfo.max_event_queue_flows);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_port_dequeue_depth is in limit */\n+\tif (!dev_conf->nb_event_port_dequeue_depth) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_dequeue_depth cannot be zero\",\n+\t\t\t\t\tdev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_port_dequeue_depth >\n+\t\t\t info.max_event_port_dequeue_depth) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_dq_depth=%d > max_dq_depth=%d\",\n+\t\tdev_id, dev_conf->nb_event_port_dequeue_depth,\n+\t\tinfo.max_event_port_dequeue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_event_port_enqueue_depth is in limit */\n+\tif (!dev_conf->nb_event_port_enqueue_depth) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_enqueue_depth cannot be zero\",\n+\t\t\t\t\tdev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_port_enqueue_depth >\n+\t\t\t info.max_event_port_enqueue_depth) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_enq_depth=%d > max_enq_depth=%d\",\n+\t\tdev_id, dev_conf->nb_event_port_enqueue_depth,\n+\t\tinfo.max_event_port_enqueue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Copy the dev_conf parameter into the dev structure */\n+\tmemcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));\n+\n+\t/* Setup new number of queues and reconfigure device. */\n+\tdiag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);\n+\tif (diag != 0) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d rte_event_dev_queue_config = %d\",\n+\t\t\t\tdev_id, diag);\n+\t\treturn diag;\n+\t}\n+\n+\t/* Setup new number of ports and reconfigure device. 
*/\n+\tdiag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);\n+\tif (diag != 0) {\n+\t\trte_event_dev_queue_config(dev, 0);\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d rte_event_dev_port_config = %d\",\n+\t\t\t\tdev_id, diag);\n+\t\treturn diag;\n+\t}\n+\n+\t/* Configure the device */\n+\tdiag = (*dev->dev_ops->dev_configure)(dev);\n+\tif (diag != 0) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d dev_configure = %d\", dev_id, diag);\n+\t\trte_event_dev_queue_config(dev, 0);\n+\t\trte_event_dev_port_config(dev, 0);\n+\t}\n+\n+\tdev->data->event_dev_cap = info.event_dev_cap;\n+\treturn diag;\n+}\n+\n+static inline int\n+is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)\n+{\n+\tif (queue_id < dev->data->nb_queues && queue_id <\n+\t\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV)\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+int\n+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,\n+\t\t\t\t struct rte_event_queue_conf *queue_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (queue_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (!is_valid_queue(dev, queue_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid queue_id=%\" PRIu8, queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);\n+\tmemset(queue_conf, 0, sizeof(struct rte_event_queue_conf));\n+\t(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);\n+\treturn 0;\n+}\n+\n+static inline int\n+is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)\n+{\n+\tif (queue_conf && (\n+\t\t((queue_conf->event_queue_cfg &\n+\t\t\tRTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)\n+\t\t\t== RTE_EVENT_QUEUE_CFG_FLAG_ALL_TYPES) ||\n+\t\t((queue_conf->event_queue_cfg &\n+\t\t\tRTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)\n+\t\t\t== RTE_EVENT_QUEUE_CFG_FLAG_ATOMIC_ONLY)\n+\t\t))\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+static inline int\n+is_valid_ordered_queue_conf(const struct 
rte_event_queue_conf *queue_conf)\n+{\n+\tif (queue_conf && (\n+\t\t((queue_conf->event_queue_cfg &\n+\t\t\tRTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)\n+\t\t\t== RTE_EVENT_QUEUE_CFG_FLAG_ALL_TYPES) ||\n+\t\t((queue_conf->event_queue_cfg &\n+\t\t\tRTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)\n+\t\t\t== RTE_EVENT_QUEUE_CFG_FLAG_ORDERED_ONLY)\n+\t\t))\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+\n+int\n+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,\n+\t\t      const struct rte_event_queue_conf *queue_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_queue_conf def_conf;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (!is_valid_queue(dev, queue_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid queue_id=%\" PRIu8, queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check nb_atomic_flows limit */\n+\tif (is_valid_atomic_queue_conf(queue_conf)) {\n+\t\tif (queue_conf->nb_atomic_flows == 0 ||\n+\t\t    queue_conf->nb_atomic_flows >\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows) {\n+\t\t\tRTE_EDEV_LOG_ERR(\n+\t\t\"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d\",\n+\t\t\tdev_id, queue_id, queue_conf->nb_atomic_flows,\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* Check nb_atomic_order_sequences limit */\n+\tif (is_valid_ordered_queue_conf(queue_conf)) {\n+\t\tif (queue_conf->nb_atomic_order_sequences == 0 ||\n+\t\t    queue_conf->nb_atomic_order_sequences >\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows) {\n+\t\t\tRTE_EDEV_LOG_ERR(\n+\t\t\"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d\",\n+\t\t\tdev_id, queue_id, queue_conf->nb_atomic_order_sequences,\n+\t\t\tdev->data->dev_conf.nb_event_queue_flows);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\tif (dev->data->dev_started) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t    \"device %d must be stopped to allow queue setup\", dev_id);\n+\t\treturn 
-EBUSY;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);\n+\n+\tif (queue_conf == NULL) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,\n+\t\t\t\t\t-ENOTSUP);\n+\t\t(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);\n+\t\tdef_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_FLAG_DEFAULT;\n+\t\tqueue_conf = &def_conf;\n+\t}\n+\n+\tdev->data->queues_prio[queue_id] = queue_conf->priority;\n+\treturn (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);\n+}\n+\n+uint8_t\n+rte_event_queue_count(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->nb_queues;\n+}\n+\n+uint8_t\n+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\tif (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_FLAG_QUEUE_QOS)\n+\t\treturn dev->data->queues_prio[queue_id];\n+\telse\n+\t\treturn RTE_EVENT_DEV_PRIORITY_NORMAL;\n+}\n+\n+static inline int\n+is_valid_port(struct rte_eventdev *dev, uint8_t port_id)\n+{\n+\tif (port_id < dev->data->nb_ports)\n+\t\treturn 1;\n+\telse\n+\t\treturn 0;\n+}\n+\n+int\n+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,\n+\t\t\t\t struct rte_event_port_conf *port_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (port_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);\n+\tmemset(port_conf, 0, sizeof(struct rte_event_port_conf));\n+\t(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);\n+\treturn 0;\n+}\n+\n+int\n+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,\n+\t\t     const struct rte_event_port_conf *port_conf)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_port_conf 
def_conf;\n+\tint diag;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check new_event_threshold limit */\n+\tif ((port_conf && !port_conf->new_event_threshold) ||\n+\t\t\t(port_conf && port_conf->new_event_threshold >\n+\t\t\t\t dev->data->dev_conf.nb_events_limit)) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t   \"dev%d port%d Invalid event_threshold=%d nb_events_limit=%d\",\n+\t\t\tdev_id, port_id, port_conf->new_event_threshold,\n+\t\t\tdev->data->dev_conf.nb_events_limit);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check dequeue_depth limit */\n+\tif ((port_conf && !port_conf->dequeue_depth) ||\n+\t\t\t(port_conf && port_conf->dequeue_depth >\n+\t\tdev->data->dev_conf.nb_event_port_dequeue_depth)) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t   \"dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d\",\n+\t\t\tdev_id, port_id, port_conf->dequeue_depth,\n+\t\t\tdev->data->dev_conf.nb_event_port_dequeue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check enqueue_depth limit */\n+\tif ((port_conf && !port_conf->enqueue_depth) ||\n+\t\t\t(port_conf && port_conf->enqueue_depth >\n+\t\tdev->data->dev_conf.nb_event_port_enqueue_depth)) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t   \"dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d\",\n+\t\t\tdev_id, port_id, port_conf->enqueue_depth,\n+\t\t\tdev->data->dev_conf.nb_event_port_enqueue_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dev->data->dev_started) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t    \"device %d must be stopped to allow port setup\", dev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);\n+\n+\tif (port_conf == NULL) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,\n+\t\t\t\t\t-ENOTSUP);\n+\t\t(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);\n+\t\tport_conf = 
&def_conf;\n+\t}\n+\n+\tdev->data->ports_dequeue_depth[port_id] =\n+\t\t\tport_conf->dequeue_depth;\n+\tdev->data->ports_enqueue_depth[port_id] =\n+\t\t\tport_conf->enqueue_depth;\n+\n+\tdiag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);\n+\n+\t/* Unlink all the queues from this port(default state after setup) */\n+\tif (!diag)\n+\t\tdiag = rte_event_port_unlink(dev_id, port_id, NULL, 0);\n+\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\treturn 0;\n+}\n+\n+uint8_t\n+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->ports_dequeue_depth[port_id];\n+}\n+\n+uint8_t\n+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->ports_enqueue_depth[port_id];\n+}\n+\n+uint8_t\n+rte_event_port_count(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\treturn dev->data->nb_ports;\n+}\n+\n+int\n+rte_event_port_link(uint8_t dev_id, uint8_t port_id,\n+\t\t    const struct rte_event_queue_link link[],\n+\t\t    uint16_t nb_links)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_queue_link all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];\n+\tuint16_t *links_map;\n+\tint i, diag;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (link == NULL) {\n+\t\tfor (i = 0; i < dev->data->nb_queues; i++) {\n+\t\t\tall_queues[i].queue_id = i;\n+\t\t\tall_queues[i].priority =\n+\t\t\t\tRTE_EVENT_DEV_PRIORITY_NORMAL;\n+\t\t}\n+\t\tlink = all_queues;\n+\t\tnb_links = dev->data->nb_queues;\n+\t}\n+\n+\tfor (i = 0; i < nb_links; i++)\n+\t\tif (link[i].queue_id >= RTE_EVENT_MAX_QUEUES_PER_DEV)\n+\t\t\treturn 
-EINVAL;\n+\n+\tdiag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], link,\n+\t\t\t\t\t\t nb_links);\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\tlinks_map = dev->data->links_map;\n+\t/* Point links_map to this port specific area */\n+\tlinks_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);\n+\tfor (i = 0; i < diag; i++)\n+\t\tlinks_map[link[i].queue_id] = (uint8_t)link[i].priority;\n+\n+\treturn diag;\n+}\n+\n+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)\n+\n+int\n+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,\n+\t\t      uint8_t queues[], uint16_t nb_unlinks)\n+{\n+\tstruct rte_eventdev *dev;\n+\tuint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];\n+\tint i, diag;\n+\tuint16_t *links_map;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);\n+\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (queues == NULL) {\n+\t\tfor (i = 0; i < dev->data->nb_queues; i++)\n+\t\t\tall_queues[i] = i;\n+\t\tqueues = all_queues;\n+\t\tnb_unlinks = dev->data->nb_queues;\n+\t}\n+\n+\tfor (i = 0; i < nb_unlinks; i++)\n+\t\tif (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)\n+\t\t\treturn -EINVAL;\n+\n+\tdiag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,\n+\t\t\t\t\tnb_unlinks);\n+\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\tlinks_map = dev->data->links_map;\n+\t/* Point links_map to this port specific area */\n+\tlinks_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);\n+\tfor (i = 0; i < diag; i++)\n+\t\tlinks_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;\n+\n+\treturn diag;\n+}\n+\n+int\n+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,\n+\t\t\tstruct rte_event_queue_link link[])\n+{\n+\tstruct rte_eventdev *dev;\n+\tuint16_t *links_map;\n+\tint i, count = 0;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = 
&rte_eventdevs[dev_id];\n+\tif (!is_valid_port(dev, port_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid port_id=%\" PRIu8, port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tlinks_map = dev->data->links_map;\n+\t/* Point links_map to this port specific area */\n+\tlinks_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);\n+\tfor (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {\n+\t\tif (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {\n+\t\t\tlink[count].queue_id = i;\n+\t\t\tlink[count].priority = (uint8_t)links_map[i];\n+\t\t\t++count;\n+\t\t}\n+\t}\n+\treturn count;\n+}\n+\n+int\n+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,\n+\t\t\t\t uint64_t *timeout_ticks)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);\n+\n+\tif (timeout_ticks == NULL)\n+\t\treturn -EINVAL;\n+\n+\t(*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);\n+\treturn 0;\n+}\n+\n+int\n+rte_event_dev_dump(uint8_t dev_id, FILE *f)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);\n+\n+\t(*dev->dev_ops->dump)(dev, f);\n+\treturn 0;\n+\n+}\n+\n+int\n+rte_event_dev_start(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\tint diag;\n+\n+\tRTE_EDEV_LOG_DEBUG(\"Start dev_id=%\" PRIu8, dev_id);\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);\n+\n+\tif (dev->data->dev_started != 0) {\n+\t\tRTE_EDEV_LOG_ERR(\"Device with dev_id=%\" PRIu8 \"already started\",\n+\t\t\tdev_id);\n+\t\treturn 0;\n+\t}\n+\n+\tdiag = (*dev->dev_ops->dev_start)(dev);\n+\tif (diag == 0)\n+\t\tdev->data->dev_started = 1;\n+\telse\n+\t\treturn diag;\n+\n+\treturn 0;\n+}\n+\n+void\n+rte_event_dev_stop(uint8_t dev_id)\n+{\n+\tstruct 
rte_eventdev *dev;\n+\n+\tRTE_EDEV_LOG_DEBUG(\"Stop dev_id=%\" PRIu8, dev_id);\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);\n+\n+\tif (dev->data->dev_started == 0) {\n+\t\tRTE_EDEV_LOG_ERR(\"Device with dev_id=%\" PRIu8 \"already stopped\",\n+\t\t\tdev_id);\n+\t\treturn;\n+\t}\n+\n+\tdev->data->dev_started = 0;\n+\t(*dev->dev_ops->dev_stop)(dev);\n+}\n+\n+int\n+rte_event_dev_close(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);\n+\n+\t/* Device must be stopped before it can be closed */\n+\tif (dev->data->dev_started == 1) {\n+\t\tRTE_EDEV_LOG_ERR(\"Device %u must be stopped before closing\",\n+\t\t\t\tdev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\treturn (*dev->dev_ops->dev_close)(dev);\n+}\ndiff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h\nindex 451bb5d..cefca98 100644\n--- a/lib/librte_eventdev/rte_eventdev.h\n+++ b/lib/librte_eventdev/rte_eventdev.h\n@@ -970,6 +970,8 @@ struct rte_event {\n \t};\n };\n \n+\n+struct rte_eventdev_driver;\n struct rte_eventdev_ops;\n struct rte_eventdev;\n \n@@ -991,6 +993,51 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],\n \t\tuint16_t nb_events, uint64_t timeout_ticks);\n /**< @internal Dequeue burst of events from port of a device */\n \n+#define RTE_EVENTDEV_NAME_MAX_LEN\t(64)\n+/**< @internal Max length of name of event PMD */\n+\n+/**\n+ * @internal\n+ * The data part, with no function pointers, associated with each device.\n+ *\n+ * This structure is safe to place in shared memory to be common among\n+ * different processes in a multi-process configuration.\n+ */\n+struct rte_eventdev_data {\n+\tint socket_id;\n+\t/**< Socket ID where memory is allocated */\n+\tuint8_t dev_id;\n+\t/**< Device ID for this instance 
*/\n+\tuint8_t nb_queues;\n+\t/**< Number of event queues. */\n+\tuint8_t nb_ports;\n+\t/**< Number of event ports. */\n+\tvoid **ports;\n+\t/**< Array of pointers to ports. */\n+\tuint8_t *ports_dequeue_depth;\n+\t/**< Array of port dequeue depth. */\n+\tuint8_t *ports_enqueue_depth;\n+\t/**< Array of port enqueue depth. */\n+\tvoid **queues;\n+\t/**< Array of pointers to queues. */\n+\tuint8_t *queues_prio;\n+\t/**< Array of queue priority. */\n+\tuint16_t *links_map;\n+\t/**< Memory to store queues to port connections. */\n+\tvoid *dev_private;\n+\t/**< PMD-specific private data */\n+\tuint32_t event_dev_cap;\n+\t/**< Event device capabilities(RTE_EVENT_DEV_CAP_FLAG)*/\n+\tstruct rte_event_dev_config dev_conf;\n+\t/**< Configuration applied to device. */\n+\n+\tRTE_STD_C11\n+\tuint8_t dev_started : 1;\n+\t/**< Device state: STARTED(1)/STOPPED(0) */\n+\n+\tchar name[RTE_EVENTDEV_NAME_MAX_LEN];\n+\t/**< Unique identifier name */\n+} __rte_cache_aligned;\n \n /** @internal The data structure associated with each event device. */\n struct rte_eventdev {\n@@ -1005,8 +1052,23 @@ struct rte_eventdev {\n \tevent_dequeue_burst_t dequeue_burst;\n \t/**< Pointer to PMD dequeue burst function. */\n \n+\tstruct rte_eventdev_data *data;\n+\t/**< Pointer to device data */\n+\tconst struct rte_eventdev_ops *dev_ops;\n+\t/**< Functions exported by PMD */\n+\tstruct rte_pci_device *pci_dev;\n+\t/**< PCI info. supplied by probing */\n+\tconst struct rte_eventdev_driver *driver;\n+\t/**< Driver for this device */\n+\n+\tRTE_STD_C11\n+\tuint8_t attached : 1;\n+\t/**< Flag indicating the device is attached */\n } __rte_cache_aligned;\n \n+extern struct rte_eventdev *rte_eventdevs;\n+/** @internal The pool of rte_eventdev structures. 
*/\n+\n \n /**\n  * Schedule one or more events in the event dev.\n@@ -1017,8 +1079,13 @@ struct rte_eventdev {\n  * @param dev_id\n  *   The identifier of the device.\n  */\n-void\n-rte_event_schedule(uint8_t dev_id);\n+static inline void\n+rte_event_schedule(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev = &rte_eventdevs[dev_id];\n+\tif (*dev->schedule)\n+\t\t(*dev->schedule)(dev);\n+}\n \n /**\n  * Enqueue a burst of events objects or an event object supplied in *rte_event*\n@@ -1053,9 +1120,23 @@ rte_event_schedule(uint8_t dev_id);\n  *\n  * @see rte_event_port_enqueue_depth()\n  */\n-uint16_t\n+static inline uint16_t\n rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],\n-\t\t\tuint16_t nb_events);\n+\t\t\tuint16_t nb_events)\n+{\n+\tstruct rte_eventdev *dev = &rte_eventdevs[dev_id];\n+\n+\t/*\n+\t * Allow zero cost non burst mode routine invocation if application\n+\t * requests nb_events as const one\n+\t */\n+\tif (nb_events == 1)\n+\t\treturn (*dev->enqueue)(\n+\t\t\tdev->data->ports[port_id], ev);\n+\telse\n+\t\treturn (*dev->enqueue_burst)(\n+\t\t\tdev->data->ports[port_id], ev, nb_events);\n+}\n \n /**\n  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()\n@@ -1147,9 +1228,24 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,\n  *\n  * @see rte_event_port_dequeue_depth()\n  */\n-uint16_t\n+static inline uint16_t\n rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],\n-\t\t\tuint16_t nb_events, uint64_t timeout_ticks);\n+\t\t\tuint16_t nb_events, uint64_t timeout_ticks)\n+{\n+\tstruct rte_eventdev *dev = &rte_eventdevs[dev_id];\n+\n+\t/*\n+\t * Allow zero cost non burst mode routine invocation if application\n+\t * requests nb_events as const one\n+\t */\n+\tif (nb_events == 1)\n+\t\treturn (*dev->dequeue)(\n+\t\t\tdev->data->ports[port_id], ev, timeout_ticks);\n+\telse\n+\t\treturn (*dev->dequeue_burst)(\n+\t\t\tdev->data->ports[port_id], ev, 
nb_events,\n+\t\t\t\ttimeout_ticks);\n+}\n \n /** Structure to hold the queue to port link establishment attributes */\n struct rte_event_queue_link {\ndiff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h\nindex 0b04ab7..7d94031 100644\n--- a/lib/librte_eventdev/rte_eventdev_pmd.h\n+++ b/lib/librte_eventdev/rte_eventdev_pmd.h\n@@ -44,8 +44,117 @@\n extern \"C\" {\n #endif\n \n+#include <string.h>\n+\n+#include <rte_dev.h>\n+#include <rte_pci.h>\n+#include <rte_malloc.h>\n+#include <rte_log.h>\n+#include <rte_common.h>\n+\n #include \"rte_eventdev.h\"\n \n+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG\n+#define RTE_PMD_DEBUG_TRACE(...) \\\n+\trte_pmd_debug_trace(__func__, __VA_ARGS__)\n+#else\n+#define RTE_PMD_DEBUG_TRACE(...)\n+#endif\n+\n+/* Logging Macros */\n+#define RTE_EDEV_LOG_ERR(fmt, args...) \\\n+\tRTE_LOG(ERR, EVENTDEV, \"%s() line %u: \" fmt \"\\n\",  \\\n+\t\t\t__func__, __LINE__, ## args)\n+\n+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG\n+#define RTE_EDEV_LOG_DEBUG(fmt, args...) \\\n+\tRTE_LOG(DEBUG, EVENTDEV, \"%s() line %u: \" fmt \"\\n\",  \\\n+\t\t\t__func__, __LINE__, ## args)\n+#else\n+#define RTE_EDEV_LOG_DEBUG(fmt, args...) 
(void)0\n+#endif\n+\n+/* Macros to check for valid device */\n+#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \\\n+\tif (!rte_event_pmd_is_valid_dev((dev_id))) { \\\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid dev_id=%d\\n\", dev_id); \\\n+\t\treturn retval; \\\n+\t} \\\n+} while (0)\n+\n+#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \\\n+\tif (!rte_event_pmd_is_valid_dev((dev_id))) { \\\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid dev_id=%d\\n\", dev_id); \\\n+\t\treturn; \\\n+\t} \\\n+} while (0)\n+\n+#define RTE_EVENTDEV_DETACHED  (0)\n+#define RTE_EVENTDEV_ATTACHED  (1)\n+\n+/** Global structure used for maintaining state of allocated event devices */\n+struct rte_eventdev_global {\n+\tuint8_t nb_devs;\t/**< Number of devices found */\n+\tuint8_t max_devs;\t/**< Max number of devices */\n+};\n+\n+extern struct rte_eventdev_global *rte_eventdev_globals;\n+/** Pointer to global event devices data structure. */\n+extern struct rte_eventdev *rte_eventdevs;\n+/** The pool of rte_eventdev structures. 
*/\n+\n+/**\n+ * Get the rte_eventdev structure device pointer for the named device.\n+ *\n+ * @param name\n+ *   device name to select the device structure.\n+ *\n+ * @return\n+ *   - The rte_eventdev structure pointer for the given device ID.\n+ */\n+static inline struct rte_eventdev *\n+rte_event_pmd_get_named_dev(const char *name)\n+{\n+\tstruct rte_eventdev *dev;\n+\tunsigned int i;\n+\n+\tif (name == NULL)\n+\t\treturn NULL;\n+\n+\tfor (i = 0, dev = &rte_eventdevs[i];\n+\t\t\ti < rte_eventdev_globals->max_devs; i++) {\n+\t\tif ((dev->attached == RTE_EVENTDEV_ATTACHED) &&\n+\t\t\t\t(strcmp(dev->data->name, name) == 0))\n+\t\t\treturn dev;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * Validate if the event device index is valid attached event device.\n+ *\n+ * @param dev_id\n+ *   Event device index.\n+ *\n+ * @return\n+ *   - If the device index is valid (1) or not (0).\n+ */\n+static inline unsigned\n+rte_event_pmd_is_valid_dev(uint8_t dev_id)\n+{\n+\tstruct rte_eventdev *dev;\n+\n+\tif (dev_id >= rte_eventdev_globals->nb_devs)\n+\t\treturn 0;\n+\n+\tdev = &rte_eventdevs[dev_id];\n+\tif (dev->attached != RTE_EVENTDEV_ATTACHED)\n+\t\treturn 0;\n+\telse\n+\t\treturn 1;\n+}\n+\n /**\n  * Definitions of all functions exported by a driver through the\n  * the generic structure of type *event_dev_ops* supplied in the\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nnew file mode 100644\nindex 0000000..3cae03d\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -0,0 +1,33 @@\n+DPDK_17.02 
{\n+\tglobal:\n+\n+\trte_eventdevs;\n+\n+\trte_event_dev_count;\n+\trte_event_dev_get_dev_id;\n+\trte_event_dev_socket_id;\n+\trte_event_dev_info_get;\n+\trte_event_dev_configure;\n+\trte_event_dev_start;\n+\trte_event_dev_stop;\n+\trte_event_dev_close;\n+\trte_event_dev_dump;\n+\n+\trte_event_port_default_conf_get;\n+\trte_event_port_setup;\n+\trte_event_port_dequeue_depth;\n+\trte_event_port_enqueue_depth;\n+\trte_event_port_count;\n+\trte_event_port_link;\n+\trte_event_port_unlink;\n+\trte_event_port_links_get;\n+\n+\trte_event_queue_default_conf_get;\n+\trte_event_queue_setup;\n+\trte_event_queue_count;\n+\trte_event_queue_priority;\n+\n+\trte_event_dequeue_timeout_ticks;\n+\n+\tlocal: *;\n+};\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex f75f0e2..716725a 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -93,6 +93,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF)           += -lrte_mbuf\n _LDLIBS-$(CONFIG_RTE_LIBRTE_NET)            += -lrte_net\n _LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER)          += -lrte_ethdev\n _LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV)      += -lrte_cryptodev\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV)       += -lrte_eventdev\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL)        += -lrte_mempool\n _LDLIBS-$(CONFIG_RTE_LIBRTE_RING)           += -lrte_ring\n _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrte_eal\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "3/6"
    ]
}