get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
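
A patch can be retrieved with any HTTP client. The sketch below is a
minimal example, assuming Python with the requests package installed;
the patch ID (72292) matches the example response that follows, and the
API serves JSON to non-browser clients by default.

    # Minimal sketch: fetch one patch from the Patchwork REST API.
    import requests

    BASE = "http://patches.dpdk.org/api"

    def get_patch(patch_id):
        """GET /api/patches/{id}/ and return the decoded JSON body."""
        resp = requests.get(f"{BASE}/patches/{patch_id}/")
        resp.raise_for_status()  # surface 4xx/5xx as exceptions
        return resp.json()

    patch = get_patch(72292)
    print(patch["name"], patch["state"], patch["check"])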

GET /api/patches/72292/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 72292,
    "url": "http://patches.dpdk.org/api/patches/72292/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1593232671-5690-2-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1593232671-5690-2-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1593232671-5690-2-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-06-27T04:37:25",
    "name": "[01/27] eventdev: dlb upstream prerequisites",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b18b102d500eb1886cf4b5e745e5802b682a4f83",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1593232671-5690-2-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 10644,
            "url": "http://patches.dpdk.org/api/series/10644/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=10644",
            "date": "2020-06-27T04:37:24",
            "name": "event/dlb Intel DLB PMD",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/10644/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/72292/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/72292/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3FC78A0520;\n\tSat, 27 Jun 2020 06:40:18 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 98B621BEC7;\n\tSat, 27 Jun 2020 06:40:03 +0200 (CEST)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by dpdk.org (Postfix) with ESMTP id 797751BDAC\n for <dev@dpdk.org>; Sat, 27 Jun 2020 06:39:57 +0200 (CEST)",
            "from fmsmga006.fm.intel.com ([10.253.24.20])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 26 Jun 2020 21:39:56 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by fmsmga006.fm.intel.com with ESMTP; 26 Jun 2020 21:39:55 -0700"
        ],
        "IronPort-SDR": [
            "\n tRiaKlhPCnZtC/pxbh2nqIFKbaoqiwbQN0enkLVXDfRkCMMIkL5PFHaQxa+7MjPZFU9i7GMBDm\n jjPAGESIB6Qg==",
            "\n mgzqNP6kWj+58VlGTOaBCNnt7NceaCcNsBLKJYus7h+klCZXKxnAifkNNTi/4utF3qv8E18qv+\n ggf8JC1p46UA=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9664\"; a=\"125752942\"",
            "E=Sophos;i=\"5.75,286,1589266800\"; d=\"scan'208\";a=\"125752942\"",
            "E=Sophos;i=\"5.75,286,1589266800\"; d=\"scan'208\";a=\"480022887\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Tim McDaniel <timothy.mcdaniel@intel.com>",
        "To": "jerinj@marvell.com",
        "Cc": "mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,\n harry.van.haaren@intel.com,\n \"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "Date": "Fri, 26 Jun 2020 23:37:25 -0500",
        "Message-Id": "<1593232671-5690-2-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 01/27] eventdev: dlb upstream prerequisites",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: \"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>\n\nThe DLB hardware does not conform exactly to the eventdev interface.\n1) It has a limit on the number of queues that may be linked to a port.\n2) Some ports a further restricted to a maximum of 1 linked queue.\n3) It does not (currently) have the ability to carry the flow_id as part\nof the event (QE) payload.\n\nDue to the above, we would like to propose the following enhancements.\n\n1) Add new fields to the rte_event_dev_info struct. These fields allow\nthe device to advertize its capabilities so that applications can take\nthe appropriate actions based on those capabilities.\n\n    struct rte_event_dev_info {\n\tuint32_t max_event_port_links;\n\t/**< Maximum number of queues that can be linked to a single event\n\t * port by this device.\n\t */\n\n\tuint8_t max_single_link_event_port_queue_pairs;\n\t/**< Maximum number of event ports and queues that are optimized for\n\t * (and only capable of) single-link configurations supported by this\n\t * device. These ports and queues are not accounted for in\n\t * max_event_ports or max_event_queues.\n\t */\n    }\n\n2) Add a new field to the rte_event_dev_config struct. This field allows the\napplication to specify how many of its ports are limited to a single link,\nor will be used in single link mode.\n\n    /** Event device configuration structure */\n    struct rte_event_dev_config {\n\tuint8_t nb_single_link_event_port_queues;\n\t/**< Number of event ports and queues that will be singly-linked to\n\t * each other. These are a subset of the overall event ports and\n\t * queues; this value cannot exceed *nb_event_ports* or\n\t * *nb_event_queues*. If the device has ports and queues that are\n\t * optimized for single-link usage, this field is a hint for how many\n\t * to allocate; otherwise, regular event ports and queues can be used.\n\t */\n    }\n\n3) Replace the dedicated implicit_release_disabled field with a bit field\nof explicit port capabilities. The implicit_release_disable functionality\nis assiged to one bit, and a port-is-single-link-only  attribute is\nassigned to other, with the remaining bits available for future assignment.\n\n\t* Event port configuration bitmap flags */\n\t#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)\n\t/**< Configure the port not to release outstanding events in\n\t * rte_event_dev_dequeue_burst(). If set, all events received through\n\t * the port must be explicitly released with RTE_EVENT_OP_RELEASE or\n\t * RTE_EVENT_OP_FORWARD. Must be unset if the device is not\n\t * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.\n\t */\n\t#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)\n\n\t/**< This event port links only to a single event queue.\n\t *\n\t *  @see rte_event_port_setup(), rte_event_port_link()\n\t */\n\n\t#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3\n\t/**\n\t * The implicit release disable attribute of the port\n\t */\n\n\tstruct rte_event_port_conf {\n\t\tuint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */\n\t}\n\n4) Add UMWAIT/UMONITOR bit to rte_cpuflags\n\n5) Added a new API that is useful for probing PCI devices.\n\n\t/**\n\t * @internal\n\t * Wrapper for use by pci drivers as a .probe function to attach to a event\n\t * interface.  
Same as rte_event_pmd_pci_probe, except caller can specify\n\t * the name.\n\t */\n\tstatic inline int\n\trte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,\n\t\t\t\t    struct rte_pci_device *pci_dev,\n\t\t\t\t    size_t private_data_size,\n\t\t\t\t    eventdev_pmd_pci_callback_t devinit,\n\t\t\t\t    const char *name);\n\nSigned-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>\n---\n app/test-eventdev/evt_common.h                     |    1 +\n app/test-eventdev/test_order_atq.c                 |    4 +\n app/test-eventdev/test_order_common.c              |    6 +-\n app/test-eventdev/test_order_queue.c               |    4 +\n app/test-eventdev/test_perf_atq.c                  |    1 +\n app/test-eventdev/test_perf_queue.c                |    1 +\n app/test-eventdev/test_pipeline_atq.c              |    1 +\n app/test-eventdev/test_pipeline_queue.c            |    1 +\n app/test/test_eventdev.c                           |    4 +-\n drivers/event/dpaa2/dpaa2_eventdev.c               |    2 +-\n drivers/event/octeontx/ssovf_evdev.c               |    2 +-\n drivers/event/skeleton/skeleton_eventdev.c         |    2 +-\n drivers/event/sw/sw_evdev.c                        |    5 +-\n drivers/event/sw/sw_evdev_selftest.c               |    9 +-\n .../eventdev_pipeline/pipeline_worker_generic.c    |    8 +-\n examples/eventdev_pipeline/pipeline_worker_tx.c    |    3 +\n examples/l2fwd-event/l2fwd_event_generic.c         |    5 +-\n examples/l2fwd-event/l2fwd_event_internal_port.c   |    5 +-\n examples/l3fwd/l3fwd_event_generic.c               |    5 +-\n examples/l3fwd/l3fwd_event_internal_port.c         |    5 +-\n lib/librte_eal/x86/include/rte_cpuflags.h          |    1 +\n lib/librte_eal/x86/rte_cpuflags.c                  |    1 +\n lib/librte_eventdev/meson.build                    |    1 +\n lib/librte_eventdev/rte_event_eth_tx_adapter.c     |    2 +-\n lib/librte_eventdev/rte_eventdev.c                 |  198 ++++++++++++++++++--\n lib/librte_eventdev/rte_eventdev.h                 |  198 ++++++++++++++++++++\n lib/librte_eventdev/rte_eventdev_pmd_pci.h         |   54 ++++++\n lib/librte_eventdev/rte_eventdev_version.map       |   13 +-\n 28 files changed, 507 insertions(+), 35 deletions(-)",
    "diff": "diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h\nindex f9d7378..120c27b 100644\n--- a/app/test-eventdev/evt_common.h\n+++ b/app/test-eventdev/evt_common.h\n@@ -169,6 +169,7 @@ struct evt_options {\n \t\t\t.dequeue_timeout_ns = opt->deq_tmo_nsec,\n \t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n+\t\t\t.nb_single_link_event_port_queues = 0,\n \t\t\t.nb_events_limit  = info.max_num_events,\n \t\t\t.nb_event_queue_flows = opt->nb_flows,\n \t\t\t.nb_event_port_dequeue_depth =\ndiff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c\nindex 3366cfc..8246b96 100644\n--- a/app/test-eventdev/test_order_atq.c\n+++ b/app/test-eventdev/test_order_atq.c\n@@ -34,6 +34,8 @@\n \t\t\tcontinue;\n \t\t}\n \n+\t\tev.flow_id = ev.mbuf->udata64;\n+\n \t\tif (ev.sub_event_type == 0) { /* stage 0 from producer */\n \t\t\torder_atq_process_stage_0(&ev);\n \t\t\twhile (rte_event_enqueue_burst(dev_id, port, &ev, 1)\n@@ -68,6 +70,8 @@\n \t\t}\n \n \t\tfor (i = 0; i < nb_rx; i++) {\n+\t\t\tev[i].flow_id = ev[i].mbuf->udata64;\n+\n \t\t\tif (ev[i].sub_event_type == 0) { /*stage 0 */\n \t\t\t\torder_atq_process_stage_0(&ev[i]);\n \t\t\t} else if (ev[i].sub_event_type == 1) { /* stage 1 */\ndiff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c\nindex 4190f9a..c6fcd05 100644\n--- a/app/test-eventdev/test_order_common.c\n+++ b/app/test-eventdev/test_order_common.c\n@@ -49,6 +49,7 @@\n \t\tconst uint32_t flow = (uintptr_t)m % nb_flows;\n \t\t/* Maintain seq number per flow */\n \t\tm->seqn = producer_flow_seq[flow]++;\n+\t\tm->udata64 = flow;\n \n \t\tev.flow_id = flow;\n \t\tev.mbuf = m;\n@@ -318,10 +319,11 @@\n \t\topt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;\n \n \t/* port configuration */\n-\tconst struct rte_event_port_conf p_conf = {\n+\tstruct rte_event_port_conf p_conf = {\n \t\t\t.dequeue_depth = opt->wkr_deq_dep,\n \t\t\t.enqueue_depth = dev_info.max_event_port_dequeue_depth,\n \t\t\t.new_event_threshold = dev_info.max_num_events,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \n \t/* setup one port per worker, linking to all queues */\n@@ -351,6 +353,8 @@\n \tp->queue_id = 0;\n \tp->t = t;\n \n+\tp_conf.new_event_threshold /= 2;\n+\n \tret = rte_event_port_setup(opt->dev_id, port, &p_conf);\n \tif (ret) {\n \t\tevt_err(\"failed to setup producer port %d\", port);\ndiff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c\nindex 495efd9..a0a2187 100644\n--- a/app/test-eventdev/test_order_queue.c\n+++ b/app/test-eventdev/test_order_queue.c\n@@ -34,6 +34,8 @@\n \t\t\tcontinue;\n \t\t}\n \n+\t\tev.flow_id = ev.mbuf->udata64;\n+\n \t\tif (ev.queue_id == 0) { /* from ordered queue */\n \t\t\torder_queue_process_stage_0(&ev);\n \t\t\twhile (rte_event_enqueue_burst(dev_id, port, &ev, 1)\n@@ -68,6 +70,8 @@\n \t\t}\n \n \t\tfor (i = 0; i < nb_rx; i++) {\n+\t\t\tev[i].flow_id = ev[i].mbuf->udata64;\n+\n \t\t\tif (ev[i].queue_id == 0) { /* from ordered queue */\n \t\t\t\torder_queue_process_stage_0(&ev[i]);\n \t\t\t} else if (ev[i].queue_id == 1) {/* from atomic queue */\ndiff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c\nindex 8fd5100..10846f2 100644\n--- a/app/test-eventdev/test_perf_atq.c\n+++ b/app/test-eventdev/test_perf_atq.c\n@@ -204,6 +204,7 @@\n \t\t\t.dequeue_depth = opt->wkr_deq_dep,\n \t\t\t.enqueue_depth = dev_info.max_event_port_dequeue_depth,\n \t\t\t.new_event_threshold = 
dev_info.max_num_events,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \n \tret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,\ndiff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c\nindex f4ea3a7..a0119da 100644\n--- a/app/test-eventdev/test_perf_queue.c\n+++ b/app/test-eventdev/test_perf_queue.c\n@@ -219,6 +219,7 @@\n \t\t\t.dequeue_depth = opt->wkr_deq_dep,\n \t\t\t.enqueue_depth = dev_info.max_event_port_dequeue_depth,\n \t\t\t.new_event_threshold = dev_info.max_num_events,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \n \tret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,\ndiff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c\nindex 8e8686c..a95ec0a 100644\n--- a/app/test-eventdev/test_pipeline_atq.c\n+++ b/app/test-eventdev/test_pipeline_atq.c\n@@ -356,6 +356,7 @@\n \t\t.dequeue_depth = opt->wkr_deq_dep,\n \t\t.enqueue_depth = info.max_event_port_dequeue_depth,\n \t\t.new_event_threshold = info.max_num_events,\n+\t\t.event_port_cfg = 0,\n \t};\n \n \tif (!t->internal_port)\ndiff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c\nindex 7bebac3..30817dc 100644\n--- a/app/test-eventdev/test_pipeline_queue.c\n+++ b/app/test-eventdev/test_pipeline_queue.c\n@@ -379,6 +379,7 @@\n \t\t\t.dequeue_depth = opt->wkr_deq_dep,\n \t\t\t.enqueue_depth = info.max_event_port_dequeue_depth,\n \t\t\t.new_event_threshold = info.max_num_events,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \n \tif (!t->internal_port) {\ndiff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c\nindex 43ccb1c..62019c1 100644\n--- a/app/test/test_eventdev.c\n+++ b/app/test/test_eventdev.c\n@@ -559,10 +559,10 @@\n \tif (!(info.event_dev_cap &\n \t      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {\n \t\tpconf.enqueue_depth = info.max_event_port_enqueue_depth;\n-\t\tpconf.disable_implicit_release = 1;\n+\t\tpconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \t\tret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);\n \t\tTEST_ASSERT(ret == -EINVAL, \"Expected -EINVAL, %d\", ret);\n-\t\tpconf.disable_implicit_release = 0;\n+\t\tpconf.event_port_cfg = 0;\n \t}\n \n \tret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,\ndiff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c\nindex a196ad4..8568bfc 100644\n--- a/drivers/event/dpaa2/dpaa2_eventdev.c\n+++ b/drivers/event/dpaa2/dpaa2_eventdev.c\n@@ -537,7 +537,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,\n \t\tDPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;\n \tport_conf->enqueue_depth =\n \t\tDPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static int\ndiff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c\nindex 1b1a5d9..99c0b2e 100644\n--- a/drivers/event/octeontx/ssovf_evdev.c\n+++ b/drivers/event/octeontx/ssovf_evdev.c\n@@ -224,7 +224,7 @@ struct ssovf_mbox_convert_ns_getworks_iter {\n \tport_conf->new_event_threshold = edev->max_num_events;\n \tport_conf->dequeue_depth = 1;\n \tport_conf->enqueue_depth = 1;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static void\ndiff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c\nindex c889220..37d569b 100644\n--- a/drivers/event/skeleton/skeleton_eventdev.c\n+++ b/drivers/event/skeleton/skeleton_eventdev.c\n@@ -209,7 +209,7 @@\n 
\tport_conf->new_event_threshold = 32 * 1024;\n \tport_conf->dequeue_depth = 16;\n \tport_conf->enqueue_depth = 16;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static void\ndiff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c\nindex fb8e8be..0b3dd9c 100644\n--- a/drivers/event/sw/sw_evdev.c\n+++ b/drivers/event/sw/sw_evdev.c\n@@ -175,7 +175,8 @@\n \t}\n \n \tp->inflight_max = conf->new_event_threshold;\n-\tp->implicit_release = !conf->disable_implicit_release;\n+\tp->implicit_release = !(conf->event_port_cfg &\n+\t\t\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);\n \n \t/* check if ring exists, same as rx_worker above */\n \tsnprintf(buf, sizeof(buf), \"sw%d_p%u, %s\", dev->data->dev_id,\n@@ -508,7 +509,7 @@\n \tport_conf->new_event_threshold = 1024;\n \tport_conf->dequeue_depth = 16;\n \tport_conf->enqueue_depth = 16;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static int\ndiff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c\nindex 38c21fa..a78d6cd 100644\n--- a/drivers/event/sw/sw_evdev_selftest.c\n+++ b/drivers/event/sw/sw_evdev_selftest.c\n@@ -172,7 +172,7 @@ struct test {\n \t\t\t.new_event_threshold = 1024,\n \t\t\t.dequeue_depth = 32,\n \t\t\t.enqueue_depth = 64,\n-\t\t\t.disable_implicit_release = 0,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \tif (num_ports > MAX_PORTS)\n \t\treturn -1;\n@@ -1227,7 +1227,7 @@ struct test_event_dev_stats {\n \t\t\t\t.new_event_threshold = 128,\n \t\t\t\t.dequeue_depth = 32,\n \t\t\t\t.enqueue_depth = 64,\n-\t\t\t\t.disable_implicit_release = 0,\n+\t\t\t\t.event_port_cfg = 0,\n \t\t};\n \t\tif (rte_event_port_setup(evdev, 0, &port_conf) < 0) {\n \t\t\tprintf(\"%d Error setting up port\\n\", __LINE__);\n@@ -1317,7 +1317,7 @@ struct test_event_dev_stats {\n \t\t.new_event_threshold = 128,\n \t\t.dequeue_depth = 32,\n \t\t.enqueue_depth = 64,\n-\t\t.disable_implicit_release = 0,\n+\t\t.event_port_cfg = 0,\n \t};\n \tif (rte_event_port_setup(evdev, 0, &port_conf) < 0) {\n \t\tprintf(\"%d Error setting up port\\n\", __LINE__);\n@@ -3079,7 +3079,8 @@ struct test_event_dev_stats {\n \t * only be initialized once - and this needs to be set for multiple runs\n \t */\n \tconf.new_event_threshold = 512;\n-\tconf.disable_implicit_release = disable_implicit_release;\n+\tconf.event_port_cfg = disable_implicit_release ?\n+\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;\n \n \tif (rte_event_port_setup(evdev, 0, &conf) < 0) {\n \t\tprintf(\"Error setting up RX port\\n\");\ndiff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c\nindex 42ff4ee..a091da3 100644\n--- a/examples/eventdev_pipeline/pipeline_worker_generic.c\n+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c\n@@ -129,6 +129,7 @@\n \tstruct rte_event_dev_config config = {\n \t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n+\t\t\t.nb_single_link_event_port_queues = 1,\n \t\t\t.nb_events_limit  = 4096,\n \t\t\t.nb_event_queue_flows = 1024,\n \t\t\t.nb_event_port_dequeue_depth = 128,\n@@ -138,12 +139,13 @@\n \t\t\t.dequeue_depth = cdata.worker_cq_depth,\n \t\t\t.enqueue_depth = 64,\n \t\t\t.new_event_threshold = 4096,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \tstruct rte_event_queue_conf wkr_q_conf = {\n \t\t\t.schedule_type = cdata.queue_type,\n \t\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,\n \t\t\t.nb_atomic_flows = 1024,\n-\t\t.nb_atomic_order_sequences = 
1024,\n+\t\t\t.nb_atomic_order_sequences = 1024,\n \t};\n \tstruct rte_event_queue_conf tx_q_conf = {\n \t\t\t.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,\n@@ -167,7 +169,8 @@\n \tdisable_implicit_release = (dev_info.event_dev_cap &\n \t\t\tRTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);\n \n-\twkr_p_conf.disable_implicit_release = disable_implicit_release;\n+\twkr_p_conf.event_port_cfg = disable_implicit_release ?\n+\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;\n \n \tif (dev_info.max_num_events < config.nb_events_limit)\n \t\tconfig.nb_events_limit = dev_info.max_num_events;\n@@ -417,6 +420,7 @@\n \t\t.dequeue_depth = cdata.worker_cq_depth,\n \t\t.enqueue_depth = 64,\n \t\t.new_event_threshold = 4096,\n+\t\t.event_port_cfg = 0,\n \t};\n \n \tif (adptr_p_conf.new_event_threshold > dev_info.max_num_events)\ndiff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c\nindex 55bb2f7..e8a9652 100644\n--- a/examples/eventdev_pipeline/pipeline_worker_tx.c\n+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c\n@@ -436,6 +436,7 @@\n \tstruct rte_event_dev_config config = {\n \t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n+\t\t\t.nb_single_link_event_port_queues = 0,\n \t\t\t.nb_events_limit  = 4096,\n \t\t\t.nb_event_queue_flows = 1024,\n \t\t\t.nb_event_port_dequeue_depth = 128,\n@@ -445,6 +446,7 @@\n \t\t\t.dequeue_depth = cdata.worker_cq_depth,\n \t\t\t.enqueue_depth = 64,\n \t\t\t.new_event_threshold = 4096,\n+\t\t\t.event_port_cfg = 0,\n \t};\n \tstruct rte_event_queue_conf wkr_q_conf = {\n \t\t\t.schedule_type = cdata.queue_type,\n@@ -746,6 +748,7 @@ struct rx_adptr_services {\n \t\t.dequeue_depth = cdata.worker_cq_depth,\n \t\t.enqueue_depth = 64,\n \t\t.new_event_threshold = 4096,\n+\t\t.event_port_cfg = 0,\n \t};\n \n \tinit_ports(nb_ports);\ndiff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c\nindex 2dc95e5..e01df04 100644\n--- a/examples/l2fwd-event/l2fwd_event_generic.c\n+++ b/examples/l2fwd-event/l2fwd_event_generic.c\n@@ -126,8 +126,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \tevt_rsrc->deq_depth = def_p_conf.dequeue_depth;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\ndiff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c\nindex 63d57b4..f54327b 100644\n--- a/examples/l2fwd-event/l2fwd_event_internal_port.c\n+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c\n@@ -123,8 +123,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\n \t\t\t\t\t\t\t\tevent_p_id++) {\ndiff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c\nindex f8c9843..409a410 100644\n--- a/examples/l3fwd/l3fwd_event_generic.c\n+++ b/examples/l3fwd/l3fwd_event_generic.c\n@@ -115,8 
+115,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \tevt_rsrc->deq_depth = def_p_conf.dequeue_depth;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\ndiff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c\nindex 03ac581..df410f1 100644\n--- a/examples/l3fwd/l3fwd_event_internal_port.c\n+++ b/examples/l3fwd/l3fwd_event_internal_port.c\n@@ -113,8 +113,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\n \t\t\t\t\t\t\t\tevent_p_id++) {\ndiff --git a/lib/librte_eal/x86/include/rte_cpuflags.h b/lib/librte_eal/x86/include/rte_cpuflags.h\nindex c1d2036..ab2c3b3 100644\n--- a/lib/librte_eal/x86/include/rte_cpuflags.h\n+++ b/lib/librte_eal/x86/include/rte_cpuflags.h\n@@ -130,6 +130,7 @@ enum rte_cpu_flag_t {\n \tRTE_CPUFLAG_CLDEMOTE,               /**< Cache Line Demote */\n \tRTE_CPUFLAG_MOVDIRI,                /**< Direct Store Instructions */\n \tRTE_CPUFLAG_MOVDIR64B,              /**< Direct Store Instructions 64B */\n+\tRTE_CPUFLAG_UMWAIT,                 /**< UMONITOR/UMWAIT */\n \tRTE_CPUFLAG_AVX512VP2INTERSECT,     /**< AVX512 Two Register Intersection */\n \n \t/* The last item */\ndiff --git a/lib/librte_eal/x86/rte_cpuflags.c b/lib/librte_eal/x86/rte_cpuflags.c\nindex 30439e7..69ac0db 100644\n--- a/lib/librte_eal/x86/rte_cpuflags.c\n+++ b/lib/librte_eal/x86/rte_cpuflags.c\n@@ -137,6 +137,7 @@ struct feature_entry {\n \tFEAT_DEF(CLDEMOTE, 0x00000007, 0, RTE_REG_ECX, 25)\n \tFEAT_DEF(MOVDIRI, 0x00000007, 0, RTE_REG_ECX, 27)\n \tFEAT_DEF(MOVDIR64B, 0x00000007, 0, RTE_REG_ECX, 28)\n+        FEAT_DEF(UMWAIT, 0x00000007, 0, RTE_REG_ECX, 5)\n \tFEAT_DEF(AVX512VP2INTERSECT, 0x00000007, 0, RTE_REG_EDX, 8)\n };\n \ndiff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build\nindex d1f25ee..17f7f40 100644\n--- a/lib/librte_eventdev/meson.build\n+++ b/lib/librte_eventdev/meson.build\n@@ -7,6 +7,7 @@ else\n \tcflags += '-DBSD'\n endif\n \n+use_function_versioning = true\n sources = files('rte_eventdev.c',\n \t\t'rte_event_ring.c',\n \t\t'eventdev_trace_points.c',\ndiff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c\nindex bb21dc4..8a72256 100644\n--- a/lib/librte_eventdev/rte_event_eth_tx_adapter.c\n+++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.c\n@@ -286,7 +286,7 @@ static int txa_service_queue_del(uint8_t id,\n \t\treturn ret;\n \t}\n \n-\tpc->disable_implicit_release = 0;\n+\tpc->event_port_cfg = 0;\n \tret = rte_event_port_setup(dev_id, port_id, pc);\n \tif (ret) {\n \t\tRTE_EDEV_LOG_ERR(\"failed to setup event port %u\\n\",\ndiff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c\nindex 82c177c..e8d7c0d 100644\n--- a/lib/librte_eventdev/rte_eventdev.c\n+++ b/lib/librte_eventdev/rte_eventdev.c\n@@ -32,6 +32,8 @@\n #include 
<rte_ethdev.h>\n #include <rte_cryptodev.h>\n #include <rte_cryptodev_pmd.h>\n+#include <rte_compat.h>\n+#include <rte_function_versioning.h>\n \n #include \"rte_eventdev.h\"\n #include \"rte_eventdev_pmd.h\"\n@@ -87,7 +89,47 @@\n }\n \n int\n-rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)\n+rte_event_dev_info_get_v20(uint8_t dev_id,\n+\t\t\t     struct rte_event_dev_info_v20 *dev_info)\n+{\n+\tstruct rte_event_dev_info new_dev_info;\n+\tint err;\n+\n+\tif (dev_info == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmemset(&new_dev_info, 0, sizeof(struct rte_event_dev_info));\n+\n+\terr = rte_event_dev_info_get(dev_id, &new_dev_info);\n+\tif (err)\n+\t\treturn err;\n+\n+\tdev_info->driver_name = new_dev_info.driver_name;\n+\tdev_info->dev = new_dev_info.dev;\n+\tdev_info->min_dequeue_timeout_ns = new_dev_info.min_dequeue_timeout_ns;\n+\tdev_info->max_dequeue_timeout_ns = new_dev_info.max_dequeue_timeout_ns;\n+\tdev_info->max_event_queues = new_dev_info.max_event_queues;\n+\tdev_info->max_event_queue_flows = new_dev_info.max_event_queue_flows;\n+\tdev_info->max_event_queue_priority_levels =\n+\t\tnew_dev_info.max_event_queue_priority_levels;\n+\tdev_info->max_event_priority_levels =\n+\t\tnew_dev_info.max_event_priority_levels;\n+\tdev_info->max_event_ports = new_dev_info.max_event_ports;\n+\tdev_info->max_event_port_dequeue_depth =\n+\t\tnew_dev_info.max_event_port_dequeue_depth;\n+\tdev_info->max_event_port_enqueue_depth =\n+\t\tnew_dev_info.max_event_port_enqueue_depth;\n+\tdev_info->max_num_events = new_dev_info.max_num_events;\n+\tdev_info->event_dev_cap = new_dev_info.event_dev_cap;\n+\tdev_info->dequeue_timeout_ns = new_dev_info.dequeue_timeout_ns;\n+\n+\treturn 0;\n+}\n+VERSION_SYMBOL(rte_event_dev_info_get, _v20, 20);\n+\n+int\n+rte_event_dev_info_get_v21(uint8_t dev_id,\n+\t\t\t     struct rte_event_dev_info *dev_info)\n {\n \tstruct rte_eventdev *dev;\n \n@@ -107,6 +149,10 @@\n \tdev_info->dev = dev->dev;\n \treturn 0;\n }\n+BIND_DEFAULT_SYMBOL(rte_event_dev_info_get, _v21, 21);\n+MAP_STATIC_SYMBOL(int rte_event_dev_info_get(uint8_t dev_id,\n+\t\t\tstruct rte_event_dev_info *dev_info),\n+\t\t\trte_event_dev_info_get_v21);\n \n int\n rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,\n@@ -385,7 +431,29 @@\n }\n \n int\n-rte_event_dev_configure(uint8_t dev_id,\n+rte_event_dev_configure_v20(uint8_t dev_id,\n+\t\t\t      const struct rte_event_dev_config_v20 *dev_conf)\n+{\n+\tstruct rte_event_dev_config new_dev_conf;\n+\n+\tnew_dev_conf.dequeue_timeout_ns = dev_conf->dequeue_timeout_ns;\n+\tnew_dev_conf.nb_events_limit = dev_conf->nb_events_limit;\n+\tnew_dev_conf.nb_event_queues = dev_conf->nb_event_queues;\n+\tnew_dev_conf.nb_event_ports = dev_conf->nb_event_ports;\n+\tnew_dev_conf.nb_event_queue_flows = dev_conf->nb_event_queue_flows;\n+\tnew_dev_conf.nb_event_port_dequeue_depth =\n+\t\tdev_conf->nb_event_port_dequeue_depth;\n+\tnew_dev_conf.nb_event_port_enqueue_depth =\n+\t\tdev_conf->nb_event_port_enqueue_depth;\n+\tnew_dev_conf.event_dev_cfg = dev_conf->event_dev_cfg;\n+\tnew_dev_conf.nb_single_link_event_port_queues = 0;\n+\n+\treturn rte_event_dev_configure(dev_id, &new_dev_conf);\n+}\n+VERSION_SYMBOL(rte_event_dev_info_get, _v20, 20);\n+\n+int\n+rte_event_dev_configure_v21(uint8_t dev_id,\n \t\t\tconst struct rte_event_dev_config *dev_conf)\n {\n \tstruct rte_eventdev *dev;\n@@ -437,9 +505,29 @@\n \t\t\t\t\tdev_id);\n \t\treturn -EINVAL;\n \t}\n-\tif (dev_conf->nb_event_queues > info.max_event_queues) {\n-\t\tRTE_EDEV_LOG_ERR(\"%d 
nb_event_queues=%d > max_event_queues=%d\",\n-\t\tdev_id, dev_conf->nb_event_queues, info.max_event_queues);\n+\tif (dev_conf->nb_event_queues > info.max_event_queues +\n+\t\t\tinfo.max_single_link_event_port_queue_pairs) {\n+\t\tRTE_EDEV_LOG_ERR(\"%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_queues,\n+\t\t\t\t info.max_event_queues,\n+\t\t\t\t info.max_single_link_event_port_queue_pairs);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_queues -\n+\t\t\tdev_conf->nb_single_link_event_port_queues >\n+\t\t\tinfo.max_event_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_queues,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t info.max_event_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_single_link_event_port_queues >\n+\t\t\tdev_conf->nb_event_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d\",\n+\t\t\t\t dev_id,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t dev_conf->nb_event_queues);\n \t\treturn -EINVAL;\n \t}\n \n@@ -448,9 +536,31 @@\n \t\tRTE_EDEV_LOG_ERR(\"dev%d nb_event_ports cannot be zero\", dev_id);\n \t\treturn -EINVAL;\n \t}\n-\tif (dev_conf->nb_event_ports > info.max_event_ports) {\n-\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d > max_event_ports= %d\",\n-\t\tdev_id, dev_conf->nb_event_ports, info.max_event_ports);\n+\tif (dev_conf->nb_event_ports > info.max_event_ports +\n+\t\t\tinfo.max_single_link_event_port_queue_pairs) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_ports,\n+\t\t\t\t info.max_event_ports,\n+\t\t\t\t info.max_single_link_event_port_queue_pairs);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_ports -\n+\t\t\tdev_conf->nb_single_link_event_port_queues\n+\t\t\t> info.max_event_ports) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_ports,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t info.max_event_ports);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dev_conf->nb_single_link_event_port_queues >\n+\t    dev_conf->nb_event_ports) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t\t\t \"dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d\",\n+\t\t\t\t dev_id,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t dev_conf->nb_event_ports);\n \t\treturn -EINVAL;\n \t}\n \n@@ -528,6 +638,10 @@\n \trte_eventdev_trace_configure(dev_id, dev_conf, diag);\n \treturn diag;\n }\n+BIND_DEFAULT_SYMBOL(rte_event_dev_configure, _v21, 21);\n+MAP_STATIC_SYMBOL(int rte_event_dev_configure(uint8_t dev_id,\n+\t\t\tconst struct rte_event_dev_config *dev_conf),\n+\t\t\trte_event_dev_configure_v21);\n \n static inline int\n is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)\n@@ -666,7 +780,33 @@\n }\n \n int\n-rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,\n+rte_event_port_default_conf_get_v20(uint8_t dev_id, uint8_t port_id,\n+\t\t\t\t struct rte_event_port_conf_v20 *port_conf)\n+{\n+\tstruct rte_event_port_conf new_port_conf;\n+\tint err;\n+\n+\tif (port_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmemset(&new_port_conf, 0, sizeof(new_port_conf));\n+\n+\terr = rte_event_port_default_conf_get(dev_id, port_id, &new_port_conf);\n+\tif 
(err)\n+\t\treturn err;\n+\n+\tport_conf->new_event_threshold = new_port_conf.new_event_threshold;\n+\tport_conf->dequeue_depth = new_port_conf.dequeue_depth;\n+\tport_conf->enqueue_depth = new_port_conf.enqueue_depth;\n+\tport_conf->disable_implicit_release = !!(new_port_conf.event_port_cfg &\n+\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);\n+\n+\treturn 0;\n+}\n+VERSION_SYMBOL(rte_event_port_default_conf_get, _v20, 20);\n+\n+int\n+rte_event_port_default_conf_get_v21(uint8_t dev_id, uint8_t port_id,\n \t\t\t\t struct rte_event_port_conf *port_conf)\n {\n \tstruct rte_eventdev *dev;\n@@ -687,9 +827,35 @@\n \t(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);\n \treturn 0;\n }\n+BIND_DEFAULT_SYMBOL(rte_event_port_default_conf_get, _v21, 21);\n+MAP_STATIC_SYMBOL(int rte_event_port_default_conf_get(uint8_t dev_id,\n+\t\t\tuint8_t port_id, struct rte_event_port_conf *port_conf),\n+\t\t\trte_event_port_default_conf_get_v21);\n \n int\n-rte_event_port_setup(uint8_t dev_id, uint8_t port_id,\n+rte_event_port_setup_v20(uint8_t dev_id, uint8_t port_id,\n+\t\t     const struct rte_event_port_conf_v20 *port_conf)\n+{\n+\tstruct rte_event_port_conf new_port_conf;\n+\n+\tif (port_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tnew_port_conf.new_event_threshold = port_conf->new_event_threshold;\n+\tnew_port_conf.dequeue_depth = port_conf->dequeue_depth;\n+\tnew_port_conf.enqueue_depth = port_conf->enqueue_depth;\n+\tnew_port_conf.event_port_cfg = 0;\n+\tif (port_conf->disable_implicit_release)\n+\t\tnew_port_conf.event_port_cfg =\n+\t\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n+\n+\treturn rte_event_port_setup(dev_id, port_id, &new_port_conf);\n+\n+}\n+VERSION_SYMBOL(rte_event_port_setup, _v20, 20);\n+\n+int\n+rte_event_port_setup_v21(uint8_t dev_id, uint8_t port_id,\n \t\t     const struct rte_event_port_conf *port_conf)\n {\n \tstruct rte_eventdev *dev;\n@@ -737,7 +903,8 @@\n \t\treturn -EINVAL;\n \t}\n \n-\tif (port_conf && port_conf->disable_implicit_release &&\n+\tif (port_conf &&\n+\t    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&\n \t    !(dev->data->event_dev_cap &\n \t      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {\n \t\tRTE_EDEV_LOG_ERR(\n@@ -775,6 +942,10 @@\n \n \treturn 0;\n }\n+BIND_DEFAULT_SYMBOL(rte_event_port_setup, _v21, 21);\n+MAP_STATIC_SYMBOL(int rte_event_port_setup(uint8_t dev_id, uint8_t port_id,\n+\t\t  const struct rte_event_port_conf *port_conf),\n+\t\t  rte_event_port_setup_v21);\n \n int\n rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,\n@@ -809,6 +980,7 @@\n \t\t\tuint32_t *attr_value)\n {\n \tstruct rte_eventdev *dev;\n+\tuint32_t config;\n \n \tif (!attr_value)\n \t\treturn -EINVAL;\n@@ -830,6 +1002,10 @@\n \tcase RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:\n \t\t*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;\n \t\tbreak;\n+\tcase RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:\n+\t\tconfig = dev->data->ports_cfg[port_id].event_port_cfg;\n+\t\t*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);\n+\t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n \t};\ndiff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h\nindex 7dc8323..e7155e6 100644\n--- a/lib/librte_eventdev/rte_eventdev.h\n+++ b/lib/librte_eventdev/rte_eventdev.h\n@@ -291,6 +291,12 @@\n  * single queue to each port or map a single queue to many port.\n  */\n \n+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)\n+/**< Event device preserves the flow ID from the enqueued\n+ * event to the dequeued event if the flag is set. 
Otherwise,\n+ * the content of this field is implementation dependent.\n+ */\n+\n /* Event device priority levels */\n #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0\n /**< Highest priority expressed across eventdev subsystem\n@@ -380,6 +386,58 @@ struct rte_event_dev_info {\n \t * event port by this device.\n \t * A device that does not support bulk enqueue will set this as 1.\n \t */\n+\tuint8_t max_event_port_links;\n+\t/**< Maximum number of queues that can be linked to a single event\n+\t * port by this device.\n+\t */\n+\tint32_t max_num_events;\n+\t/**< A *closed system* event dev has a limit on the number of events it\n+\t * can manage at a time. An *open system* event dev does not have a\n+\t * limit and will specify this as -1.\n+\t */\n+\tuint32_t event_dev_cap;\n+\t/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/\n+\tuint8_t max_single_link_event_port_queue_pairs;\n+\t/**< Maximum number of event ports and queues that are optimized for\n+\t * (and only capable of) single-link configurations supported by this\n+\t * device. These ports and queues are not accounted for in\n+\t * max_event_ports or max_event_queues.\n+\t */\n+};\n+\n+struct rte_event_dev_info_v20 {\n+\tconst char *driver_name;\t/**< Event driver name */\n+\tstruct rte_device *dev;\t/**< Device information */\n+\tuint32_t min_dequeue_timeout_ns;\n+\t/**< Minimum supported global dequeue timeout(ns) by this device */\n+\tuint32_t max_dequeue_timeout_ns;\n+\t/**< Maximum supported global dequeue timeout(ns) by this device */\n+\tuint32_t dequeue_timeout_ns;\n+\t/**< Configured global dequeue timeout(ns) for this device */\n+\tuint8_t max_event_queues;\n+\t/**< Maximum event_queues supported by this device */\n+\tuint32_t max_event_queue_flows;\n+\t/**< Maximum supported flows in an event queue by this device*/\n+\tuint8_t max_event_queue_priority_levels;\n+\t/**< Maximum number of event queue priority levels by this device.\n+\t * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability\n+\t */\n+\tuint8_t max_event_priority_levels;\n+\t/**< Maximum number of event priority levels by this device.\n+\t * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability\n+\t */\n+\tuint8_t max_event_ports;\n+\t/**< Maximum number of event ports supported by this device */\n+\tuint8_t max_event_port_dequeue_depth;\n+\t/**< Maximum number of events can be dequeued at a time from an\n+\t * event port by this device.\n+\t * A device that does not support bulk dequeue will set this as 1.\n+\t */\n+\tuint32_t max_event_port_enqueue_depth;\n+\t/**< Maximum number of events can be enqueued at a time from an\n+\t * event port by this device.\n+\t * A device that does not support bulk enqueue will set this as 1.\n+\t */\n \tint32_t max_num_events;\n \t/**< A *closed system* event dev has a limit on the number of events it\n \t * can manage at a time. 
An *open system* event dev does not have a\n@@ -407,6 +465,14 @@ struct rte_event_dev_info {\n int\n rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);\n \n+int\n+rte_event_dev_info_get_v20(uint8_t dev_id,\n+\t\t\t     struct rte_event_dev_info_v20 *dev_info);\n+\n+int\n+rte_event_dev_info_get_v21(uint8_t dev_id,\n+\t\t\t     struct rte_event_dev_info *dev_info);\n+\n /**\n  * The count of ports.\n  */\n@@ -494,6 +560,67 @@ struct rte_event_dev_config {\n \t */\n \tuint32_t event_dev_cfg;\n \t/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/\n+\tuint8_t nb_single_link_event_port_queues;\n+\t/**< Number of event ports and queues that will be singly-linked to\n+\t * each other. These are a subset of the overall event ports and\n+\t * queues; this value cannot exceed *nb_event_ports* or\n+\t * *nb_event_queues*. If the device has ports and queues that are\n+\t * optimized for single-link usage, this field is a hint for how many\n+\t * to allocate; otherwise, regular event ports and queues can be used.\n+\t */\n+};\n+\n+/** Event device configuration structure */\n+struct rte_event_dev_config_v20 {\n+\tuint32_t dequeue_timeout_ns;\n+\t/**< rte_event_dequeue_burst() timeout on this device.\n+\t * This value should be in the range of *min_dequeue_timeout_ns* and\n+\t * *max_dequeue_timeout_ns* which previously provided in\n+\t * rte_event_dev_info_get()\n+\t * The value 0 is allowed, in which case, default dequeue timeout used.\n+\t * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT\n+\t */\n+\tint32_t nb_events_limit;\n+\t/**< In a *closed system* this field is the limit on maximum number of\n+\t * events that can be inflight in the eventdev at a given time. The\n+\t * limit is required to ensure that the finite space in a closed system\n+\t * is not overwhelmed. 
The value cannot exceed the *max_num_events*\n+\t * as provided by rte_event_dev_info_get().\n+\t * This value should be set to -1 for *open system*.\n+\t */\n+\tuint8_t nb_event_queues;\n+\t/**< Number of event queues to configure on this device.\n+\t * This value cannot exceed the *max_event_queues* which previously\n+\t * provided in rte_event_dev_info_get()\n+\t */\n+\tuint8_t nb_event_ports;\n+\t/**< Number of event ports to configure on this device.\n+\t * This value cannot exceed the *max_event_ports* which previously\n+\t * provided in rte_event_dev_info_get()\n+\t */\n+\tuint32_t nb_event_queue_flows;\n+\t/**< Number of flows for any event queue on this device.\n+\t * This value cannot exceed the *max_event_queue_flows* which previously\n+\t * provided in rte_event_dev_info_get()\n+\t */\n+\tuint32_t nb_event_port_dequeue_depth;\n+\t/**< Maximum number of events can be dequeued at a time from an\n+\t * event port by this device.\n+\t * This value cannot exceed the *max_event_port_dequeue_depth*\n+\t * which previously provided in rte_event_dev_info_get().\n+\t * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.\n+\t * @see rte_event_port_setup()\n+\t */\n+\tuint32_t nb_event_port_enqueue_depth;\n+\t/**< Maximum number of events can be enqueued at a time from an\n+\t * event port by this device.\n+\t * This value cannot exceed the *max_event_port_enqueue_depth*\n+\t * which previously provided in rte_event_dev_info_get().\n+\t * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.\n+\t * @see rte_event_port_setup()\n+\t */\n+\tuint32_t event_dev_cfg;\n+\t/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/\n };\n \n /**\n@@ -519,6 +646,13 @@ struct rte_event_dev_config {\n rte_event_dev_configure(uint8_t dev_id,\n \t\t\tconst struct rte_event_dev_config *dev_conf);\n \n+int\n+rte_event_dev_configure_v20(uint8_t dev_id,\n+\t\t\tconst struct rte_event_dev_config_v20 *dev_conf);\n+\n+int\n+rte_event_dev_configure_v21(uint8_t dev_id,\n+\t\t\tconst struct rte_event_dev_config *dev_conf);\n \n /* Event queue specific APIs */\n \n@@ -671,6 +805,20 @@ struct rte_event_queue_conf {\n \n /* Event port specific APIs */\n \n+/* Event port configuration bitmap flags */\n+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)\n+/**< Configure the port not to release outstanding events in\n+ * rte_event_dev_dequeue_burst(). If set, all events received through\n+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or\n+ * RTE_EVENT_OP_FORWARD. 
Must be unset if the device is not\n+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.\n+ */\n+#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)\n+/**< This event port links only to a single event queue.\n+ *\n+ *  @see rte_event_port_setup(), rte_event_port_link()\n+ */\n+\n /** Event port configuration structure */\n struct rte_event_port_conf {\n \tint32_t new_event_threshold;\n@@ -698,6 +846,36 @@ struct rte_event_port_conf {\n \t * which previously supplied to rte_event_dev_configure().\n \t * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.\n \t */\n+\tuint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */\n+};\n+\n+/** Event port configuration structure */\n+struct rte_event_port_conf_v20 {\n+\tint32_t new_event_threshold;\n+\t/**< A backpressure threshold for new event enqueues on this port.\n+\t * Use for *closed system* event dev where event capacity is limited,\n+\t * and cannot exceed the capacity of the event dev.\n+\t * Configuring ports with different thresholds can make higher priority\n+\t * traffic less likely to  be backpressured.\n+\t * For example, a port used to inject NIC Rx packets into the event dev\n+\t * can have a lower threshold so as not to overwhelm the device,\n+\t * while ports used for worker pools can have a higher threshold.\n+\t * This value cannot exceed the *nb_events_limit*\n+\t * which was previously supplied to rte_event_dev_configure().\n+\t * This should be set to '-1' for *open system*.\n+\t */\n+\tuint16_t dequeue_depth;\n+\t/**< Configure number of bulk dequeues for this event port.\n+\t * This value cannot exceed the *nb_event_port_dequeue_depth*\n+\t * which previously supplied to rte_event_dev_configure().\n+\t * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.\n+\t */\n+\tuint16_t enqueue_depth;\n+\t/**< Configure number of bulk enqueues for this event port.\n+\t * This value cannot exceed the *nb_event_port_enqueue_depth*\n+\t * which previously supplied to rte_event_dev_configure().\n+\t * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.\n+\t */\n \tuint8_t disable_implicit_release;\n \t/**< Configure the port not to release outstanding events in\n \t * rte_event_dev_dequeue_burst(). 
If true, all events received through\n@@ -733,6 +911,14 @@ struct rte_event_port_conf {\n rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,\n \t\t\t\tstruct rte_event_port_conf *port_conf);\n \n+int\n+rte_event_port_default_conf_get_v20(uint8_t dev_id, uint8_t port_id,\n+\t\t\t\tstruct rte_event_port_conf_v20 *port_conf);\n+\n+int\n+rte_event_port_default_conf_get_v21(uint8_t dev_id, uint8_t port_id,\n+\t\t\t\t      struct rte_event_port_conf *port_conf);\n+\n /**\n  * Allocate and set up an event port for an event device.\n  *\n@@ -757,6 +943,14 @@ struct rte_event_port_conf {\n rte_event_port_setup(uint8_t dev_id, uint8_t port_id,\n \t\t     const struct rte_event_port_conf *port_conf);\n \n+int\n+rte_event_port_setup_v20(uint8_t dev_id, uint8_t port_id,\n+\t\t\t   const struct rte_event_port_conf_v20 *port_conf);\n+\n+int\n+rte_event_port_setup_v21(uint8_t dev_id, uint8_t port_id,\n+\t\t\t   const struct rte_event_port_conf *port_conf);\n+\n /**\n  * The queue depth of the port on the enqueue side\n  */\n@@ -769,6 +963,10 @@ struct rte_event_port_conf {\n  * The new event threshold of the port\n  */\n #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2\n+/**\n+ * The implicit release disable attribute of the port\n+ */\n+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3\n \n /**\n  * Get an attribute from a port.\ndiff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h\nindex 443cd38..1572999 100644\n--- a/lib/librte_eventdev/rte_eventdev_pmd_pci.h\n+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h\n@@ -88,6 +88,60 @@\n \treturn -ENXIO;\n }\n \n+/**\n+ * @internal\n+ * Wrapper for use by pci drivers as a .probe function to attach to a event\n+ * interface.  Same as rte_event_pmd_pci_probe, except caller can specify\n+ * the name.\n+ */\n+static inline int\n+rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,\n+\t\t\t    struct rte_pci_device *pci_dev,\n+\t\t\t    size_t private_data_size,\n+\t\t\t    eventdev_pmd_pci_callback_t devinit,\n+\t\t\t    const char *name)\n+{\n+\tstruct rte_eventdev *eventdev;\n+\n+\tint retval;\n+\n+\tif (devinit == NULL)\n+\t\treturn -EINVAL;\n+\n+\teventdev = rte_event_pmd_allocate(name,\n+\t\t\t pci_dev->device.numa_node);\n+\tif (eventdev == NULL)\n+\t\treturn -ENOMEM;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\teventdev->data->dev_private =\n+\t\t\t\trte_zmalloc_socket(\n+\t\t\t\t\t\t\"eventdev private structure\",\n+\t\t\t\t\t\tprivate_data_size,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\trte_socket_id());\n+\n+\t\tif (eventdev->data->dev_private == NULL)\n+\t\t\trte_panic(\"Cannot allocate memzone for private \"\n+\t\t\t\t\t\"device data\");\n+\t}\n+\n+\teventdev->dev = &pci_dev->device;\n+\n+\t/* Invoke PMD device initialization function */\n+\tretval = devinit(eventdev);\n+\tif (retval == 0)\n+\t\treturn 0;\n+\n+\tRTE_EDEV_LOG_ERR(\"driver %s: (vendor_id=0x%x device_id=0x%x)\"\n+\t\t\t\" failed\", pci_drv->driver.name,\n+\t\t\t(unsigned int) pci_dev->id.vendor_id,\n+\t\t\t(unsigned int) pci_dev->id.device_id);\n+\n+\trte_event_pmd_release(eventdev);\n+\n+\treturn -ENXIO;\n+}\n \n /**\n  * @internal\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nindex 91a62cd..081fcb6 100644\n--- a/lib/librte_eventdev/rte_eventdev_version.map\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -94,13 +94,21 @@ DPDK_20.0 {\n \tlocal: *;\n };\n \n+DPDK_21 {\n+        
global:\n+\n+\trte_event_dev_info_get;\n+\trte_event_dev_configure;\n+\trte_event_port_default_conf_get;\n+\trte_event_port_setup;\n+} DPDK_20.0;\n+\n EXPERIMENTAL {\n \tglobal:\n \n \t# added in 20.05\n \t__rte_eventdev_trace_configure;\n \t__rte_eventdev_trace_queue_setup;\n-\t__rte_eventdev_trace_port_setup;\n \t__rte_eventdev_trace_port_link;\n \t__rte_eventdev_trace_port_unlink;\n \t__rte_eventdev_trace_start;\n@@ -134,4 +142,7 @@ EXPERIMENTAL {\n \t__rte_eventdev_trace_crypto_adapter_queue_pair_del;\n \t__rte_eventdev_trace_crypto_adapter_start;\n \t__rte_eventdev_trace_crypto_adapter_stop;\n+\n+\t# changed in 20.08\n+\t__rte_eventdev_trace_port_setup;\n };\n",
    "prefixes": [
        "01/27"
    ]
}
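
Updates use the same endpoint with PATCH (partial) or PUT (full) and
require authentication. A minimal sketch, assuming a Patchwork API
token with maintainer rights on the project; the token value below is
a placeholder, and the "Token" header scheme is an assumption based on
Django REST Framework token authentication as used by Patchwork.

    # Minimal sketch: change a patch's state via HTTP PATCH.
    import requests

    BASE = "http://patches.dpdk.org/api"
    TOKEN = "0123456789abcdef"  # placeholder, not a real credential

    def set_patch_state(patch_id, state):
        """PATCH sends only the fields to change; PUT replaces them all."""
        resp = requests.patch(
            f"{BASE}/patches/{patch_id}/",
            headers={"Authorization": f"Token {TOKEN}"},
            json={"state": state},
        )
        resp.raise_for_status()
        return resp.json()

    # e.g. set_patch_state(72292, "superseded") reproduces the state above

The read-only URLs in the response (mbox, comments, checks) can be
fetched directly; the mbox link, for example, yields a message that can
be applied locally with git am.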