get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/45287/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 45287,
    "url": "https://patches.dpdk.org/api/patches/45287/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1537858955-6884-2-git-send-email-hemant.agrawal@nxp.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1537858955-6884-2-git-send-email-hemant.agrawal@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1537858955-6884-2-git-send-email-hemant.agrawal@nxp.com",
    "date": "2018-09-25T07:02:35",
    "name": "[v2,2/2] event/dpaa: add select based event support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "63d113c2a160f538c8f33714c9afbeb0b36d0650",
    "submitter": {
        "id": 477,
        "url": "https://patches.dpdk.org/api/people/477/?format=api",
        "name": "Hemant Agrawal",
        "email": "hemant.agrawal@nxp.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1537858955-6884-2-git-send-email-hemant.agrawal@nxp.com/mbox/",
    "series": [
        {
            "id": 1481,
            "url": "https://patches.dpdk.org/api/series/1481/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=1481",
            "date": "2018-09-25T07:02:34",
            "name": "[v2,1/2] event/dpaa: remove duplicate log macros",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/1481/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/45287/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/45287/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id BF9AB44BE;\n\tTue, 25 Sep 2018 09:04:42 +0200 (CEST)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n\tby dpdk.org (Postfix) with ESMTP id 802A01AFF\n\tfor <dev@dpdk.org>; Tue, 25 Sep 2018 09:04:39 +0200 (CEST)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n\tby inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 4BC381A0006;\n\tTue, 25 Sep 2018 09:04:39 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n\t[165.114.16.14])\n\tby inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 6D4CA1A0179;\n\tTue, 25 Sep 2018 09:04:37 +0200 (CEST)",
            "from bf-netperf1.ap.freescale.net (bf-netperf1.ap.freescale.net\n\t[10.232.134.28])\n\tby invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id D4675402E2;\n\tTue, 25 Sep 2018 15:04:34 +0800 (SGT)"
        ],
        "From": "Hemant Agrawal <hemant.agrawal@nxp.com>",
        "To": "dev@dpdk.org",
        "Cc": "jerin.jacob@caviumnetworks.com",
        "Date": "Tue, 25 Sep 2018 12:32:35 +0530",
        "Message-Id": "<1537858955-6884-2-git-send-email-hemant.agrawal@nxp.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1537858955-6884-1-git-send-email-hemant.agrawal@nxp.com>",
        "References": "<1535607196-26782-1-git-send-email-hemant.agrawal@nxp.com>\n\t<1537858955-6884-1-git-send-email-hemant.agrawal@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch add support to use select call with qman portal fd\nfor timeout based dequeue request for eventdev.\n\nIf there is a event available qman portal fd will be set\nand the function will be awakened. If no event is available,\nit will only wait till the given timeout value.\n\nIn case of interrupt the timeout ticks are used as usecs.\n\nSigned-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>\n---\nNote: This patch has a dependency on following patch series:\nhttp://mails.dpdk.org/archives/dev/2018-September/112433.html\nwhich is now part of dpdk-next-net tree.\n\n doc/guides/eventdevs/dpaa.rst      |   2 +\n drivers/event/dpaa/dpaa_eventdev.c | 283 +++++++++++++++++++++++++++++++------\n drivers/event/dpaa/dpaa_eventdev.h |  10 +-\n 3 files changed, 247 insertions(+), 48 deletions(-)",
    "diff": "diff --git a/doc/guides/eventdevs/dpaa.rst b/doc/guides/eventdevs/dpaa.rst\nindex 7383295..2f356d3 100644\n--- a/doc/guides/eventdevs/dpaa.rst\n+++ b/doc/guides/eventdevs/dpaa.rst\n@@ -122,6 +122,8 @@ Example:\n \n     ./your_eventdev_application --vdev=\"event_dpaa1\"\n \n+* Use dev arg option ``disable_intr=1`` to disable the interrupt mode\n+\n Limitations\n -----------\n \ndiff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c\nindex 9ddaf30..1e247e4 100644\n--- a/drivers/event/dpaa/dpaa_eventdev.c\n+++ b/drivers/event/dpaa/dpaa_eventdev.c\n@@ -30,6 +30,7 @@\n #include <rte_dpaa_bus.h>\n #include <rte_dpaa_logs.h>\n #include <rte_cycles.h>\n+#include <rte_kvargs.h>\n \n #include <dpaa_ethdev.h>\n #include \"dpaa_eventdev.h\"\n@@ -43,22 +44,34 @@\n  * 1 Eventdev can have N Eventqueue\n  */\n \n+#define DISABLE_INTR_MODE \"disable_intr\"\n+\n static int\n dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,\n \t\t\t\t uint64_t *timeout_ticks)\n {\n-\tuint64_t cycles_per_second;\n-\n \tEVENTDEV_INIT_FUNC_TRACE();\n \n \tRTE_SET_USED(dev);\n \n+\tuint64_t cycles_per_second;\n+\n \tcycles_per_second = rte_get_timer_hz();\n-\t*timeout_ticks = ns * (cycles_per_second / NS_PER_S);\n+\t*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;\n \n \treturn 0;\n }\n \n+static int\n+dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,\n+\t\t\t\t uint64_t *timeout_ticks)\n+{\n+\tRTE_SET_USED(dev);\n+\n+\t*timeout_ticks = ns/1000;\n+\treturn 0;\n+}\n+\n static void\n dpaa_eventq_portal_add(u16 ch_id)\n {\n@@ -100,6 +113,56 @@ dpaa_event_enqueue(void *port, const struct rte_event *ev)\n \treturn dpaa_event_enqueue_burst(port, ev, 1);\n }\n \n+static void drain_4_bytes(int fd, fd_set *fdset)\n+{\n+\tif (FD_ISSET(fd, fdset)) {\n+\t\t/* drain 4 bytes */\n+\t\tuint32_t junk;\n+\t\tssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));\n+\t\tif (sjunk != 
sizeof(junk))\n+\t\t\tDPAA_EVENTDEV_ERR(\"UIO irq read error\");\n+\t}\n+}\n+\n+static inline int\n+dpaa_event_dequeue_wait(uint64_t timeout_ticks)\n+{\n+\tint fd_qman, nfds;\n+\tint ret;\n+\tfd_set readset;\n+\n+\t/* Go into (and back out of) IRQ mode for each select,\n+\t * it simplifies exit-path considerations and other\n+\t * potential nastiness.\n+\t */\n+\tstruct timeval tv = {\n+\t\t.tv_sec = timeout_ticks / 1000000,\n+\t\t.tv_usec = timeout_ticks % 1000000\n+\t};\n+\n+\tfd_qman = qman_thread_fd();\n+\tnfds = fd_qman + 1;\n+\tFD_ZERO(&readset);\n+\tFD_SET(fd_qman, &readset);\n+\n+\tqman_irqsource_add(QM_PIRQ_DQRI);\n+\n+\tret = select(nfds, &readset, NULL, NULL, &tv);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\t/* Calling irqsource_remove() prior to thread_irq()\n+\t * means thread_irq() will not process whatever caused\n+\t * the interrupts, however it does ensure that, once\n+\t * thread_irq() re-enables interrupts, they won't fire\n+\t * again immediately.\n+\t */\n+\tqman_irqsource_remove(~0);\n+\tdrain_4_bytes(fd_qman, &readset);\n+\tqman_thread_irq();\n+\n+\treturn ret;\n+}\n+\n static uint16_t\n dpaa_event_dequeue_burst(void *port, struct rte_event ev[],\n \t\t\t uint16_t nb_events, uint64_t timeout_ticks)\n@@ -107,8 +170,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],\n \tint ret;\n \tu16 ch_id;\n \tvoid *buffers[8];\n-\tu32 num_frames, i;\n-\tuint64_t wait_time, cur_ticks, start_ticks;\n+\tu32 num_frames, i, irq = 0;\n+\tuint64_t cur_ticks = 0, wait_time_ticks = 0;\n \tstruct dpaa_port *portal = (struct dpaa_port *)port;\n \tstruct rte_mbuf *mbuf;\n \n@@ -147,20 +210,21 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],\n \t}\n \tDPAA_PER_LCORE_DQRR_HELD = 0;\n \n-\tif (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)\n-\t\twait_time = timeout_ticks;\n+\tif (timeout_ticks)\n+\t\twait_time_ticks = timeout_ticks;\n \telse\n-\t\twait_time = portal->timeout;\n+\t\twait_time_ticks = portal->timeout_us;\n \n-\t/* Lets 
dequeue the frames */\n-\tstart_ticks = rte_get_timer_cycles();\n-\twait_time += start_ticks;\n+\twait_time_ticks += rte_get_timer_cycles();\n \tdo {\n+\t\t/* Lets dequeue the frames */\n \t\tnum_frames = qman_portal_dequeue(ev, nb_events, buffers);\n-\t\tif (num_frames != 0)\n+\t\tif (irq)\n+\t\t\tirq = 0;\n+\t\tif (num_frames)\n \t\t\tbreak;\n \t\tcur_ticks = rte_get_timer_cycles();\n-\t} while (cur_ticks < wait_time);\n+\t} while (cur_ticks < wait_time_ticks);\n \n \treturn num_frames;\n }\n@@ -171,6 +235,86 @@ dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)\n \treturn dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);\n }\n \n+static uint16_t\n+dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],\n+\t\t\t      uint16_t nb_events, uint64_t timeout_ticks)\n+{\n+\tint ret;\n+\tu16 ch_id;\n+\tvoid *buffers[8];\n+\tu32 num_frames, i, irq = 0;\n+\tuint64_t cur_ticks = 0, wait_time_ticks = 0;\n+\tstruct dpaa_port *portal = (struct dpaa_port *)port;\n+\tstruct rte_mbuf *mbuf;\n+\n+\tif (unlikely(!RTE_PER_LCORE(dpaa_io))) {\n+\t\t/* Affine current thread context to a qman portal */\n+\t\tret = rte_dpaa_portal_init((void *)0);\n+\t\tif (ret) {\n+\t\t\tDPAA_EVENTDEV_ERR(\"Unable to initialize portal\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tif (unlikely(!portal->is_port_linked)) {\n+\t\t/*\n+\t\t * Affine event queue for current thread context\n+\t\t * to a qman portal.\n+\t\t */\n+\t\tfor (i = 0; i < portal->num_linked_evq; i++) {\n+\t\t\tch_id = portal->evq_info[i].ch_id;\n+\t\t\tdpaa_eventq_portal_add(ch_id);\n+\t\t}\n+\t\tportal->is_port_linked = true;\n+\t}\n+\n+\t/* Check if there are atomic contexts to be released */\n+\ti = 0;\n+\twhile (DPAA_PER_LCORE_DQRR_SIZE) {\n+\t\tif (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {\n+\t\t\tqman_dca_index(i, 0);\n+\t\t\tmbuf = DPAA_PER_LCORE_DQRR_MBUF(i);\n+\t\t\tmbuf->seqn = DPAA_INVALID_MBUF_SEQN;\n+\t\t\tDPAA_PER_LCORE_DQRR_HELD &= ~(1 << 
i);\n+\t\t\tDPAA_PER_LCORE_DQRR_SIZE--;\n+\t\t}\n+\t\ti++;\n+\t}\n+\tDPAA_PER_LCORE_DQRR_HELD = 0;\n+\n+\tif (timeout_ticks)\n+\t\twait_time_ticks = timeout_ticks;\n+\telse\n+\t\twait_time_ticks = portal->timeout_us;\n+\n+\tdo {\n+\t\t/* Lets dequeue the frames */\n+\t\tnum_frames = qman_portal_dequeue(ev, nb_events, buffers);\n+\t\tif (irq)\n+\t\t\tirq = 0;\n+\t\tif (num_frames)\n+\t\t\tbreak;\n+\t\tif (wait_time_ticks) { /* wait for time */\n+\t\t\tif (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {\n+\t\t\t\tirq = 1;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tbreak; /* no event after waiting */\n+\t\t}\n+\t\tcur_ticks = rte_get_timer_cycles();\n+\t} while (cur_ticks < wait_time_ticks);\n+\n+\treturn num_frames;\n+}\n+\n+static uint16_t\n+dpaa_event_dequeue_intr(void *port,\n+\t\t\tstruct rte_event *ev,\n+\t\t\tuint64_t timeout_ticks)\n+{\n+\treturn dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);\n+}\n+\n static void\n dpaa_event_dev_info_get(struct rte_eventdev *dev,\n \t\t\tstruct rte_event_dev_info *dev_info)\n@@ -184,7 +328,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,\n \tdev_info->max_dequeue_timeout_ns =\n \t\tDPAA_EVENT_MAX_DEQUEUE_TIMEOUT;\n \tdev_info->dequeue_timeout_ns =\n-\t\tDPAA_EVENT_MIN_DEQUEUE_TIMEOUT;\n+\t\tDPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;\n \tdev_info->max_event_queues =\n \t\tDPAA_EVENT_MAX_QUEUES;\n \tdev_info->max_event_queue_flows =\n@@ -230,15 +374,6 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)\n \tpriv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;\n \tpriv->event_dev_cfg = conf->event_dev_cfg;\n \n-\t/* Check dequeue timeout method is per dequeue or global */\n-\tif (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {\n-\t\t/*\n-\t\t * Use timeout value as given in dequeue operation.\n-\t\t * So invalidating this timetout value.\n-\t\t */\n-\t\tpriv->dequeue_timeout_ns = 0;\n-\t}\n-\n \tch_id = rte_malloc(\"dpaa-channels\",\n \t\t\t  sizeof(uint32_t) * 
priv->nb_event_queues,\n \t\t\t  RTE_CACHE_LINE_SIZE);\n@@ -260,24 +395,35 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)\n \t/* Lets prepare event ports */\n \tmemset(&priv->ports[0], 0,\n \t      sizeof(struct dpaa_port) * priv->nb_event_ports);\n+\n+\t/* Check dequeue timeout method is per dequeue or global */\n \tif (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {\n-\t\tfor (i = 0; i < priv->nb_event_ports; i++) {\n-\t\t\tpriv->ports[i].timeout =\n-\t\t\t\tDPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;\n-\t\t}\n-\t} else if (priv->dequeue_timeout_ns == 0) {\n-\t\tfor (i = 0; i < priv->nb_event_ports; i++) {\n-\t\t\tdpaa_event_dequeue_timeout_ticks(NULL,\n-\t\t\t\tDPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,\n-\t\t\t\t&priv->ports[i].timeout);\n-\t\t}\n+\t\t/*\n+\t\t * Use timeout value as given in dequeue operation.\n+\t\t * So invalidating this timeout value.\n+\t\t */\n+\t\tpriv->dequeue_timeout_ns = 0;\n+\n+\t} else if (conf->dequeue_timeout_ns == 0) {\n+\t\tpriv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;\n \t} else {\n-\t\tfor (i = 0; i < priv->nb_event_ports; i++) {\n-\t\t\tdpaa_event_dequeue_timeout_ticks(NULL,\n-\t\t\t\tpriv->dequeue_timeout_ns,\n-\t\t\t\t&priv->ports[i].timeout);\n+\t\tpriv->dequeue_timeout_ns = conf->dequeue_timeout_ns;\n+\t}\n+\n+\tfor (i = 0; i < priv->nb_event_ports; i++) {\n+\t\tif (priv->intr_mode) {\n+\t\t\tpriv->ports[i].timeout_us =\n+\t\t\t\tpriv->dequeue_timeout_ns/1000;\n+\t\t} else {\n+\t\t\tuint64_t cycles_per_second;\n+\n+\t\t\tcycles_per_second = rte_get_timer_hz();\n+\t\t\tpriv->ports[i].timeout_us =\n+\t\t\t\t(priv->dequeue_timeout_ns * cycles_per_second)\n+\t\t\t\t\t/ NS_PER_S;\n \t\t}\n \t}\n+\n \t/*\n \t * TODO: Currently portals are affined with threads. 
Maximum threads\n \t * can be created equals to number of lcore.\n@@ -454,7 +600,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,\n \t\tevent_queue->event_port = NULL;\n \t}\n \n-\tevent_port->num_linked_evq = event_port->num_linked_evq - i;\n+\tif (event_port->num_linked_evq)\n+\t\tevent_port->num_linked_evq = event_port->num_linked_evq - i;\n \n \treturn (int)i;\n }\n@@ -593,8 +740,44 @@ static struct rte_eventdev_ops dpaa_eventdev_ops = {\n \t.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,\n };\n \n+static int flag_check_handler(__rte_unused const char *key,\n+\t\tconst char *value, __rte_unused void *opaque)\n+{\n+\tif (strcmp(value, \"1\"))\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dpaa_event_check_flags(const char *params)\n+{\n+\tstruct rte_kvargs *kvlist;\n+\n+\tif (params == NULL || params[0] == '\\0')\n+\t\treturn 0;\n+\n+\tkvlist = rte_kvargs_parse(params, NULL);\n+\tif (kvlist == NULL)\n+\t\treturn 0;\n+\n+\tif (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {\n+\t\trte_kvargs_free(kvlist);\n+\t\treturn 0;\n+\t}\n+\t/* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/\n+\tif (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,\n+\t\t\t\tflag_check_handler, NULL) < 0) {\n+\t\trte_kvargs_free(kvlist);\n+\t\treturn 0;\n+\t}\n+\trte_kvargs_free(kvlist);\n+\n+\treturn 1;\n+}\n+\n static int\n-dpaa_event_dev_create(const char *name)\n+dpaa_event_dev_create(const char *name, const char *params)\n {\n \tstruct rte_eventdev *eventdev;\n \tstruct dpaa_eventdev *priv;\n@@ -606,18 +789,27 @@ dpaa_event_dev_create(const char *name)\n \t\tDPAA_EVENTDEV_ERR(\"Failed to create eventdev vdev %s\", name);\n \t\tgoto fail;\n \t}\n+\tpriv = eventdev->data->dev_private;\n \n \teventdev->dev_ops       = &dpaa_eventdev_ops;\n \teventdev->enqueue       = dpaa_event_enqueue;\n \teventdev->enqueue_burst = dpaa_event_enqueue_burst;\n-\teventdev->dequeue       = dpaa_event_dequeue;\n-\teventdev->dequeue_burst = 
dpaa_event_dequeue_burst;\n+\n+\tif (dpaa_event_check_flags(params)) {\n+\t\teventdev->dequeue\t= dpaa_event_dequeue;\n+\t\teventdev->dequeue_burst = dpaa_event_dequeue_burst;\n+\t} else {\n+\t\tpriv->intr_mode = 1;\n+\t\teventdev->dev_ops->timeout_ticks =\n+\t\t\t\tdpaa_event_dequeue_timeout_ticks_intr;\n+\t\teventdev->dequeue\t= dpaa_event_dequeue_intr;\n+\t\teventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;\n+\t}\n \n \t/* For secondary processes, the primary has done all the work */\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n \t\treturn 0;\n \n-\tpriv = eventdev->data->dev_private;\n \tpriv->max_event_queues = DPAA_EVENT_MAX_QUEUES;\n \n \treturn 0;\n@@ -629,11 +821,14 @@ static int\n dpaa_event_dev_probe(struct rte_vdev_device *vdev)\n {\n \tconst char *name;\n+\tconst char *params;\n \n \tname = rte_vdev_device_name(vdev);\n \tDPAA_EVENTDEV_INFO(\"Initializing %s\", name);\n \n-\treturn dpaa_event_dev_create(name);\n+\tparams = rte_vdev_device_args(vdev);\n+\n+\treturn dpaa_event_dev_create(name, params);\n }\n \n static int\n@@ -653,3 +848,5 @@ static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {\n };\n \n RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);\n+RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,\n+\t\tDISABLE_INTR_MODE \"=<int>\");\ndiff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h\nindex 3994bd6..8134e6b 100644\n--- a/drivers/event/dpaa/dpaa_eventdev.h\n+++ b/drivers/event/dpaa/dpaa_eventdev.h\n@@ -12,8 +12,8 @@\n \n #define EVENTDEV_NAME_DPAA_PMD\t\tevent_dpaa1\n \n-#define DPAA_EVENT_MAX_PORTS\t\t\t8\n-#define DPAA_EVENT_MAX_QUEUES\t\t\t16\n+#define DPAA_EVENT_MAX_PORTS\t\t\t4\n+#define DPAA_EVENT_MAX_QUEUES\t\t\t8\n #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT\t1\n #define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT\t(UINT32_MAX - 1)\n #define DPAA_EVENT_MAX_QUEUE_FLOWS\t\t2048\n@@ -21,7 +21,7 @@\n #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS\t0\n #define 
DPAA_EVENT_MAX_EVENT_PORT\t\tRTE_MIN(RTE_MAX_LCORE, INT8_MAX)\n #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH\t8\n-#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS\t100UL\n+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS\t100000UL\n #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID\t((uint64_t)-1)\n #define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH\t1\n #define DPAA_EVENT_MAX_NUM_EVENTS\t\t(INT32_MAX - 1)\n@@ -54,7 +54,7 @@ struct dpaa_port {\n \tstruct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];\n \tuint8_t num_linked_evq;\n \tuint8_t is_port_linked;\n-\tuint64_t timeout;\n+\tuint64_t timeout_us;\n };\n \n struct dpaa_eventdev {\n@@ -65,7 +65,7 @@ struct dpaa_eventdev {\n \tuint8_t max_event_queues;\n \tuint8_t nb_event_queues;\n \tuint8_t nb_event_ports;\n-\tuint8_t resvd;\n+\tuint8_t intr_mode;\n \tuint32_t nb_event_queue_flows;\n \tuint32_t nb_event_port_dequeue_depth;\n \tuint32_t nb_event_port_enqueue_depth;\n",
    "prefixes": [
        "v2",
        "2/2"
    ]
}