get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
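
These operations can also be driven from a script. A minimal sketch using Python's requests library, mirroring the GET transaction shown below; the API token and maintainer permissions needed for the PATCH call are assumptions, not part of this page:

import requests

API = "https://patches.dpdk.org/api"
PATCH_ID = 45098  # the patch shown below

# GET: read the patch; no authentication is needed for public projects.
resp = requests.get(f"{API}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])

# PATCH: partial update of writable fields such as "state" or "archived".
# Assumed to require an API token with maintainer rights on the project;
# the token below is a placeholder, not a real credential.
headers = {"Authorization": "Token 0000000000000000000000000000000000000000"}
resp = requests.patch(f"{API}/patches/{PATCH_ID}/",
                      headers=headers,
                      json={"archived": True})
resp.raise_for_status()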

GET /api/patches/45098/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 45098,
    "url": "https://patches.dpdk.org/api/patches/45098/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1537530366-4722-5-git-send-email-hemant.agrawal@nxp.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1537530366-4722-5-git-send-email-hemant.agrawal@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1537530366-4722-5-git-send-email-hemant.agrawal@nxp.com",
    "date": "2018-09-21T11:46:06",
    "name": "[v2,5/5] event/dpaa2: affining portal at runtime during I/O",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "6f42bed62bbcf8ab7ade06663c4529a9083127f8",
    "submitter": {
        "id": 477,
        "url": "https://patches.dpdk.org/api/people/477/?format=api",
        "name": "Hemant Agrawal",
        "email": "hemant.agrawal@nxp.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1537530366-4722-5-git-send-email-hemant.agrawal@nxp.com/mbox/",
    "series": [
        {
            "id": 1438,
            "url": "https://patches.dpdk.org/api/series/1438/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=1438",
            "date": "2018-09-21T11:46:03",
            "name": "[v2,1/5] event/dpaa2: fix mbuf assignment in atomic processing",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/1438/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/45098/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/45098/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 004884F9B;\n\tFri, 21 Sep 2018 13:48:25 +0200 (CEST)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n\tby dpdk.org (Postfix) with ESMTP id C83404CA5\n\tfor <dev@dpdk.org>; Fri, 21 Sep 2018 13:48:20 +0200 (CEST)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n\tby inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id ABB981A02B4;\n\tFri, 21 Sep 2018 13:48:20 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n\t[165.114.16.14])\n\tby inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 031DE1A02C6;\n\tFri, 21 Sep 2018 13:48:18 +0200 (CEST)",
            "from bf-netperf1.ap.freescale.net (bf-netperf1.ap.freescale.net\n\t[10.232.134.28])\n\tby invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id EF934402F8;\n\tFri, 21 Sep 2018 19:48:12 +0800 (SGT)"
        ],
        "From": "Hemant Agrawal <hemant.agrawal@nxp.com>",
        "To": "dev@dpdk.org",
        "Cc": "jerin.jacob@caviumnetworks.com, Hemant Agrawal <hemant.agrawal@nxp.com>, \n\tSunil Kumar Kori <sunil.kori@nxp.com>",
        "Date": "Fri, 21 Sep 2018 17:16:06 +0530",
        "Message-Id": "<1537530366-4722-5-git-send-email-hemant.agrawal@nxp.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1537530366-4722-1-git-send-email-hemant.agrawal@nxp.com>",
        "References": "<1535609039-10869-1-git-send-email-hemant.agrawal@nxp.com>\n\t<1537530366-4722-1-git-send-email-hemant.agrawal@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH v2 5/5] event/dpaa2: affining portal at runtime\n\tduring I/O",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch restructure the code to have the QBMAN portal\naffliated at run time for per lcore basis.\nThe device cleanup is also improved.\n\nSigned-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>\nSigned-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>\n---\nv5: fixed a compilation error on x86\n\n drivers/event/dpaa2/dpaa2_eventdev.c | 277 ++++++++++++++++++++++++-----------\n drivers/event/dpaa2/dpaa2_eventdev.h |   9 ++\n 2 files changed, 198 insertions(+), 88 deletions(-)",
    "diff": "diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c\nindex 456b446..24df8d7 100644\n--- a/drivers/event/dpaa2/dpaa2_eventdev.c\n+++ b/drivers/event/dpaa2/dpaa2_eventdev.c\n@@ -54,31 +54,60 @@ static uint16_t\n dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],\n \t\t\t     uint16_t nb_events)\n {\n-\tstruct rte_eventdev *ev_dev =\n-\t\t\t((struct dpaa2_io_portal_t *)port)->eventdev;\n-\tstruct dpaa2_eventdev *priv = ev_dev->data->dev_private;\n+\n+\tstruct dpaa2_port *dpaa2_portal = port;\n+\tstruct dpaa2_dpio_dev *dpio_dev;\n \tuint32_t queue_id = ev[0].queue_id;\n-\tstruct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];\n+\tstruct dpaa2_eventq *evq_info;\n \tuint32_t fqid;\n \tstruct qbman_swp *swp;\n \tstruct qbman_fd fd_arr[MAX_TX_RING_SLOTS];\n \tuint32_t loop, frames_to_send;\n \tstruct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];\n \tuint16_t num_tx = 0;\n-\tint ret;\n-\n-\tRTE_SET_USED(port);\n+\tint i, n, ret;\n+\tuint8_t channel_index;\n \n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n+\t\t/* Affine current thread context to a qman portal */\n \t\tret = dpaa2_affine_qbman_swp();\n-\t\tif (ret) {\n+\t\tif (ret < 0) {\n \t\t\tDPAA2_EVENTDEV_ERR(\"Failure in affining portal\");\n \t\t\treturn 0;\n \t\t}\n \t}\n-\n+\t/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */\n+\tdpio_dev = DPAA2_PER_LCORE_DPIO;\n \tswp = DPAA2_PER_LCORE_PORTAL;\n \n+\tif (likely(dpaa2_portal->is_port_linked))\n+\t\tgoto skip_linking;\n+\n+\t/* Create mapping between portal and channel to receive packets */\n+\tfor (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {\n+\t\tevq_info = &dpaa2_portal->evq_info[i];\n+\t\tif (!evq_info->event_port)\n+\t\t\tcontinue;\n+\n+\t\tret = dpio_add_static_dequeue_channel(dpio_dev->dpio,\n+\t\t\t\t\t\t      CMD_PRI_LOW,\n+\t\t\t\t\t\t      dpio_dev->token,\n+\t\t\t\t\t\t      evq_info->dpcon->dpcon_id,\n+\t\t\t\t\t\t      &channel_index);\n+\t\tif (ret < 0) {\n+\t\t\tDPAA2_EVENTDEV_ERR(\n+\t\t\t\t\"Static dequeue config failed: err(%d)\", ret);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\tqbman_swp_push_set(swp, channel_index, 1);\n+\t\tevq_info->dpcon->channel_index = channel_index;\n+\t}\n+\tdpaa2_portal->is_port_linked = true;\n+\n+skip_linking:\n+\tevq_info = &dpaa2_portal->evq_info[queue_id];\n+\n \twhile (nb_events) {\n \t\tframes_to_send = (nb_events >> 3) ?\n \t\t\tMAX_TX_RING_SLOTS : nb_events;\n@@ -99,14 +128,14 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],\n \t\t\tqbman_eq_desc_set_no_orp(&eqdesc[loop], 0);\n \t\t\tqbman_eq_desc_set_response(&eqdesc[loop], 0, 0);\n \n-\t\t\tif (event->mbuf->seqn) {\n+\t\t\tif (event->sched_type == RTE_SCHED_TYPE_ATOMIC\n+\t\t\t\t&& event->mbuf->seqn) {\n \t\t\t\tuint8_t dqrr_index = event->mbuf->seqn - 1;\n \n \t\t\t\tqbman_eq_desc_set_dca(&eqdesc[loop], 1,\n \t\t\t\t\t\t      dqrr_index, 0);\n \t\t\t\tDPAA2_PER_LCORE_DQRR_SIZE--;\n-\t\t\t\tDPAA2_PER_LCORE_DQRR_HELD &=\n-\t\t\t\t\t~(1 << dqrr_index);\n+\t\t\t\tDPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);\n \t\t\t}\n \n \t\t\tmemset(&fd_arr[loop], 0, sizeof(struct qbman_fd));\n@@ -116,7 +145,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],\n \t\t\t * to avoid copy\n \t\t\t */\n \t\t\tstruct rte_event *ev_temp = rte_malloc(NULL,\n-\t\t\t\tsizeof(struct rte_event), 0);\n+\t\t\t\t\t\tsizeof(struct rte_event), 0);\n \n \t\t\tif (!ev_temp) {\n \t\t\t\tif (!loop)\n@@ -143,6 +172,18 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],\n \t}\n \n 
\treturn num_tx;\n+err:\n+\tfor (n = 0; n < i; n++) {\n+\t\tevq_info = &dpaa2_portal->evq_info[n];\n+\t\tif (!evq_info->event_port)\n+\t\t\tcontinue;\n+\t\tqbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);\n+\t\tdpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,\n+\t\t\t\t\t\tdpio_dev->token,\n+\t\t\t\t\t\tevq_info->dpcon->dpcon_id);\n+\t}\n+\treturn 0;\n+\n }\n \n static uint16_t\n@@ -205,22 +246,53 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],\n \t\t\t     uint16_t nb_events, uint64_t timeout_ticks)\n {\n \tconst struct qbman_result *dq;\n+\tstruct dpaa2_dpio_dev *dpio_dev = NULL;\n+\tstruct dpaa2_port *dpaa2_portal = port;\n+\tstruct dpaa2_eventq *evq_info;\n \tstruct qbman_swp *swp;\n \tconst struct qbman_fd *fd;\n \tstruct dpaa2_queue *rxq;\n-\tint num_pkts = 0, ret, i = 0;\n-\n-\tRTE_SET_USED(port);\n+\tint num_pkts = 0, ret, i = 0, n;\n+\tuint8_t channel_index;\n \n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n+\t\t/* Affine current thread context to a qman portal */\n \t\tret = dpaa2_affine_qbman_swp();\n-\t\tif (ret) {\n+\t\tif (ret < 0) {\n \t\t\tDPAA2_EVENTDEV_ERR(\"Failure in affining portal\");\n \t\t\treturn 0;\n \t\t}\n \t}\n+\n+\tdpio_dev = DPAA2_PER_LCORE_DPIO;\n \tswp = DPAA2_PER_LCORE_PORTAL;\n \n+\tif (likely(dpaa2_portal->is_port_linked))\n+\t\tgoto skip_linking;\n+\n+\t/* Create mapping between portal and channel to receive packets */\n+\tfor (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {\n+\t\tevq_info = &dpaa2_portal->evq_info[i];\n+\t\tif (!evq_info->event_port)\n+\t\t\tcontinue;\n+\n+\t\tret = dpio_add_static_dequeue_channel(dpio_dev->dpio,\n+\t\t\t\t\t\t      CMD_PRI_LOW,\n+\t\t\t\t\t\t      dpio_dev->token,\n+\t\t\t\t\t\t      evq_info->dpcon->dpcon_id,\n+\t\t\t\t\t\t      &channel_index);\n+\t\tif (ret < 0) {\n+\t\t\tDPAA2_EVENTDEV_ERR(\n+\t\t\t\t\"Static dequeue config failed: err(%d)\", ret);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\tqbman_swp_push_set(swp, channel_index, 1);\n+\t\tevq_info->dpcon->channel_index = channel_index;\n+\t}\n+\tdpaa2_portal->is_port_linked = true;\n+\n+skip_linking:\n \t/* Check if there are atomic contexts to be released */\n \twhile (DPAA2_PER_LCORE_DQRR_SIZE) {\n \t\tif (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {\n@@ -259,6 +331,18 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],\n \t} while (num_pkts < nb_events);\n \n \treturn num_pkts;\n+err:\n+\tfor (n = 0; n < i; n++) {\n+\t\tevq_info = &dpaa2_portal->evq_info[n];\n+\t\tif (!evq_info->event_port)\n+\t\t\tcontinue;\n+\n+\t\tqbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);\n+\t\tdpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,\n+\t\t\t\t\t\t\tdpio_dev->token,\n+\t\t\t\t\t\tevq_info->dpcon->dpcon_id);\n+\t}\n+\treturn 0;\n }\n \n static uint16_t\n@@ -387,31 +471,39 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,\n \tqueue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;\n }\n \n-static void\n-dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)\n-{\n-\tEVENTDEV_INIT_FUNC_TRACE();\n-\n-\tRTE_SET_USED(dev);\n-\tRTE_SET_USED(queue_id);\n-}\n-\n static int\n dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,\n \t\t\t   const struct rte_event_queue_conf *queue_conf)\n {\n \tstruct dpaa2_eventdev *priv = dev->data->dev_private;\n-\tstruct dpaa2_eventq *evq_info =\n-\t\t&priv->evq_info[queue_id];\n+\tstruct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];\n \n \tEVENTDEV_INIT_FUNC_TRACE();\n \n+\tswitch (queue_conf->schedule_type) {\n+\tcase 
RTE_SCHED_TYPE_PARALLEL:\n+\tcase RTE_SCHED_TYPE_ATOMIC:\n+\t\tbreak;\n+\tcase RTE_SCHED_TYPE_ORDERED:\n+\t\tDPAA2_EVENTDEV_ERR(\"Schedule type is not supported.\");\n+\t\treturn -1;\n+\t}\n \tevq_info->event_queue_cfg = queue_conf->event_queue_cfg;\n+\tevq_info->event_queue_id = queue_id;\n \n \treturn 0;\n }\n \n static void\n+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)\n+{\n+\tEVENTDEV_INIT_FUNC_TRACE();\n+\n+\tRTE_SET_USED(dev);\n+\tRTE_SET_USED(queue_id);\n+}\n+\n+static void\n dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,\n \t\t\t     struct rte_event_port_conf *port_conf)\n {\n@@ -419,7 +511,6 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,\n \n \tRTE_SET_USED(dev);\n \tRTE_SET_USED(port_id);\n-\tRTE_SET_USED(port_conf);\n \n \tport_conf->new_event_threshold =\n \t\tDPAA2_EVENT_MAX_NUM_EVENTS;\n@@ -430,56 +521,44 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,\n \tport_conf->disable_implicit_release = 0;\n }\n \n-static void\n-dpaa2_eventdev_port_release(void *port)\n-{\n-\tEVENTDEV_INIT_FUNC_TRACE();\n-\n-\tRTE_SET_USED(port);\n-}\n-\n static int\n dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,\n \t\t\t  const struct rte_event_port_conf *port_conf)\n {\n+\tchar event_port_name[32];\n+\tstruct dpaa2_port *portal;\n+\n \tEVENTDEV_INIT_FUNC_TRACE();\n \n \tRTE_SET_USED(port_conf);\n \n-\tif (!dpaa2_io_portal[port_id].dpio_dev) {\n-\t\tdpaa2_io_portal[port_id].dpio_dev =\n-\t\t\t\tdpaa2_get_qbman_swp(port_id);\n-\t\trte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);\n-\t\tif (!dpaa2_io_portal[port_id].dpio_dev)\n-\t\t\treturn -1;\n+\tsprintf(event_port_name, \"event-port-%d\", port_id);\n+\tportal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);\n+\tif (!portal) {\n+\t\tDPAA2_EVENTDEV_ERR(\"Memory allocation failure\");\n+\t\treturn -ENOMEM;\n \t}\n \n-\tdpaa2_io_portal[port_id].eventdev = dev;\n-\tdev->data->ports[port_id] = &dpaa2_io_portal[port_id];\n+\tmemset(portal, 0, sizeof(struct dpaa2_port));\n+\tdev->data->ports[port_id] = portal;\n \treturn 0;\n }\n \n-static int\n-dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,\n-\t\t\t   uint8_t queues[], uint16_t nb_unlinks)\n+static void\n+dpaa2_eventdev_port_release(void *port)\n {\n-\tstruct dpaa2_eventdev *priv = dev->data->dev_private;\n-\tstruct dpaa2_io_portal_t *dpaa2_portal = port;\n-\tstruct dpaa2_eventq *evq_info;\n-\tint i;\n+\tstruct dpaa2_port *portal = port;\n \n \tEVENTDEV_INIT_FUNC_TRACE();\n \n-\tfor (i = 0; i < nb_unlinks; i++) {\n-\t\tevq_info = &priv->evq_info[queues[i]];\n-\t\tqbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,\n-\t\t\t\t   evq_info->dpcon->channel_index, 0);\n-\t\tdpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,\n-\t\t\t\t\t0, dpaa2_portal->dpio_dev->token,\n-\t\t\tevq_info->dpcon->dpcon_id);\n-\t}\n+\t/* TODO: Cleanup is required when ports are in linked state. 
*/\n+\tif (portal->is_port_linked)\n+\t\tDPAA2_EVENTDEV_WARN(\"Event port must be unlinked before release\");\n \n-\treturn (int)nb_unlinks;\n+\tif (portal)\n+\t\trte_free(portal);\n+\n+\tportal = NULL;\n }\n \n static int\n@@ -488,46 +567,66 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,\n \t\t\tuint16_t nb_links)\n {\n \tstruct dpaa2_eventdev *priv = dev->data->dev_private;\n-\tstruct dpaa2_io_portal_t *dpaa2_portal = port;\n+\tstruct dpaa2_port *dpaa2_portal = port;\n \tstruct dpaa2_eventq *evq_info;\n-\tuint8_t channel_index;\n-\tint ret, i, n;\n+\tuint16_t i;\n \n \tEVENTDEV_INIT_FUNC_TRACE();\n \n+\tRTE_SET_USED(priorities);\n+\n \tfor (i = 0; i < nb_links; i++) {\n \t\tevq_info = &priv->evq_info[queues[i]];\n+\t\tmemcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,\n+\t\t\t   sizeof(struct dpaa2_eventq));\n+\t\tdpaa2_portal->evq_info[queues[i]].event_port = port;\n+\t\tdpaa2_portal->num_linked_evq++;\n+\t}\n \n-\t\tret = dpio_add_static_dequeue_channel(\n-\t\t\tdpaa2_portal->dpio_dev->dpio,\n-\t\t\tCMD_PRI_LOW, dpaa2_portal->dpio_dev->token,\n-\t\t\tevq_info->dpcon->dpcon_id, &channel_index);\n-\t\tif (ret < 0) {\n-\t\t\tDPAA2_EVENTDEV_ERR(\n-\t\t\t\t\"Static dequeue config failed: err(%d)\", ret);\n-\t\t\tgoto err;\n-\t\t}\n+\treturn (int)nb_links;\n+}\n \n-\t\tqbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,\n-\t\t\t\t   channel_index, 1);\n-\t\tevq_info->dpcon->channel_index = channel_index;\n-\t}\n+static int\n+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,\n+\t\t\t   uint8_t queues[], uint16_t nb_unlinks)\n+{\n+\tstruct dpaa2_port *dpaa2_portal = port;\n+\tint i;\n+\tstruct dpaa2_dpio_dev *dpio_dev = NULL;\n+\tstruct dpaa2_eventq *evq_info;\n+\tstruct qbman_swp *swp;\n \n-\tRTE_SET_USED(priorities);\n+\tEVENTDEV_INIT_FUNC_TRACE();\n \n-\treturn (int)nb_links;\n-err:\n-\tfor (n = 0; n < i; n++) {\n-\t\tevq_info = &priv->evq_info[queues[n]];\n-\t\tqbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,\n-\t\t\t\t   evq_info->dpcon->channel_index, 0);\n-\t\tdpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,\n-\t\t\t\t\t0, dpaa2_portal->dpio_dev->token,\n-\t\t\tevq_info->dpcon->dpcon_id);\n+\tRTE_SET_USED(dev);\n+\tRTE_SET_USED(queues);\n+\n+\tfor (i = 0; i < nb_unlinks; i++) {\n+\t\tevq_info = &dpaa2_portal->evq_info[queues[i]];\n+\n+\t\tif (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {\n+\t\t\t/* todo dpaa2_portal shall have dpio_dev-no per lcore*/\n+\t\t\tdpio_dev = DPAA2_PER_LCORE_DPIO;\n+\t\t\tswp = DPAA2_PER_LCORE_PORTAL;\n+\n+\t\t\tqbman_swp_push_set(swp,\n+\t\t\t\t\tevq_info->dpcon->channel_index, 0);\n+\t\t\tdpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,\n+\t\t\t\t\t\tdpio_dev->token,\n+\t\t\t\t\t\tevq_info->dpcon->dpcon_id);\n+\t\t}\n+\t\tmemset(evq_info, 0, sizeof(struct dpaa2_eventq));\n+\t\tif (dpaa2_portal->num_linked_evq)\n+\t\t\tdpaa2_portal->num_linked_evq--;\n \t}\n-\treturn ret;\n+\n+\tif (!dpaa2_portal->num_linked_evq)\n+\t\tdpaa2_portal->is_port_linked = false;\n+\n+\treturn (int)nb_unlinks;\n }\n \n+\n static int\n dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,\n \t\t\t     uint64_t *timeout_ticks)\n@@ -806,6 +905,8 @@ dpaa2_eventdev_create(const char *name)\n \t\tpriv->max_event_queues++;\n \t} while (dpcon_dev && dpci_dev);\n \n+\tRTE_LOG(INFO, PMD, \"%s eventdev created\\n\", name);\n+\n \treturn 0;\n fail:\n \treturn -EFAULT;\ndiff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h\nindex 8898024..720e0c6 100644\n--- 
a/drivers/event/dpaa2/dpaa2_eventdev.h\n+++ b/drivers/event/dpaa2/dpaa2_eventdev.h\n@@ -62,11 +62,20 @@ struct dpaa2_eventq {\n \tstruct dpaa2_dpcon_dev *dpcon;\n \t/* Attached DPCI device */\n \tstruct dpaa2_dpci_dev *dpci;\n+\t/* Mapped event port */\n+\tstruct dpaa2_io_portal_t *event_port;\n \t/* Configuration provided by the user */\n \tuint32_t event_queue_cfg;\n \tuint32_t event_queue_id;\n };\n \n+struct dpaa2_port {\n+\tstruct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];\n+\tuint8_t num_linked_evq;\n+\tuint8_t is_port_linked;\n+\tuint64_t timeout_us;\n+};\n+\n struct dpaa2_eventdev {\n \tstruct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];\n \tuint32_t dequeue_timeout_ns;\n",
    "prefixes": [
        "v2",
        "5/5"
    ]
}
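
The fields in the response can be consumed directly: the mbox URL returns the raw patch suitable for git am, the checks URL lists individual CI results, and series links back to the cover series. A short sketch using only field names taken from the JSON above (the output file name is illustrative):

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/45098/").json()

# Basic metadata straight from the response
print("Name:      ", patch["name"])
print("State:     ", patch["state"])                  # e.g. "accepted"
print("Submitter: ", patch["submitter"]["email"])
print("Series:    ", [s["web_url"] for s in patch["series"]])
print("CI result: ", patch["check"])                  # aggregate of the "checks" endpoint

# Download the raw patch (mbox) so it can be applied with `git am`
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("45098.mbox", "w") as f:
    f.write(mbox.text)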