get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
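
A minimal sketch of the "get" verb, written against the third-party Python "requests" library (an assumption; any HTTP client works). Read access on the public instance is assumed to require no authentication. It fetches the same resource whose response is shown below:

import requests

PATCH_ID = 59851  # the patch shown in the sample response below
url = f"https://patches.dpdk.org/api/patches/{PATCH_ID}/"

resp = requests.get(url, timeout=30)
resp.raise_for_status()  # fail loudly on non-2xx responses
patch = resp.json()

# A few of the fields documented in the sample response below
print(patch["name"])   # "[05/11] examples/l3fwd: add event port and queue setup"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # mbox URL; its contents can be applied locally with `git am`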

GET /api/patches/59851/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 59851,
    "url": "https://patches.dpdk.org/api/patches/59851/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20190926100558.24348-6-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190926100558.24348-6-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190926100558.24348-6-pbhagavatula@marvell.com",
    "date": "2019-09-26T10:05:52",
    "name": "[05/11] examples/l3fwd: add event port and queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "cfb878c09cd12ba895f39cef5654a44a32730fd2",
    "submitter": {
        "id": 1183,
        "url": "https://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20190926100558.24348-6-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 6542,
            "url": "https://patches.dpdk.org/api/series/6542/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=6542",
            "date": "2019-09-26T10:05:47",
            "name": "example/l3fwd: introduce event device support",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/6542/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/59851/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/59851/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 025A01BFA2;\n\tThu, 26 Sep 2019 12:07:42 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n\t[67.231.148.174]) by dpdk.org (Postfix) with ESMTP id 271A41BF77\n\tfor <dev@dpdk.org>; Thu, 26 Sep 2019 12:07:39 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n\tby mx0a-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n\tx8QA72W1002599; Thu, 26 Sep 2019 03:07:38 -0700",
            "from sc-exch03.marvell.com ([199.233.58.183])\n\tby mx0a-0016f401.pphosted.com with ESMTP id 2v8ua0002f-5\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); \n\tThu, 26 Sep 2019 03:07:38 -0700",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH03.marvell.com\n\t(10.93.176.83) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tThu, 26 Sep 2019 03:06:21 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Thu, 26 Sep 2019 03:06:20 -0700",
            "from BG-LT7430.marvell.com (unknown [10.28.17.15])\n\tby maili.marvell.com (Postfix) with ESMTP id 334423F7041;\n\tThu, 26 Sep 2019 03:06:17 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-transfer-encoding : content-type; s=pfpt0818;\n\tbh=NvPzi574gtm0MA8pazJVapiR/AmCQZySGCZqBYg0lcQ=;\n\tb=iEX2yVgEEVmZSRP1qRFSCm3hhPyhvXbg/AtKFsJf8EGoOrKEL20v5A4rAj97tbTNT0yn\n\topd0YRK89ZUIfF604kjYYfCjsmM910VuM9uXoRqT0IDlSlGD8kXCKHMF0zNBNf1T8YEe\n\tbpElkfuy9JzYui6C08BhINoTHDIEOv8q1616p/jCeSnVIB5xlBatsBSrVZ67SNI+yaqg\n\tK/LSuXqjdcoBwQhZlNysR0Fgo5r4myvVhGkzjU9kX0oCFq75HNc49vRwahtif0wxFlyt\n\tghOb02UT5juWJTlxXDdYyQzXZHaKZAHnWcKzxv8GPu4McBdjkhIPH8228hdyGBM73NlI\n\trw== ",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, <akhil.goyal@nxp.com>, Marko Kovacevic\n\t<marko.kovacevic@intel.com>, Ori Kam <orika@mellanox.com>,\n\tBruce Richardson\n\t<bruce.richardson@intel.com>, Radu Nicolau <radu.nicolau@intel.com>, \n\t\"Tomasz Kantecki\" <tomasz.kantecki@intel.com>",
        "CC": "<dev@dpdk.org>, Sunil Kumar Kori <skori@marvell.com>",
        "Date": "Thu, 26 Sep 2019 15:35:52 +0530",
        "Message-ID": "<20190926100558.24348-6-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190926100558.24348-1-pbhagavatula@marvell.com>",
        "References": "<20190926100558.24348-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.95,1.0.8\n\tdefinitions=2019-09-26_04:2019-09-25,2019-09-26 signatures=0",
        "Subject": "[dpdk-dev] [PATCH 05/11] examples/l3fwd: add event port and queue\n\tsetup",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Sunil Kumar Kori <skori@marvell.com>\n\nAdd event device queue and port setup based on event eth Tx adapter\ncapabilities.\n\nSigned-off-by: Sunil Kumar Kori <skori@marvell.com>\n---\n examples/l3fwd/l3fwd_eventdev.c               |  30 ++++-\n examples/l3fwd/l3fwd_eventdev.h               |  16 +++\n examples/l3fwd/l3fwd_eventdev_generic.c       | 113 +++++++++++++++++-\n examples/l3fwd/l3fwd_eventdev_internal_port.c | 106 +++++++++++++++-\n 4 files changed, 261 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/examples/l3fwd/l3fwd_eventdev.c b/examples/l3fwd/l3fwd_eventdev.c\nindex f07cd4b31..f5ac3ccce 100644\n--- a/examples/l3fwd/l3fwd_eventdev.c\n+++ b/examples/l3fwd/l3fwd_eventdev.c\n@@ -215,7 +215,6 @@ l3fwd_eventdev_capability_setup(void)\n \t\tl3fwd_eventdev_set_internal_port_ops(&evdev_rsrc->ops);\n }\n \n-\n static uint32_t\n l3fwd_eventdev_setup(uint16_t ethdev_count)\n {\n@@ -267,6 +266,7 @@ l3fwd_eventdev_setup(uint16_t ethdev_count)\n \t\tnum_workers = dev_info.max_event_ports;\n \n \tevent_d_conf.nb_event_ports = num_workers;\n+\tevdev_rsrc->evp.nb_ports = num_workers;\n \tevdev_rsrc->has_burst = !!(dev_info.event_dev_cap &\n \t\t\t\t    RTE_EVENT_DEV_CAP_BURST_MODE);\n \n@@ -278,11 +278,31 @@ l3fwd_eventdev_setup(uint16_t ethdev_count)\n \treturn event_queue_cfg;\n }\n \n+int\n+l3fwd_get_free_event_port(struct l3fwd_eventdev_resources *evdev_rsrc)\n+{\n+\tstatic int index;\n+\tint port_id;\n+\n+\trte_spinlock_lock(&evdev_rsrc->evp.lock);\n+\tif (index >= evdev_rsrc->evp.nb_ports) {\n+\t\tprintf(\"No free event port is available\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tport_id = evdev_rsrc->evp.event_p_id[index];\n+\tindex++;\n+\trte_spinlock_unlock(&evdev_rsrc->evp.lock);\n+\n+\treturn port_id;\n+}\n+\n void\n l3fwd_eventdev_resource_setup(struct rte_eth_conf *port_conf)\n {\n \tstruct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();\n \tuint16_t ethdev_count = rte_eth_dev_count_avail();\n+\tuint32_t event_queue_cfg;\n \tint32_t ret;\n \n \t/* Parse eventdev command line options */\n@@ -300,5 +320,11 @@ l3fwd_eventdev_resource_setup(struct rte_eth_conf *port_conf)\n \tl3fwd_eth_dev_port_setup(port_conf);\n \n \t/* Event device configuration */\n-\tl3fwd_eventdev_setup(ethdev_count);\n+\tevent_queue_cfg = l3fwd_eventdev_setup(ethdev_count);\n+\n+\t/* Event queue configuration */\n+\tevdev_rsrc->ops.event_queue_setup(ethdev_count, event_queue_cfg);\n+\n+\t/* Event port configuration */\n+\tevdev_rsrc->ops.event_port_setup();\n }\ndiff --git a/examples/l3fwd/l3fwd_eventdev.h b/examples/l3fwd/l3fwd_eventdev.h\nindex f63f3d4ef..2640d6cec 100644\n--- a/examples/l3fwd/l3fwd_eventdev.h\n+++ b/examples/l3fwd/l3fwd_eventdev.h\n@@ -29,6 +29,17 @@ typedef void (*event_port_setup_cb)(void);\n typedef void (*service_setup_cb)(void);\n typedef int (*event_loop_cb)(void *);\n \n+struct l3fwd_eventdev_queues {\n+\tuint8_t *event_q_id;\n+\tuint8_t\tnb_queues;\n+};\n+\n+struct l3fwd_eventdev_ports {\n+\tuint8_t *event_p_id;\n+\tuint8_t\tnb_ports;\n+\trte_spinlock_t lock;\n+};\n+\n struct l3fwd_eventdev_setup_ops {\n \tevent_queue_setup_cb event_queue_setup;\n \tevent_port_setup_cb event_port_setup;\n@@ -38,14 +49,18 @@ struct l3fwd_eventdev_setup_ops {\n };\n \n struct l3fwd_eventdev_resources {\n+\tstruct rte_event_port_conf def_p_conf;\n \tuint8_t disable_implicit_release;\n \tstruct l3fwd_eventdev_setup_ops ops;\n \tstruct rte_mempool * (*pkt_pool)[NB_SOCKETS];\n+\tstruct l3fwd_eventdev_queues evq;\n+\tstruct l3fwd_eventdev_ports evp;\n \tuint32_t port_mask;\n \tuint8_t per_port_pool;\n \tuint8_t event_d_id;\n \tuint8_t sync_mode;\n \tuint8_t tx_mode_q;\n+\tuint8_t deq_depth;\n \tuint8_t has_burst;\n \tuint8_t enabled;\n \tuint8_t nb_args;\n@@ -76,6 +91,7 @@ l3fwd_get_eventdev_rsrc(void)\n }\n \n void l3fwd_eventdev_resource_setup(struct rte_eth_conf *port_conf);\n+int l3fwd_get_free_event_port(struct l3fwd_eventdev_resources *eventdev_rsrc);\n void l3fwd_eventdev_set_generic_ops(struct l3fwd_eventdev_setup_ops *ops);\n void 
l3fwd_eventdev_set_internal_port_ops(struct l3fwd_eventdev_setup_ops *ops);\n \ndiff --git a/examples/l3fwd/l3fwd_eventdev_generic.c b/examples/l3fwd/l3fwd_eventdev_generic.c\nindex 35e655fc0..4aec0e403 100644\n--- a/examples/l3fwd/l3fwd_eventdev_generic.c\n+++ b/examples/l3fwd/l3fwd_eventdev_generic.c\n@@ -5,8 +5,119 @@\n #include \"l3fwd.h\"\n #include \"l3fwd_eventdev.h\"\n \n+static void\n+l3fwd_event_port_setup_generic(void)\n+{\n+\tstruct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();\n+\tuint8_t event_d_id = evdev_rsrc->event_d_id;\n+\tstruct rte_event_port_conf event_p_conf = {\n+\t\t.dequeue_depth = 32,\n+\t\t.enqueue_depth = 32,\n+\t\t.new_event_threshold = 4096\n+\t};\n+\tstruct rte_event_port_conf def_p_conf;\n+\tuint8_t event_p_id;\n+\tint32_t ret;\n+\n+\tevdev_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *\n+\t\t\t\t\tevdev_rsrc->evp.nb_ports);\n+\tif (!evdev_rsrc->evp.event_p_id)\n+\t\trte_exit(EXIT_FAILURE, \" No space is available\");\n+\n+\tmemset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));\n+\trte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);\n+\n+\tif (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)\n+\t\tevent_p_conf.new_event_threshold =\n+\t\t\tdef_p_conf.new_event_threshold;\n+\n+\tif (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)\n+\t\tevent_p_conf.dequeue_depth = def_p_conf.dequeue_depth;\n+\n+\tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n+\t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n+\n+\tevent_p_conf.disable_implicit_release =\n+\t\tevdev_rsrc->disable_implicit_release;\n+\tevdev_rsrc->deq_depth = def_p_conf.dequeue_depth;\n+\n+\tfor (event_p_id = 0; event_p_id < evdev_rsrc->evp.nb_ports;\n+\t\t\t\t\t\t\t\tevent_p_id++) {\n+\t\tret = rte_event_port_setup(event_d_id, event_p_id,\n+\t\t\t\t\t   &event_p_conf);\n+\t\tif (ret < 0) {\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t \"Error in configuring event port %d\\n\",\n+\t\t\t\t event_p_id);\n+\t\t}\n+\n+\t\tret = rte_event_port_link(event_d_id, event_p_id,\n+\t\t\t\t\t  evdev_rsrc->evq.event_q_id,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  evdev_rsrc->evq.nb_queues - 1);\n+\t\tif (ret != (evdev_rsrc->evq.nb_queues - 1)) {\n+\t\t\trte_exit(EXIT_FAILURE, \"Error in linking event port %d \"\n+\t\t\t\t \"to event queue\", event_p_id);\n+\t\t}\n+\t\tevdev_rsrc->evp.event_p_id[event_p_id] = event_p_id;\n+\t}\n+\t/* init spinlock */\n+\trte_spinlock_init(&evdev_rsrc->evp.lock);\n+\n+\tevdev_rsrc->def_p_conf = event_p_conf;\n+}\n+\n+static void\n+l3fwd_event_queue_setup_generic(uint16_t ethdev_count,\n+\t\t\t\tuint32_t event_queue_cfg)\n+{\n+\tstruct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();\n+\tuint8_t event_d_id = evdev_rsrc->event_d_id;\n+\tstruct rte_event_queue_conf event_q_conf = {\n+\t\t.nb_atomic_flows = 1024,\n+\t\t.nb_atomic_order_sequences = 1024,\n+\t\t.event_queue_cfg = event_queue_cfg,\n+\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL\n+\t};\n+\tstruct rte_event_queue_conf def_q_conf;\n+\tuint8_t event_q_id;\n+\tint32_t ret;\n+\n+\tevent_q_conf.schedule_type = evdev_rsrc->sync_mode;\n+\tevdev_rsrc->evq.nb_queues = ethdev_count + 1;\n+\tevdev_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *\n+\t\t\t\t\tevdev_rsrc->evq.nb_queues);\n+\tif (!evdev_rsrc->evq.event_q_id)\n+\t\trte_exit(EXIT_FAILURE, \"Memory allocation failure\");\n+\n+\trte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);\n+\tif (def_q_conf.nb_atomic_flows < 
event_q_conf.nb_atomic_flows)\n+\t\tevent_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;\n+\n+\tfor (event_q_id = 0; event_q_id < (evdev_rsrc->evq.nb_queues - 1);\n+\t\t\t\t\t\t\t\tevent_q_id++) {\n+\t\tret = rte_event_queue_setup(event_d_id, event_q_id,\n+\t\t\t\t\t    &event_q_conf);\n+\t\tif (ret < 0) {\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t \"Error in configuring event queue\");\n+\t\t}\n+\t\tevdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;\n+\t}\n+\n+\tevent_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;\n+\tevent_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,\n+\tret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);\n+\tif (ret < 0) {\n+\t\trte_exit(EXIT_FAILURE,\n+\t\t\t \"Error in configuring event queue for Tx adapter\");\n+\t}\n+\tevdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;\n+}\n+\n void\n l3fwd_eventdev_set_generic_ops(struct l3fwd_eventdev_setup_ops *ops)\n {\n-\tRTE_SET_USED(ops);\n+\tops->event_queue_setup = l3fwd_event_queue_setup_generic;\n+\tops->event_port_setup = l3fwd_event_port_setup_generic;\n }\ndiff --git a/examples/l3fwd/l3fwd_eventdev_internal_port.c b/examples/l3fwd/l3fwd_eventdev_internal_port.c\nindex d40185862..363e37899 100644\n--- a/examples/l3fwd/l3fwd_eventdev_internal_port.c\n+++ b/examples/l3fwd/l3fwd_eventdev_internal_port.c\n@@ -5,9 +5,113 @@\n #include \"l3fwd.h\"\n #include \"l3fwd_eventdev.h\"\n \n+static void\n+l3fwd_event_port_setup_internal_port(void)\n+{\n+\tstruct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();\n+\tuint8_t event_d_id = evdev_rsrc->event_d_id;\n+\tstruct rte_event_port_conf event_p_conf = {\n+\t\t.dequeue_depth = 32,\n+\t\t.enqueue_depth = 32,\n+\t\t.new_event_threshold = 4096\n+\t};\n+\tstruct rte_event_port_conf def_p_conf;\n+\tuint8_t event_p_id;\n+\tint32_t ret;\n+\n+\tevdev_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *\n+\t\t\t\t\tevdev_rsrc->evp.nb_ports);\n+\tif (!evdev_rsrc->evp.event_p_id)\n+\t\trte_exit(EXIT_FAILURE,\n+\t\t\t \"Failed to allocate memory for Event Ports\");\n+\n+\trte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);\n+\tif (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)\n+\t\tevent_p_conf.new_event_threshold =\n+\t\t\t\t\t\tdef_p_conf.new_event_threshold;\n+\n+\tif (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)\n+\t\tevent_p_conf.dequeue_depth = def_p_conf.dequeue_depth;\n+\n+\tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n+\t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n+\n+\tevent_p_conf.disable_implicit_release =\n+\t\tevdev_rsrc->disable_implicit_release;\n+\n+\tfor (event_p_id = 0; event_p_id < evdev_rsrc->evp.nb_ports;\n+\t\t\t\t\t\t\t\tevent_p_id++) {\n+\t\tret = rte_event_port_setup(event_d_id, event_p_id,\n+\t\t\t\t\t   &event_p_conf);\n+\t\tif (ret < 0) {\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t \"Error in configuring event port %d\\n\",\n+\t\t\t\t event_p_id);\n+\t\t}\n+\n+\t\tret = rte_event_port_link(event_d_id, event_p_id, NULL,\n+\t\t\t\t\t  NULL, 0);\n+\t\tif (ret < 0) {\n+\t\t\trte_exit(EXIT_FAILURE, \"Error in linking event port %d \"\n+\t\t\t\t \"to event queue\", event_p_id);\n+\t\t}\n+\t\tevdev_rsrc->evp.event_p_id[event_p_id] = event_p_id;\n+\n+\t\t/* init spinlock */\n+\t\trte_spinlock_init(&evdev_rsrc->evp.lock);\n+\t}\n+\n+\tevdev_rsrc->def_p_conf = event_p_conf;\n+}\n+\n+static void\n+l3fwd_event_queue_setup_internal_port(uint16_t ethdev_count,\n+\t\t\t\t      uint32_t event_queue_cfg)\n+{\n+\tstruct l3fwd_eventdev_resources 
*evdev_rsrc = l3fwd_get_eventdev_rsrc();\n+\tuint8_t event_d_id = evdev_rsrc->event_d_id;\n+\tstruct rte_event_queue_conf event_q_conf = {\n+\t\t.nb_atomic_flows = 1024,\n+\t\t.nb_atomic_order_sequences = 1024,\n+\t\t.event_queue_cfg = event_queue_cfg,\n+\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL\n+\t};\n+\tstruct rte_event_queue_conf def_q_conf;\n+\tuint8_t event_q_id = 0;\n+\tint32_t ret;\n+\n+\trte_event_queue_default_conf_get(event_d_id, event_q_id, &def_q_conf);\n+\n+\tif (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)\n+\t\tevent_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;\n+\n+\tif (def_q_conf.nb_atomic_order_sequences <\n+\t\t\t\t\tevent_q_conf.nb_atomic_order_sequences)\n+\t\tevent_q_conf.nb_atomic_order_sequences =\n+\t\t\t\t\tdef_q_conf.nb_atomic_order_sequences;\n+\n+\tevent_q_conf.event_queue_cfg = event_queue_cfg;\n+\tevent_q_conf.schedule_type = evdev_rsrc->sync_mode;\n+\tevdev_rsrc->evq.nb_queues = ethdev_count;\n+\tevdev_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *\n+\t\t\t\t\tevdev_rsrc->evq.nb_queues);\n+\tif (!evdev_rsrc->evq.event_q_id)\n+\t\trte_exit(EXIT_FAILURE, \"Memory allocation failure\");\n+\n+\tfor (event_q_id = 0; event_q_id < ethdev_count; event_q_id++) {\n+\t\tret = rte_event_queue_setup(event_d_id, event_q_id,\n+\t\t\t\t\t    &event_q_conf);\n+\t\tif (ret < 0) {\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t \"Error in configuring event queue\");\n+\t\t}\n+\t\tevdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;\n+\t}\n+}\n \n void\n l3fwd_eventdev_set_internal_port_ops(struct l3fwd_eventdev_setup_ops *ops)\n {\n-\tRTE_SET_USED(ops);\n+\tops->event_queue_setup = l3fwd_event_queue_setup_internal_port;\n+\tops->event_port_setup = l3fwd_event_port_setup_internal_port;\n }\n",
    "prefixes": [
        "05/11"
    ]
}
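
The "patch" and "put" verbs require authentication and maintainer permissions on the project. Below is a minimal sketch of a partial update, assuming Patchwork's token authentication (an "Authorization: Token ..." header) and assuming that "state" and "archived" are writable fields, as suggested by the response above; verify both against your instance before relying on this:

import requests

API_TOKEN = "..."  # placeholder; issue a token from your Patchwork user profile
url = "https://patches.dpdk.org/api/patches/59851/"

resp = requests.patch(
    url,
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": False},  # PATCH sends only the fields to change
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])

PUT behaves the same way but expects a complete representation of the patch, whereas PATCH accepts just the fields being changed.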