get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/63649/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 63649,
    "url": "http://patches.dpdk.org/api/patches/63649/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1575808249-31135-9-git-send-email-anoobj@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1575808249-31135-9-git-send-email-anoobj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1575808249-31135-9-git-send-email-anoobj@marvell.com",
    "date": "2019-12-08T12:30:43",
    "name": "[08/14] examples/ipsec-secgw: add support for internal ports",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fea3bfa1b1b3ae51efa7e8e3933bdf15181b405d",
    "submitter": {
        "id": 1205,
        "url": "http://patches.dpdk.org/api/people/1205/?format=api",
        "name": "Anoob Joseph",
        "email": "anoobj@marvell.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1575808249-31135-9-git-send-email-anoobj@marvell.com/mbox/",
    "series": [
        {
            "id": 7750,
            "url": "http://patches.dpdk.org/api/series/7750/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7750",
            "date": "2019-12-08T12:30:35",
            "name": "add eventmode to ipsec-secgw",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/7750/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/63649/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/63649/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DB3F8A04F1;\n\tSun,  8 Dec 2019 13:32:42 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C465C1BFA3;\n\tSun,  8 Dec 2019 13:32:01 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174]) by dpdk.org (Postfix) with ESMTP id 9EDAF1BFA3\n for <dev@dpdk.org>; Sun,  8 Dec 2019 13:31:59 +0100 (CET)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n xB8CVHUA021928; Sun, 8 Dec 2019 04:31:58 -0800",
            "from sc-exch01.marvell.com ([199.233.58.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 2wrbawjm70-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Sun, 08 Dec 2019 04:31:58 -0800",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH01.marvell.com\n (10.93.176.81) with Microsoft SMTP Server (TLS) id 15.0.1367.3; Sun, 8 Dec\n 2019 04:31:57 -0800",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n (10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n Transport; Sun, 8 Dec 2019 04:31:57 -0800",
            "from ajoseph83.caveonetworks.com.com (unknown [10.29.45.60])\n by maili.marvell.com (Postfix) with ESMTP id D7B583F703F;\n Sun,  8 Dec 2019 04:31:52 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0818;\n bh=Cn47jyragqnZoYUzhI5S38xjKAjpqFJCo8z6Krg8R5g=;\n b=dYolnNDezcZZgtEq8ujuZwVUM+f2h8b8iY4YW6bu42bJKidI/LqZaqSLi8PE6SqqNqC7\n aQjzuaITdRZ+hxZj3YTWpJHaahchMRNeUc6M+fUxVi9Ial3bl7e1rgJAu3cZkaRruzZ0\n vSc7Bp/gPoghiutxCXDxkH6PRAb7rhzsOV4pjaGYljKceQL8AQbZp2TrU5btps7OFs7/\n NnlPG8X4GndHPMsMGsgDlpmwAiKni4JAIX1Wyz7YJeVuF7KaDKj4ur4H76vykgVpDVZT\n NwIke5YBmKgGtTHM6LlQb0h/gr/79IwO1xsIhZmk/2uRoS6VgZEBW8jqlT7GLBoQ/mTf XA==",
        "From": "Anoob Joseph <anoobj@marvell.com>",
        "To": "Akhil Goyal <akhil.goyal@nxp.com>, Radu Nicolau <radu.nicolau@intel.com>,\n Thomas Monjalon <thomas@monjalon.net>",
        "CC": "Lukasz Bartosik <lbartosik@marvell.com>, Jerin Jacob <jerinj@marvell.com>,\n Narayana Prasad <pathreya@marvell.com>,\n Ankur Dwivedi <adwivedi@marvell.com>, Anoob Joseph <anoobj@marvell.com>,\n Archana Muniganti <marchana@marvell.com>,\n Tejasree Kondoj <ktejasree@marvell.com>, Vamsi Attunuru\n <vattunuru@marvell.com>,\n Konstantin Ananyev <konstantin.ananyev@intel.com>, <dev@dpdk.org>",
        "Date": "Sun, 8 Dec 2019 18:00:43 +0530",
        "Message-ID": "<1575808249-31135-9-git-send-email-anoobj@marvell.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1575808249-31135-1-git-send-email-anoobj@marvell.com>",
        "References": "<1575808249-31135-1-git-send-email-anoobj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.95,18.0.572\n definitions=2019-12-08_03:2019-12-05,2019-12-08 signatures=0",
        "Subject": "[dpdk-dev] [PATCH 08/14] examples/ipsec-secgw: add support for\n\tinternal ports",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Lukasz Bartosik <lbartosik@marvell.com>\n\nAdd support for Rx and Tx internal ports. When internal ports are\navailable then a packet can be received from eth port and forwarded\nto event queue by HW without any software intervention. The same\napplies to Tx side where a packet sent to an event queue can by\nforwarded by HW to eth port without any software intervention.\n\nSigned-off-by: Anoob Joseph <anoobj@marvell.com>\nSigned-off-by: Lukasz Bartosik <lbartosik@marvell.com>\n---\n examples/ipsec-secgw/event_helper.c | 231 ++++++++++++++++++++++++++++--------\n examples/ipsec-secgw/event_helper.h |  11 ++\n 2 files changed, 195 insertions(+), 47 deletions(-)",
    "diff": "diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c\nindex a67132a..6549875 100644\n--- a/examples/ipsec-secgw/event_helper.c\n+++ b/examples/ipsec-secgw/event_helper.c\n@@ -100,6 +100,39 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)\n \n \treturn &(em_conf->eventdev_config[i]);\n }\n+\n+static inline bool\n+eh_dev_has_rx_internal_port(uint8_t eventdev_id)\n+{\n+\tint j;\n+\tbool flag = true;\n+\n+\tRTE_ETH_FOREACH_DEV(j) {\n+\t\tuint32_t caps = 0;\n+\n+\t\trte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\tflag = false;\n+\t}\n+\treturn flag;\n+}\n+\n+static inline bool\n+eh_dev_has_tx_internal_port(uint8_t eventdev_id)\n+{\n+\tint j;\n+\tbool flag = true;\n+\n+\tRTE_ETH_FOREACH_DEV(j) {\n+\t\tuint32_t caps = 0;\n+\n+\t\trte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\tflag = false;\n+\t}\n+\treturn flag;\n+}\n+\n static inline bool\n eh_dev_has_burst_mode(uint8_t dev_id)\n {\n@@ -115,7 +148,9 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n {\n \tstruct eventdev_params *eventdev_config;\n \tstruct rte_event_dev_info dev_info;\n+\tint lcore_count;\n \tint nb_eventdev;\n+\tint nb_eth_dev;\n \tint i, ret;\n \n \t/* Get the number of event devices */\n@@ -126,6 +161,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \t\treturn -EINVAL;\n \t}\n \n+\t/* Get the number of eth devs */\n+\tnb_eth_dev = rte_eth_dev_count_avail();\n+\n+\tif (nb_eth_dev == 0) {\n+\t\tEH_LOG_ERR(\"No eth devices detected\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Get the number of lcores */\n+\tlcore_count = rte_lcore_count();\n+\n \tfor (i = 0; i < nb_eventdev; i++) {\n \n \t\t/* Get the event dev conf */\n@@ -152,11 +198,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n 
\t\teventdev_config->ev_queue_mode =\n \t\t\t\tRTE_EVENT_QUEUE_CFG_ALL_TYPES;\n \n-\t\t/* One port is required for eth Rx adapter */\n-\t\teventdev_config->nb_eventport -= 1;\n+\t\t/* Check if there are more queues than required */\n+\t\tif (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {\n+\t\t\t/* One queue is reserved for Tx */\n+\t\t\teventdev_config->nb_eventqueue = nb_eth_dev + 1;\n+\t\t}\n \n-\t\t/* One port is reserved for eth Tx adapter */\n-\t\teventdev_config->nb_eventport -= 1;\n+\t\t/* Check if there are more ports than required */\n+\t\tif (eventdev_config->nb_eventport > lcore_count) {\n+\t\t\t/* One port per lcore is enough */\n+\t\t\teventdev_config->nb_eventport = lcore_count;\n+\t\t}\n \n \t\t/* Update the number of event devices */\n \t\tem_conf->nb_eventdev++;\n@@ -165,6 +217,42 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \treturn 0;\n }\n \n+static void\n+eh_do_capability_check(struct eventmode_conf *em_conf)\n+{\n+\tstruct eventdev_params *eventdev_config;\n+\tint all_internal_ports = 1;\n+\tuint32_t eventdev_id;\n+\tint i;\n+\n+\tfor (i = 0; i < em_conf->nb_eventdev; i++) {\n+\n+\t\t/* Get the event dev conf */\n+\t\teventdev_config = &(em_conf->eventdev_config[i]);\n+\t\teventdev_id = eventdev_config->eventdev_id;\n+\n+\t\t/* Check if event device has internal port for Rx & Tx */\n+\t\tif (eh_dev_has_rx_internal_port(eventdev_id) &&\n+\t\t    eh_dev_has_tx_internal_port(eventdev_id)) {\n+\t\t\teventdev_config->all_internal_ports = 1;\n+\t\t} else {\n+\t\t\tall_internal_ports = 0;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * If Rx & Tx internal ports are supported by all event devices then\n+\t * eth cores won't be required. 
Override the eth core mask requested\n+\t * and decrement number of event queues by one as it won't be needed\n+\t * for Tx.\n+\t */\n+\tif (all_internal_ports) {\n+\t\trte_bitmap_reset(em_conf->eth_core_mask);\n+\t\tfor (i = 0; i < em_conf->nb_eventdev; i++)\n+\t\t\tem_conf->eventdev_config[i].nb_eventqueue--;\n+\t}\n+}\n+\n static int\n eh_set_default_conf_link(struct eventmode_conf *em_conf)\n {\n@@ -239,6 +327,9 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \tstruct rx_adapter_connection_info *conn;\n \tstruct eventdev_params *eventdev_config;\n \tstruct rx_adapter_conf *adapter;\n+\tbool rx_internal_port = true;\n+\tint nb_eventqueue;\n+\tuint32_t caps = 0;\n \tint eventdev_id;\n \tint nb_eth_dev;\n \tint adapter_id;\n@@ -268,7 +359,14 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t/* Set adapter conf */\n \tadapter->eventdev_id = eventdev_id;\n \tadapter->adapter_id = adapter_id;\n-\tadapter->rx_core_id = eh_get_next_eth_core(em_conf);\n+\n+\t/*\n+\t * If event device does not have internal ports for passing\n+\t * packets then reserved one queue for Tx path\n+\t */\n+\tnb_eventqueue = eventdev_config->all_internal_ports ?\n+\t\t\teventdev_config->nb_eventqueue :\n+\t\t\teventdev_config->nb_eventqueue - 1;\n \n \t/*\n \t * Map all queues of one eth device (port) to one event\n@@ -277,10 +375,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t */\n \n \t/* Make sure there is enough event queues for 1:1 mapping */\n-\tif (nb_eth_dev > eventdev_config->nb_eventqueue) {\n+\tif (nb_eth_dev > nb_eventqueue) {\n \t\tEH_LOG_ERR(\"Not enough event queues for 1:1 mapping \"\n \t\t\t\"[eth devs: %d, event queues: %d]\\n\",\n-\t\t\tnb_eth_dev, eventdev_config->nb_eventqueue);\n+\t\t\tnb_eth_dev, nb_eventqueue);\n \t\treturn -EINVAL;\n \t}\n \n@@ -303,11 +401,24 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t\t/* Add all eth queues of one eth port to one event queue */\n 
\t\tconn->ethdev_rx_qid = -1;\n \n+\t\t/* Get Rx adapter capabilities */\n+\t\trte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\trx_internal_port = false;\n+\n \t\t/* Update no of connections */\n \t\tadapter->nb_connections++;\n \n \t}\n \n+\tif (rx_internal_port) {\n+\t\t/* Rx core is not required */\n+\t\tadapter->rx_core_id = -1;\n+\t} else {\n+\t\t/* Rx core is required */\n+\t\tadapter->rx_core_id = eh_get_next_eth_core(em_conf);\n+\t}\n+\n \t/* We have setup one adapter */\n \tem_conf->nb_rx_adapter = 1;\n \n@@ -320,6 +431,8 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \tstruct tx_adapter_connection_info *conn;\n \tstruct eventdev_params *eventdev_config;\n \tstruct tx_adapter_conf *tx_adapter;\n+\tbool tx_internal_port = true;\n+\tuint32_t caps = 0;\n \tint eventdev_id;\n \tint adapter_id;\n \tint nb_eth_dev;\n@@ -353,22 +466,6 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \ttx_adapter->eventdev_id = eventdev_id;\n \ttx_adapter->adapter_id = adapter_id;\n \n-\t/* TODO: Tx core is required only when internal port is not present */\n-\n-\ttx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);\n-\n-\t/*\n-\t * Application uses one event queue per adapter for submitting\n-\t * packets for Tx. 
Reserve the last queue available and decrement\n-\t * the total available event queues for this\n-\t */\n-\n-\t/* Queue numbers start at 0 */\n-\ttx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;\n-\n-\t/* Update the number of event queues available in eventdev */\n-\teventdev_config->nb_eventqueue--;\n-\n \t/*\n \t * Map all Tx queues of the eth device (port) to the event device.\n \t */\n@@ -398,10 +495,30 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \t\t/* Add all eth tx queues to adapter */\n \t\tconn->ethdev_tx_qid = -1;\n \n+\t\t/* Get Tx adapter capabilities */\n+\t\trte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\ttx_internal_port = false;\n+\n \t\t/* Update no of connections */\n \t\ttx_adapter->nb_connections++;\n \t}\n \n+\tif (tx_internal_port) {\n+\t\t/* Tx core is not required */\n+\t\ttx_adapter->tx_core_id = -1;\n+\t} else {\n+\t\t/* Tx core is required */\n+\t\ttx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);\n+\n+\t\t/*\n+\t\t * Use one event queue per adapter for submitting packets\n+\t\t * for Tx. Reserving the last queue available\n+\t\t */\n+\t\t/* Queue numbers start at 0 */\n+\t\ttx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;\n+\t}\n+\n \t/* We have setup one adapter */\n \tem_conf->nb_tx_adapter = 1;\n \treturn 0;\n@@ -422,6 +539,9 @@ eh_validate_conf(struct eventmode_conf *em_conf)\n \t\t\treturn ret;\n \t}\n \n+\t/* Perform capability check for the selected event devices */\n+\teh_do_capability_check(em_conf);\n+\n \t/*\n \t * Check if links are specified. 
Else generate a default config for\n \t * the event ports used.\n@@ -481,9 +601,6 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)\n \t\t/* Get the number of queues */\n \t\tnb_eventqueue = eventdev_config->nb_eventqueue;\n \n-\t\t/* One queue is reserved for the final stage (doing eth tx) */\n-\t\tnb_eventqueue += 1;\n-\n \t\t/* Reset the default conf */\n \t\tmemset(&evdev_default_conf, 0,\n \t\t\tsizeof(struct rte_event_dev_info));\n@@ -530,11 +647,13 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)\n \t\t\t\t\teventdev_config->ev_queue_mode;\n \t\t\t/*\n \t\t\t * All queues need to be set with sched_type as\n-\t\t\t * schedule type for the application stage. One queue\n-\t\t\t * would be reserved for the final eth tx stage. This\n-\t\t\t * will be an atomic queue.\n+\t\t\t * schedule type for the application stage. One\n+\t\t\t * queue would be reserved for the final eth tx\n+\t\t\t * stage if event device does not have internal\n+\t\t\t * ports. This will be an atomic queue.\n \t\t\t */\n-\t\t\tif (j == nb_eventqueue-1) {\n+\t\t\tif (!eventdev_config->all_internal_ports &&\n+\t\t\t    j == nb_eventqueue-1) {\n \t\t\t\teventq_conf.schedule_type =\n \t\t\t\t\tRTE_SCHED_TYPE_ATOMIC;\n \t\t\t} else {\n@@ -650,10 +769,6 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,\n \t}\n \n \t/* Setup various connections in the adapter */\n-\n-\tqueue_conf.rx_queue_flags =\n-\t\t\tRTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;\n-\n \tfor (j = 0; j < adapter->nb_connections; j++) {\n \t\t/* Get connection */\n \t\tconn = &(adapter->conn[j]);\n@@ -661,9 +776,7 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t/* Setup queue conf */\n \t\tqueue_conf.ev.queue_id = conn->eventq_id;\n \t\tqueue_conf.ev.sched_type = em_conf->ext_params.sched_type;\n-\n-\t\t/* Set flow ID as ethdev ID */\n-\t\tqueue_conf.ev.flow_id = conn->ethdev_id;\n+\t\tqueue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;\n \n \t\t/* Add queue to the adapter */\n \t\tret = 
rte_event_eth_rx_adapter_queue_add(\n@@ -859,6 +972,12 @@ eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,\n \n \t/* Populate the curr_conf with the capabilities */\n \n+\t/* Check for Tx internal port */\n+\tif (eh_dev_has_tx_internal_port(eventdev_id))\n+\t\tcurr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;\n+\telse\n+\t\tcurr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;\n+\n \t/* Check for burst mode */\n \tif (eh_dev_has_burst_mode(eventdev_id))\n \t\tcurr_conf.cap.burst = EH_RX_TYPE_BURST;\n@@ -1034,6 +1153,18 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t}\n \t}\n \n+\t/*\n+\t * Check if Tx core is assigned. If Tx core is not assigned, then\n+\t * the adapter has internal port for submitting packets for Tx\n+\t * and so Tx event queue & port setup is not required\n+\t */\n+\tif (adapter->tx_core_id == (uint32_t) (-1)) {\n+\t\t/* Internal port is present */\n+\t\tgoto skip_tx_queue_port_setup;\n+\t}\n+\n+\t/* Setup Tx queue & port */\n+\n \t/* Get event port used by the adapter */\n \tret = rte_event_eth_tx_adapter_event_port_get(\n \t\t\tadapter->adapter_id,\n@@ -1044,11 +1175,6 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t}\n \n \t/*\n-\t * TODO: event queue for Tx adapter is required only if the\n-\t * INTERNAL PORT is not present.\n-\t */\n-\n-\t/*\n \t * Tx event queue is reserved for Tx adapter. 
Unlink this queue\n \t * from all other ports\n \t *\n@@ -1058,6 +1184,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t\t\t      &(adapter->tx_ev_queue), 1);\n \t}\n \n+\t/* Link Tx event queue to Tx port */\n \tret = rte_event_port_link(\n \t\t\teventdev_id,\n \t\t\ttx_port_id,\n@@ -1079,6 +1206,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \n \trte_service_set_runstate_mapped_check(service_id, 0);\n \n+skip_tx_queue_port_setup:\n \t/* Start adapter */\n \tret = rte_event_eth_tx_adapter_start(adapter->adapter_id);\n \tif (ret < 0) {\n@@ -1163,13 +1291,22 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)\n \n \tfor (i = 0; i < nb_rx_adapter; i++) {\n \t\tadapter = &(em_conf->rx_adapter[i]);\n-\t\tEH_LOG_INFO(\n-\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\"\n-\t\t\t\"\\tRx core: %-2d\",\n+\t\tsprintf(print_buf,\n+\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\",\n \t\t\tadapter->adapter_id,\n \t\t\tadapter->nb_connections,\n-\t\t\tadapter->eventdev_id,\n-\t\t\tadapter->rx_core_id);\n+\t\t\tadapter->eventdev_id);\n+\t\tif (adapter->rx_core_id == (uint32_t)-1)\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2s\", \"[INTERNAL PORT]\");\n+\t\telse if (adapter->rx_core_id == RTE_MAX_LCORE)\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2s\", \"[NONE]\");\n+\t\telse\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2d\", adapter->rx_core_id);\n+\n+\t\tEH_LOG_INFO(\"%s\", print_buf);\n \n \t\tfor (j = 0; j < adapter->nb_connections; j++) {\n \t\t\tconn = &(adapter->conn[j]);\ndiff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h\nindex aad87f7..2895dfa 100644\n--- a/examples/ipsec-secgw/event_helper.h\n+++ b/examples/ipsec-secgw/event_helper.h\n@@ -66,12 +66,21 @@ enum eh_rx_types {\n \tEH_RX_TYPE_BURST\n };\n \n+/**\n+ * Event mode packet tx types\n+ */\n+enum 
eh_tx_types {\n+\tEH_TX_TYPE_INTERNAL_PORT = 0,\n+\tEH_TX_TYPE_NO_INTERNAL_PORT\n+};\n+\n /* Event dev params */\n struct eventdev_params {\n \tuint8_t eventdev_id;\n \tuint8_t nb_eventqueue;\n \tuint8_t nb_eventport;\n \tuint8_t ev_queue_mode;\n+\tuint8_t all_internal_ports;\n };\n \n /**\n@@ -183,6 +192,8 @@ struct eh_app_worker_params {\n \t\tstruct {\n \t\t\tuint64_t burst : 1;\n \t\t\t/**< Specify status of rx type burst */\n+\t\t\tuint64_t tx_internal_port : 1;\n+\t\t\t/**< Specify whether tx internal port is available */\n \t\t};\n \t\tuint64_t u64;\n \t} cap;\n",
    "prefixes": [
        "08/14"
    ]
}