get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
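
For the read-only GET method above, the short Python sketch below fetches this resource and reads a few of the fields shown in the JSON body that follows. It is only an illustration: it assumes the third-party requests package is installed, any HTTP client works equally well, and GET needs no authentication.

# Illustrative sketch only: fetch patch 60813 from the Patchwork REST API.
# Assumes the third-party "requests" package is available.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/60813/")
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the response body below.
print(patch["name"])                # "[RFC,07/13] examples/ipsec-secgw: ..."
print(patch["state"])               # "superseded"
print(patch["submitter"]["email"])  # "anoobj@marvell.com"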

GET /api/patches/60813/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 60813,
    "url": "http://patches.dpdk.org/api/patches/60813/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1570633816-4706-8-git-send-email-anoobj@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1570633816-4706-8-git-send-email-anoobj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1570633816-4706-8-git-send-email-anoobj@marvell.com",
    "date": "2019-10-09T15:10:10",
    "name": "[RFC,07/13] examples/ipsec-secgw: add support for internal ports",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "78478c63c5a4884f1a65fceebbca7769a5c0cd7e",
    "submitter": {
        "id": 1205,
        "url": "http://patches.dpdk.org/api/people/1205/?format=api",
        "name": "Anoob Joseph",
        "email": "anoobj@marvell.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1570633816-4706-8-git-send-email-anoobj@marvell.com/mbox/",
    "series": [
        {
            "id": 6769,
            "url": "http://patches.dpdk.org/api/series/6769/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6769",
            "date": "2019-10-09T15:10:03",
            "name": "add eventmode to ipsec-secgw",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/6769/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/60813/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/60813/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4D1861E92E;\n\tWed,  9 Oct 2019 17:11:07 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n\t[67.231.156.173]) by dpdk.org (Postfix) with ESMTP id 8DA491E927\n\tfor <dev@dpdk.org>; Wed,  9 Oct 2019 17:11:05 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n\tby mx0b-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n\tx99EsZWt024408; Wed, 9 Oct 2019 08:11:05 -0700",
            "from sc-exch02.marvell.com ([199.233.58.182])\n\tby mx0b-0016f401.pphosted.com with ESMTP id 2vhdxbrwn6-2\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); \n\tWed, 09 Oct 2019 08:11:04 -0700",
            "from SC-EXCH03.marvell.com (10.93.176.83) by SC-EXCH02.marvell.com\n\t(10.93.176.82) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tWed, 9 Oct 2019 08:11:02 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH03.marvell.com\n\t(10.93.176.83) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Wed, 9 Oct 2019 08:11:02 -0700",
            "from ajoseph83.caveonetworks.com.com (unknown [10.29.45.60])\n\tby maili.marvell.com (Postfix) with ESMTP id D9D2B3F7045;\n\tWed,  9 Oct 2019 08:10:59 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-transfer-encoding : content-type; s=pfpt0818;\n\tbh=mDjo/dYxsieRDnbOQ20Jk3My43pEIlIni2W43TbEPZk=;\n\tb=NZUDaqZzkX0IKCrFEwbvcqFQmASYoQOVMIQKdIn0OZfF9qDAitXRHXrpWgDlJri5g7Yp\n\t2oMf+GaSSGpgvSC41j2Qu5Gn1FAzVM9ZBCsqh55ZJWtVbBYfmz2Ehrm+pgVMni1XLxkx\n\tPBClr8na2yompUmMDrvNvMbQUsNPSwX5GTbbp9eLAgV2kSFeVCWeg1O2ai2OI4RVdH0R\n\tFrdTszxaYLAfT+gfsBALymKP0QsOcJIDmOeu1YGspWfJ6M1CYm2Q3zT4SeBdiktpM+ZQ\n\ts/yDWFu4WFBVxRMFP2aBS5tvMhlrvDpLQJh5aJny38FwtIhFrD+RSaeIZvSjj2z8JoIL\n\tvg== ",
        "From": "Anoob Joseph <anoobj@marvell.com>",
        "To": "Akhil Goyal <akhil.goyal@nxp.com>, Radu Nicolau <radu.nicolau@intel.com>",
        "CC": "Anoob Joseph <anoobj@marvell.com>, Thomas Monjalon <thomas@monjalon.net>,\n\tJerin Jacob <jerinj@marvell.com>,\n\tNarayana Prasad <pathreya@marvell.com>, <dev@dpdk.org>,\n\tLukasz Bartosik <lbartosik@marvell.com>",
        "Date": "Wed, 9 Oct 2019 20:40:10 +0530",
        "Message-ID": "<1570633816-4706-8-git-send-email-anoobj@marvell.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1570633816-4706-1-git-send-email-anoobj@marvell.com>",
        "References": "<1570633816-4706-1-git-send-email-anoobj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.95,1.0.8\n\tdefinitions=2019-10-09_06:2019-10-08,2019-10-09 signatures=0",
        "Subject": "[dpdk-dev] [RFC PATCH 07/13] examples/ipsec-secgw: add support for\n\tinternal ports",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for Rx and Tx internal ports. When internal ports are\navailable then a packet can be received from eth port and forwarded\nto event queue by HW without any software intervention. The same\napplies to Tx side where a packet sent to an event queue can by\nforwarded by HW to eth port without any software intervention.\n\nSigned-off-by: Anoob Joseph <anoobj@marvell.com>\nSigned-off-by: Lukasz Bartosik <lbartosik@marvell.com>\n---\n examples/ipsec-secgw/event_helper.c | 234 ++++++++++++++++++++++++++++--------\n examples/ipsec-secgw/event_helper.h |  11 ++\n 2 files changed, 195 insertions(+), 50 deletions(-)",
    "diff": "diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c\nindex 6993092..858b855 100644\n--- a/examples/ipsec-secgw/event_helper.c\n+++ b/examples/ipsec-secgw/event_helper.c\n@@ -102,7 +102,38 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf,\n \treturn &(em_conf->eventdev_config[i]);\n }\n static inline bool\n-eh_dev_has_burst_mode(uint8_t dev_id)\n+eh_dev_has_rx_internal_port(uint8_t eventdev_id)\n+{\n+\tint j;\n+\tbool flag = true;\n+\n+\tRTE_ETH_FOREACH_DEV(j) {\n+\t\tuint32_t caps = 0;\n+\n+\t\trte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\tflag = false;\n+\t}\n+\treturn flag;\n+}\n+\n+static inline bool\n+eh_dev_has_tx_internal_port(uint8_t eventdev_id)\n+{\n+\tint j;\n+\tbool flag = true;\n+\n+\tRTE_ETH_FOREACH_DEV(j) {\n+\t\tuint32_t caps = 0;\n+\n+\t\trte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\tflag = false;\n+\t}\n+\treturn flag;\n+}\n+\n+static inline bool eh_dev_has_burst_mode(uint8_t dev_id)\n {\n \tstruct rte_event_dev_info dev_info;\n \n@@ -127,6 +158,8 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n {\n \tint i, ret;\n \tint nb_eventdev;\n+\tint nb_eth_dev;\n+\tint lcore_count;\n \tstruct eventdev_params *eventdev_config;\n \tstruct rte_event_dev_info dev_info;\n \n@@ -138,6 +171,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \t\treturn -EINVAL;\n \t}\n \n+\t/* Get the number of eth devs */\n+\tnb_eth_dev = rte_eth_dev_count_avail();\n+\n+\tif (nb_eth_dev == 0) {\n+\t\tEH_LOG_ERR(\"No eth devices detected\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Get the number of lcores */\n+\tlcore_count = rte_lcore_count();\n+\n \tfor (i = 0; i < nb_eventdev; i++) {\n \n \t\t/* Get the event dev conf */\n@@ -163,13 +207,19 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \t\teventdev_config->nb_eventqueue = dev_info.max_event_queues;\n \t\teventdev_config->nb_eventport = dev_info.max_event_ports;\n \t\teventdev_config->ev_queue_mode =\n-\t\t\t\tRTE_EVENT_QUEUE_CFG_SINGLE_LINK;\n+\t\t\t\tRTE_EVENT_QUEUE_CFG_ALL_TYPES;\n \n-\t\t/* One port is required for eth Rx adapter */\n-\t\teventdev_config->nb_eventport -= 1;\n+\t\t/* Check if there are more queues than required */\n+\t\tif (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {\n+\t\t\t/* One queue is reserved for Tx */\n+\t\t\teventdev_config->nb_eventqueue = nb_eth_dev + 1;\n+\t\t}\n \n-\t\t/* One port is reserved for eth Tx adapter */\n-\t\teventdev_config->nb_eventport -= 1;\n+\t\t/* Check if there are more ports than required */\n+\t\tif (eventdev_config->nb_eventport > lcore_count) {\n+\t\t\t/* One port per lcore is enough */\n+\t\t\teventdev_config->nb_eventport = lcore_count;\n+\t\t}\n \n \t\t/* Update the number of eventdevs */\n \t\tem_conf->nb_eventdev++;\n@@ -178,6 +228,42 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \treturn 0;\n }\n \n+static void\n+eh_do_capability_check(struct eventmode_conf *em_conf)\n+{\n+\tstruct eventdev_params *eventdev_config;\n+\tuint32_t eventdev_id;\n+\tint all_internal_ports = 1;\n+\tint i;\n+\n+\tfor (i = 0; i < em_conf->nb_eventdev; i++) {\n+\n+\t\t/* Get the event dev conf */\n+\t\teventdev_config = &(em_conf->eventdev_config[i]);\n+\t\teventdev_id = eventdev_config->eventdev_id;\n+\n+\t\t/* Check if event device has internal port for Rx & Tx */\n+\t\tif (eh_dev_has_rx_internal_port(eventdev_id) 
&&\n+\t\t    eh_dev_has_tx_internal_port(eventdev_id)) {\n+\t\t\teventdev_config->all_internal_ports = 1;\n+\t\t} else {\n+\t\t\tall_internal_ports = 0;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * If Rx & Tx internal ports are supported by all event devices then\n+\t * eth cores won't be required. Override the eth core mask requested\n+\t * and decrement number of event queues by one as it won't be needed\n+\t * for Tx.\n+\t */\n+\tif (all_internal_ports) {\n+\t\trte_bitmap_reset(em_conf->eth_core_mask);\n+\t\tfor (i = 0; i < em_conf->nb_eventdev; i++)\n+\t\t\tem_conf->eventdev_config[i].nb_eventqueue--;\n+\t}\n+}\n+\n static int\n eh_set_default_conf_link(struct eventmode_conf *em_conf)\n {\n@@ -254,9 +340,12 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \tint adapter_id;\n \tint eventdev_id;\n \tint conn_id;\n+\tint nb_eventqueue;\n \tstruct rx_adapter_conf *adapter;\n \tstruct rx_adapter_connection_info *conn;\n \tstruct eventdev_params *eventdev_config;\n+\tbool rx_internal_port = true;\n+\tuint32_t caps = 0;\n \n \t/* Create one adapter with all eth queues mapped to event queues 1:1 */\n \n@@ -281,7 +370,14 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t/* Set adapter conf */\n \tadapter->eventdev_id = eventdev_id;\n \tadapter->adapter_id = adapter_id;\n-\tadapter->rx_core_id = eh_get_next_eth_core(em_conf);\n+\n+\t/*\n+\t * If event device does not have internal ports for passing\n+\t * packets then one queue is reserved for Tx path\n+\t */\n+\tnb_eventqueue = eventdev_config->all_internal_ports ?\n+\t\t\teventdev_config->nb_eventqueue :\n+\t\t\teventdev_config->nb_eventqueue - 1;\n \n \t/*\n \t * All queues of one eth device (port) will be mapped to one event\n@@ -290,10 +386,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t */\n \n \t/* Make sure there is enough event queues for 1:1 mapping */\n-\tif (nb_eth_dev > eventdev_config->nb_eventqueue) {\n+\tif (nb_eth_dev > nb_eventqueue) {\n \t\tEH_LOG_ERR(\"Not enough event queues for 1:1 mapping \"\n \t\t\t\"[eth devs: %d, event queues: %d]\\n\",\n-\t\t\tnb_eth_dev, eventdev_config->nb_eventqueue);\n+\t\t\tnb_eth_dev, nb_eventqueue);\n \t\treturn -EINVAL;\n \t}\n \n@@ -316,11 +412,24 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t\t/* Add all eth queues of one eth port to one event queue */\n \t\tconn->ethdev_rx_qid = -1;\n \n+\t\t/* Get Rx adapter capabilities */\n+\t\trte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\trx_internal_port = false;\n+\n \t\t/* Update no of connections */\n \t\tadapter->nb_connections++;\n \n \t}\n \n+\tif (rx_internal_port) {\n+\t\t/* Rx core is not required */\n+\t\tadapter->rx_core_id = -1;\n+\t} else {\n+\t\t/* Rx core is required */\n+\t\tadapter->rx_core_id = eh_get_next_eth_core(em_conf);\n+\t}\n+\n \t/* We have setup one adapter */\n \tem_conf->nb_rx_adapter = 1;\n \n@@ -338,6 +447,8 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \tstruct eventdev_params *eventdev_config;\n \tstruct tx_adapter_conf *tx_adapter;\n \tstruct tx_adapter_connection_info *conn;\n+\tbool tx_internal_port = true;\n+\tuint32_t caps = 0;\n \n \t/*\n \t * Create one Tx adapter with all eth queues mapped to event queues\n@@ -366,22 +477,6 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \ttx_adapter->eventdev_id = eventdev_id;\n \ttx_adapter->adapter_id = adapter_id;\n \n-\t/* TODO: Tx core is required only when internal port is 
not present */\n-\n-\ttx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);\n-\n-\t/*\n-\t * Application would need to use one event queue per adapter for\n-\t * submitting packets for Tx. Reserving the last queue available\n-\t * and decrementing the total available event queues for this\n-\t */\n-\n-\t/* Queue numbers start at 0 */\n-\ttx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;\n-\n-\t/* Update the number of event queues available in eventdev */\n-\teventdev_config->nb_eventqueue--;\n-\n \t/*\n \t * All Tx queues of the eth device (port) will be mapped to the event\n \t * device.\n@@ -412,10 +507,30 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \t\t/* Add all eth tx queues to adapter */\n \t\tconn->ethdev_tx_qid = -1;\n \n+\t\t/* Get Rx adapter capabilities */\n+\t\trte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\ttx_internal_port = false;\n+\n \t\t/* Update no of connections */\n \t\ttx_adapter->nb_connections++;\n \t}\n \n+\tif (tx_internal_port) {\n+\t\t/* Tx core is not required */\n+\t\ttx_adapter->tx_core_id = -1;\n+\t} else {\n+\t\t/* Tx core is required */\n+\t\ttx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);\n+\n+\t\t/*\n+\t\t * Application would need to use one event queue per adapter for\n+\t\t * submitting packets for Tx. Reserving the last queue available\n+\t\t */\n+\t\t/* Queue numbers start at 0 */\n+\t\ttx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;\n+\t}\n+\n \t/* We have setup one adapter */\n \tem_conf->nb_tx_adapter = 1;\n \treturn 0;\n@@ -441,6 +556,9 @@ eh_validate_conf(struct eventmode_conf *em_conf)\n \t\t\treturn ret;\n \t}\n \n+\t/* Perform capability check for the selected event devices*/\n+\teh_do_capability_check(em_conf);\n+\n \t/*\n \t * See if links are specified. Else generate a default conf for\n \t * the event ports used.\n@@ -500,10 +618,6 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)\n \t\t/* Get the number of queues */\n \t\tnb_eventqueue = eventdev_config->nb_eventqueue;\n \n-\t\t/* One queue is reserved for the final stage (doing eth tx) */\n-\t\t/* TODO handles only one Tx adapter. Fix this */\n-\t\tnb_eventqueue += 1;\n-\n \t\t/* Reset the default conf */\n \t\tmemset(&evdev_default_conf, 0,\n \t\t\tsizeof(struct rte_event_dev_info));\n@@ -550,11 +664,13 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)\n \t\t\t\t\teventdev_config->ev_queue_mode;\n \t\t\t/*\n \t\t\t * All queues need to be set with sched_type as\n-\t\t\t * schedule type for the application stage. One queue\n-\t\t\t * would be reserved for the final eth tx stage. This\n-\t\t\t * will be an atomic queue.\n+\t\t\t * schedule type for the application stage. One\n+\t\t\t * queue would be reserved for the final eth tx\n+\t\t\t * stage if event device does not have internal\n+\t\t\t * ports. 
This will be an atomic queue.\n \t\t\t */\n-\t\t\tif (j == nb_eventqueue-1) {\n+\t\t\tif (!eventdev_config->all_internal_ports &&\n+\t\t\t    j == nb_eventqueue-1) {\n \t\t\t\teventq_conf.schedule_type =\n \t\t\t\t\tRTE_SCHED_TYPE_ATOMIC;\n \t\t\t} else {\n@@ -669,10 +785,6 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,\n \t}\n \n \t/* Setup various connections in the adapter */\n-\n-\tqueue_conf.rx_queue_flags =\n-\t\t\tRTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;\n-\n \tfor (j = 0; j < adapter->nb_connections; j++) {\n \t\t/* Get connection */\n \t\tconn = &(adapter->conn[j]);\n@@ -680,9 +792,7 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t/* Setup queue conf */\n \t\tqueue_conf.ev.queue_id = conn->eventq_id;\n \t\tqueue_conf.ev.sched_type = em_conf->ext_params.sched_type;\n-\n-\t\t/* Set flow ID as ethdev ID */\n-\t\tqueue_conf.ev.flow_id = conn->ethdev_id;\n+\t\tqueue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;\n \n \t\t/* Add queue to the adapter */\n \t\tret = rte_event_eth_rx_adapter_queue_add(\n@@ -881,6 +991,12 @@ eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,\n \n \t/* Populate the curr_conf with the capabilities */\n \n+\t/* Check for Tx internal port */\n+\tif (eh_dev_has_tx_internal_port(eventdev_id))\n+\t\tcurr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;\n+\telse\n+\t\tcurr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;\n+\n \t/* Check for burst mode */\n \tif (eh_dev_has_burst_mode(eventdev_id))\n \t\tcurr_conf.cap.burst = EH_RX_TYPE_BURST;\n@@ -1057,6 +1173,18 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t}\n \t}\n \n+\t/*\n+\t * Check if Tx core is assigned. If Tx core is not assigned, then\n+\t * the adapter would be having internal port for submitting packets\n+\t * for Tx and so Tx event queue & port setup is not required\n+\t */\n+\tif (adapter->tx_core_id == (uint32_t) (-1)) {\n+\t\t/* Internal port is present */\n+\t\tgoto skip_tx_queue_port_setup;\n+\t}\n+\n+\t/* Setup Tx queue & port */\n+\n \t/* Get event port used by the adapter */\n \tret = rte_event_eth_tx_adapter_event_port_get(\n \t\t\tadapter->adapter_id,\n@@ -1067,11 +1195,6 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t}\n \n \t/*\n-\t * TODO: event queue for Tx adapter is required only if the\n-\t * INTERNAL PORT is not present.\n-\t */\n-\n-\t/*\n \t * Tx event queue would be reserved for Tx adapter. 
Need to unlink\n \t * this queue from all other ports\n \t *\n@@ -1081,6 +1204,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t\t\t      &(adapter->tx_ev_queue), 1);\n \t}\n \n+\t/* Link Tx event queue to Tx port */\n \tret = rte_event_port_link(\n \t\t\teventdev_id,\n \t\t\ttx_port_id,\n@@ -1105,6 +1229,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,\n \t */\n \trte_service_set_runstate_mapped_check(service_id, 0);\n \n+skip_tx_queue_port_setup:\n \t/* Start adapter */\n \tret = rte_event_eth_tx_adapter_start(adapter->adapter_id);\n \tif (ret) {\n@@ -1189,13 +1314,22 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)\n \n \tfor (i = 0; i < nb_rx_adapter; i++) {\n \t\tadapter = &(em_conf->rx_adapter[i]);\n-\t\tEH_LOG_INFO(\n-\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\"\n-\t\t\t\"\\tRx core: %-2d\",\n+\t\tsprintf(print_buf,\n+\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\",\n \t\t\tadapter->adapter_id,\n \t\t\tadapter->nb_connections,\n-\t\t\tadapter->eventdev_id,\n-\t\t\tadapter->rx_core_id);\n+\t\t\tadapter->eventdev_id);\n+\t\tif (adapter->rx_core_id == (uint32_t)-1)\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2s\", \"[INTERNAL PORT]\");\n+\t\telse if (adapter->rx_core_id == RTE_MAX_LCORE)\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2s\", \"[NONE]\");\n+\t\telse\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2d\", adapter->rx_core_id);\n+\n+\t\tEH_LOG_INFO(\"%s\", print_buf);\n \n \t\tfor (j = 0; j < adapter->nb_connections; j++) {\n \t\t\tconn = &(adapter->conn[j]);\ndiff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h\nindex 1d5a087..a66bcb3 100644\n--- a/examples/ipsec-secgw/event_helper.h\n+++ b/examples/ipsec-secgw/event_helper.h\n@@ -76,12 +76,21 @@ enum eh_rx_types {\n \tEH_RX_TYPE_BURST\n };\n \n+/**\n+ * Event mode packet tx types\n+ */\n+enum eh_tx_types {\n+\tEH_TX_TYPE_INTERNAL_PORT = 0,\n+\tEH_TX_TYPE_NO_INTERNAL_PORT\n+};\n+\n /* Event dev params */\n struct eventdev_params {\n \tuint8_t eventdev_id;\n \tuint8_t nb_eventqueue;\n \tuint8_t nb_eventport;\n \tuint8_t ev_queue_mode;\n+\tuint8_t all_internal_ports;\n };\n \n /**\n@@ -193,6 +202,8 @@ struct eh_app_worker_params {\n \t\tstruct {\n \t\t\tuint64_t burst : 1;\n \t\t\t/**< Specify status of rx type burst */\n+\t\t\tuint64_t tx_internal_port : 1;\n+\t\t\t/**< Specify whether tx internal port is available */\n \t\t};\n \t\tuint64_t u64;\n \t} cap;\n",
    "prefixes": [
        "RFC",
        "07/13"
    ]
}
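
The PUT and PATCH methods listed in the Allow header modify the mutable fields shown above (for example state, archived and delegate) and require authentication. The sketch below is an assumption-laden illustration using a Patchwork API token; the token value is a placeholder, and the account is assumed to have maintainer rights on the dpdk project.

# Illustrative sketch only: partially update this patch via HTTP PATCH.
# The token is a placeholder; maintainer rights on the project are assumed.
import requests

TOKEN = "0000000000000000000000000000000000000000"  # placeholder value

resp = requests.patch(
    "http://patches.dpdk.org/api/patches/60813/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])  # server echoes the updated field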