get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
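
A patch can also be retrieved programmatically. The sketch below is a minimal example, not part of this page: it assumes the third-party Python "requests" package and uses the Accept header to request JSON instead of the browsable view (which the sample request below selects with ?format=api).

import requests

# Fetch the patch shown below as JSON (assumes the "requests" package).
url = "http://patches.dpdk.org/api/patches/54257/"
resp = requests.get(url, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])    # e.g. "[34/39] eventdev: add support for internal ports"
print(patch["state"])   # e.g. "changes-requested"
print(patch["mbox"])    # raw mbox URL, suitable for downloading and applying with git am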

GET /api/patches/54257/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54257,
    "url": "http://patches.dpdk.org/api/patches/54257/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1559583160-13944-35-git-send-email-anoobj@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1559583160-13944-35-git-send-email-anoobj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1559583160-13944-35-git-send-email-anoobj@marvell.com",
    "date": "2019-06-03T17:32:34",
    "name": "[34/39] eventdev: add support for internal ports",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "501934f441165355274852e9aa106ef0c05331a5",
    "submitter": {
        "id": 1205,
        "url": "http://patches.dpdk.org/api/people/1205/?format=api",
        "name": "Anoob Joseph",
        "email": "anoobj@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1559583160-13944-35-git-send-email-anoobj@marvell.com/mbox/",
    "series": [
        {
            "id": 4868,
            "url": "http://patches.dpdk.org/api/series/4868/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4868",
            "date": "2019-06-03T17:32:00",
            "name": "adding eventmode helper library",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4868/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54257/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/54257/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id B8EDA1BB47;\n\tMon,  3 Jun 2019 19:36:53 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n\t[67.231.156.173]) by dpdk.org (Postfix) with ESMTP id C26671B9EE\n\tfor <dev@dpdk.org>; Mon,  3 Jun 2019 19:36:51 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n\tby mx0b-0016f401.pphosted.com (8.16.0.27/8.16.0.27) with SMTP id\n\tx53HKoTK027681; Mon, 3 Jun 2019 10:36:51 -0700",
            "from sc-exch04.marvell.com ([199.233.58.184])\n\tby mx0b-0016f401.pphosted.com with ESMTP id 2sw79pr75w-1\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); \n\tMon, 03 Jun 2019 10:36:50 -0700",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH04.marvell.com\n\t(10.93.176.84) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tMon, 3 Jun 2019 10:36:49 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Mon, 3 Jun 2019 10:36:49 -0700",
            "from ajoseph83.caveonetworks.com.com (unknown [10.29.45.56])\n\tby maili.marvell.com (Postfix) with ESMTP id 6EEB13F703F;\n\tMon,  3 Jun 2019 10:36:44 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-transfer-encoding : content-type; s=pfpt0818;\n\tbh=BbBhlL0PLFX99vF9KEyY9oXkA0BO2cyrhM3mg4SoMfw=;\n\tb=ED79dZkeuj/5sDCeAaQZNtpWXmfkGPWQ71lblTYGs2tHaXUHk2tDc2a5E2pTPSBEmTdQ\n\tKTIemOmE1pkfoGTOCmETBYK4OjXPf+Ab8pplZKsXs8d3XaHpUIlvwsMCagpVjzvrce0/\n\tHIkfPX7tdHPcY1GiUd/X9xXvE8b07cmJNvVGyOujRdWHu4D0DEs0S5vt6qlz+oz+W/au\n\tjaFCnFFRqWnyuMraAGAxBIBKhcCiT0lXWHboPVwY4a0EkDTvaJV8CdLt/RzrCb1rzse0\n\tXlEmuBiS2VFy+HYcEMtxG8nLcqgzZCM2N02hzhjQS0f1YMTcFF0Wyoc0B73lAOWC02dP\n\tyQ== ",
        "From": "Anoob Joseph <anoobj@marvell.com>",
        "To": "Jerin Jacob <jerinj@marvell.com>, Nikhil Rao <nikhil.rao@intel.com>,\n\t\"Erik Gabriel Carrillo\" <erik.g.carrillo@intel.com>, Abhinandan Gujjar\n\t<abhinandan.gujjar@intel.com>,\n\tBruce Richardson <bruce.richardson@intel.com>,\n\tPablo de Lara <pablo.de.lara.guarch@intel.com>",
        "CC": "Anoob Joseph <anoobj@marvell.com>, Narayana Prasad <pathreya@marvell.com>,\n\t<dev@dpdk.org>, Lukasz Bartosik <lbartosik@marvell.com>, Pavan Nikhilesh\n\t<pbhagavatula@marvell.com>, Hemant Agrawal <hemant.agrawal@nxp.com>,\n\t\"Nipun  Gupta\" <nipun.gupta@nxp.com>, Harry van Haaren\n\t<harry.van.haaren@intel.com>, =?utf-8?q?Mattias_R=C3=B6nnblom?=\n\t<mattias.ronnblom@ericsson.com>,  Liang Ma <liang.j.ma@intel.com>",
        "Date": "Mon, 3 Jun 2019 23:02:34 +0530",
        "Message-ID": "<1559583160-13944-35-git-send-email-anoobj@marvell.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1559583160-13944-1-git-send-email-anoobj@marvell.com>",
        "References": "<1559583160-13944-1-git-send-email-anoobj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:, ,\n\tdefinitions=2019-06-03_13:, , signatures=0",
        "Subject": "[dpdk-dev] [PATCH 34/39] eventdev: add support for internal ports",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "For eventdev-ethdev combinations having Tx internal port & Rx internal\nport, usage of ethcore is not needed.\n\nSigned-off-by: Anoob Joseph <anoobj@marvell.com>\nSigned-off-by: Lukasz Bartosik <lbartosik@marvell.com>\n---\n lib/librte_eventdev/rte_eventmode_helper.c         | 213 +++++++++++++++++----\n lib/librte_eventdev/rte_eventmode_helper.h         |   4 +\n .../rte_eventmode_helper_internal.h                |   1 +\n lib/librte_eventdev/rte_eventmode_helper_prints.c  |  19 +-\n 4 files changed, 192 insertions(+), 45 deletions(-)",
    "diff": "diff --git a/lib/librte_eventdev/rte_eventmode_helper.c b/lib/librte_eventdev/rte_eventmode_helper.c\nindex f237cab..2451cac 100644\n--- a/lib/librte_eventdev/rte_eventmode_helper.c\n+++ b/lib/librte_eventdev/rte_eventmode_helper.c\n@@ -142,6 +142,38 @@ internal_get_eventdev_params(struct eventmode_conf *em_conf,\n }\n \n static inline bool\n+internal_dev_has_rx_internal_port(uint8_t eventdev_id)\n+{\n+\tint j;\n+\tbool flag = true;\n+\n+\tRTE_ETH_FOREACH_DEV(j) {\n+\t\tuint32_t caps = 0;\n+\n+\t\trte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\tflag = false;\n+\t}\n+\treturn flag;\n+}\n+\n+static inline bool\n+internal_dev_has_tx_internal_port(uint8_t eventdev_id)\n+{\n+\tint j;\n+\tbool flag = true;\n+\n+\tRTE_ETH_FOREACH_DEV(j) {\n+\t\tuint32_t caps = 0;\n+\n+\t\trte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\tflag = false;\n+\t}\n+\treturn flag;\n+}\n+\n+static inline bool\n internal_dev_has_burst_mode(uint8_t dev_id)\n {\n \tstruct rte_event_dev_info dev_info;\n@@ -303,6 +335,8 @@ rte_eventmode_helper_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n {\n \tint i, ret;\n \tint nb_eventdev;\n+\tint nb_eth_dev;\n+\tint lcore_count;\n \tstruct eventdev_params *eventdev_config;\n \tstruct rte_event_dev_info dev_info;\n \n@@ -314,6 +348,17 @@ rte_eventmode_helper_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \t\treturn -1;\n \t}\n \n+\t/* Get the number of eth devs */\n+\tnb_eth_dev = rte_eth_dev_count_avail();\n+\n+\tif (nb_eth_dev == 0) {\n+\t\tRTE_EM_HLPR_LOG_ERR(\"No eth devices detected\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Get the number of lcores */\n+\tlcore_count = rte_lcore_count();\n+\n \tfor (i = 0; i < nb_eventdev; i++) {\n \n \t\t/* Get the event dev conf */\n@@ -340,13 +385,19 @@ rte_eventmode_helper_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \t\teventdev_config->nb_eventqueue = dev_info.max_event_queues;\n \t\teventdev_config->nb_eventport = dev_info.max_event_ports;\n \t\teventdev_config->ev_queue_mode =\n-\t\t\t\tRTE_EVENT_QUEUE_CFG_SINGLE_LINK;\n+\t\t\t\tRTE_EVENT_QUEUE_CFG_ALL_TYPES;\n \n-\t\t/* One port is required for eth Rx adapter */\n-\t\teventdev_config->nb_eventport -= 1;\n+\t\t/* Check if there are more queues than required */\n+\t\tif (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {\n+\t\t\t/* One queue is reserved for Tx */\n+\t\t\teventdev_config->nb_eventqueue = nb_eth_dev + 1;\n+\t\t}\n \n-\t\t/* One port is reserved for eth Tx adapter */\n-\t\teventdev_config->nb_eventport -= 1;\n+\t\t/* Check if there are more ports than required */\n+\t\tif (eventdev_config->nb_eventport > lcore_count) {\n+\t\t\t/* One port per lcore is enough */\n+\t\t\teventdev_config->nb_eventport = lcore_count;\n+\t\t}\n \n \t\t/* Update the number of eventdevs */\n \t\tem_conf->nb_eventdev++;\n@@ -355,6 +406,37 @@ rte_eventmode_helper_set_default_conf_eventdev(struct eventmode_conf *em_conf)\n \treturn 0;\n }\n \n+static void\n+rte_eventmode_helper_do_capability_check(struct eventmode_conf *em_conf)\n+{\n+\tstruct eventdev_params *eventdev_config;\n+\tuint32_t eventdev_id;\n+\tint all_internal_ports = 1;\n+\tint i;\n+\n+\tfor (i = 0; i < em_conf->nb_eventdev; i++) {\n+\n+\t\t/* Get the event dev conf */\n+\t\teventdev_config = &(em_conf->eventdev_config[i]);\n+\t\teventdev_id = eventdev_config->eventdev_id;\n+\n+\t\t/* Check if event device has internal port for Rx & 
Tx */\n+\t\tif (internal_dev_has_rx_internal_port(eventdev_id) &&\n+\t\t    internal_dev_has_tx_internal_port(eventdev_id)) {\n+\t\t\teventdev_config->all_internal_ports = 1;\n+\t\t} else {\n+\t\t\tall_internal_ports = 0;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * If Rx & Tx internal ports are supported by all event devices then\n+\t * eth cores won't be required. Override the eth core mask requested.\n+\t */\n+\tif (all_internal_ports)\n+\t\tem_conf->eth_core_mask = 0;\n+}\n+\n static int\n rte_eventmode_helper_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n {\n@@ -363,9 +445,12 @@ rte_eventmode_helper_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \tint adapter_id;\n \tint eventdev_id;\n \tint conn_id;\n+\tint nb_eventqueue;\n \tstruct rx_adapter_conf *adapter;\n \tstruct adapter_connection_info *conn;\n \tstruct eventdev_params *eventdev_config;\n+\tbool rx_internal_port = true;\n+\tuint32_t caps = 0;\n \n \t/* Create one adapter with all eth queues mapped to event queues 1:1 */\n \n@@ -390,7 +475,14 @@ rte_eventmode_helper_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t/* Set adapter conf */\n \tadapter->eventdev_id = eventdev_id;\n \tadapter->adapter_id = adapter_id;\n-\tadapter->rx_core_id = internal_get_next_eth_core(em_conf);\n+\n+\t/*\n+\t * If event device does not have internal ports for passing\n+\t * packets then one queue is reserved for Tx path\n+\t */\n+\tnb_eventqueue = eventdev_config->all_internal_ports ?\n+\t\t\teventdev_config->nb_eventqueue :\n+\t\t\teventdev_config->nb_eventqueue - 1;\n \n \t/*\n \t * All queues of one eth device (port) will be mapped to one event\n@@ -399,12 +491,11 @@ rte_eventmode_helper_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t */\n \n \t/* Make sure there is enough event queues for 1:1 mapping */\n-\tif (nb_eth_dev > eventdev_config->nb_eventqueue) {\n+\tif (nb_eth_dev > nb_eventqueue) {\n \t\tRTE_EM_HLPR_LOG_ERR(\n \t\t\t\"Not enough event queues for 1:1 mapping \"\n \t\t\t\"[eth devs: %d, event queues: %d]\\n\",\n-\t\t\tnb_eth_dev,\n-\t\t\teventdev_config->nb_eventqueue);\n+\t\t\tnb_eth_dev, nb_eventqueue);\n \t\treturn -1;\n \t}\n \n@@ -427,11 +518,24 @@ rte_eventmode_helper_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)\n \t\t/* Add all eth queues of one eth port to one event queue */\n \t\tconn->ethdev_rx_qid = -1;\n \n+\t\t/* Get Rx adapter capabilities */\n+\t\trte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\trx_internal_port = false;\n+\n \t\t/* Update no of connections */\n \t\tadapter->nb_connections++;\n \n \t}\n \n+\tif (rx_internal_port) {\n+\t\t/* Rx core is not required */\n+\t\tadapter->rx_core_id = -1;\n+\t} else {\n+\t\t/* Rx core is required */\n+\t\tadapter->rx_core_id = internal_get_next_eth_core(em_conf);\n+\t}\n+\n \t/* We have setup one adapter */\n \tem_conf->nb_rx_adapter = 1;\n \n@@ -449,6 +553,8 @@ rte_eventmode_helper_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \tstruct eventdev_params *eventdev_config;\n \tstruct tx_adapter_conf *tx_adapter;\n \tstruct tx_adapter_connection_info *conn;\n+\tbool tx_internal_port = true;\n+\tuint32_t caps = 0;\n \n \t/*\n \t * Create one Tx adapter with all eth queues mapped to event queues\n@@ -477,22 +583,6 @@ rte_eventmode_helper_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \ttx_adapter->eventdev_id = eventdev_id;\n \ttx_adapter->adapter_id = adapter_id;\n \n-\t/* TODO: Tx core is required only when 
internal port is not present */\n-\n-\ttx_adapter->tx_core_id = internal_get_next_eth_core(em_conf);\n-\n-\t/*\n-\t * Application would need to use one event queue per adapter for\n-\t * submitting packets for Tx. Reserving the last queue available\n-\t * and decrementing the total available event queues for this\n-\t */\n-\n-\t/* Queue numbers start at 0 */\n-\ttx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;\n-\n-\t/* Update the number of event queues available in eventdev */\n-\teventdev_config->nb_eventqueue--;\n-\n \t/*\n \t * All Tx queues of the eth device (port) will be mapped to the event\n \t * device.\n@@ -523,10 +613,30 @@ rte_eventmode_helper_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)\n \t\t/* Add all eth tx queues to adapter */\n \t\tconn->ethdev_tx_qid = -1;\n \n+\t\t/* Get Rx adapter capabilities */\n+\t\trte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);\n+\t\tif (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))\n+\t\t\ttx_internal_port = false;\n+\n \t\t/* Update no of connections */\n \t\ttx_adapter->nb_connections++;\n \t}\n \n+\tif (tx_internal_port) {\n+\t\t/* Tx core is not required */\n+\t\ttx_adapter->tx_core_id = -1;\n+\t} else {\n+\t\t/* Tx core is required */\n+\t\ttx_adapter->tx_core_id = internal_get_next_eth_core(em_conf);\n+\n+\t\t/*\n+\t\t * Application would need to use one event queue per adapter for\n+\t\t * submitting packets for Tx. Reserving the last queue available\n+\t\t */\n+\t\t/* Queue numbers start at 0 */\n+\t\ttx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;\n+\t}\n+\n \t/* We have setup one adapter */\n \tem_conf->nb_tx_adapter = 1;\n \treturn 0;\n@@ -620,6 +730,9 @@ rte_eventmode_helper_validate_conf(struct eventmode_conf *em_conf)\n \t\t\treturn ret;\n \t}\n \n+\t/* Perform capability check for the selected event devices*/\n+\trte_eventmode_helper_do_capability_check(em_conf);\n+\n \t/*\n \t * See if rx adapters are specified. Else generate a default conf\n \t * with one rx adapter and all eth queue - event queue mapped.\n@@ -681,10 +794,6 @@ rte_eventmode_helper_initialize_eventdev(struct eventmode_conf *em_conf)\n \t\t/* Get the number of queues */\n \t\tnb_eventqueue = eventdev_config->nb_eventqueue;\n \n-\t\t/* One queue is reserved for the final stage (doing eth tx) */\n-\t\t/* TODO handles only one Tx adapter. Fix this */\n-\t\tnb_eventqueue += 1;\n-\n \t\t/* Reset the default conf */\n \t\tmemset(&evdev_default_conf, 0,\n \t\t\tsizeof(struct rte_event_dev_info));\n@@ -730,14 +839,15 @@ rte_eventmode_helper_initialize_eventdev(struct eventmode_conf *em_conf)\n \t\t\t/* Per event dev queues can be ATQ or SINGLE LINK */\n \t\t\teventq_conf.event_queue_cfg =\n \t\t\t\t\teventdev_config->ev_queue_mode;\n-\n \t\t\t/*\n \t\t\t * All queues need to be set with sched_type as\n-\t\t\t * schedule type for the application stage. One queue\n-\t\t\t * would be reserved for the final eth tx stage. This\n-\t\t\t * will be an atomic queue.\n+\t\t\t * schedule type for the application stage. One\n+\t\t\t * queue would be reserved for the final eth tx\n+\t\t\t * stage if event device does not have internal\n+\t\t\t * ports. 
This will be an atomic queue.\n \t\t\t */\n-\t\t\tif (j == nb_eventqueue-1) {\n+\t\t\tif (!eventdev_config->all_internal_ports &&\n+\t\t\t    j == nb_eventqueue-1) {\n \t\t\t\teventq_conf.schedule_type =\n \t\t\t\t\tRTE_SCHED_TYPE_ATOMIC;\n \t\t\t} else {\n@@ -867,8 +977,10 @@ rx_adapter_configure(struct eventmode_conf *em_conf,\n \n \t/* Setup various connections in the adapter */\n \n+#ifdef UNSELECT\n \tqueue_conf.rx_queue_flags =\n \t\t\tRTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;\n+#endif /* UNSELECT */\n \n \tfor (j = 0; j < adapter->nb_connections; j++) {\n \t\t/* Get connection */\n@@ -877,9 +989,12 @@ rx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t/* Setup queue conf */\n \t\tqueue_conf.ev.queue_id = conn->eventq_id;\n \t\tqueue_conf.ev.sched_type = em_conf->ext_params.sched_type;\n+\t\tqueue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;\n \n+#ifdef UNSELECT\n \t\t/* Set flow ID as ethdev ID */\n \t\tqueue_conf.ev.flow_id = conn->ethdev_id;\n+#endif /* UNSELECT */\n \n \t\t/* Add queue to the adapter */\n \t\tret = rte_event_eth_rx_adapter_queue_add(\n@@ -945,8 +1060,8 @@ tx_adapter_configure(struct eventmode_conf *em_conf,\n {\n \tint ret, j;\n \tuint8_t tx_port_id = 0;\n-\tuint8_t eventdev_id;\n \tuint32_t service_id;\n+\tuint8_t eventdev_id;\n \tstruct rte_event_port_conf port_conf = {0};\n \tstruct rte_event_dev_info evdev_default_conf = {0};\n \tstruct tx_adapter_connection_info *conn;\n@@ -1004,6 +1119,18 @@ tx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t}\n \t}\n \n+\t/*\n+\t * Check if Tx core is assigned. If Tx core is not assigned, then\n+\t * the adapter would be having internal port for submitting packets\n+\t * for Tx and so Tx event queue & port setup is not required\n+\t */\n+\tif (adapter->tx_core_id == (uint32_t) (-1)) {\n+\t\t/* Internal port is present */\n+\t\tgoto skip_tx_queue_port_setup;\n+\t}\n+\n+\t/* Setup Tx queue & port */\n+\n \t/* Get event port used by the adapter */\n \tret = rte_event_eth_tx_adapter_event_port_get(\n \t\t\tadapter->adapter_id,\n@@ -1014,11 +1141,6 @@ tx_adapter_configure(struct eventmode_conf *em_conf,\n \t}\n \n \t/*\n-\t * TODO: event queue for Tx adapter is required only if the\n-\t * INTERNAL PORT is not present.\n-\t */\n-\n-\t/*\n \t * Tx event queue would be reserved for Tx adapter. 
Need to unlink\n \t * this queue from all other ports\n \t *\n@@ -1028,6 +1150,7 @@ tx_adapter_configure(struct eventmode_conf *em_conf,\n \t\t\t\t      &(adapter->tx_ev_queue), 1);\n \t}\n \n+\t/* Link Tx event queue to Tx port */\n \tret = rte_event_port_link(\n \t\t\teventdev_id,\n \t\t\ttx_port_id,\n@@ -1055,6 +1178,8 @@ tx_adapter_configure(struct eventmode_conf *em_conf,\n \t */\n \trte_service_set_runstate_mapped_check(service_id, 0);\n \n+skip_tx_queue_port_setup:\n+\n \t/* Start adapter */\n \tret = rte_event_eth_tx_adapter_start(adapter->adapter_id);\n \tif (ret) {\n@@ -1437,6 +1562,14 @@ rte_eventmode_helper_find_worker(uint32_t lcore_id,\n \telse\n \t\tcurr_conf.cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST;\n \n+\t/* Check for Tx internal port */\n+\tif (internal_dev_has_tx_internal_port(eventdev_id))\n+\t\tcurr_conf.cap.tx_internal_port =\n+\t\t\t\tRTE_EVENTMODE_HELPER_TX_TYPE_INTERNAL_PORT;\n+\telse\n+\t\tcurr_conf.cap.tx_internal_port =\n+\t\t\t\tRTE_EVENTMODE_HELPER_TX_TYPE_NO_INTERNAL_PORT;\n+\n \t/* Now parse the passed list and see if we have matching capabilities */\n \n \t/* Initialize the pointer used to traverse the list */\ndiff --git a/lib/librte_eventdev/rte_eventmode_helper.h b/lib/librte_eventdev/rte_eventmode_helper.h\nindex 2212622..f705eec 100644\n--- a/lib/librte_eventdev/rte_eventmode_helper.h\n+++ b/lib/librte_eventdev/rte_eventmode_helper.h\n@@ -36,6 +36,8 @@ enum rte_eventmode_helper_rx_types {\n /* Event mode packet tx types */\n enum rte_eventmode_helper_tx_types {\n \tRTE_EVETNMODE_HELPER_TX_TYPE_INVALID = 0,\n+\tRTE_EVENTMODE_HELPER_TX_TYPE_INTERNAL_PORT,\n+\tRTE_EVENTMODE_HELPER_TX_TYPE_NO_INTERNAL_PORT,\n \tRTE_EVENTMODE_HELPER_TX_TYPE_MAX = 16\n };\n \n@@ -85,6 +87,8 @@ struct rte_eventmode_helper_app_worker_params {\n \t\tstruct {\n \t\t\tuint64_t burst : 4;\n \t\t\t/**< Specify status of rx type burst */\n+\t\t\tuint64_t tx_internal_port : 4;\n+\t\t\t/**< Specify whether tx internal port is available */\n \t\t};\n \t\tuint64_t u64;\n \t} cap;\ndiff --git a/lib/librte_eventdev/rte_eventmode_helper_internal.h b/lib/librte_eventdev/rte_eventmode_helper_internal.h\nindex 1daca22..44796e3 100644\n--- a/lib/librte_eventdev/rte_eventmode_helper_internal.h\n+++ b/lib/librte_eventdev/rte_eventmode_helper_internal.h\n@@ -66,6 +66,7 @@ struct eventdev_params {\n \tuint8_t nb_eventqueue;\n \tuint8_t nb_eventport;\n \tuint8_t ev_queue_mode;\n+\tuint8_t all_internal_ports;\n };\n \n /* Rx adapter connection info */\ndiff --git a/lib/librte_eventdev/rte_eventmode_helper_prints.c b/lib/librte_eventdev/rte_eventmode_helper_prints.c\nindex 387302a..0a34f43 100644\n--- a/lib/librte_eventdev/rte_eventmode_helper_prints.c\n+++ b/lib/librte_eventdev/rte_eventmode_helper_prints.c\n@@ -64,13 +64,22 @@ rte_eventmode_display_rx_adapter_conf(struct eventmode_conf *em_conf)\n \n \tfor (i = 0; i < nb_rx_adapter; i++) {\n \t\tadapter = &(em_conf->rx_adapter[i]);\n-\t\tRTE_EM_HLPR_LOG_INFO(\n-\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\"\n-\t\t\t\"\\tRx core: %-2d\",\n+\t\tsprintf(print_buf,\n+\t\t\t\"\\tRx adaper ID: %-2d\\tConnections: %-2d\\tEvent dev ID: %-2d\",\n \t\t\tadapter->adapter_id,\n \t\t\tadapter->nb_connections,\n-\t\t\tadapter->eventdev_id,\n-\t\t\tadapter->rx_core_id);\n+\t\t\tadapter->eventdev_id);\n+\t\tif (adapter->rx_core_id == (uint32_t)-1)\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2s\", \"[INTERNAL PORT]\");\n+\t\telse if (adapter->rx_core_id == RTE_MAX_LCORE)\n+\t\t\tsprintf(print_buf + 
strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2s\", \"[NONE]\");\n+\t\telse\n+\t\t\tsprintf(print_buf + strlen(print_buf),\n+\t\t\t\t\"\\tRx core: %-2d\", adapter->rx_core_id);\n+\n+\t\tRTE_EM_HLPR_LOG_INFO(\"%s\", print_buf);\n \n \t\tfor (j = 0; j < adapter->nb_connections; j++) {\n \t\t\tconn = &(adapter->conn[j]);\n",
    "prefixes": [
        "34/39"
    ]
}
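
The Allow header above also lists PUT and PATCH for updating a patch. The sketch below is a minimal, hedged example of a partial update: it assumes that write access requires a Patchwork API token belonging to a project maintainer and that the "state" field shown above is writable, neither of which is confirmed by this page alone.

import requests

# Hypothetical token; write operations are assumed to need authentication.
token = "YOUR-PATCHWORK-API-TOKEN"

resp = requests.patch(
    "http://patches.dpdk.org/api/patches/54257/",
    headers={
        "Authorization": f"Token {token}",
        "Accept": "application/json",
    },
    # PATCH sends only the fields to change; "superseded" is an example value.
    json={"state": "superseded"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])

PUT behaves the same way but replaces the whole writable representation, so every writable field would need to be supplied in the request body.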