get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
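For example, these operations can be exercised with a few lines of Python
(a minimal sketch, not part of this page: it assumes the third-party
"requests" package and, for the write call, a Patchwork API token plus
maintainer rights on the project):

import requests

BASE = "https://patches.dpdk.org/api"

# GET: fetch the patch shown below as JSON.
patch = requests.get(f"{BASE}/patches/94549/").json()
print(patch["name"], patch["state"])

# PATCH: partially update a patch, e.g. its state (left commented out;
# API_TOKEN is a placeholder for a token from your Patchwork profile).
# requests.patch(
#     f"{BASE}/patches/94549/",
#     headers={"Authorization": f"Token {API_TOKEN}"},
#     json={"state": "superseded"},
# ).raise_for_status()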

GET /api/patches/94549/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94549,
    "url": "https://patches.dpdk.org/api/patches/94549/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210619110154.10301-9-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210619110154.10301-9-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210619110154.10301-9-pbhagavatula@marvell.com",
    "date": "2021-06-19T11:01:49",
    "name": "[v2,09/13] event/cnxk: add Tx adapter support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "da943d5869af1447a81db92f9db01f6960f93d1a",
    "submitter": {
        "id": 1183,
        "url": "https://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210619110154.10301-9-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 17405,
            "url": "https://patches.dpdk.org/api/series/17405/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17405",
            "date": "2021-06-19T11:01:41",
            "name": "[v2,01/13] net/cnxk: add multi seg Rx vector routine",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/17405/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94549/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/94549/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9435CA0A0C;\n\tSat, 19 Jun 2021 13:03:23 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 22E844116E;\n\tSat, 19 Jun 2021 13:02:44 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id D2A604116B\n for <dev@dpdk.org>; Sat, 19 Jun 2021 13:02:41 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 15JAu1tT007709 for <dev@dpdk.org>; Sat, 19 Jun 2021 04:02:41 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com with ESMTP id 398tu0v627-2\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sat, 19 Jun 2021 04:02:41 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Sat, 19 Jun 2021 04:02:38 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend\n Transport; Sat, 19 Jun 2021 04:02:38 -0700",
            "from BG-LT7430.marvell.com (BG-LT7430.marvell.com [10.28.177.176])\n by maili.marvell.com (Postfix) with ESMTP id 43A955B6965;\n Sat, 19 Jun 2021 04:02:35 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=6rWi1jpmOVnbMyzaX61XwLDFek/pMbmxcOnq8wJ/Nrk=;\n b=UrnYCTJJtRT19Np0tAEMOOApTX9oG+uiOLIkxWhcLMic877mRSxG6fqsinUR0vIUfU+y\n cet4u1SUTzKmVehTKXj/XHFex+OuJhVZJmRDzWuyWT+6A8hiysbJ2sYHKuPFm5tjUDx8\n CUdBfQtw0SqrCcXB4QKk2gRYKN8qUBUN1dKNRVbClQI+JQXDi9DyCV1lmdfhojTs8Gft\n bU5jqb9Yies5pU9RU+7D0i2XyDuavpMC/b/eIvfTyKX8Ep47aGeI6XBXgTvrGil3lHen\n SaBn4DLtCuOEpR/psNUZZkGX2qTGf9FbHIJ/Go29Xowodrd+l1xEnkiA/4/vIoW8aWyF kQ==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>, \"Shijith\n Thotton\" <sthotton@marvell.com>",
        "CC": "<dev@dpdk.org>",
        "Date": "Sat, 19 Jun 2021 16:31:49 +0530",
        "Message-ID": "<20210619110154.10301-9-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210619110154.10301-1-pbhagavatula@marvell.com>",
        "References": "<20210524122303.1116-1-pbhagavatula@marvell.com>\n <20210619110154.10301-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "OBZWPsD8jRGa0xYkMdHwVOhP8rQrLCsv",
        "X-Proofpoint-ORIG-GUID": "OBZWPsD8jRGa0xYkMdHwVOhP8rQrLCsv",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.391, 18.0.790\n definitions=2021-06-19_09:2021-06-18,\n 2021-06-19 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v2 09/13] event/cnxk: add Tx adapter support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd support for event eth Tx adapter.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n doc/guides/eventdevs/cnxk.rst            |   4 +-\n doc/guides/rel_notes/release_21_08.rst   |   6 +-\n drivers/event/cnxk/cn10k_eventdev.c      |  91 ++++++++++++++++++\n drivers/event/cnxk/cn9k_eventdev.c       | 117 +++++++++++++++++++++++\n drivers/event/cnxk/cnxk_eventdev.h       |  21 +++-\n drivers/event/cnxk/cnxk_eventdev_adptr.c | 106 ++++++++++++++++++++\n 6 files changed, 339 insertions(+), 6 deletions(-)",
    "diff": "diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst\nindex b7e82c1273..6fdccc2ab4 100644\n--- a/doc/guides/eventdevs/cnxk.rst\n+++ b/doc/guides/eventdevs/cnxk.rst\n@@ -42,7 +42,9 @@ Features of the OCTEON cnxk SSO PMD are:\n - HW managed packets enqueued from ethdev to eventdev exposed through event eth\n   RX adapter.\n - N:1 ethernet device Rx queue to Event queue mapping.\n-- Full Rx offload support defined through ethdev queue configuration.\n+- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``\n+  capability while maintaining receive packet order.\n+- Full Rx/Tx offload support defined through ethdev queue configuration.\n \n Prerequisites and Compilation procedure\n ---------------------------------------\ndiff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst\nindex 3892c8017a..80ff93269c 100644\n--- a/doc/guides/rel_notes/release_21_08.rst\n+++ b/doc/guides/rel_notes/release_21_08.rst\n@@ -60,10 +60,10 @@ New Features\n   * Added net/cnxk driver which provides the support for the integrated ethernet\n     device.\n \n-* **Added support for Marvell CN10K, CN9K, event Rx adapter.**\n+* **Added support for Marvell CN10K, CN9K, event Rx/Tx adapter.**\n \n-  * Added Rx adapter support for event/cnxk when the ethernet device requested is\n-    net/cnxk.\n+  * Added Rx/Tx adapter support for event/cnxk when the ethernet device requested\n+    is net/cnxk.\n \n \n Removed Items\ndiff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c\nindex ba7d95fff7..8a9b04a3db 100644\n--- a/drivers/event/cnxk/cn10k_eventdev.c\n+++ b/drivers/event/cnxk/cn10k_eventdev.c\n@@ -44,6 +44,7 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)\n \t/* First cache line is reserved for cookie */\n \tws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);\n \tws->base = roc_sso_hws_base_get(&dev->sso, port_id);\n+\tws->tx_base = ws->base;\n \tws->hws_id = port_id;\n \tws->swtag_req = 0;\n \tws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);\n@@ -233,6 +234,39 @@ cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)\n \treturn roc_sso_rsrc_init(&dev->sso, hws, hwgrp);\n }\n \n+static int\n+cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tint i;\n+\n+\tif (dev->tx_adptr_data == NULL)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < dev->nb_event_ports; i++) {\n+\t\tstruct cn10k_sso_hws *ws = event_dev->data->ports[i];\n+\t\tvoid *ws_cookie;\n+\n+\t\tws_cookie = cnxk_sso_hws_get_cookie(ws);\n+\t\tws_cookie = rte_realloc_socket(\n+\t\t\tws_cookie,\n+\t\t\tsizeof(struct cnxk_sso_hws_cookie) +\n+\t\t\t\tsizeof(struct cn10k_sso_hws) +\n+\t\t\t\t(sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\tif (ws_cookie == NULL)\n+\t\t\treturn -ENOMEM;\n+\t\tws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));\n+\t\tmemcpy(&ws->tx_adptr_data, dev->tx_adptr_data,\n+\t\t       sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t       RTE_MAX_QUEUES_PER_PORT);\n+\t\tevent_dev->data->ports[i] = ws;\n+\t}\n+\n+\treturn 0;\n+}\n+\n static void\n cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)\n {\n@@ -493,6 +527,10 @@ cn10k_sso_start(struct rte_eventdev *event_dev)\n {\n \tint rc;\n \n+\trc = cn10k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n \trc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,\n 
\t\t\t    cn10k_sso_hws_flush_events);\n \tif (rc < 0)\n@@ -595,6 +633,55 @@ cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n \treturn cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);\n }\n \n+static int\n+cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev, uint32_t *caps)\n+{\n+\tint ret;\n+\n+\tRTE_SET_USED(dev);\n+\tret = strncmp(eth_dev->device->driver->name, \"net_cn10k\", 8);\n+\tif (ret)\n+\t\t*caps = 0;\n+\telse\n+\t\t*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t       const struct rte_eth_dev *eth_dev,\n+\t\t\t       int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\trc = cn10k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\tcn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t       const struct rte_eth_dev *eth_dev,\n+\t\t\t       int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\treturn cn10k_sso_updt_tx_adptr_data(event_dev);\n+}\n+\n static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.dev_infos_get = cn10k_sso_info_get,\n \t.dev_configure = cn10k_sso_dev_configure,\n@@ -614,6 +701,10 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,\n \t.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,\n \n+\t.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,\n+\t.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,\n+\t.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,\n+\n \t.timer_adapter_caps_get = cnxk_tim_caps_get,\n \n \t.dump = cnxk_sso_dump,\ndiff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c\nindex e386cb784a..bdc5632235 100644\n--- a/drivers/event/cnxk/cn9k_eventdev.c\n+++ b/drivers/event/cnxk/cn9k_eventdev.c\n@@ -248,6 +248,66 @@ cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)\n \treturn roc_sso_rsrc_init(&dev->sso, hws, hwgrp);\n }\n \n+static int\n+cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tint i;\n+\n+\tif (dev->tx_adptr_data == NULL)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < dev->nb_event_ports; i++) {\n+\t\tif (dev->dual_ws) {\n+\t\t\tstruct cn9k_sso_hws_dual *dws =\n+\t\t\t\tevent_dev->data->ports[i];\n+\t\t\tvoid *ws_cookie;\n+\n+\t\t\tws_cookie = cnxk_sso_hws_get_cookie(dws);\n+\t\t\tws_cookie = rte_realloc_socket(\n+\t\t\t\tws_cookie,\n+\t\t\t\tsizeof(struct cnxk_sso_hws_cookie) +\n+\t\t\t\t\tsizeof(struct cn9k_sso_hws_dual) +\n+\t\t\t\t\t(sizeof(uint64_t) *\n+\t\t\t\t\t (dev->max_port_id + 1) *\n+\t\t\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\t\tif (ws_cookie == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t\tdws = RTE_PTR_ADD(ws_cookie,\n+\t\t\t\t\t  sizeof(struct cnxk_sso_hws_cookie));\n+\t\t\tmemcpy(&dws->tx_adptr_data, dev->tx_adptr_data,\n+\t\t\t       sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t\t       RTE_MAX_QUEUES_PER_PORT);\n+\t\t\tevent_dev->data->ports[i] = dws;\n+\t\t} 
else {\n+\t\t\tstruct cn9k_sso_hws *ws = event_dev->data->ports[i];\n+\t\t\tvoid *ws_cookie;\n+\n+\t\t\tws_cookie = cnxk_sso_hws_get_cookie(ws);\n+\t\t\tws_cookie = rte_realloc_socket(\n+\t\t\t\tws_cookie,\n+\t\t\t\tsizeof(struct cnxk_sso_hws_cookie) +\n+\t\t\t\t\tsizeof(struct cn9k_sso_hws_dual) +\n+\t\t\t\t\t(sizeof(uint64_t) *\n+\t\t\t\t\t (dev->max_port_id + 1) *\n+\t\t\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\t\tif (ws_cookie == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t\tws = RTE_PTR_ADD(ws_cookie,\n+\t\t\t\t\t sizeof(struct cnxk_sso_hws_cookie));\n+\t\t\tmemcpy(&ws->tx_adptr_data, dev->tx_adptr_data,\n+\t\t\t       sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t\t       RTE_MAX_QUEUES_PER_PORT);\n+\t\t\tevent_dev->data->ports[i] = ws;\n+\t\t}\n+\t}\n+\trte_mb();\n+\n+\treturn 0;\n+}\n+\n static void\n cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)\n {\n@@ -734,6 +794,10 @@ cn9k_sso_start(struct rte_eventdev *event_dev)\n {\n \tint rc;\n \n+\trc = cn9k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n \trc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,\n \t\t\t    cn9k_sso_hws_flush_events);\n \tif (rc < 0)\n@@ -844,6 +908,55 @@ cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n \treturn cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);\n }\n \n+static int\n+cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,\n+\t\t\t     const struct rte_eth_dev *eth_dev, uint32_t *caps)\n+{\n+\tint ret;\n+\n+\tRTE_SET_USED(dev);\n+\tret = strncmp(eth_dev->device->driver->name, \"net_cn9k\", 8);\n+\tif (ret)\n+\t\t*caps = 0;\n+\telse\n+\t\t*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\trc = cn9k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\tcn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\treturn cn9k_sso_updt_tx_adptr_data(event_dev);\n+}\n+\n static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.dev_infos_get = cn9k_sso_info_get,\n \t.dev_configure = cn9k_sso_dev_configure,\n@@ -863,6 +976,10 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,\n \t.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,\n \n+\t.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,\n+\t.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,\n+\t.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,\n+\n \t.timer_adapter_caps_get = cnxk_tim_caps_get,\n \n \t.dump = cnxk_sso_dump,\ndiff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h\nindex 9d5d2d0339..458fdc8d92 100644\n--- a/drivers/event/cnxk/cnxk_eventdev.h\n+++ b/drivers/event/cnxk/cnxk_eventdev.h\n@@ -8,6 +8,7 @@\n #include <rte_devargs.h>\n #include <rte_ethdev.h>\n #include <rte_event_eth_rx_adapter.h>\n+#include 
<rte_event_eth_tx_adapter.h>\n #include <rte_kvargs.h>\n #include <rte_mbuf_pool_ops.h>\n #include <rte_pci.h>\n@@ -86,9 +87,12 @@ struct cnxk_sso_evdev {\n \trte_iova_t fc_iova;\n \tstruct rte_mempool *xaq_pool;\n \tuint64_t rx_offloads;\n+\tuint64_t tx_offloads;\n \tuint64_t adptr_xae_cnt;\n \tuint16_t rx_adptr_pool_cnt;\n \tuint64_t *rx_adptr_pools;\n+\tuint64_t *tx_adptr_data;\n+\tuint16_t max_port_id;\n \tuint16_t tim_adptr_ring_cnt;\n \tuint16_t *timer_adptr_rings;\n \tuint64_t *timer_adptr_sz;\n@@ -115,7 +119,10 @@ struct cn10k_sso_hws {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n+\t/* Tx Fastpath data */\n+\tuint64_t tx_base __rte_cache_aligned;\n \tuintptr_t lmt_base;\n+\tuint8_t tx_adptr_data[];\n } __rte_cache_aligned;\n \n /* CN9K HWS ops */\n@@ -140,7 +147,9 @@ struct cn9k_sso_hws {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n-\tuint64_t base;\n+\t/* Tx Fastpath data */\n+\tuint64_t base __rte_cache_aligned;\n+\tuint8_t tx_adptr_data[];\n } __rte_cache_aligned;\n \n struct cn9k_sso_hws_state {\n@@ -160,7 +169,9 @@ struct cn9k_sso_hws_dual {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n-\tuint64_t base[2];\n+\t/* Tx Fastpath data */\n+\tuint64_t base[2] __rte_cache_aligned;\n+\tuint8_t tx_adptr_data[];\n } __rte_cache_aligned;\n \n struct cnxk_sso_hws_cookie {\n@@ -267,5 +278,11 @@ int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,\n \t\t\t      const struct rte_eth_dev *eth_dev);\n int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,\n \t\t\t     const struct rte_eth_dev *eth_dev);\n+int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,\n+\t\t\t\t  const struct rte_eth_dev *eth_dev,\n+\t\t\t\t  int32_t tx_queue_id);\n+int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t\t  const struct rte_eth_dev *eth_dev,\n+\t\t\t\t  int32_t tx_queue_id);\n \n #endif /* __CNXK_EVENTDEV_H__ */\ndiff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c\nindex 24bfd985e7..548d7b81ce 100644\n--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c\n+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c\n@@ -5,6 +5,8 @@\n #include \"cnxk_ethdev.h\"\n #include \"cnxk_eventdev.h\"\n \n+#define CNXK_SSO_SQB_LIMIT (0x180)\n+\n void\n cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,\n \t\t      uint32_t event_type)\n@@ -241,3 +243,107 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,\n \n \treturn 0;\n }\n+\n+static int\n+cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)\n+{\n+\tuint16_t sqb_limit;\n+\n+\tsqb_limit = RTE_MIN(nb_sqb_bufs, sq->nb_sqb_bufs);\n+\treturn roc_npa_aura_limit_modify(sq->aura_handle, sqb_limit);\n+}\n+\n+static int\n+cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,\n+\t\t\t    uint16_t eth_port_id, uint16_t tx_queue_id,\n+\t\t\t    void *txq)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tuint16_t max_port_id = dev->max_port_id;\n+\tuint64_t *txq_data = dev->tx_adptr_data;\n+\n+\tif (txq_data == NULL || eth_port_id > max_port_id) {\n+\t\tmax_port_id = RTE_MAX(max_port_id, eth_port_id);\n+\t\ttxq_data = rte_realloc_socket(\n+\t\t\ttxq_data,\n+\t\t\t(sizeof(uint64_t) * (max_port_id + 1) *\n+\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\tRTE_CACHE_LINE_SIZE, event_dev->data->socket_id);\n+\t\tif 
(txq_data == NULL)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\t((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])\n+\t\t txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;\n+\tdev->max_port_id = max_port_id;\n+\tdev->tx_adptr_data = txq_data;\n+\treturn 0;\n+}\n+\n+int\n+cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tstruct roc_nix_sq *sq;\n+\tint i, ret;\n+\tvoid *txq;\n+\n+\tif (tx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n+\t\t\ttxq = eth_dev->data->tx_queues[i];\n+\t\t\tsq = &cnxk_eth_dev->sqs[i];\n+\t\t\tcnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);\n+\t\t\tret = cnxk_sso_updt_tx_queue_data(\n+\t\t\t\tevent_dev, eth_dev->data->port_id, i, txq);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\t\tsq = &cnxk_eth_dev->sqs[tx_queue_id];\n+\t\tcnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);\n+\t\tret = cnxk_sso_updt_tx_queue_data(\n+\t\t\tevent_dev, eth_dev->data->port_id, tx_queue_id, txq);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\n+\tdev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;\n+\n+\treturn 0;\n+}\n+\n+int\n+cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct roc_nix_sq *sq;\n+\tint i, ret;\n+\n+\tRTE_SET_USED(event_dev);\n+\tif (tx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n+\t\t\tsq = &cnxk_eth_dev->sqs[i];\n+\t\t\tcnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);\n+\t\t\tret = cnxk_sso_updt_tx_queue_data(\n+\t\t\t\tevent_dev, eth_dev->data->port_id, tx_queue_id,\n+\t\t\t\tNULL);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\tsq = &cnxk_eth_dev->sqs[tx_queue_id];\n+\t\tcnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);\n+\t\tret = cnxk_sso_updt_tx_queue_data(\n+\t\t\tevent_dev, eth_dev->data->port_id, tx_queue_id, NULL);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n",
    "prefixes": [
        "v2",
        "09/13"
    ]
}
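
The "mbox" URL in the response serves the same patch in mbox form, which
is the usual way to apply it to a tree (a minimal sketch under the same
"requests" assumption as above; running "git am" presumes a local DPDK
checkout):

import requests

mbox_url = ("https://patches.dpdk.org/project/dpdk/patch/"
            "20210619110154.10301-9-pbhagavatula@marvell.com/mbox/")
with open("patch.mbox", "wb") as f:
    f.write(requests.get(mbox_url).content)
# Then, inside the DPDK source tree: git am patch.mbox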