get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
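
Example (a minimal sketch of driving this endpoint with Python's requests
library; read access is anonymous, while PUT/PATCH need a Patchwork API
token sent as a Token header, and the PATCHWORK_TOKEN variable plus the
"accepted" state below are illustrative placeholders, not part of the
response that follows):

import os

import requests

URL = "https://patches.dpdk.org/api/patches/95247/"

# get: fetch the patch as JSON; no authentication is required to read.
resp = requests.get(URL, timeout=30)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# patch: partially update writable fields such as "state" or "delegate".
# Write access requires a maintainer API token for the project
# (PATCHWORK_TOKEN is a placeholder environment variable here).
headers = {"Authorization": "Token " + os.environ["PATCHWORK_TOKEN"]}
resp = requests.patch(URL, json={"state": "accepted"}, headers=headers,
                      timeout=30)
resp.raise_for_status()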

GET /api/patches/95247/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95247,
    "url": "https://patches.dpdk.org/api/patches/95247/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210703220022.1387-3-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210703220022.1387-3-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210703220022.1387-3-pbhagavatula@marvell.com",
    "date": "2021-07-03T22:00:18",
    "name": "[v7,3/7] event/cnxk: add Tx adapter support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3396bf8215c334baacb838d68149196397b9d97a",
    "submitter": {
        "id": 1183,
        "url": "https://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210703220022.1387-3-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 17612,
            "url": "https://patches.dpdk.org/api/series/17612/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17612",
            "date": "2021-07-03T22:00:16",
            "name": "[v7,1/7] event/cnxk: add Rx adapter support",
            "version": 7,
            "mbox": "https://patches.dpdk.org/series/17612/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95247/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/95247/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BB311A0C40;\n\tSun,  4 Jul 2021 00:00:56 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9CB34410FE;\n\tSun,  4 Jul 2021 00:00:45 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 6E8BF410F9\n for <dev@dpdk.org>; Sun,  4 Jul 2021 00:00:41 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 163M0ef5028738 for <dev@dpdk.org>; Sat, 3 Jul 2021 15:00:40 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 39jn8qhe40-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sat, 03 Jul 2021 15:00:40 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Sat, 3 Jul 2021 15:00:38 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.18 via Frontend\n Transport; Sat, 3 Jul 2021 15:00:38 -0700",
            "from BG-LT7430.marvell.com (BG-LT7430.marvell.com [10.28.177.176])\n by maili.marvell.com (Postfix) with ESMTP id E8EBF3F70C3;\n Sat,  3 Jul 2021 15:00:35 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=p6h7vbaSN5bm79hEFJmLHWsE8BbkA5uaUa3LqOfbeQo=;\n b=cIRUeP9tY+hv5loA1DS7Dm+G02R0rVxtVOxaPeSO30GCtvKO1WQJL3i/gR1YuOKDvgFf\n u51vBcsQFTN5h9iywi8in9zvEJxbMah4U/dc63O8yoL8FN4ThF7awG/7+T7yLVtjo6vp\n KtEdyyn9Ai7FVeB7llj8jrXPRlOdkVipgDkfkJ9Bo8yTjUQMJza6QSkY9gdAf/NsBjT7\n a/6eyhm2kdFKQeGO5j4g4AtMPZRIhSXF5tUZmvHza05Ht3oGqvlDvwN9/dKbB1RX1dW2\n I1tRHP9XNTgvkBy/+BqZF6mb35LX8oTTNzK0tClNr7ZchxAqG2RJGkcLKEi1QdvJWoi8 Yw==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>, \"Shijith\n Thotton\" <sthotton@marvell.com>,\n Nithin Dabilpuram <ndabilpuram@marvell.com>,\n Kiran Kumar K <kirankumark@marvell.com>, Sunil Kumar Kori\n <skori@marvell.com>, Satha Rao <skoteshwar@marvell.com>",
        "CC": "<dev@dpdk.org>",
        "Date": "Sun, 4 Jul 2021 03:30:18 +0530",
        "Message-ID": "<20210703220022.1387-3-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210703220022.1387-1-pbhagavatula@marvell.com>",
        "References": "<20210702211408.777-1-pbhagavatula@marvell.com>\n <20210703220022.1387-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "skdtgI3GcLR18TJEBmN-5baqLuJSlYU1",
        "X-Proofpoint-ORIG-GUID": "skdtgI3GcLR18TJEBmN-5baqLuJSlYU1",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.391, 18.0.790\n definitions=2021-07-03_07:2021-07-02,\n 2021-07-03 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v7 3/7] event/cnxk: add Tx adapter support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd support for event eth Tx adapter.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\nAcked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\n---\n doc/guides/eventdevs/cnxk.rst            |   4 +-\n doc/guides/rel_notes/release_21_08.rst   |   6 +-\n drivers/common/cnxk/roc_nix.h            |   1 +\n drivers/common/cnxk/roc_nix_queue.c      |   8 +-\n drivers/event/cnxk/cn10k_eventdev.c      |  91 ++++++++++++++\n drivers/event/cnxk/cn9k_eventdev.c       | 148 +++++++++++++++++++++++\n drivers/event/cnxk/cnxk_eventdev.h       |  22 +++-\n drivers/event/cnxk/cnxk_eventdev_adptr.c |  88 ++++++++++++++\n 8 files changed, 359 insertions(+), 9 deletions(-)\n\n--\n2.17.1",
    "diff": "diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst\nindex b7e82c127..6fdccc2ab 100644\n--- a/doc/guides/eventdevs/cnxk.rst\n+++ b/doc/guides/eventdevs/cnxk.rst\n@@ -42,7 +42,9 @@ Features of the OCTEON cnxk SSO PMD are:\n - HW managed packets enqueued from ethdev to eventdev exposed through event eth\n   RX adapter.\n - N:1 ethernet device Rx queue to Event queue mapping.\n-- Full Rx offload support defined through ethdev queue configuration.\n+- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``\n+  capability while maintaining receive packet order.\n+- Full Rx/Tx offload support defined through ethdev queue configuration.\n\n Prerequisites and Compilation procedure\n ---------------------------------------\ndiff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst\nindex 3892c8017..80ff93269 100644\n--- a/doc/guides/rel_notes/release_21_08.rst\n+++ b/doc/guides/rel_notes/release_21_08.rst\n@@ -60,10 +60,10 @@ New Features\n   * Added net/cnxk driver which provides the support for the integrated ethernet\n     device.\n\n-* **Added support for Marvell CN10K, CN9K, event Rx adapter.**\n+* **Added support for Marvell CN10K, CN9K, event Rx/Tx adapter.**\n\n-  * Added Rx adapter support for event/cnxk when the ethernet device requested is\n-    net/cnxk.\n+  * Added Rx/Tx adapter support for event/cnxk when the ethernet device requested\n+    is net/cnxk.\n\n\n Removed Items\ndiff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h\nindex 76613fe84..822c1900e 100644\n--- a/drivers/common/cnxk/roc_nix.h\n+++ b/drivers/common/cnxk/roc_nix.h\n@@ -200,6 +200,7 @@ struct roc_nix_sq {\n \tuint64_t aura_handle;\n \tint16_t nb_sqb_bufs_adj;\n \tuint16_t nb_sqb_bufs;\n+\tuint16_t aura_sqb_bufs;\n \tplt_iova_t io_addr;\n \tvoid *lmt_addr;\n \tvoid *sqe_mem;\ndiff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c\nindex 0604e7a18..7e2f86eca 100644\n--- a/drivers/common/cnxk/roc_nix_queue.c\n+++ b/drivers/common/cnxk/roc_nix_queue.c\n@@ -587,12 +587,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)\n \taura.fc_ena = 1;\n \taura.fc_addr = (uint64_t)sq->fc;\n \taura.fc_hyst_bits = 0; /* Store count on all updates */\n-\trc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,\n+\trc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,\n \t\t\t\t &pool);\n \tif (rc)\n \t\tgoto fail;\n\n-\tsq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);\n+\tsq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);\n \tif (sq->sqe_mem == NULL) {\n \t\trc = NIX_ERR_NO_MEM;\n \t\tgoto nomem;\n@@ -600,11 +600,13 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)\n\n \t/* Fill the initial buffers */\n \tiova = (uint64_t)sq->sqe_mem;\n-\tfor (count = 0; count < nb_sqb_bufs; count++) {\n+\tfor (count = 0; count < NIX_MAX_SQB; count++) {\n \t\troc_npa_aura_op_free(sq->aura_handle, 0, iova);\n \t\tiova += blk_sz;\n \t}\n \troc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);\n+\troc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);\n+\tsq->aura_sqb_bufs = NIX_MAX_SQB;\n\n \treturn rc;\n nomem:\ndiff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c\nindex ba7d95fff..8a9b04a3d 100644\n--- a/drivers/event/cnxk/cn10k_eventdev.c\n+++ b/drivers/event/cnxk/cn10k_eventdev.c\n@@ -44,6 +44,7 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)\n \t/* First cache 
line is reserved for cookie */\n \tws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);\n \tws->base = roc_sso_hws_base_get(&dev->sso, port_id);\n+\tws->tx_base = ws->base;\n \tws->hws_id = port_id;\n \tws->swtag_req = 0;\n \tws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);\n@@ -233,6 +234,39 @@ cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)\n \treturn roc_sso_rsrc_init(&dev->sso, hws, hwgrp);\n }\n\n+static int\n+cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tint i;\n+\n+\tif (dev->tx_adptr_data == NULL)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < dev->nb_event_ports; i++) {\n+\t\tstruct cn10k_sso_hws *ws = event_dev->data->ports[i];\n+\t\tvoid *ws_cookie;\n+\n+\t\tws_cookie = cnxk_sso_hws_get_cookie(ws);\n+\t\tws_cookie = rte_realloc_socket(\n+\t\t\tws_cookie,\n+\t\t\tsizeof(struct cnxk_sso_hws_cookie) +\n+\t\t\t\tsizeof(struct cn10k_sso_hws) +\n+\t\t\t\t(sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\tif (ws_cookie == NULL)\n+\t\t\treturn -ENOMEM;\n+\t\tws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));\n+\t\tmemcpy(&ws->tx_adptr_data, dev->tx_adptr_data,\n+\t\t       sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t       RTE_MAX_QUEUES_PER_PORT);\n+\t\tevent_dev->data->ports[i] = ws;\n+\t}\n+\n+\treturn 0;\n+}\n+\n static void\n cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)\n {\n@@ -493,6 +527,10 @@ cn10k_sso_start(struct rte_eventdev *event_dev)\n {\n \tint rc;\n\n+\trc = cn10k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n \trc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,\n \t\t\t    cn10k_sso_hws_flush_events);\n \tif (rc < 0)\n@@ -595,6 +633,55 @@ cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n \treturn cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);\n }\n\n+static int\n+cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev, uint32_t *caps)\n+{\n+\tint ret;\n+\n+\tRTE_SET_USED(dev);\n+\tret = strncmp(eth_dev->device->driver->name, \"net_cn10k\", 8);\n+\tif (ret)\n+\t\t*caps = 0;\n+\telse\n+\t\t*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t       const struct rte_eth_dev *eth_dev,\n+\t\t\t       int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\trc = cn10k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\tcn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t       const struct rte_eth_dev *eth_dev,\n+\t\t\t       int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\treturn cn10k_sso_updt_tx_adptr_data(event_dev);\n+}\n+\n static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.dev_infos_get = cn10k_sso_info_get,\n \t.dev_configure = cn10k_sso_dev_configure,\n@@ -614,6 +701,10 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,\n \t.eth_rx_adapter_stop = 
cnxk_sso_rx_adapter_stop,\n\n+\t.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,\n+\t.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,\n+\t.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,\n+\n \t.timer_adapter_caps_get = cnxk_tim_caps_get,\n\n \t.dump = cnxk_sso_dump,\ndiff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c\nindex e386cb784..21f80323d 100644\n--- a/drivers/event/cnxk/cn9k_eventdev.c\n+++ b/drivers/event/cnxk/cn9k_eventdev.c\n@@ -248,6 +248,66 @@ cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)\n \treturn roc_sso_rsrc_init(&dev->sso, hws, hwgrp);\n }\n\n+static int\n+cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tint i;\n+\n+\tif (dev->tx_adptr_data == NULL)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < dev->nb_event_ports; i++) {\n+\t\tif (dev->dual_ws) {\n+\t\t\tstruct cn9k_sso_hws_dual *dws =\n+\t\t\t\tevent_dev->data->ports[i];\n+\t\t\tvoid *ws_cookie;\n+\n+\t\t\tws_cookie = cnxk_sso_hws_get_cookie(dws);\n+\t\t\tws_cookie = rte_realloc_socket(\n+\t\t\t\tws_cookie,\n+\t\t\t\tsizeof(struct cnxk_sso_hws_cookie) +\n+\t\t\t\t\tsizeof(struct cn9k_sso_hws_dual) +\n+\t\t\t\t\t(sizeof(uint64_t) *\n+\t\t\t\t\t (dev->max_port_id + 1) *\n+\t\t\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\t\tif (ws_cookie == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t\tdws = RTE_PTR_ADD(ws_cookie,\n+\t\t\t\t\t  sizeof(struct cnxk_sso_hws_cookie));\n+\t\t\tmemcpy(&dws->tx_adptr_data, dev->tx_adptr_data,\n+\t\t\t       sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t\t       RTE_MAX_QUEUES_PER_PORT);\n+\t\t\tevent_dev->data->ports[i] = dws;\n+\t\t} else {\n+\t\t\tstruct cn9k_sso_hws *ws = event_dev->data->ports[i];\n+\t\t\tvoid *ws_cookie;\n+\n+\t\t\tws_cookie = cnxk_sso_hws_get_cookie(ws);\n+\t\t\tws_cookie = rte_realloc_socket(\n+\t\t\t\tws_cookie,\n+\t\t\t\tsizeof(struct cnxk_sso_hws_cookie) +\n+\t\t\t\t\tsizeof(struct cn9k_sso_hws_dual) +\n+\t\t\t\t\t(sizeof(uint64_t) *\n+\t\t\t\t\t (dev->max_port_id + 1) *\n+\t\t\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\t\tif (ws_cookie == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t\tws = RTE_PTR_ADD(ws_cookie,\n+\t\t\t\t\t sizeof(struct cnxk_sso_hws_cookie));\n+\t\t\tmemcpy(&ws->tx_adptr_data, dev->tx_adptr_data,\n+\t\t\t       sizeof(uint64_t) * (dev->max_port_id + 1) *\n+\t\t\t\t       RTE_MAX_QUEUES_PER_PORT);\n+\t\t\tevent_dev->data->ports[i] = ws;\n+\t\t}\n+\t}\n+\trte_mb();\n+\n+\treturn 0;\n+}\n+\n static void\n cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)\n {\n@@ -734,6 +794,10 @@ cn9k_sso_start(struct rte_eventdev *event_dev)\n {\n \tint rc;\n\n+\trc = cn9k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n \trc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,\n \t\t\t    cn9k_sso_hws_flush_events);\n \tif (rc < 0)\n@@ -844,6 +908,86 @@ cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n \treturn cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);\n }\n\n+static int\n+cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,\n+\t\t\t     const struct rte_eth_dev *eth_dev, uint32_t *caps)\n+{\n+\tint ret;\n+\n+\tRTE_SET_USED(dev);\n+\tret = strncmp(eth_dev->device->driver->name, \"net_cn9k\", 8);\n+\tif (ret)\n+\t\t*caps = 0;\n+\telse\n+\t\t*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;\n+\n+\treturn 0;\n+}\n+\n+static void\n+cn9k_sso_txq_fc_update(const struct 
rte_eth_dev *eth_dev, int32_t tx_queue_id,\n+\t\t       bool ena)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct cn9k_eth_txq *txq;\n+\tstruct roc_nix_sq *sq;\n+\tint i;\n+\n+\tif (tx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++)\n+\t\t\tcn9k_sso_txq_fc_update(eth_dev, i, ena);\n+\t} else {\n+\t\tuint16_t sq_limit;\n+\n+\t\tsq = &cnxk_eth_dev->sqs[tx_queue_id];\n+\t\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\t\tsq_limit =\n+\t\t\tena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :\n+\t\t\t\t    sq->nb_sqb_bufs;\n+\t\ttxq->nb_sqb_bufs_adj =\n+\t\t\tsq_limit -\n+\t\t\tRTE_ALIGN_MUL_CEIL(sq_limit,\n+\t\t\t\t\t   (1ULL << txq->sqes_per_sqb_log2)) /\n+\t\t\t\t(1ULL << txq->sqes_per_sqb_log2);\n+\t\ttxq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;\n+\t}\n+}\n+\n+static int\n+cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\tcn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);\n+\trc = cn9k_sso_updt_tx_adptr_data(event_dev);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\tcn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(id);\n+\trc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\tcn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);\n+\treturn cn9k_sso_updt_tx_adptr_data(event_dev);\n+}\n+\n static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.dev_infos_get = cn9k_sso_info_get,\n \t.dev_configure = cn9k_sso_dev_configure,\n@@ -863,6 +1007,10 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,\n \t.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,\n\n+\t.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,\n+\t.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,\n+\t.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,\n+\n \t.timer_adapter_caps_get = cnxk_tim_caps_get,\n\n \t.dump = cnxk_sso_dump,\ndiff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h\nindex 9d5d2d033..24e1be6a9 100644\n--- a/drivers/event/cnxk/cnxk_eventdev.h\n+++ b/drivers/event/cnxk/cnxk_eventdev.h\n@@ -8,6 +8,7 @@\n #include <rte_devargs.h>\n #include <rte_ethdev.h>\n #include <rte_event_eth_rx_adapter.h>\n+#include <rte_event_eth_tx_adapter.h>\n #include <rte_kvargs.h>\n #include <rte_mbuf_pool_ops.h>\n #include <rte_pci.h>\n@@ -34,6 +35,7 @@\n #define CNXK_SSO_XAQ_CACHE_CNT (0x7)\n #define CNXK_SSO_XAQ_SLACK     (8)\n #define CNXK_SSO_WQE_SG_PTR    (9)\n+#define CNXK_SSO_SQB_LIMIT     (0x180)\n\n #define CNXK_TT_FROM_TAG(x)\t    (((x) >> 32) & SSO_TT_EMPTY)\n #define CNXK_TT_FROM_EVENT(x)\t    (((x) >> 38) & SSO_TT_EMPTY)\n@@ -86,9 +88,12 @@ struct cnxk_sso_evdev {\n \trte_iova_t fc_iova;\n \tstruct rte_mempool *xaq_pool;\n \tuint64_t rx_offloads;\n+\tuint64_t tx_offloads;\n \tuint64_t adptr_xae_cnt;\n \tuint16_t rx_adptr_pool_cnt;\n \tuint64_t *rx_adptr_pools;\n+\tuint64_t *tx_adptr_data;\n+\tuint16_t max_port_id;\n \tuint16_t tim_adptr_ring_cnt;\n \tuint16_t *timer_adptr_rings;\n 
\tuint64_t *timer_adptr_sz;\n@@ -115,7 +120,10 @@ struct cn10k_sso_hws {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n+\t/* Tx Fastpath data */\n+\tuint64_t tx_base __rte_cache_aligned;\n \tuintptr_t lmt_base;\n+\tuint8_t tx_adptr_data[];\n } __rte_cache_aligned;\n\n /* CN9K HWS ops */\n@@ -140,7 +148,9 @@ struct cn9k_sso_hws {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n-\tuint64_t base;\n+\t/* Tx Fastpath data */\n+\tuint64_t base __rte_cache_aligned;\n+\tuint8_t tx_adptr_data[];\n } __rte_cache_aligned;\n\n struct cn9k_sso_hws_state {\n@@ -160,7 +170,9 @@ struct cn9k_sso_hws_dual {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n-\tuint64_t base[2];\n+\t/* Tx Fastpath data */\n+\tuint64_t base[2] __rte_cache_aligned;\n+\tuint8_t tx_adptr_data[];\n } __rte_cache_aligned;\n\n struct cnxk_sso_hws_cookie {\n@@ -267,5 +279,11 @@ int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,\n \t\t\t      const struct rte_eth_dev *eth_dev);\n int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,\n \t\t\t     const struct rte_eth_dev *eth_dev);\n+int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,\n+\t\t\t\t  const struct rte_eth_dev *eth_dev,\n+\t\t\t\t  int32_t tx_queue_id);\n+int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t\t  const struct rte_eth_dev *eth_dev,\n+\t\t\t\t  int32_t tx_queue_id);\n\n #endif /* __CNXK_EVENTDEV_H__ */\ndiff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c\nindex 3b7ecb375..502da272d 100644\n--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c\n+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c\n@@ -223,3 +223,91 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,\n\n \treturn 0;\n }\n+\n+static int\n+cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)\n+{\n+\treturn roc_npa_aura_limit_modify(\n+\t\tsq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));\n+}\n+\n+static int\n+cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,\n+\t\t\t    uint16_t eth_port_id, uint16_t tx_queue_id,\n+\t\t\t    void *txq)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tuint16_t max_port_id = dev->max_port_id;\n+\tuint64_t *txq_data = dev->tx_adptr_data;\n+\n+\tif (txq_data == NULL || eth_port_id > max_port_id) {\n+\t\tmax_port_id = RTE_MAX(max_port_id, eth_port_id);\n+\t\ttxq_data = rte_realloc_socket(\n+\t\t\ttxq_data,\n+\t\t\t(sizeof(uint64_t) * (max_port_id + 1) *\n+\t\t\t RTE_MAX_QUEUES_PER_PORT),\n+\t\t\tRTE_CACHE_LINE_SIZE, event_dev->data->socket_id);\n+\t\tif (txq_data == NULL)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\t((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])\n+\t\t txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;\n+\tdev->max_port_id = max_port_id;\n+\tdev->tx_adptr_data = txq_data;\n+\treturn 0;\n+}\n+\n+int\n+cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tstruct roc_nix_sq *sq;\n+\tint i, ret;\n+\tvoid *txq;\n+\n+\tif (tx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++)\n+\t\t\tcnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);\n+\t} else 
{\n+\t\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\t\tsq = &cnxk_eth_dev->sqs[tx_queue_id];\n+\t\tcnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);\n+\t\tret = cnxk_sso_updt_tx_queue_data(\n+\t\t\tevent_dev, eth_dev->data->port_id, tx_queue_id, txq);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\n+\t\tdev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t tx_queue_id)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct roc_nix_sq *sq;\n+\tint i, ret;\n+\n+\tRTE_SET_USED(event_dev);\n+\tif (tx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++)\n+\t\t\tcnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);\n+\t} else {\n+\t\tsq = &cnxk_eth_dev->sqs[tx_queue_id];\n+\t\tcnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);\n+\t\tret = cnxk_sso_updt_tx_queue_data(\n+\t\t\tevent_dev, eth_dev->data->port_id, tx_queue_id, NULL);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n",
    "prefixes": [
        "v7",
        "3/7"
    ]
}
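
Example follow-up (a minimal sketch, assuming the JSON above: it downloads
the per-patch mbox plus the mbox of each entry in "series", so either can
be applied to a local DPDK tree with git am; the output file names are
illustrative):

import requests

# Fetch the patch shown above.
patch = requests.get(
    "https://patches.dpdk.org/api/patches/95247/", timeout=30
).json()

# "mbox" points at a ready-to-apply mbox for this patch alone.
with open("patch-%d.mbox" % patch["id"], "wb") as f:
    f.write(requests.get(patch["mbox"], timeout=30).content)

# Each "series" entry carries its own mbox covering the whole series,
# which is usually preferable for a multi-part set such as this v7 3/7.
for series in patch["series"]:
    with open("series-%d.mbox" % series["id"], "wb") as f:
        f.write(requests.get(series["mbox"], timeout=30).content)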