get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

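The exchange below can be reproduced with any HTTP client. A minimal sketch in Python follows; the requests package and the patch ID 94564 come from this page, and requesting JSON via the Accept header avoids the browsable ?format=api rendering shown below:

import requests

# Read access needs no authentication.
resp = requests.get(
    "https://patches.dpdk.org/api/patches/94564/",
    headers={"Accept": "application/json"},
    timeout=30,
)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "-", patch["state"])  # "[v3,07/13] event/cnxk: add Rx adapter support" - "superseded"
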
GET /api/patches/94564/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94564,
    "url": "https://patches.dpdk.org/api/patches/94564/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210620202906.10974-7-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210620202906.10974-7-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210620202906.10974-7-pbhagavatula@marvell.com",
    "date": "2021-06-20T20:29:00",
    "name": "[v3,07/13] event/cnxk: add Rx adapter support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "cf44f4a2ef2ec2d79d19f7ad385bc08cd160bcf9",
    "submitter": {
        "id": 1183,
        "url": "https://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210620202906.10974-7-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 17410,
            "url": "https://patches.dpdk.org/api/series/17410/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17410",
            "date": "2021-06-20T20:28:54",
            "name": "[v3,01/13] net/cnxk: add multi seg Rx vector routine",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/17410/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94564/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/94564/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1D65DA0547;\n\tSun, 20 Jun 2021 22:30:14 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BA6E941142;\n\tSun, 20 Jun 2021 22:29:43 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 1AE334003F\n for <dev@dpdk.org>; Sun, 20 Jun 2021 22:29:41 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 15KKQB98010118; Sun, 20 Jun 2021 13:29:38 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0a-0016f401.pphosted.com with ESMTP id 399dxrmgs2-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Sun, 20 Jun 2021 13:29:38 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Sun, 20 Jun 2021 13:29:36 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend\n Transport; Sun, 20 Jun 2021 13:29:36 -0700",
            "from BG-LT7430.marvell.com (BG-LT7430.marvell.com [10.28.177.176])\n by maili.marvell.com (Postfix) with ESMTP id 4F57D3F7066;\n Sun, 20 Jun 2021 13:29:33 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=Z3ghKRN+06GN3xumapqnswVO3Cv/4AyCAWDq9/sNRu0=;\n b=J0DBcffluERqCgbwjLNB2FU/YuBJE5GVebSMowQsh8epyaOllTK+XY6HXri5TZgYftYZ\n h+ugv817arljcyXzcAgbf+HinWIsPt8uHppNliDZ+CFFQl9BqRowhwJq7xpEVwIJ3ahw\n Ghyn4kjHIOss+NxiFJf4YpRRBnqAqxCeI71Np9kyk7FVyRq/f7SjZJN3TrEoD11EQYWN\n eKBryrsCiq5e/TM6+YfCzQhYrrvN6HY4Q5g+XH9Wumq8j0iEAFw6IP83AhpbTD7cji4n\n 7EI1GbM+fFEFi439gAVuQI19437DRQdXSY6ESTna2C6LPIvbCLrfavBCwBq3jWZ6nTKn Ew==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>, \"Shijith\n Thotton\" <sthotton@marvell.com>,\n Nithin Dabilpuram <ndabilpuram@marvell.com>,\n Kiran Kumar K <kirankumark@marvell.com>, Sunil Kumar Kori\n <skori@marvell.com>, Satha Rao <skoteshwar@marvell.com>, Ray Kinsella\n <mdr@ashroe.eu>, Neil Horman <nhorman@tuxdriver.com>",
        "CC": "<dev@dpdk.org>",
        "Date": "Mon, 21 Jun 2021 01:59:00 +0530",
        "Message-ID": "<20210620202906.10974-7-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210620202906.10974-1-pbhagavatula@marvell.com>",
        "References": "<20210619110154.10301-1-pbhagavatula@marvell.com>\n <20210620202906.10974-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "hGZlUt4to_uykPqjPmc0j0toqkjCJ12x",
        "X-Proofpoint-GUID": "hGZlUt4to_uykPqjPmc0j0toqkjCJ12x",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.391, 18.0.790\n definitions=2021-06-20_14:2021-06-20,\n 2021-06-20 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v3 07/13] event/cnxk: add Rx adapter support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd support for event eth Rx adapter.\nResize cn10k workslot fastpath structure to fit in 64B cacheline size.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n doc/guides/eventdevs/cnxk.rst            |  28 ++++\n doc/guides/rel_notes/release_21_08.rst   |   5 +\n drivers/common/cnxk/roc_nix.h            |   3 +\n drivers/common/cnxk/roc_nix_fc.c         |  78 ++++++++++\n drivers/common/cnxk/roc_nix_priv.h       |   3 +-\n drivers/common/cnxk/version.map          |   1 +\n drivers/event/cnxk/cn10k_eventdev.c      | 107 +++++++++++---\n drivers/event/cnxk/cn10k_worker.c        |   7 +-\n drivers/event/cnxk/cn10k_worker.h        |  32 +++--\n drivers/event/cnxk/cn9k_eventdev.c       |  89 ++++++++++++\n drivers/event/cnxk/cn9k_worker.h         |   4 +\n drivers/event/cnxk/cnxk_eventdev.c       |   2 +\n drivers/event/cnxk/cnxk_eventdev.h       |  43 ++++--\n drivers/event/cnxk/cnxk_eventdev_adptr.c | 176 +++++++++++++++++++++++\n drivers/event/cnxk/meson.build           |   9 +-\n 15 files changed, 540 insertions(+), 47 deletions(-)",
    "diff": "diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst\nindex 36da3800cc..b7e82c1273 100644\n--- a/doc/guides/eventdevs/cnxk.rst\n+++ b/doc/guides/eventdevs/cnxk.rst\n@@ -39,6 +39,10 @@ Features of the OCTEON cnxk SSO PMD are:\n   time granularity of 2.5us on CN9K and 1us on CN10K.\n - Up to 256 TIM rings a.k.a event timer adapters.\n - Up to 8 rings traversed in parallel.\n+- HW managed packets enqueued from ethdev to eventdev exposed through event eth\n+  RX adapter.\n+- N:1 ethernet device Rx queue to Event queue mapping.\n+- Full Rx offload support defined through ethdev queue configuration.\n \n Prerequisites and Compilation procedure\n ---------------------------------------\n@@ -93,6 +97,15 @@ Runtime Config Options\n \n     -a 0002:0e:00.0,qos=[1-50-50-50]\n \n+- ``Force Rx Back pressure``\n+\n+   Force Rx back pressure when same mempool is used across ethernet device\n+   connected to event device.\n+\n+   For example::\n+\n+      -a 0002:0e:00.0,force_rx_bp=1\n+\n - ``TIM disable NPA``\n \n   By default chunks are allocated from NPA then TIM can automatically free\n@@ -160,3 +173,18 @@ Debugging Options\n    +---+------------+-------------------------------------------------------+\n    | 2 | TIM        | --log-level='pmd\\.event\\.cnxk\\.timer,8'               |\n    +---+------------+-------------------------------------------------------+\n+\n+Limitations\n+-----------\n+\n+Rx adapter support\n+~~~~~~~~~~~~~~~~~~\n+\n+Using the same mempool for all the ethernet device ports connected to\n+event device would cause back pressure to be asserted only on the first\n+ethernet device.\n+Back pressure is automatically disabled when using same mempool for all the\n+ethernet devices connected to event device to override this applications can\n+use `force_rx_bp=1` device arguments.\n+Using unique mempool per each ethernet device is recommended when they are\n+connected to event device.\ndiff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst\nindex 31e49e1a56..3892c8017a 100644\n--- a/doc/guides/rel_notes/release_21_08.rst\n+++ b/doc/guides/rel_notes/release_21_08.rst\n@@ -60,6 +60,11 @@ New Features\n   * Added net/cnxk driver which provides the support for the integrated ethernet\n     device.\n \n+* **Added support for Marvell CN10K, CN9K, event Rx adapter.**\n+\n+  * Added Rx adapter support for event/cnxk when the ethernet device requested is\n+    net/cnxk.\n+\n \n Removed Items\n -------------\ndiff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h\nindex bb69027956..76613fe84e 100644\n--- a/drivers/common/cnxk/roc_nix.h\n+++ b/drivers/common/cnxk/roc_nix.h\n@@ -514,6 +514,9 @@ int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,\n \n enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);\n \n+void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,\n+\t\t\t\t     uint8_t ena, uint8_t force);\n+\n /* NPC */\n int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);\n \ndiff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c\nindex 47be8aa3f8..f17eba4169 100644\n--- a/drivers/common/cnxk/roc_nix_fc.c\n+++ b/drivers/common/cnxk/roc_nix_fc.c\n@@ -249,3 +249,81 @@ roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)\n exit:\n \treturn rc;\n }\n+\n+void\n+rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,\n+\t\t      uint8_t force)\n+{\n+\tstruct nix *nix 
= roc_nix_to_nix_priv(roc_nix);\n+\tstruct npa_lf *lf = idev_npa_obj_get();\n+\tstruct npa_aq_enq_req *req;\n+\tstruct npa_aq_enq_rsp *rsp;\n+\tstruct mbox *mbox;\n+\tuint32_t limit;\n+\tint rc;\n+\n+\tif (roc_nix_is_sdp(roc_nix))\n+\t\treturn;\n+\n+\tif (!lf)\n+\t\treturn;\n+\tmbox = lf->mbox;\n+\n+\treq = mbox_alloc_msg_npa_aq_enq(mbox);\n+\tif (req == NULL)\n+\t\treturn;\n+\n+\treq->aura_id = roc_npa_aura_handle_to_aura(pool_id);\n+\treq->ctype = NPA_AQ_CTYPE_AURA;\n+\treq->op = NPA_AQ_INSTOP_READ;\n+\n+\trc = mbox_process_msg(mbox, (void *)&rsp);\n+\tif (rc)\n+\t\treturn;\n+\n+\tlimit = rsp->aura.limit;\n+\t/* BP is already enabled. */\n+\tif (rsp->aura.bp_ena) {\n+\t\t/* If BP ids don't match disable BP. */\n+\t\tif ((rsp->aura.nix0_bpid != nix->bpid[0]) && !force) {\n+\t\t\treq = mbox_alloc_msg_npa_aq_enq(mbox);\n+\t\t\tif (req == NULL)\n+\t\t\t\treturn;\n+\n+\t\t\treq->aura_id = roc_npa_aura_handle_to_aura(pool_id);\n+\t\t\treq->ctype = NPA_AQ_CTYPE_AURA;\n+\t\t\treq->op = NPA_AQ_INSTOP_WRITE;\n+\n+\t\t\treq->aura.bp_ena = 0;\n+\t\t\treq->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);\n+\n+\t\t\tmbox_process(mbox);\n+\t\t}\n+\t\treturn;\n+\t}\n+\n+\t/* BP was previously enabled but now disabled skip. */\n+\tif (rsp->aura.bp)\n+\t\treturn;\n+\n+\treq = mbox_alloc_msg_npa_aq_enq(mbox);\n+\tif (req == NULL)\n+\t\treturn;\n+\n+\treq->aura_id = roc_npa_aura_handle_to_aura(pool_id);\n+\treq->ctype = NPA_AQ_CTYPE_AURA;\n+\treq->op = NPA_AQ_INSTOP_WRITE;\n+\n+\tif (ena) {\n+\t\treq->aura.nix0_bpid = nix->bpid[0];\n+\t\treq->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);\n+\t\treq->aura.bp = NIX_RQ_AURA_THRESH(\n+\t\t\tlimit > 128 ? 256 : limit); /* 95% of size*/\n+\t\treq->aura_mask.bp = ~(req->aura_mask.bp);\n+\t}\n+\n+\treq->aura.bp_ena = !!ena;\n+\treq->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);\n+\n+\tmbox_process(mbox);\n+}\ndiff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h\nindex d9c32df442..9dc0c88a6f 100644\n--- a/drivers/common/cnxk/roc_nix_priv.h\n+++ b/drivers/common/cnxk/roc_nix_priv.h\n@@ -16,7 +16,8 @@\n #define NIX_SQB_LOWER_THRESH ((uint16_t)70)\n \n /* Apply BP/DROP when CQ is 95% full */\n-#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)\n+#define NIX_CQ_THRESH_LEVEL\t(5 * 256 / 100)\n+#define NIX_RQ_AURA_THRESH(x)\t(((x) * 95) / 100)\n \n /* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */\n #define CQ_CQE_THRESH_DEFAULT\t0x1ULL\ndiff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map\nindex 8a5c839e57..cb1ce4b6fc 100644\n--- a/drivers/common/cnxk/version.map\n+++ b/drivers/common/cnxk/version.map\n@@ -29,6 +29,7 @@ INTERNAL {\n \troc_nix_fc_config_set;\n \troc_nix_fc_mode_set;\n \troc_nix_fc_mode_get;\n+\trox_nix_fc_npa_bp_cfg;\n \troc_nix_get_base_chan;\n \troc_nix_get_pf;\n \troc_nix_get_pf_func;\ndiff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c\nindex bf4052c76c..2060c8fe84 100644\n--- a/drivers/event/cnxk/cn10k_eventdev.c\n+++ b/drivers/event/cnxk/cn10k_eventdev.c\n@@ -6,18 +6,6 @@\n #include \"cnxk_eventdev.h\"\n #include \"cnxk_worker.h\"\n \n-static void\n-cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)\n-{\n-\tws->tag_wqe_op = base + SSOW_LF_GWS_WQE0;\n-\tws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;\n-\tws->updt_wqe_op = base + SSOW_LF_GWS_OP_UPD_WQP_GRP1;\n-\tws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;\n-\tws->swtag_untag_op = base + SSOW_LF_GWS_OP_SWTAG_UNTAG;\n-\tws->swtag_flush_op = base + 
SSOW_LF_GWS_OP_SWTAG_FLUSH;\n-\tws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;\n-}\n-\n static uint32_t\n cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)\n {\n@@ -56,7 +44,6 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)\n \t/* First cache line is reserved for cookie */\n \tws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);\n \tws->base = roc_sso_hws_base_get(&dev->sso, port_id);\n-\tcn10k_init_hws_ops(ws, ws->base);\n \tws->hws_id = port_id;\n \tws->swtag_req = 0;\n \tws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);\n@@ -135,13 +122,14 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,\n \tcq_ds_cnt &= 0x3FFF3FFF0000;\n \n \twhile (aq_cnt || cq_ds_cnt || ds_cnt) {\n-\t\tplt_write64(req, ws->getwrk_op);\n+\t\tplt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);\n \t\tcn10k_sso_hws_get_work_empty(ws, &ev);\n \t\tif (fn != NULL && ev.u64 != 0)\n \t\t\tfn(arg, ev);\n \t\tif (ev.sched_type != SSO_TT_EMPTY)\n-\t\t\tcnxk_sso_hws_swtag_flush(ws->tag_wqe_op,\n-\t\t\t\t\t\t ws->swtag_flush_op);\n+\t\t\tcnxk_sso_hws_swtag_flush(\n+\t\t\t\tws->base + SSOW_LF_GWS_WQE0,\n+\t\t\t\tws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);\n \t\tdo {\n \t\t\tval = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);\n \t\t} while (val & BIT_ULL(56));\n@@ -205,9 +193,11 @@ cn10k_sso_hws_reset(void *arg, void *hws)\n \n \tif (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=\n \t    SSO_TT_EMPTY) {\n-\t\tplt_write64(BIT_ULL(16) | 1, ws->getwrk_op);\n+\t\tplt_write64(BIT_ULL(16) | 1,\n+\t\t\t    ws->base + SSOW_LF_GWS_OP_GET_WORK0);\n \t\tdo {\n-\t\t\troc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);\n+\t\t\troc_load_pair(gw.u64[0], gw.u64[1],\n+\t\t\t\t      ws->base + SSOW_LF_GWS_WQE0);\n \t\t} while (gw.u64[0] & BIT_ULL(63));\n \t\tpend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));\n \t\tif (pend_tt != SSO_TT_EMPTY) { /* Work was pending */\n@@ -407,6 +397,80 @@ cn10k_sso_selftest(void)\n \treturn cnxk_sso_selftest(RTE_STR(event_cn10k));\n }\n \n+static int\n+cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev, uint32_t *caps)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(event_dev);\n+\trc = strncmp(eth_dev->device->driver->name, \"net_cn10k\", 9);\n+\tif (rc)\n+\t\t*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;\n+\telse\n+\t\t*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;\n+\n+\treturn 0;\n+}\n+\n+static void\n+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,\n+\t\t       void *tstmp_info)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tint i;\n+\n+\tfor (i = 0; i < dev->nb_event_ports; i++) {\n+\t\tstruct cn10k_sso_hws *ws = event_dev->data->ports[i];\n+\t\tws->lookup_mem = lookup_mem;\n+\t\tws->tstamp = tstmp_info;\n+\t}\n+}\n+\n+static int\n+cn10k_sso_rx_adapter_queue_add(\n+\tconst struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,\n+\tint32_t rx_queue_id,\n+\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tstruct cn10k_eth_rxq *rxq;\n+\tvoid *lookup_mem;\n+\tvoid *tstmp_info;\n+\tint rc;\n+\n+\trc = strncmp(eth_dev->device->driver->name, \"net_cn10k\", 8);\n+\tif (rc)\n+\t\treturn -EINVAL;\n+\n+\trc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,\n+\t\t\t\t\t   queue_conf);\n+\tif (rc)\n+\t\treturn -EINVAL;\n+\trxq = eth_dev->data->rx_queues[0];\n+\tlookup_mem = 
rxq->lookup_mem;\n+\ttstmp_info = rxq->tstamp;\n+\tcn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);\n+\tcn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t       const struct rte_eth_dev *eth_dev,\n+\t\t\t       int32_t rx_queue_id)\n+{\n+\tint rc;\n+\n+\trc = strncmp(eth_dev->device->driver->name, \"net_cn10k\", 8);\n+\tif (rc)\n+\t\treturn -EINVAL;\n+\n+\treturn cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);\n+}\n+\n static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.dev_infos_get = cn10k_sso_info_get,\n \t.dev_configure = cn10k_sso_dev_configure,\n@@ -420,6 +484,12 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.port_unlink = cn10k_sso_port_unlink,\n \t.timeout_ticks = cnxk_sso_timeout_ticks,\n \n+\t.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,\n+\t.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,\n+\t.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,\n+\t.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,\n+\t.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,\n+\n \t.timer_adapter_caps_get = cnxk_tim_caps_get,\n \n \t.dump = cnxk_sso_dump,\n@@ -502,6 +572,7 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);\n RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, \"vfio-pci\");\n RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT \"=<int>\"\n \t\t\t      CNXK_SSO_GGRP_QOS \"=<string>\"\n+\t\t\t      CNXK_SSO_FORCE_BP \"=1\"\n \t\t\t      CN10K_SSO_GW_MODE \"=<int>\"\n \t\t\t      CNXK_TIM_DISABLE_NPA \"=1\"\n \t\t\t      CNXK_TIM_CHNK_SLOTS \"=<int>\"\ndiff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c\nindex e2aa534c64..5dbae275ba 100644\n--- a/drivers/event/cnxk/cn10k_worker.c\n+++ b/drivers/event/cnxk/cn10k_worker.c\n@@ -18,7 +18,8 @@ cn10k_sso_hws_enq(void *port, const struct rte_event *ev)\n \t\tcn10k_sso_hws_forward_event(ws, ev);\n \t\tbreak;\n \tcase RTE_EVENT_OP_RELEASE:\n-\t\tcnxk_sso_hws_swtag_flush(ws->tag_wqe_op, ws->swtag_flush_op);\n+\t\tcnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_WQE0,\n+\t\t\t\t\t ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);\n \t\tbreak;\n \tdefault:\n \t\treturn 0;\n@@ -69,7 +70,7 @@ cn10k_sso_hws_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)\n \n \tif (ws->swtag_req) {\n \t\tws->swtag_req = 0;\n-\t\tcnxk_sso_hws_swtag_wait(ws->tag_wqe_op);\n+\t\tcnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);\n \t\treturn 1;\n \t}\n \n@@ -94,7 +95,7 @@ cn10k_sso_hws_tmo_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)\n \n \tif (ws->swtag_req) {\n \t\tws->swtag_req = 0;\n-\t\tcnxk_sso_hws_swtag_wait(ws->tag_wqe_op);\n+\t\tcnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);\n \t\treturn ret;\n \t}\n \ndiff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h\nindex 2f093a8dd5..c7250bf9e7 100644\n--- a/drivers/event/cnxk/cn10k_worker.h\n+++ b/drivers/event/cnxk/cn10k_worker.h\n@@ -5,9 +5,13 @@\n #ifndef __CN10K_WORKER_H__\n #define __CN10K_WORKER_H__\n \n+#include \"cnxk_ethdev.h\"\n #include \"cnxk_eventdev.h\"\n #include \"cnxk_worker.h\"\n \n+#include \"cn10k_ethdev.h\"\n+#include \"cn10k_rx.h\"\n+\n /* SSO Operations */\n \n static __rte_always_inline uint8_t\n@@ -31,7 +35,8 @@ cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)\n {\n \tconst uint32_t tag = (uint32_t)ev->event;\n \tconst uint8_t new_tt = ev->sched_type;\n-\tconst uint8_t 
cur_tt = CNXK_TT_FROM_TAG(plt_read64(ws->tag_wqe_op));\n+\tconst uint8_t cur_tt =\n+\t\tCNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));\n \n \t/* CNXK model\n \t * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED\n@@ -43,9 +48,11 @@ cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)\n \n \tif (new_tt == SSO_TT_UNTAGGED) {\n \t\tif (cur_tt != SSO_TT_UNTAGGED)\n-\t\t\tcnxk_sso_hws_swtag_untag(ws->swtag_untag_op);\n+\t\t\tcnxk_sso_hws_swtag_untag(ws->base +\n+\t\t\t\t\t\t SSOW_LF_GWS_OP_SWTAG_UNTAG);\n \t} else {\n-\t\tcnxk_sso_hws_swtag_norm(tag, new_tt, ws->swtag_norm_op);\n+\t\tcnxk_sso_hws_swtag_norm(tag, new_tt,\n+\t\t\t\t\tws->base + SSOW_LF_GWS_OP_SWTAG_NORM);\n \t}\n \tws->swtag_req = 1;\n }\n@@ -57,8 +64,9 @@ cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,\n \tconst uint32_t tag = (uint32_t)ev->event;\n \tconst uint8_t new_tt = ev->sched_type;\n \n-\tplt_write64(ev->u64, ws->updt_wqe_op);\n-\tcnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);\n+\tplt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);\n+\tcnxk_sso_hws_swtag_desched(tag, new_tt, grp,\n+\t\t\t\t   ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);\n }\n \n static __rte_always_inline void\n@@ -68,7 +76,7 @@ cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,\n \tconst uint8_t grp = ev->queue_id;\n \n \t/* Group hasn't changed, Use SWTAG to forward the event */\n-\tif (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_wqe_op)) == grp)\n+\tif (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)\n \t\tcn10k_sso_hws_fwd_swtag(ws, ev);\n \telse\n \t\t/*\n@@ -93,12 +101,13 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)\n \t\tPLT_CPU_FEATURE_PREAMBLE\n \t\t\"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\\n\"\n \t\t: [wdata] \"+r\"(gw.get_work)\n-\t\t: [gw_loc] \"r\"(ws->getwrk_op)\n+\t\t: [gw_loc] \"r\"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)\n \t\t: \"memory\");\n #else\n-\tplt_write64(gw.u64[0], ws->getwrk_op);\n+\tplt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);\n \tdo {\n-\t\troc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);\n+\t\troc_load_pair(gw.u64[0], gw.u64[1],\n+\t\t\t      ws->base + SSOW_LF_GWS_WQE0);\n \t} while (gw.u64[0] & BIT_ULL(63));\n #endif\n \tgw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |\n@@ -130,11 +139,12 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)\n \t\t     \"\t\ttbnz %[tag], 63, rty%=\t\t\t\\n\"\n \t\t     \"done%=:\tdmb ld\t\t\t\t\t\\n\"\n \t\t     : [tag] \"=&r\"(gw.u64[0]), [wqp] \"=&r\"(gw.u64[1])\n-\t\t     : [tag_loc] \"r\"(ws->tag_wqe_op)\n+\t\t     : [tag_loc] \"r\"(ws->base + SSOW_LF_GWS_WQE0)\n \t\t     : \"memory\");\n #else\n \tdo {\n-\t\troc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);\n+\t\troc_load_pair(gw.u64[0], gw.u64[1],\n+\t\t\t      ws->base + SSOW_LF_GWS_WQE0);\n \t} while (gw.u64[0] & BIT_ULL(63));\n #endif\n \ndiff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c\nindex 0684417eab..072800c243 100644\n--- a/drivers/event/cnxk/cn9k_eventdev.c\n+++ b/drivers/event/cnxk/cn9k_eventdev.c\n@@ -481,6 +481,88 @@ cn9k_sso_selftest(void)\n \treturn cnxk_sso_selftest(RTE_STR(event_cn9k));\n }\n \n+static int\n+cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,\n+\t\t\t     const struct rte_eth_dev *eth_dev, uint32_t *caps)\n+{\n+\tint rc;\n+\n+\tRTE_SET_USED(event_dev);\n+\trc = strncmp(eth_dev->device->driver->name, \"net_cn9k\", 9);\n+\tif 
(rc)\n+\t\t*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;\n+\telse\n+\t\t*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;\n+\n+\treturn 0;\n+}\n+\n+static void\n+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,\n+\t\t      void *tstmp_info)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tint i;\n+\n+\tfor (i = 0; i < dev->nb_event_ports; i++) {\n+\t\tif (dev->dual_ws) {\n+\t\t\tstruct cn9k_sso_hws_dual *dws =\n+\t\t\t\tevent_dev->data->ports[i];\n+\t\t\tdws->lookup_mem = lookup_mem;\n+\t\t\tdws->tstamp = tstmp_info;\n+\t\t} else {\n+\t\t\tstruct cn9k_sso_hws *ws = event_dev->data->ports[i];\n+\t\t\tws->lookup_mem = lookup_mem;\n+\t\t\tws->tstamp = tstmp_info;\n+\t\t}\n+\t}\n+}\n+\n+static int\n+cn9k_sso_rx_adapter_queue_add(\n+\tconst struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,\n+\tint32_t rx_queue_id,\n+\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tstruct cn9k_eth_rxq *rxq;\n+\tvoid *lookup_mem;\n+\tvoid *tstmp_info;\n+\tint rc;\n+\n+\trc = strncmp(eth_dev->device->driver->name, \"net_cn9k\", 8);\n+\tif (rc)\n+\t\treturn -EINVAL;\n+\n+\trc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,\n+\t\t\t\t\t   queue_conf);\n+\tif (rc)\n+\t\treturn -EINVAL;\n+\n+\trxq = eth_dev->data->rx_queues[0];\n+\tlookup_mem = rxq->lookup_mem;\n+\ttstmp_info = rxq->tstamp;\n+\tcn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);\n+\tcn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t rx_queue_id)\n+{\n+\tint rc;\n+\n+\trc = strncmp(eth_dev->device->driver->name, \"net_cn9k\", 8);\n+\tif (rc)\n+\t\treturn -EINVAL;\n+\n+\treturn cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);\n+}\n+\n static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.dev_infos_get = cn9k_sso_info_get,\n \t.dev_configure = cn9k_sso_dev_configure,\n@@ -494,6 +576,12 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.port_unlink = cn9k_sso_port_unlink,\n \t.timeout_ticks = cnxk_sso_timeout_ticks,\n \n+\t.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,\n+\t.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,\n+\t.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,\n+\t.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,\n+\t.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,\n+\n \t.timer_adapter_caps_get = cnxk_tim_caps_get,\n \n \t.dump = cnxk_sso_dump,\n@@ -571,6 +659,7 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);\n RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, \"vfio-pci\");\n RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT \"=<int>\"\n \t\t\t      CNXK_SSO_GGRP_QOS \"=<string>\"\n+\t\t\t      CNXK_SSO_FORCE_BP \"=1\"\n \t\t\t      CN9K_SSO_SINGLE_WS \"=1\"\n \t\t\t      CNXK_TIM_DISABLE_NPA \"=1\"\n \t\t\t      CNXK_TIM_CHNK_SLOTS \"=<int>\"\ndiff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h\nindex 38fca08fb6..f5a4401465 100644\n--- a/drivers/event/cnxk/cn9k_worker.h\n+++ b/drivers/event/cnxk/cn9k_worker.h\n@@ -5,9 +5,13 @@\n #ifndef __CN9K_WORKER_H__\n #define __CN9K_WORKER_H__\n \n+#include \"cnxk_ethdev.h\"\n #include \"cnxk_eventdev.h\"\n #include \"cnxk_worker.h\"\n \n+#include \"cn9k_ethdev.h\"\n+#include \"cn9k_rx.h\"\n+\n /* SSO 
Operations */\n \n static __rte_always_inline uint8_t\ndiff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c\nindex 7189ee3a79..cfd7fb971c 100644\n--- a/drivers/event/cnxk/cnxk_eventdev.c\n+++ b/drivers/event/cnxk/cnxk_eventdev.c\n@@ -571,6 +571,8 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)\n \t\t\t   &dev->xae_cnt);\n \trte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,\n \t\t\t   dev);\n+\trte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_value,\n+\t\t\t   &dev->force_ena_bp);\n \trte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_value,\n \t\t\t   &single_ws);\n \trte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value,\ndiff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h\nindex 668e51d62a..b65d725f55 100644\n--- a/drivers/event/cnxk/cnxk_eventdev.h\n+++ b/drivers/event/cnxk/cnxk_eventdev.h\n@@ -6,6 +6,8 @@\n #define __CNXK_EVENTDEV_H__\n \n #include <rte_devargs.h>\n+#include <rte_ethdev.h>\n+#include <rte_event_eth_rx_adapter.h>\n #include <rte_kvargs.h>\n #include <rte_mbuf_pool_ops.h>\n #include <rte_pci.h>\n@@ -18,6 +20,7 @@\n \n #define CNXK_SSO_XAE_CNT   \"xae_cnt\"\n #define CNXK_SSO_GGRP_QOS  \"qos\"\n+#define CNXK_SSO_FORCE_BP  \"force_rx_bp\"\n #define CN9K_SSO_SINGLE_WS \"single_ws\"\n #define CN10K_SSO_GW_MODE  \"gw_mode\"\n \n@@ -81,7 +84,10 @@ struct cnxk_sso_evdev {\n \tuint64_t nb_xaq_cfg;\n \trte_iova_t fc_iova;\n \tstruct rte_mempool *xaq_pool;\n+\tuint64_t rx_offloads;\n \tuint64_t adptr_xae_cnt;\n+\tuint16_t rx_adptr_pool_cnt;\n+\tuint64_t *rx_adptr_pools;\n \tuint16_t tim_adptr_ring_cnt;\n \tuint16_t *timer_adptr_rings;\n \tuint64_t *timer_adptr_sz;\n@@ -89,25 +95,18 @@ struct cnxk_sso_evdev {\n \tuint32_t xae_cnt;\n \tuint8_t qos_queue_cnt;\n \tstruct cnxk_sso_qos *qos_parse_data;\n+\tuint8_t force_ena_bp;\n \t/* CN9K */\n \tuint8_t dual_ws;\n \t/* CN10K */\n \tuint8_t gw_mode;\n } __rte_cache_aligned;\n \n-/* CN10K HWS ops */\n-#define CN10K_SSO_HWS_OPS                                                      \\\n-\tuintptr_t swtag_desched_op;                                            \\\n-\tuintptr_t swtag_flush_op;                                              \\\n-\tuintptr_t swtag_untag_op;                                              \\\n-\tuintptr_t swtag_norm_op;                                               \\\n-\tuintptr_t updt_wqe_op;                                                 \\\n-\tuintptr_t tag_wqe_op;                                                  \\\n-\tuintptr_t getwrk_op\n-\n struct cn10k_sso_hws {\n-\t/* Get Work Fastpath data */\n-\tCN10K_SSO_HWS_OPS;\n+\tuint64_t base;\n+\t/* PTP timestamp */\n+\tstruct cnxk_timesync_info *tstamp;\n+\tvoid *lookup_mem;\n \tuint32_t gw_wdata;\n \tuint8_t swtag_req;\n \tuint8_t hws_id;\n@@ -115,7 +114,6 @@ struct cn10k_sso_hws {\n \tuint64_t xaq_lmt __rte_cache_aligned;\n \tuint64_t *fc_mem;\n \tuintptr_t grps_base[CNXK_SSO_MAX_HWGRP];\n-\tuint64_t base;\n \tuintptr_t lmt_base;\n } __rte_cache_aligned;\n \n@@ -132,6 +130,9 @@ struct cn10k_sso_hws {\n struct cn9k_sso_hws {\n \t/* Get Work Fastpath data */\n \tCN9K_SSO_HWS_OPS;\n+\t/* PTP timestamp */\n+\tstruct cnxk_timesync_info *tstamp;\n+\tvoid *lookup_mem;\n \tuint8_t swtag_req;\n \tuint8_t hws_id;\n \t/* Add Work Fastpath data */\n@@ -148,6 +149,9 @@ struct cn9k_sso_hws_state {\n struct cn9k_sso_hws_dual {\n \t/* Get Work Fastpath data */\n \tstruct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong 
*/\n+\t/* PTP timestamp */\n+\tstruct cnxk_timesync_info *tstamp;\n+\tvoid *lookup_mem;\n \tuint8_t swtag_req;\n \tuint8_t vws; /* Ping pong bit */\n \tuint8_t hws_id;\n@@ -250,4 +254,17 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,\n /* CN9K */\n void cn9k_sso_set_rsrc(void *arg);\n \n+/* Common adapter ops */\n+int cnxk_sso_rx_adapter_queue_add(\n+\tconst struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,\n+\tint32_t rx_queue_id,\n+\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf);\n+int cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t\t  const struct rte_eth_dev *eth_dev,\n+\t\t\t\t  int32_t rx_queue_id);\n+int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev);\n+int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,\n+\t\t\t     const struct rte_eth_dev *eth_dev);\n+\n #endif /* __CNXK_EVENTDEV_H__ */\ndiff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c\nindex 89a1d82c14..24bfd985e7 100644\n--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c\n+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c\n@@ -2,6 +2,7 @@\n  * Copyright(C) 2021 Marvell.\n  */\n \n+#include \"cnxk_ethdev.h\"\n #include \"cnxk_eventdev.h\"\n \n void\n@@ -11,6 +12,32 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,\n \tint i;\n \n \tswitch (event_type) {\n+\tcase RTE_EVENT_TYPE_ETHDEV: {\n+\t\tstruct cnxk_eth_rxq_sp *rxq = data;\n+\t\tuint64_t *old_ptr;\n+\n+\t\tfor (i = 0; i < dev->rx_adptr_pool_cnt; i++) {\n+\t\t\tif ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])\n+\t\t\t\treturn;\n+\t\t}\n+\n+\t\tdev->rx_adptr_pool_cnt++;\n+\t\told_ptr = dev->rx_adptr_pools;\n+\t\tdev->rx_adptr_pools = rte_realloc(\n+\t\t\tdev->rx_adptr_pools,\n+\t\t\tsizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);\n+\t\tif (dev->rx_adptr_pools == NULL) {\n+\t\t\tdev->adptr_xae_cnt += rxq->qconf.mp->size;\n+\t\t\tdev->rx_adptr_pools = old_ptr;\n+\t\t\tdev->rx_adptr_pool_cnt--;\n+\t\t\treturn;\n+\t\t}\n+\t\tdev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =\n+\t\t\t(uint64_t)rxq->qconf.mp;\n+\n+\t\tdev->adptr_xae_cnt += rxq->qconf.mp->size;\n+\t\tbreak;\n+\t}\n \tcase RTE_EVENT_TYPE_TIMER: {\n \t\tstruct cnxk_tim_ring *timr = data;\n \t\tuint16_t *old_ring_ptr;\n@@ -65,3 +92,152 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,\n \t\tbreak;\n \t}\n }\n+\n+static int\n+cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,\n+\t\t    uint16_t port_id, const struct rte_event *ev,\n+\t\t    uint8_t custom_flowid)\n+{\n+\tstruct roc_nix_rq *rq;\n+\n+\trq = &cnxk_eth_dev->rqs[rq_id];\n+\trq->sso_ena = 1;\n+\trq->tt = ev->sched_type;\n+\trq->hwgrp = ev->queue_id;\n+\trq->flow_tag_width = 20;\n+\trq->wqe_skip = 1;\n+\trq->tag_mask = (port_id & 0xF) << 20;\n+\trq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))\n+\t\t\t<< 24;\n+\n+\tif (custom_flowid) {\n+\t\trq->flow_tag_width = 0;\n+\t\trq->tag_mask |= ev->flow_id;\n+\t}\n+\n+\treturn roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);\n+}\n+\n+static int\n+cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)\n+{\n+\tstruct roc_nix_rq *rq;\n+\n+\trq = &cnxk_eth_dev->rqs[rq_id];\n+\trq->sso_ena = 0;\n+\trq->flow_tag_width = 32;\n+\trq->tag_mask = 0;\n+\n+\treturn roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);\n+}\n+\n+int\n+cnxk_sso_rx_adapter_queue_add(\n+\tconst struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,\n+\tint32_t 
rx_queue_id,\n+\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tuint16_t port = eth_dev->data->port_id;\n+\tstruct cnxk_eth_rxq_sp *rxq_sp;\n+\tint i, rc = 0;\n+\n+\tif (rx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n+\t\t\trxq_sp = eth_dev->data->rx_queues[i];\n+\t\t\trxq_sp = rxq_sp - 1;\n+\t\t\tcnxk_sso_updt_xae_cnt(dev, rxq_sp,\n+\t\t\t\t\t      RTE_EVENT_TYPE_ETHDEV);\n+\t\t\trc = cnxk_sso_xae_reconfigure(\n+\t\t\t\t(struct rte_eventdev *)(uintptr_t)event_dev);\n+\t\t\trc |= cnxk_sso_rxq_enable(\n+\t\t\t\tcnxk_eth_dev, i, port, &queue_conf->ev,\n+\t\t\t\t!!(queue_conf->rx_queue_flags &\n+\t\t\t\tRTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));\n+\t\t\trox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,\n+\t\t\t\t\t      rxq_sp->qconf.mp->pool_id, true,\n+\t\t\t\t\t      dev->force_ena_bp);\n+\t\t}\n+\t} else {\n+\t\trxq_sp = eth_dev->data->rx_queues[rx_queue_id];\n+\t\trxq_sp = rxq_sp - 1;\n+\t\tcnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);\n+\t\trc = cnxk_sso_xae_reconfigure(\n+\t\t\t(struct rte_eventdev *)(uintptr_t)event_dev);\n+\t\trc |= cnxk_sso_rxq_enable(\n+\t\t\tcnxk_eth_dev, (uint16_t)rx_queue_id, port,\n+\t\t\t&queue_conf->ev,\n+\t\t\t!!(queue_conf->rx_queue_flags &\n+\t\t\t   RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));\n+\t\trox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,\n+\t\t\t\t      rxq_sp->qconf.mp->pool_id, true,\n+\t\t\t\t      dev->force_ena_bp);\n+\t}\n+\n+\tif (rc < 0) {\n+\t\tplt_err(\"Failed to configure Rx adapter port=%d, q=%d\", port,\n+\t\t\tqueue_conf->ev.queue_id);\n+\t\treturn rc;\n+\t}\n+\n+\tdev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;\n+\n+\treturn 0;\n+}\n+\n+int\n+cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n+\t\t\t      const struct rte_eth_dev *eth_dev,\n+\t\t\t      int32_t rx_queue_id)\n+{\n+\tstruct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\tstruct cnxk_eth_rxq_sp *rxq_sp;\n+\tint i, rc = 0;\n+\n+\tRTE_SET_USED(event_dev);\n+\tif (rx_queue_id < 0) {\n+\t\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n+\t\t\trxq_sp = eth_dev->data->rx_queues[rx_queue_id];\n+\t\t\trxq_sp = rxq_sp - 1;\n+\t\t\trc = cnxk_sso_rxq_disable(cnxk_eth_dev, i);\n+\t\t\trox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,\n+\t\t\t\t\t      rxq_sp->qconf.mp->pool_id, false,\n+\t\t\t\t\t      dev->force_ena_bp);\n+\t\t}\n+\t} else {\n+\t\trxq_sp = eth_dev->data->rx_queues[rx_queue_id];\n+\t\trxq_sp = rxq_sp - 1;\n+\t\trc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);\n+\t\trox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,\n+\t\t\t\t      rxq_sp->qconf.mp->pool_id, false,\n+\t\t\t\t      dev->force_ena_bp);\n+\t}\n+\n+\tif (rc < 0)\n+\t\tplt_err(\"Failed to clear Rx adapter config port=%d, q=%d\",\n+\t\t\teth_dev->data->port_id, rx_queue_id);\n+\n+\treturn rc;\n+}\n+\n+int\n+cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,\n+\t\t\t  const struct rte_eth_dev *eth_dev)\n+{\n+\tRTE_SET_USED(event_dev);\n+\tRTE_SET_USED(eth_dev);\n+\n+\treturn 0;\n+}\n+\n+int\n+cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,\n+\t\t\t const struct rte_eth_dev *eth_dev)\n+{\n+\tRTE_SET_USED(event_dev);\n+\tRTE_SET_USED(eth_dev);\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build\nindex 87bb9f76a9..eda562f5b5 100644\n--- 
a/drivers/event/cnxk/meson.build\n+++ b/drivers/event/cnxk/meson.build\n@@ -21,4 +21,11 @@ sources = files(\n         'cnxk_tim_worker.c',\n )\n \n-deps += ['bus_pci', 'common_cnxk']\n+extra_flags = ['-flax-vector-conversions', '-Wno-strict-aliasing']\n+foreach flag: extra_flags\n+    if cc.has_argument(flag)\n+        cflags += flag\n+    endif\n+endforeach\n+\n+deps += ['bus_pci', 'common_cnxk', 'net_cnxk']\n",
    "prefixes": [
        "v3",
        "07/13"
    ]
}
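
Per the Allow header above, the endpoint also accepts PUT and PATCH for authenticated maintainers. A sketch of a partial update, assuming the token-based Authorization scheme and writable "state" field of a typical Patchwork deployment (verify both against your instance; the token value is a placeholder):

import requests

API_TOKEN = "REPLACE_ME"  # placeholder; write access requires a maintainer API token

# PATCH performs a partial update, so only the fields being changed are sent.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/94564/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},
    timeout=30,
)
resp.raise_for_status()

The "mbox" URL in the response above can likewise be fetched directly and piped to git am to apply this patch to a local tree.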