From patchwork Mon May 3 15:22:17 2021
X-Patchwork-Submitter: Pavan Nikhilesh Bhagavatula
X-Patchwork-Id: 92652
To: Pavan Nikhilesh, "Shijith Thotton"
Date: Mon, 3 May 2021 20:52:17 +0530
Message-ID: <20210503152238.2437-15-pbhagavatula@marvell.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210503152238.2437-1-pbhagavatula@marvell.com>
References: <20210430135336.2749-1-pbhagavatula@marvell.com>
 <20210503152238.2437-1-pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH v4 14/34] event/cnxk: add SSO HW device operations

From: Pavan Nikhilesh

Add the SSO HW device operations used for event enqueue/dequeue.
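
For reference, rte_event_enqueue_burst() distinguishes NEW, FORWARD and
RELEASE operations, and a worker-port enqueue built on the helpers in
this patch would dispatch on ev->op roughly as in the sketch below. The
sketch is an illustration only, not part of the patch: the entry-point
name and the swtag_flush_op field are assumptions, and the actual entry
points are expected to be wired up by later patches in this series.

/* Illustrative sketch only; assumes the cn9k_worker.h context below. */
static uint16_t
sketch_cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
{
	struct cn9k_sso_hws *ws = port;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		/* Inject a new event; returns 0 when the XAQ
		 * flow-control limit (xaq_lmt vs *fc_mem) is hit.
		 */
		return cn9k_sso_hws_new_event(ws, ev);
	case RTE_EVENT_OP_FORWARD:
		/* SWTAG within the current group, or deschedule/
		 * add_work when the destination group differs.
		 */
		cn9k_sso_hws_forward_event(ws, ev);
		return 1;
	case RTE_EVENT_OP_RELEASE:
		/* Release the held tag; cnxk_sso_hws_swtag_flush()
		 * is a no-op when the tag type is already EMPTY.
		 * (swtag_flush_op is an assumed field here.)
		 */
		cnxk_sso_hws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
		return 1;
	default:
		return 0;
	}
}
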
Signed-off-by: Pavan Nikhilesh
---
 drivers/event/cnxk/cn10k_worker.c  |   7 +
 drivers/event/cnxk/cn10k_worker.h  | 151 +++++++++++++++++
 drivers/event/cnxk/cn9k_worker.c   |   7 +
 drivers/event/cnxk/cn9k_worker.h   | 249 +++++++++++++++++++++++++++++
 drivers/event/cnxk/cnxk_eventdev.h |  10 ++
 drivers/event/cnxk/cnxk_worker.h   | 101 ++++++++++++
 drivers/event/cnxk/meson.build     |   4 +-
 7 files changed, 528 insertions(+), 1 deletion(-)
 create mode 100644 drivers/event/cnxk/cn10k_worker.c
 create mode 100644 drivers/event/cnxk/cn10k_worker.h
 create mode 100644 drivers/event/cnxk/cn9k_worker.c
 create mode 100644 drivers/event/cnxk/cn9k_worker.h
 create mode 100644 drivers/event/cnxk/cnxk_worker.h

diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
new file mode 100644
index 000000000..63b587301
--- /dev/null
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn10k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
new file mode 100644
index 000000000..04517055d
--- /dev/null
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef __CN10K_WORKER_H__
+#define __CN10K_WORKER_H__
+
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+/* SSO Operations */
+
+static __rte_always_inline uint8_t
+cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+	const uint64_t event_ptr = ev->u64;
+	const uint16_t grp = ev->queue_id;
+
+	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
+	if (ws->xaq_lmt <= *ws->fc_mem)
+		return 0;
+
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
+	return 1;
+}
+
+static __rte_always_inline void
+cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+	const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(ws->tag_wqe_op));
+
+	/* CNXK model
+	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
+	 *
+	 * SSO_TT_ORDERED        norm           norm           untag
+	 * SSO_TT_ATOMIC         norm           norm           untag
+	 * SSO_TT_UNTAGGED       norm           norm           NOOP
+	 */
+
+	if (new_tt == SSO_TT_UNTAGGED) {
+		if (cur_tt != SSO_TT_UNTAGGED)
+			cnxk_sso_hws_swtag_untag(ws->swtag_untag_op);
+	} else {
+		cnxk_sso_hws_swtag_norm(tag, new_tt, ws->swtag_norm_op);
+	}
+	ws->swtag_req = 1;
+}
+
+static __rte_always_inline void
+cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
+			const uint16_t grp)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+
+	plt_write64(ev->u64, ws->updt_wqe_op);
+	cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
+}
+
+static __rte_always_inline void
+cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
+			    const struct rte_event *ev)
+{
+	const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event. */
+	if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_wqe_op)) == grp)
+		cn10k_sso_hws_fwd_swtag(ws, ev);
+	else
+		/*
+		 * Group has changed for group-based work pipelining;
+		 * use the deschedule/add_work operation to transfer
+		 * the event to the new group/core.
+		 */
+		cn10k_sso_hws_fwd_group(ws, ev, grp);
+}
+
+static __rte_always_inline uint16_t
+cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)
+{
+	union {
+		__uint128_t get_work;
+		uint64_t u64[2];
+	} gw;
+
+	gw.get_work = ws->gw_wdata;
+#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
+	asm volatile(
+		PLT_CPU_FEATURE_PREAMBLE
+		"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
+		: [wdata] "+r"(gw.get_work)
+		: [gw_loc] "r"(ws->getwrk_op)
+		: "memory");
+#else
+	plt_write64(gw.u64[0], ws->getwrk_op);
+	do {
+		roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
+	} while (gw.u64[0] & BIT_ULL(63));
+#endif
+	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
+		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
+		    (gw.u64[0] & 0xffffffff);
+
+	ev->event = gw.u64[0];
+	ev->u64 = gw.u64[1];
+
+	return !!gw.u64[1];
+}
+
+/* Used in cleaning up workslot. */
+static __rte_always_inline uint16_t
+cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
+{
+	union {
+		__uint128_t get_work;
+		uint64_t u64[2];
+	} gw;
+
+#ifdef RTE_ARCH_ARM64
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
+		     "		tbz %[tag], 63, done%=			\n"
+		     "		sevl					\n"
+		     "rty%=:	wfe					\n"
+		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
+		     "		tbnz %[tag], 63, rty%=			\n"
+		     "done%=:	dmb ld					\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     : [tag_loc] "r"(ws->tag_wqe_op)
+		     : "memory");
+#else
+	do {
+		roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
+	} while (gw.u64[0] & BIT_ULL(63));
+#endif
+
+	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
+		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
+		    (gw.u64[0] & 0xffffffff);
+
+	ev->event = gw.u64[0];
+	ev->u64 = gw.u64[1];
+
+	return !!gw.u64[1];
+}
+
+#endif
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
new file mode 100644
index 000000000..836914163
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+
+#include "cn9k_worker.h"
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
new file mode 100644
index 000000000..85be742c1
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef __CN9K_WORKER_H__
+#define __CN9K_WORKER_H__
+
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+/* SSO Operations */
+
+static __rte_always_inline uint8_t
+cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+	const uint64_t event_ptr = ev->u64;
+	const uint16_t grp = ev->queue_id;
+
+	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
+	if (ws->xaq_lmt <= *ws->fc_mem)
+		return 0;
+
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
+	return 1;
+}
+
+static __rte_always_inline void
+cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
+		       const struct rte_event *ev)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+	const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));
+
+	/* CNXK model
+	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
+	 *
+	 * SSO_TT_ORDERED        norm           norm           untag
+	 * SSO_TT_ATOMIC         norm           norm           untag
+	 * SSO_TT_UNTAGGED       norm           norm           NOOP
+	 */
+
+	if (new_tt == SSO_TT_UNTAGGED) {
+		if (cur_tt != SSO_TT_UNTAGGED)
+			cnxk_sso_hws_swtag_untag(
+				CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
+				SSOW_LF_GWS_OP_SWTAG_UNTAG);
+	} else {
+		cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
+	}
+}
+
+static __rte_always_inline void
+cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
+		       const struct rte_event *ev, const uint16_t grp)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+
+	plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+				     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+	cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
+}
+
+static __rte_always_inline void
+cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
+{
+	const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event. */
+	if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
+		cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
+		ws->swtag_req = 1;
+	} else {
+		/*
+		 * Group has changed for group-based work pipelining;
+		 * use the deschedule/add_work operation to transfer
+		 * the event to the new group/core.
+		 */
+		cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
+				       grp);
+	}
+}
+
+/* Dual ws ops. */
+
+static __rte_always_inline uint8_t
+cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
+			    const struct rte_event *ev)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+	const uint64_t event_ptr = ev->u64;
+	const uint16_t grp = ev->queue_id;
+
+	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
+	if (dws->xaq_lmt <= *dws->fc_mem)
+		return 0;
+
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
+	return 1;
+}
+
+static __rte_always_inline void
+cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
+				struct cn9k_sso_hws_state *vws,
+				const struct rte_event *ev)
+{
+	const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event. */
+	if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
+		cn9k_sso_hws_fwd_swtag(vws, ev);
+		dws->swtag_req = 1;
+	} else {
+		/*
+		 * Group has changed for group-based work pipelining;
+		 * use the deschedule/add_work operation to transfer
+		 * the event to the new group/core.
+		 */
+		cn9k_sso_hws_fwd_group(vws, ev, grp);
+	}
+}
+
+static __rte_always_inline uint16_t
+cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
+			   struct cn9k_sso_hws_state *ws_pair,
+			   struct rte_event *ev)
+{
+	const uint64_t set_gw = BIT_ULL(16) | 1;
+	union {
+		__uint128_t get_work;
+		uint64_t u64[2];
+	} gw;
+
+#ifdef RTE_ARCH_ARM64
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "rty%=:					\n"
+		     "		ldr %[tag], [%[tag_loc]]	\n"
+		     "		ldr %[wqp], [%[wqp_loc]]	\n"
+		     "		tbnz %[tag], 63, rty%=		\n"
+		     "done%=:	str %[gw], [%[pong]]		\n"
+		     "		dmb ld				\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
+		       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
+#else
+	gw.u64[0] = plt_read64(ws->tag_op);
+	while ((BIT_ULL(63)) & gw.u64[0])
+		gw.u64[0] = plt_read64(ws->tag_op);
+	gw.u64[1] = plt_read64(ws->wqp_op);
+	plt_write64(set_gw, ws_pair->getwrk_op);
+#endif
+
+	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
+		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
+		    (gw.u64[0] & 0xffffffff);
+
+	ev->event = gw.u64[0];
+	ev->u64 = gw.u64[1];
+
+	return !!gw.u64[1];
+}
+
+static __rte_always_inline uint16_t
+cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
+{
+	union {
+		__uint128_t get_work;
+		uint64_t u64[2];
+	} gw;
+
+	plt_write64(BIT_ULL(16) | /* wait for work. */
+			    1,	  /* Use Mask set 0. */
+		    ws->getwrk_op);
+#ifdef RTE_ARCH_ARM64
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldr %[tag], [%[tag_loc]]	\n"
+		     "		ldr %[wqp], [%[wqp_loc]]	\n"
+		     "		tbz %[tag], 63, done%=		\n"
+		     "		sevl				\n"
+		     "rty%=:	wfe				\n"
+		     "		ldr %[tag], [%[tag_loc]]	\n"
+		     "		ldr %[wqp], [%[wqp_loc]]	\n"
+		     "		tbnz %[tag], 63, rty%=		\n"
+		     "done%=:	dmb ld				\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
+#else
+	gw.u64[0] = plt_read64(ws->tag_op);
+	while ((BIT_ULL(63)) & gw.u64[0])
+		gw.u64[0] = plt_read64(ws->tag_op);
+
+	gw.u64[1] = plt_read64(ws->wqp_op);
+#endif
+
+	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
+		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
+		    (gw.u64[0] & 0xffffffff);
+
+	ev->event = gw.u64[0];
+	ev->u64 = gw.u64[1];
+
+	return !!gw.u64[1];
+}
+
+/* Used in cleaning up workslot. */
+static __rte_always_inline uint16_t
+cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
+{
+	union {
+		__uint128_t get_work;
+		uint64_t u64[2];
+	} gw;
+
+#ifdef RTE_ARCH_ARM64
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldr %[tag], [%[tag_loc]]	\n"
+		     "		ldr %[wqp], [%[wqp_loc]]	\n"
+		     "		tbz %[tag], 63, done%=		\n"
+		     "		sevl				\n"
+		     "rty%=:	wfe				\n"
+		     "		ldr %[tag], [%[tag_loc]]	\n"
+		     "		ldr %[wqp], [%[wqp_loc]]	\n"
+		     "		tbnz %[tag], 63, rty%=		\n"
+		     "done%=:	dmb ld				\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
+#else
+	gw.u64[0] = plt_read64(ws->tag_op);
+	while ((BIT_ULL(63)) & gw.u64[0])
+		gw.u64[0] = plt_read64(ws->tag_op);
+
+	gw.u64[1] = plt_read64(ws->wqp_op);
+#endif
+
+	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
+		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
+		    (gw.u64[0] & 0xffffffff);
+
+	ev->event = gw.u64[0];
+	ev->u64 = gw.u64[1];
+
+	return !!gw.u64[1];
+}
+
+#endif
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 437cdf3db..0a3ab71e4 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -29,6 +29,16 @@
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 
+#define CNXK_TT_FROM_TAG(x)	    (((x) >> 32) & SSO_TT_EMPTY)
+#define CNXK_TT_FROM_EVENT(x)	    (((x) >> 38) & SSO_TT_EMPTY)
+#define CNXK_EVENT_TYPE_FROM_TAG(x) (((x) >> 28) & 0xf)
+#define CNXK_SUB_EVENT_FROM_TAG(x)  (((x) >> 20) & 0xff)
+#define CNXK_CLR_SUB_EVENT(x)	    (~(0xffu << 20) & x)
+#define CNXK_GRP_FROM_TAG(x)	    (((x) >> 36) & 0x3ff)
+#define CNXK_SWTAG_PEND(x)	    (BIT_ULL(62) & x)
+
+#define CN9K_SSOW_GET_BASE_ADDR(_GW) ((_GW)-SSOW_LF_GWS_OP_GET_WORK0)
+
 #define CN10K_GW_MODE_NONE     0
 #define CN10K_GW_MODE_PREF     1
 #define CN10K_GW_MODE_PREF_WFE 2
diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h
new file mode 100644
index 000000000..4eb46ae16
--- /dev/null
+++ b/drivers/event/cnxk/cnxk_worker.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef __CNXK_WORKER_H__
+#define __CNXK_WORKER_H__
+
+#include "cnxk_eventdev.h"
+
+/* SSO Operations */
+
+static __rte_always_inline void
+cnxk_sso_hws_add_work(const uint64_t event_ptr, const uint32_t tag,
+		      const uint8_t new_tt, const uintptr_t grp_base)
+{
+	uint64_t add_work0;
+
+	add_work0 = tag | ((uint64_t)(new_tt) << 32);
+	roc_store_pair(add_work0, event_ptr, grp_base);
+}
+
+static __rte_always_inline void
+cnxk_sso_hws_swtag_desched(uint32_t tag, uint8_t new_tt, uint16_t grp,
+			   uintptr_t swtag_desched_op)
+{
+	uint64_t val;
+
+	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
+	__atomic_store_n((uint64_t *)swtag_desched_op, val, __ATOMIC_RELEASE);
+}
+
+static __rte_always_inline void
+cnxk_sso_hws_swtag_norm(uint32_t tag, uint8_t new_tt, uintptr_t swtag_norm_op)
+{
+	uint64_t val;
+
+	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
+	plt_write64(val, swtag_norm_op);
+}
+
+static __rte_always_inline void
+cnxk_sso_hws_swtag_untag(uintptr_t swtag_untag_op)
+{
+	plt_write64(0, swtag_untag_op);
+}
+
+static __rte_always_inline void
+cnxk_sso_hws_swtag_flush(uint64_t tag_op, uint64_t flush_op)
+{
+	if (CNXK_TT_FROM_TAG(plt_read64(tag_op)) == SSO_TT_EMPTY)
+		return;
+	plt_write64(0, flush_op);
+}
+
+static __rte_always_inline void
+cnxk_sso_hws_swtag_wait(uintptr_t tag_op)
+{
+#ifdef RTE_ARCH_ARM64
+	uint64_t swtp;
+
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldr %[swtb], [%[swtp_loc]]	\n"
+		     "		tbz %[swtb], 62, done%=		\n"
+		     "		sevl				\n"
+		     "rty%=:	wfe				\n"
+		     "		ldr %[swtb], [%[swtp_loc]]	\n"
+		     "		tbnz %[swtb], 62, rty%=		\n"
+		     "done%=:					\n"
+		     : [swtb] "=&r"(swtp)
+		     : [swtp_loc] "r"(tag_op));
+#else
+	/* Wait for the SWTAG/SWTAG_FULL operation to complete. */
+	while (plt_read64(tag_op) & BIT_ULL(62))
+		;
+#endif
+}
+
+static __rte_always_inline void
+cnxk_sso_hws_head_wait(uintptr_t tag_op)
+{
+#ifdef RTE_ARCH_ARM64
+	uint64_t swtp;
+
+	asm volatile(PLT_CPU_FEATURE_PREAMBLE
+		     "		ldr %[swtb], [%[swtp_loc]]	\n"
+		     "		tbz %[swtb], 35, done%=		\n"
+		     "		sevl				\n"
+		     "rty%=:	wfe				\n"
+		     "		ldr %[swtb], [%[swtp_loc]]	\n"
+		     "		tbnz %[swtb], 35, rty%=		\n"
+		     "done%=:					\n"
+		     : [swtb] "=&r"(swtp)
+		     : [swtp_loc] "r"(tag_op));
+#else
+	/* Wait for the HEAD bit to be set, i.e. until this work slot
+	 * reaches the head of its ordered flow.
+	 */
+	while (plt_read64(tag_op) & BIT_ULL(35))
+		;
+#endif
+}
+
+#endif
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 22eb28345..57b3f66ea 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -8,7 +8,9 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
 	subdir_done()
 endif
 
-sources = files('cn10k_eventdev.c',
+sources = files('cn10k_worker.c',
+		'cn10k_eventdev.c',
+		'cn9k_worker.c',
 		'cn9k_eventdev.c',
 		'cnxk_eventdev.c',
 )
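
A note on the tag-word remap that repeats in every get_work path above:

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

The GWS tag word returns the tag type in bits [33:32] and the group in
bits [45:36] (see CNXK_TT_FROM_TAG/CNXK_GRP_FROM_TAG), while struct
rte_event keeps sched_type in bits [39:38] and queue_id starting at bit
40; the << 6 and << 4 shifts move those fields into place, and the low
32 tag bits pass through unchanged. A standalone illustration follows;
the test values are made up, and the tag-type encoding of 1 for atomic
is an assumption matching SSO_TT_ATOMIC:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t tag = 0xdead; /* tag word bits [31:0] */
	const uint64_t tt = 0x1;     /* tag word bits [33:32] */
	const uint64_t grp = 0x2;    /* tag word bits [45:36] */
	uint64_t gw = (grp << 36) | (tt << 32) | tag;

	/* Same remap as the get_work paths above. */
	gw = (gw & (0x3ull << 32)) << 6 | (gw & (0x3FFull << 36)) << 4 |
	     (gw & 0xffffffff);

	assert(((gw >> 38) & 0x3) == tt);   /* rte_event sched_type */
	assert(((gw >> 40) & 0xff) == grp); /* rte_event queue_id */
	assert((gw & 0xffffffff) == tag);   /* flow id/tag bits */
	return 0;
}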