From patchwork Sat Jun 1 18:53:47 2019
X-Patchwork-Submitter: Pavan Nikhilesh Bhagavatula
X-Patchwork-Id: 54034
X-Patchwork-Delegate: jerinj@marvell.com
Date: Sun, 2 Jun 2019 00:23:47 +0530
Message-ID: <20190601185355.370-38-pbhagavatula@marvell.com>
In-Reply-To: <20190601185355.370-1-pbhagavatula@marvell.com>
References: <20190601185355.370-1-pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH 37/44] event/octeontx2: add event timer arm routine

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add event timer arm routine.
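
For context (illustration only, not part of this patch): the arm handlers added
below are reached through the generic event timer adapter API. A minimal
application-side sketch, assuming the adapter has already been created and
started, with a made-up helper name, queue id and payload, could look like:

#include <rte_errno.h>
#include <rte_event_timer_adapter.h>
#include <rte_eventdev.h>

/* Hypothetical helper: arm one timer that fires after 'ticks' adapter
 * ticks and delivers an event carrying 'obj' to 'queue_id'.
 */
static inline int
app_arm_one_timer(const struct rte_event_timer_adapter *adptr,
		  struct rte_event_timer *tim, void *obj,
		  uint8_t queue_id, uint64_t ticks)
{
	struct rte_event_timer *tims[1] = {tim};

	tim->ev.op = RTE_EVENT_OP_NEW;
	tim->ev.queue_id = queue_id;
	tim->ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	tim->ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim->ev.event_ptr = obj;
	tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	tim->timeout_ticks = ticks;

	/* Ends up in the otx2_tim_arm_burst_* variant selected by
	 * tim_set_fp_ops() below.
	 */
	if (rte_event_timer_arm_burst(adptr, tims, 1) != 1)
		return -rte_errno;

	return 0;
}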
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/octeontx2/otx2_tim_evdev.c  |  20 +++
 drivers/event/octeontx2/otx2_tim_evdev.h  |  33 ++++
 drivers/event/octeontx2/otx2_tim_worker.c |  77 ++++++++
 drivers/event/octeontx2/otx2_tim_worker.h | 204 ++++++++++++++++++++++
 4 files changed, 334 insertions(+)

diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index 71daac20d..68f6cd75a 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -29,6 +29,23 @@ tim_get_msix_offsets(void)
 	return rc;
 }
 
+static void
+tim_set_fp_ops(struct otx2_tim_ring *tim_ring)
+{
+	uint8_t prod_flag = !tim_ring->prod_type_sp;
+
+	/* [MOD/AND] [DFB/FB] [SP][MP]*/
+	const rte_event_timer_arm_burst_t arm_burst[2][2][2] = {
+#define FP(_name, _f3, _f2, _f1, flags) \
+	[_f3][_f2][_f1] = otx2_tim_arm_burst_ ## _name,
+TIM_ARM_FASTPATH_MODES
+#undef FP
+	};
+
+	otx2_tim_ops.arm_burst = arm_burst[tim_ring->optimized]
+				 [tim_ring->ena_dfb][prod_flag];
+}
+
 static void
 otx2_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
 		       struct rte_event_timer_adapter_info *adptr_info)
@@ -326,6 +343,9 @@ otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
 		     tim_ring->base + TIM_LF_RING_BASE);
 	otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
 
+	/* Set fastpath ops. */
+	tim_set_fp_ops(tim_ring);
+
 	/* Update SSO xae count. */
 	sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)&nb_timers,
 			 RTE_EVENT_TYPE_TIMER);
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index 4034f1a8b..e35f95237 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -7,6 +7,7 @@
 
 #include <rte_event_timer_adapter.h>
 #include <rte_event_timer_adapter_pmd.h>
+#include <rte_reciprocal.h>
 
 #include "otx2_dev.h"
 
@@ -60,6 +61,13 @@
 #define OTX2_TIM_NB_CNK_SLOTS(sz)	(((sz) / OTX2_TIM_CHUNK_ALIGNMENT) - 1)
 #define OTX2_TIM_MIN_TMO_TKS		(256)
 
+#define OTX2_TIM_SP		0x1
+#define OTX2_TIM_MP		0x2
+#define OTX2_TIM_BKT_AND	0x4
+#define OTX2_TIM_BKT_MOD	0x8
+#define OTX2_TIM_ENA_FB		0x10
+#define OTX2_TIM_ENA_DFB	0x20
+
 enum otx2_tim_clk_src {
 	OTX2_TIM_CLK_SRC_10NS = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 	OTX2_TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
@@ -85,6 +93,11 @@ struct otx2_tim_bkt {
 	uint64_t pad;
 } __rte_packed __rte_aligned(32);
 
+struct otx2_tim_ent {
+	uint64_t w0;
+	uint64_t wqe;
+} __rte_packed;
+
 struct otx2_tim_evdev {
 	struct rte_pci_device *pci_dev;
 	struct rte_eventdev *event_dev;
@@ -101,8 +114,10 @@ struct otx2_tim_evdev {
 
 struct otx2_tim_ring {
 	uintptr_t base;
+	struct rte_reciprocal_u64 fast_div;
 	uint16_t nb_chunk_slots;
 	uint32_t nb_bkts;
+	uint64_t ring_start_cyc;
 	struct otx2_tim_bkt *bkt;
 	struct rte_mempool *chunk_pool;
 	uint64_t tck_int;
@@ -132,6 +147,24 @@ otx2_tim_priv_get(void)
 	return mz->addr;
 }
 
+#define TIM_ARM_FASTPATH_MODES						   \
+FP(mod_sp,    0, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
+FP(mod_mp,    0, 0, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
+FP(mod_fb_sp, 0, 1, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB | OTX2_TIM_SP)  \
+FP(mod_fb_mp, 0, 1, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB | OTX2_TIM_MP)  \
+FP(and_sp,    1, 0, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
+FP(and_mp,    1, 0, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
+FP(and_fb_sp, 1, 1, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB | OTX2_TIM_SP)  \
+FP(and_fb_mp, 1, 1, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB | OTX2_TIM_MP)  \
+
+#define FP(_name, _f3, _f2, _f1, flags)					   \
+uint16_t otx2_tim_arm_burst_ ## _name(					   \
+		const struct rte_event_timer_adapter *adptr,		   \
+		struct rte_event_timer **tim,				   \
+		const uint16_t nb_timers);
+TIM_ARM_FASTPATH_MODES
+#undef FP
+
 int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
 		      const struct rte_event_timer_adapter_ops **ops);
diff --git a/drivers/event/octeontx2/otx2_tim_worker.c b/drivers/event/octeontx2/otx2_tim_worker.c
index 29ed1fd5a..409575ec4 100644
--- a/drivers/event/octeontx2/otx2_tim_worker.c
+++ b/drivers/event/octeontx2/otx2_tim_worker.c
@@ -5,3 +5,80 @@
 #include "otx2_tim_evdev.h"
 #include "otx2_tim_worker.h"
 
+static inline int
+tim_arm_checks(const struct otx2_tim_ring * const tim_ring,
+	       struct rte_event_timer * const tim)
+{
+	if (unlikely(tim->state)) {
+		tim->state = RTE_EVENT_TIMER_ERROR;
+		rte_errno = EALREADY;
+		goto fail;
+	}
+
+	if (unlikely(!tim->timeout_ticks ||
+		     tim->timeout_ticks >= tim_ring->nb_bkts)) {
+		tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
+			: RTE_EVENT_TIMER_ERROR_TOOEARLY;
+		rte_errno = EINVAL;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static inline void
+tim_format_event(const struct rte_event_timer * const tim,
+		 struct otx2_tim_ent * const entry)
+{
+	entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+		(tim->ev.event & 0xFFFFFFFFF);
+	entry->wqe = tim->ev.u64;
+}
+
+static __rte_always_inline uint16_t
+tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
+		    struct rte_event_timer **tim,
+		    const uint16_t nb_timers,
+		    const uint8_t flags)
+{
+	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+	struct otx2_tim_ent entry;
+	uint16_t index;
+	int ret;
+
+	for (index = 0; index < nb_timers; index++) {
+		if (tim_arm_checks(tim_ring, tim[index]))
+			break;
+
+		tim_format_event(tim[index], &entry);
+		if (flags & OTX2_TIM_SP)
+			ret = tim_add_entry_sp(tim_ring,
+					       tim[index]->timeout_ticks,
+					       tim[index], &entry, flags);
+		if (flags & OTX2_TIM_MP)
+			ret = tim_add_entry_mp(tim_ring,
+					       tim[index]->timeout_ticks,
+					       tim[index], &entry, flags);
+
+		if (unlikely(ret)) {
+			rte_errno = -ret;
+			break;
+		}
+	}
+
+	return index;
+}
+
+#define FP(_name, _f3, _f2, _f1, _flags)				   \
+uint16_t __rte_noinline							   \
+otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
+			     struct rte_event_timer **tim,		   \
+			     const uint16_t nb_timers)			   \
+{									   \
+	return tim_timer_arm_burst(adptr, tim, nb_timers, _flags);	   \
+}
+TIM_ARM_FASTPATH_MODES
+#undef FP
diff --git a/drivers/event/octeontx2/otx2_tim_worker.h b/drivers/event/octeontx2/otx2_tim_worker.h
index ccb137d13..a5e0d56bc 100644
--- a/drivers/event/octeontx2/otx2_tim_worker.h
+++ b/drivers/event/octeontx2/otx2_tim_worker.h
@@ -108,4 +108,208 @@ tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
 	return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
 }
 
+static __rte_always_inline struct otx2_tim_bkt *
+tim_get_target_bucket(struct otx2_tim_ring * const tim_ring,
+		      const uint32_t rel_bkt, const uint8_t flag)
+{
+	const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
+	uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
+			&tim_ring->fast_div) + rel_bkt;
+
+	if (flag & OTX2_TIM_BKT_MOD)
+		bucket = bucket % tim_ring->nb_bkts;
+	if (flag & OTX2_TIM_BKT_AND)
+		bucket = bucket & (tim_ring->nb_bkts - 1);
+
+	return &tim_ring->bkt[bucket];
+}
+
+static struct otx2_tim_ent *
+tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
+	    struct otx2_tim_bkt * const bkt)
+{
+	struct otx2_tim_ent *chunk;
+	struct otx2_tim_ent *pnext;
+
+	chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
+	chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
+			tim_ring->nb_chunk_slots)->w0;
+	while (chunk) {
+		pnext = (struct otx2_tim_ent *)(uintptr_t)
+			((chunk + tim_ring->nb_chunk_slots)->w0);
+		rte_mempool_put(tim_ring->chunk_pool, chunk);
+		chunk = pnext;
+	}
+
+	return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
+}
+
+static struct otx2_tim_ent *
+tim_refill_chunk(struct otx2_tim_bkt * const bkt,
+		 struct otx2_tim_ring * const tim_ring)
+{
+	struct otx2_tim_ent *chunk;
+
+	if (bkt->nb_entry || !bkt->first_chunk) {
+		if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
+					     (void **)&chunk)))
+			return NULL;
+		if (bkt->nb_entry) {
+			*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
+					bkt->current_chunk) +
+					tim_ring->nb_chunk_slots) =
+				(uintptr_t)chunk;
+		} else {
+			bkt->first_chunk = (uintptr_t)chunk;
+		}
+	} else {
+		chunk = tim_clr_bkt(tim_ring, bkt);
+		bkt->first_chunk = (uintptr_t)chunk;
+	}
+	*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+
+	return chunk;
+}
+
+static struct otx2_tim_ent *
+tim_insert_chunk(struct otx2_tim_bkt * const bkt,
+		 struct otx2_tim_ring * const tim_ring)
+{
+	struct otx2_tim_ent *chunk;
+
+	if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
+		return NULL;
+
+	*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+	if (bkt->nb_entry) {
+		*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
+					bkt->current_chunk) +
+				tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
+	} else {
+		bkt->first_chunk = (uintptr_t)chunk;
+	}
+
+	return chunk;
+}
+
+static __rte_always_inline int
+tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
+		 const uint32_t rel_bkt,
+		 struct rte_event_timer * const tim,
+		 const struct otx2_tim_ent * const pent,
+		 const uint8_t flags)
+{
+	struct otx2_tim_ent *chunk;
+	struct otx2_tim_bkt *bkt;
+	uint64_t lock_sema;
+	int16_t rem;
+
+	bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
+
+__retry:
+	/* Get Bucket sema*/
+	lock_sema = tim_bkt_fetch_sema(bkt);
+
+	/* Bucket related checks. */
+	if (unlikely(tim_bkt_get_hbt(lock_sema)))
+		goto __retry;
+
+	/* Insert the work. */
+	rem = tim_bkt_fetch_rem(lock_sema);
+
+	if (!rem) {
+		if (flags & OTX2_TIM_ENA_FB)
+			chunk = tim_refill_chunk(bkt, tim_ring);
+		if (flags & OTX2_TIM_ENA_DFB)
+			chunk = tim_insert_chunk(bkt, tim_ring);
+
+		if (unlikely(chunk == NULL)) {
+			tim_bkt_set_rem(bkt, 0);
+			tim->impl_opaque[0] = 0;
+			tim->impl_opaque[1] = 0;
+			tim->state = RTE_EVENT_TIMER_ERROR;
+			return -ENOMEM;
+		}
+		bkt->current_chunk = (uintptr_t)chunk;
+		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
+	} else {
+		chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+		chunk += tim_ring->nb_chunk_slots - rem;
+	}
+
+	/* Copy work entry. */
+	*chunk = *pent;
+
+	tim_bkt_inc_nent(bkt);
+
+	tim->impl_opaque[0] = (uintptr_t)chunk;
+	tim->impl_opaque[1] = (uintptr_t)bkt;
+	tim->state = RTE_EVENT_TIMER_ARMED;
+
+	return 0;
+}
+
+static __rte_always_inline int
+tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
+		 const uint32_t rel_bkt,
+		 struct rte_event_timer * const tim,
+		 const struct otx2_tim_ent * const pent,
+		 const uint8_t flags)
+{
+	struct otx2_tim_ent *chunk;
+	struct otx2_tim_bkt *bkt;
+	uint64_t lock_sema;
+	int16_t rem;
+
+__retry:
+	bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
+
+	/* Get Bucket sema*/
+	lock_sema = tim_bkt_fetch_sema_lock(bkt);
+
+	/* Bucket related checks. */
+	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+		tim_bkt_dec_lock(bkt);
+		goto __retry;
+	}
+
+	rem = tim_bkt_fetch_rem(lock_sema);
+
+	if (rem < 0) {
+		/* Goto diff bucket. */
+		tim_bkt_dec_lock(bkt);
+		goto __retry;
+	} else if (!rem) {
+		/* Only one thread can be here*/
+		if (flags & OTX2_TIM_ENA_FB)
+			chunk = tim_refill_chunk(bkt, tim_ring);
+		if (flags & OTX2_TIM_ENA_DFB)
+			chunk = tim_insert_chunk(bkt, tim_ring);
+
+		if (unlikely(chunk == NULL)) {
+			tim_bkt_set_rem(bkt, 0);
+			tim_bkt_dec_lock(bkt);
+			tim->impl_opaque[0] = 0;
+			tim->impl_opaque[1] = 0;
+			tim->state = RTE_EVENT_TIMER_ERROR;
+			return -ENOMEM;
+		}
+		bkt->current_chunk = (uintptr_t)chunk;
+		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
+	} else {
+		chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+		chunk += tim_ring->nb_chunk_slots - rem;
+	}
+
+	/* Copy work entry. */
+	*chunk = *pent;
+	tim_bkt_dec_lock(bkt);
+	tim_bkt_inc_nent(bkt);
+	tim->impl_opaque[0] = (uintptr_t)chunk;
+	tim->impl_opaque[1] = (uintptr_t)bkt;
+	tim->state = RTE_EVENT_TIMER_ARMED;
+
+	return 0;
+}
+
 #endif /* __OTX2_TIM_WORKER_H__ */
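
A side note on tim_get_target_bucket() above (illustration, not part of the
patch): the target bucket is the current tick count, derived from rte_rdtsc()
through the precomputed reciprocal divisor, plus the relative timeout, wrapped
onto the ring. The OTX2_TIM_BKT_AND wrap is only equivalent to the modulo when
the bucket count is a power of two, which is presumably what the
tim_ring->optimized fastpath selection relies on. A standalone sketch of that
equivalence, with made-up bucket count and tick values:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint32_t nb_bkts = 1024;   /* power-of-two ring size (made up) */
	const uint64_t cur_bkt = 123456; /* current absolute tick count */
	const uint32_t rel_bkt = 300;    /* relative timeout in buckets */
	const uint64_t abs_bkt = cur_bkt + rel_bkt;

	/* Mask and modulo select the same bucket when nb_bkts is 2^n. */
	assert((abs_bkt % nb_bkts) == (abs_bkt & (nb_bkts - 1)));

	return 0;
}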