[dpdk-dev,v3,09/12] event/octeontx: optimize timer adapter resolution parameters
Checks
Commit Message
When the application sets the `RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES` flag
while creating the adapter, the underlying driver is free to optimize the
resolution for the best possible configuration.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
drivers/event/octeontx/timvf_evdev.c | 48 +++++++++++++++++++++++++++++++++++-
drivers/event/octeontx/timvf_evdev.h | 6 +++++
2 files changed, 53 insertions(+), 1 deletion(-)
Comments
-----Original Message-----
> Date: Tue, 3 Apr 2018 20:35:11 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
> erik.g.carrillo@intel.com
> Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v3 09/12] event/octeontx: optimize timer adapter
> resolution parameters
> X-Mailer: git-send-email 2.16.3
>
> When application sets `RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES` flag
> while creating adapter underlying driver is free to optimize the
> resolution for best possible configuration.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
> static int
> timvf_ring_start(const struct rte_event_timer_adapter *adptr)
> {
> @@ -217,7 +256,7 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
> }
>
> timr->tim_ring_id = adptr->data->id;
> - timr->tck_nsec = rcfg->timer_tick_ns;
> + timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
> timr->max_tout = rcfg->max_tmo_ns;
> timr->meta.nb_bkts = (timr->max_tout / timr->tck_nsec) + 1;
> timr->vbar0 = octeontx_timvf_bar(timr->tim_ring_id, 0);
> @@ -227,6 +266,13 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
>
> timr->nb_chunks = nb_timers / nb_chunk_slots;
>
> + /* Try to optimize the bucket parameters. */
> + if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
> + && !rte_is_power_of_2(timr->meta.nb_bkts)) {
> + optimize_bucket_parameters(timr);
> + timvf_log_info("Optimizing configured values");
You could print the adjusted values here.
> + }
> +
> if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
> mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
> timvf_log_info("Using single producer mode");
> diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
> index d8a6d111f..22c8c2266 100644
> --- a/drivers/event/octeontx/timvf_evdev.h
> +++ b/drivers/event/octeontx/timvf_evdev.h
> @@ -192,6 +192,12 @@ bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
> return rel_bkt % nb_bkts;
> }
>
> +static __rte_always_inline uint32_t __hot
__hot may not be required here as it is an inline function.
> +bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
> +{
> + return rel_bkt & (nb_bkts - 1);
> +}
> +
> int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
> uint32_t *caps, const struct rte_event_timer_adapter_ops **ops);
> uint16_t timvf_timer_unreg_burst(const struct rte_event_timer_adapter *adptr,
With above change:
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
@@ -82,6 +82,45 @@ timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
return octeontx_ssovf_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}
+static int
+optimize_bucket_parameters(struct timvf_ring *timr)
+{
+ uint32_t hbkts;
+ uint32_t lbkts;
+ uint64_t tck_nsec;
+
+ hbkts = rte_align32pow2(timr->meta.nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ hbkts = 0;
+
+ lbkts = rte_align32prevpow2(timr->meta.nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
+
+	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
+ lbkts = 0;
+
+ if (!hbkts && !lbkts)
+ return 0;
+
+ if (!hbkts) {
+ timr->meta.nb_bkts = lbkts;
+ goto end;
+ } else if (!lbkts) {
+ timr->meta.nb_bkts = hbkts;
+ goto end;
+ }
+
+ timr->meta.nb_bkts = (hbkts - timr->meta.nb_bkts) <
+ (timr->meta.nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+ timr->meta.get_target_bkt = bkt_and;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
+ (timr->meta.nb_bkts - 1)), 10);
+ return 1;
+}
+
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
@@ -217,7 +256,7 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
}
timr->tim_ring_id = adptr->data->id;
- timr->tck_nsec = rcfg->timer_tick_ns;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
timr->max_tout = rcfg->max_tmo_ns;
timr->meta.nb_bkts = (timr->max_tout / timr->tck_nsec) + 1;
timr->vbar0 = octeontx_timvf_bar(timr->tim_ring_id, 0);
@@ -227,6 +266,13 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
timr->nb_chunks = nb_timers / nb_chunk_slots;
+ /* Try to optimize the bucket parameters. */
+ if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+ && !rte_is_power_of_2(timr->meta.nb_bkts)) {
+ optimize_bucket_parameters(timr);
+ timvf_log_info("Optimizing configured values");
+ }
+
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
timvf_log_info("Using single producer mode");
@@ -192,6 +192,12 @@ bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
return rel_bkt % nb_bkts;
}
+static __rte_always_inline uint32_t __hot
+bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
+{
+ return rel_bkt & (nb_bkts - 1);
+}
+
int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps, const struct rte_event_timer_adapter_ops **ops);
uint16_t timvf_timer_unreg_burst(const struct rte_event_timer_adapter *adptr,