From patchwork Tue Feb 7 14:12:45 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Bruce Richardson
X-Patchwork-Id: 20225
X-Original-To: patchwork@dpdk.org
Delivered-To: patchwork@dpdk.org
Received: from [92.243.14.124] (localhost [IPv6:::1])
	by dpdk.org (Postfix) with ESMTP id 57339F937;
	Tue, 7 Feb 2017 15:13:51 +0100 (CET)
Received: from mga07.intel.com (mga07.intel.com [134.134.136.100])
	by dpdk.org (Postfix) with ESMTP id 3A1ACF95C for ;
	Tue, 7 Feb 2017 15:13:49 +0100 (CET)
Received: from orsmga005.jf.intel.com ([10.7.209.41])
	by orsmga105.jf.intel.com with ESMTP; 07 Feb 2017 06:13:48 -0800
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.33,346,1477983600"; d="scan'208";a="61813850"
Received: from sivswdev01.ir.intel.com (HELO localhost.localdomain) ([10.237.217.45])
	by orsmga005.jf.intel.com with ESMTP; 07 Feb 2017 06:13:47 -0800
From: Bruce Richardson
To: olivier.matz@6wind.com
Cc: thomas.monjalon@6wind.com, keith.wiles@intel.com,
	konstantin.ananyev@intel.com, stephen@networkplumber.org,
	dev@dpdk.org, Bruce Richardson
Date: Tue, 7 Feb 2017 14:12:45 +0000
Message-Id: <1486476777-24768-8-git-send-email-bruce.richardson@intel.com>
X-Mailer: git-send-email 1.7.0.7
In-Reply-To: <20170125121456.GA24344@bricha3-MOBL3.ger.corp.intel.com>
References: <20170125121456.GA24344@bricha3-MOBL3.ger.corp.intel.com>
Subject: [dpdk-dev] [PATCH RFCv3 07/19] ring: remove debug setting
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

The debug option only provided some statistics to the user, most of which
could be tracked by the application itself. Remove it both as a compile-time
option and as a feature, simplifying the code somewhat.

Signed-off-by: Bruce Richardson
---
 app/test/test_ring.c               | 410 -------------------------------------
 config/common_base                 |   1 -
 doc/guides/prog_guide/ring_lib.rst |   7 -
 lib/librte_ring/rte_ring.c         |  41 ----
 lib/librte_ring/rte_ring.h         |  97 ++-------
 5 files changed, 12 insertions(+), 544 deletions(-)

diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index af74e7d..0cf55b5 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
 	return -1;
 }
 
-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
-	printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
-	return 0;
-#else
-	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
-	int ret;
-	unsigned i;
-	unsigned num_items = 0;
-	unsigned failed_enqueue_ops = 0;
-	unsigned failed_enqueue_items = 0;
-	unsigned failed_dequeue_ops = 0;
-	unsigned failed_dequeue_items = 0;
-	unsigned last_enqueue_ops = 0;
-	unsigned last_enqueue_items = 0;
-	unsigned last_quota_ops = 0;
-	unsigned last_quota_items = 0;
-	unsigned lcore_id = rte_lcore_id();
-	struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
-	printf("Test the ring stats.\n");
-
-	/* Reset the watermark in case it was set in another test. */
-	rte_ring_set_water_mark(r, 0);
-
-	/* Reset the ring stats. */
-	memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
-	/* Allocate some dummy object pointers. */
-	src = malloc(RING_SIZE*2*sizeof(void *));
-	if (src == NULL)
-		goto fail;
-
-	for (i = 0; i < RING_SIZE*2 ; i++) {
-		src[i] = (void *)(unsigned long)i;
-	}
-
-	/* Allocate some memory for copied objects. */
-	dst = malloc(RING_SIZE*2*sizeof(void *));
-	if (dst == NULL)
-		goto fail;
-
-	memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
-	/* Set the head and tail pointers. */
-	cur_src = src;
-	cur_dst = dst;
-
-	/* Do Enqueue tests. */
-	printf("Test the dequeue stats.\n");
-
-	/* Fill the ring up to RING_SIZE -1. */
-	printf("Fill the ring.\n");
-	for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
-		rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
-		cur_src += MAX_BULK;
-	}
-
-	/* Adjust for final enqueue = MAX_BULK -1. */
-	cur_src--;
-
-	printf("Verify that the ring is full.\n");
-	if (rte_ring_full(r) != 1)
-		goto fail;
-
-
-	printf("Verify the enqueue success stats.\n");
-	/* Stats should match above enqueue operations to fill the ring. */
-	if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
-		goto fail;
-
-	/* Current max objects is RING_SIZE -1. */
-	if (ring_stats->enq_success_objs != RING_SIZE -1)
-		goto fail;
-
-	/* Shouldn't have any failures yet. */
-	if (ring_stats->enq_fail_bulk != 0)
-		goto fail;
-	if (ring_stats->enq_fail_objs != 0)
-		goto fail;
-
-
-	printf("Test stats for SP burst enqueue to a full ring.\n");
-	num_items = 2;
-	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != 0)
-		goto fail;
-
-	failed_enqueue_ops += 1;
-	failed_enqueue_items += num_items;
-
-	/* The enqueue should have failed. */
-	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
-		goto fail;
-	if (ring_stats->enq_fail_objs != failed_enqueue_items)
-		goto fail;
-
-
-	printf("Test stats for SP bulk enqueue to a full ring.\n");
-	num_items = 4;
-	ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
-	if (ret != -ENOBUFS)
-		goto fail;
-
-	failed_enqueue_ops += 1;
-	failed_enqueue_items += num_items;
-
-	/* The enqueue should have failed. */
-	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
-		goto fail;
-	if (ring_stats->enq_fail_objs != failed_enqueue_items)
-		goto fail;
-
-
-	printf("Test stats for MP burst enqueue to a full ring.\n");
-	num_items = 8;
-	ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != 0)
-		goto fail;
-
-	failed_enqueue_ops += 1;
-	failed_enqueue_items += num_items;
-
-	/* The enqueue should have failed. */
-	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
-		goto fail;
-	if (ring_stats->enq_fail_objs != failed_enqueue_items)
-		goto fail;
-
-
-	printf("Test stats for MP bulk enqueue to a full ring.\n");
-	num_items = 16;
-	ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
-	if (ret != -ENOBUFS)
-		goto fail;
-
-	failed_enqueue_ops += 1;
-	failed_enqueue_items += num_items;
-
-	/* The enqueue should have failed. */
-	if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
-		goto fail;
-	if (ring_stats->enq_fail_objs != failed_enqueue_items)
-		goto fail;
-
-
-	/* Do Dequeue tests. */
-	printf("Test the dequeue stats.\n");
-
-	printf("Empty the ring.\n");
-	for (i = 0; i < RING_SIZE/MAX_BULK; i++) {
-		rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
-		cur_dst += MAX_BULK;
-	}
-
-	/* Adjust for final dequeue = MAX_BULK -1. */
-	cur_dst--;
-
-	printf("Verify the dequeue success stats.\n");
-	/* Stats should match above dequeue operations to empty the ring. */
-	if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
-		goto fail;
-
-	/* Objects dequeued is RING_SIZE -1. */
-	if (ring_stats->deq_success_objs != RING_SIZE -1)
-		goto fail;
-
-	/* Shouldn't have any dequeue failure stats yet. */
-	if (ring_stats->deq_fail_bulk != 0)
-		goto fail;
-
-	printf("Test stats for SC burst dequeue with an empty ring.\n");
-	num_items = 2;
-	ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != 0)
-		goto fail;
-
-	failed_dequeue_ops += 1;
-	failed_dequeue_items += num_items;
-
-	/* The dequeue should have failed. */
-	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
-		goto fail;
-	if (ring_stats->deq_fail_objs != failed_dequeue_items)
-		goto fail;
-
-
-	printf("Test stats for SC bulk dequeue with an empty ring.\n");
-	num_items = 4;
-	ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
-	if (ret != -ENOENT)
-		goto fail;
-
-	failed_dequeue_ops += 1;
-	failed_dequeue_items += num_items;
-
-	/* The dequeue should have failed. */
-	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
-		goto fail;
-	if (ring_stats->deq_fail_objs != failed_dequeue_items)
-		goto fail;
-
-
-	printf("Test stats for MC burst dequeue with an empty ring.\n");
-	num_items = 8;
-	ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != 0)
-		goto fail;
-	failed_dequeue_ops += 1;
-	failed_dequeue_items += num_items;
-
-	/* The dequeue should have failed. */
-	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
-		goto fail;
-	if (ring_stats->deq_fail_objs != failed_dequeue_items)
-		goto fail;
-
-
-	printf("Test stats for MC bulk dequeue with an empty ring.\n");
-	num_items = 16;
-	ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
-	if (ret != -ENOENT)
-		goto fail;
-
-	failed_dequeue_ops += 1;
-	failed_dequeue_items += num_items;
-
-	/* The dequeue should have failed. */
-	if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
-		goto fail;
-	if (ring_stats->deq_fail_objs != failed_dequeue_items)
-		goto fail;
-
-
-	printf("Test total enqueue/dequeue stats.\n");
-	/* At this point the enqueue and dequeue stats should be the same. */
-	if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
-		goto fail;
-	if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
-		goto fail;
-	if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
-		goto fail;
-	if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
-		goto fail;
-
-
-	/* Watermark Tests. */
-	printf("Test the watermark/quota stats.\n");
-
-	printf("Verify the initial watermark stats.\n");
-	/* Watermark stats should be 0 since there is no watermark. */
-	if (ring_stats->enq_quota_bulk != 0)
-		goto fail;
-	if (ring_stats->enq_quota_objs != 0)
-		goto fail;
-
-	/* Set a watermark. */
-	rte_ring_set_water_mark(r, 16);
-
-	/* Reset pointers. */
-	cur_src = src;
-	cur_dst = dst;
-
-	last_enqueue_ops = ring_stats->enq_success_bulk;
-	last_enqueue_items = ring_stats->enq_success_objs;
-
-
-	printf("Test stats for SP burst enqueue below watermark.\n");
-	num_items = 8;
-	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != num_items)
-		goto fail;
-
-	/* Watermark stats should still be 0. */
-	if (ring_stats->enq_quota_bulk != 0)
-		goto fail;
-	if (ring_stats->enq_quota_objs != 0)
-		goto fail;
-
-	/* Success stats should have increased. */
-	if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
-		goto fail;
-	if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
-		goto fail;
-
-	last_enqueue_ops = ring_stats->enq_success_bulk;
-	last_enqueue_items = ring_stats->enq_success_objs;
-
-
-	printf("Test stats for SP burst enqueue at watermark.\n");
-	num_items = 8;
-	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != num_items)
-		goto fail;
-
-	/* Watermark stats should have changed. */
-	if (ring_stats->enq_quota_bulk != 1)
-		goto fail;
-	if (ring_stats->enq_quota_objs != num_items)
-		goto fail;
-
-	last_quota_ops = ring_stats->enq_quota_bulk;
-	last_quota_items = ring_stats->enq_quota_objs;
-
-
-	printf("Test stats for SP burst enqueue above watermark.\n");
-	num_items = 1;
-	ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != num_items)
-		goto fail;
-
-	/* Watermark stats should have changed. */
-	if (ring_stats->enq_quota_bulk != last_quota_ops +1)
-		goto fail;
-	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
-		goto fail;
-
-	last_quota_ops = ring_stats->enq_quota_bulk;
-	last_quota_items = ring_stats->enq_quota_objs;
-
-
-	printf("Test stats for MP burst enqueue above watermark.\n");
-	num_items = 2;
-	ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
-	if ((ret & RTE_RING_SZ_MASK) != num_items)
-		goto fail;
-
-	/* Watermark stats should have changed. */
-	if (ring_stats->enq_quota_bulk != last_quota_ops +1)
-		goto fail;
-	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
-		goto fail;
-
-	last_quota_ops = ring_stats->enq_quota_bulk;
-	last_quota_items = ring_stats->enq_quota_objs;
-
-
-	printf("Test stats for SP bulk enqueue above watermark.\n");
-	num_items = 4;
-	ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
-	if (ret != -EDQUOT)
-		goto fail;
-
-	/* Watermark stats should have changed. */
-	if (ring_stats->enq_quota_bulk != last_quota_ops +1)
-		goto fail;
-	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
-		goto fail;
-
-	last_quota_ops = ring_stats->enq_quota_bulk;
-	last_quota_items = ring_stats->enq_quota_objs;
-
-
-	printf("Test stats for MP bulk enqueue above watermark.\n");
-	num_items = 8;
-	ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
-	if (ret != -EDQUOT)
-		goto fail;
-
-	/* Watermark stats should have changed. */
-	if (ring_stats->enq_quota_bulk != last_quota_ops +1)
-		goto fail;
-	if (ring_stats->enq_quota_objs != last_quota_items + num_items)
-		goto fail;
-
-	printf("Test watermark success stats.\n");
-	/* Success stats should be same as last non-watermarked enqueue. */
-	if (ring_stats->enq_success_bulk != last_enqueue_ops)
-		goto fail;
-	if (ring_stats->enq_success_objs != last_enqueue_items)
-		goto fail;
-
-
-	/* Cleanup. */
-
-	/* Empty the ring. */
-	for (i = 0; i < RING_SIZE/MAX_BULK; i++) {
-		rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
-		cur_dst += MAX_BULK;
-	}
-
-	/* Reset the ring stats. */
-	memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
-	/* Free memory before test completed */
-	free(src);
-	free(dst);
-	return 0;
-
-fail:
-	free(src);
-	free(dst);
-	return -1;
-#endif
-}
-
 /*
  * it will always fail to create ring with a wrong ring size number in this function
  */
@@ -1335,10 +929,6 @@ test_ring(void)
 	if (test_ring_basic() < 0)
 		return -1;
 
-	/* ring stats */
-	if (test_ring_stats() < 0)
-		return -1;
-
 	/* basic operations */
 	if (test_live_watermark_change() < 0)
 		return -1;
diff --git a/config/common_base b/config/common_base
index 7691647..3bbe3aa 100644
--- a/config/common_base
+++ b/config/common_base
@@ -447,7 +447,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile librte_ring
 #
 CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
 CONFIG_RTE_RING_PAUSE_REP_COUNT=0
 
 #
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f69753..d4ab502 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,
 This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
 
-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
 Use Cases
 ---------
 
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 183594f..2a04f05 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
 		RTE_CACHE_LINE_MASK) != 0);
 	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
 			  RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
-	RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
-			  RTE_CACHE_LINE_MASK) != 0);
-	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
-			  RTE_CACHE_LINE_MASK) != 0);
-#endif
 
 	/* init the ring structure */
 	memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
 void
 rte_ring_dump(FILE *f, const struct rte_ring *r)
 {
-#ifdef RTE_LIBRTE_RING_DEBUG
-	struct rte_ring_debug_stats sum;
-	unsigned lcore_id;
-#endif
-
 	fprintf(f, "ring <%s>@%p\n", r->name, r);
 	fprintf(f, "  flags=%x\n", r->flags);
 	fprintf(f, "  size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
 		fprintf(f, "  watermark=0\n");
 	else
 		fprintf(f, "  watermark=%"PRIu32"\n", r->watermark);
-
-	/* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
-	memset(&sum, 0, sizeof(sum));
-	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
-		sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
-		sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
-		sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
-		sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
-		sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
-		sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
-		sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
-		sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
-		sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
-		sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
-	}
-	fprintf(f, "  size=%"PRIu32"\n", r->size);
-	fprintf(f, "  enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
-	fprintf(f, "  enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
-	fprintf(f, "  enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
-	fprintf(f, "  enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
-	fprintf(f, "  enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
-	fprintf(f, "  enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
-	fprintf(f, "  deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
-	fprintf(f, "  deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
-	fprintf(f, "  deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
-	fprintf(f, "  deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
-	fprintf(f, "  no statistics available\n");
-#endif
 }
 
 /* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 1e4b8ad..c059daa 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
 	RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
 };
 
-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
-	uint64_t enq_success_bulk; /**< Successful enqueues number. */
-	uint64_t enq_success_objs; /**< Objects successfully enqueued. */
-	uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
-	uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
-	uint64_t enq_fail_bulk;    /**< Failed enqueues number. */
-	uint64_t enq_fail_objs;    /**< Objects that failed to be enqueued. */
-	uint64_t deq_success_bulk; /**< Successful dequeues number. */
-	uint64_t deq_success_objs; /**< Objects successfully dequeued. */
-	uint64_t deq_fail_bulk;    /**< Failed dequeues number. */
-	uint64_t deq_fail_objs;    /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
 #define RTE_RING_MZ_PREFIX "RG_"
 /**< The maximum length of a ring name. */
 #define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -179,10 +161,6 @@ struct rte_ring {
 	/** Ring consumer status. */
 	struct rte_ring_ht_ptr cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
 
-#ifdef RTE_LIBRTE_RING_DEBUG
-	struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
 	void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
 	                                   * not volatile so need to be careful
 	                                   * about compiler re-ordering */
@@ -194,27 +172,6 @@ struct rte_ring {
 #define RTE_RING_SZ_MASK  (unsigned)(0x0fffffff) /**< Ring size mask */
 
 /**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- *   A pointer to the ring.
- * @param name
- *   The name of the statistics field to increment in the ring.
- * @param n
- *   The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do {		\
-		unsigned __lcore_id = rte_lcore_id();	\
-		if (__lcore_id < RTE_MAX_LCORE) {	\
-			r->stats[__lcore_id].name##_objs += n;	\
-			r->stats[__lcore_id].name##_bulk += 1;	\
-		}					\
-	} while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
-/**
  * Calculate the memory size needed for a ring
  *
  * This function returns the number of bytes needed for a ring, given
@@ -455,17 +412,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 
 		/* check that we have enough room in ring */
 		if (unlikely(n > free_entries)) {
-			if (behavior == RTE_RING_QUEUE_FIXED) {
-				__RING_STAT_ADD(r, enq_fail, n);
+			if (behavior == RTE_RING_QUEUE_FIXED)
 				return -ENOBUFS;
-			}
 			else {
 				/* No free entry available */
-				if (unlikely(free_entries == 0)) {
-					__RING_STAT_ADD(r, enq_fail, n);
+				if (unlikely(free_entries == 0))
 					return 0;
-				}
-
 				n = free_entries;
 			}
 		}
@@ -480,15 +432,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	rte_smp_wmb();
 
 	/* if we exceed the watermark */
-	if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+	if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
 				(int)(n | RTE_RING_QUOT_EXCEED);
-		__RING_STAT_ADD(r, enq_quota, n);
-	}
-	else {
+	else
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-		__RING_STAT_ADD(r, enq_success, n);
-	}
 
 	/*
 	 * If there are other enqueues in progress that preceded us,
@@ -552,17 +500,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 
 	/* check that we have enough room in ring */
 	if (unlikely(n > free_entries)) {
-		if (behavior == RTE_RING_QUEUE_FIXED) {
-			__RING_STAT_ADD(r, enq_fail, n);
+		if (behavior == RTE_RING_QUEUE_FIXED)
 			return -ENOBUFS;
-		}
 		else {
 			/* No free entry available */
-			if (unlikely(free_entries == 0)) {
-				__RING_STAT_ADD(r, enq_fail, n);
+			if (unlikely(free_entries == 0))
 				return 0;
-			}
-
 			n = free_entries;
 		}
 	}
@@ -575,15 +518,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	rte_smp_wmb();
 
 	/* if we exceed the watermark */
-	if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+	if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
 				(int)(n | RTE_RING_QUOT_EXCEED);
-		__RING_STAT_ADD(r, enq_quota, n);
-	}
-	else {
+	else
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-		__RING_STAT_ADD(r, enq_success, n);
-	}
 
 	r->prod.tail = prod_next;
 	return ret;
@@ -647,16 +586,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 
 		/* Set the actual entries for dequeue */
 		if (n > entries) {
-			if (behavior == RTE_RING_QUEUE_FIXED) {
-				__RING_STAT_ADD(r, deq_fail, n);
+			if (behavior == RTE_RING_QUEUE_FIXED)
 				return -ENOENT;
-			}
 			else {
-				if (unlikely(entries == 0)){
-					__RING_STAT_ADD(r, deq_fail, n);
+				if (unlikely(entries == 0))
 					return 0;
-				}
-
 				n = entries;
 			}
 		}
@@ -686,7 +620,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 			sched_yield();
 		}
 	}
-	__RING_STAT_ADD(r, deq_success, n);
 
 	r->cons.tail = cons_next;
 	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -733,16 +666,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
 	entries = prod_tail - cons_head;
 
 	if (n > entries) {
-		if (behavior == RTE_RING_QUEUE_FIXED) {
-			__RING_STAT_ADD(r, deq_fail, n);
+		if (behavior == RTE_RING_QUEUE_FIXED)
 			return -ENOENT;
-		}
 		else {
-			if (unlikely(entries == 0)){
-				__RING_STAT_ADD(r, deq_fail, n);
+			if (unlikely(entries == 0))
 				return 0;
-			}
-
 			n = entries;
 		}
 	}
@@ -754,7 +682,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
 	DEQUEUE_PTRS();
 	rte_smp_rmb();
 
-	__RING_STAT_ADD(r, deq_success, n);
 	r->cons.tail = cons_next;
 	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
 }
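---
Editor's note, not part of the patch: the commit message argues that the removed
counters can be kept by the application itself. As a rough illustration only, the
sketch below shows one way an application using the burst API of this release (as
exercised by the removed test code above, masking the return value with
RTE_RING_SZ_MASK) might keep equivalent per-lcore enqueue counters. The names
app_ring_stats, app_stats and app_sp_enqueue_burst() are hypothetical and not part
of DPDK or of this series.

#include <rte_ring.h>
#include <rte_lcore.h>
#include <rte_memory.h>

/* Hypothetical per-lcore counters, mirroring what the removed debug option kept. */
struct app_ring_stats {
	uint64_t enq_success_bulk; /* burst calls that enqueued at least one object */
	uint64_t enq_success_objs; /* objects actually enqueued */
	uint64_t enq_fail_bulk;    /* burst calls that could not enqueue everything */
	uint64_t enq_fail_objs;    /* objects that did not fit in the ring */
} __rte_cache_aligned;

static struct app_ring_stats app_stats[RTE_MAX_LCORE];

/* Wrapper around the single-producer burst enqueue: the caller derives its own
 * stats from the return value, so no library support is needed. */
static inline unsigned
app_sp_enqueue_burst(struct rte_ring *r, void * const *objs, unsigned n)
{
	unsigned lcore_id = rte_lcore_id();
	/* mask off RTE_RING_QUOT_EXCEED, as the removed test code did */
	unsigned done = rte_ring_sp_enqueue_burst(r, objs, n) & RTE_RING_SZ_MASK;

	/* guard the array index, as the removed __RING_STAT_ADD macro did */
	if (lcore_id < RTE_MAX_LCORE) {
		struct app_ring_stats *s = &app_stats[lcore_id];

		if (done != 0) {
			s->enq_success_bulk++;
			s->enq_success_objs += done;
		}
		if (done < n) {
			s->enq_fail_bulk++;
			s->enq_fail_objs += n - done;
		}
	}
	return done;
}

A dequeue-side counterpart would follow the same pattern around
rte_ring_sc_dequeue_burst(), which keeps the counters out of the fast path of the
library while giving the application the same visibility the debug option offered.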