Zero-copy access to mempool caches is beneficial for PMD performance, and must be provided by the mempool library to fix [Bug 1052] without a performance regression.
[Bug 1052]: https://bugs.dpdk.org/show_bug.cgi?id=1052
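For reference, a minimal sketch of the intended put-side usage. This is
illustrative only: the tx_free_bulk() wrapper and the txep array of objects
to free are hypothetical and not part of this patch.

    #include <rte_mempool.h>
    #include <rte_memcpy.h>

    /* Hypothetical PMD TX free path: return n completed objects to the pool.
     * n must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE.
     */
    static void
    tx_free_bulk(struct rte_mempool *mp, struct rte_mempool_cache *cache,
            void * const *txep, unsigned int n)
    {
        /* Reserve room for n object pointers directly inside the cache. */
        void **cache_objs = rte_mempool_cache_zc_put_bulk(cache, mp, n);

        /* Copy the pointers straight from the PMD's array; no intermediate
         * bounce buffer is needed, which is the point of the zero-copy API.
         */
        rte_memcpy(cache_objs, txep, sizeof(void *) * n);
    }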
Changes from the RFC:
* Removed run-time parameter checks. (Honnappa)
These are hot fast path functions; correct application behaviour is
required, i.e. the function parameters must be valid.
* Added RTE_ASSERT for parameters instead.
Code for this is only generated if built with RTE_ENABLE_ASSERT.
* Removed fallback when 'cache' parameter is not set. (Honnappa)
* Chose the simple get function behaviour, i.e. do not move the existing
objects in the cache to the top of the new stack; just leave them at the
bottom (see the get-side sketch after this list).
* Renamed the functions. Other suggestions are welcome, of course. ;-)
* Updated the function descriptions.
* Added the functions to trace_fp and version.map.
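As noted above, the get function leaves any objects already in the cache at
the bottom of the stack; the n objects handed back to the caller are taken
from the top. A corresponding get-side sketch, again illustrative only (the
rx_refill_bulk() wrapper and the rx_pkts array are hypothetical):

    #include <rte_branch_prediction.h>
    #include <rte_errno.h>
    #include <rte_mempool.h>
    #include <rte_memcpy.h>

    /* Hypothetical PMD RX refill path: fetch n objects from the pool.
     * n must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE.
     */
    static int
    rx_refill_bulk(struct rte_mempool *mp, struct rte_mempool_cache *cache,
            void **rx_pkts, unsigned int n)
    {
        /* Make n objects available at the top of the cache. */
        void **cache_objs = rte_mempool_cache_zc_get_bulk(cache, mp, n);

        if (unlikely(cache_objs == NULL))
            return -rte_errno; /* Cache + pool hold fewer than n objects. */

        /* Copy the object pointers straight out of the cache. */
        rte_memcpy(rx_pkts, cache_objs, sizeof(void *) * n);
        return 0;
    }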
Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
---
lib/mempool/rte_mempool.h | 124 +++++++++++++++++++++++++++++
lib/mempool/rte_mempool_trace_fp.h | 16 ++++
lib/mempool/version.map | 6 ++
3 files changed, 146 insertions(+)
@@ -47,6 +47,7 @@
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>
+#include <rte_errno.h>
#include "rte_mempool_trace_fp.h"
@@ -1346,6 +1347,129 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
cache->len = 0;
}
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Zero-copy put objects in a user-owned mempool cache backed by the specified mempool.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ * @param mp
+ * A pointer to the mempool.
+ * @param n
+ * The number of objects to be put in the mempool cache.
+ * Must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE.
+ * @return
+ * The pointer to where to put the objects in the mempool cache.
+ */
+__rte_experimental
+static __rte_always_inline void *
+rte_mempool_cache_zc_put_bulk(struct rte_mempool_cache *cache,
+ struct rte_mempool *mp,
+ unsigned int n)
+{
+ void **cache_objs;
+
+ RTE_ASSERT(cache != NULL);
+ RTE_ASSERT(mp != NULL);
+ RTE_ASSERT(n <= RTE_MEMPOOL_CACHE_MAX_SIZE);
+
+ rte_mempool_trace_cache_zc_put_bulk(cache, mp, n);
+
+	/* Increment stats now, since adding to the mempool always succeeds. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
+ /*
+ * The cache follows the following algorithm:
+ * 1. If the objects cannot be added to the cache without crossing
+ * the flush threshold, flush the cache to the backend.
+ * 2. Add the objects to the cache.
+ */
+
+ if (cache->len + n <= cache->flushthresh) {
+ cache_objs = &cache->objs[cache->len];
+ cache->len += n;
+ } else {
+ cache_objs = &cache->objs[0];
+ rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
+ cache->len = n;
+ }
+
+ return cache_objs;
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Zero-copy get objects from a user-owned mempool cache backed by the specified mempool.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ * @param mp
+ * A pointer to the mempool.
+ * @param n
+ * The number of objects to be made available for extraction from the mempool cache.
+ * Must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE.
+ * @return
+ * The pointer to the objects in the mempool cache.
+ * NULL on error; i.e. the cache plus the backend pool does not contain n objects,
+ * with rte_errno set to the error code of the mempool dequeue function.
+ */
+__rte_experimental
+static __rte_always_inline void *
+rte_mempool_cache_zc_get_bulk(struct rte_mempool_cache *cache,
+ struct rte_mempool *mp,
+ unsigned int n)
+{
+ unsigned int len;
+
+ RTE_ASSERT(cache != NULL);
+ RTE_ASSERT(mp != NULL);
+ RTE_ASSERT(n <= RTE_MEMPOOL_CACHE_MAX_SIZE);
+
+ rte_mempool_trace_cache_zc_get_bulk(cache, mp, n);
+
+ len = cache->len;
+
+ if (unlikely(n > len)) {
+ /* Fill the cache from the backend; fetch size + requested - len objects. */
+ int ret;
+ const unsigned int size = cache->size;
+
+ ret = rte_mempool_ops_dequeue_bulk(mp, &cache->objs[len], size + n - len);
+ if (unlikely(ret < 0)) {
+ /*
+ * We are buffer constrained.
+ * Do not fill the cache, just satisfy the request.
+ */
+ ret = rte_mempool_ops_dequeue_bulk(mp, &cache->objs[len], n - len);
+ if (unlikely(ret < 0)) {
+ /* Unable to satisfy the request. */
+
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+
+ rte_errno = -ret;
+ return NULL;
+ }
+
+ len = 0;
+ } else
+ len = size;
+ } else
+ len -= n;
+
+ cache->len = len;
+
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
+
+ return &cache->objs[len];
+}
+
/**
* @internal Put several objects back in the mempool; used internally.
* @param mp
@@ -109,6 +109,22 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_ptr(mempool);
)
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_cache_zc_put_bulk,
+ RTE_TRACE_POINT_ARGS(void *cache, void *mempool, uint32_t nb_objs),
+ rte_trace_point_emit_ptr(cache);
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_cache_zc_get_bulk,
+ RTE_TRACE_POINT_ARGS(void *cache, void *mempool, uint32_t nb_objs),
+ rte_trace_point_emit_ptr(cache);
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_u32(nb_objs);
+)
+
#ifdef __cplusplus
}
#endif
@@ -63,6 +63,12 @@ EXPERIMENTAL {
__rte_mempool_trace_ops_alloc;
__rte_mempool_trace_ops_free;
__rte_mempool_trace_set_ops_byname;
+
+ # added in 23.03
+ rte_mempool_cache_zc_put_bulk;
+ __rte_mempool_trace_cache_zc_put_bulk;
+ rte_mempool_cache_zc_get_bulk;
+ __rte_mempool_trace_cache_zc_get_bulk;
};
INTERNAL {