get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
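
A minimal client sketch in Python (requests), not part of the response below: GET needs no credentials, while PUT/PATCH are typically restricted to maintainers and authenticated with a Patchwork API token. The token value and the updated field ("state") are illustrative placeholders.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 73577

# Read the patch; no authentication required.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# Partially update the patch; PATCH changes only the fields supplied.
# TOKEN is a hypothetical maintainer API token.
TOKEN = "hypothetical-api-token"
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()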

GET /api/patches/73577/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 73577,
    "url": "https://patches.dpdk.org/api/patches/73577/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200709061212.15931-4-feifei.wang2@arm.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200709061212.15931-4-feifei.wang2@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200709061212.15931-4-feifei.wang2@arm.com",
    "date": "2020-07-09T06:12:12",
    "name": "[v2,3/3] ring: use element APIs to implement legacy APIs",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ab236a3ab6366bafe5539f1b0b98fd11987f839d",
    "submitter": {
        "id": 1771,
        "url": "https://patches.dpdk.org/api/people/1771/?format=api",
        "name": "Feifei Wang",
        "email": "feifei.wang2@arm.com"
    },
    "delegate": {
        "id": 24651,
        "url": "https://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200709061212.15931-4-feifei.wang2@arm.com/mbox/",
    "series": [
        {
            "id": 10907,
            "url": "https://patches.dpdk.org/api/series/10907/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10907",
            "date": "2020-07-09T06:12:09",
            "name": "ring clean up",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/10907/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/73577/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/73577/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EFA6BA0526;\n\tThu,  9 Jul 2020 08:12:55 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4A2ED1DB89;\n\tThu,  9 Jul 2020 08:12:43 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by dpdk.org (Postfix) with ESMTP id 1F3221DB6E\n for <dev@dpdk.org>; Thu,  9 Jul 2020 08:12:42 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id AE0BB1063;\n Wed,  8 Jul 2020 23:12:41 -0700 (PDT)",
            "from net-arm-n1sdp.shanghai.arm.com (net-arm-n1sdp.shanghai.arm.com\n [10.169.208.213])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 9C4733F71E;\n Wed,  8 Jul 2020 23:12:39 -0700 (PDT)"
        ],
        "From": "Feifei Wang <feifei.wang2@arm.com>",
        "To": "Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "Cc": "dev@dpdk.org, nd@arm.com, Ruifeng.wang@arm.com,\n Feifei Wang <feifei.wang2@arm.com>",
        "Date": "Thu,  9 Jul 2020 01:12:12 -0500",
        "Message-Id": "<20200709061212.15931-4-feifei.wang2@arm.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200709061212.15931-1-feifei.wang2@arm.com>",
        "References": "<20200703102651.8918-1>\n <20200709061212.15931-1-feifei.wang2@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v2 3/3] ring: use element APIs to implement\n\tlegacy APIs",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Use rte_ring_xxx_elem_xxx APIs to replace legacy API implementation.\nThis reduces code duplication and improves code maintenance.\n\nTests done on Arm, x86 [1] and PPC [2] do not indicate performance\ndegradation.\n[1] https://mails.dpdk.org/archives/dev/2020-July/173780.html\n[2] https://mails.dpdk.org/archives/dev/2020-July/173863.html\n\nSigned-off-by: Feifei Wang <feifei.wang2@arm.com>\nReviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\nTested-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\nTested-by: David Christensen <drc@linux.vnet.ibm.com>\nAcked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\nv2:\n1. add performance tests in other platforms. (Konstantin, David)\n2. removing the perf data from the commit message. (Honnappa)\n3. fix the error of the pointer (*obj) passing.\t(ci failed)\n\n lib/librte_ring/rte_ring.h | 284 ++++---------------------------------\n 1 file changed, 30 insertions(+), 254 deletions(-)",
    "diff": "diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h\nindex 35f3f8c42..da17ed6d7 100644\n--- a/lib/librte_ring/rte_ring.h\n+++ b/lib/librte_ring/rte_ring.h\n@@ -191,168 +191,6 @@ void rte_ring_free(struct rte_ring *r);\n  */\n void rte_ring_dump(FILE *f, const struct rte_ring *r);\n \n-/* the actual enqueue of pointers on the ring.\n- * Placed here since identical code needed in both\n- * single and multi producer enqueue functions */\n-#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \\\n-\tunsigned int i; \\\n-\tconst uint32_t size = (r)->size; \\\n-\tuint32_t idx = prod_head & (r)->mask; \\\n-\tobj_type *ring = (obj_type *)ring_start; \\\n-\tif (likely(idx + n < size)) { \\\n-\t\tfor (i = 0; i < (n & ~0x3); i += 4, idx += 4) { \\\n-\t\t\tring[idx] = obj_table[i]; \\\n-\t\t\tring[idx + 1] = obj_table[i + 1]; \\\n-\t\t\tring[idx + 2] = obj_table[i + 2]; \\\n-\t\t\tring[idx + 3] = obj_table[i + 3]; \\\n-\t\t} \\\n-\t\tswitch (n & 0x3) { \\\n-\t\tcase 3: \\\n-\t\t\tring[idx++] = obj_table[i++]; /* fallthrough */ \\\n-\t\tcase 2: \\\n-\t\t\tring[idx++] = obj_table[i++]; /* fallthrough */ \\\n-\t\tcase 1: \\\n-\t\t\tring[idx++] = obj_table[i++]; \\\n-\t\t} \\\n-\t} else { \\\n-\t\tfor (i = 0; idx < size; i++, idx++)\\\n-\t\t\tring[idx] = obj_table[i]; \\\n-\t\tfor (idx = 0; i < n; i++, idx++) \\\n-\t\t\tring[idx] = obj_table[i]; \\\n-\t} \\\n-} while (0)\n-\n-/* the actual copy of pointers on the ring to obj_table.\n- * Placed here since identical code needed in both\n- * single and multi consumer dequeue functions */\n-#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \\\n-\tunsigned int i; \\\n-\tuint32_t idx = cons_head & (r)->mask; \\\n-\tconst uint32_t size = (r)->size; \\\n-\tobj_type *ring = (obj_type *)ring_start; \\\n-\tif (likely(idx + n < size)) { \\\n-\t\tfor (i = 0; i < (n & ~0x3); i += 4, idx += 4) {\\\n-\t\t\tobj_table[i] = ring[idx]; \\\n-\t\t\tobj_table[i + 1] = ring[idx + 1]; \\\n-\t\t\tobj_table[i + 2] = ring[idx + 2]; \\\n-\t\t\tobj_table[i + 3] = ring[idx + 3]; \\\n-\t\t} \\\n-\t\tswitch (n & 0x3) { \\\n-\t\tcase 3: \\\n-\t\t\tobj_table[i++] = ring[idx++]; /* fallthrough */ \\\n-\t\tcase 2: \\\n-\t\t\tobj_table[i++] = ring[idx++]; /* fallthrough */ \\\n-\t\tcase 1: \\\n-\t\t\tobj_table[i++] = ring[idx++]; \\\n-\t\t} \\\n-\t} else { \\\n-\t\tfor (i = 0; idx < size; i++, idx++) \\\n-\t\t\tobj_table[i] = ring[idx]; \\\n-\t\tfor (idx = 0; i < n; i++, idx++) \\\n-\t\t\tobj_table[i] = ring[idx]; \\\n-\t} \\\n-} while (0)\n-\n-/* Between load and load. 
there might be cpu reorder in weak model\n- * (powerpc/arm).\n- * There are 2 choices for the users\n- * 1.use rmb() memory barrier\n- * 2.use one-direction load_acquire/store_release barrier,defined by\n- * CONFIG_RTE_USE_C11_MEM_MODEL=y\n- * It depends on performance test results.\n- * By default, move common functions to rte_ring_generic.h\n- */\n-#ifdef RTE_USE_C11_MEM_MODEL\n-#include \"rte_ring_c11_mem.h\"\n-#else\n-#include \"rte_ring_generic.h\"\n-#endif\n-\n-/**\n- * @internal Enqueue several objects on the ring\n- *\n-  * @param r\n- *   A pointer to the ring structure.\n- * @param obj_table\n- *   A pointer to a table of void * pointers (objects).\n- * @param n\n- *   The number of objects to add in the ring from the obj_table.\n- * @param behavior\n- *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring\n- *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring\n- * @param is_sp\n- *   Indicates whether to use single producer or multi-producer head update\n- * @param free_space\n- *   returns the amount of space after the enqueue operation has finished\n- * @return\n- *   Actual number of objects enqueued.\n- *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n- */\n-static __rte_always_inline unsigned int\n-__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,\n-\t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n-\t\t unsigned int is_sp, unsigned int *free_space)\n-{\n-\tuint32_t prod_head, prod_next;\n-\tuint32_t free_entries;\n-\n-\tn = __rte_ring_move_prod_head(r, is_sp, n, behavior,\n-\t\t\t&prod_head, &prod_next, &free_entries);\n-\tif (n == 0)\n-\t\tgoto end;\n-\n-\tENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);\n-\n-\tupdate_tail(&r->prod, prod_head, prod_next, is_sp, 1);\n-end:\n-\tif (free_space != NULL)\n-\t\t*free_space = free_entries - n;\n-\treturn n;\n-}\n-\n-/**\n- * @internal Dequeue several objects from the ring\n- *\n- * @param r\n- *   A pointer to the ring structure.\n- * @param obj_table\n- *   A pointer to a table of void * pointers (objects).\n- * @param n\n- *   The number of objects to pull from the ring.\n- * @param behavior\n- *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring\n- *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring\n- * @param is_sc\n- *   Indicates whether to use single consumer or multi-consumer head update\n- * @param available\n- *   returns the number of remaining ring entries after the dequeue has finished\n- * @return\n- *   - Actual number of objects dequeued.\n- *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n- */\n-static __rte_always_inline unsigned int\n-__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,\n-\t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n-\t\t unsigned int is_sc, unsigned int *available)\n-{\n-\tuint32_t cons_head, cons_next;\n-\tuint32_t entries;\n-\n-\tn = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,\n-\t\t\t&cons_head, &cons_next, &entries);\n-\tif (n == 0)\n-\t\tgoto end;\n-\n-\tDEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);\n-\n-\tupdate_tail(&r->cons, cons_head, cons_next, is_sc, 0);\n-\n-end:\n-\tif (available != NULL)\n-\t\t*available = entries - n;\n-\treturn n;\n-}\n-\n /**\n  * Enqueue several objects on the ring (multi-producers safe).\n  *\n@@ -375,8 +213,8 @@ static __rte_always_inline unsigned int\n rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int 
*free_space)\n {\n-\treturn __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tRTE_RING_SYNC_MT, free_space);\n+\treturn rte_ring_mp_enqueue_bulk_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, free_space);\n }\n \n /**\n@@ -398,8 +236,8 @@ static __rte_always_inline unsigned int\n rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tRTE_RING_SYNC_ST, free_space);\n+\treturn rte_ring_sp_enqueue_bulk_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, free_space);\n }\n \n /**\n@@ -425,24 +263,8 @@ static __rte_always_inline unsigned int\n rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t      unsigned int n, unsigned int *free_space)\n {\n-\tswitch (r->prod.sync_type) {\n-\tcase RTE_RING_SYNC_MT:\n-\t\treturn rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);\n-\tcase RTE_RING_SYNC_ST:\n-\t\treturn rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);\n-#ifdef ALLOW_EXPERIMENTAL_API\n-\tcase RTE_RING_SYNC_MT_RTS:\n-\t\treturn rte_ring_mp_rts_enqueue_bulk(r, obj_table, n,\n-\t\t\tfree_space);\n-\tcase RTE_RING_SYNC_MT_HTS:\n-\t\treturn rte_ring_mp_hts_enqueue_bulk(r, obj_table, n,\n-\t\t\tfree_space);\n-#endif\n-\t}\n-\n-\t/* valid ring should never reach this point */\n-\tRTE_ASSERT(0);\n-\treturn 0;\n+\treturn rte_ring_enqueue_bulk_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, free_space);\n }\n \n /**\n@@ -462,7 +284,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n static __rte_always_inline int\n rte_ring_mp_enqueue(struct rte_ring *r, void *obj)\n {\n-\treturn rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;\n+\treturn rte_ring_mp_enqueue_elem(r, &obj, sizeof(void *));\n }\n \n /**\n@@ -479,7 +301,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)\n static __rte_always_inline int\n rte_ring_sp_enqueue(struct rte_ring *r, void *obj)\n {\n-\treturn rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;\n+\treturn rte_ring_sp_enqueue_elem(r, &obj, sizeof(void *));\n }\n \n /**\n@@ -500,7 +322,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)\n static __rte_always_inline int\n rte_ring_enqueue(struct rte_ring *r, void *obj)\n {\n-\treturn rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 
0 : -ENOBUFS;\n+\treturn rte_ring_enqueue_elem(r, &obj, sizeof(void *));\n }\n \n /**\n@@ -525,8 +347,8 @@ static __rte_always_inline unsigned int\n rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tRTE_RING_SYNC_MT, available);\n+\treturn rte_ring_mc_dequeue_bulk_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, available);\n }\n \n /**\n@@ -549,8 +371,8 @@ static __rte_always_inline unsigned int\n rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tRTE_RING_SYNC_ST, available);\n+\treturn rte_ring_sc_dequeue_bulk_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, available);\n }\n \n /**\n@@ -576,22 +398,8 @@ static __rte_always_inline unsigned int\n rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,\n \t\tunsigned int *available)\n {\n-\tswitch (r->cons.sync_type) {\n-\tcase RTE_RING_SYNC_MT:\n-\t\treturn rte_ring_mc_dequeue_bulk(r, obj_table, n, available);\n-\tcase RTE_RING_SYNC_ST:\n-\t\treturn rte_ring_sc_dequeue_bulk(r, obj_table, n, available);\n-#ifdef ALLOW_EXPERIMENTAL_API\n-\tcase RTE_RING_SYNC_MT_RTS:\n-\t\treturn rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available);\n-\tcase RTE_RING_SYNC_MT_HTS:\n-\t\treturn rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available);\n-#endif\n-\t}\n-\n-\t/* valid ring should never reach this point */\n-\tRTE_ASSERT(0);\n-\treturn 0;\n+\treturn rte_ring_dequeue_bulk_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, available);\n }\n \n /**\n@@ -612,7 +420,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,\n static __rte_always_inline int\n rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)\n {\n-\treturn rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL)  ? 0 : -ENOENT;\n+\treturn rte_ring_mc_dequeue_elem(r, obj_p, sizeof(void *));\n }\n \n /**\n@@ -630,7 +438,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)\n static __rte_always_inline int\n rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)\n {\n-\treturn rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;\n+\treturn rte_ring_sc_dequeue_elem(r, obj_p, sizeof(void *));\n }\n \n /**\n@@ -652,7 +460,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)\n static __rte_always_inline int\n rte_ring_dequeue(struct rte_ring *r, void **obj_p)\n {\n-\treturn rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 
0 : -ENOENT;\n+\treturn rte_ring_dequeue_elem(r, obj_p, sizeof(void *));\n }\n \n /**\n@@ -860,8 +668,8 @@ static __rte_always_inline unsigned int\n rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_do_enqueue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);\n+\treturn rte_ring_mp_enqueue_burst_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, free_space);\n }\n \n /**\n@@ -883,8 +691,8 @@ static __rte_always_inline unsigned int\n rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_do_enqueue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);\n+\treturn rte_ring_sp_enqueue_burst_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, free_space);\n }\n \n /**\n@@ -910,24 +718,8 @@ static __rte_always_inline unsigned int\n rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,\n \t\t      unsigned int n, unsigned int *free_space)\n {\n-\tswitch (r->prod.sync_type) {\n-\tcase RTE_RING_SYNC_MT:\n-\t\treturn rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);\n-\tcase RTE_RING_SYNC_ST:\n-\t\treturn rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);\n-#ifdef ALLOW_EXPERIMENTAL_API\n-\tcase RTE_RING_SYNC_MT_RTS:\n-\t\treturn rte_ring_mp_rts_enqueue_burst(r, obj_table, n,\n-\t\t\tfree_space);\n-\tcase RTE_RING_SYNC_MT_HTS:\n-\t\treturn rte_ring_mp_hts_enqueue_burst(r, obj_table, n,\n-\t\t\tfree_space);\n-#endif\n-\t}\n-\n-\t/* valid ring should never reach this point */\n-\tRTE_ASSERT(0);\n-\treturn 0;\n+\treturn rte_ring_enqueue_burst_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, free_space);\n }\n \n /**\n@@ -954,8 +746,8 @@ static __rte_always_inline unsigned int\n rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_do_dequeue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);\n+\treturn rte_ring_mc_dequeue_burst_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, available);\n }\n \n /**\n@@ -979,8 +771,8 @@ static __rte_always_inline unsigned int\n rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_do_dequeue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);\n+\treturn rte_ring_sc_dequeue_burst_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, available);\n }\n \n /**\n@@ -1006,24 +798,8 @@ static __rte_always_inline unsigned int\n rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\tswitch (r->cons.sync_type) {\n-\tcase RTE_RING_SYNC_MT:\n-\t\treturn rte_ring_mc_dequeue_burst(r, obj_table, n, available);\n-\tcase RTE_RING_SYNC_ST:\n-\t\treturn rte_ring_sc_dequeue_burst(r, obj_table, n, available);\n-#ifdef ALLOW_EXPERIMENTAL_API\n-\tcase RTE_RING_SYNC_MT_RTS:\n-\t\treturn rte_ring_mc_rts_dequeue_burst(r, obj_table, n,\n-\t\t\tavailable);\n-\tcase RTE_RING_SYNC_MT_HTS:\n-\t\treturn rte_ring_mc_hts_dequeue_burst(r, obj_table, n,\n-\t\t\tavailable);\n-#endif\n-\t}\n-\n-\t/* valid ring should never reach this point */\n-\tRTE_ASSERT(0);\n-\treturn 0;\n+\treturn rte_ring_dequeue_burst_elem(r, obj_table, sizeof(void *),\n+\t\t\tn, available);\n }\n \n #ifdef __cplusplus\n",
    "prefixes": [
        "v2",
        "3/3"
    ]
}
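
The JSON above is directly consumable by tooling. A small sketch, again using Python requests, that reads two fields shown in this response: "check" (the aggregate CI result, "fail" for this patch) and "mbox" (a raw mbox download of the patch, suitable for git am). The output file name is illustrative.

import requests

resp = requests.get("https://patches.dpdk.org/api/patches/73577/")
resp.raise_for_status()
patch = resp.json()

# Aggregate CI verdict for the patch; per-check details live at patch["checks"].
print("check:", patch["check"])

# Save the raw mbox so the patch can be applied later with: git am 73577.mbox
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open(f"{patch['id']}.mbox", "wb") as f:
    f.write(mbox.content)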