get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
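
Updating normally requires an authenticated request. Below is a minimal sketch of driving this endpoint from Python; it assumes the requests library and Patchwork's token authentication scheme, and the token value and the fields being set are placeholders, not values taken from this page:

import requests

API = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # placeholder; generate a real token in your Patchwork profile

# PATCH performs a partial update: only the fields supplied are changed.
resp = requests.patch(
    f"{API}/patches/21542/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded", "archived": True},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])

A PUT to the same URL is the full-update variant, replacing the writable fields as a whole. The transcript below shows a plain, unauthenticated GET against the same patch.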

GET /api/patches/21542/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 21542,
    "url": "http://patches.dpdk.org/api/patches/21542/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20170307113217.11077-13-bruce.richardson@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20170307113217.11077-13-bruce.richardson@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20170307113217.11077-13-bruce.richardson@intel.com",
    "date": "2017-03-07T11:32:15",
    "name": "[dpdk-dev,v2,12/14] ring: separate out head index manipulation for enq/deq",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "9b16b7e1a4ec4e1b582857b89d184f5ce79deba0",
    "submitter": {
        "id": 20,
        "url": "http://patches.dpdk.org/api/people/20/?format=api",
        "name": "Bruce Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20170307113217.11077-13-bruce.richardson@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/21542/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/21542/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id CA8A3F96B;\n\tTue,  7 Mar 2017 12:33:57 +0100 (CET)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby dpdk.org (Postfix) with ESMTP id 0A7C4F60C\n\tfor <dev@dpdk.org>; Tue,  7 Mar 2017 12:33:37 +0100 (CET)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga105.fm.intel.com with ESMTP; 07 Mar 2017 03:33:37 -0800",
            "from sivswdev01.ir.intel.com ([10.237.217.45])\n\tby FMSMGA003.fm.intel.com with ESMTP; 07 Mar 2017 03:33:35 -0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.35,258,1484035200\"; d=\"scan'208\";a=\"831864361\"",
        "From": "Bruce Richardson <bruce.richardson@intel.com>",
        "To": "olivier.matz@6wind.com",
        "Cc": "jerin.jacob@caviumnetworks.com, dev@dpdk.org,\n\tBruce Richardson <bruce.richardson@intel.com>",
        "Date": "Tue,  7 Mar 2017 11:32:15 +0000",
        "Message-Id": "<20170307113217.11077-13-bruce.richardson@intel.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20170307113217.11077-1-bruce.richardson@intel.com>",
        "References": "<20170223172407.27664-1-bruce.richardson@intel.com>\n\t<20170307113217.11077-1-bruce.richardson@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 12/14] ring: separate out head index\n\tmanipulation for enq/deq",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "We can write a single common function for head manipulation for enq\nand a common one for deq, allowing us to have a single worker function\nfor enq and deq, rather than two of each. Update all other inline\nfunctions to use the new functions.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\n---\n lib/librte_ring/rte_ring.c |   4 +-\n lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------\n 2 files changed, 149 insertions(+), 183 deletions(-)",
    "diff": "diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c\nindex 18fb644..4776079 100644\n--- a/lib/librte_ring/rte_ring.c\n+++ b/lib/librte_ring/rte_ring.c\n@@ -138,8 +138,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,\n \tif (ret < 0 || ret >= (int)sizeof(r->name))\n \t\treturn -ENAMETOOLONG;\n \tr->flags = flags;\n-\tr->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);\n-\tr->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);\n+\tr->prod.sp_enqueue = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;\n+\tr->cons.sc_dequeue = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;\n \tr->size = count;\n \tr->mask = count - 1;\n \tr->prod.head = r->cons.head = 0;\ndiff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h\nindex 4e5219a..d2ebc9d 100644\n--- a/lib/librte_ring/rte_ring.h\n+++ b/lib/librte_ring/rte_ring.h\n@@ -172,6 +172,12 @@ struct rte_ring {\n #define RING_F_SC_DEQ 0x0002 /**< The default dequeue is \"single-consumer\". */\n #define RTE_RING_SZ_MASK  (unsigned)(0x0fffffff) /**< Ring size mask */\n \n+/* @internal defines for passing to the enqueue dequeue worker functions */\n+#define __IS_SP 1\n+#define __IS_MP 0\n+#define __IS_SC 1\n+#define __IS_MC 0\n+\n /**\n  * Calculate the memory size needed for a ring\n  *\n@@ -290,7 +296,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);\n #define ENQUEUE_PTRS() do { \\\n \tunsigned int i; \\\n \tconst uint32_t size = r->size; \\\n-\tuint32_t idx = prod_head & mask; \\\n+\tuint32_t idx = prod_head & r->mask; \\\n \tif (likely(idx + n < size)) { \\\n \t\tfor (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \\\n \t\t\tr->ring[idx] = obj_table[i]; \\\n@@ -316,7 +322,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);\n  * single and multi consumer dequeue functions */\n #define DEQUEUE_PTRS() do { \\\n \tunsigned int i; \\\n-\tuint32_t idx = cons_head & mask; \\\n+\tuint32_t idx = cons_head & r->mask; \\\n \tconst uint32_t size = r->size; \\\n \tif (likely(idx + n < size)) { \\\n \t\tfor (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\\\n@@ -339,83 +345,72 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);\n } while (0)\n \n /**\n- * @internal Enqueue several objects on the ring (multi-producers safe).\n- *\n- * This function uses a \"compare and set\" instruction to move the\n- * producer index atomically.\n+ * @internal This function updates the producer head for enqueue\n  *\n  * @param r\n- *   A pointer to the ring structure.\n- * @param obj_table\n- *   A pointer to a table of void * pointers (objects).\n+ *   A pointer to the ring structure\n+ * @param is_sp\n+ *   Indicates whether multi-producer path is needed or not\n  * @param n\n- *   The number of objects to add in the ring from the obj_table.\n+ *   The number of elements we will want to enqueue, i.e. how far should the\n+ *   head be moved\n  * @param behavior\n  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring\n- *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring\n+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring\n+ * @param old_head\n+ *   Returns head value as it was before the move, i.e. where enqueue starts\n+ * @param new_head\n+ *   Returns the current/new head value i.e. 
where enqueue finishes\n+ * @param free_entries\n+ *   Returns the amount of free space in the ring BEFORE head was moved\n  * @return\n  *   Actual number of objects enqueued.\n  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-static inline unsigned int __attribute__((always_inline))\n-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n-\t\t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n-\t\t\t unsigned int *free_space)\n+static inline __attribute__((always_inline)) unsigned int\n+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,\n+\t\tunsigned int n, enum rte_ring_queue_behavior behavior,\n+\t\tuint32_t *old_head, uint32_t *new_head,\n+\t\tuint32_t *free_entries)\n {\n-\tuint32_t prod_head, prod_next;\n-\tuint32_t cons_tail, free_entries;\n-\tconst unsigned int max = n;\n+\tconst uint32_t mask = r->mask;\n+\tunsigned int max = n;\n \tint success;\n-\tuint32_t mask = r->mask;\n \n-\t/* move prod.head atomically */\n \tdo {\n \t\t/* Reset n to the initial burst count */\n \t\tn = max;\n \n-\t\tprod_head = r->prod.head;\n-\t\tcons_tail = r->cons.tail;\n+\t\t*old_head = r->prod.head;\n+\t\tconst uint32_t cons_tail = r->cons.tail;\n \t\t/* The subtraction is done between two unsigned 32bits value\n \t\t * (the result is always modulo 32 bits even if we have\n-\t\t * prod_head > cons_tail). So 'free_entries' is always between 0\n+\t\t * *old_head > cons_tail). So 'free_entries' is always between 0\n \t\t * and size(ring)-1. */\n-\t\tfree_entries = (mask + cons_tail - prod_head);\n+\t\t*free_entries = (mask + cons_tail - *old_head);\n \n \t\t/* check that we have enough room in ring */\n-\t\tif (unlikely(n > free_entries))\n+\t\tif (unlikely(n > *free_entries))\n \t\t\tn = (behavior == RTE_RING_QUEUE_FIXED) ?\n-\t\t\t\t\t0 : free_entries;\n+\t\t\t\t\t0 : *free_entries;\n \n \t\tif (n == 0)\n-\t\t\tgoto end;\n-\n-\t\tprod_next = prod_head + n;\n-\t\tsuccess = rte_atomic32_cmpset(&r->prod.head, prod_head,\n-\t\t\t\t\t      prod_next);\n+\t\t\treturn 0;\n+\n+\t\t*new_head = *old_head + n;\n+\t\tif (is_sp)\n+\t\t\tr->prod.head = *new_head, success = 1;\n+\t\telse\n+\t\t\tsuccess = rte_atomic32_cmpset(&r->prod.head,\n+\t\t\t\t\t*old_head, *new_head);\n \t} while (unlikely(success == 0));\n-\n-\t/* write entries in ring */\n-\tENQUEUE_PTRS();\n-\trte_smp_wmb();\n-\n-\t/*\n-\t * If there are other enqueues in progress that preceded us,\n-\t * we need to wait for them to complete\n-\t */\n-\twhile (unlikely(r->prod.tail != prod_head))\n-\t\trte_pause();\n-\n-\tr->prod.tail = prod_next;\n-end:\n-\tif (free_space != NULL)\n-\t\t*free_space = free_entries - n;\n \treturn n;\n }\n \n /**\n- * @internal Enqueue several objects on a ring (NOT multi-producers safe).\n+ * @internal Enqueue several objects on the ring\n  *\n- * @param r\n+  * @param r\n  *   A pointer to the ring structure.\n  * @param obj_table\n  *   A pointer to a table of void * pointers (objects).\n@@ -423,44 +418,40 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n  *   The number of objects to add in the ring from the obj_table.\n  * @param behavior\n  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring\n- *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring\n+ *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring\n+ * @param is_sp\n+ *   Indicates whether to use single producer or multi-producer head update\n+ * @param free_space\n+ *   returns the amount of space after the enqueue 
operation has finished\n  * @return\n  *   Actual number of objects enqueued.\n  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-static inline unsigned int __attribute__((always_inline))\n-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n-\t\t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n-\t\t\t unsigned int *free_space)\n+static inline __attribute__((always_inline)) unsigned int\n+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,\n+\t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n+\t\t int is_sp, unsigned int *free_space)\n {\n-\tuint32_t prod_head, cons_tail;\n-\tuint32_t prod_next, free_entries;\n-\tuint32_t mask = r->mask;\n-\n-\tprod_head = r->prod.head;\n-\tcons_tail = r->cons.tail;\n-\t/* The subtraction is done between two unsigned 32bits value\n-\t * (the result is always modulo 32 bits even if we have\n-\t * prod_head > cons_tail). So 'free_entries' is always between 0\n-\t * and size(ring)-1. */\n-\tfree_entries = mask + cons_tail - prod_head;\n-\n-\t/* check that we have enough room in ring */\n-\tif (unlikely(n > free_entries))\n-\t\tn = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;\n+\tuint32_t prod_head, prod_next;\n+\tuint32_t free_entries;\n \n+\tn = __rte_ring_move_prod_head(r, is_sp, n, behavior,\n+\t\t\t&prod_head, &prod_next, &free_entries);\n \tif (n == 0)\n \t\tgoto end;\n \n-\n-\tprod_next = prod_head + n;\n-\tr->prod.head = prod_next;\n-\n-\t/* write entries in ring */\n \tENQUEUE_PTRS();\n \trte_smp_wmb();\n \n+\t/*\n+\t * If there are other enqueues in progress that preceded us,\n+\t * we need to wait for them to complete\n+\t */\n+\twhile (unlikely(r->prod.tail != prod_head))\n+\t\trte_pause();\n+\n \tr->prod.tail = prod_next;\n+\n end:\n \tif (free_space != NULL)\n \t\t*free_space = free_entries - n;\n@@ -468,130 +459,112 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n }\n \n /**\n- * @internal Dequeue several objects from a ring (multi-consumers safe). When\n- * the request objects are more than the available objects, only dequeue the\n- * actual number of objects\n- *\n- * This function uses a \"compare and set\" instruction to move the\n- * consumer index atomically.\n+ * @internal This function updates the consumer head for dequeue\n  *\n  * @param r\n- *   A pointer to the ring structure.\n- * @param obj_table\n- *   A pointer to a table of void * pointers (objects) that will be filled.\n+ *   A pointer to the ring structure\n+ * @param is_sc\n+ *   Indicates whether multi-consumer path is needed or not\n  * @param n\n- *   The number of objects to dequeue from the ring to the obj_table.\n+ *   The number of elements we will want to enqueue, i.e. how far should the\n+ *   head be moved\n  * @param behavior\n  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring\n- *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring\n+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring\n+ * @param old_head\n+ *   Returns head value as it was before the move, i.e. where dequeue starts\n+ * @param new_head\n+ *   Returns the current/new head value i.e. 
where dequeue finishes\n+ * @param entries\n+ *   Returns the number of entries in the ring BEFORE head was moved\n  * @return\n  *   - Actual number of objects dequeued.\n  *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-\n-static inline unsigned int __attribute__((always_inline))\n-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,\n-\t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n-\t\t unsigned int *available)\n+static inline __attribute__((always_inline)) unsigned int\n+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,\n+\t\tunsigned int n, enum rte_ring_queue_behavior behavior,\n+\t\tuint32_t *old_head, uint32_t *new_head,\n+\t\tuint32_t *entries)\n {\n-\tuint32_t cons_head, prod_tail;\n-\tuint32_t cons_next, entries;\n-\tconst unsigned max = n;\n+\tunsigned int max = n;\n \tint success;\n-\tuint32_t mask = r->mask;\n \n \t/* move cons.head atomically */\n \tdo {\n \t\t/* Restore n as it may change every loop */\n \t\tn = max;\n \n-\t\tcons_head = r->cons.head;\n-\t\tprod_tail = r->prod.tail;\n+\t\t*old_head = r->cons.head;\n+\t\tconst uint32_t prod_tail = r->prod.tail;\n \t\t/* The subtraction is done between two unsigned 32bits value\n \t\t * (the result is always modulo 32 bits even if we have\n \t\t * cons_head > prod_tail). So 'entries' is always between 0\n \t\t * and size(ring)-1. */\n-\t\tentries = (prod_tail - cons_head);\n+\t\t*entries = (prod_tail - *old_head);\n \n \t\t/* Set the actual entries for dequeue */\n-\t\tif (n > entries)\n-\t\t\tn = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;\n+\t\tif (n > *entries)\n+\t\t\tn = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;\n \n \t\tif (unlikely(n == 0))\n-\t\t\tgoto end;\n-\n-\t\tcons_next = cons_head + n;\n-\t\tsuccess = rte_atomic32_cmpset(&r->cons.head, cons_head,\n-\t\t\t\t\t      cons_next);\n+\t\t\treturn 0;\n+\n+\t\t*new_head = *old_head + n;\n+\t\tif (is_sc)\n+\t\t\tr->cons.head = *new_head, success = 1;\n+\t\telse\n+\t\t\tsuccess = rte_atomic32_cmpset(&r->cons.head, *old_head,\n+\t\t\t\t\t*new_head);\n \t} while (unlikely(success == 0));\n-\n-\t/* copy in table */\n-\tDEQUEUE_PTRS();\n-\trte_smp_rmb();\n-\n-\t/*\n-\t * If there are other dequeues in progress that preceded us,\n-\t * we need to wait for them to complete\n-\t */\n-\twhile (unlikely(r->cons.tail != cons_head))\n-\t\trte_pause();\n-\n-\tr->cons.tail = cons_next;\n-end:\n-\tif (available != NULL)\n-\t\t*available = entries - n;\n \treturn n;\n }\n \n /**\n- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).\n- * When the request objects are more than the available objects, only dequeue\n- * the actual number of objects\n+ * @internal Dequeue several objects from the ring\n  *\n  * @param r\n  *   A pointer to the ring structure.\n  * @param obj_table\n- *   A pointer to a table of void * pointers (objects) that will be filled.\n+ *   A pointer to a table of void * pointers (objects).\n  * @param n\n- *   The number of objects to dequeue from the ring to the obj_table.\n+ *   The number of objects to pull from the ring.\n  * @param behavior\n  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring\n- *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring\n+ *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring\n+ * @param is_sc\n+ *   Indicates whether to use single consumer or multi-consumer head update\n+ * @param available\n+ *   returns the number of remaining ring entries after the dequeue has finished\n  
* @return\n  *   - Actual number of objects dequeued.\n  *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-static inline unsigned int __attribute__((always_inline))\n-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,\n+static inline __attribute__((always_inline)) unsigned int\n+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,\n \t\t unsigned int n, enum rte_ring_queue_behavior behavior,\n-\t\t unsigned int *available)\n+\t\t int is_mp, unsigned int *available)\n {\n-\tuint32_t cons_head, prod_tail;\n-\tuint32_t cons_next, entries;\n-\tuint32_t mask = r->mask;\n-\n-\tcons_head = r->cons.head;\n-\tprod_tail = r->prod.tail;\n-\t/* The subtraction is done between two unsigned 32bits value\n-\t * (the result is always modulo 32 bits even if we have\n-\t * cons_head > prod_tail). So 'entries' is always between 0\n-\t * and size(ring)-1. */\n-\tentries = prod_tail - cons_head;\n-\n-\tif (n > entries)\n-\t\tn = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;\n-\n-\tif (unlikely(entries == 0))\n-\t\tgoto end;\n+\tuint32_t cons_head, cons_next;\n+\tuint32_t entries;\n \n-\tcons_next = cons_head + n;\n-\tr->cons.head = cons_next;\n+\tn = __rte_ring_move_cons_head(r, is_mp, n, behavior,\n+\t\t\t&cons_head, &cons_next, &entries);\n+\tif (n == 0)\n+\t\tgoto end;\n \n-\t/* copy in table */\n \tDEQUEUE_PTRS();\n \trte_smp_rmb();\n \n+\t/*\n+\t * If there are other enqueues in progress that preceded us,\n+\t * we need to wait for them to complete\n+\t */\n+\twhile (unlikely(r->cons.tail != cons_head))\n+\t\trte_pause();\n+\n \tr->cons.tail = cons_next;\n+\n end:\n \tif (available != NULL)\n \t\t*available = entries - n;\n@@ -617,8 +590,8 @@ static inline unsigned int __attribute__((always_inline))\n rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tfree_space);\n+\treturn __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n+\t\t\t__IS_MP, free_space);\n }\n \n /**\n@@ -637,8 +610,8 @@ static inline unsigned int __attribute__((always_inline))\n rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tfree_space);\n+\treturn __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n+\t\t\t__IS_SP, free_space);\n }\n \n /**\n@@ -661,10 +634,8 @@ static inline unsigned int __attribute__((always_inline))\n rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t      unsigned int n, unsigned int *free_space)\n {\n-\tif (r->prod.sp_enqueue)\n-\t\treturn rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);\n-\telse\n-\t\treturn rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);\n+\treturn __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n+\t\t\tr->prod.sp_enqueue, free_space);\n }\n \n /**\n@@ -744,8 +715,8 @@ static inline unsigned int __attribute__((always_inline))\n rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tavailable);\n+\treturn __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n+\t\t\t__IS_MC, available);\n }\n \n /**\n@@ -765,8 +736,8 @@ static inline unsigned int __attribute__((always_inline))\n rte_ring_sc_dequeue_bulk(struct rte_ring *r, 
void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n-\t\t\tavailable);\n+\treturn __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n+\t\t\t__IS_SC, available);\n }\n \n /**\n@@ -789,10 +760,8 @@ static inline unsigned int __attribute__((always_inline))\n rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,\n \t\tunsigned int *available)\n {\n-\tif (r->cons.sc_dequeue)\n-\t\treturn rte_ring_sc_dequeue_bulk(r, obj_table, n, available);\n-\telse\n-\t\treturn rte_ring_mc_dequeue_bulk(r, obj_table, n, available);\n+\treturn __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,\n+\t\t\t\tr->cons.sc_dequeue, available);\n }\n \n /**\n@@ -975,8 +944,8 @@ static inline unsigned __attribute__((always_inline))\n rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_mp_do_enqueue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, free_space);\n+\treturn __rte_ring_do_enqueue(r, obj_table, n,\n+\t\t\tRTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);\n }\n \n /**\n@@ -995,8 +964,8 @@ static inline unsigned __attribute__((always_inline))\n rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned int n, unsigned int *free_space)\n {\n-\treturn __rte_ring_sp_do_enqueue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, free_space);\n+\treturn __rte_ring_do_enqueue(r, obj_table, n,\n+\t\t\tRTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);\n }\n \n /**\n@@ -1019,10 +988,8 @@ static inline unsigned __attribute__((always_inline))\n rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,\n \t\t      unsigned int n, unsigned int *free_space)\n {\n-\tif (r->prod.sp_enqueue)\n-\t\treturn rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);\n-\telse\n-\t\treturn rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);\n+\treturn __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,\n+\t\t\tr->prod.sp_enqueue, free_space);\n }\n \n /**\n@@ -1046,8 +1013,8 @@ static inline unsigned __attribute__((always_inline))\n rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_mc_do_dequeue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, available);\n+\treturn __rte_ring_do_dequeue(r, obj_table, n,\n+\t\t\tRTE_RING_QUEUE_VARIABLE, __IS_MC, available);\n }\n \n /**\n@@ -1068,8 +1035,8 @@ static inline unsigned __attribute__((always_inline))\n rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\treturn __rte_ring_sc_do_dequeue(r, obj_table, n,\n-\t\t\tRTE_RING_QUEUE_VARIABLE, available);\n+\treturn __rte_ring_do_dequeue(r, obj_table, n,\n+\t\t\tRTE_RING_QUEUE_VARIABLE, __IS_SC, available);\n }\n \n /**\n@@ -1092,10 +1059,9 @@ static inline unsigned __attribute__((always_inline))\n rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,\n \t\tunsigned int n, unsigned int *available)\n {\n-\tif (r->cons.sc_dequeue)\n-\t\treturn rte_ring_sc_dequeue_burst(r, obj_table, n, available);\n-\telse\n-\t\treturn rte_ring_mc_dequeue_burst(r, obj_table, n, available);\n+\treturn __rte_ring_do_dequeue(r, obj_table, n,\n+\t\t\t\tRTE_RING_QUEUE_VARIABLE,\n+\t\t\t\tr->cons.sc_dequeue, available);\n }\n \n #ifdef __cplusplus\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "12/14"
    ]
}
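
Everything a client usually needs is in the record above: "mbox" points at an applyable copy of the patch, while "comments" and "checks" are further API endpoints. A minimal sketch of reading this record and saving the mbox so it can be applied with git am, again assuming the requests library (the output filename is an arbitrary choice):

import requests

# Fetch the patch record shown above.
resp = requests.get("http://patches.dpdk.org/api/patches/21542/", timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"], "->", patch["state"])  # "[dpdk-dev,v2,12/14] ring: ..." -> "superseded"

# Download the raw mbox referenced by the record; `git am 21542.mbox` applies it.
mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()
with open("21542.mbox", "wb") as f:
    f.write(mbox.content)

The top-level "check" field ("fail" here) is the aggregate of the individual CI results listed at the "checks" URL.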