get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/22353/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 22353,
    "url": "https://patches.dpdk.org/api/patches/22353/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20170324171008.29355-8-bruce.richardson@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20170324171008.29355-8-bruce.richardson@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20170324171008.29355-8-bruce.richardson@intel.com",
    "date": "2017-03-24T17:10:01",
    "name": "[dpdk-dev,v3,07/14] ring: make bulk and burst fn return vals consistent",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "dc248d37a95de4d9fc3b1c255591937b51104038",
    "submitter": {
        "id": 20,
        "url": "https://patches.dpdk.org/api/people/20/?format=api",
        "name": "Bruce Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20170324171008.29355-8-bruce.richardson@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/22353/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/22353/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id C53BFD14C;\n\tFri, 24 Mar 2017 18:12:44 +0100 (CET)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby dpdk.org (Postfix) with ESMTP id F24B2D40B\n\tfor <dev@dpdk.org>; Fri, 24 Mar 2017 18:12:42 +0100 (CET)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t24 Mar 2017 10:11:21 -0700",
            "from sivswdev01.ir.intel.com ([10.237.217.45])\n\tby fmsmga002.fm.intel.com with ESMTP; 24 Mar 2017 10:11:16 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=simple/simple;\n\td=intel.com; i=@intel.com; q=dns/txt; s=intel;\n\tt=1490375563; x=1521911563;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=Z53Rrkdprll+7LeqMh0Zzcj4pu3AjuXtJ2rO8hDPVkc=;\n\tb=NslDYPAWfTfb+96hiopccFJIWqL0mlMRouXxJGxZHsrCwkTn81wDSvFV\n\tNSHptfhMrrjcID67nKZQmkp1sMJNvQ==;",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos; i=\"5.36,215,1486454400\"; d=\"scan'208\";\n\ta=\"1146576836\"",
        "From": "Bruce Richardson <bruce.richardson@intel.com>",
        "To": "olivier.matz@6wind.com",
        "Cc": "dev@dpdk.org, jerin.jacob@caviumnetworks.com, thomas.monjalon@6wind.com, \n\tBruce Richardson <bruce.richardson@intel.com>",
        "Date": "Fri, 24 Mar 2017 17:10:01 +0000",
        "Message-Id": "<20170324171008.29355-8-bruce.richardson@intel.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20170324171008.29355-1-bruce.richardson@intel.com>",
        "References": "<20170307113217.11077-1-bruce.richardson@intel.com>\n\t<20170324171008.29355-1-bruce.richardson@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 07/14] ring: make bulk and burst fn return\n\tvals consistent",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The bulk fns for rings returns 0 for all elements enqueued and negative\nfor no space. Change that to make them consistent with the burst functions\nin returning the number of elements enqueued/dequeued, i.e. 0 or N.\nThis change also allows the return value from enq/deq to be used directly\nwithout a branch for error checking.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nAcked-by: Olivier Matz <olivier.matz@6wind.com>\n---\n doc/guides/rel_notes/release_17_05.rst             |  11 +++\n doc/guides/sample_app_ug/server_node_efd.rst       |   2 +-\n examples/load_balancer/runtime.c                   |  16 ++-\n .../client_server_mp/mp_client/client.c            |   8 +-\n .../client_server_mp/mp_server/main.c              |   2 +-\n examples/qos_sched/app_thread.c                    |   8 +-\n examples/server_node_efd/node/node.c               |   2 +-\n examples/server_node_efd/server/main.c             |   2 +-\n lib/librte_mempool/rte_mempool_ring.c              |  12 ++-\n lib/librte_ring/rte_ring.h                         | 109 +++++++--------------\n test/test-pipeline/pipeline_hash.c                 |   2 +-\n test/test-pipeline/runtime.c                       |   8 +-\n test/test/test_ring.c                              |  46 +++++----\n test/test/test_ring_perf.c                         |   8 +-\n 14 files changed, 106 insertions(+), 130 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst\nindex af907b8..a465c69 100644\n--- a/doc/guides/rel_notes/release_17_05.rst\n+++ b/doc/guides/rel_notes/release_17_05.rst\n@@ -130,6 +130,17 @@ API Changes\n   * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``\n   * removed the function ``rte_ring_set_water_mark`` as part of a general\n     removal of watermarks support in the library.\n+  * changed the return value of the enqueue and dequeue bulk functions to\n+    match that of the burst equivalents. In all cases, ring functions which\n+    operate on multiple packets now return the number of elements enqueued\n+    or dequeued, as appropriate. The updated functions are:\n+\n+    - ``rte_ring_mp_enqueue_bulk``\n+    - ``rte_ring_sp_enqueue_bulk``\n+    - ``rte_ring_enqueue_bulk``\n+    - ``rte_ring_mc_dequeue_bulk``\n+    - ``rte_ring_sc_dequeue_bulk``\n+    - ``rte_ring_dequeue_bulk``\n \n ABI Changes\n -----------\ndiff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst\nindex 9b69cfe..e3a63c8 100644\n--- a/doc/guides/sample_app_ug/server_node_efd.rst\n+++ b/doc/guides/sample_app_ug/server_node_efd.rst\n@@ -286,7 +286,7 @@ repeated infinitely.\n \n         cl = &nodes[node];\n         if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,\n-                cl_rx_buf[node].count) != 0){\n+                cl_rx_buf[node].count) != cl_rx_buf[node].count){\n             for (j = 0; j < cl_rx_buf[node].count; j++)\n                 rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);\n             cl->stats.rx_drop += cl_rx_buf[node].count;\ndiff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c\nindex 6944325..82b10bc 100644\n--- a/examples/load_balancer/runtime.c\n+++ b/examples/load_balancer/runtime.c\n@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (\n \t\t(void **) lp->rx.mbuf_out[worker].array,\n 
\t\tbsz);\n \n-\tif (unlikely(ret == -ENOBUFS)) {\n+\tif (unlikely(ret == 0)) {\n \t\tuint32_t k;\n \t\tfor (k = 0; k < bsz; k ++) {\n \t\t\tstruct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];\n@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)\n \t\t\t(void **) lp->rx.mbuf_out[worker].array,\n \t\t\tlp->rx.mbuf_out[worker].n_mbufs);\n \n-\t\tif (unlikely(ret < 0)) {\n+\t\tif (unlikely(ret == 0)) {\n \t\t\tuint32_t k;\n \t\t\tfor (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {\n \t\t\t\tstruct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];\n@@ -349,9 +349,8 @@ app_lcore_io_tx(\n \t\t\t\t(void **) &lp->tx.mbuf_out[port].array[n_mbufs],\n \t\t\t\tbsz_rd);\n \n-\t\t\tif (unlikely(ret == -ENOENT)) {\n+\t\t\tif (unlikely(ret == 0))\n \t\t\t\tcontinue;\n-\t\t\t}\n \n \t\t\tn_mbufs += bsz_rd;\n \n@@ -505,9 +504,8 @@ app_lcore_worker(\n \t\t\t(void **) lp->mbuf_in.array,\n \t\t\tbsz_rd);\n \n-\t\tif (unlikely(ret == -ENOENT)) {\n+\t\tif (unlikely(ret == 0))\n \t\t\tcontinue;\n-\t\t}\n \n #if APP_WORKER_DROP_ALL_PACKETS\n \t\tfor (j = 0; j < bsz_rd; j ++) {\n@@ -559,7 +557,7 @@ app_lcore_worker(\n \n #if APP_STATS\n \t\t\tlp->rings_out_iters[port] ++;\n-\t\t\tif (ret == 0) {\n+\t\t\tif (ret > 0) {\n \t\t\t\tlp->rings_out_count[port] += 1;\n \t\t\t}\n \t\t\tif (lp->rings_out_iters[port] == APP_STATS){\n@@ -572,7 +570,7 @@ app_lcore_worker(\n \t\t\t}\n #endif\n \n-\t\t\tif (unlikely(ret == -ENOBUFS)) {\n+\t\t\tif (unlikely(ret == 0)) {\n \t\t\t\tuint32_t k;\n \t\t\t\tfor (k = 0; k < bsz_wr; k ++) {\n \t\t\t\t\tstruct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];\n@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)\n \t\t\t(void **) lp->mbuf_out[port].array,\n \t\t\tlp->mbuf_out[port].n_mbufs);\n \n-\t\tif (unlikely(ret < 0)) {\n+\t\tif (unlikely(ret == 0)) {\n \t\t\tuint32_t k;\n \t\t\tfor (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {\n \t\t\t\tstruct rte_mbuf *pkt_to_free = 
lp->mbuf_out[port].array[k];\ndiff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c\nindex d4f9ca3..dca9eb9 100644\n--- a/examples/multi_process/client_server_mp/mp_client/client.c\n+++ b/examples/multi_process/client_server_mp/mp_client/client.c\n@@ -276,14 +276,10 @@ main(int argc, char *argv[])\n \tprintf(\"[Press Ctrl-C to quit ...]\\n\");\n \n \tfor (;;) {\n-\t\tuint16_t i, rx_pkts = PKT_READ_SIZE;\n+\t\tuint16_t i, rx_pkts;\n \t\tuint8_t port;\n \n-\t\t/* try dequeuing max possible packets first, if that fails, get the\n-\t\t * most we can. Loop body should only execute once, maximum */\n-\t\twhile (rx_pkts > 0 &&\n-\t\t\t\tunlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))\n-\t\t\trx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);\n+\t\trx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);\n \n \t\tif (unlikely(rx_pkts == 0)){\n \t\t\tif (need_flush)\ndiff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c\nindex a6dc12d..19c95b2 100644\n--- a/examples/multi_process/client_server_mp/mp_server/main.c\n+++ b/examples/multi_process/client_server_mp/mp_server/main.c\n@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)\n \n \tcl = &clients[client];\n \tif (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,\n-\t\t\tcl_rx_buf[client].count) != 0){\n+\t\t\tcl_rx_buf[client].count) == 0){\n \t\tfor (j = 0; j < cl_rx_buf[client].count; j++)\n \t\t\trte_pktmbuf_free(cl_rx_buf[client].buffer[j]);\n \t\tcl->stats.rx_drop += cl_rx_buf[client].count;\ndiff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c\nindex 70fdcdb..dab4594 100644\n--- a/examples/qos_sched/app_thread.c\n+++ b/examples/qos_sched/app_thread.c\n@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)\n \t\t\t}\n \n \t\t\tif 
(unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,\n-\t\t\t\t\t\t\t\t(void **)rx_mbufs, nb_rx) != 0)) {\n+\t\t\t\t\t(void **)rx_mbufs, nb_rx) == 0)) {\n \t\t\t\tfor(i = 0; i < nb_rx; i++) {\n \t\t\t\t\trte_pktmbuf_free(rx_mbufs[i]);\n \n@@ -180,7 +180,7 @@ app_tx_thread(struct thread_conf **confs)\n \twhile ((conf = confs[conf_idx])) {\n \t\tretval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,\n \t\t\t\t\tburst_conf.qos_dequeue);\n-\t\tif (likely(retval == 0)) {\n+\t\tif (likely(retval != 0)) {\n \t\t\tapp_send_packets(conf, mbufs, burst_conf.qos_dequeue);\n \n \t\t\tconf->counter = 0; /* reset empty read loop counter */\n@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)\n \t\tnb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,\n \t\t\t\t\tburst_conf.qos_dequeue);\n \t\tif (likely(nb_pkt > 0))\n-\t\t\twhile (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);\n+\t\t\twhile (rte_ring_sp_enqueue_bulk(conf->tx_ring,\n+\t\t\t\t\t(void **)mbufs, nb_pkt) == 0)\n+\t\t\t\t; /* empty body */\n \n \t\tconf_idx++;\n \t\tif (confs[conf_idx] == NULL)\ndiff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c\nindex a6c0c70..9ec6a05 100644\n--- a/examples/server_node_efd/node/node.c\n+++ b/examples/server_node_efd/node/node.c\n@@ -392,7 +392,7 @@ main(int argc, char *argv[])\n \t\t */\n \t\twhile (rx_pkts > 0 &&\n \t\t\t\tunlikely(rte_ring_dequeue_bulk(rx_ring, pkts,\n-\t\t\t\t\trx_pkts) != 0))\n+\t\t\t\t\trx_pkts) == 0))\n \t\t\trx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),\n \t\t\t\t\tPKT_READ_SIZE);\n \ndiff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c\nindex 1a54d1b..3eb7fac 100644\n--- a/examples/server_node_efd/server/main.c\n+++ b/examples/server_node_efd/server/main.c\n@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)\n \n \tcl = &nodes[node];\n \tif (rte_ring_enqueue_bulk(cl->rx_q, (void 
**)cl_rx_buf[node].buffer,\n-\t\t\tcl_rx_buf[node].count) != 0){\n+\t\t\tcl_rx_buf[node].count) != cl_rx_buf[node].count){\n \t\tfor (j = 0; j < cl_rx_buf[node].count; j++)\n \t\t\trte_pktmbuf_free(cl_rx_buf[node].buffer[j]);\n \t\tcl->stats.rx_drop += cl_rx_buf[node].count;\ndiff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c\nindex b9aa64d..409b860 100644\n--- a/lib/librte_mempool/rte_mempool_ring.c\n+++ b/lib/librte_mempool/rte_mempool_ring.c\n@@ -42,26 +42,30 @@ static int\n common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,\n \t\tunsigned n)\n {\n-\treturn rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);\n+\treturn rte_ring_mp_enqueue_bulk(mp->pool_data,\n+\t\t\tobj_table, n) == 0 ? -ENOBUFS : 0;\n }\n \n static int\n common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,\n \t\tunsigned n)\n {\n-\treturn rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);\n+\treturn rte_ring_sp_enqueue_bulk(mp->pool_data,\n+\t\t\tobj_table, n) == 0 ? -ENOBUFS : 0;\n }\n \n static int\n common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)\n {\n-\treturn rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);\n+\treturn rte_ring_mc_dequeue_bulk(mp->pool_data,\n+\t\t\tobj_table, n) == 0 ? -ENOBUFS : 0;\n }\n \n static int\n common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)\n {\n-\treturn rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);\n+\treturn rte_ring_sc_dequeue_bulk(mp->pool_data,\n+\t\t\tobj_table, n) == 0 ? 
-ENOBUFS : 0;\n }\n \n static unsigned\ndiff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h\nindex 906e8ae..34b438c 100644\n--- a/lib/librte_ring/rte_ring.h\n+++ b/lib/librte_ring/rte_ring.h\n@@ -349,14 +349,10 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);\n  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring\n  *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring\n  * @return\n- *   Depend on the behavior value\n- *   if behavior = RTE_RING_QUEUE_FIXED\n- *   - 0: Success; objects enqueue.\n- *   - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.\n- *   if behavior = RTE_RING_QUEUE_VARIABLE\n- *   - n: Actual number of objects enqueued.\n+ *   Actual number of objects enqueued.\n+ *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned n, enum rte_ring_queue_behavior behavior)\n {\n@@ -388,7 +384,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n \t\t/* check that we have enough room in ring */\n \t\tif (unlikely(n > free_entries)) {\n \t\t\tif (behavior == RTE_RING_QUEUE_FIXED)\n-\t\t\t\treturn -ENOBUFS;\n+\t\t\t\treturn 0;\n \t\t\telse {\n \t\t\t\t/* No free entry available */\n \t\t\t\tif (unlikely(free_entries == 0))\n@@ -414,7 +410,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n \t\trte_pause();\n \n \tr->prod.tail = prod_next;\n-\treturn (behavior == RTE_RING_QUEUE_FIXED) ? 
0 : n;\n+\treturn n;\n }\n \n /**\n@@ -430,14 +426,10 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring\n  *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring\n  * @return\n- *   Depend on the behavior value\n- *   if behavior = RTE_RING_QUEUE_FIXED\n- *   - 0: Success; objects enqueue.\n- *   - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.\n- *   if behavior = RTE_RING_QUEUE_VARIABLE\n- *   - n: Actual number of objects enqueued.\n+ *   Actual number of objects enqueued.\n+ *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned n, enum rte_ring_queue_behavior behavior)\n {\n@@ -457,7 +449,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n \t/* check that we have enough room in ring */\n \tif (unlikely(n > free_entries)) {\n \t\tif (behavior == RTE_RING_QUEUE_FIXED)\n-\t\t\treturn -ENOBUFS;\n+\t\t\treturn 0;\n \t\telse {\n \t\t\t/* No free entry available */\n \t\t\tif (unlikely(free_entries == 0))\n@@ -474,7 +466,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n \trte_smp_wmb();\n \n \tr->prod.tail = prod_next;\n-\treturn (behavior == RTE_RING_QUEUE_FIXED) ? 
0 : n;\n+\treturn n;\n }\n \n /**\n@@ -495,16 +487,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,\n  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring\n  *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring\n  * @return\n- *   Depend on the behavior value\n- *   if behavior = RTE_RING_QUEUE_FIXED\n- *   - 0: Success; objects dequeued.\n- *   - -ENOENT: Not enough entries in the ring to dequeue; no object is\n- *     dequeued.\n- *   if behavior = RTE_RING_QUEUE_VARIABLE\n- *   - n: Actual number of objects dequeued.\n+ *   - Actual number of objects dequeued.\n+ *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n \n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,\n \t\t unsigned n, enum rte_ring_queue_behavior behavior)\n {\n@@ -536,7 +523,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,\n \t\t/* Set the actual entries for dequeue */\n \t\tif (n > entries) {\n \t\t\tif (behavior == RTE_RING_QUEUE_FIXED)\n-\t\t\t\treturn -ENOENT;\n+\t\t\t\treturn 0;\n \t\t\telse {\n \t\t\t\tif (unlikely(entries == 0))\n \t\t\t\t\treturn 0;\n@@ -562,7 +549,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,\n \n \tr->cons.tail = cons_next;\n \n-\treturn behavior == RTE_RING_QUEUE_FIXED ? 
0 : n;\n+\treturn n;\n }\n \n /**\n@@ -580,15 +567,10 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,\n  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring\n  *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring\n  * @return\n- *   Depend on the behavior value\n- *   if behavior = RTE_RING_QUEUE_FIXED\n- *   - 0: Success; objects dequeued.\n- *   - -ENOENT: Not enough entries in the ring to dequeue; no object is\n- *     dequeued.\n- *   if behavior = RTE_RING_QUEUE_VARIABLE\n- *   - n: Actual number of objects dequeued.\n+ *   - Actual number of objects dequeued.\n+ *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,\n \t\t unsigned n, enum rte_ring_queue_behavior behavior)\n {\n@@ -607,7 +589,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,\n \n \tif (n > entries) {\n \t\tif (behavior == RTE_RING_QUEUE_FIXED)\n-\t\t\treturn -ENOENT;\n+\t\t\treturn 0;\n \t\telse {\n \t\t\tif (unlikely(entries == 0))\n \t\t\t\treturn 0;\n@@ -623,7 +605,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,\n \trte_smp_rmb();\n \n \tr->cons.tail = cons_next;\n-\treturn behavior == RTE_RING_QUEUE_FIXED ? 
0 : n;\n+\treturn n;\n }\n \n /**\n@@ -639,10 +621,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,\n  * @param n\n  *   The number of objects to add in the ring from the obj_table.\n  * @return\n- *   - 0: Success; objects enqueue.\n- *   - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.\n+ *   The number of objects enqueued, either 0 or n\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned n)\n {\n@@ -659,10 +640,9 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n  * @param n\n  *   The number of objects to add in the ring from the obj_table.\n  * @return\n- *   - 0: Success; objects enqueued.\n- *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.\n+ *   The number of objects enqueued, either 0 or n\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t\t unsigned n)\n {\n@@ -683,10 +663,9 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n  * @param n\n  *   The number of objects to add in the ring from the obj_table.\n  * @return\n- *   - 0: Success; objects enqueued.\n- *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.\n+ *   The number of objects enqueued, either 0 or n\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n \t\t      unsigned n)\n {\n@@ -713,7 +692,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,\n static inline int __attribute__((always_inline))\n rte_ring_mp_enqueue(struct rte_ring *r, void *obj)\n {\n-\treturn rte_ring_mp_enqueue_bulk(r, &obj, 
1);\n+\treturn rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;\n }\n \n /**\n@@ -730,7 +709,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)\n static inline int __attribute__((always_inline))\n rte_ring_sp_enqueue(struct rte_ring *r, void *obj)\n {\n-\treturn rte_ring_sp_enqueue_bulk(r, &obj, 1);\n+\treturn rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;\n }\n \n /**\n@@ -751,10 +730,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)\n static inline int __attribute__((always_inline))\n rte_ring_enqueue(struct rte_ring *r, void *obj)\n {\n-\tif (r->prod.single)\n-\t\treturn rte_ring_sp_enqueue(r, obj);\n-\telse\n-\t\treturn rte_ring_mp_enqueue(r, obj);\n+\treturn rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;\n }\n \n /**\n@@ -770,11 +746,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)\n  * @param n\n  *   The number of objects to dequeue from the ring to the obj_table.\n  * @return\n- *   - 0: Success; objects dequeued.\n- *   - -ENOENT: Not enough entries in the ring to dequeue; no object is\n- *     dequeued.\n+ *   The number of objects dequeued, either 0 or n\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)\n {\n \treturn __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);\n@@ -791,11 +765,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)\n  *   The number of objects to dequeue from the ring to the obj_table,\n  *   must be strictly positive.\n  * @return\n- *   - 0: Success; objects dequeued.\n- *   - -ENOENT: Not enough entries in the ring to dequeue; no object is\n- *     dequeued.\n+ *   The number of objects dequeued, either 0 or n\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)\n {\n \treturn 
__rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);\n@@ -815,11 +787,9 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)\n  * @param n\n  *   The number of objects to dequeue from the ring to the obj_table.\n  * @return\n- *   - 0: Success; objects dequeued.\n- *   - -ENOENT: Not enough entries in the ring to dequeue, no object is\n- *     dequeued.\n+ *   The number of objects dequeued, either 0 or n\n  */\n-static inline int __attribute__((always_inline))\n+static inline unsigned int __attribute__((always_inline))\n rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)\n {\n \tif (r->cons.single)\n@@ -846,7 +816,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)\n static inline int __attribute__((always_inline))\n rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)\n {\n-\treturn rte_ring_mc_dequeue_bulk(r, obj_p, 1);\n+\treturn rte_ring_mc_dequeue_bulk(r, obj_p, 1)  ? 0 : -ENOBUFS;\n }\n \n /**\n@@ -864,7 +834,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)\n static inline int __attribute__((always_inline))\n rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)\n {\n-\treturn rte_ring_sc_dequeue_bulk(r, obj_p, 1);\n+\treturn rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;\n }\n \n /**\n@@ -886,10 +856,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)\n static inline int __attribute__((always_inline))\n rte_ring_dequeue(struct rte_ring *r, void **obj_p)\n {\n-\tif (r->cons.single)\n-\t\treturn rte_ring_sc_dequeue(r, obj_p);\n-\telse\n-\t\treturn rte_ring_mc_dequeue(r, obj_p);\n+\treturn rte_ring_dequeue_bulk(r, obj_p, 1) ? 
0 : -ENOBUFS;\n }\n \n /**\ndiff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c\nindex 10d2869..1ac0aa8 100644\n--- a/test/test-pipeline/pipeline_hash.c\n+++ b/test/test-pipeline/pipeline_hash.c\n@@ -547,6 +547,6 @@ app_main_loop_rx_metadata(void) {\n \t\t\t\tapp.rings_rx[i],\n \t\t\t\t(void **) app.mbuf_rx.array,\n \t\t\t\tn_mbufs);\n-\t\t} while (ret < 0);\n+\t\t} while (ret == 0);\n \t}\n }\ndiff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c\nindex 42a6142..4e20669 100644\n--- a/test/test-pipeline/runtime.c\n+++ b/test/test-pipeline/runtime.c\n@@ -98,7 +98,7 @@ app_main_loop_rx(void) {\n \t\t\t\tapp.rings_rx[i],\n \t\t\t\t(void **) app.mbuf_rx.array,\n \t\t\t\tn_mbufs);\n-\t\t} while (ret < 0);\n+\t\t} while (ret == 0);\n \t}\n }\n \n@@ -123,7 +123,7 @@ app_main_loop_worker(void) {\n \t\t\t(void **) worker_mbuf->array,\n \t\t\tapp.burst_size_worker_read);\n \n-\t\tif (ret == -ENOENT)\n+\t\tif (ret == 0)\n \t\t\tcontinue;\n \n \t\tdo {\n@@ -131,7 +131,7 @@ app_main_loop_worker(void) {\n \t\t\t\tapp.rings_tx[i ^ 1],\n \t\t\t\t(void **) worker_mbuf->array,\n \t\t\t\tapp.burst_size_worker_write);\n-\t\t} while (ret < 0);\n+\t\t} while (ret == 0);\n \t}\n }\n \n@@ -152,7 +152,7 @@ app_main_loop_tx(void) {\n \t\t\t(void **) &app.mbuf_tx[i].array[n_mbufs],\n \t\t\tapp.burst_size_tx_read);\n \n-\t\tif (ret == -ENOENT)\n+\t\tif (ret == 0)\n \t\t\tcontinue;\n \n \t\tn_mbufs += app.burst_size_tx_read;\ndiff --git a/test/test/test_ring.c b/test/test/test_ring.c\nindex 666a451..112433b 100644\n--- a/test/test/test_ring.c\n+++ b/test/test/test_ring.c\n@@ -117,20 +117,18 @@ test_ring_basic_full_empty(void * const src[], void *dst[])\n \t\trand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);\n \t\tprintf(\"%s: iteration %u, random shift: %u;\\n\",\n \t\t    __func__, i, rand);\n-\t\tTEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,\n-\t\t    rand));\n-\t\tTEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, 
rand));\n+\t\tTEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);\n+\t\tTEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);\n \n \t\t/* fill the ring */\n-\t\tTEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,\n-\t\t    rsz));\n+\t\tTEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);\n \t\tTEST_RING_VERIFY(0 == rte_ring_free_count(r));\n \t\tTEST_RING_VERIFY(rsz == rte_ring_count(r));\n \t\tTEST_RING_VERIFY(rte_ring_full(r));\n \t\tTEST_RING_VERIFY(0 == rte_ring_empty(r));\n \n \t\t/* empty the ring */\n-\t\tTEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));\n+\t\tTEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);\n \t\tTEST_RING_VERIFY(rsz == rte_ring_free_count(r));\n \t\tTEST_RING_VERIFY(0 == rte_ring_count(r));\n \t\tTEST_RING_VERIFY(0 == rte_ring_full(r));\n@@ -171,37 +169,37 @@ test_ring_basic(void)\n \tprintf(\"enqueue 1 obj\\n\");\n \tret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);\n \tcur_src += 1;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"enqueue 2 objs\\n\");\n \tret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);\n \tcur_src += 2;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"enqueue MAX_BULK objs\\n\");\n \tret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);\n \tcur_src += MAX_BULK;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"dequeue 1 obj\\n\");\n \tret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);\n \tcur_dst += 1;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"dequeue 2 objs\\n\");\n \tret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);\n \tcur_dst += 2;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"dequeue MAX_BULK objs\\n\");\n \tret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);\n \tcur_dst += MAX_BULK;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \t/* check data */\n@@ -217,37 +215,37 @@ test_ring_basic(void)\n \tprintf(\"enqueue 1 obj\\n\");\n \tret = 
rte_ring_mp_enqueue_bulk(r, cur_src, 1);\n \tcur_src += 1;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"enqueue 2 objs\\n\");\n \tret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);\n \tcur_src += 2;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"enqueue MAX_BULK objs\\n\");\n \tret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);\n \tcur_src += MAX_BULK;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"dequeue 1 obj\\n\");\n \tret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);\n \tcur_dst += 1;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"dequeue 2 objs\\n\");\n \tret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);\n \tcur_dst += 2;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \tprintf(\"dequeue MAX_BULK objs\\n\");\n \tret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);\n \tcur_dst += MAX_BULK;\n-\tif (ret != 0)\n+\tif (ret == 0)\n \t\tgoto fail;\n \n \t/* check data */\n@@ -264,11 +262,11 @@ test_ring_basic(void)\n \tfor (i = 0; i<RING_SIZE/MAX_BULK; i++) {\n \t\tret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);\n \t\tcur_src += MAX_BULK;\n-\t\tif (ret != 0)\n+\t\tif (ret == 0)\n \t\t\tgoto fail;\n \t\tret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);\n \t\tcur_dst += MAX_BULK;\n-\t\tif (ret != 0)\n+\t\tif (ret == 0)\n \t\t\tgoto fail;\n \t}\n \n@@ -294,25 +292,25 @@ test_ring_basic(void)\n \n \tret = rte_ring_enqueue_bulk(r, cur_src, num_elems);\n \tcur_src += num_elems;\n-\tif (ret != 0) {\n+\tif (ret == 0) {\n \t\tprintf(\"Cannot enqueue\\n\");\n \t\tgoto fail;\n \t}\n \tret = rte_ring_enqueue_bulk(r, cur_src, num_elems);\n \tcur_src += num_elems;\n-\tif (ret != 0) {\n+\tif (ret == 0) {\n \t\tprintf(\"Cannot enqueue\\n\");\n \t\tgoto fail;\n \t}\n \tret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);\n \tcur_dst += num_elems;\n-\tif (ret != 0) {\n+\tif (ret == 0) {\n \t\tprintf(\"Cannot dequeue\\n\");\n \t\tgoto fail;\n \t}\n \tret = rte_ring_dequeue_bulk(r, 
cur_dst, num_elems);\n \tcur_dst += num_elems;\n-\tif (ret != 0) {\n+\tif (ret == 0) {\n \t\tprintf(\"Cannot dequeue2\\n\");\n \t\tgoto fail;\n \t}\ndiff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c\nindex 320c20c..8ccbdef 100644\n--- a/test/test/test_ring_perf.c\n+++ b/test/test/test_ring_perf.c\n@@ -195,13 +195,13 @@ enqueue_bulk(void *p)\n \n \tconst uint64_t sp_start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)\n+\t\twhile (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)\n \t\t\trte_pause();\n \tconst uint64_t sp_end = rte_rdtsc();\n \n \tconst uint64_t mp_start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)\n+\t\twhile (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)\n \t\t\trte_pause();\n \tconst uint64_t mp_end = rte_rdtsc();\n \n@@ -230,13 +230,13 @@ dequeue_bulk(void *p)\n \n \tconst uint64_t sc_start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)\n+\t\twhile (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)\n \t\t\trte_pause();\n \tconst uint64_t sc_end = rte_rdtsc();\n \n \tconst uint64_t mc_start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)\n+\t\twhile (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)\n \t\t\trte_pause();\n \tconst uint64_t mc_end = rte_rdtsc();\n \n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "07/14"
    ]
}