get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/77320/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77320,
    "url": "http://patches.dpdk.org/api/patches/77320/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599801997-19880-2-git-send-email-phil.yang@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599801997-19880-2-git-send-email-phil.yang@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599801997-19880-2-git-send-email-phil.yang@arm.com",
    "date": "2020-09-11T05:26:36",
    "name": "[v2,1/2] drivers: replace RTE CIO barriers with RTE IO barriers",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f0f21eb1caf017c5fad21a4179c706320459ddd4",
    "submitter": {
        "id": 833,
        "url": "http://patches.dpdk.org/api/people/833/?format=api",
        "name": "Phil Yang",
        "email": "phil.yang@arm.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599801997-19880-2-git-send-email-phil.yang@arm.com/mbox/",
    "series": [
        {
            "id": 12128,
            "url": "http://patches.dpdk.org/api/series/12128/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12128",
            "date": "2020-09-11T05:26:35",
            "name": "remove RTE CIO barriers",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/12128/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77320/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/77320/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 695B1A04B5;\n\tFri, 11 Sep 2020 07:26:50 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4F42C1C0CC;\n\tFri, 11 Sep 2020 07:26:50 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by dpdk.org (Postfix) with ESMTP id E28FE255\n for <dev@dpdk.org>; Fri, 11 Sep 2020 07:26:48 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 70637106F;\n Thu, 10 Sep 2020 22:26:48 -0700 (PDT)",
            "from phil-VirtualBox.shanghai.arm.com\n (phil-VirtualBox.shanghai.arm.com [10.169.182.49])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id E73EB3F66E;\n Thu, 10 Sep 2020 22:26:40 -0700 (PDT)"
        ],
        "From": "Phil Yang <phil.yang@arm.com>",
        "To": "dev@dpdk.org",
        "Cc": "Honnappa.Nagarahalli@arm.com, Ruifeng.Wang@arm.com, joyce.kong@arm.com,\n nd@arm.com, Matan Azrad <matan@mellanox.com>,\n Shahaf Shuler <shahafs@mellanox.com>,\n Viacheslav Ovsiienko <viacheslavo@mellanox.com>,\n Ankur Dwivedi <adwivedi@marvell.com>, Anoob Joseph <anoobj@marvell.com>,\n Jerin Jacob <jerinj@marvell.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>, Wei Zhao <wei.zhao1@intel.com>,\n Jeff Guo <jia.guo@intel.com>, Beilei Xing <beilei.xing@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Harman Kalra <hkalra@marvell.com>,\n Nithin Dabilpuram <ndabilpuram@marvell.com>,\n Kiran Kumar K <kirankumark@marvell.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Chenbo Xia <chenbo.xia@intel.com>, Zhihong Wang <zhihong.wang@intel.com>,\n Mahipal Challa <mchalla@marvell.com>, Ori Kam <orika@mellanox.com>",
        "Date": "Fri, 11 Sep 2020 13:26:36 +0800",
        "Message-Id": "<1599801997-19880-2-git-send-email-phil.yang@arm.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1599801997-19880-1-git-send-email-phil.yang@arm.com>",
        "References": "<1598258441-15696-1-git-send-email-phil.yang@arm.com>\n <1599801997-19880-1-git-send-email-phil.yang@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v2 1/2] drivers: replace RTE CIO barriers with\n\tRTE IO barriers",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Replace the deprecated rte_cio_[rw]mb barriers with rte_io_[rw]mb\nbarriers.\n\nSigned-off-by: Phil Yang <phil.yang@arm.com>\nSigned-off-by: Joyce Kong <joyce.kong@arm.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\n---\n drivers/common/mlx5/mlx5_common.h             |  2 +-\n drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  2 +-\n drivers/crypto/octeontx2/otx2_cryptodev_sec.c |  4 ++--\n drivers/event/octeontx/ssovf_worker.c         |  6 +++---\n drivers/event/octeontx2/otx2_worker.h         |  2 +-\n drivers/net/bnxt/bnxt_hwrm.c                  |  2 +-\n drivers/net/bnxt/bnxt_ring.h                  |  6 +++---\n drivers/net/e1000/em_rxtx.c                   |  2 +-\n drivers/net/i40e/i40e_rxtx.c                  |  2 +-\n drivers/net/i40e/i40e_rxtx_vec_neon.c         |  4 ++--\n drivers/net/mlx5/mlx5_flow.c                  |  2 +-\n drivers/net/mlx5/mlx5_flow_dv.c               |  2 +-\n drivers/net/mlx5/mlx5_rxq.c                   | 12 ++++++------\n drivers/net/mlx5/mlx5_rxtx.c                  | 16 ++++++++--------\n drivers/net/mlx5/mlx5_rxtx.h                  |  2 +-\n drivers/net/mlx5/mlx5_rxtx_vec.h              |  2 +-\n drivers/net/mlx5/mlx5_rxtx_vec_altivec.h      |  2 +-\n drivers/net/mlx5/mlx5_rxtx_vec_neon.h         |  4 ++--\n drivers/net/mlx5/mlx5_rxtx_vec_sse.h          |  2 +-\n drivers/net/mlx5/mlx5_txq.c                   |  4 ++--\n drivers/net/octeontx/octeontx_rxtx.h          |  2 +-\n drivers/net/octeontx2/otx2_ethdev_sec.c       |  2 +-\n drivers/net/octeontx2/otx2_ethdev_sec_tx.h    |  2 +-\n drivers/net/octeontx2/otx2_rx.c               |  2 +-\n drivers/net/octeontx2/otx2_tx.c               |  6 +++---\n drivers/net/virtio/virtio_rxtx.c              |  2 +-\n drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-\n drivers/net/virtio/virtqueue.h                | 26 +++++++++++++-------------\n drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c     |  4 ++--\n drivers/regex/mlx5/mlx5_regex_fastpath.c      |  4 ++--\n 
30 files changed, 66 insertions(+), 66 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h\nindex 2cdb226..ed44a45 100644\n--- a/drivers/common/mlx5/mlx5_common.h\n+++ b/drivers/common/mlx5/mlx5_common.h\n@@ -193,7 +193,7 @@ check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,\n \n \tif (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))\n \t\treturn MLX5_CQE_STATUS_HW_OWN;\n-\trte_cio_rmb();\n+\trte_io_rmb();\n \tif (unlikely(op_code == MLX5_CQE_RESP_ERR ||\n \t\t     op_code == MLX5_CQE_REQ_ERR))\n \t\treturn MLX5_CQE_STATUS_ERR;\ndiff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c\nindex 9d51b17..df39cde 100644\n--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c\n+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c\n@@ -469,7 +469,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,\n \t\t * buffer immediately, a DMB is not required to push out\n \t\t * LMTSTs.\n \t\t */\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tlmt_status = otx2_lmt_submit(qp->lf_nq_reg);\n \t} while (lmt_status == 0);\n \ndiff --git a/drivers/crypto/octeontx2/otx2_cryptodev_sec.c b/drivers/crypto/octeontx2/otx2_cryptodev_sec.c\nindex 0741a59..72e6c41 100644\n--- a/drivers/crypto/octeontx2/otx2_cryptodev_sec.c\n+++ b/drivers/crypto/octeontx2/otx2_cryptodev_sec.c\n@@ -107,7 +107,7 @@ otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,\n \tinst.u64[3] = 0;\n \tinst.res_addr = rte_mempool_virt2iova(res);\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tdo {\n \t\t/* Copy CPT command to LMTLINE */\n@@ -124,7 +124,7 @@ otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,\n \t\t\totx2_err(\"Request timed out\");\n \t\t\treturn -ETIMEDOUT;\n \t\t}\n-\t    rte_cio_rmb();\n+\t    rte_io_rmb();\n \t}\n \n \tif (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {\ndiff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c\nindex 18b7926..3dfe665 100644\n--- 
a/drivers/event/octeontx/ssovf_worker.c\n+++ b/drivers/event/octeontx/ssovf_worker.c\n@@ -286,17 +286,17 @@ __sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],\n \tswitch (ev->sched_type) {\n \tcase SSO_SYNC_ORDERED:\n \t\tssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tssows_swtag_wait(ws);\n \t\tbreak;\n \tcase SSO_SYNC_UNTAGGED:\n \t\tssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,\n \t\t\t\tev->queue_id);\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tssows_swtag_wait(ws);\n \t\tbreak;\n \tcase SSO_SYNC_ATOMIC:\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tbreak;\n \t}\n \ndiff --git a/drivers/event/octeontx2/otx2_worker.h b/drivers/event/octeontx2/otx2_worker.h\nindex 924ff7f..cde1288 100644\n--- a/drivers/event/octeontx2/otx2_worker.h\n+++ b/drivers/event/octeontx2/otx2_worker.h\n@@ -256,7 +256,7 @@ otx2_ssogws_order(struct otx2_ssogws *ws, const uint8_t wait_flag)\n \tif (wait_flag)\n \t\totx2_ssogws_head_wait(ws);\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n }\n \n static __rte_always_inline const struct otx2_eth_txq *\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c\nindex 8296d1d..0c7fa2d 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.c\n+++ b/drivers/net/bnxt/bnxt_hwrm.c\n@@ -148,7 +148,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,\n \t/* Poll for the valid bit */\n \tfor (i = 0; i < timeout; i++) {\n \t\t/* Sanity check on the resp->resp_len */\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n \t\tif (resp->resp_len && resp->resp_len <= bp->max_resp_len) {\n \t\t\t/* Last byte of resp contains the valid key */\n \t\t\tvalid = (uint8_t *)resp + resp->resp_len - 1;\ndiff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h\nindex 9913aed..daf9804 100644\n--- a/drivers/net/bnxt/bnxt_ring.h\n+++ b/drivers/net/bnxt/bnxt_ring.h\n@@ -82,7 +82,7 @@ void bnxt_free_rxtx_nq_ring(struct bnxt *bp);\n \n static inline void bnxt_db_write(struct 
bnxt_db_info *db, uint32_t idx)\n {\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tif (db->db_64)\n \t\trte_write64_relaxed(db->db_key64 | idx, db->doorbell);\n@@ -96,7 +96,7 @@ static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr)\n \tif (unlikely(!cpr->cp_db.db_64))\n \t\treturn;\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \trte_write64_relaxed(cpr->cp_db.db_key64 | DBR_TYPE_NQ |\n \t\t\t    RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),\n \t\t\t    cpr->cp_db.doorbell);\n@@ -108,7 +108,7 @@ static inline void bnxt_db_nq_arm(struct bnxt_cp_ring_info *cpr)\n \tif (unlikely(!cpr->cp_db.db_64))\n \t\treturn;\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \trte_write64_relaxed(cpr->cp_db.db_key64 | DBR_TYPE_NQ_ARM |\n \t\t\t    RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),\n \t\t\t    cpr->cp_db.doorbell);\ndiff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c\nindex 67a271e..19e3bff 100644\n--- a/drivers/net/e1000/em_rxtx.c\n+++ b/drivers/net/e1000/em_rxtx.c\n@@ -2051,7 +2051,7 @@ e1000_flush_tx_ring(struct rte_eth_dev *dev)\n \t\ttx_desc->lower.data = rte_cpu_to_le_32(txd_lower | size);\n \t\ttx_desc->upper.data = 0;\n \n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\ttxq->tx_tail++;\n \t\tif (txq->tx_tail == txq->nb_tx_desc)\n \t\t\ttxq->tx_tail = 0;\ndiff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c\nindex 60b33d2..322fc1e 100644\n--- a/drivers/net/i40e/i40e_rxtx.c\n+++ b/drivers/net/i40e/i40e_rxtx.c\n@@ -1248,7 +1248,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t   (unsigned) txq->port_id, (unsigned) txq->queue_id,\n \t\t   (unsigned) tx_id, (unsigned) nb_tx);\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \tI40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);\n \ttxq->tx_tail = tx_id;\n \ndiff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c\nindex 6f874e4..543ecad 100644\n--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c\n+++ 
b/drivers/net/i40e/i40e_rxtx_vec_neon.c\n@@ -72,7 +72,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)\n \trx_id = (uint16_t)((rxq->rxrearm_start == 0) ?\n \t\t\t     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t/* Update the tail pointer on the NIC */\n \tI40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);\n }\n@@ -566,7 +566,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,\n \n \ttxq->tx_tail = tx_id;\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \tI40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);\n \n \treturn nb_pkts;\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 7150173..2accb2f 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -6114,7 +6114,7 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,\n \t\tpool->raw = pool->raw_hw;\n \t\trte_spinlock_unlock(&pool->sl);\n \t\t/* Be sure the new raw counters data is updated in memory. */\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tif (!TAILQ_EMPTY(&pool->counters[query_gen])) {\n \t\t\trte_spinlock_lock(&cont->csl);\n \t\t\tTAILQ_CONCAT(&cont->counters,\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 5339980..dcff5bf 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -4398,7 +4398,7 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,\n \t\tcont->last_pool_idx = pool->index;\n \t}\n \t/* Pool initialization must be updated before host thread access. */\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \trte_atomic16_add(&cont->n_valid, 1);\n \treturn pool;\n }\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 79eb8f8..004a451 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -465,11 +465,11 @@ rxq_sync_cq(struct mlx5_rxq_data *rxq)\n \t\tcqe->op_own = MLX5_CQE_INVALIDATE;\n \t}\n \t/* Resync CQE and WQE (WQ in RESET state). 
*/\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->rq_db = rte_cpu_to_be_32(0);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n }\n \n /**\n@@ -601,12 +601,12 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)\n \t\trte_errno = errno;\n \t\treturn ret;\n \t}\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t/* Reset RQ consumer before moving queue ro READY state. */\n \t*rxq->rq_db = rte_cpu_to_be_32(0);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \tif (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {\n \t\tstruct ibv_wq_attr mod = {\n \t\t\t.attr_mask = IBV_WQ_ATTR_STATE,\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 1b71e94..101555e 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -873,7 +873,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)\n \t};\n \t/* Update doorbell counter. */\n \trxq->rq_ci = wqe_n >> rxq->sges_n;\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);\n }\n \n@@ -1113,15 +1113,15 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)\n \tcase MLX5_RXQ_ERR_STATE_NEED_READY:\n \t\tret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);\n \t\tif (ret == MLX5_CQE_STATUS_HW_OWN) {\n-\t\t\trte_cio_wmb();\n+\t\t\trte_io_wmb();\n \t\t\t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n-\t\t\trte_cio_wmb();\n+\t\t\trte_io_wmb();\n \t\t\t/*\n \t\t\t * The RQ consumer index must be zeroed while moving\n \t\t\t * from RESET state to RDY state.\n \t\t\t */\n \t\t\t*rxq->rq_db = rte_cpu_to_be_32(0);\n-\t\t\trte_cio_wmb();\n+\t\t\trte_io_wmb();\n \t\t\tsm.is_wq = 1;\n \t\t\tsm.queue_id = rxq->idx;\n \t\t\tsm.state = IBV_WQS_RDY;\n@@ -1515,9 +1515,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\treturn 0;\n \t/* Update the consumer index. 
*/\n \trxq->rq_ci = rq_ci >> sges_n;\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);\n #ifdef MLX5_PMD_SOFT_COUNTERS\n \t/* Increment packets counter. */\n@@ -1893,11 +1893,11 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)\n out:\n \t/* Update the consumer indexes. */\n \trxq->consumed_strd = consumed_strd;\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n \tif (rq_ci != rxq->rq_ci) {\n \t\trxq->rq_ci = rq_ci;\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\t*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);\n \t}\n #ifdef MLX5_PMD_SOFT_COUNTERS\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex c02a007..c74fa57 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -691,7 +691,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,\n \tuint64_t *dst = MLX5_TX_BFREG(txq);\n \tvolatile uint64_t *src = ((volatile uint64_t *)wqe);\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);\n \t/* Ensure ordering between DB record and BF copy. 
*/\n \trte_wmb();\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h\nindex 6ddcbfb..a8d6c4f 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h\n@@ -118,7 +118,7 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)\n \telts_idx = rxq->rq_ci & q_mask;\n \tfor (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)\n \t\t(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\nindex cb4ce1a..6bf0c9b 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\n@@ -788,7 +788,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,\n \t\t/* B.2 copy mbuf pointers. */\n \t\t*(vector unsigned char *)&pkts[pos] = mbp1;\n \t\t*(vector unsigned char *)&pkts[pos + 2] = mbp2;\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n \n \t\t/* C.1 load remaining CQE data and extract necessary fields. */\n \t\tcqe_tmp2 = *(vector unsigned char *)\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\nindex af924b7..d122dad 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n@@ -554,7 +554,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,\n \t\t/* B.0 (CQE 0) load a block having op_own. */\n \t\tc0 = vld1q_u64((uint64_t *)(p0 + 48));\n \t\t/* Synchronize for loading the rest of blocks. */\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n \t\t/* Prefetch next 4 CQEs. 
*/\n \t\tif (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {\n \t\t\tunsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;\n@@ -803,7 +803,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,\n \t\t\trxq->decompressed -= n;\n \t\t}\n \t}\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n \t*no_cq = !rcvd_pkt;\n \treturn rcvd_pkt;\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\nindex 554924d..0bbcbee 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n@@ -552,7 +552,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,\n \t\t/* B.2 copy mbuf pointers. */\n \t\t_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);\n \t\t_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n \t\t/* C.1 load remained CQE data and extract necessary fields. */\n \t\tcqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);\n \t\tcqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex 21fe16b..207fa22 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -155,9 +155,9 @@ txq_sync_cq(struct mlx5_txq_data *txq)\n \t\tcqe->op_own = MLX5_CQE_INVALIDATE;\n \t}\n \t/* Resync CQE and WQE (WQ in reset state). 
*/\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n }\n \n /**\ndiff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h\nindex 8b46105..af596cd 100644\n--- a/drivers/net/octeontx/octeontx_rxtx.h\n+++ b/drivers/net/octeontx/octeontx_rxtx.h\n@@ -418,7 +418,7 @@ __octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tstruct octeontx_txq *txq = tx_queue;\n \tocteontx_dq_t *dq = &txq->dq;\n \tuint16_t count = 0, nb_desc;\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \twhile (count < nb_pkts) {\n \t\tif (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))\ndiff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c\nindex a155594..0cbeed0 100644\n--- a/drivers/net/octeontx2/otx2_ethdev_sec.c\n+++ b/drivers/net/octeontx2/otx2_ethdev_sec.c\n@@ -312,7 +312,7 @@ hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,\n \n \ttimeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tdo {\n \t\totx2_lmt_mov(qp->lmtline, &inst, 2);\ndiff --git a/drivers/net/octeontx2/otx2_ethdev_sec_tx.h b/drivers/net/octeontx2/otx2_ethdev_sec_tx.h\nindex f8130ca..e30f382 100644\n--- a/drivers/net/octeontx2/otx2_ethdev_sec_tx.h\n+++ b/drivers/net/octeontx2/otx2_ethdev_sec_tx.h\n@@ -160,7 +160,7 @@ otx2_sec_event_tx(struct otx2_ssogws *ws, struct rte_event *ev,\n \tsess->ip_id++;\n \tsess->esn++;\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tdo {\n \t\totx2_lmt_mov(sess->cpt_lmtline, &inst, 2);\ndiff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c\nindex ac40704..2da8efe 100644\n--- a/drivers/net/octeontx2/otx2_rx.c\n+++ b/drivers/net/octeontx2/otx2_rx.c\n@@ -303,7 +303,7 @@ nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,\n \trxq->head = head;\n \trxq->available -= packets;\n \n-\trte_cio_wmb();\n+\trte_io_wmb();\n \t/* Free all the CQs that 
we've processed */\n \totx2_write64((rxq->wdata | packets), rxq->cq_door);\n \ndiff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c\nindex 1af6fa6..1b75cd5 100644\n--- a/drivers/net/octeontx2/otx2_tx.c\n+++ b/drivers/net/octeontx2/otx2_tx.c\n@@ -39,7 +39,7 @@ nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t}\n \n \t/* Lets commit any changes in the packet */\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tfor (i = 0; i < pkts; i++) {\n \t\totx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);\n@@ -75,7 +75,7 @@ nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t}\n \n \t/* Lets commit any changes in the packet */\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tfor (i = 0; i < pkts; i++) {\n \t\totx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);\n@@ -128,7 +128,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,\n \ttxq->fc_cache_pkts -= pkts;\n \n \t/* Lets commit any changes in the packet */\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tsenddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);\n \tsenddesc23_w0 = senddesc01_w0;\ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex f915b8a..0ade352 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -147,7 +147,7 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,\n \n \tfor (i = 0; i < num; i++) {\n \t\tused_idx = vq->vq_used_cons_idx;\n-\t\t/* desc_is_used has a load-acquire or rte_cio_rmb inside\n+\t\t/* desc_is_used has a load-acquire or rte_io_rmb inside\n \t\t * and wait for used desc in virtqueue.\n \t\t */\n \t\tif (!desc_is_used(&desc[used_idx], vq))\ndiff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c\nindex 02520fd..12e034d 100644\n--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c\n+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c\n@@ -84,7 +84,7 @@ virtio_recv_pkts_vec(void *rx_queue,\n \tif (unlikely(nb_pkts < 
RTE_VIRTIO_DESC_PER_LOOP))\n \t\treturn 0;\n \n-\t/* virtqueue_nused has a load-acquire or rte_cio_rmb inside */\n+\t/* virtqueue_nused has a load-acquire or rte_io_rmb inside */\n \tnb_used = virtqueue_nused(vq);\n \n \tif (unlikely(nb_used == 0))\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 105a9c0..d4c21e9 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -47,7 +47,7 @@ virtio_rmb(uint8_t weak_barriers)\n \tif (weak_barriers)\n \t\trte_smp_rmb();\n \telse\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n }\n \n static inline void\n@@ -56,7 +56,7 @@ virtio_wmb(uint8_t weak_barriers)\n \tif (weak_barriers)\n \t\trte_smp_wmb();\n \telse\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n }\n \n static inline uint16_t\n@@ -68,7 +68,7 @@ virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,\n \tif (weak_barriers) {\n /* x86 prefers to using rte_smp_rmb over __atomic_load_n as it reports\n  * a better perf(~1.5%), which comes from the saved branch by the compiler.\n- * The if and else branch are identical with the smp and cio barriers both\n+ * The if and else branch are identical with the smp and io barriers both\n  * defined as compiler barriers on x86.\n  */\n #ifdef RTE_ARCH_X86_64\n@@ -79,7 +79,7 @@ virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,\n #endif\n \t} else {\n \t\tflags = dp->flags;\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n \t}\n \n \treturn flags;\n@@ -92,7 +92,7 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,\n \tif (weak_barriers) {\n /* x86 prefers to using rte_smp_wmb over __atomic_store_n as it reports\n  * a better perf(~1.5%), which comes from the saved branch by the compiler.\n- * The if and else branch are identical with the smp and cio barriers both\n+ * The if and else branch are identical with the smp and io barriers both\n  * defined as compiler barriers on x86.\n  */\n #ifdef RTE_ARCH_X86_64\n@@ -102,7 +102,7 @@ virtqueue_store_flags_packed(struct 
vring_packed_desc *dp,\n \t\t__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);\n #endif\n \t} else {\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tdp->flags = flags;\n \t}\n }\n@@ -469,7 +469,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)\n \t\treturn VTNET_TQ;\n }\n \n-/* virtqueue_nused has load-acquire or rte_cio_rmb insed */\n+/* virtqueue_nused has load-acquire or rte_io_rmb insed */\n static inline uint16_t\n virtqueue_nused(const struct virtqueue *vq)\n {\n@@ -480,7 +480,7 @@ virtqueue_nused(const struct virtqueue *vq)\n \t * x86 prefers to using rte_smp_rmb over __atomic_load_n as it\n \t * reports a slightly better perf, which comes from the saved\n \t * branch by the compiler.\n-\t * The if and else branches are identical with the smp and cio\n+\t * The if and else branches are identical with the smp and io\n \t * barriers both defined as compiler barriers on x86.\n \t */\n #ifdef RTE_ARCH_X86_64\n@@ -492,7 +492,7 @@ virtqueue_nused(const struct virtqueue *vq)\n #endif\n \t} else {\n \t\tidx = vq->vq_split.ring.used->idx;\n-\t\trte_cio_rmb();\n+\t\trte_io_rmb();\n \t}\n \treturn idx - vq->vq_used_cons_idx;\n }\n@@ -510,7 +510,7 @@ vq_update_avail_idx(struct virtqueue *vq)\n \t * it reports a slightly better perf, which comes from the\n \t * saved branch by the compiler.\n \t * The if and else branches are identical with the smp and\n-\t * cio barriers both defined as compiler barriers on x86.\n+\t * io barriers both defined as compiler barriers on x86.\n \t */\n #ifdef RTE_ARCH_X86_64\n \t\trte_smp_wmb();\n@@ -520,7 +520,7 @@ vq_update_avail_idx(struct virtqueue *vq)\n \t\t\t\t vq->vq_avail_idx, __ATOMIC_RELEASE);\n #endif\n \t} else {\n-\t\trte_cio_wmb();\n+\t\trte_io_wmb();\n \t\tvq->vq_split.ring.avail->idx = vq->vq_avail_idx;\n \t}\n }\n@@ -793,7 +793,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)\n \tstruct vq_desc_extra *dxp;\n \n \tused_idx = vq->vq_used_cons_idx;\n-\t/* desc_is_used has a 
load-acquire or rte_cio_rmb inside\n+\t/* desc_is_used has a load-acquire or rte_io_rmb inside\n \t * and wait for used desc in virtqueue.\n \t */\n \twhile (num > 0 && desc_is_used(&desc[used_idx], vq)) {\n@@ -827,7 +827,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)\n \tstruct vq_desc_extra *dxp;\n \n \tused_idx = vq->vq_used_cons_idx;\n-\t/* desc_is_used has a load-acquire or rte_cio_rmb inside\n+\t/* desc_is_used has a load-acquire or rte_io_rmb inside\n \t * and wait for used desc in virtqueue.\n \t */\n \twhile (num-- && desc_is_used(&desc[used_idx], vq)) {\ndiff --git a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c\nindex 9f1e5ed..d04e957 100644\n--- a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c\n+++ b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c\n@@ -475,7 +475,7 @@ sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused,\n \totx2_write64(iq->fill_cnt, iq->doorbell_reg);\n \n \t/* Make sure doorbell writes observed by HW */\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \tiq->fill_cnt = 0;\n \n }\n@@ -812,7 +812,7 @@ sdp_rawdev_dequeue(struct rte_rawdev *rawdev,\n \n \t/* Ack the h/w with no# of pkts read by Host */\n \trte_write32(pkts, droq->pkts_sent_reg);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tdroq->last_pkt_count -= pkts;\n \ndiff --git a/drivers/regex/mlx5/mlx5_regex_fastpath.c b/drivers/regex/mlx5/mlx5_regex_fastpath.c\nindex 6fafcff..d9b2a1a 100644\n--- a/drivers/regex/mlx5/mlx5_regex_fastpath.c\n+++ b/drivers/regex/mlx5/mlx5_regex_fastpath.c\n@@ -135,7 +135,7 @@ send_doorbell(struct mlx5dv_devx_uar *uar, struct mlx5_regex_sq *sq)\n \t((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;\n \tuint64_t *doorbell_addr =\n \t\t(uint64_t *)((uint8_t *)uar->base_addr + 0x800);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \tsq->dbr[MLX5_SND_DBR] = rte_cpu_to_be_32((sq->db_pi + 1) &\n \t\t\t\t\t\t MLX5_REGEX_MAX_WQE_INDEX);\n \trte_wmb();\n@@ -219,7 +219,7 @@ poll_one(struct mlx5_regex_cq 
*cq)\n \n \tnext_cqe_offset =  (cq->ci & (cq_size_get(cq) - 1));\n \tcqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset);\n-\trte_cio_wmb();\n+\trte_io_wmb();\n \n \tint ret = check_cqe(cqe, cq_size_get(cq), cq->ci);\n \n",
    "prefixes": [
        "v2",
        "1/2"
    ]
}