get:
Show a patch.

patch:
Partially update a patch (send only the fields to change).

put:
Update a patch (full representation).
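
Example (a minimal client sketch in Python using the requests library; the token value and the target state are placeholder assumptions, and updating a patch requires a Patchwork account with maintainer rights on the project):

import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical; generate a real token in the Patchwork web UI

# GET /api/patches/{id}/ -- no authentication needed for public projects.
resp = requests.get(f"{BASE}/patches/59778/")
resp.raise_for_status()
print(resp.json()["state"])  # "accepted" for the patch shown below

# PATCH /api/patches/{id}/ -- partial update; send only the fields to change.
# PUT uses the same route but expects a full representation.
resp = requests.patch(
    f"{BASE}/patches/59778/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "under-review"},  # hypothetical state change
)
resp.raise_for_status()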

GET /api/patches/59778/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 59778,
    "url": "https://patches.dpdk.org/api/patches/59778/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20190926032022.104495-4-xiaoyun.li@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190926032022.104495-4-xiaoyun.li@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190926032022.104495-4-xiaoyun.li@intel.com",
    "date": "2019-09-26T03:20:21",
    "name": "[v6,3/4] raw/ntb: add enqueue and dequeue functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "b62cea157465c5aa6c9dcdb59d92ed79d5246aef",
    "submitter": {
        "id": 798,
        "url": "https://patches.dpdk.org/api/people/798/?format=api",
        "name": "Li, Xiaoyun",
        "email": "xiaoyun.li@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20190926032022.104495-4-xiaoyun.li@intel.com/mbox/",
    "series": [
        {
            "id": 6535,
            "url": "https://patches.dpdk.org/api/series/6535/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=6535",
            "date": "2019-09-26T03:20:18",
            "name": "enable FIFO for NTB",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/6535/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/59778/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/59778/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id CE8AE34F3;\n\tThu, 26 Sep 2019 05:21:30 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id D57C92C37\n\tfor <dev@dpdk.org>; Thu, 26 Sep 2019 05:21:19 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t25 Sep 2019 20:21:19 -0700",
            "from dpdk-xiaoyun3.sh.intel.com ([10.67.118.162])\n\tby orsmga001.jf.intel.com with ESMTP; 25 Sep 2019 20:21:17 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.64,550,1559545200\"; d=\"scan'208\";a=\"273188258\"",
        "From": "Xiaoyun Li <xiaoyun.li@intel.com>",
        "To": "jingjing.wu@intel.com, keith.wiles@intel.com, omkar.maslekar@intel.com, \n\tcunming.liang@intel.com",
        "Cc": "dev@dpdk.org,\n\tXiaoyun Li <xiaoyun.li@intel.com>",
        "Date": "Thu, 26 Sep 2019 11:20:21 +0800",
        "Message-Id": "<20190926032022.104495-4-xiaoyun.li@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190926032022.104495-1-xiaoyun.li@intel.com>",
        "References": "<20190924084345.93255-1-xiaoyun.li@intel.com>\n\t<20190926032022.104495-1-xiaoyun.li@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v6 3/4] raw/ntb: add enqueue and dequeue functions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Introduce enqueue and dequeue functions to support packet based\nprocessing. And enable write-combining for ntb driver since it\ncan improve the performance a lot.\n\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\n---\n doc/guides/rawdevs/ntb.rst     |  54 ++++++++\n drivers/raw/ntb/ntb.c          | 242 ++++++++++++++++++++++++++++++---\n drivers/raw/ntb/ntb.h          |   2 +\n drivers/raw/ntb/ntb_hw_intel.c |  22 +++\n 4 files changed, 301 insertions(+), 19 deletions(-)",
    "diff": "diff --git a/doc/guides/rawdevs/ntb.rst b/doc/guides/rawdevs/ntb.rst\nindex 99e7db441..12f931c97 100644\n--- a/doc/guides/rawdevs/ntb.rst\n+++ b/doc/guides/rawdevs/ntb.rst\n@@ -45,6 +45,50 @@ to use, i.e. igb_uio, vfio. The ``dpdk-devbind.py`` script can be used to\n show devices status and to bind them to a suitable kernel driver. They will\n appear under the category of \"Misc (rawdev) devices\".\n \n+Prerequisites\n+-------------\n+NTB PMD needs kernel PCI driver to support write combining (WC) to get\n+better performance. The difference will be more than 10 times.\n+To enable WC, there are 2 ways.\n+- Insert igb_uio with ``wc_active=1`` flag if use igb_uio driver.\n+\n+.. code-block:: console\n+  insmod igb_uio.ko wc_active=1\n+\n+- Enable WC for NTB device's Bar 2 and Bar 4 (Mapped memory) manually.\n+The reference is https://www.kernel.org/doc/html/latest/x86/mtrr.html\n+Get bar base address using ``lspci -vvv -s ae:00.0 | grep Region``.\n+\n+.. code-block:: console\n+\n+  # lspci -vvv -s ae:00.0 | grep Region\n+  Region 0: Memory at 39bfe0000000 (64-bit, prefetchable) [size=64K]\n+  Region 2: Memory at 39bfa0000000 (64-bit, prefetchable) [size=512M]\n+  Region 4: Memory at 39bfc0000000 (64-bit, prefetchable) [size=512M]\n+\n+Using the following command to enable WC.\n+\n+.. code-block:: console\n+\n+  echo \"base=0x39bfa0000000 size=0x20000000 type=write-combining\" >> /proc/mtrr\n+  echo \"base=0x39bfc0000000 size=0x20000000 type=write-combining\" >> /proc/mtrr\n+\n+And the results:\n+\n+.. code-block:: console\n+\n+  # cat /proc/mtrr\n+  reg00: base=0x000000000 (    0MB), size= 2048MB, count=1: write-back\n+  reg01: base=0x07f000000 ( 2032MB), size=   16MB, count=1: uncachable\n+  reg02: base=0x39bfa0000000 (60553728MB), size=  512MB, count=1: write-combining\n+  reg03: base=0x39bfc0000000 (60554240MB), size=  512MB, count=1: write-combining\n+\n+To disable WC for these regions, using the following.\n+\n+.. code-block:: console\n+     echo \"disable=2\" >> /proc/mtrr\n+     echo \"disable=3\" >> /proc/mtrr\n+\n Ring Layout\n -----------\n \n@@ -83,6 +127,16 @@ like the following:\n       +------------------------+   +------------------------+\n                     <---------traffic---------\n \n+- Enqueue and Dequeue\n+  Based on this ring layout, enqueue reads rx_tail to get how many free\n+  buffers and writes used_ring and tx_tail to tell the peer which buffers\n+  are filled with data.\n+  And dequeue reads tx_tail to get how many packets are arrived, and\n+  writes desc_ring and rx_tail to tell the peer about the new allocated\n+  buffers.\n+  So in this way, only remote write happens and remote read can be avoid\n+  to get better performance.\n+\n Limitation\n ----------\n \ndiff --git a/drivers/raw/ntb/ntb.c b/drivers/raw/ntb/ntb.c\nindex a30245c64..ad7f6abfd 100644\n--- a/drivers/raw/ntb/ntb.c\n+++ b/drivers/raw/ntb/ntb.c\n@@ -558,26 +558,140 @@ ntb_queue_init(struct rte_rawdev *dev, uint16_t qp_id)\n \treturn 0;\n }\n \n+static inline void\n+ntb_enqueue_cleanup(struct ntb_tx_queue *txq)\n+{\n+\tstruct ntb_tx_entry *sw_ring = txq->sw_ring;\n+\tuint16_t tx_free = txq->last_avail;\n+\tuint16_t nb_to_clean, i;\n+\n+\t/* avail_cnt + 1 represents where to rx next in the peer. 
*/\n+\tnb_to_clean = (*txq->avail_cnt - txq->last_avail + 1 +\n+\t\t\ttxq->nb_tx_desc) & (txq->nb_tx_desc - 1);\n+\tnb_to_clean = RTE_MIN(nb_to_clean, txq->tx_free_thresh);\n+\tfor (i = 0; i < nb_to_clean; i++) {\n+\t\tif (sw_ring[tx_free].mbuf)\n+\t\t\trte_pktmbuf_free_seg(sw_ring[tx_free].mbuf);\n+\t\ttx_free = (tx_free + 1) & (txq->nb_tx_desc - 1);\n+\t}\n+\n+\ttxq->nb_tx_free += nb_to_clean;\n+\ttxq->last_avail = tx_free;\n+}\n+\n static int\n ntb_enqueue_bufs(struct rte_rawdev *dev,\n \t\t struct rte_rawdev_buf **buffers,\n \t\t unsigned int count,\n \t\t rte_rawdev_obj_t context)\n {\n-\t/* Not FIFO right now. Just for testing memory write. */\n \tstruct ntb_hw *hw = dev->dev_private;\n-\tunsigned int i;\n-\tvoid *bar_addr;\n-\tsize_t size;\n+\tstruct ntb_tx_queue *txq = hw->tx_queues[(size_t)context];\n+\tstruct ntb_tx_entry *sw_ring = txq->sw_ring;\n+\tstruct rte_mbuf *txm;\n+\tstruct ntb_used tx_used[NTB_MAX_DESC_SIZE];\n+\tvolatile struct ntb_desc *tx_item;\n+\tuint16_t tx_last, nb_segs, off, last_used, avail_cnt;\n+\tuint16_t nb_mbufs = 0;\n+\tuint16_t nb_tx = 0;\n+\tuint64_t bytes = 0;\n+\tvoid *buf_addr;\n+\tint i;\n \n-\tif (hw->ntb_ops->get_peer_mw_addr == NULL)\n-\t\treturn -ENOTSUP;\n-\tbar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);\n-\tsize = (size_t)context;\n+\tif (unlikely(hw->ntb_ops->ioremap == NULL)) {\n+\t\tNTB_LOG(ERR, \"Ioremap not supported.\");\n+\t\treturn nb_tx;\n+\t}\n \n-\tfor (i = 0; i < count; i++)\n-\t\trte_memcpy(bar_addr, buffers[i]->buf_addr, size);\n-\treturn 0;\n+\tif (unlikely(dev->started == 0 || hw->peer_dev_up == 0)) {\n+\t\tNTB_LOG(DEBUG, \"Link is not up.\");\n+\t\treturn nb_tx;\n+\t}\n+\n+\tif (txq->nb_tx_free < txq->tx_free_thresh)\n+\t\tntb_enqueue_cleanup(txq);\n+\n+\toff = NTB_XSTATS_NUM * ((size_t)context + 1);\n+\tlast_used = txq->last_used;\n+\tavail_cnt = *txq->avail_cnt;/* Where to alloc next. */\n+\tfor (nb_tx = 0; nb_tx < count; nb_tx++) {\n+\t\ttxm = (struct rte_mbuf *)(buffers[nb_tx]->buf_addr);\n+\t\tif (txm == NULL || txq->nb_tx_free < txm->nb_segs)\n+\t\t\tbreak;\n+\n+\t\ttx_last = (txq->last_used + txm->nb_segs - 1) &\n+\t\t\t  (txq->nb_tx_desc - 1);\n+\t\tnb_segs = txm->nb_segs;\n+\t\tfor (i = 0; i < nb_segs; i++) {\n+\t\t\t/* Not enough ring space for tx. 
*/\n+\t\t\tif (txq->last_used == avail_cnt)\n+\t\t\t\tgoto end_of_tx;\n+\t\t\tsw_ring[txq->last_used].mbuf = txm;\n+\t\t\ttx_item = txq->tx_desc_ring + txq->last_used;\n+\n+\t\t\tif (!tx_item->len) {\n+\t\t\t\t(hw->ntb_xstats[NTB_TX_ERRS_ID + off])++;\n+\t\t\t\tgoto end_of_tx;\n+\t\t\t}\n+\t\t\tif (txm->data_len > tx_item->len) {\n+\t\t\t\tNTB_LOG(ERR, \"Data length exceeds buf length.\"\n+\t\t\t\t\t\" Only %u data would be transmitted.\",\n+\t\t\t\t\ttx_item->len);\n+\t\t\t\ttxm->data_len = tx_item->len;\n+\t\t\t}\n+\n+\t\t\t/* translate remote virtual addr to bar virtual addr */\n+\t\t\tbuf_addr = (*hw->ntb_ops->ioremap)(dev, tx_item->addr);\n+\t\t\tif (buf_addr == NULL) {\n+\t\t\t\t(hw->ntb_xstats[NTB_TX_ERRS_ID + off])++;\n+\t\t\t\tNTB_LOG(ERR, \"Null remap addr.\");\n+\t\t\t\tgoto end_of_tx;\n+\t\t\t}\n+\t\t\trte_memcpy(buf_addr, rte_pktmbuf_mtod(txm, void *),\n+\t\t\t\t   txm->data_len);\n+\n+\t\t\ttx_used[nb_mbufs].len = txm->data_len;\n+\t\t\ttx_used[nb_mbufs++].flags = (txq->last_used ==\n+\t\t\t\t\t\t    tx_last) ?\n+\t\t\t\t\t\t    NTB_FLAG_EOP : 0;\n+\n+\t\t\t/* update stats */\n+\t\t\tbytes += txm->data_len;\n+\n+\t\t\ttxm = txm->next;\n+\n+\t\t\tsw_ring[txq->last_used].next_id = (txq->last_used + 1) &\n+\t\t\t\t\t\t  (txq->nb_tx_desc - 1);\n+\t\t\tsw_ring[txq->last_used].last_id = tx_last;\n+\t\t\ttxq->last_used = (txq->last_used + 1) &\n+\t\t\t\t\t (txq->nb_tx_desc - 1);\n+\t\t}\n+\t\ttxq->nb_tx_free -= nb_segs;\n+\t}\n+\n+end_of_tx:\n+\tif (nb_tx) {\n+\t\tuint16_t nb1, nb2;\n+\t\tif (nb_mbufs > txq->nb_tx_desc - last_used) {\n+\t\t\tnb1 = txq->nb_tx_desc - last_used;\n+\t\t\tnb2 = nb_mbufs - txq->nb_tx_desc + last_used;\n+\t\t} else {\n+\t\t\tnb1 = nb_mbufs;\n+\t\t\tnb2 = 0;\n+\t\t}\n+\t\trte_memcpy(txq->tx_used_ring + last_used, tx_used,\n+\t\t\t   sizeof(struct ntb_used) * nb1);\n+\t\trte_memcpy(txq->tx_used_ring, tx_used + nb1,\n+\t\t\t   sizeof(struct ntb_used) * nb2);\n+\t\t*txq->used_cnt = txq->last_used;\n+\t\trte_wmb();\n+\n+\t\t/* update queue stats */\n+\t\thw->ntb_xstats[NTB_TX_BYTES_ID + off] += bytes;\n+\t\thw->ntb_xstats[NTB_TX_PKTS_ID + off] += nb_tx;\n+\t}\n+\n+\treturn nb_tx;\n }\n \n static int\n@@ -586,16 +700,106 @@ ntb_dequeue_bufs(struct rte_rawdev *dev,\n \t\t unsigned int count,\n \t\t rte_rawdev_obj_t context)\n {\n-\t/* Not FIFO. Just for testing memory read. 
*/\n \tstruct ntb_hw *hw = dev->dev_private;\n-\tunsigned int i;\n-\tsize_t size;\n+\tstruct ntb_rx_queue *rxq = hw->rx_queues[(size_t)context];\n+\tstruct ntb_rx_entry *sw_ring = rxq->sw_ring;\n+\tstruct ntb_desc rx_desc[NTB_MAX_DESC_SIZE];\n+\tstruct rte_mbuf *first, *rxm_t;\n+\tstruct rte_mbuf *prev = NULL;\n+\tvolatile struct ntb_used *rx_item;\n+\tuint16_t nb_mbufs = 0;\n+\tuint16_t nb_rx = 0;\n+\tuint64_t bytes = 0;\n+\tuint16_t off, last_avail, used_cnt, used_nb;\n+\tint i;\n+\n+\tif (unlikely(dev->started == 0 || hw->peer_dev_up == 0)) {\n+\t\tNTB_LOG(DEBUG, \"Link is not up\");\n+\t\treturn nb_rx;\n+\t}\n+\n+\tused_cnt = *rxq->used_cnt;\n+\n+\tif (rxq->last_used == used_cnt)\n+\t\treturn nb_rx;\n+\n+\tlast_avail = rxq->last_avail;\n+\tused_nb = (used_cnt - rxq->last_used) & (rxq->nb_rx_desc - 1);\n+\tcount = RTE_MIN(count, used_nb);\n+\tfor (nb_rx = 0; nb_rx < count; nb_rx++) {\n+\t\ti = 0;\n+\t\twhile (true) {\n+\t\t\trx_item = rxq->rx_used_ring + rxq->last_used;\n+\t\t\trxm_t = sw_ring[rxq->last_used].mbuf;\n+\t\t\trxm_t->data_len = rx_item->len;\n+\t\t\trxm_t->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\trxm_t->port = rxq->port_id;\n+\n+\t\t\tif (!i) {\n+\t\t\t\trxm_t->nb_segs = 1;\n+\t\t\t\tfirst = rxm_t;\n+\t\t\t\tfirst->pkt_len = 0;\n+\t\t\t\tbuffers[nb_rx]->buf_addr = rxm_t;\n+\t\t\t} else {\n+\t\t\t\tprev->next = rxm_t;\n+\t\t\t\tfirst->nb_segs++;\n+\t\t\t}\n \n-\tsize = (size_t)context;\n+\t\t\tprev = rxm_t;\n+\t\t\tfirst->pkt_len += prev->data_len;\n+\t\t\trxq->last_used = (rxq->last_used + 1) &\n+\t\t\t\t\t (rxq->nb_rx_desc - 1);\n \n-\tfor (i = 0; i < count; i++)\n-\t\trte_memcpy(buffers[i]->buf_addr, hw->mz[i]->addr, size);\n-\treturn 0;\n+\t\t\t/* alloc new mbuf */\n+\t\t\trxm_t = rte_mbuf_raw_alloc(rxq->mpool);\n+\t\t\tif (unlikely(rxm_t == NULL)) {\n+\t\t\t\tNTB_LOG(ERR, \"recv alloc mbuf failed.\");\n+\t\t\t\tgoto end_of_rx;\n+\t\t\t}\n+\t\t\trxm_t->port = rxq->port_id;\n+\t\t\tsw_ring[rxq->last_avail].mbuf = rxm_t;\n+\t\t\ti++;\n+\n+\t\t\t/* fill new desc */\n+\t\t\trx_desc[nb_mbufs].addr =\n+\t\t\t\t\trte_pktmbuf_mtod(rxm_t, size_t);\n+\t\t\trx_desc[nb_mbufs++].len = rxm_t->buf_len -\n+\t\t\t\t\t\t  RTE_PKTMBUF_HEADROOM;\n+\t\t\trxq->last_avail = (rxq->last_avail + 1) &\n+\t\t\t\t\t  (rxq->nb_rx_desc - 1);\n+\n+\t\t\tif (rx_item->flags & NTB_FLAG_EOP)\n+\t\t\t\tbreak;\n+\t\t}\n+\t\t/* update stats */\n+\t\tbytes += first->pkt_len;\n+\t}\n+\n+end_of_rx:\n+\tif (nb_rx) {\n+\t\tuint16_t nb1, nb2;\n+\t\tif (nb_mbufs > rxq->nb_rx_desc - last_avail) {\n+\t\t\tnb1 = rxq->nb_rx_desc - last_avail;\n+\t\t\tnb2 = nb_mbufs - rxq->nb_rx_desc + last_avail;\n+\t\t} else {\n+\t\t\tnb1 = nb_mbufs;\n+\t\t\tnb2 = 0;\n+\t\t}\n+\t\trte_memcpy(rxq->rx_desc_ring + last_avail, rx_desc,\n+\t\t\t   sizeof(struct ntb_desc) * nb1);\n+\t\trte_memcpy(rxq->rx_desc_ring, rx_desc + nb1,\n+\t\t\t   sizeof(struct ntb_desc) * nb2);\n+\t\t*rxq->avail_cnt = rxq->last_avail;\n+\t\trte_wmb();\n+\n+\t\t/* update queue stats */\n+\t\toff = NTB_XSTATS_NUM * ((size_t)context + 1);\n+\t\thw->ntb_xstats[NTB_RX_BYTES_ID + off] += bytes;\n+\t\thw->ntb_xstats[NTB_RX_PKTS_ID + off] += nb_rx;\n+\t\thw->ntb_xstats[NTB_RX_MISS_ID + off] += (count - nb_rx);\n+\t}\n+\n+\treturn nb_rx;\n }\n \n static void\n@@ -1292,7 +1496,7 @@ ntb_remove(struct rte_pci_device *pci_dev)\n \n static struct rte_pci_driver rte_ntb_pmd = {\n \t.id_table = pci_id_ntb_map,\n-\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_WC_ACTIVATE,\n \t.probe = ntb_probe,\n \t.remove = ntb_remove,\n };\ndiff 
--git a/drivers/raw/ntb/ntb.h b/drivers/raw/ntb/ntb.h\nindex 3cc160680..a561c42d1 100644\n--- a/drivers/raw/ntb/ntb.h\n+++ b/drivers/raw/ntb/ntb.h\n@@ -87,6 +87,7 @@ enum ntb_spad_idx {\n  * @ntb_dev_init: Init ntb dev.\n  * @get_peer_mw_addr: To get the addr of peer mw[mw_idx].\n  * @mw_set_trans: Set translation of internal memory that remote can access.\n+ * @ioremap: Translate the remote host address to bar address.\n  * @get_link_status: get link status, link speed and link width.\n  * @set_link: Set local side up/down.\n  * @spad_read: Read local/peer spad register val.\n@@ -103,6 +104,7 @@ struct ntb_dev_ops {\n \tvoid *(*get_peer_mw_addr)(const struct rte_rawdev *dev, int mw_idx);\n \tint (*mw_set_trans)(const struct rte_rawdev *dev, int mw_idx,\n \t\t\t    uint64_t addr, uint64_t size);\n+\tvoid *(*ioremap)(const struct rte_rawdev *dev, uint64_t addr);\n \tint (*get_link_status)(const struct rte_rawdev *dev);\n \tint (*set_link)(const struct rte_rawdev *dev, bool up);\n \tuint32_t (*spad_read)(const struct rte_rawdev *dev, int spad,\ndiff --git a/drivers/raw/ntb/ntb_hw_intel.c b/drivers/raw/ntb/ntb_hw_intel.c\nindex 0e73f1609..e7f8667cd 100644\n--- a/drivers/raw/ntb/ntb_hw_intel.c\n+++ b/drivers/raw/ntb/ntb_hw_intel.c\n@@ -162,6 +162,27 @@ intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,\n \treturn 0;\n }\n \n+static void *\n+intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)\n+{\n+\tstruct ntb_hw *hw = dev->dev_private;\n+\tvoid *mapped = NULL;\n+\tvoid *base;\n+\tint i;\n+\n+\tfor (i = 0; i < hw->peer_used_mws; i++) {\n+\t\tif (addr >= hw->peer_mw_base[i] &&\n+\t\t    addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {\n+\t\t\tbase = intel_ntb_get_peer_mw_addr(dev, i);\n+\t\t\tmapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +\n+\t\t\t\t (size_t)base);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn mapped;\n+}\n+\n static int\n intel_ntb_get_link_status(const struct rte_rawdev *dev)\n {\n@@ -357,6 +378,7 @@ const struct ntb_dev_ops intel_ntb_ops = {\n \t.ntb_dev_init       = intel_ntb_dev_init,\n \t.get_peer_mw_addr   = intel_ntb_get_peer_mw_addr,\n \t.mw_set_trans       = intel_ntb_mw_set_trans,\n+\t.ioremap            = intel_ntb_ioremap,\n \t.get_link_status    = intel_ntb_get_link_status,\n \t.set_link           = intel_ntb_set_link,\n \t.spad_read          = intel_ntb_spad_read,\n",
    "prefixes": [
        "v6",
        "3/4"
    ]
}
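
The "mbox" field in the payload above links to the raw patch email. As a follow-on sketch (same assumptions as the example at the top), the patch can be fetched and applied to a local DPDK checkout:

import subprocess
import requests

patch = requests.get("https://patches.dpdk.org/api/patches/59778/").json()
mbox = requests.get(patch["mbox"]).text

# Pipe the raw mbox into git am, run from the repository root (illustrative only).
subprocess.run(["git", "am"], input=mbox, text=True, check=True)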