get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

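As an illustration, the patch detail endpoint can be read anonymously. A minimal client sketch (assuming Python with the third-party "requests" package, and using the field names that appear in the response captured below) might look like:

    # Minimal sketch: read this patch from the Patchwork REST API.
    # Assumes the Python "requests" package; an anonymous GET needs no token.
    import requests

    resp = requests.get("http://patches.dpdk.org/api/patches/27579/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])    # patch subject
    print(patch["state"])   # e.g. "superseded"
    print(patch["mbox"])    # raw mbox URL; can be downloaded and applied with git am

The same data is shown in the captured request/response below.
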
GET /api/patches/27579/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 27579,
    "url": "http://patches.dpdk.org/api/patches/27579/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20170814151537.29454-4-santosh.shukla@caviumnetworks.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20170814151537.29454-4-santosh.shukla@caviumnetworks.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20170814151537.29454-4-santosh.shukla@caviumnetworks.com",
    "date": "2017-08-14T15:15:36",
    "name": "[dpdk-dev,v1,3/4] eal/memory: rename memory translational api to _iova types",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "abd7a74a75ceb36908f75c11da6c2325b7f3c8a3",
    "submitter": {
        "id": 480,
        "url": "http://patches.dpdk.org/api/people/480/?format=api",
        "name": "Santosh Shukla",
        "email": "santosh.shukla@caviumnetworks.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20170814151537.29454-4-santosh.shukla@caviumnetworks.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/27579/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/27579/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 30FB190F1;\n\tMon, 14 Aug 2017 17:17:03 +0200 (CEST)",
            "from NAM03-DM3-obe.outbound.protection.outlook.com\n\t(mail-dm3nam03on0069.outbound.protection.outlook.com [104.47.41.69])\n\tby dpdk.org (Postfix) with ESMTP id 766DE7D67\n\tfor <dev@dpdk.org>; Mon, 14 Aug 2017 17:17:00 +0200 (CEST)",
            "from localhost.localdomain (14.140.2.178) by\n\tBN6PR07MB3091.namprd07.prod.outlook.com (10.172.105.9) with Microsoft\n\tSMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id\n\t15.1.1341.17; Mon, 14 Aug 2017 15:16:55 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=CAVIUMNETWORKS.onmicrosoft.com; s=selector1-cavium-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n\tbh=Xr+DClU2s4/GwdQ5mBuR3bfljLmzynpbbfjqdfhP9ks=;\n\tb=E1hC5rie89i+w0vMlcV7mNZYcQKgWo6m6hjxGzQxq5JUyzX2EpbxyG0SHo+UOJ8Cgq0gZZ35oekQ6Jpb3070cOCF8ObB6BaqYesalx/TgnHw1EbstcJJVgfC+aZ45FxndjlG8fN24Xyzq3vcupMqUs8kH/p/cyo72jhMQAFq38A=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Santosh.Shukla@cavium.com; ",
        "From": "Santosh Shukla <santosh.shukla@caviumnetworks.com>",
        "To": "dev@dpdk.org",
        "Cc": "olivier.matz@6wind.com, thomas@monjalon.net,\n\tjerin.jacob@caviumnetworks.com, hemant.agrawal@nxp.com,\n\tSantosh Shukla <santosh.shukla@caviumnetworks.com>",
        "Date": "Mon, 14 Aug 2017 20:45:36 +0530",
        "Message-Id": "<20170814151537.29454-4-santosh.shukla@caviumnetworks.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "In-Reply-To": "<20170814151537.29454-1-santosh.shukla@caviumnetworks.com>",
        "References": "<20170814151537.29454-1-santosh.shukla@caviumnetworks.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[14.140.2.178]",
        "X-ClientProxiedBy": "BMXPR01CA0040.INDPRD01.PROD.OUTLOOK.COM (10.174.214.26) To\n\tBN6PR07MB3091.namprd07.prod.outlook.com (10.172.105.9)",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "d0271b14-caa9-487c-7d81-08d4e327811c",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(300000500095)(300135000095)(300000501095)(300135300095)(300000502095)(300135100095)(22001)(2017030254152)(300000503095)(300135400095)(201703131423075)(201703031133081)(201702281549075)(300000504095)(300135200095)(300000505095)(300135600095)(300000506095)(300135500095);\n\tSRVR:BN6PR07MB3091; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BN6PR07MB3091;\n\t3:oAnNoKlbNm85brcZZc4mPxP+Tl8AkPHvHp2b1DJ01Rpvgt+vz8nB59vwq2TPYh7IU8ihMOYJpgh1nc7e0hTcGqDlBe4K9W9QNMAHPMGqtF/kedGfDYktQBGThNV1F/KUqkSw4hynEOM/UH2Rm5UriJ/c0loivGi53cAJhqEkvb/cvcbJqqAgxgEHvGmtQIqLnOw4ZwW5MX540MqY1Ou3L7uoFWHoMu8/yy4QesrnHS3xvSgzSz75LScnD02Iks17;\n\t25:BG6K+WX+SNGDoOKxCtYcbYJ0TaOmB2t0KB9FlHddq9fVJTvF82Rhvew6f8s0G6weII/JUv4YpLqWtywHw+r2Hx7YrFlh/HIMqIivnh2eKSlht9WCGtkAFfQT7pJZihdgOBYHxybJ2S0UNP1O7wU+qKyuHUAAtJ0I1YFQOXDGBvotDo2eg3oSWeaABY0bNgUqRNLqhDz2E8QMKK9vv5VPmyotCzAXaqKbm3MQLhYZsm43aKtHHND0RQHBE+qmQ5qF0looc2ir04cON7V5aug9dMT1UI1sqYd08HouVJk/cZ9f7CMBN1hVlw2X6epTE8iU2uUn27dzDUXxT/Zrc6UcUw==;\n\t31:kW+W47ElOra20BbmRUj3asHbHXmAuAJBs/ZVFPkUY0TOo5OODTw7hQBeDvN+PWzHC542ndzvZgfMXFJXNbRnl8fVb0wpeRjyWAbKWYLmHRzAzYRsDYi1z3UCt+8kgyXc4W4V09LihSRdTQnn5UaFYuWwxiYZKKo9a0SpBy++h0Z0+BUNcYe3xEpiABIXyPQOPbZlnYVIk5UtgZISQ//axW2GIbbj7loz+zpY3W9R+NI=",
            "1; BN6PR07MB3091;\n\t20:cE+kAisrYh93dUI7v78zc8S8w18T0zcd17lin7nuqIZ5h/hp4fPA58U7y8VWHWfUExMUZRm4IOav2zFuQihvGsa2JyHzOU6pH9mEMtXbRfcS4YGNoyob3EUKvhuSWU0B3F3WShw+wPkqdHTX7LRyvLQcHc+hbZpjlABzbTa6meGeUY6yzTrlwglk9k5AjKPC+trnFHbQ6MNBwcVyHmCHXm1ll3Cee25UMgfvzVVHCgznnQWlcAk1NjSL/UzhvwPLUVVQtA49jOTg/7B6KatrQB1ngjqnEsmNKoork7+fKLqGj3DfGUx5VHM/8MkFTtSmgTLvKRP9CGMQGxyJmCo2L/2G5i6xa5MQE+BklPEqex1COOInBHqY7DoFk9qKnVhJ/wnczZPKvgRJ27C18G9gnRP7ryjvSrh6U4v4IvNhUZMR83Yd/N6yT2B288cVYQOlk2Q9fRqcL3vK1djdZyfz5sxcxtmCfav5/EibwGiyqNnbthPhoETtbbITbQI9KN2KTTfV2LgXYJNugpUtlJ2HyrnMLhiJicEix2DL/nDiRfUHyqIJSt0AVF5KyguHy/QBGnz8fMCcR39EMB/3Cs09Bcwxxvek7RDtUsOBDOTI4ro=;\n\t4:A0aKUpQFXcIX4zdonI9cLghnqgb1WHc8Z32EZXuBNluPFz8MI19cXARcG9GKaz7RYwdQ5wjWGriNvazLfUbCdrULjzEd/vi4gX8r5QKczkMzNmcgpO22WF698W4cKJAJeLu7JQCKzO+Qlh8R8DwX7B9IX5BgeqVBIznyMIm3RFcAnajoZsExejFSpY7yhbBuDtiLS+ezo3Mu+O3TLxxgnR9y2eXiFqG0ebQCCBQ4QymlAPSadrlw+GYTj07pXEZrVCChQXTOGzFZLrnmkqsCm6JnoS5pOsV6tyYi2Ig0nSM=",
            "=?us-ascii?Q?1; BN6PR07MB3091;\n\t23:KPMD4QpTK7I3c/+CF/2h2Z30dqIwxbZDLMy3yG26D?=\n\tLTFwcEu7VXKf/PtQlxMEqvg1c3ePa2+wLSkNPBwEsvRXXw/Qs+RbMmoC7N8E/AUB38Pol+hveMLEtz0x7cW4xJkC2Gu9wjP7BFjnY5YGGxEH3Lfo+81lxf5FfRlxraapmOzrnOXzxzQFPN0IkZDpv41Nrmm8ZCzQoCnP+Wf5Y4vKBjmLjilGydxKXKuiKxSqb0G/0PvpTq6fbv5ee30JKaKWGObwhn89KaZ5DLDQY+Gj361HuQ/dwaTAwj8mXxAn+NyvLbOBe5FYNoWw1AZekUUctBSf9SWKhVXM69Q47YsoHtQ5GC4RSvcYK6LaxxVL7UHHAsiMQJjYZA61W5AWJmup3FlJKcE03L4c+bH2PxYOiTuXtTaTw7mLHkk8gMUXUWUglRwAlbgBnIP9rfjMpufiJFlSBlU4QjnvP523bEhOCzXSkNe13cYfAcRInHS2ZWRRv6LoAW4EROij7z2FvRa5GRmrU81u+4EApUscUlJ9yeebemd79xbOIvGUY9p/PcdGxptXeRKSctWIV0uEbWiis0CLj5axeBQpn2SpLar1/dvpdUpZEnbTaZA52757WFHqrCd+Hq3mteCnGGtKZFlVwdWeVySzbNqSsFOcgQAQuN72X3/9v8A5vRBo6sTEsXz1YHlEl3b1qSLJihp+sklSipTHCeFvMjhcuIbFYKYzxRLLFjrFMgxnjs5xXGdEE+ZvQ0iEuBXkpZyzq54c+zCj/s7mSvkUNFM8R9TnTOeDZprvMPT0629E6XSj9jP2L2k8gOjBPeYvOCIfKdNDAGiFvpKefd8O+PPQCS9KDD/9j1VKv4dK2pCUSM4cLZ24BcILSKOc0sfCFJ1ALU/mzDMkABnDZsLccbU/LOi8Fy9ZfXgnhID1Fm/gvOfe095uYjypCmSJzscjlms2S3a5kjUzQBbJOV6XtzmwTapQFMoiqyOHy2UguRlkqkZOc0rNhJUtYSnOHjLucll5dpVmN/IbvysJXCh6Bf/paxaGVilcC8AmpsO5ovDP1OCqawWOuT6Dqpg+SgKNEujSKwR0JAgeB/f8cgTHNzZMsr9w6aDYVDbwVRpJVQbeb3+PGpTDWARc82eTYW+V1DbbvvUIW1jucNS0vNSFl8yC3a1ZBzbblI0eQ9QzjLClH0Z5diVOjwwvNemOHmIHmvVJpM4ZNboZCpYhuPU9SOt9yYPKB1GDJLydkdnGpWgNilm4cgOYIoSr99voBnXdRRzzOklPsy+Ji0jDdz83L6KZn5Mpneo7YwrmJpjdeAkCEDmKCcTOV4PN913e7cl3NjbBIXuy9mCrJQyshgr10tIFHazVUj6QUHACb371bnV3X0ynmZ/erx/k4BQqKu3oMQyGjyUPlNp",
            "1; BN6PR07MB3091;\n\t6:Zp1MgND4jbQZQDkjVZEOJRNcX9JnShprqRvjjbKBjnQgAIAJqBbb922ldgMm/su95YL2czGzWun4PSblIRaImJ7dHjIiBvjV4LoHgYgG67lzlhTCY4Koz/bQ1NG9oHMVluZwAq1FAIlZ9UVuWYkWRBLhcvsas6N2B2GVeethBNE3t5irLSt8aLOTIADpKIWXItZ820Q0wcIHvpY5tm+exDkvSxn3qBpOEin0iuwd0p0F07UP7dSrOCjRCq+DhUri4Aw7Did4tQFzFMUAhFaOO2RX9L/eNqIQcwMe1JHT9+A06PNvClK83IkrL42LbShiz/VwbEcNIH5ULObf/wIZxg==;\n\t5:ReHfC8L5+JcOMx1ifakutT9MMpBZ/UHicE8sLK/NocYITo2sp49XXgypMH2fyQezlbbrdYWbu8WY5mxzq3+AnnyzPNLgYjHx7tkEJQLTLaVQaAOdd7Ka2oV7nGhsc+GEtS1y/GoopbH2oRun9RgDZQ==;\n\t24:cbVKEanLcaImsGjXd9UovSV4hpen+qqMPXpk2UZffKXyNb1hmRVTS9hNiXNYUToLIvOsj/rMlw+hY0xq3nS/6YKSyJ3r5Swl8R0VZrLzvMg=;\n\t7:m283mk/IOOS4zDZsOhaFynnA/uxu9R5tWqkA/nJupxmPKG0EnTtCOf+tOVXtHQm+qddLH8Dd6u4L4ygl7XZBHL9paCcG37dgl+x1NzZaKx/3rmHFdNAhILzxjB2ysyyrXv1/oTxe2kO+ZW7LxApubgS1z9xEWmJVsMZtNl2zuH2v6jGsEucSZEgXQ4b+svcFPGfMMuz5WOVAt6i02rouqafFQxz4HsE/hEY1t2jgOME="
        ],
        "X-MS-TrafficTypeDiagnostic": "BN6PR07MB3091:",
        "X-Exchange-Antispam-Report-Test": "UriScan:(275809806118684);",
        "X-Microsoft-Antispam-PRVS": "<BN6PR07MB3091514A091FFB68E9A995D3EA8C0@BN6PR07MB3091.namprd07.prod.outlook.com>",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(100000700101)(100105000095)(100000701101)(100105300095)(100000702101)(100105100095)(6040450)(601004)(2401047)(5005006)(8121501046)(93006095)(100000703101)(100105400095)(10201501046)(3002001)(6041248)(20161123558100)(20161123555025)(20161123560025)(201703131423075)(201702281528075)(201703061421075)(201703061406153)(20161123564025)(20161123562025)(6072148)(201708071742011)(100000704101)(100105200095)(100000705101)(100105500095);\n\tSRVR:BN6PR07MB3091; BCL:0; PCL:0;\n\tRULEID:(100000800101)(100110000095)(100000801101)(100110300095)(100000802101)(100110100095)(100000803101)(100110400095)(100000804101)(100110200095)(100000805101)(100110500095);\n\tSRVR:BN6PR07MB3091; ",
        "X-Forefront-PRVS": "039975700A",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(7370300001)(4630300001)(6069001)(6009001)(189002)(199003)(81156014)(2906002)(6512007)(478600001)(2950100002)(42882006)(6916009)(50466002)(48376002)(53946003)(47776003)(107886003)(5890100001)(36756003)(5660300001)(5009440100003)(7350300001)(81166006)(8656003)(8676002)(53936002)(110136004)(6506006)(3846002)(6116002)(25786009)(2361001)(42186005)(4326008)(105586002)(2351001)(106356001)(189998001)(6486002)(50226002)(68736007)(1076002)(97736004)(575784001)(50986999)(76176999)(33646002)(7736002)(6666003)(5003940100001)(305945005)(72206003)(66066001)(101416001)(110426004)(21314002)(579004)(559001);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BN6PR07MB3091; H:localhost.localdomain;\n\tFPR:; \n\tSPF:None; PTR:InfoNoRecords; MX:1; A:1; LANG:en; ",
        "Received-SPF": "None (protection.outlook.com: cavium.com does not designate\n\tpermitted sender hosts)",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "caviumnetworks.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "14 Aug 2017 15:16:55.2002\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR07MB3091",
        "Subject": "[dpdk-dev] [PATCH v1 3/4] eal/memory: rename memory translational\n\tapi to _iova types",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The following api renamed from:\n\nrte_mempool_populate_phys()\nrte_mempool_populate_phys_tab()\nrte_eal_using_phys_addrs()\nrte_mem_virt2phy()\nrte_dump_physmem_layout()\nrte_eal_get_physmem_layout()\nrte_eal_get_physmem_size()\nrte_malloc_virt2phy()\nrte_mem_phy2mch()\n\nTo the following iova types api:\n\nrte_mempool_populate_iova()\nrte_mempool_populate_iova_tab()\nrte_eal_using_iova_addrs()\nrte_mem_virt2iova()\nrte_dump_iovamem_layout()\nrte_eal_get_iovamem_layout()\nrte_eal_get_iovamem_size()\nrte_malloc_virt2iova()\nrte_mem_phy2iova()\n\nSigned-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>\n---\n app/proc_info/main.c                             |  2 +-\n app/test-crypto-perf/cperf_test_vector_parsing.c |  4 ++--\n app/test-crypto-perf/cperf_test_vectors.c        |  6 +++---\n app/test-pmd/cmdline.c                           |  2 +-\n drivers/bus/fslmc/fslmc_vfio.c                   |  2 +-\n drivers/bus/fslmc/portal/dpaa2_hw_pvt.h          |  4 ++--\n drivers/crypto/qat/qat_qp.c                      |  2 +-\n drivers/net/ark/ark_ethdev_rx.c                  |  4 ++--\n drivers/net/ark/ark_ethdev_tx.c                  |  4 ++--\n drivers/net/bnxt/bnxt_ethdev.c                   |  8 ++++----\n drivers/net/bnxt/bnxt_hwrm.c                     | 14 +++++++-------\n drivers/net/bnxt/bnxt_ring.c                     |  4 ++--\n drivers/net/bnxt/bnxt_vnic.c                     |  4 ++--\n drivers/net/e1000/em_rxtx.c                      |  4 ++--\n drivers/net/e1000/igb_rxtx.c                     |  4 ++--\n drivers/net/fm10k/fm10k_ethdev.c                 |  4 ++--\n drivers/net/i40e/i40e_ethdev.c                   |  2 +-\n drivers/net/i40e/i40e_fdir.c                     |  2 +-\n drivers/net/i40e/i40e_rxtx.c                     |  8 ++++----\n drivers/net/ixgbe/ixgbe_rxtx.c                   |  4 ++--\n drivers/net/liquidio/lio_rxtx.c                  |  2 +-\n drivers/net/mlx4/mlx4.c                          |  2 +-\n drivers/net/mlx5/mlx5_mr.c                       |  2 +-\n drivers/net/sfc/sfc.c                            |  2 +-\n drivers/net/sfc/sfc_tso.c                        |  2 +-\n examples/l2fwd-crypto/main.c                     |  2 +-\n lib/librte_cryptodev/rte_cryptodev.c             |  2 +-\n lib/librte_eal/bsdapp/eal/eal.c                  |  2 +-\n lib/librte_eal/bsdapp/eal/eal_memory.c           |  2 +-\n lib/librte_eal/bsdapp/eal/rte_eal_version.map    | 12 ++++++------\n lib/librte_eal/common/eal_common_memory.c        |  6 +++---\n lib/librte_eal/common/eal_common_memzone.c       |  4 ++--\n lib/librte_eal/common/eal_private.h              |  2 +-\n lib/librte_eal/common/include/rte_malloc.h       |  2 +-\n lib/librte_eal/common/include/rte_memory.h       | 12 ++++++------\n lib/librte_eal/common/rte_malloc.c               |  2 +-\n lib/librte_eal/linuxapp/eal/eal.c                |  2 +-\n lib/librte_eal/linuxapp/eal/eal_memory.c         |  8 ++++----\n lib/librte_eal/linuxapp/eal/eal_pci.c            |  4 ++--\n lib/librte_eal/linuxapp/eal/eal_vfio.c           |  6 +++---\n lib/librte_eal/linuxapp/eal/rte_eal_version.map  | 12 ++++++------\n lib/librte_mempool/rte_mempool.c                 | 24 ++++++++++++------------\n lib/librte_mempool/rte_mempool.h                 |  4 ++--\n lib/librte_mempool/rte_mempool_version.map       |  4 ++--\n lib/librte_vhost/vhost_user.c                    |  4 ++--\n test/test/commands.c                             |  2 +-\n test/test/test_malloc.c                          |  4 ++--\n 
test/test/test_memory.c                          |  6 +++---\n test/test/test_mempool.c                         |  4 ++--\n test/test/test_memzone.c                         | 10 +++++-----\n 50 files changed, 120 insertions(+), 120 deletions(-)",
    "diff": "diff --git a/app/proc_info/main.c b/app/proc_info/main.c\nindex 8b753a2ee..16df6d4b1 100644\n--- a/app/proc_info/main.c\n+++ b/app/proc_info/main.c\n@@ -297,7 +297,7 @@ static void\n meminfo_display(void)\n {\n \tprintf(\"----------- MEMORY_SEGMENTS -----------\\n\");\n-\trte_dump_physmem_layout(stdout);\n+\trte_dump_iovamem_layout(stdout);\n \tprintf(\"--------- END_MEMORY_SEGMENTS ---------\\n\");\n \n \tprintf(\"------------ MEMORY_ZONES -------------\\n\");\ndiff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c\nindex 148a60414..2e4e10a85 100644\n--- a/app/test-crypto-perf/cperf_test_vector_parsing.c\n+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c\n@@ -390,7 +390,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,\n \t} else if (strstr(key_token, \"aad\")) {\n \t\trte_free(vector->aad.data);\n \t\tvector->aad.data = data;\n-\t\tvector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);\n+\t\tvector->aad.phys_addr = rte_malloc_virt2iova(vector->aad.data);\n \t\tif (tc_found)\n \t\t\tvector->aad.length = data_length;\n \t\telse {\n@@ -405,7 +405,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,\n \t} else if (strstr(key_token, \"digest\")) {\n \t\trte_free(vector->digest.data);\n \t\tvector->digest.data = data;\n-\t\tvector->digest.phys_addr = rte_malloc_virt2phy(\n+\t\tvector->digest.phys_addr = rte_malloc_virt2iova(\n \t\t\tvector->digest.data);\n \t\tif (tc_found)\n \t\t\tvector->digest.length = data_length;\ndiff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c\nindex e51dcc3f1..fa911ff69 100644\n--- a/app/test-crypto-perf/cperf_test_vectors.c\n+++ b/app/test-crypto-perf/cperf_test_vectors.c\n@@ -498,7 +498,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)\n \t\t\t\treturn NULL;\n \t\t\t}\n \t\t\tt_vec->digest.phys_addr =\n-\t\t\t\trte_malloc_virt2phy(t_vec->digest.data);\n+\t\t\t\trte_malloc_virt2iova(t_vec->digest.data);\n \t\t\tt_vec->digest.length = options->digest_sz;\n \t\t\tmemcpy(t_vec->digest.data, digest,\n \t\t\t\t\toptions->digest_sz);\n@@ -531,7 +531,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)\n \t\t\t\treturn NULL;\n \t\t\t}\n \t\t\tmemcpy(t_vec->aad.data, aad, options->aead_aad_sz);\n-\t\t\tt_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);\n+\t\t\tt_vec->aad.phys_addr = rte_malloc_virt2iova(t_vec->aad.data);\n \t\t\tt_vec->aad.length = options->aead_aad_sz;\n \t\t} else {\n \t\t\tt_vec->aad.data = NULL;\n@@ -546,7 +546,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)\n \t\t\treturn NULL;\n \t\t}\n \t\tt_vec->digest.phys_addr =\n-\t\t\t\trte_malloc_virt2phy(t_vec->digest.data);\n+\t\t\t\trte_malloc_virt2iova(t_vec->digest.data);\n \t\tt_vec->digest.length = options->digest_sz;\n \t\tmemcpy(t_vec->digest.data, digest, options->digest_sz);\n \t\tt_vec->data.aead_offset = 0;\ndiff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c\nindex cd8c35850..114d5cdb6 100644\n--- a/app/test-pmd/cmdline.c\n+++ b/app/test-pmd/cmdline.c\n@@ -8039,7 +8039,7 @@ static void cmd_dump_parsed(void *parsed_result,\n \tstruct cmd_dump_result *res = parsed_result;\n \n \tif (!strcmp(res->dump, \"dump_physmem\"))\n-\t\trte_dump_physmem_layout(stdout);\n+\t\trte_dump_iovamem_layout(stdout);\n \telse if (!strcmp(res->dump, \"dump_memzone\"))\n \t\trte_memzone_dump(stdout);\n \telse if (!strcmp(res->dump, \"dump_struct_sizes\"))\ndiff --git a/drivers/bus/fslmc/fslmc_vfio.c 
b/drivers/bus/fslmc/fslmc_vfio.c\nindex 45e592770..fc4f967c4 100644\n--- a/drivers/bus/fslmc/fslmc_vfio.c\n+++ b/drivers/bus/fslmc/fslmc_vfio.c\n@@ -201,7 +201,7 @@ int rte_fslmc_vfio_dmamap(void)\n \tif (is_dma_done)\n \t\treturn 0;\n \n-\tmemseg = rte_eal_get_physmem_layout();\n+\tmemseg = rte_eal_get_iovamem_layout();\n \tif (memseg == NULL) {\n \t\tFSLMC_VFIO_LOG(ERR, \"Cannot get physical layout.\");\n \t\treturn -ENODEV;\ndiff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\nindex 15e3878eb..84189c0f4 100644\n--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\n+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\n@@ -275,7 +275,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr) __attribute__((unused));\n /* todo - this is costly, need to write a fast coversion routine */\n static void *dpaa2_mem_ptov(iova_addr_t paddr)\n {\n-\tconst struct rte_memseg *memseg = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *memseg = rte_eal_get_iovamem_layout();\n \tint i;\n \n \tfor (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {\n@@ -290,7 +290,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr)\n static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));\n static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr)\n {\n-\tconst struct rte_memseg *memseg = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *memseg = rte_eal_get_iovamem_layout();\n \tint i;\n \n \tfor (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {\ndiff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c\nindex 5048d2144..b25419f30 100644\n--- a/drivers/crypto/qat/qat_qp.c\n+++ b/drivers/crypto/qat/qat_qp.c\n@@ -106,7 +106,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,\n \n \tPMD_DRV_LOG(DEBUG, \"Allocate memzone for %s, size %u on socket %u\",\n \t\t\t\t\tqueue_name, queue_size, socket_id);\n-\tms = rte_eal_get_physmem_layout();\n+\tms = rte_eal_get_iovamem_layout();\n \tswitch (ms[0].hugepage_sz) {\n \tcase(RTE_PGSIZE_2M):\n \t\tmemzone_flags = RTE_MEMZONE_2MB;\ndiff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c\nindex 90cf304c0..eb583915b 100644\n--- a/drivers/net/ark/ark_ethdev_rx.c\n+++ b/drivers/net/ark/ark_ethdev_rx.c\n@@ -100,11 +100,11 @@ eth_ark_rx_hw_setup(struct rte_eth_dev *dev,\n \tiova_addr_t phys_addr_q_base;\n \tiova_addr_t phys_addr_prod_index;\n \n-\tqueue_base = rte_malloc_virt2phy(queue);\n+\tqueue_base = rte_malloc_virt2iova(queue);\n \tphys_addr_prod_index = queue_base +\n \t\toffsetof(struct ark_rx_queue, prod_index);\n \n-\tphys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);\n+\tphys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);\n \n \t/* Verify HW */\n \tif (ark_mpu_verify(queue->mpu, sizeof(iova_addr_t))) {\ndiff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c\nindex 578cb09b8..e798e4786 100644\n--- a/drivers/net/ark/ark_ethdev_tx.c\n+++ b/drivers/net/ark/ark_ethdev_tx.c\n@@ -318,8 +318,8 @@ eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)\n \tif (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))\n \t\treturn -1;\n \n-\tqueue_base = rte_malloc_virt2phy(queue);\n-\tring_base = rte_malloc_virt2phy(queue->meta_q);\n+\tqueue_base = rte_malloc_virt2iova(queue);\n+\tring_base = rte_malloc_virt2iova(queue->meta_q);\n \tcons_index_addr =\n \t\tqueue_base + offsetof(struct ark_tx_queue, cons_index);\n \ndiff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c\nindex cb2ce334b..acc3236c2 
100644\n--- a/drivers/net/bnxt/bnxt_ethdev.c\n+++ b/drivers/net/bnxt/bnxt_ethdev.c\n@@ -1679,8 +1679,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \t\t\tRTE_LOG(WARNING, PMD,\n \t\t\t\t\"Memzone physical address same as virtual.\\n\");\n \t\t\tRTE_LOG(WARNING, PMD,\n-\t\t\t\t\"Using rte_mem_virt2phy()\\n\");\n-\t\t\tmz_phys_addr = rte_mem_virt2phy(mz->addr);\n+\t\t\t\t\"Using rte_mem_virt2iova()\\n\");\n+\t\t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n \t\t\tif (mz_phys_addr == 0) {\n \t\t\t\tRTE_LOG(ERR, PMD,\n \t\t\t\t\"unable to map address to physical memory\\n\");\n@@ -1714,8 +1714,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \t\t\tRTE_LOG(WARNING, PMD,\n \t\t\t\t\"Memzone physical address same as virtual.\\n\");\n \t\t\tRTE_LOG(WARNING, PMD,\n-\t\t\t\t\"Using rte_mem_virt2phy()\\n\");\n-\t\t\tmz_phys_addr = rte_mem_virt2phy(mz->addr);\n+\t\t\t\t\"Using rte_mem_virt2iova()\\n\");\n+\t\t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n \t\t\tif (mz_phys_addr == 0) {\n \t\t\t\tRTE_LOG(ERR, PMD,\n \t\t\t\t\"unable to map address to physical memory\\n\");\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c\nindex e710e6367..3f420802c 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.c\n+++ b/drivers/net/bnxt/bnxt_hwrm.c\n@@ -270,7 +270,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,\n \t\tif (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))\n \t\t\tmask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;\n \t\treq.vlan_tag_tbl_addr = rte_cpu_to_le_16(\n-\t\t\t rte_mem_virt2phy(vlan_table));\n+\t\t\t rte_mem_virt2iova(vlan_table));\n \t\treq.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);\n \t}\n \treq.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |\n@@ -311,7 +311,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,\n \treq.fid = rte_cpu_to_le_16(fid);\n \n \treq.vlan_tag_mask_tbl_addr =\n-\t\trte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));\n+\t\trte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));\n \treq.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);\n \n \trc = bnxt_hwrm_send_message(bp, &req, sizeof(req));\n@@ -612,7 +612,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)\n \t\t}\n \t\trte_mem_lock_page(bp->hwrm_cmd_resp_addr);\n \t\tbp->hwrm_cmd_resp_dma_addr =\n-\t\t\trte_mem_virt2phy(bp->hwrm_cmd_resp_addr);\n+\t\t\trte_mem_virt2iova(bp->hwrm_cmd_resp_addr);\n \t\tif (bp->hwrm_cmd_resp_dma_addr == 0) {\n \t\t\tRTE_LOG(ERR, PMD,\n \t\t\t\"Unable to map response buffer to physical memory.\\n\");\n@@ -638,7 +638,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)\n \t\t}\n \t\trte_mem_lock_page(bp->hwrm_short_cmd_req_addr);\n \t\tbp->hwrm_short_cmd_req_dma_addr =\n-\t\t\trte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);\n+\t\t\trte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);\n \t\tif (bp->hwrm_short_cmd_req_dma_addr == 0) {\n \t\t\trte_free(bp->hwrm_short_cmd_req_addr);\n \t\t\tRTE_LOG(ERR, PMD,\n@@ -1683,7 +1683,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)\n \tif (bp->hwrm_cmd_resp_addr == NULL)\n \t\treturn -ENOMEM;\n \tbp->hwrm_cmd_resp_dma_addr =\n-\t\trte_mem_virt2phy(bp->hwrm_cmd_resp_addr);\n+\t\trte_mem_virt2iova(bp->hwrm_cmd_resp_addr);\n \tif (bp->hwrm_cmd_resp_dma_addr == 0) {\n \t\tRTE_LOG(ERR, PMD,\n \t\t\t\"unable to map response address to physical memory\\n\");\n@@ -2489,7 +2489,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)\n \t\t\t page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));\n \treq.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);\n \treq.req_buf_page_addr[0] 
=\n-\t\trte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));\n+\t\trte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));\n \tif (req.req_buf_page_addr[0] == 0) {\n \t\tRTE_LOG(ERR, PMD,\n \t\t\t\"unable to map buffer address to physical memory\\n\");\n@@ -2861,7 +2861,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,\n \n \treq.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);\n \treq.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);\n-\treq.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));\n+\treq.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));\n \n \tif (req.vnic_id_tbl_addr == 0) {\n \t\tRTE_LOG(ERR, PMD,\ndiff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c\nindex 8e83e4704..1e6db4495 100644\n--- a/drivers/net/bnxt/bnxt_ring.c\n+++ b/drivers/net/bnxt/bnxt_ring.c\n@@ -177,10 +177,10 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,\n \t\tRTE_LOG(WARNING, PMD,\n \t\t\t\"Memzone physical address same as virtual.\\n\");\n \t\tRTE_LOG(WARNING, PMD,\n-\t\t\t\"Using rte_mem_virt2phy()\\n\");\n+\t\t\t\"Using rte_mem_virt2iova()\\n\");\n \t\tfor (sz = 0; sz < total_alloc_len; sz += getpagesize())\n \t\t\trte_mem_lock_page(((char *)mz->addr) + sz);\n-\t\tmz_phys_addr = rte_mem_virt2phy(mz->addr);\n+\t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n \t\tif (mz_phys_addr == 0) {\n \t\t\tRTE_LOG(ERR, PMD,\n \t\t\t\"unable to map ring address to physical memory\\n\");\ndiff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c\nindex 90809f0f5..9002f6b30 100644\n--- a/drivers/net/bnxt/bnxt_vnic.c\n+++ b/drivers/net/bnxt/bnxt_vnic.c\n@@ -196,8 +196,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)\n \t\tRTE_LOG(WARNING, PMD,\n \t\t\t\"Memzone physical address same as virtual.\\n\");\n \t\tRTE_LOG(WARNING, PMD,\n-\t\t\t\"Using rte_mem_virt2phy()\\n\");\n-\t\tmz_phys_addr = rte_mem_virt2phy(mz->addr);\n+\t\t\t\"Using rte_mem_virt2iova()\\n\");\n+\t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n \t\tif (mz_phys_addr == 0) {\n \t\t\tRTE_LOG(ERR, PMD,\n \t\t\t\"unable to map vnic address to physical memory\\n\");\ndiff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c\nindex 31819c5bd..e8316bf97 100644\n--- a/drivers/net/e1000/em_rxtx.c\n+++ b/drivers/net/e1000/em_rxtx.c\n@@ -1289,7 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->port_id = dev->data->port_id;\n \n \ttxq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));\n-\ttxq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);\n+\ttxq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);\n \ttxq->tx_ring = (struct e1000_data_desc *) tz->addr;\n \n \tPMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64,\n@@ -1416,7 +1416,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,\n \n \trxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));\n \trxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));\n-\trxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);\n+\trxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);\n \trxq->rx_ring = (struct e1000_rx_desc *) rz->addr;\n \n \tPMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64,\ndiff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c\nindex 1c80a2a1b..02cc0a505 100644\n--- a/drivers/net/e1000/igb_rxtx.c\n+++ b/drivers/net/e1000/igb_rxtx.c\n@@ -1530,7 +1530,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->port_id = 
dev->data->port_id;\n \n \ttxq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));\n-\ttxq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);\n+\ttxq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);\n \n \ttxq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;\n \t/* Allocate software ring */\n@@ -1667,7 +1667,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \trxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));\n \trxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));\n-\trxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);\n+\trxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);\n \trxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;\n \n \t/* Allocate software ring. */\ndiff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c\nindex e60d3a365..f5a0247ec 100644\n--- a/drivers/net/fm10k/fm10k_ethdev.c\n+++ b/drivers/net/fm10k/fm10k_ethdev.c\n@@ -1887,7 +1887,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,\n \t\treturn -ENOMEM;\n \t}\n \tq->hw_ring = mz->addr;\n-\tq->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);\n+\tq->hw_ring_phys_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);\n \n \t/* Check if number of descs satisfied Vector requirement */\n \tif (!rte_is_power_of_2(nb_desc)) {\n@@ -2047,7 +2047,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,\n \t\treturn -ENOMEM;\n \t}\n \tq->hw_ring = mz->addr;\n-\tq->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);\n+\tq->hw_ring_phys_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);\n \n \t/*\n \t * allocate memory for the RS bit tracker. Enough slots to hold the\ndiff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c\nindex 5f26e24a3..96339fdc6 100644\n--- a/drivers/net/i40e/i40e_ethdev.c\n+++ b/drivers/net/i40e/i40e_ethdev.c\n@@ -3741,7 +3741,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,\n \n \tmem->size = size;\n \tmem->va = mz->addr;\n-\tmem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);\n+\tmem->pa = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);\n \tmem->zone = (const void *)mz;\n \tPMD_DRV_LOG(DEBUG,\n \t\t\"memzone %s allocated with physical address: %\"PRIu64,\ndiff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c\nindex 8013add43..9fd728dfa 100644\n--- a/drivers/net/i40e/i40e_fdir.c\n+++ b/drivers/net/i40e/i40e_fdir.c\n@@ -249,7 +249,7 @@ i40e_fdir_setup(struct i40e_pf *pf)\n \t\tgoto fail_mem;\n \t}\n \tpf->fdir.prg_pkt = mz->addr;\n-\tpf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);\n+\tpf->fdir.dma_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);\n \n \tpf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);\n \tPMD_DRV_LOG(INFO, \"FDIR setup successfully, with programming queue %u.\",\ndiff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c\nindex d42c23c05..f3269f981 100644\n--- a/drivers/net/i40e/i40e_rxtx.c\n+++ b/drivers/net/i40e/i40e_rxtx.c\n@@ -1822,7 +1822,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t/* Zero all the descriptors in the ring. 
*/\n \tmemset(rz->addr, 0, ring_size);\n \n-\trxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);\n+\trxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);\n \trxq->rx_ring = (union i40e_rx_desc *)rz->addr;\n \n \tlen = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);\n@@ -2159,7 +2159,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->vsi = vsi;\n \ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n \n-\ttxq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);\n+\ttxq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);\n \ttxq->tx_ring = (struct i40e_tx_desc *)tz->addr;\n \n \t/* Allocate software ring */\n@@ -2675,7 +2675,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)\n \ttxq->reg_idx = pf->fdir.fdir_vsi->base_queue;\n \ttxq->vsi = pf->fdir.fdir_vsi;\n \n-\ttxq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);\n+\ttxq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);\n \ttxq->tx_ring = (struct i40e_tx_desc *)tz->addr;\n \t/*\n \t * don't need to allocate software ring and reset for the fdir\n@@ -2731,7 +2731,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)\n \trxq->reg_idx = pf->fdir.fdir_vsi->base_queue;\n \trxq->vsi = pf->fdir.fdir_vsi;\n \n-\trxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);\n+\trxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);\n \trxq->rx_ring = (union i40e_rx_desc *)rz->addr;\n \n \t/*\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex 64bff2584..ac6907b8e 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -2548,7 +2548,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \telse\n \t\ttxq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));\n \n-\ttxq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);\n+\ttxq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);\n \ttxq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;\n \n \t/* Allocate software ring */\n@@ -2850,7 +2850,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\tIXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));\n \t}\n \n-\trxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);\n+\trxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);\n \trxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;\n \n \t/*\ndiff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c\nindex 5156ac08d..67179eaf5 100644\n--- a/drivers/net/liquidio/lio_rxtx.c\n+++ b/drivers/net/liquidio/lio_rxtx.c\n@@ -1790,7 +1790,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)\n \t\t\t\tm = m->next;\n \t\t\t}\n \n-\t\t\tphyaddr = rte_mem_virt2phy(g->sg);\n+\t\t\tphyaddr = rte_mem_virt2iova(g->sg);\n \t\t\tif (phyaddr == RTE_BAD_PHYS_ADDR) {\n \t\t\t\tPMD_TX_LOG(lio_dev, ERR, \"bad phys addr\\n\");\n \t\t\t\tgoto xmit_failed;\ndiff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c\nindex 055de49a3..8b8216bb3 100644\n--- a/drivers/net/mlx4/mlx4.c\n+++ b/drivers/net/mlx4/mlx4.c\n@@ -1206,7 +1206,7 @@ static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)\n static struct ibv_mr *\n mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)\n {\n-\tconst struct rte_memseg *ms = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *ms = rte_eal_get_iovamem_layout();\n \tuintptr_t start;\n \tuintptr_t end;\n \tunsigned int i;\ndiff --git 
a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c\nindex 287335179..530aa4911 100644\n--- a/drivers/net/mlx5/mlx5_mr.c\n+++ b/drivers/net/mlx5/mlx5_mr.c\n@@ -131,7 +131,7 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,\n struct ibv_mr *\n mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)\n {\n-\tconst struct rte_memseg *ms = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *ms = rte_eal_get_iovamem_layout();\n \tuintptr_t start;\n \tuintptr_t end;\n \tunsigned int i;\ndiff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c\nindex 6cecfc00a..f75f1eb45 100644\n--- a/drivers/net/sfc/sfc.c\n+++ b/drivers/net/sfc/sfc.c\n@@ -61,7 +61,7 @@ sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,\n \t\treturn ENOMEM;\n \t}\n \n-\tesmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);\n+\tesmp->esm_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);\n \tif (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {\n \t\t(void)rte_memzone_free(mz);\n \t\treturn EFAULT;\ndiff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c\nindex fb79d7491..ad100676e 100644\n--- a/drivers/net/sfc/sfc_tso.c\n+++ b/drivers/net/sfc/sfc_tso.c\n@@ -155,7 +155,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,\n \t\t\t\t\t   header_len);\n \t\ttsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;\n \n-\t\theader_paddr = rte_malloc_virt2phy((void *)tsoh);\n+\t\theader_paddr = rte_malloc_virt2iova((void *)tsoh);\n \t} else {\n \t\tif (m->data_len == header_len) {\n \t\t\t*in_off = 0;\ndiff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c\nindex 985baaf51..49db9c3c0 100644\n--- a/examples/l2fwd-crypto/main.c\n+++ b/examples/l2fwd-crypto/main.c\n@@ -2461,7 +2461,7 @@ reserve_key_memory(struct l2fwd_crypto_options *options)\n \toptions->aad.data = rte_malloc(\"aad\", MAX_KEY_SIZE, 0);\n \tif (options->aad.data == NULL)\n \t\trte_exit(EXIT_FAILURE, \"Failed to allocate memory for AAD\");\n-\toptions->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);\n+\toptions->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);\n }\n \n int\ndiff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c\nindex 327d7e846..a1ffc8c8c 100644\n--- a/lib/librte_cryptodev/rte_cryptodev.c\n+++ b/lib/librte_cryptodev/rte_cryptodev.c\n@@ -1271,7 +1271,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,\n \n \t__rte_crypto_op_reset(op, type);\n \n-\top->phys_addr = rte_mem_virt2phy(_op_data);\n+\top->phys_addr = rte_mem_virt2iova(_op_data);\n \top->mempool = mempool;\n }\n \ndiff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c\nindex 5fa598842..1068995f2 100644\n--- a/lib/librte_eal/bsdapp/eal/eal.c\n+++ b/lib/librte_eal/bsdapp/eal/eal.c\n@@ -441,7 +441,7 @@ eal_check_mem_on_local_socket(void)\n \n \tsocket_id = rte_lcore_to_socket_id(rte_config.master_lcore);\n \n-\tms = rte_eal_get_physmem_layout();\n+\tms = rte_eal_get_iovamem_layout();\n \n \tfor (i = 0; i < RTE_MAX_MEMSEG; i++)\n \t\tif (ms[i].socket_id == socket_id &&\ndiff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c\nindex 10c2e121f..b16f4fc66 100644\n--- a/lib/librte_eal/bsdapp/eal/eal_memory.c\n+++ b/lib/librte_eal/bsdapp/eal/eal_memory.c\n@@ -51,7 +51,7 @@\n  * Get physical address of any mapped virtual address in the current process.\n  */\n iova_addr_t\n-rte_mem_virt2phy(const void *virtaddr)\n+rte_mem_virt2iova(const void *virtaddr)\n {\n \t/* XXX not implemented. 
This function is only used by\n \t * rte_mempool_virt2phy() when hugepages are disabled. */\ndiff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map\nindex aac6fd776..6df11dd3f 100644\n--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map\n+++ b/lib/librte_eal/bsdapp/eal/rte_eal_version.map\n@@ -14,7 +14,7 @@ DPDK_2.0 {\n \trte_cpu_get_flag_enabled;\n \trte_cycles_vmware_tsc_map;\n \trte_delay_us;\n-\trte_dump_physmem_layout;\n+\trte_dump_iovamem_layout;\n \trte_dump_registers;\n \trte_dump_stack;\n \trte_dump_tailq;\n@@ -25,8 +25,8 @@ DPDK_2.0 {\n \trte_eal_devargs_type_count;\n \trte_eal_get_configuration;\n \trte_eal_get_lcore_state;\n-\trte_eal_get_physmem_layout;\n-\trte_eal_get_physmem_size;\n+\trte_eal_get_iovamem_layout;\n+\trte_eal_get_iovamem_size;\n \trte_eal_has_hugepages;\n \trte_eal_hpet_init;\n \trte_eal_init;\n@@ -62,10 +62,10 @@ DPDK_2.0 {\n \trte_malloc_set_limit;\n \trte_malloc_socket;\n \trte_malloc_validate;\n-\trte_malloc_virt2phy;\n+\trte_malloc_virt2iova;\n \trte_mem_lock_page;\n-\trte_mem_phy2mch;\n-\trte_mem_virt2phy;\n+\trte_mem_phy2iova;\n+\trte_mem_virt2iova;\n \trte_memdump;\n \trte_memory_get_nchannel;\n \trte_memory_get_nrank;\ndiff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c\nindex 5ed83d20a..44bc072bf 100644\n--- a/lib/librte_eal/common/eal_common_memory.c\n+++ b/lib/librte_eal/common/eal_common_memory.c\n@@ -55,7 +55,7 @@\n  * memory. The last element of the table contains a NULL address.\n  */\n const struct rte_memseg *\n-rte_eal_get_physmem_layout(void)\n+rte_eal_get_iovamem_layout(void)\n {\n \treturn rte_eal_get_configuration()->mem_config->memseg;\n }\n@@ -63,7 +63,7 @@ rte_eal_get_physmem_layout(void)\n \n /* get the total size of memory */\n uint64_t\n-rte_eal_get_physmem_size(void)\n+rte_eal_get_iovamem_size(void)\n {\n \tconst struct rte_mem_config *mcfg;\n \tunsigned i = 0;\n@@ -84,7 +84,7 @@ rte_eal_get_physmem_size(void)\n \n /* Dump the physical memory layout on console */\n void\n-rte_dump_physmem_layout(FILE *f)\n+rte_dump_iovamem_layout(FILE *f)\n {\n \tconst struct rte_mem_config *mcfg;\n \tunsigned i = 0;\ndiff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c\nindex 3026e36b8..86457eaf0 100644\n--- a/lib/librte_eal/common/eal_common_memzone.c\n+++ b/lib/librte_eal/common/eal_common_memzone.c\n@@ -251,7 +251,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,\n \n \tmcfg->memzone_cnt++;\n \tsnprintf(mz->name, sizeof(mz->name), \"%s\", name);\n-\tmz->phys_addr = rte_malloc_virt2phy(mz_addr);\n+\tmz->phys_addr = rte_malloc_virt2iova(mz_addr);\n \tmz->addr = mz_addr;\n \tmz->len = (requested_len == 0 ? elem->size : requested_len);\n \tmz->hugepage_sz = elem->ms->hugepage_sz;\n@@ -419,7 +419,7 @@ rte_eal_memzone_init(void)\n \tif (rte_eal_process_type() == RTE_PROC_SECONDARY)\n \t\treturn 0;\n \n-\tmemseg = rte_eal_get_physmem_layout();\n+\tmemseg = rte_eal_get_iovamem_layout();\n \tif (memseg == NULL) {\n \t\tRTE_LOG(ERR, EAL, \"%s(): Cannot get physical layout\\n\", __func__);\n \t\treturn -1;\ndiff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h\nindex 597d82e44..a98dd69d3 100644\n--- a/lib/librte_eal/common/eal_private.h\n+++ b/lib/librte_eal/common/eal_private.h\n@@ -341,7 +341,7 @@ int rte_eal_hugepage_attach(void);\n  * addresses are obtainable. 
It is only possible to get\n  * physical addresses when running as a privileged user.\n  */\n-bool rte_eal_using_phys_addrs(void);\n+bool rte_eal_using_iova_addrs(void);\n \n /**\n  * Find a bus capable of identifying a device.\ndiff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h\nindex 491b479b1..b1a214c9d 100644\n--- a/lib/librte_eal/common/include/rte_malloc.h\n+++ b/lib/librte_eal/common/include/rte_malloc.h\n@@ -333,7 +333,7 @@ rte_malloc_set_limit(const char *type, size_t max);\n  *   otherwise return physical address of the buffer\n  */\n iova_addr_t\n-rte_malloc_virt2phy(const void *addr);\n+rte_malloc_virt2iova(const void *addr);\n \n #ifdef __cplusplus\n }\ndiff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h\nindex fc0fdf740..c5f1f40dc 100644\n--- a/lib/librte_eal/common/include/rte_memory.h\n+++ b/lib/librte_eal/common/include/rte_memory.h\n@@ -142,7 +142,7 @@ int rte_mem_lock_page(const void *virt);\n  * @return\n  *   The physical address or RTE_BAD_PHYS_ADDR on error.\n  */\n-iova_addr_t rte_mem_virt2phy(const void *virt);\n+iova_addr_t rte_mem_virt2iova(const void *virt);\n \n /**\n  * Get the layout of the available physical memory.\n@@ -159,7 +159,7 @@ iova_addr_t rte_mem_virt2phy(const void *virt);\n  *  - On error, return NULL. This should not happen since it is a fatal\n  *    error that will probably cause the entire system to panic.\n  */\n-const struct rte_memseg *rte_eal_get_physmem_layout(void);\n+const struct rte_memseg *rte_eal_get_iovamem_layout(void);\n \n /**\n  * Dump the physical memory layout to a file.\n@@ -167,7 +167,7 @@ const struct rte_memseg *rte_eal_get_physmem_layout(void);\n  * @param f\n  *   A pointer to a file for output\n  */\n-void rte_dump_physmem_layout(FILE *f);\n+void rte_dump_iovamem_layout(FILE *f);\n \n /**\n  * Get the total amount of available physical memory.\n@@ -175,7 +175,7 @@ void rte_dump_physmem_layout(FILE *f);\n  * @return\n  *    The total amount of available physical memory in bytes.\n  */\n-uint64_t rte_eal_get_physmem_size(void);\n+uint64_t rte_eal_get_iovamem_size(void);\n \n /**\n  * Get the number of memory channels.\n@@ -216,7 +216,7 @@ iova_addr_t rte_xen_mem_phy2mch(int32_t, const iova_addr_t);\n  *   The physical address or RTE_BAD_PHYS_ADDR on error.\n  */\n static inline iova_addr_t\n-rte_mem_phy2mch(int32_t memseg_id, const iova_addr_t phy_addr)\n+rte_mem_phy2iova(int32_t memseg_id, const iova_addr_t phy_addr)\n {\n \tif (rte_xen_dom0_supported())\n \t\treturn rte_xen_mem_phy2mch(memseg_id, phy_addr);\n@@ -252,7 +252,7 @@ static inline int rte_xen_dom0_supported(void)\n }\n \n static inline iova_addr_t\n-rte_mem_phy2mch(int32_t memseg_id __rte_unused, const iova_addr_t phy_addr)\n+rte_mem_phy2iova(int32_t memseg_id __rte_unused, const iova_addr_t phy_addr)\n {\n \treturn phy_addr;\n }\ndiff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c\nindex d03a5348a..cc0c6c935 100644\n--- a/lib/librte_eal/common/rte_malloc.c\n+++ b/lib/librte_eal/common/rte_malloc.c\n@@ -249,7 +249,7 @@ rte_malloc_set_limit(__rte_unused const char *type,\n  * Return the physical address of a virtual address obtained through rte_malloc\n  */\n iova_addr_t\n-rte_malloc_virt2phy(const void *addr)\n+rte_malloc_virt2iova(const void *addr)\n {\n \tconst struct malloc_elem *elem = malloc_elem_from_data(addr);\n \tif (elem == NULL)\ndiff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c\nindex 
48f12f44c..0b7419442 100644\n--- a/lib/librte_eal/linuxapp/eal/eal.c\n+++ b/lib/librte_eal/linuxapp/eal/eal.c\n@@ -671,7 +671,7 @@ eal_check_mem_on_local_socket(void)\n \n \tsocket_id = rte_lcore_to_socket_id(rte_config.master_lcore);\n \n-\tms = rte_eal_get_physmem_layout();\n+\tms = rte_eal_get_iovamem_layout();\n \n \tfor (i = 0; i < RTE_MAX_MEMSEG; i++)\n \t\tif (ms[i].socket_id == socket_id &&\ndiff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c\nindex 5d9702c72..30d55d79f 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_memory.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c\n@@ -117,7 +117,7 @@ test_phys_addrs_available(void)\n \t\treturn;\n \t}\n \n-\tphysaddr = rte_mem_virt2phy(&tmp);\n+\tphysaddr = rte_mem_virt2iova(&tmp);\n \tif (physaddr == RTE_BAD_PHYS_ADDR) {\n \t\tRTE_LOG(ERR, EAL,\n \t\t\t\"Cannot obtain physical addresses: %s. \"\n@@ -131,7 +131,7 @@ test_phys_addrs_available(void)\n  * Get physical address of any mapped virtual address in the current process.\n  */\n iova_addr_t\n-rte_mem_virt2phy(const void *virtaddr)\n+rte_mem_virt2iova(const void *virtaddr)\n {\n \tint fd, retval;\n \tuint64_t page, physaddr;\n@@ -222,7 +222,7 @@ find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)\n \tiova_addr_t addr;\n \n \tfor (i = 0; i < hpi->num_pages[0]; i++) {\n-\t\taddr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);\n+\t\taddr = rte_mem_virt2iova(hugepg_tbl[i].orig_va);\n \t\tif (addr == RTE_BAD_PHYS_ADDR)\n \t\t\treturn -1;\n \t\thugepg_tbl[i].physaddr = addr;\n@@ -1543,7 +1543,7 @@ rte_eal_hugepage_attach(void)\n }\n \n bool\n-rte_eal_using_phys_addrs(void)\n+rte_eal_using_iova_addrs(void)\n {\n \treturn phys_addrs_available;\n }\ndiff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c\nindex 8951ce742..ee4a60bca 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_pci.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c\n@@ -102,7 +102,7 @@ rte_pci_map_device(struct rte_pci_device *dev)\n \t\tbreak;\n \tcase RTE_KDRV_IGB_UIO:\n \tcase RTE_KDRV_UIO_GENERIC:\n-\t\tif (rte_eal_using_phys_addrs()) {\n+\t\tif (rte_eal_using_iova_addrs()) {\n \t\t\t/* map resources for devices that use uio */\n \t\t\tret = pci_uio_map_resource(dev);\n \t\t}\n@@ -144,7 +144,7 @@ rte_pci_unmap_device(struct rte_pci_device *dev)\n void *\n pci_find_max_end_va(void)\n {\n-\tconst struct rte_memseg *seg = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *seg = rte_eal_get_iovamem_layout();\n \tconst struct rte_memseg *last = seg;\n \tunsigned i = 0;\n \ndiff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c\nindex c03fd713c..e2a6d3006 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c\n@@ -692,7 +692,7 @@ vfio_get_group_no(const char *sysfs_base,\n static int\n vfio_type1_dma_map(int vfio_container_fd)\n {\n-\tconst struct rte_memseg *ms = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *ms = rte_eal_get_iovamem_layout();\n \tint i, ret;\n \n \t/* map all DPDK segments for DMA. 
use 1:1 PA to IOVA mapping */\n@@ -725,7 +725,7 @@ vfio_type1_dma_map(int vfio_container_fd)\n static int\n vfio_spapr_dma_map(int vfio_container_fd)\n {\n-\tconst struct rte_memseg *ms = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *ms = rte_eal_get_iovamem_layout();\n \tint i, ret;\n \n \tstruct vfio_iommu_spapr_register_memory reg = {\n@@ -760,7 +760,7 @@ vfio_spapr_dma_map(int vfio_container_fd)\n \t}\n \n \t/* calculate window size based on number of hugepages configured */\n-\tcreate.window_size = rte_eal_get_physmem_size();\n+\tcreate.window_size = rte_eal_get_iovamem_size();\n \tcreate.page_shift = __builtin_ctzll(ms->hugepage_sz);\n \tcreate.levels = 2;\n \ndiff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map\nindex 3a8f15406..e2f50e5b1 100644\n--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map\n+++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map\n@@ -14,7 +14,7 @@ DPDK_2.0 {\n \trte_cpu_get_flag_enabled;\n \trte_cycles_vmware_tsc_map;\n \trte_delay_us;\n-\trte_dump_physmem_layout;\n+\trte_dump_iovamem_layout;\n \trte_dump_registers;\n \trte_dump_stack;\n \trte_dump_tailq;\n@@ -25,8 +25,8 @@ DPDK_2.0 {\n \trte_eal_devargs_type_count;\n \trte_eal_get_configuration;\n \trte_eal_get_lcore_state;\n-\trte_eal_get_physmem_layout;\n-\trte_eal_get_physmem_size;\n+\trte_eal_get_iovamem_layout;\n+\trte_eal_get_iovamem_size;\n \trte_eal_has_hugepages;\n \trte_eal_hpet_init;\n \trte_eal_init;\n@@ -62,10 +62,10 @@ DPDK_2.0 {\n \trte_malloc_set_limit;\n \trte_malloc_socket;\n \trte_malloc_validate;\n-\trte_malloc_virt2phy;\n+\trte_malloc_virt2iova;\n \trte_mem_lock_page;\n-\trte_mem_phy2mch;\n-\trte_mem_virt2phy;\n+\trte_mem_phy2iova;\n+\trte_mem_virt2iova;\n \trte_memdump;\n \trte_memory_get_nchannel;\n \trte_memory_get_nrank;\ndiff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c\nindex b4f14cf61..39335e286 100644\n--- a/lib/librte_mempool/rte_mempool.c\n+++ b/lib/librte_mempool/rte_mempool.c\n@@ -344,7 +344,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)\n  * on error.\n  */\n int\n-rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,\n+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,\n \tiova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,\n \tvoid *opaque)\n {\n@@ -408,7 +408,7 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,\n  * number of objects added, or a negative value on error.\n  */\n int\n-rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,\n+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,\n \tconst iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,\n \trte_mempool_memchunk_free_cb_t *free_cb, void *opaque)\n {\n@@ -421,7 +421,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,\n \t\treturn -EEXIST;\n \n \tif (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)\n-\t\treturn rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,\n+\t\treturn rte_mempool_populate_iova(mp, vaddr, RTE_BAD_PHYS_ADDR,\n \t\t\tpg_num * pg_sz, free_cb, opaque);\n \n \tfor (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {\n@@ -431,7 +431,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,\n \t\t\t     paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)\n \t\t\t;\n \n-\t\tret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,\n+\t\tret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,\n \t\t\tpaddr[i], n * pg_sz, free_cb, opaque);\n \t\tif (ret < 0) {\n 
\t\t\trte_mempool_free_memchunks(mp);\n@@ -466,15 +466,15 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,\n \t\treturn -EINVAL;\n \n \tif (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)\n-\t\treturn rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,\n+\t\treturn rte_mempool_populate_iova(mp, addr, RTE_BAD_PHYS_ADDR,\n \t\t\tlen, free_cb, opaque);\n \n \tfor (off = 0; off + pg_sz <= len &&\n \t\t     mp->populated_size < mp->size; off += phys_len) {\n \n-\t\tpaddr = rte_mem_virt2phy(addr + off);\n+\t\tpaddr = rte_mem_virt2iova(addr + off);\n \t\t/* required for xen_dom0 to get the machine address */\n-\t\tpaddr = rte_mem_phy2mch(-1, paddr);\n+\t\tpaddr = rte_mem_phy2iova(-1, paddr);\n \n \t\tif (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {\n \t\t\tret = -EINVAL;\n@@ -485,14 +485,14 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,\n \t\tfor (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {\n \t\t\tiova_addr_t paddr_tmp;\n \n-\t\t\tpaddr_tmp = rte_mem_virt2phy(addr + off + phys_len);\n-\t\t\tpaddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);\n+\t\t\tpaddr_tmp = rte_mem_virt2iova(addr + off + phys_len);\n+\t\t\tpaddr_tmp = rte_mem_phy2iova(-1, paddr_tmp);\n \n \t\t\tif (paddr_tmp != paddr + phys_len)\n \t\t\t\tbreak;\n \t\t}\n \n-\t\tret = rte_mempool_populate_phys(mp, addr + off, paddr,\n+\t\tret = rte_mempool_populate_iova(mp, addr + off, paddr,\n \t\t\tphys_len, free_cb, opaque);\n \t\tif (ret < 0)\n \t\t\tgoto fail;\n@@ -569,7 +569,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t\t\tpaddr = mz->phys_addr;\n \n \t\tif (rte_eal_has_hugepages() && !rte_xen_dom0_supported())\n-\t\t\tret = rte_mempool_populate_phys(mp, mz->addr,\n+\t\t\tret = rte_mempool_populate_iova(mp, mz->addr,\n \t\t\t\tpaddr, mz->len,\n \t\t\t\trte_mempool_memchunk_mz_free,\n \t\t\t\t(void *)(uintptr_t)mz);\n@@ -954,7 +954,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,\n \tif (mp_init)\n \t\tmp_init(mp, mp_init_arg);\n \n-\tret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,\n+\tret = rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,\n \t\tNULL, NULL);\n \tif (ret < 0 || ret != (int)mp->size)\n \t\tgoto fail;\ndiff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h\nindex 1bcb6ebd7..13c16eee0 100644\n--- a/lib/librte_mempool/rte_mempool.h\n+++ b/lib/librte_mempool/rte_mempool.h\n@@ -819,7 +819,7 @@ rte_mempool_free(struct rte_mempool *mp);\n  *   On error, the chunk is not added in the memory list of the\n  *   mempool and a negative errno is returned.\n  */\n-int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,\n+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,\n \tiova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,\n \tvoid *opaque);\n \n@@ -850,7 +850,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,\n  *   On error, the chunks are not added in the memory list of the\n  *   mempool and a negative errno is returned.\n  */\n-int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,\n+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,\n \tconst iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,\n \trte_mempool_memchunk_free_cb_t *free_cb, void *opaque);\n \ndiff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map\nindex f9c079447..2904c299f 100644\n--- a/lib/librte_mempool/rte_mempool_version.map\n+++ 
b/lib/librte_mempool/rte_mempool_version.map\n@@ -34,8 +34,8 @@ DPDK_16.07 {\n \trte_mempool_ops_table;\n \trte_mempool_populate_anon;\n \trte_mempool_populate_default;\n-\trte_mempool_populate_phys;\n-\trte_mempool_populate_phys_tab;\n+\trte_mempool_populate_iova;\n+\trte_mempool_populate_iova_tab;\n \trte_mempool_populate_virt;\n \trte_mempool_register_ops;\n \trte_mempool_set_ops_byname;\ndiff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c\nindex ad2e8d380..5c546ba33 100644\n--- a/lib/librte_vhost/vhost_user.c\n+++ b/lib/librte_vhost/vhost_user.c\n@@ -453,7 +453,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,\n \tuint64_t host_phys_addr;\n \tuint64_t size;\n \n-\thost_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);\n+\thost_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);\n \tsize = page_size - (guest_phys_addr & (page_size - 1));\n \tsize = RTE_MIN(size, reg_size);\n \n@@ -464,7 +464,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,\n \n \twhile (reg_size > 0) {\n \t\tsize = RTE_MIN(reg_size, page_size);\n-\t\thost_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)\n+\t\thost_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)\n \t\t\t\t\t\t  host_user_addr);\n \t\tadd_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);\n \ndiff --git a/test/test/commands.c b/test/test/commands.c\nindex 4097a3310..9f5028d41 100644\n--- a/test/test/commands.c\n+++ b/test/test/commands.c\n@@ -147,7 +147,7 @@ static void cmd_dump_parsed(void *parsed_result,\n \tstruct cmd_dump_result *res = parsed_result;\n \n \tif (!strcmp(res->dump, \"dump_physmem\"))\n-\t\trte_dump_physmem_layout(stdout);\n+\t\trte_dump_iovamem_layout(stdout);\n \telse if (!strcmp(res->dump, \"dump_memzone\"))\n \t\trte_memzone_dump(stdout);\n \telse if (!strcmp(res->dump, \"dump_struct_sizes\"))\ndiff --git a/test/test/test_malloc.c b/test/test/test_malloc.c\nindex 013fd4407..fc995596e 100644\n--- a/test/test/test_malloc.c\n+++ b/test/test/test_malloc.c\n@@ -741,7 +741,7 @@ test_malloc_bad_params(void)\n static int\n is_mem_on_socket(int32_t socket)\n {\n-\tconst struct rte_memseg *ms = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *ms = rte_eal_get_iovamem_layout();\n \tunsigned i;\n \n \tfor (i = 0; i < RTE_MAX_MEMSEG; i++) {\n@@ -758,7 +758,7 @@ is_mem_on_socket(int32_t socket)\n static int32_t\n addr_to_socket(void * addr)\n {\n-\tconst struct rte_memseg *ms = rte_eal_get_physmem_layout();\n+\tconst struct rte_memseg *ms = rte_eal_get_iovamem_layout();\n \tunsigned i;\n \n \tfor (i = 0; i < RTE_MAX_MEMSEG; i++) {\ndiff --git a/test/test/test_memory.c b/test/test/test_memory.c\nindex 921bdc883..9ab0f52fd 100644\n--- a/test/test/test_memory.c\n+++ b/test/test/test_memory.c\n@@ -64,17 +64,17 @@ test_memory(void)\n \t * that at least one line is dumped\n \t */\n \tprintf(\"Dump memory layout\\n\");\n-\trte_dump_physmem_layout(stdout);\n+\trte_dump_iovamem_layout(stdout);\n \n \t/* check that memory size is != 0 */\n-\ts = rte_eal_get_physmem_size();\n+\ts = rte_eal_get_iovamem_size();\n \tif (s == 0) {\n \t\tprintf(\"No memory detected\\n\");\n \t\treturn -1;\n \t}\n \n \t/* try to read memory (should not segfault) */\n-\tmem = rte_eal_get_physmem_layout();\n+\tmem = rte_eal_get_iovamem_layout();\n \tfor (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {\n \n \t\t/* check memory */\ndiff --git a/test/test/test_mempool.c b/test/test/test_mempool.c\nindex 0a4423954..b4c46131c 100644\n--- 
a/test/test/test_mempool.c\n+++ b/test/test/test_mempool.c\n@@ -145,9 +145,9 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)\n \t\t\tMEMPOOL_HEADER_SIZE(mp, mp->cache_size))\n \t\tGOTO_ERR(ret, out);\n \n-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */\n+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */\n \tprintf(\"get physical address of an object\\n\");\n-\tif (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))\n+\tif (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))\n \t\tGOTO_ERR(ret, out);\n #endif\n \ndiff --git a/test/test/test_memzone.c b/test/test/test_memzone.c\nindex 0afb159e9..177bcb73e 100644\n--- a/test/test/test_memzone.c\n+++ b/test/test/test_memzone.c\n@@ -139,7 +139,7 @@ test_memzone_reserve_flags(void)\n \tint hugepage_16GB_avail = 0;\n \tconst size_t size = 100;\n \tint i = 0;\n-\tms = rte_eal_get_physmem_layout();\n+\tms = rte_eal_get_iovamem_layout();\n \tfor (i = 0; i < RTE_MAX_MEMSEG; i++) {\n \t\tif (ms[i].hugepage_sz == RTE_PGSIZE_2M)\n \t\t\thugepage_2MB_avail = 1;\n@@ -422,7 +422,7 @@ test_memzone_reserve_max(void)\n \tif (mz == NULL){\n \t\tprintf(\"Failed to reserve a big chunk of memory - %s\\n\",\n \t\t\t\trte_strerror(rte_errno));\n-\t\trte_dump_physmem_layout(stdout);\n+\t\trte_dump_iovamem_layout(stdout);\n \t\trte_memzone_dump(stdout);\n \t\treturn -1;\n \t}\n@@ -430,7 +430,7 @@ test_memzone_reserve_max(void)\n \tif (mz->len != maxlen) {\n \t\tprintf(\"Memzone reserve with 0 size did not return bigest block\\n\");\n \t\tprintf(\"Expected size = %zu, actual size = %zu\\n\", maxlen, mz->len);\n-\t\trte_dump_physmem_layout(stdout);\n+\t\trte_dump_iovamem_layout(stdout);\n \t\trte_memzone_dump(stdout);\n \t\treturn -1;\n \t}\n@@ -459,7 +459,7 @@ test_memzone_reserve_max_aligned(void)\n \tif (mz == NULL){\n \t\tprintf(\"Failed to reserve a big chunk of memory - %s\\n\",\n \t\t\t\trte_strerror(rte_errno));\n-\t\trte_dump_physmem_layout(stdout);\n+\t\trte_dump_iovamem_layout(stdout);\n \t\trte_memzone_dump(stdout);\n \t\treturn -1;\n \t}\n@@ -469,7 +469,7 @@ test_memzone_reserve_max_aligned(void)\n \t\t\t\t\" bigest block\\n\", align);\n \t\tprintf(\"Expected size = %zu, actual size = %zu\\n\",\n \t\t\t\tmaxlen, mz->len);\n-\t\trte_dump_physmem_layout(stdout);\n+\t\trte_dump_iovamem_layout(stdout);\n \t\trte_memzone_dump(stdout);\n \t\treturn -1;\n \t}\n",
    "prefixes": [
        "dpdk-dev",
        "v1",
        "3/4"
    ]
}
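
Updating a patch (the PUT and PATCH methods listed in the Allow header above) requires authentication. A hedged sketch, assuming a maintainer API token and that fields such as "state" and "archived" accept writes (their presence in the response suggests this, but the writable set is not shown here), could look like:

    # Hedged sketch: change mutable fields of the same patch with HTTP PATCH.
    # Assumes a maintainer API token; the token value below is a placeholder.
    import requests

    url = "http://patches.dpdk.org/api/patches/27579/"
    headers = {"Authorization": "Token <your-api-token>"}
    payload = {"state": "superseded", "archived": True}

    resp = requests.patch(url, headers=headers, json=payload)
    resp.raise_for_status()
    print(resp.json()["state"], resp.json()["archived"])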