get:
Show a patch.

patch:
Update a patch (partial update; only the fields sent are changed).

put:
Update a patch (full update).
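
Both operations can be driven from any HTTP client. Below is a minimal sketch in Python, assuming the third-party requests library; the token value is a placeholder (real tokens are created from your Patchwork profile), and write access is restricted — in practice only project maintainers may change fields such as "state" or "delegate".

import requests

API = "http://patches.dpdk.org/api"

# Read access needs no credentials.
patch = requests.get(API + "/patches/53122/").json()
print(patch["name"], patch["state"])

# Write access needs token authentication; PATCH sends only the
# fields being changed. The token below is a placeholder.
resp = requests.patch(
    API + "/patches/53122/",
    headers={"Authorization": "Token 0123456789abcdef"},
    json={"state": "accepted"},
)
resp.raise_for_status()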

GET /api/patches/53122/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53122,
    "url": "http://patches.dpdk.org/api/patches/53122/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1556339577-18185-2-git-send-email-viacheslavo@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1556339577-18185-2-git-send-email-viacheslavo@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1556339577-18185-2-git-send-email-viacheslavo@mellanox.com",
    "date": "2019-04-27T04:32:56",
    "name": "[v3,1/2] net/mlx5: share Memory Regions for multiport device",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "889f71d5340265b31eeeadbba51b7ed91bcf7f78",
    "submitter": {
        "id": 1102,
        "url": "http://patches.dpdk.org/api/people/1102/?format=api",
        "name": "Slava Ovsiienko",
        "email": "viacheslavo@mellanox.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1556339577-18185-2-git-send-email-viacheslavo@mellanox.com/mbox/",
    "series": [
        {
            "id": 4492,
            "url": "http://patches.dpdk.org/api/series/4492/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4492",
            "date": "2019-04-27T04:32:55",
            "name": "net/mlx5: share Memory Regions for multiport devices",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/4492/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53122/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/53122/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 94AC01B750;\n\tSat, 27 Apr 2019 06:33:18 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n\tby dpdk.org (Postfix) with ESMTP id 563031B73E\n\tfor <dev@dpdk.org>; Sat, 27 Apr 2019 06:33:16 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE2 (envelope-from\n\tviacheslavo@mellanox.com)\n\twith ESMTPS (AES256-SHA encrypted); 27 Apr 2019 07:33:13 +0300",
            "from pegasus12.mtr.labs.mlnx. (pegasus12.mtr.labs.mlnx\n\t[10.210.17.40])\n\tby labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x3R4X0eX020373;\n\tSat, 27 Apr 2019 07:33:13 +0300"
        ],
        "From": "Viacheslav Ovsiienko <viacheslavo@mellanox.com>",
        "To": "shahafs@mellanox.com",
        "Cc": "dev@dpdk.org, yskoh@mellanox.com",
        "Date": "Sat, 27 Apr 2019 04:32:56 +0000",
        "Message-Id": "<1556339577-18185-2-git-send-email-viacheslavo@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1556339577-18185-1-git-send-email-viacheslavo@mellanox.com>",
        "References": "<1556095470-11407-1-git-send-email-viacheslavo@mellanox.com>\n\t<1556339577-18185-1-git-send-email-viacheslavo@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH v3 1/2] net/mlx5: share Memory Regions for\n\tmultiport device",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The multiport Infiniband device support was introduced [1].\nAll active ports, belonging to the same Infiniband device use the single\nshared Infiniband context of that device and share the resources:\n  - QPs are created within shared context\n  - Verbs flows are also created with specifying port index\n  - DV/DR resources\n  - Protection Domain\n  - Event Handlers\n\nThis patchset adds support for Memory Regions sharing between\nports, created on the base of multiport Infiniband device.\nThe datapath of mlx5 uses the layered cache subsystem for\nallocating/releasing Memory Regions, only the lowest layer L3\nis subject to share due to performance issues.\n\n[1] http://patches.dpdk.org/cover/51800/\n\nSigned-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>\n---\n drivers/net/mlx5/mlx5.c     |  40 +++++++----\n drivers/net/mlx5/mlx5.h     |  15 ++--\n drivers/net/mlx5/mlx5_mr.c  | 164 ++++++++++++++++++++++----------------------\n drivers/net/mlx5/mlx5_mr.h  |   5 +-\n drivers/net/mlx5/mlx5_txq.c |   2 +-\n 5 files changed, 121 insertions(+), 105 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 1bb58b1..b563e0f 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -147,6 +147,7 @@ struct mlx5_dev_spawn_data {\n \tstruct mlx5_switch_info info; /**< Switch information. */\n \tstruct ibv_device *ibv_dev; /**< Associated IB device. */\n \tstruct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */\n+\tstruct rte_pci_device *pci_dev; /**< Backend PCI device. */\n };\n \n static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();\n@@ -225,6 +226,7 @@ struct mlx5_dev_spawn_data {\n \t\tsizeof(sh->ibdev_name));\n \tstrncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,\n \t\tsizeof(sh->ibdev_path));\n+\tsh->pci_dev = spawn->pci_dev;\n \tpthread_mutex_init(&sh->intr_mutex, NULL);\n \t/*\n \t * Setting port_id to max unallowed value means\n@@ -239,6 +241,22 @@ struct mlx5_dev_spawn_data {\n \t\terr = ENOMEM;\n \t\tgoto error;\n \t}\n+\t/*\n+\t * Once the device is added to the list of memory event\n+\t * callback, its global MR cache table cannot be expanded\n+\t * on the fly because of deadlock. If it overflows, lookup\n+\t * should be done by searching MR list linearly, which is slow.\n+\t *\n+\t * At this point the device is not added to the memory\n+\t * event list yet, context is just being created.\n+\t */\n+\terr = mlx5_mr_btree_init(&sh->mr.cache,\n+\t\t\t\t MLX5_MR_BTREE_CACHE_N * 2,\n+\t\t\t\t sh->pci_dev->device.numa_node);\n+\tif (err) {\n+\t\terr = rte_errno;\n+\t\tgoto error;\n+\t}\n \tLIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);\n exit:\n \tpthread_mutex_unlock(&mlx5_ibv_list_mutex);\n@@ -286,6 +304,8 @@ struct mlx5_dev_spawn_data {\n \tassert(rte_eal_process_type() == RTE_PROC_PRIMARY);\n \tif (--sh->refcnt)\n \t\tgoto exit;\n+\t/* Release created Memory Regions. */\n+\tmlx5_mr_release(sh);\n \tLIST_REMOVE(sh, next);\n \t/*\n \t *  Ensure there is no async event handler installed.\n@@ -651,7 +671,10 @@ struct mlx5_dev_spawn_data {\n \t}\n \tmlx5_proc_priv_uninit(dev);\n \tmlx5_mprq_free_mp(dev);\n-\tmlx5_mr_release(dev);\n+\t/* Remove from memory callback device list. */\n+\trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n+\tLIST_REMOVE(priv, mem_event_cb);\n+\trte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);\n \tassert(priv->sh);\n \tmlx5_free_shared_dr(priv);\n \tif (priv->rss_conf.rss_key != NULL)\n@@ -1548,19 +1571,6 @@ struct mlx5_dev_spawn_data {\n \t\tgoto error;\n \t}\n \tpriv->config.flow_prio = err;\n-\t/*\n-\t * Once the device is added to the list of memory event\n-\t * callback, its global MR cache table cannot be expanded\n-\t * on the fly because of deadlock. If it overflows, lookup\n-\t * should be done by searching MR list linearly, which is slow.\n-\t */\n-\terr = mlx5_mr_btree_init(&priv->mr.cache,\n-\t\t\t\t MLX5_MR_BTREE_CACHE_N * 2,\n-\t\t\t\t eth_dev->device->numa_node);\n-\tif (err) {\n-\t\terr = rte_errno;\n-\t\tgoto error;\n-\t}\n \t/* Add device to memory callback list. 
*/\n \trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n \tLIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,\n@@ -1757,6 +1767,7 @@ struct mlx5_dev_spawn_data {\n \t\t\tlist[ns].ibv_port = i;\n \t\t\tlist[ns].ibv_dev = ibv_match[0];\n \t\t\tlist[ns].eth_dev = NULL;\n+\t\t\tlist[ns].pci_dev = pci_dev;\n \t\t\tlist[ns].ifindex = mlx5_nl_ifindex\n \t\t\t\t\t(nl_rdma, list[ns].ibv_dev->name, i);\n \t\t\tif (!list[ns].ifindex) {\n@@ -1823,6 +1834,7 @@ struct mlx5_dev_spawn_data {\n \t\t\tlist[ns].ibv_port = 1;\n \t\t\tlist[ns].ibv_dev = ibv_match[i];\n \t\t\tlist[ns].eth_dev = NULL;\n+\t\t\tlist[ns].pci_dev = pci_dev;\n \t\t\tlist[ns].ifindex = 0;\n \t\t\tif (nl_rdma >= 0)\n \t\t\t\tlist[ns].ifindex = mlx5_nl_ifindex\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0a6d7f1..2575732 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -275,6 +275,14 @@ struct mlx5_ibv_shared {\n \tchar ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */\n \tchar ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */\n \tstruct ibv_device_attr_ex device_attr; /* Device properties. */\n+\tstruct rte_pci_device *pci_dev; /* Backend PCI device. */\n+\tstruct {\n+\t\tuint32_t dev_gen; /* Generation number to flush local caches. */\n+\t\trte_rwlock_t rwlock; /* MR Lock. */\n+\t\tstruct mlx5_mr_btree cache; /* Global MR cache table. */\n+\t\tstruct mlx5_mr_list mr_list; /* Registered MR list. */\n+\t\tstruct mlx5_mr_list mr_free_list; /* Freed MR list. */\n+\t} mr;\n \t/* Shared DV/DR flow data section. */\n \tpthread_mutex_t dv_mutex; /* DV context mutex. */\n \tuint32_t dv_refcnt; /* DV/DR data reference counter. */\n@@ -347,13 +355,6 @@ struct mlx5_priv {\n \tstruct mlx5_flows ctrl_flows; /* Control flow rules. */\n \tLIST_HEAD(counters, mlx5_flow_counter) flow_counters;\n \t/* Flow counters. */\n-\tstruct {\n-\t\tuint32_t dev_gen; /* Generation number to flush local caches. */\n-\t\trte_rwlock_t rwlock; /* MR Lock. */\n-\t\tstruct mlx5_mr_btree cache; /* Global MR cache table. */\n-\t\tstruct mlx5_mr_list mr_list; /* Registered MR list. */\n-\t\tstruct mlx5_mr_list mr_free_list; /* Freed MR list. */\n-\t} mr;\n \tLIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */\n \tLIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */\n \tLIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */\ndiff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c\nindex e4957a4..a7a63b1 100644\n--- a/drivers/net/mlx5/mlx5_mr.c\n+++ b/drivers/net/mlx5/mlx5_mr.c\n@@ -36,7 +36,7 @@ struct mr_update_mp_data {\n \n /**\n  * Expand B-tree table to a given size. Can't be called with holding\n- * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().\n+ * memory_hotplug_lock or sh->mr.rwlock due to rte_realloc().\n  *\n  * @param bt\n  *   Pointer to B-tree structure.\n@@ -350,7 +350,7 @@ struct mr_update_mp_data {\n \t\tn = mr_find_next_chunk(mr, &entry, n);\n \t\tif (!entry.end)\n \t\t\tbreak;\n-\t\tif (mr_btree_insert(&priv->mr.cache, &entry) < 0) {\n+\t\tif (mr_btree_insert(&priv->sh->mr.cache, &entry) < 0) {\n \t\t\t/*\n \t\t\t * Overflowed, but the global table cannot be expanded\n \t\t\t * because of deadlock.\n@@ -382,7 +382,7 @@ struct mr_update_mp_data {\n \tstruct mlx5_mr *mr;\n \n \t/* Iterate all the existing MRs. 
*/\n-\tLIST_FOREACH(mr, &priv->mr.mr_list, mr) {\n+\tLIST_FOREACH(mr, &priv->sh->mr.mr_list, mr) {\n \t\tunsigned int n;\n \n \t\tif (mr->ms_n == 0)\n@@ -420,6 +420,7 @@ struct mr_update_mp_data {\n \t      uintptr_t addr)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ibv_shared *sh = priv->sh;\n \tuint16_t idx;\n \tuint32_t lkey = UINT32_MAX;\n \tstruct mlx5_mr *mr;\n@@ -430,10 +431,10 @@ struct mr_update_mp_data {\n \t * has to be searched by traversing the original MR list instead, which\n \t * is very slow path. Otherwise, the global cache is all inclusive.\n \t */\n-\tif (!unlikely(priv->mr.cache.overflow)) {\n-\t\tlkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);\n+\tif (!unlikely(sh->mr.cache.overflow)) {\n+\t\tlkey = mr_btree_lookup(&sh->mr.cache, &idx, addr);\n \t\tif (lkey != UINT32_MAX)\n-\t\t\t*entry = (*priv->mr.cache.table)[idx];\n+\t\t\t*entry = (*sh->mr.cache.table)[idx];\n \t} else {\n \t\t/* Falling back to the slowest path. */\n \t\tmr = mr_lookup_dev_list(dev, entry, addr);\n@@ -468,13 +469,12 @@ struct mr_update_mp_data {\n /**\n  * Release resources of detached MR having no online entry.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n+ * @param sh\n+ *   Pointer to Ethernet device shared context.\n  */\n static void\n-mlx5_mr_garbage_collect(struct rte_eth_dev *dev)\n+mlx5_mr_garbage_collect(struct mlx5_ibv_shared *sh)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_mr *mr_next;\n \tstruct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);\n \n@@ -484,11 +484,11 @@ struct mr_update_mp_data {\n \t * MR can't be freed with holding the lock because rte_free() could call\n \t * memory free callback function. This will be a deadlock situation.\n \t */\n-\trte_rwlock_write_lock(&priv->mr.rwlock);\n+\trte_rwlock_write_lock(&sh->mr.rwlock);\n \t/* Detach the whole free list and release it after unlocking. */\n-\tfree_list = priv->mr.mr_free_list;\n-\tLIST_INIT(&priv->mr.mr_free_list);\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\tfree_list = sh->mr.mr_free_list;\n+\tLIST_INIT(&sh->mr.mr_free_list);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n \t/* Release resources. */\n \tmr_next = LIST_FIRST(&free_list);\n \twhile (mr_next != NULL) {\n@@ -548,12 +548,12 @@ struct mr_update_mp_data {\n \t\t      dev->data->port_id, (void *)addr);\n \t\treturn UINT32_MAX;\n \t}\n-\trte_rwlock_read_lock(&priv->mr.rwlock);\n+\trte_rwlock_read_lock(&priv->sh->mr.rwlock);\n \t/* Fill in output data. */\n \tmr_lookup_dev(dev, entry, addr);\n \t/* Lookup can't fail. */\n \tassert(entry->lkey != UINT32_MAX);\n-\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\trte_rwlock_read_unlock(&priv->sh->mr.rwlock);\n \tDEBUG(\"port %u MR CREATED by primary process for %p:\\n\"\n \t      \"  [0x%\" PRIxPTR \", 0x%\" PRIxPTR \"), lkey=0x%x\",\n \t      dev->data->port_id, (void *)addr,\n@@ -582,6 +582,7 @@ struct mr_update_mp_data {\n \t\t       uintptr_t addr)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ibv_shared *sh = priv->sh;\n \tstruct mlx5_dev_config *config = &priv->config;\n \tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n \tconst struct rte_memseg_list *msl;\n@@ -602,12 +603,12 @@ struct mr_update_mp_data {\n \t\tdev->data->port_id, (void *)addr);\n \t/*\n \t * Release detached MRs if any. This can't be called with holding either\n-\t * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have\n+\t * memory_hotplug_lock or sh->mr.rwlock. 
MRs on the free list have\n \t * been detached by the memory free event but it couldn't be released\n \t * inside the callback due to deadlock. As a result, releasing resources\n \t * is quite opportunistic.\n \t */\n-\tmlx5_mr_garbage_collect(dev);\n+\tmlx5_mr_garbage_collect(sh);\n \t/*\n \t * If enabled, find out a contiguous virtual address chunk in use, to\n \t * which the given address belongs, in order to register maximum range.\n@@ -710,7 +711,7 @@ struct mr_update_mp_data {\n \t\tgoto alloc_resources;\n \t}\n \tassert(data.msl == data_re.msl);\n-\trte_rwlock_write_lock(&priv->mr.rwlock);\n+\trte_rwlock_write_lock(&sh->mr.rwlock);\n \t/*\n \t * Check the address is really missing. If other thread already created\n \t * one or it is not found due to overflow, abort and return.\n@@ -721,10 +722,10 @@ struct mr_update_mp_data {\n \t\t * low-on-memory. Then, this entry will have to be searched\n \t\t * here again.\n \t\t */\n-\t\tmr_btree_insert(&priv->mr.cache, entry);\n+\t\tmr_btree_insert(&sh->mr.cache, entry);\n \t\tDEBUG(\"port %u found MR for %p on final lookup, abort\",\n \t\t      dev->data->port_id, (void *)addr);\n-\t\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\t\trte_rwlock_write_unlock(&sh->mr.rwlock);\n \t\trte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);\n \t\t/*\n \t\t * Must be unlocked before calling rte_free() because\n@@ -769,7 +770,7 @@ struct mr_update_mp_data {\n \t * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()\n \t * through mlx5_alloc_verbs_buf().\n \t */\n-\tmr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,\n+\tmr->ibv_mr = mlx5_glue->reg_mr(sh->pd, (void *)data.start, len,\n \t\t\t\t       IBV_ACCESS_LOCAL_WRITE);\n \tif (mr->ibv_mr == NULL) {\n \t\tDEBUG(\"port %u fail to create a verbs MR for address (%p)\",\n@@ -779,7 +780,7 @@ struct mr_update_mp_data {\n \t}\n \tassert((uintptr_t)mr->ibv_mr->addr == data.start);\n \tassert(mr->ibv_mr->length == len);\n-\tLIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);\n+\tLIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);\n \tDEBUG(\"port %u MR CREATED (%p) for %p:\\n\"\n \t      \"  [0x%\" PRIxPTR \", 0x%\" PRIxPTR \"),\"\n \t      \" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u\",\n@@ -792,11 +793,11 @@ struct mr_update_mp_data {\n \tmr_lookup_dev(dev, entry, addr);\n \t/* Lookup can't fail. */\n \tassert(entry->lkey != UINT32_MAX);\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n \trte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);\n \treturn entry->lkey;\n err_mrlock:\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n err_memlock:\n \trte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);\n err_nolock:\n@@ -854,14 +855,15 @@ struct mr_update_mp_data {\n mr_rebuild_dev_cache(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ibv_shared *sh = priv->sh;\n \tstruct mlx5_mr *mr;\n \n \tDRV_LOG(DEBUG, \"port %u rebuild dev cache[]\", dev->data->port_id);\n \t/* Flush cache to rebuild. */\n-\tpriv->mr.cache.len = 1;\n-\tpriv->mr.cache.overflow = 0;\n+\tsh->mr.cache.len = 1;\n+\tsh->mr.cache.overflow = 0;\n \t/* Iterate all the existing MRs. 
*/\n-\tLIST_FOREACH(mr, &priv->mr.mr_list, mr)\n+\tLIST_FOREACH(mr, &sh->mr.mr_list, mr)\n \t\tif (mr_insert_dev_cache(dev, mr) < 0)\n \t\t\treturn;\n }\n@@ -888,6 +890,7 @@ struct mr_update_mp_data {\n mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ibv_shared *sh = priv->sh;\n \tconst struct rte_memseg_list *msl;\n \tstruct mlx5_mr *mr;\n \tint ms_n;\n@@ -901,7 +904,7 @@ struct mr_update_mp_data {\n \tassert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));\n \tassert(len == RTE_ALIGN(len, msl->page_sz));\n \tms_n = len / msl->page_sz;\n-\trte_rwlock_write_lock(&priv->mr.rwlock);\n+\trte_rwlock_write_lock(&sh->mr.rwlock);\n \t/* Clear bits of freed memsegs from MR. */\n \tfor (i = 0; i < ms_n; ++i) {\n \t\tconst struct rte_memseg *ms;\n@@ -928,7 +931,7 @@ struct mr_update_mp_data {\n \t\trte_bitmap_clear(mr->ms_bmp, pos);\n \t\tif (--mr->ms_n == 0) {\n \t\t\tLIST_REMOVE(mr, mr);\n-\t\t\tLIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);\n+\t\t\tLIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);\n \t\t\tDEBUG(\"port %u remove MR(%p) from list\",\n \t\t\t      dev->data->port_id, (void *)mr);\n \t\t}\n@@ -949,12 +952,12 @@ struct mr_update_mp_data {\n \t\t * generation below) will be guaranteed to be seen by other core\n \t\t * before the core sees the newly allocated memory.\n \t\t */\n-\t\t++priv->mr.dev_gen;\n+\t\t++sh->mr.dev_gen;\n \t\tDEBUG(\"broadcasting local cache flush, gen=%d\",\n-\t\t      priv->mr.dev_gen);\n+\t\t      sh->mr.dev_gen);\n \t\trte_smp_wmb();\n \t}\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n }\n \n /**\n@@ -1013,6 +1016,7 @@ struct mr_update_mp_data {\n \t\t   struct mlx5_mr_cache *entry, uintptr_t addr)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ibv_shared *sh = priv->sh;\n \tstruct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;\n \tuint16_t idx;\n \tuint32_t lkey;\n@@ -1021,12 +1025,12 @@ struct mr_update_mp_data {\n \tif (unlikely(bt->len == bt->size))\n \t\tmr_btree_expand(bt, bt->size << 1);\n \t/* Look up in the global cache. */\n-\trte_rwlock_read_lock(&priv->mr.rwlock);\n-\tlkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);\n+\trte_rwlock_read_lock(&sh->mr.rwlock);\n+\tlkey = mr_btree_lookup(&sh->mr.cache, &idx, addr);\n \tif (lkey != UINT32_MAX) {\n \t\t/* Found. */\n-\t\t*entry = (*priv->mr.cache.table)[idx];\n-\t\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\t\t*entry = (*sh->mr.cache.table)[idx];\n+\t\trte_rwlock_read_unlock(&sh->mr.rwlock);\n \t\t/*\n \t\t * Update local cache. Even if it fails, return the found entry\n \t\t * to update top-half cache. Next time, this entry will be found\n@@ -1035,7 +1039,7 @@ struct mr_update_mp_data {\n \t\tmr_btree_insert(bt, entry);\n \t\treturn lkey;\n \t}\n-\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\trte_rwlock_read_unlock(&sh->mr.rwlock);\n \t/* First time to see the address? Create a new MR. 
*/\n \tlkey = mlx5_mr_create(dev, entry, addr);\n \t/*\n@@ -1261,6 +1265,7 @@ struct mr_update_mp_data {\n \tstruct mr_update_mp_data *data = opaque;\n \tstruct rte_eth_dev *dev = data->dev;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ibv_shared *sh = priv->sh;\n \tstruct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;\n \tstruct mlx5_mr *mr = NULL;\n \tuintptr_t addr = (uintptr_t)memhdr->addr;\n@@ -1270,9 +1275,9 @@ struct mr_update_mp_data {\n \n \tassert(rte_eal_process_type() == RTE_PROC_PRIMARY);\n \t/* If already registered, it should return. */\n-\trte_rwlock_read_lock(&priv->mr.rwlock);\n+\trte_rwlock_read_lock(&sh->mr.rwlock);\n \tlkey = mr_lookup_dev(dev, &entry, addr);\n-\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\trte_rwlock_read_unlock(&sh->mr.rwlock);\n \tif (lkey != UINT32_MAX)\n \t\treturn;\n \tDRV_LOG(DEBUG, \"port %u register MR for chunk #%d of mempool (%s)\",\n@@ -1286,11 +1291,11 @@ struct mr_update_mp_data {\n \t\tdata->ret = -1;\n \t\treturn;\n \t}\n-\trte_rwlock_write_lock(&priv->mr.rwlock);\n-\tLIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);\n+\trte_rwlock_write_lock(&sh->mr.rwlock);\n+\tLIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);\n \t/* Insert to the global cache table. */\n \tmr_insert_dev_cache(dev, mr);\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n \t/* Insert to the local cache table */\n \tmlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);\n }\n@@ -1339,6 +1344,7 @@ struct mr_update_mp_data {\n \tstruct rte_eth_dev *dev;\n \tstruct mlx5_mr *mr;\n \tstruct mlx5_priv *priv;\n+\tstruct mlx5_ibv_shared *sh;\n \n \tdev = pci_dev_to_eth_dev(pdev);\n \tif (!dev) {\n@@ -1355,11 +1361,12 @@ struct mr_update_mp_data {\n \t\trte_errno = EINVAL;\n \t\treturn -1;\n \t}\n-\trte_rwlock_write_lock(&priv->mr.rwlock);\n-\tLIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);\n+\tsh = priv->sh;\n+\trte_rwlock_write_lock(&sh->mr.rwlock);\n+\tLIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);\n \t/* Insert to the global cache table. 
*/\n \tmr_insert_dev_cache(dev, mr);\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n \treturn 0;\n }\n \n@@ -1384,6 +1391,7 @@ struct mr_update_mp_data {\n {\n \tstruct rte_eth_dev *dev;\n \tstruct mlx5_priv *priv;\n+\tstruct mlx5_ibv_shared *sh;\n \tstruct mlx5_mr *mr;\n \tstruct mlx5_mr_cache entry;\n \n@@ -1395,10 +1403,11 @@ struct mr_update_mp_data {\n \t\treturn -1;\n \t}\n \tpriv = dev->data->dev_private;\n-\trte_rwlock_read_lock(&priv->mr.rwlock);\n+\tsh = priv->sh;\n+\trte_rwlock_read_lock(&sh->mr.rwlock);\n \tmr = mr_lookup_dev_list(dev, &entry, (uintptr_t)addr);\n \tif (!mr) {\n-\t\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\t\trte_rwlock_read_unlock(&sh->mr.rwlock);\n \t\tDRV_LOG(WARNING, \"address 0x%\" PRIxPTR \" wasn't registered \"\n \t\t\t\t \"to PCI device %p\", (uintptr_t)addr,\n \t\t\t\t (void *)pdev);\n@@ -1406,7 +1415,7 @@ struct mr_update_mp_data {\n \t\treturn -1;\n \t}\n \tLIST_REMOVE(mr, mr);\n-\tLIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);\n+\tLIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);\n \tDEBUG(\"port %u remove MR(%p) from list\", dev->data->port_id,\n \t      (void *)mr);\n \tmr_rebuild_dev_cache(dev);\n@@ -1419,11 +1428,10 @@ struct mr_update_mp_data {\n \t * generation below) will be guaranteed to be seen by other core\n \t * before the core sees the newly allocated memory.\n \t */\n-\t++priv->mr.dev_gen;\n-\tDEBUG(\"broadcasting local cache flush, gen=%d\",\n-\t\t\tpriv->mr.dev_gen);\n+\t++sh->mr.dev_gen;\n+\tDEBUG(\"broadcasting local cache flush, gen=%d\",\tsh->mr.dev_gen);\n \trte_smp_wmb();\n-\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\trte_rwlock_read_unlock(&sh->mr.rwlock);\n \treturn 0;\n }\n \n@@ -1544,25 +1552,24 @@ struct mr_update_mp_data {\n /**\n  * Dump all the created MRs and the global cache entries.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n+ * @param sh\n+ *   Pointer to Ethernet device shared context.\n  */\n void\n-mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)\n+mlx5_mr_dump_dev(struct mlx5_ibv_shared *sh __rte_unused)\n {\n #ifndef NDEBUG\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_mr *mr;\n \tint mr_n = 0;\n \tint chunk_n = 0;\n \n-\trte_rwlock_read_lock(&priv->mr.rwlock);\n+\trte_rwlock_read_lock(&sh->mr.rwlock);\n \t/* Iterate all the existing MRs. */\n-\tLIST_FOREACH(mr, &priv->mr.mr_list, mr) {\n+\tLIST_FOREACH(mr, &sh->mr.mr_list, mr) {\n \t\tunsigned int n;\n \n-\t\tDEBUG(\"port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u\",\n-\t\t      dev->data->port_id, mr_n++,\n+\t\tDEBUG(\"device %s MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u\",\n+\t\t      sh->ibdev_name, mr_n++,\n \t\t      rte_cpu_to_be_32(mr->ibv_mr->lkey),\n \t\t      mr->ms_n, mr->ms_bmp_n);\n \t\tif (mr->ms_n == 0)\n@@ -1577,45 +1584,40 @@ struct mr_update_mp_data {\n \t\t\t      chunk_n++, ret.start, ret.end);\n \t\t}\n \t}\n-\tDEBUG(\"port %u dumping global cache\", dev->data->port_id);\n-\tmlx5_mr_btree_dump(&priv->mr.cache);\n-\trte_rwlock_read_unlock(&priv->mr.rwlock);\n+\tDEBUG(\"device %s dumping global cache\", sh->ibdev_name);\n+\tmlx5_mr_btree_dump(&sh->mr.cache);\n+\trte_rwlock_read_unlock(&sh->mr.rwlock);\n #endif\n }\n \n /**\n- * Release all the created MRs and resources. 
Remove device from memory callback\n+ * Release all the created MRs and resources for shared device context.\n  * list.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n+ * @param sh\n+ *   Pointer to Ethernet device shared context.\n  */\n void\n-mlx5_mr_release(struct rte_eth_dev *dev)\n+mlx5_mr_release(struct mlx5_ibv_shared *sh)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_mr *mr_next;\n \n-\t/* Remove from memory callback device list. */\n-\trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n-\tLIST_REMOVE(priv, mem_event_cb);\n-\trte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);\n \tif (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)\n-\t\tmlx5_mr_dump_dev(dev);\n-\trte_rwlock_write_lock(&priv->mr.rwlock);\n+\t\tmlx5_mr_dump_dev(sh);\n+\trte_rwlock_write_lock(&sh->mr.rwlock);\n \t/* Detach from MR list and move to free list. */\n-\tmr_next = LIST_FIRST(&priv->mr.mr_list);\n+\tmr_next = LIST_FIRST(&sh->mr.mr_list);\n \twhile (mr_next != NULL) {\n \t\tstruct mlx5_mr *mr = mr_next;\n \n \t\tmr_next = LIST_NEXT(mr, mr);\n \t\tLIST_REMOVE(mr, mr);\n-\t\tLIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);\n+\t\tLIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);\n \t}\n-\tLIST_INIT(&priv->mr.mr_list);\n+\tLIST_INIT(&sh->mr.mr_list);\n \t/* Free global cache. */\n-\tmlx5_mr_btree_free(&priv->mr.cache);\n-\trte_rwlock_write_unlock(&priv->mr.rwlock);\n+\tmlx5_mr_btree_free(&sh->mr.cache);\n+\trte_rwlock_write_unlock(&sh->mr.rwlock);\n \t/* Free all remaining MRs. */\n-\tmlx5_mr_garbage_collect(dev);\n+\tmlx5_mr_garbage_collect(sh);\n }\ndiff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h\nindex 786f6a3..89e89b7 100644\n--- a/drivers/net/mlx5/mlx5_mr.h\n+++ b/drivers/net/mlx5/mlx5_mr.h\n@@ -62,6 +62,7 @@ struct mlx5_mr_ctrl {\n \tstruct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */\n } __rte_packed;\n \n+struct mlx5_ibv_shared;\n extern struct mlx5_dev_list  mlx5_mem_event_cb_list;\n extern rte_rwlock_t mlx5_mem_event_rwlock;\n \n@@ -76,11 +77,11 @@ void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n \t\t\t  size_t len, void *arg);\n int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,\n \t\t      struct rte_mempool *mp);\n-void mlx5_mr_release(struct rte_eth_dev *dev);\n+void mlx5_mr_release(struct mlx5_ibv_shared *sh);\n \n /* Debug purpose functions. */\n void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);\n-void mlx5_mr_dump_dev(struct rte_eth_dev *dev);\n+void mlx5_mr_dump_dev(struct mlx5_ibv_shared *sh);\n \n /**\n  * Look up LKey from given lookup table by linear search. Firstly look up the\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex dbe074f..4d55fd4 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -814,7 +814,7 @@ struct mlx5_txq_ctrl *\n \t\tgoto error;\n \t}\n \t/* Save pointer of global generation number to check memory event. */\n-\ttmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;\n+\ttmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;\n \tassert(desc > MLX5_TX_COMP_THRESH);\n \ttmpl->txq.offloads = conf->offloads |\n \t\t\t     dev->data->dev_conf.txmode.offloads;\n",
    "prefixes": [
        "v3",
        "1/2"
    ]
}
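
The "mbox" field above points at the raw patch email in mbox format, which git am can apply directly. A minimal sketch, again assuming the requests library and a local DPDK checkout (the checkout path is illustrative):

import subprocess

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/53122/").json()
mbox = requests.get(patch["mbox"]).content  # raw patch email, mbox format

# "git am" reads the mailbox from stdin when no file is given;
# replace /path/to/dpdk with a real checkout.
subprocess.run(["git", "-C", "/path/to/dpdk", "am"], input=mbox, check=True)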