[2/2] net/mlx5: workaround counter memory region creation
Due to a kernel issue in direct MKEY creation using the DevX API for
physical memory, this patch replaces the counter MR creation with the
Verbs API.
Fixes: 3aa279157fa0 ("net/mlx5: synchronize flow counter pool creation")
Cc: stable@dpdk.org
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
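Note for reviewers: on Linux the share_cache reg_mr_cb/dereg_mr_cb
callbacks resolve, as I understand it, to plain Verbs MR registration,
so the counter raw memory now goes through the kernel uverbs path and
the counter query uses mr->lkey where the DevX MKEY id was used before.
Below is a minimal standalone sketch of that registration path; it is
illustrative only, not PMD code, and assumes libibverbs, at least one
available device, and trimmed error handling.

/* Standalone illustration, not PMD code: register the counter raw
 * memory through Verbs and use mr->lkey where the DevX mkey id was
 * used before.
 */
#include <infiniband/verbs.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_mr *mr;
	size_t size = (size_t)sysconf(_SC_PAGESIZE);
	void *mem = NULL;

	if (devs == NULL || devs[0] == NULL)
		return 1;
	ctx = ibv_open_device(devs[0]);
	if (ctx == NULL)
		return 1;
	pd = ibv_alloc_pd(ctx);
	if (pd == NULL || posix_memalign(&mem, size, size) != 0)
		return 1;
	/* The uverbs registration path does not hit the kernel issue
	 * seen with direct DevX MKEY creation on this memory.
	 */
	mr = ibv_reg_mr(pd, mem, size, IBV_ACCESS_LOCAL_WRITE);
	if (mr == NULL)
		return 1;
	printf("lkey=0x%x\n", mr->lkey);
	ibv_dereg_mr(mr);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	free(mem);
	return 0;
}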
drivers/net/mlx5/linux/mlx5_os.c | 10 ----------
drivers/net/mlx5/mlx5.c | 11 +++++++----
drivers/net/mlx5/mlx5.h | 5 +----
drivers/net/mlx5/mlx5_flow.c | 27 +++++----------------------
drivers/net/mlx5/windows/mlx5_os.c | 9 ---------
5 files changed, 13 insertions(+), 49 deletions(-)
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1163,16 +1163,6 @@
err = -err;
goto error;
}
- /* Check relax ordering support. */
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- } else {
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- }
sh->rq_ts_format = config->hca_attr.rq_ts_format;
sh->sq_ts_format = config->hca_attr.sq_ts_format;
sh->qp_ts_format = config->hca_attr.qp_ts_format;
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -469,17 +469,20 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
/**
* Destroy all the resources allocated for a counter memory management.
*
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object to free.
* @param[in] mng
* Pointer to the memory management structure.
*/
static void
-mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
+mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_counter_stats_mem_mng *mng)
{
uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
LIST_REMOVE(mng, next);
- claim_zero(mlx5_devx_cmd_destroy(mng->dm));
- claim_zero(mlx5_os_umem_dereg(mng->umem));
+ sh->share_cache.dereg_mr_cb(&mng->dm);
+ memset(&mng->dm, 0, sizeof(mng->dm));
mlx5_free(mem);
}
@@ -533,7 +536,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
}
mng = LIST_FIRST(&sh->cmng.mem_mngs);
while (mng) {
- mlx5_flow_destroy_counter_stat_mem_mng(mng);
+ mlx5_flow_destroy_counter_stat_mem_mng(sh, mng);
mng = LIST_FIRST(&sh->cmng.mem_mngs);
}
memset(&sh->cmng, 0, sizeof(sh->cmng));
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -422,8 +422,7 @@ struct mlx5_flow_counter_pool {
struct mlx5_counter_stats_mem_mng {
LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
struct mlx5_counter_stats_raw *raws;
- struct mlx5_devx_obj *dm;
- void *umem;
+ struct mlx5_pmd_mr dm;
};
/* Raw memory structure for the counter statistics values of a pool. */
@@ -454,8 +453,6 @@ struct mlx5_flow_counter_mng {
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
- bool relaxed_ordering_read;
- bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6717,7 +6717,6 @@ struct mlx5_meter_domains_infos *
static int
mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
@@ -6727,6 +6726,7 @@ struct mlx5_meter_domains_infos *
sizeof(struct mlx5_counter_stats_mem_mng);
size_t pgsize = rte_mem_page_size();
uint8_t *mem;
+ int ret;
int i;
if (pgsize == (size_t)-1) {
@@ -6741,26 +6741,9 @@ struct mlx5_meter_domains_infos *
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- mlx5_free(mem);
- return -rte_errno;
- }
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
- mkey_attr.log_entity_size = 0;
- mkey_attr.pg_access = 0;
- mkey_attr.klm_array = NULL;
- mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
- mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_os_umem_dereg(mem_mng->umem);
+ ret = sh->share_cache.reg_mr_cb(sh->pd, (void *)mem, size,
+ &mem_mng->dm);
+ if (ret) {
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
@@ -6879,7 +6862,7 @@ struct mlx5_meter_domains_infos *
ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
MLX5_COUNTERS_PER_POOL,
NULL, NULL,
- pool->raw_hw->mem_mng->dm->id,
+ pool->raw_hw->mem_mng->dm.lkey,
(void *)(uintptr_t)
pool->raw_hw->data,
sh->devx_comp,
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -466,15 +466,6 @@
err = -err;
goto error;
}
- /* Check relax ordering support. */
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- }
}
if (config->devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];