From: Michael Baum <michaelba@nvidia.com>

Due to kernel driver / FW issues in direct MKEY creation using the
DevX API, switch the counter MR creation to the wrapped mkey API.

Fixes: 5382d28c2110 ("net/mlx5: accelerate DV flow counter transactions")
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Signed-off-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/mlx5.c | 8 +-------
drivers/net/mlx5/mlx5.h | 5 +----
drivers/net/mlx5/mlx5_flow.c | 25 ++++++-------------------
3 files changed, 8 insertions(+), 30 deletions(-)
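
A note for reviewers: mlx5_os_wrapped_mkey_create() takes the device
context, the PD and its number, and the buffer to register, and fills
the struct mlx5_pmd_wrapped_mr whose lkey the counter query below now
passes in place of the old direct DevX mkey id (mem_mng->dm->id). As
a rough illustration only (not the actual helper, which also wraps
the registration with a DevX mkey), a kernel-driver-based
registration with plain Verbs would look like the sketch below;
counter_mem_register() is a hypothetical name:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>

/*
 * Sketch only: register the counter raws buffer through the kernel
 * driver instead of creating a direct MKEY on a umem via DevX. The
 * real mlx5_os_wrapped_mkey_create() additionally wraps the MR, so
 * the lkey it reports may differ from the plain Verbs lkey below.
 */
static int
counter_mem_register(struct ibv_pd *pd, void *addr, size_t len,
		     struct ibv_mr **mr, uint32_t *lkey)
{
	/* Same access flag the old mlx5_os_umem_reg() call used. */
	*mr = ibv_reg_mr(pd, addr, len, IBV_ACCESS_LOCAL_WRITE);
	if (*mr == NULL)
		return -errno;
	*lkey = (*mr)->lkey;
	return 0;
}

The failure path mirrors the patch: a nonzero return lets the caller
set rte_errno and free the raws buffer, exactly as the new
mlx5_os_wrapped_mkey_create() error branch does.
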
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -522,7 +522,6 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
int i;

memset(&sh->cmng, 0, sizeof(sh->cmng));
@@ -535,10 +534,6 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
TAILQ_INIT(&sh->cmng.counters[i]);
rte_spinlock_init(&sh->cmng.csl[i]);
}
- if (sh->devx && !haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
- }
}

/**
@@ -553,8 +548,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

LIST_REMOVE(mng, next);
- claim_zero(mlx5_devx_cmd_destroy(mng->dm));
- claim_zero(mlx5_os_umem_dereg(mng->umem));
+ mlx5_os_wrapped_mkey_destroy(&mng->wm);
mlx5_free(mem);
}
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -462,8 +462,7 @@ struct mlx5_flow_counter_pool {
struct mlx5_counter_stats_mem_mng {
LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
struct mlx5_counter_stats_raw *raws;
- struct mlx5_devx_obj *dm;
- void *umem;
+ struct mlx5_pmd_wrapped_mr wm;
};

/* Raw memory structure for the counter statistics values of a pool. */
@@ -494,8 +493,6 @@ struct mlx5_flow_counter_mng {
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
- bool relaxed_ordering_read;
- bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7775,7 +7775,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
static int
mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
@@ -7785,6 +7784,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
sizeof(struct mlx5_counter_stats_mem_mng);
size_t pgsize = rte_mem_page_size();
uint8_t *mem;
+ int ret;
int i;

if (pgsize == (size_t)-1) {
@@ -7799,23 +7799,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- mlx5_free(mem);
- return -rte_errno;
- }
- memset(&mkey_attr, 0, sizeof(mkey_attr));
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->cdev->pdn;
- mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
- mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_os_umem_dereg(mem_mng->umem);
+ ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
+ sh->cdev->pdn, mem, size,
+ &mem_mng->wm);
+ if (ret) {
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
@@ -7934,7 +7921,7 @@ mlx5_flow_query_alarm(void *arg)
ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
MLX5_COUNTERS_PER_POOL,
NULL, NULL,
- pool->raw_hw->mem_mng->dm->id,
+ pool->raw_hw->mem_mng->wm.lkey,
(void *)(uintptr_t)
pool->raw_hw->data,
sh->devx_comp,