[4/4] net/mlx5: fix imissed statistics
Commit Message
The imissed port statistic counts packets that were dropped by the
device Rx queues.
In mlx5, the imissed counter is the sum of 2 counters:
- packets dropped by the SW queue handling, counted by SW.
- packets dropped by the HW queues due to "out of buffer" events
  detected when no SW buffer is available for the incoming
  packets.
There is a HW counter object that should be created per device, and all
the Rx queues should be assigned to this counter at configuration time.
This assignment was missed when the Rx queues were created by DevX,
which left the "out of buffer" counter stuck at zero in that case.
Add 2 options to assign the DevX Rx queues to a queue counter:
- Create a queue counter per device by DevX and assign all the
  queues to it.
- Query the kernel counter and assign all the queues to it.
Use the first option by default; if it fails, fall back to the
second option.
Fixes: e79c9be91515 ("net/mlx5: support Rx hairpin queues")
Fixes: dc9ceff73c99 ("net/mlx5: create advanced RxQ via DevX")
Cc: stable@dpdk.org
Signed-off-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 52 ++++++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5.c | 4 ++++
drivers/net/mlx5/mlx5.h | 2 ++
drivers/net/mlx5/mlx5_devx.c | 2 ++
4 files changed, 60 insertions(+)
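For orientation, the summation described in the commit message can be pictured as below. This is a minimal sketch, not driver code: sw_queue_drops() and hw_out_of_buffer() are hypothetical stand-ins for the SW per-queue drop counter and the queried HW "out of buffer" counter.

#include <stdint.h>

/* Hypothetical stand-ins, not mlx5 driver functions. */
uint64_t sw_queue_drops(uint16_t port_id);   /* drops counted by SW */
uint64_t hw_out_of_buffer(uint16_t port_id); /* HW "out of buffer" drops */

/* The imissed value reported to the application sums both drop sources. */
static uint64_t
imissed_get(uint16_t port_id)
{
	return sw_queue_drops(port_id) + hw_out_of_buffer(port_id);
}

Without the fix, the hw_out_of_buffer() term stays at zero for DevX-created Rx queues, so imissed undercounts the real drops.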
Comments
> -----Original Message-----
> From: Matan Azrad <matan@nvidia.com>
> Sent: Thursday, February 25, 2021 12:45
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; stable@dpdk.org
> Subject: [PATCH 4/4] net/mlx5: fix imissed statistics
>
> [...]
>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
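For reference, the kernel-driver fallback added in the first hunk below boils down to: create a throwaway verbs WQ, move it to the RDY state (the kernel assigns its global queue counter only then), read back the counter ID, and destroy the objects. A minimal sketch under these assumptions, using direct ibv_* calls in place of the driver's mlx5_glue wrappers; devx_wq_query_counter() is a hypothetical stand-in for mlx5_devx_cmd_wq_query():

#include <stdint.h>
#include <infiniband/verbs.h>

/* Hypothetical stand-in for mlx5_devx_cmd_wq_query(). */
int devx_wq_query_counter(struct ibv_wq *wq, uint32_t *counter_set_id);

/* Create a throwaway WQ, move it to RDY so the kernel assigns its global
 * queue counter, read the counter ID, then destroy the objects. */
static int
kernel_counter_id_query(struct ibv_context *ctx, struct ibv_pd *pd,
			uint32_t *counter_set_id)
{
	struct ibv_cq *cq = ibv_create_cq(ctx, 1, NULL, NULL, 0);
	struct ibv_wq *wq;
	int ret = -1;

	if (cq == NULL)
		return -1;
	wq = ibv_create_wq(ctx, &(struct ibv_wq_init_attr){
		.wq_type = IBV_WQT_RQ,
		.max_wr = 1,
		.max_sge = 1,
		.pd = pd,
		.cq = cq,
	});
	if (wq != NULL) {
		ret = ibv_modify_wq(wq, &(struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		});
		if (ret == 0)
			ret = devx_wq_query_counter(wq, counter_set_id);
		ibv_destroy_wq(wq);
	}
	ibv_destroy_cq(cq);
	return ret;
}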
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -645,6 +645,53 @@
 #endif
 }
 
+static void
+mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	void *ctx = priv->sh->ctx;
+
+	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
+	if (!priv->q_counters) {
+		struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
+		struct ibv_wq *wq;
+
+		DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
+			"by DevX - fall-back to use the kernel driver global "
+			"queue counter.", dev->data->port_id);
+		/* Create WQ by kernel and query its queue counter ID. */
+		if (cq) {
+			wq = mlx5_glue->create_wq(ctx,
+					&(struct ibv_wq_init_attr){
+						.wq_type = IBV_WQT_RQ,
+						.max_wr = 1,
+						.max_sge = 1,
+						.pd = priv->sh->pd,
+						.cq = cq,
+					});
+			if (wq) {
+				/* Counter is assigned only on RDY state. */
+				int ret = mlx5_glue->modify_wq(wq,
+					&(struct ibv_wq_attr){
+						.attr_mask = IBV_WQ_ATTR_STATE,
+						.wq_state = IBV_WQS_RDY,
+					});
+
+				if (ret == 0)
+					mlx5_devx_cmd_wq_query(wq,
+						&priv->counter_set_id);
+				claim_zero(mlx5_glue->destroy_wq(wq));
+			}
+			claim_zero(mlx5_glue->destroy_cq(cq));
+		}
+	} else {
+		priv->counter_set_id = priv->q_counters->id;
+	}
+	if (priv->counter_set_id == 0)
+		DRV_LOG(INFO, "Part of the port %d statistics will not be "
+			"available.", dev->data->port_id);
+}
+
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -1498,6 +1545,7 @@
 		/* Use specific wrappers for Tx object. */
 		priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
 		priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
+		mlx5_queue_counter_id_prepare(eth_dev);
 	} else {
 		priv->obj_ops = ibv_obj_ops;
@@ -2433,6 +2481,10 @@
 	int fd;
 
 	if (priv->sh) {
+		if (priv->q_counters != NULL &&
+		    strcmp(ctr_name, "out_of_buffer") == 0)
+			return mlx5_devx_cmd_queue_counter_query(priv->sh->ctx,
+					0, (uint32_t *)stat);
 		MKSTR(path, "%s/ports/%d/hw_counters/%s",
 		      priv->sh->ibdev_path,
 		      priv->dev_port,
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1345,6 +1345,10 @@ struct mlx5_dev_ctx_shared *
 		priv->txqs = NULL;
 	}
 	mlx5_proc_priv_uninit(dev);
+	if (priv->q_counters) {
+		mlx5_devx_cmd_destroy(priv->q_counters);
+		priv->q_counters = NULL;
+	}
 	if (priv->drop_queue.hrxq)
 		mlx5_drop_action_destroy(dev);
 	if (priv->mreg_cp_tbl)
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -984,6 +984,8 @@ struct mlx5_priv {
 	LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
 	rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
 	uint32_t rss_shared_actions; /* RSS shared actions. */
+	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
+	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -275,6 +275,7 @@
 				MLX5_WQ_END_PAD_MODE_ALIGN :
 				MLX5_WQ_END_PAD_MODE_NONE;
 	rq_attr.wq_attr.pd = priv->sh->pdn;
+	rq_attr.counter_set_id = priv->counter_set_id;
 	/* Create RQ using DevX API. */
 	return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
 				   wqe_size, log_desc_n, &rq_attr,
@@ -438,6 +439,7 @@
 	attr.wq_attr.log_hairpin_num_packets =
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
+	attr.counter_set_id = priv->counter_set_id;
 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
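Once either counter source is wired to the Rx queues, the corrected statistic is visible through the standard ethdev API; a minimal application-side usage sketch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Print the Rx drop counter this patch makes meaningful for DevX queues. */
static void
print_imissed(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("port %u: imissed=%" PRIu64 "\n",
		       port_id, stats.imissed);
}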