From patchwork Thu Oct 29 21:57:54 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82840 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 94F61A04B5; Thu, 29 Oct 2020 22:58:27 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 2C2B5CAAE; Thu, 29 Oct 2020 22:58:13 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 58181CAAD for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:04 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4To022832; Thu, 29 Oct 2020 23:58:04 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org, Dekel Peled Date: Thu, 29 Oct 2020 21:57:54 +0000 Message-Id: <1604008681-414157-2-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: <1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 1/8] common/mlx5: add DevX API to create ASO flow hit object X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Dekel Peled Add DevX API to create ASO flow hit object. Signed-off-by: Dekel Peled --- drivers/common/mlx5/mlx5_devx_cmds.c | 45 ++++++++++++++++++++++++++++++++++++ drivers/common/mlx5/mlx5_devx_cmds.h | 4 ++++ drivers/common/mlx5/mlx5_prm.h | 14 +++++++++++ drivers/common/mlx5/version.map | 1 + 4 files changed, 64 insertions(+) diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c index 8aee12d..4b33473 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/drivers/common/mlx5/mlx5_devx_cmds.c @@ -1991,3 +1991,48 @@ struct mlx5_devx_obj * invalid_buffer); return ret; } + +/** + * Create general object of type FLOW_HIT_ASO using DevX API. + * + * @param[in] ctx + * Context returned from mlx5 open_device() glue function. + * @param [in] pd + * PD value to associate the FLOW_HIT_ASO object with. + * + * @return + * The DevX object created, NULL otherwise and rte_errno is set. 
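+ *
+ * A minimal caller sketch (hypothetical ctx and pd values, following
+ * the usual DevX object lifecycle in this file):
+ * @code
+ * struct mlx5_devx_obj *aso_obj;
+ *
+ * aso_obj = mlx5_devx_cmd_create_flow_hit_aso_obj(ctx, pd);
+ * if (aso_obj == NULL)
+ *         return -rte_errno; /* rte_errno set by the failed call. */
+ * ...
+ * mlx5_devx_cmd_destroy(aso_obj);
+ * @endcode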
+ */ +struct mlx5_devx_obj * +mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx, uint32_t pd) +{ + uint32_t in[MLX5_ST_SZ_DW(create_flow_hit_aso_in)] = {0}; + uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + struct mlx5_devx_obj *flow_hit_aso_obj = NULL; + void *ptr = NULL; + + flow_hit_aso_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*flow_hit_aso_obj), + 0, SOCKET_ID_ANY); + if (!flow_hit_aso_obj) { + DRV_LOG(ERR, "Failed to allocate FLOW_HIT_ASO object data"); + rte_errno = ENOMEM; + return NULL; + } + ptr = MLX5_ADDR_OF(create_flow_hit_aso_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type, + MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO); + ptr = MLX5_ADDR_OF(create_flow_hit_aso_in, in, flow_hit_aso); + MLX5_SET(flow_hit_aso, ptr, access_pd, pd); + flow_hit_aso_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), + out, sizeof(out)); + if (!flow_hit_aso_obj->obj) { + rte_errno = errno; + DRV_LOG(ERR, "Failed to create FLOW_HIT_ASO obj using DevX."); + mlx5_free(flow_hit_aso_obj); + return NULL; + } + flow_hit_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + return flow_hit_aso_obj; +} diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h index abbea67..962eba7 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.h +++ b/drivers/common/mlx5/mlx5_devx_cmds.h @@ -483,4 +483,8 @@ int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, int mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj, struct mlx5_devx_virtio_q_couners_attr *attr); +__rte_internal +struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx, + uint32_t pd); + #endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */ diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h index d342263..f7f9926 100644 --- a/drivers/common/mlx5/mlx5_prm.h +++ b/drivers/common/mlx5/mlx5_prm.h @@ -2211,6 +2211,7 @@ enum { MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d, MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c, MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH = 0x0022, + MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO = 0x0025, }; struct mlx5_ifc_general_obj_in_cmd_hdr_bits { @@ -2329,6 +2330,19 @@ struct mlx5_ifc_query_virtq_out_bits { struct mlx5_ifc_virtio_net_q_bits virtq; }; +struct mlx5_ifc_flow_hit_aso_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x48]; + u8 access_pd[0x18]; + u8 reserved_at_a0[0x160]; + u8 flag[0x200]; +}; + +struct mlx5_ifc_create_flow_hit_aso_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_flow_hit_aso_bits flow_hit_aso; +}; + enum { MLX5_QP_ST_RC = 0x0, }; diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map index 884001c..ec8d96c 100644 --- a/drivers/common/mlx5/version.map +++ b/drivers/common/mlx5/version.map @@ -21,6 +21,7 @@ INTERNAL { mlx5_devx_cmd_create_tis; mlx5_devx_cmd_create_virtio_q_counters; mlx5_devx_cmd_create_virtq; + mlx5_devx_cmd_create_flow_hit_aso_obj; mlx5_devx_cmd_destroy; mlx5_devx_cmd_flow_counter_alloc; mlx5_devx_cmd_flow_counter_query; From patchwork Thu Oct 29 21:57:55 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82841 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 86664A04B5; Thu, 29 
Oct 2020 22:58:42 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id ACE83CAC7; Thu, 29 Oct 2020 22:58:14 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 6D193CAAE for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:04 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Tp022832; Thu, 29 Oct 2020 23:58:04 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org, Dekel Peled Date: Thu, 29 Oct 2020 21:57:55 +0000 Message-Id: <1604008681-414157-3-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: <1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 2/8] common/mlx5: use general object type for cap index X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Dekel Peled PRM defines the general object types using positive numbers. The same values are used as index for the relevant bit in HCA capabilities general_obj_types bit mask. Signed-off-by: Dekel Peled --- drivers/common/mlx5/mlx5_prm.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h index f7f9926..2023638 100644 --- a/drivers/common/mlx5/mlx5_prm.h +++ b/drivers/common/mlx5/mlx5_prm.h @@ -1041,9 +1041,12 @@ enum { MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1, }; -#define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q (1ULL << 0xd) -#define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS (1ULL << 0x1c) -#define MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE (1ULL << 0x22) +#define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q \ + (1ULL << MLX5_GENERAL_OBJ_TYPE_VIRTQ) +#define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS \ + (1ULL << MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS) +#define MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE \ + (1ULL << MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH) enum { MLX5_HCA_CAP_OPMOD_GET_MAX = 0, From patchwork Thu Oct 29 21:57:56 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82843 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9FACDA04B5; Thu, 29 Oct 2020 22:59:18 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C26A5CAF1; Thu, 29 Oct 2020 22:58:17 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 858CDCAB5 for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:04 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Tq022832; Thu, 29 Oct 2020 23:58:04 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org, Dekel Peled Date: Thu, 29 Oct 2020 21:57:56 +0000 
Message-Id: <1604008681-414157-4-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: <1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 3/8] common/mlx5: add read ASO flow hit HCA capability X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Dekel Peled Read and store the device capability of FLOW_HIT_ASO general object, using the DevX API. Signed-off-by: Dekel Peled --- drivers/common/mlx5/mlx5_devx_cmds.c | 3 +++ drivers/common/mlx5/mlx5_devx_cmds.h | 1 + drivers/common/mlx5/mlx5_prm.h | 2 ++ 3 files changed, 6 insertions(+) diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c index 4b33473..b82b0e9 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/drivers/common/mlx5/mlx5_devx_cmds.c @@ -718,6 +718,9 @@ struct mlx5_devx_obj * attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp); attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr, regexp_num_of_engines); + attr->flow_hit_aso = !!(MLX5_GET64(cmd_hca_cap, hcattr, + general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO); if (attr->qos.sup) { MLX5_SET(query_hca_cap_in, in, op_mod, MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP | diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h index 962eba7..acdee8f 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.h +++ b/drivers/common/mlx5/mlx5_devx_cmds.h @@ -101,6 +101,7 @@ struct mlx5_hca_attr { uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */ uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */ uint32_t scatter_fcs_w_decap_disable:1; + uint32_t flow_hit_aso:1; /* General obj type FLOW_HIT_ASO supported. 
*/ uint32_t regex:1; uint32_t regexp_num_of_engines; uint32_t log_max_ft_sampler_num:8; diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h index 2023638..9514aba 100644 --- a/drivers/common/mlx5/mlx5_prm.h +++ b/drivers/common/mlx5/mlx5_prm.h @@ -1047,6 +1047,8 @@ enum { (1ULL << MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS) #define MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE \ (1ULL << MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH) +#define MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO \ + (1ULL << MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO) enum { MLX5_HCA_CAP_OPMOD_GET_MAX = 0, From patchwork Thu Oct 29 21:57:57 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82842 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 62B93A04B5; Thu, 29 Oct 2020 22:59:00 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 46885CAD4; Thu, 29 Oct 2020 22:58:16 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 79582CAAF for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:04 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Tr022832; Thu, 29 Oct 2020 23:58:04 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org, Dekel Peled Date: Thu, 29 Oct 2020 21:57:57 +0000 Message-Id: <1604008681-414157-5-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: <1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 4/8] common/mlx5: add glue func create flow hit action X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Dekel Peled Add glue function to create the flow hit action using DV API, if rdma-core support exists. 
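For illustration, a caller-side sketch of the glue indirection
(devx_obj, offset and reg_c here are hypothetical; the ENOTSUP fallback
relies on the stub added below for builds where rdma-core lacks the
symbol):

	void *act = mlx5_glue->dr_action_create_flow_hit(devx_obj->obj,
							 offset, reg_c);
	if (act == NULL && errno == ENOTSUP) {
		/* Old rdma-core: fall back to counter-based aging. */
	}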
Signed-off-by: Dekel Peled --- drivers/common/mlx5/linux/meson.build | 2 ++ drivers/common/mlx5/linux/mlx5_glue.c | 16 ++++++++++++++++ drivers/common/mlx5/linux/mlx5_glue.h | 3 +++ 3 files changed, 21 insertions(+) diff --git a/drivers/common/mlx5/linux/meson.build b/drivers/common/mlx5/linux/meson.build index 9ef8e18..7c552a3 100644 --- a/drivers/common/mlx5/linux/meson.build +++ b/drivers/common/mlx5/linux/meson.build @@ -179,6 +179,8 @@ has_sym_args = [ [ 'HAVE_MLX5_DR_CREATE_ACTION_DEST_ARRAY', 'infiniband/mlx5dv.h', 'mlx5dv_dr_action_create_dest_array'], [ 'HAVE_DEVLINK', 'linux/devlink.h', 'DEVLINK_GENL_NAME' ], + [ 'HAVE_MLX5DV_DR_ACTION_FLOW_HIT', 'infiniband/mlx5dv.h', + 'mlx5dv_dr_action_create_flow_hit'], ] config = configuration_data() foreach arg:has_sym_args diff --git a/drivers/common/mlx5/linux/mlx5_glue.c b/drivers/common/mlx5/linux/mlx5_glue.c index 4a76902..02007f6 100644 --- a/drivers/common/mlx5/linux/mlx5_glue.c +++ b/drivers/common/mlx5/linux/mlx5_glue.c @@ -1283,6 +1283,21 @@ #endif } +static void * +mlx5_glue_dr_action_create_flow_hit(struct mlx5dv_devx_obj *devx_obj, + uint32_t offset, uint8_t reg_c_index) +{ +#ifdef HAVE_MLX5DV_DR_ACTION_FLOW_HIT + return mlx5dv_dr_action_create_flow_hit(devx_obj, offset, reg_c_index); +#else + (void)(devx_obj); + (void)(offset); + (void)(reg_c_index); + errno = ENOTSUP; + return NULL; +#endif +} + __rte_cache_aligned const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) { .version = MLX5_GLUE_VERSION, @@ -1402,4 +1417,5 @@ .dv_free_var = mlx5_glue_dv_free_var, .dv_alloc_pp = mlx5_glue_dv_alloc_pp, .dv_free_pp = mlx5_glue_dv_free_pp, + .dr_action_create_flow_hit = mlx5_glue_dr_action_create_flow_hit, }; diff --git a/drivers/common/mlx5/linux/mlx5_glue.h b/drivers/common/mlx5/linux/mlx5_glue.h index a5e7fb3..1a50e01 100644 --- a/drivers/common/mlx5/linux/mlx5_glue.h +++ b/drivers/common/mlx5/linux/mlx5_glue.h @@ -345,6 +345,9 @@ struct mlx5_glue { (void *domain, size_t num_dest, struct mlx5dv_dr_action_dest_attr *dests[]); + void *(*dr_action_create_flow_hit)(struct mlx5dv_devx_obj *devx_obj, + uint32_t offset, + uint8_t reg_c_index); }; extern const struct mlx5_glue *mlx5_glue; From patchwork Thu Oct 29 21:57:58 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82844 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D0D70A04B5; Thu, 29 Oct 2020 22:59:36 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 3D29FCB1A; Thu, 29 Oct 2020 22:58:19 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 8B2FDCAB6 for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:05 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Ts022832; Thu, 29 Oct 2020 23:58:04 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org, Dekel Peled Date: Thu, 29 Oct 2020 21:57:58 +0000 Message-Id: <1604008681-414157-6-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: 
<1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 5/8] common/mlx5: add definitions for ASO flow hit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Dekel Peled This patch adds different PRM definitions, related to ASO flow hit feature, in MLX5 PMD code. Signed-off-by: Dekel Peled --- drivers/common/mlx5/linux/meson.build | 2 ++ drivers/common/mlx5/mlx5_prm.h | 66 ++++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/drivers/common/mlx5/linux/meson.build b/drivers/common/mlx5/linux/meson.build index 7c552a3..a738cd2 100644 --- a/drivers/common/mlx5/linux/meson.build +++ b/drivers/common/mlx5/linux/meson.build @@ -126,6 +126,8 @@ has_sym_args = [ 'MLX5_OPCODE_SEND_EN' ], [ 'HAVE_MLX5_OPCODE_WAIT', 'infiniband/mlx5dv.h', 'MLX5_OPCODE_WAIT' ], + [ 'HAVE_MLX5_OPCODE_ACCESS_ASO', 'infiniband/mlx5dv.h', + 'MLX5_OPCODE_ACCESS_ASO' ], [ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h', 'SUPPORTED_40000baseKR4_Full' ], [ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h', diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h index 9514aba..cd50d13 100644 --- a/drivers/common/mlx5/mlx5_prm.h +++ b/drivers/common/mlx5/mlx5_prm.h @@ -120,7 +120,7 @@ MLX5_WQE_DSEG_SIZE + \ MLX5_ESEG_MIN_INLINE_SIZE) -/* Missed in mlv5dv.h, should define here. */ +/* Missed in mlx5dv.h, should define here. */ #ifndef HAVE_MLX5_OPCODE_ENHANCED_MPSW #define MLX5_OPCODE_ENHANCED_MPSW 0x29u #endif @@ -133,6 +133,10 @@ #define MLX5_OPCODE_WAIT 0x0fu #endif +#ifndef HAVE_MLX5_OPCODE_ACCESS_ASO +#define MLX5_OPCODE_ACCESS_ASO 0x2du +#endif + /* CQE value to inform that VLAN is stripped. */ #define MLX5_CQE_VLAN_STRIPPED (1u << 0) @@ -2348,6 +2352,66 @@ struct mlx5_ifc_create_flow_hit_aso_in_bits { struct mlx5_ifc_flow_hit_aso_bits flow_hit_aso; }; +enum mlx5_access_aso_op_mod { + ASO_OP_MOD_IPSEC = 0x0, + ASO_OP_MOD_CONNECTION_TRACKING = 0x1, + ASO_OP_MOD_POLICER = 0x2, + ASO_OP_MOD_RACE_AVOIDANCE = 0x3, + ASO_OP_MOD_FLOW_HIT = 0x4, +}; + +enum mlx5_aso_data_mask_mode { + BITWISE_64BIT = 0x0, + BYTEWISE_64BYTE = 0x1, + CALCULATED_64BYTE = 0x2, +}; + +enum mlx5_aso_pre_cond_op { + ASO_OP_ALWAYS_FALSE = 0x0, + ASO_OP_ALWAYS_TRUE = 0x1, + ASO_OP_EQUAL = 0x2, + ASO_OP_NOT_EQUAL = 0x3, + ASO_OP_GREATER_OR_EQUAL = 0x4, + ASO_OP_LESSER_OR_EQUAL = 0x5, + ASO_OP_LESSER = 0x6, + ASO_OP_GREATER = 0x7, + ASO_OP_CYCLIC_GREATER = 0x8, + ASO_OP_CYCLIC_LESSER = 0x9, +}; + +enum mlx5_aso_op { + ASO_OPER_LOGICAL_AND = 0x0, + ASO_OPER_LOGICAL_OR = 0x1, +}; + +/* ASO WQE CTRL segment. */ +struct mlx5_aso_cseg { + uint32_t va_h; + uint32_t va_l_ro; + uint32_t lkey; + uint32_t operand_masks; + uint32_t condition_0_data; + uint32_t condition_0_mask; + uint32_t condition_1_data; + uint32_t condition_1_mask; + uint64_t bitwise_data; + uint64_t data_mask; +} __rte_packed; + +#define MLX5_ASO_WQE_DSEG_SIZE 0x40 + +/* ASO WQE Data segment. */ +struct mlx5_aso_dseg { + uint8_t data[MLX5_ASO_WQE_DSEG_SIZE]; +} __rte_packed; + +/* ASO WQE. 
*/ +struct mlx5_aso_wqe { + struct mlx5_wqe_cseg general_cseg; + struct mlx5_aso_cseg aso_cseg; + struct mlx5_aso_dseg aso_dseg; +} __rte_packed; + enum { MLX5_QP_ST_RC = 0x0, }; From patchwork Thu Oct 29 21:57:59 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82845 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 77D42A04B5; Thu, 29 Oct 2020 22:59:56 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id A61F8CB45; Thu, 29 Oct 2020 22:58:20 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 970E9CABF for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:05 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Tt022832; Thu, 29 Oct 2020 23:58:05 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org, Dekel Peled Date: Thu, 29 Oct 2020 21:57:59 +0000 Message-Id: <1604008681-414157-7-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: <1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 6/8] net/mlx5: support flow hit action for aging X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Dekel Peled A new ASO (Advanced Steering Operation) feature was added to the latest mlx5 adapters to support flow hit detection. Using this new steering action, the driver can detect a flow traffic hit and reset the indication at any time. Add support for the rte_flow aging action using this new feature. The counter-based aging mode is used only when the ASO feature is not supported.
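For context, the application-facing API that this feature accelerates
is the standard rte_flow AGE action; a minimal usage sketch (attr,
pattern and error handling elided; my_ctx is a hypothetical user
context later returned by rte_flow_get_aged_flows()):

	struct rte_flow_action_age age = {
		.timeout = 10,	/* Seconds without traffic before aging. */
		.context = my_ctx,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);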
Signed-off-by: Dekel Peled Signed-off-by: Matan Azrad --- drivers/common/mlx5/mlx5_prm.h | 9 +- drivers/net/mlx5/linux/mlx5_os.c | 11 + drivers/net/mlx5/meson.build | 1 + drivers/net/mlx5/mlx5.c | 70 ++++ drivers/net/mlx5/mlx5.h | 98 +++++- drivers/net/mlx5/mlx5_flow.h | 1 + drivers/net/mlx5/mlx5_flow_age.c | 675 +++++++++++++++++++++++++++++++++++++++ drivers/net/mlx5/mlx5_flow_dv.c | 338 ++++++++++++++++++-- 8 files changed, 1171 insertions(+), 32 deletions(-) create mode 100644 drivers/net/mlx5/mlx5_flow_age.c diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h index cd50d13..bf5b6d9 100644 --- a/drivers/common/mlx5/mlx5_prm.h +++ b/drivers/common/mlx5/mlx5_prm.h @@ -2360,12 +2360,17 @@ enum mlx5_access_aso_op_mod { ASO_OP_MOD_FLOW_HIT = 0x4, }; +#define ASO_CSEG_DATA_MASK_MODE_OFFSET 30 + enum mlx5_aso_data_mask_mode { BITWISE_64BIT = 0x0, BYTEWISE_64BYTE = 0x1, CALCULATED_64BYTE = 0x2, }; +#define ASO_CSEG_COND_0_OPER_OFFSET 20 +#define ASO_CSEG_COND_1_OPER_OFFSET 16 + enum mlx5_aso_pre_cond_op { ASO_OP_ALWAYS_FALSE = 0x0, ASO_OP_ALWAYS_TRUE = 0x1, @@ -2379,6 +2384,8 @@ enum mlx5_aso_pre_cond_op { ASO_OP_CYCLIC_LESSER = 0x9, }; +#define ASO_CSEG_COND_OPER_OFFSET 6 + enum mlx5_aso_op { ASO_OPER_LOGICAL_AND = 0x0, ASO_OPER_LOGICAL_OR = 0x1, @@ -2387,7 +2394,7 @@ enum mlx5_aso_op { /* ASO WQE CTRL segment. */ struct mlx5_aso_cseg { uint32_t va_h; - uint32_t va_l_ro; + uint32_t va_l_r; uint32_t lkey; uint32_t operand_masks; uint32_t condition_0_data; diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index d4f2194..79dc65d 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1129,6 +1129,17 @@ err = -err; goto error; } +#ifdef HAVE_MLX5DV_DR_ACTION_FLOW_HIT + if (config->hca_attr.flow_hit_aso) { + sh->flow_hit_aso_en = 1; + err = mlx5_flow_aso_age_mng_init(sh); + if (err) { + err = -err; + goto error; + } + DRV_LOG(DEBUG, "Flow Hit ASO is supported."); + } +#endif /* HAVE_MLX5DV_DR_ACTION_FLOW_HIT */ /* Check relax ordering support. */ if (config->hca_attr.relaxed_ordering_write && config->hca_attr.relaxed_ordering_read && diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build index 9a97bb9..e7495a7 100644 --- a/drivers/net/mlx5/meson.build +++ b/drivers/net/mlx5/meson.build @@ -16,6 +16,7 @@ sources = files( 'mlx5_flow_meter.c', 'mlx5_flow_dv.c', 'mlx5_flow_verbs.c', + 'mlx5_flow_age.c', 'mlx5_mac.c', 'mlx5_mr.c', 'mlx5_rss.c', diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 862bd40..a5c50ff 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -345,6 +345,72 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096 /** + * Initialize the ASO aging management structure. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object to free + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
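+ *
+ * Called when the FLOW_HIT_ASO HCA capability is reported; safe to
+ * call more than once, since an already allocated aso_age_mng returns
+ * success immediately.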
+ */ +int +mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh) +{ + int err; + + if (sh->aso_age_mng) + return 0; + sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng), + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (!sh->aso_age_mng) { + DRV_LOG(ERR, "aso_age_mng allocation was failed."); + rte_errno = ENOMEM; + return -ENOMEM; + } + err = mlx5_aso_queue_init(sh); + if (err) { + mlx5_free(sh->aso_age_mng); + return -1; + } + rte_spinlock_init(&sh->aso_age_mng->resize_sl); + rte_spinlock_init(&sh->aso_age_mng->free_sl); + LIST_INIT(&sh->aso_age_mng->free); + return 0; +} + +/** + * Close and release all the resources of the ASO aging management structure. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object to free. + */ +static void +mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh) +{ + int i, j; + + mlx5_aso_queue_stop(sh); + mlx5_aso_queue_uninit(sh); + if (sh->aso_age_mng->pools) { + struct mlx5_aso_age_pool *pool; + + for (i = 0; i < sh->aso_age_mng->next; ++i) { + pool = sh->aso_age_mng->pools[i]; + claim_zero(mlx5_devx_cmd_destroy + (pool->flow_hit_aso_obj)); + for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) + if (pool->actions[j].dr_action) + claim_zero + (mlx5_glue->destroy_flow_action + (pool->actions[j].dr_action)); + mlx5_free(pool); + } + mlx5_free(sh->aso_age_mng->pools); + } + memset(&sh->aso_age_mng, 0, sizeof(sh->aso_age_mng)); +} + +/** * Initialize the shared aging list information per port. * * @param[in] sh @@ -984,6 +1050,10 @@ struct mlx5_dev_ctx_shared * * Only primary process handles async device events. **/ mlx5_flow_counters_mng_close(sh); + if (sh->aso_age_mng) { + mlx5_flow_aso_age_mng_close(sh); + sh->aso_age_mng = NULL; + } mlx5_flow_ipool_destroy(sh); mlx5_os_dev_shared_handler_uninstall(sh); if (sh->cnt_id_tbl) { diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index b080426..cf6975d 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -471,6 +471,84 @@ struct mlx5_flow_counter_mng { LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws; }; +/* ASO structures. */ +#define MLX5_ASO_QUEUE_LOG_DESC 10 + +struct mlx5_aso_cq { + uint16_t log_desc_n; + uint32_t cq_ci:24; + struct mlx5_devx_obj *cq; + struct mlx5dv_devx_umem *umem_obj; + union { + volatile void *umem_buf; + volatile struct mlx5_cqe *cqes; + }; + volatile uint32_t *db_rec; + uint64_t errors; +}; + +struct mlx5_aso_devx_mr { + void *buf; + uint64_t length; + struct mlx5dv_devx_umem *umem; + struct mlx5_devx_obj *mkey; + bool is_indirect; +}; + +struct mlx5_aso_sq_elem { + struct mlx5_aso_age_pool *pool; + uint16_t burst_size; +}; + +struct mlx5_aso_sq { + uint16_t log_desc_n; + struct mlx5_aso_cq cq; + struct mlx5_devx_obj *sq; + struct mlx5dv_devx_umem *wqe_umem; /* SQ buffer umem. */ + union { + volatile void *umem_buf; + volatile struct mlx5_aso_wqe *wqes; + }; + volatile uint32_t *db_rec; + struct mlx5dv_devx_uar *uar_obj; + volatile uint64_t *uar_addr; + struct mlx5_aso_devx_mr mr; + uint16_t pi; + uint16_t ci; + uint32_t sqn; + struct mlx5_aso_sq_elem elts[1 << MLX5_ASO_QUEUE_LOG_DESC]; + uint16_t next; /* Pool index of the next pool to query. */ +}; + +struct mlx5_aso_age_action { + LIST_ENTRY(mlx5_aso_age_action) next; + void *dr_action; + /* Following fields relevant only when action is active. */ + uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. 
*/ + struct mlx5_age_param age_params; +}; + +#define MLX5_ASO_AGE_ACTIONS_PER_POOL 512 + +struct mlx5_aso_age_pool { + struct mlx5_devx_obj *flow_hit_aso_obj; + uint16_t index; /* Pool index in pools array. */ + uint64_t time_of_last_age_check; /* In seconds. */ + struct mlx5_aso_age_action actions[MLX5_ASO_AGE_ACTIONS_PER_POOL]; +}; + +LIST_HEAD(aso_age_list, mlx5_aso_age_action); + +struct mlx5_aso_age_mng { + struct mlx5_aso_age_pool **pools; + uint16_t n; /* Total number of pools. */ + uint16_t next; /* Number of pools in use, index of next free pool. */ + rte_spinlock_t resize_sl; /* Lock for resize objects. */ + rte_spinlock_t free_sl; /* Lock for free list access. */ + struct aso_age_list free; /* Free age actions list - ready to use. */ + struct mlx5_aso_sq aso_sq; /* ASO queue objects. */ +}; + #define MLX5_AGE_EVENT_NEW 1 #define MLX5_AGE_TRIGGER 2 #define MLX5_AGE_SET(age_info, BIT) \ @@ -485,8 +563,11 @@ struct mlx5_flow_counter_mng { /* Aging information for per port. */ struct mlx5_age_info { uint8_t flags; /* Indicate if is new event or need to be triggered. */ - struct mlx5_counters aged_counters; /* Aged flow counter list. */ - rte_spinlock_t aged_sl; /* Aged flow counter list lock. */ + union { + struct mlx5_counters aged_counters; /* Aged counter list. */ + struct aso_age_list aged_aso; /* Aged ASO actions list. */ + }; + rte_spinlock_t aged_sl; /* Aged flow list lock. */ }; /* Per port data of shared IB device. */ @@ -623,6 +704,7 @@ struct mlx5_dev_ctx_shared { LIST_ENTRY(mlx5_dev_ctx_shared) next; uint32_t refcnt; uint32_t devx:1; /* Opened with DV. */ + uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */ uint32_t eqn; /* Event Queue number. */ uint32_t max_port; /* Maximal IB device port index. */ void *ctx; /* Verbs/DV/DevX context. */ @@ -678,6 +760,8 @@ struct mlx5_dev_ctx_shared { struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX]; /* Flex parser profiles information. */ void *devx_rx_uar; /* DevX UAR for Rx. */ + struct mlx5_aso_age_mng *aso_age_mng; + /* Management data for aging mechanism using ASO Flow Hit. */ struct mlx5_dev_shared_port port[]; /* per device port data array. */ }; @@ -811,6 +895,8 @@ enum mlx5_txq_modify_type { MLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */ }; + + /* HW objects operations structure. */ struct mlx5_obj_ops { int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on); @@ -961,6 +1047,7 @@ int mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap); bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev); int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev); +int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh); /* mlx5_ethdev.c */ @@ -1219,4 +1306,11 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); +/* mlx5_flow_age.c */ + +int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh); +int mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh); +int mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh); +void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh); + #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 8ef2a85..1b4a9d1 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -1035,6 +1035,7 @@ struct rte_flow { /**< Index to metadata register copy table resource. */ uint32_t counter; /**< Holds flow counter. 
*/ uint32_t tunnel_id; /**< Tunnel id */ + uint32_t age; /**< Holds ASO age bit index. */ } __rte_packed; /* diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c new file mode 100644 index 0000000..0d47d8e --- /dev/null +++ b/drivers/net/mlx5/mlx5_flow_age.c @@ -0,0 +1,675 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2020 Mellanox Technologies, Ltd + */ +#include +#include +#include + +#include + +#include "mlx5.h" +#include "mlx5_flow.h" + +/** + * Destroy Completion Queue used for ASO access. + * + * @param[in] cq + * ASO CQ to destroy. + */ +static void +mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq) +{ + if (cq->cq) + claim_zero(mlx5_devx_cmd_destroy(cq->cq)); + if (cq->umem_obj) + claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj)); + if (cq->umem_buf) + mlx5_free((void *)(uintptr_t)cq->umem_buf); + memset(cq, 0, sizeof(*cq)); +} + +/** + * Create Completion Queue used for ASO access. + * + * @param[in] ctx + * Context returned from mlx5 open_device() glue function. + * @param[in/out] cq + * Pointer to CQ to create. + * @param[in] log_desc_n + * Log of number of descriptors in queue. + * @param[in] socket + * Socket to use for allocation. + * @param[in] uar_page_id + * UAR page ID to use. + * @param[in] eqn + * EQ number. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n, + int socket, int uar_page_id, uint32_t eqn) +{ + struct mlx5_devx_cq_attr attr = { 0 }; + size_t pgsize = sysconf(_SC_PAGESIZE); + uint32_t umem_size; + uint16_t cq_size = 1 << log_desc_n; + + cq->log_desc_n = log_desc_n; + umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2; + cq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size, + 4096, socket); + if (!cq->umem_buf) { + DRV_LOG(ERR, "Failed to allocate memory for CQ."); + rte_errno = ENOMEM; + return -ENOMEM; + } + cq->umem_obj = mlx5_glue->devx_umem_reg(ctx, + (void *)(uintptr_t)cq->umem_buf, + umem_size, + IBV_ACCESS_LOCAL_WRITE); + if (!cq->umem_obj) { + DRV_LOG(ERR, "Failed to register umem for aso CQ."); + goto error; + } + attr.q_umem_valid = 1; + attr.db_umem_valid = 1; + attr.use_first_only = 0; + attr.overrun_ignore = 0; + attr.uar_page_id = uar_page_id; + attr.q_umem_id = cq->umem_obj->umem_id; + attr.q_umem_offset = 0; + attr.db_umem_id = attr.q_umem_id; + attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size; + attr.eqn = eqn; + attr.log_cq_size = log_desc_n; + attr.log_page_size = rte_log2_u32(pgsize); + cq->cq = mlx5_devx_cmd_create_cq(ctx, &attr); + if (!cq->cq) + goto error; + cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset); + cq->cq_ci = 0; + memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset); + return 0; +error: + mlx5_aso_cq_destroy(cq); + return -1; +} + +/** + * Free MR resources. + * + * @param[in] mr + * MR to free. + */ +static void +mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr) +{ + claim_zero(mlx5_devx_cmd_destroy(mr->mkey)); + if (!mr->is_indirect && mr->umem) + claim_zero(mlx5_glue->devx_umem_dereg(mr->umem)); + mlx5_free(mr->buf); + memset(mr, 0, sizeof(*mr)); +} + +/** + * Register Memory Region. + * + * @param[in] ctx + * Context returned from mlx5 open_device() glue function. + * @param[in] length + * Size of MR buffer. + * @param[in/out] mr + * Pointer to MR to create. + * @param[in] socket + * Socket to use for allocation. + * @param[in] pdn + * Protection Domain number to use. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr, + int socket, int pdn) +{ + struct mlx5_devx_mkey_attr mkey_attr; + + mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096, + socket); + if (!mr->buf) { + DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx."); + return -1; + } + mr->umem = mlx5_glue->devx_umem_reg(ctx, mr->buf, length, + IBV_ACCESS_LOCAL_WRITE); + if (!mr->umem) { + DRV_LOG(ERR, "Failed to register Umem for MR by Devx."); + goto error; + } + mkey_attr.addr = (uintptr_t)mr->buf; + mkey_attr.size = length; + mkey_attr.umem_id = mr->umem->umem_id; + mkey_attr.pd = pdn; + mkey_attr.pg_access = 1; + mkey_attr.klm_array = NULL; + mkey_attr.klm_num = 0; + mkey_attr.relaxed_ordering = 0; + mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr); + if (!mr->mkey) { + DRV_LOG(ERR, "Failed to create direct Mkey."); + goto error; + } + mr->length = length; + mr->is_indirect = false; + return 0; +error: + if (mr->umem) + claim_zero(mlx5_glue->devx_umem_dereg(mr->umem)); + mlx5_free(mr->buf); + return -1; +} + +/** + * Destroy Send Queue used for ASO access. + * + * @param[in] sq + * ASO SQ to destroy. + */ +static void +mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq) +{ + if (sq->wqe_umem) { + mlx5_glue->devx_umem_dereg(sq->wqe_umem); + sq->wqe_umem = NULL; + } + if (sq->umem_buf) { + mlx5_free((void *)(uintptr_t)sq->umem_buf); + sq->umem_buf = NULL; + } + if (sq->sq) { + mlx5_devx_cmd_destroy(sq->sq); + sq->sq = NULL; + } + if (sq->cq.cq) + mlx5_aso_cq_destroy(&sq->cq); + if (sq->uar_obj) + mlx5_glue->devx_free_uar(sq->uar_obj); + mlx5_aso_devx_dereg_mr(&sq->mr); + memset(sq, 0, sizeof(*sq)); +} + +/** + * Initialize Send Queue used for ASO access. + * + * @param[in] sq + * ASO SQ to initialize. + */ +static void +mlx5_aso_init_sq(struct mlx5_aso_sq *sq) +{ + volatile struct mlx5_aso_wqe *restrict wqe; + int i; + int size = 1 << sq->log_desc_n; + uint64_t addr; + + /* All the next fields state should stay constant. */ + for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) { + wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) | + (sizeof(*wqe) >> 4)); + wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id); + addr = (uint64_t)((uint64_t *)sq->mr.buf + i * + MLX5_ASO_AGE_ACTIONS_PER_POOL / 64); + wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32)); + wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u); + wqe->aso_cseg.operand_masks = rte_cpu_to_be_32 + (0u | + (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) | + (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) | + (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) | + (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET)); + wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX); + } +} + +/** + * Create Send Queue used for ASO access. + * + * @param[in] ctx + * Context returned from mlx5 open_device() glue function. + * @param[in/out] sq + * Pointer to SQ to create. + * @param[in] socket + * Socket to use for allocation. + * @param[in] pdn + * Protection Domain number to use. + * @param[in] eqn + * EQ number. + * @param[in] log_desc_n + * Log of number of descriptors in queue. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
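+ *
+ * The MR registered here holds (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8)
+ * bytes, i.e. one 64-byte hit bitmap, per SQ descriptor - the buffer
+ * into which the device writes each queried pool's flow hit flags.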
+ */ +static int +mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, + uint32_t pdn, uint32_t eqn, uint16_t log_desc_n) +{ + struct mlx5_devx_create_sq_attr attr = { 0 }; + struct mlx5_devx_modify_sq_attr modify_attr = { 0 }; + size_t pgsize = sysconf(_SC_PAGESIZE); + struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr; + uint32_t sq_desc_n = 1 << log_desc_n; + uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n; + int ret; + + if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) * + sq_desc_n, &sq->mr, socket, pdn)) + return -1; + sq->uar_obj = mlx5_glue->devx_alloc_uar(ctx, 0); + if (!sq->uar_obj) + goto error; + if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket, + sq->uar_obj->page_id, eqn)) + goto error; + sq->log_desc_n = log_desc_n; + sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size + + sizeof(*sq->db_rec) * 2, 4096, socket); + if (!sq->umem_buf) { + DRV_LOG(ERR, "Can't allocate wqe buffer."); + return -ENOMEM; + } + sq->wqe_umem = mlx5_glue->devx_umem_reg(ctx, + (void *)(uintptr_t)sq->umem_buf, + wq_size + + sizeof(*sq->db_rec) * 2, + IBV_ACCESS_LOCAL_WRITE); + if (!sq->wqe_umem) { + DRV_LOG(ERR, "Failed to register umem for SQ."); + rte_errno = ENOMEM; + goto error; + } + attr.state = MLX5_SQC_STATE_RST; + attr.tis_lst_sz = 0; + attr.tis_num = 0; + attr.user_index = 0xFFFF; + attr.cqn = sq->cq.cq->id; + wq_attr->uar_page = sq->uar_obj->page_id; + wq_attr->pd = pdn; + wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC; + wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize); + wq_attr->wq_umem_id = sq->wqe_umem->umem_id; + wq_attr->wq_umem_offset = 0; + wq_attr->wq_umem_valid = 1; + wq_attr->log_wq_stride = 6; + wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6; + wq_attr->dbr_umem_id = wq_attr->wq_umem_id; + wq_attr->dbr_addr = wq_size; + wq_attr->dbr_umem_valid = 1; + sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr); + if (!sq->sq) { + DRV_LOG(ERR, "Can't create sq object."); + rte_errno = ENOMEM; + goto error; + } + modify_attr.state = MLX5_SQC_STATE_RDY; + ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr); + if (ret) { + DRV_LOG(ERR, "Can't change sq state to ready."); + rte_errno = ENOMEM; + goto error; + } + sq->ci = 0; + sq->pi = 0; + sq->sqn = sq->sq->id; + sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr)); + sq->uar_addr = (volatile uint64_t *)((uint8_t *)sq->uar_obj->base_addr + + 0x800); + mlx5_aso_init_sq(sq); + return 0; +error: + mlx5_aso_destroy_sq(sq); + return -1; +} + +/** + * API to create and initialize Send Queue used for ASO access. + * + * @param[in] sh + * Pointer to shared device context. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh) +{ + return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0, sh->pdn, + sh->eqn, MLX5_ASO_QUEUE_LOG_DESC); +} + +/** + * API to destroy Send Queue used for ASO access. + * + * @param[in] sh + * Pointer to shared device context. + */ +void +mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh) +{ + mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq); +} + +/** + * Write a burst of WQEs to ASO SQ. + * + * @param[in] mng + * ASO management data, contains the SQ. + * @param[in] n + * Index of the last valid pool. + * + * @return + * Number of WQEs in burst. 
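+ *
+ * Only the last WQE of the burst requests an unconditional completion
+ * (MLX5_COMP_ALWAYS); the preceding ones report only the first error.
+ * The doorbell record update is ordered before the 64-bit UAR write by
+ * the barriers in the function body.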
+ */ +static uint16_t +mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n) +{ + volatile struct mlx5_aso_wqe *wqe; + struct mlx5_aso_sq *sq = &mng->aso_sq; + struct mlx5_aso_age_pool *pool; + uint16_t size = 1 << sq->log_desc_n; + uint16_t mask = size - 1; + uint16_t max; + uint16_t start_pi = sq->pi; + + max = RTE_MIN(size - (uint16_t)(sq->pi - sq->ci), n - sq->next); + if (unlikely(!max)) + return 0; + sq->elts[start_pi & mask].burst_size = max; + do { + wqe = &sq->wqes[sq->pi & mask]; + rte_prefetch0(&sq->wqes[(sq->pi + 1) & mask]); + /* Fill next WQE. */ + rte_spinlock_lock(&mng->resize_sl); + pool = mng->pools[sq->next]; + rte_spinlock_unlock(&mng->resize_sl); + sq->elts[sq->pi & mask].pool = pool; + wqe->general_cseg.misc = + rte_cpu_to_be_32(((struct mlx5_devx_obj *) + (pool->flow_hit_aso_obj))->id); + wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << + MLX5_COMP_MODE_OFFSET); + wqe->general_cseg.opcode = rte_cpu_to_be_32 + (MLX5_OPCODE_ACCESS_ASO | + ASO_OP_MOD_FLOW_HIT << 24 | + sq->pi << 9); + sq->pi++; + sq->next++; + max--; + } while (max); + wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS << + MLX5_COMP_MODE_OFFSET); + rte_io_wmb(); + sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi << 1); + rte_wmb(); + *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/ + rte_wmb(); + return sq->elts[start_pi & mask].burst_size; +} + +/** + * Debug utility function. Dump contents of error CQE and WQE. + * + * @param[in] cqe + * Error CQE to dump. + * @param[in] wqe + * Error WQE to dump. + */ +static void +mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe) +{ + int i; + + DRV_LOG(ERR, "Error cqe:"); + for (i = 0; i < 16; i += 4) + DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], + cqe[i + 2], cqe[i + 3]); + DRV_LOG(ERR, "\nError wqe:"); + for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4) + DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1], + wqe[i + 2], wqe[i + 3]); +} + +/** + * Handle case of error CQE. + * + * @param[in] sq + * ASO SQ to use. + */ +static void +mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq) +{ + struct mlx5_aso_cq *cq = &sq->cq; + uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1); + volatile struct mlx5_err_cqe *cqe = + (volatile struct mlx5_err_cqe *)&cq->cqes[idx]; + + cq->errors++; + idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n); + mlx5_aso_dump_err_objs((volatile uint32_t *)cqe, + (volatile uint32_t *)&sq->wqes[idx]); +} + +/** + * Update ASO objects upon completion. + * + * @param[in] sh + * Shared device context. + * @param[in] n + * Number of completed ASO objects. 
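+ *
+ * Each completion corresponds to one pool: its 64-byte bitmap in the
+ * MR holds 512 hit bits, where the flag of action j is byte
+ * (63 - j / 8), bit (j % 8). A set bit resets sec_since_last_hit, a
+ * clear bit adds the seconds elapsed since the pool was last checked.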
+ */ +static void +mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n) +{ + struct mlx5_aso_age_mng *mng = sh->aso_age_mng; + struct mlx5_aso_sq *sq = &mng->aso_sq; + struct mlx5_age_info *age_info; + const uint16_t size = 1 << sq->log_desc_n; + const uint16_t mask = size - 1; + const uint64_t curr = MLX5_CURR_TIME_SEC; + uint16_t expected = AGE_CANDIDATE; + uint16_t i; + + for (i = 0; i < n; ++i) { + uint16_t idx = (sq->ci + i) & mask; + struct mlx5_aso_age_pool *pool = sq->elts[idx].pool; + uint64_t diff = curr - pool->time_of_last_age_check; + uint64_t *addr = sq->mr.buf; + int j; + + addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64; + pool->time_of_last_age_check = curr; + for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) { + struct mlx5_aso_age_action *act = &pool->actions[j]; + struct mlx5_age_param *ap = &act->age_params; + uint8_t byte; + uint8_t offset; + uint8_t *u8addr; + uint8_t hit; + + if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) != + AGE_CANDIDATE) + continue; + byte = 63 - (j / 8); + offset = j % 8; + u8addr = (uint8_t *)addr; + hit = (u8addr[byte] >> offset) & 0x1; + if (hit) { + __atomic_store_n(&ap->sec_since_last_hit, 0, + __ATOMIC_RELAXED); + } else { + struct mlx5_priv *priv; + + __atomic_fetch_add(&ap->sec_since_last_hit, + diff, __ATOMIC_RELAXED); + /* If timeout passed add to aged-out list. */ + if (ap->sec_since_last_hit <= ap->timeout) + continue; + priv = + rte_eth_devices[ap->port_id].data->dev_private; + age_info = GET_PORT_AGE_INFO(priv); + rte_spinlock_lock(&age_info->aged_sl); + if (__atomic_compare_exchange_n(&ap->state, + &expected, + AGE_TMOUT, + false, + __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { + LIST_INSERT_HEAD(&age_info->aged_aso, + act, next); + MLX5_AGE_SET(age_info, + MLX5_AGE_EVENT_NEW); + } + rte_spinlock_unlock(&age_info->aged_sl); + } + } + } + for (i = 0; i < sh->max_port; i++) { + age_info = &sh->port[i].age_info; + if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW)) + continue; + if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) + rte_eth_dev_callback_process + (&rte_eth_devices[sh->port[i].devx_ih_port_id], + RTE_ETH_EVENT_FLOW_AGED, NULL); + age_info->flags = 0; + } +} + +/** + * Handle completions from WQEs sent to ASO SQ. + * + * @param[in] sh + * Shared device context. + * + * @return + * Number of CQEs handled. + */ +static uint16_t +mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh) +{ + struct mlx5_aso_age_mng *mng = sh->aso_age_mng; + struct mlx5_aso_sq *sq = &mng->aso_sq; + struct mlx5_aso_cq *cq = &sq->cq; + volatile struct mlx5_cqe *restrict cqe; + const unsigned int cq_size = 1 << cq->log_desc_n; + const unsigned int mask = cq_size - 1; + uint32_t idx; + uint32_t next_idx = cq->cq_ci & mask; + const uint16_t max = (uint16_t)(sq->pi - sq->ci); + uint16_t i = 0; + int ret; + if (unlikely(!max)) + return 0; + do { + idx = next_idx; + next_idx = (cq->cq_ci + 1) & mask; + rte_prefetch0(&cq->cqes[next_idx]); + cqe = &cq->cqes[idx]; + ret = check_cqe(cqe, cq_size, cq->cq_ci); + /* + * Be sure owner read is done before any other cookie field or + * opaque field. 
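+ * check_cqe() reads the owner bit to decide HW/SW ownership, so
+ * this barrier must separate that load from the loads of the
+ * remaining CQE fields.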
+ */ + rte_io_rmb(); + if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { + if (likely(ret == MLX5_CQE_STATUS_HW_OWN)) + break; + mlx5_aso_cqe_err_handle(sq); + } else { + i += sq->elts[(sq->ci + i) & mask].burst_size; + } + cq->cq_ci++; + } while (1); + if (likely(i)) { + mlx5_aso_age_action_update(sh, i); + sq->ci += i; + rte_io_wmb(); + cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci); + } + return i; +} + +/** + * Periodically read CQEs and send WQEs to ASO SQ. + * + * @param[in] arg + * Shared device context containing the ASO SQ. + */ +static void +mlx5_flow_aso_alarm(void *arg) +{ + struct mlx5_dev_ctx_shared *sh = arg; + struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq; + uint32_t us = 100u; + uint16_t n; + + rte_spinlock_lock(&sh->aso_age_mng->resize_sl); + n = sh->aso_age_mng->next; + rte_spinlock_unlock(&sh->aso_age_mng->resize_sl); + mlx5_aso_completion_handle(sh); + if (sq->next == n) { + /* End of loop: wait 1 second. */ + us = 1000000u; + sq->next = 0; + } + mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n); + if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh)) + DRV_LOG(ERR, "Cannot reinitialize aso alarm."); +} + +/** + * API to start ASO access using ASO SQ. + * + * @param[in] sh + * Pointer to shared device context. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh) +{ + if (rte_eal_alarm_set(1000000, mlx5_flow_aso_alarm, sh)) { + DRV_LOG(ERR, "Cannot reinitialize ASO age alarm."); + return -rte_errno; + } + return 0; +} + +/** + * API to stop ASO access using ASO SQ. + * + * @param[in] sh + * Pointer to shared device context. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh) +{ + int retries = 1024; + + if (!sh->aso_age_mng->aso_sq.sq) + return -EINVAL; + rte_errno = 0; + while (--retries) { + rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh); + if (rte_errno != EINPROGRESS) + break; + rte_pause(); + } + return -rte_errno; +} diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 01b6e7c..dcc397d 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -4127,7 +4127,8 @@ struct mlx5_cache_entry * struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_age *age = action->conf; - if (!priv->config.devx || priv->sh->cmng.counter_fallback) + if (!priv->config.devx || (priv->sh->cmng.counter_fallback && + !priv->sh->aso_age_mng)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -8400,6 +8401,7 @@ struct mlx5_hlist_entry * __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED); return counter; } + /** * Add Tx queue matcher * @@ -9239,6 +9241,253 @@ struct mlx5_cache_entry * } /** + * Get ASO age action by index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age_idx + * Index to the ASO age action. + * + * @return + * The specified ASO age action. + */ +static struct mlx5_aso_age_action* +flow_dv_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) +{ + uint16_t pool_idx = age_idx & UINT16_MAX; + uint16_t offset = (age_idx >> 16) & UINT16_MAX; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_pool *pool = mng->pools[pool_idx]; + + return &pool->actions[offset - 1]; +} + +/** + * Remove a flow counter from aged counter list. 
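+ * The action state is switched to AGE_FREE; if the action already aged
+ * out and sits on the per-port aged list, it is unlinked under the
+ * list lock first.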
+ * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age + * Pointer to the aso age action handler. + */ +static void +flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev, + struct mlx5_aso_age_action *age) +{ + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param = &age->age_params; + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t expected = AGE_CANDIDATE; + + age_info = GET_PORT_AGE_INFO(priv); + if (!__atomic_compare_exchange_n(&age_param->state, &expected, + AGE_FREE, false, __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { + /** + * We need the lock even it is age timeout, + * since age action may still in process. + */ + rte_spinlock_lock(&age_info->aged_sl); + LIST_REMOVE(age, next); + rte_spinlock_unlock(&age_info->aged_sl); + __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED); + } +} + +static void +flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_action *age = flow_dv_aso_age_get_by_idx(dev, + age_idx); + + flow_dv_aso_age_remove_from_age(dev, age); + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age, next); + rte_spinlock_unlock(&mng->free_sl); +} + +/** + * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * 0 on success, otherwise negative errno value and rte_errno is set. + */ +static int +flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + void *old_pools = mng->pools; + uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE; + uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; + void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); + + if (!pools) { + rte_errno = ENOMEM; + return -ENOMEM; + } + if (old_pools) { + memcpy(pools, old_pools, + mng->n * sizeof(struct mlx5_flow_counter_pool *)); + mlx5_free(old_pools); + } else { + /* First ASO flow hit allocation - starting ASO data-path. */ + int ret = mlx5_aso_queue_start(priv->sh); + + if (ret) + return ret; + } + mng->n = resize; + mng->pools = pools; + return 0; +} + +/** + * Create and initialize a new ASO aging pool. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[out] age_free + * Where to put the pointer of a new age action. + * + * @return + * The age actions pool pointer and @p age_free is set on success, + * NULL otherwise and rte_errno is set. 
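+ *
+ * A new pool contributes MLX5_ASO_AGE_ACTIONS_PER_POOL (512) actions:
+ * actions[0] is returned to the caller through @p age_free and the
+ * rest are pushed to the management free list.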
+ */ +static struct mlx5_aso_age_pool * +flow_dv_age_pool_create(struct rte_eth_dev *dev, + struct mlx5_aso_age_action **age_free) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_pool *pool = NULL; + struct mlx5_devx_obj *obj = NULL; + uint32_t i; + + obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx, + priv->sh->pdn); + if (!obj) { + rte_errno = ENODATA; + DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX."); + return NULL; + } + pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY); + if (!pool) { + claim_zero(mlx5_devx_cmd_destroy(obj)); + rte_errno = ENOMEM; + return NULL; + } + pool->flow_hit_aso_obj = obj; + pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; + rte_spinlock_lock(&mng->resize_sl); + pool->index = mng->next; + /* Resize pools array if there is no room for the new pool in it. */ + if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) { + claim_zero(mlx5_devx_cmd_destroy(obj)); + mlx5_free(pool); + rte_spinlock_unlock(&mng->resize_sl); + return NULL; + } + mng->pools[pool->index] = pool; + mng->next++; + rte_spinlock_unlock(&mng->resize_sl); + /* Assign the first action in the new pool, the rest go to free list. */ + *age_free = &pool->actions[0]; + for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) { + pool->actions[i].offset = i; + LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next); + } + return pool; +} + +/** + * Allocate a ASO aging bit. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * Index to ASO age action on success, 0 otherwise and rte_errno is set. + */ +static uint32_t +flow_dv_aso_age_alloc(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_pool *pool; + struct mlx5_aso_age_action *age_free = NULL; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + + MLX5_ASSERT(mng); + /* Try to get the next free age action bit. */ + rte_spinlock_lock(&mng->free_sl); + age_free = LIST_FIRST(&mng->free); + if (age_free) { + LIST_REMOVE(age_free, next); + } else if (!flow_dv_age_pool_create(dev, &age_free)) { + rte_spinlock_unlock(&mng->free_sl); + goto err; + } + rte_spinlock_unlock(&mng->free_sl); + pool = container_of + ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL]) + (age_free - age_free->offset), struct mlx5_aso_age_pool, + actions); + if (!age_free->dr_action) { + age_free->dr_action = mlx5_glue->dr_action_create_flow_hit + (pool->flow_hit_aso_obj->obj, + age_free->offset, REG_C_5); + if (!age_free->dr_action) + goto err; + } + return pool->index | ((age_free->offset + 1) << 16); +err: + if (age_free) { + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age_free, next); + rte_spinlock_unlock(&mng->free_sl); + } + return 0; +} + +/** + * Create a age action using ASO mechanism. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] age + * Pointer to the aging action configuration. + * + * @return + * Index to flow counter on success, 0 otherwise. 
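+ *
+ * The returned index packs the pool index in the low 16 bits and
+ * (action offset + 1) in the high 16 bits, keeping 0 free as the
+ * failure value (see flow_dv_aso_age_alloc() and
+ * flow_dv_aso_age_get_by_idx()).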
+ */ +static uint32_t +flow_dv_translate_create_aso_age(struct rte_eth_dev *dev, + const struct rte_flow_action_age *age) +{ + uint32_t age_idx = 0; + struct mlx5_aso_age_action *aso_age = NULL; + + age_idx = flow_dv_aso_age_alloc(dev); + if (!age_idx) + return 0; + aso_age = flow_dv_aso_age_get_by_idx(dev, age_idx); + aso_age->age_params.context = age->context; + aso_age->age_params.timeout = age->timeout; + aso_age->age_params.port_id = dev->data->port_id; + __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0, + __ATOMIC_RELAXED); + __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE, + __ATOMIC_RELAXED); + return age_idx; +} + +/** * Fill the flow with DV spec, lock free * (mutex should be acquired by caller). * @@ -9528,6 +9777,22 @@ struct mlx5_cache_entry * dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; break; case RTE_FLOW_ACTION_TYPE_AGE: + if (priv->sh->flow_hit_aso_en) { + flow->age = flow_dv_translate_create_aso_age + (dev, action->conf); + if (!flow->age) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "can't create age action"); + dev_flow->dv.actions[actions_n++] = + (flow_dv_aso_age_get_by_idx + (dev, flow->age))->dr_action; + action_flags |= MLX5_FLOW_ACTION_AGE; + break; + } + /* Fall-through */ case RTE_FLOW_ACTION_TYPE_COUNT: if (!dev_conf->devx) { return rte_flow_error_set @@ -10859,6 +11124,8 @@ struct mlx5_cache_entry * mlx5_flow_meter_detach(fm); flow->meter = 0; } + if (flow->age) + flow_dv_aso_age_release(dev, flow->age); while (flow->dev_handles) { uint32_t tmp_idx = flow->dev_handles; @@ -11391,30 +11658,33 @@ struct mlx5_cache_entry * void *data, struct rte_flow_error *error) { struct rte_flow_query_age *resp = data; + struct mlx5_age_param *age_param; - if (flow->counter) { - struct mlx5_age_param *age_param = - flow_dv_counter_idx_get_age(dev, flow->counter); + if (flow->age) { + struct mlx5_aso_age_action *act = + flow_dv_aso_age_get_by_idx(dev, flow->age); + + age_param = &act->age_params; + } else if (flow->counter) { + age_param = flow_dv_counter_idx_get_age(dev, flow->counter); if (!age_param || !age_param->timeout) return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot read age data"); - resp->aged = __atomic_load_n(&age_param->state, - __ATOMIC_RELAXED) == - AGE_TMOUT ? 1 : 0; - resp->sec_since_last_hit_valid = !resp->aged; - if (resp->sec_since_last_hit_valid) - resp->sec_since_last_hit = - __atomic_load_n(&age_param->sec_since_last_hit, - __ATOMIC_RELAXED); - return 0; - } - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "age data not available"); + } else { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "age data not available"); + } + resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) == + AGE_TMOUT ? 
1 : 0;
+	resp->sec_since_last_hit_valid = !resp->aged;
+	if (resp->sec_since_last_hit_valid)
+		resp->sec_since_last_hit = __atomic_load_n
+			(&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+	return 0;
 }
 
 /**
@@ -12038,25 +12308,35 @@ struct mlx5_cache_entry *
 	struct mlx5_age_info *age_info;
 	struct mlx5_age_param *age_param;
 	struct mlx5_flow_counter *counter;
+	struct mlx5_aso_age_action *act;
 	int nb_flows = 0;
 
 	if (nb_contexts && !context)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					  NULL,
-					  "Should assign at least one flow or"
-					  " context to get if nb_contexts != 0");
+					  NULL, "empty context");
 	age_info = GET_PORT_AGE_INFO(priv);
 	rte_spinlock_lock(&age_info->aged_sl);
-	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
-		nb_flows++;
-		if (nb_contexts) {
-			age_param = MLX5_CNT_TO_AGE(counter);
-			context[nb_flows - 1] = age_param->context;
-			if (!(--nb_contexts))
-				break;
+	if (priv->sh->flow_hit_aso_en)
+		LIST_FOREACH(act, &age_info->aged_aso, next) {
+			nb_flows++;
+			if (nb_contexts) {
+				context[nb_flows - 1] =
+						act->age_params.context;
+				if (!(--nb_contexts))
+					break;
+			}
+		}
+	else
+		TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
+			nb_flows++;
+			if (nb_contexts) {
+				age_param = MLX5_CNT_TO_AGE(counter);
+				context[nb_flows - 1] = age_param->context;
+				if (!(--nb_contexts))
+					break;
+			}
 		}
-	}
 	rte_spinlock_unlock(&age_info->aged_sl);
 	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
 	return nb_flows;

From patchwork Thu Oct 29 21:58:00 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Matan Azrad
X-Patchwork-Id: 82847
X-Patchwork-Delegate: rasland@nvidia.com
Return-Path: 
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 17BADA04B5; Thu, 29 Oct 2020 23:00:34 +0100 (CET)
Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 91258CBA8; Thu, 29 Oct 2020 22:58:23 +0100 (CET)
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id AA890CAC0 for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:05 +0200
Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Tu022832; Thu, 29 Oct 2020 23:58:05 +0200
From: Matan Azrad
To: Viacheslav Ovsiienko
Cc: dev@dpdk.org
Date: Thu, 29 Oct 2020 21:58:00 +0000
Message-Id: <1604008681-414157-8-git-send-email-matan@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com>
References: <1604008681-414157-1-git-send-email-matan@nvidia.com>
Subject: [dpdk-dev] [PATCH 7/8] net/mlx5: optimize shared RSS action memory
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

The RSS shared action was stored in the flow memory as a pointer, so
each flow spent 8B on it even though it is needed only in the optional
shared RSS case. Move the RSS shared action objects to an indexed pool,
which reduces the field in the flow memory to a 4B index. Now the
shared action handle is also just a 4B index.
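For clarity, below is a minimal standalone sketch of the resulting
handle encoding: the action type lives in the bits above
MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool index in the bits below
it. The macro and enum mirror the ones this patch adds to mlx5_flow.h;
the pack/unpack helpers and the demo main() are illustrative only and
not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the definitions this patch adds to mlx5_flow.h. */
#define MLX5_SHARED_ACTION_TYPE_OFFSET 30

enum {
	MLX5_SHARED_ACTION_TYPE_RSS,
};

/* Pack the action type and the ipool index into one 32-bit handle. */
static inline uint32_t
shared_action_handle_pack(uint32_t type, uint32_t idx)
{
	return (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | idx;
}

/* Unpack: high bits hold the type, low 30 bits hold the pool index. */
static inline void
shared_action_handle_unpack(uint32_t handle, uint32_t *type, uint32_t *idx)
{
	*type = handle >> MLX5_SHARED_ACTION_TYPE_OFFSET;
	*idx = handle & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
}

int
main(void)
{
	uint32_t handle, type, idx;

	handle = shared_action_handle_pack(MLX5_SHARED_ACTION_TYPE_RSS, 5);
	shared_action_handle_unpack(handle, &type, &idx);
	printf("handle=0x%x type=%u idx=%u\n", handle, type, idx);
	return 0;
}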
Signed-off-by: Matan Azrad --- drivers/net/mlx5/mlx5.c | 12 +++ drivers/net/mlx5/mlx5.h | 8 +- drivers/net/mlx5/mlx5_flow.c | 128 +++++++++++++-------------- drivers/net/mlx5/mlx5_flow.h | 24 +++--- drivers/net/mlx5/mlx5_flow_dv.c | 186 +++++++++++++++++++++++----------------- 5 files changed, 194 insertions(+), 164 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index a5c50ff..245685e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -336,6 +336,18 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .need_lock = 1, .type = "mlx5_flow_tnl_tbl_ipool", }, + { + .size = sizeof(struct mlx5_shared_action_rss), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 1, + .release_mem_en = 1, + .malloc = mlx5_malloc, + .free = mlx5_free, + .type = "mlx5_shared_action_rss", + }, + }; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index cf6975d..9b1e5d5 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -53,6 +53,7 @@ enum mlx5_ipool_index { MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */ MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */ MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */ + MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */ MLX5_IPOOL_MAX, }; @@ -895,8 +896,6 @@ enum mlx5_txq_modify_type { MLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */ }; - - /* HW objects operations structure. */ struct mlx5_obj_ops { int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on); @@ -922,6 +921,8 @@ struct mlx5_obj_ops { void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj); }; +#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields) + struct mlx5_priv { struct rte_eth_dev_data *dev_data; /* Pointer to device data. */ struct mlx5_dev_ctx_shared *sh; /* Shared device context. */ @@ -998,8 +999,7 @@ struct mlx5_priv { struct mlx5_mp_id mp_id; /* ID of a multi-process process */ LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */ rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */ - LIST_HEAD(shared_action, rte_flow_shared_action) shared_actions; - /* shared actions */ + uint32_t rss_shared_actions; /* RSS shared actions. */ }; #define PORT_ID(priv) ((priv)->dev_data->port_id) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index a6e60af..29e67f4 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -3306,6 +3306,8 @@ struct mlx5_translated_shared_action { * action handling should be preformed on *shared* actions list returned * from this call. * + * @param[in] dev + * Pointer to Ethernet device. * @param[in] actions * List of actions to translate. * @param[out] shared @@ -3323,12 +3325,14 @@ struct mlx5_translated_shared_action { * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -flow_shared_actions_translate(const struct rte_flow_action actions[], - struct mlx5_translated_shared_action *shared, - int *shared_n, - struct rte_flow_action **translated_actions, - struct rte_flow_error *error) +flow_shared_actions_translate(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct mlx5_translated_shared_action *shared, + int *shared_n, + struct rte_flow_action **translated_actions, + struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_action *translated = NULL; size_t actions_size; int n; @@ -3360,15 +3364,20 @@ struct mlx5_translated_shared_action { } memcpy(translated, actions, actions_size); for (shared_end = shared + copied_n; shared < shared_end; shared++) { - const struct rte_flow_shared_action *shared_action; - - shared_action = shared->action; - switch (shared_action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: + struct mlx5_shared_action_rss *shared_rss; + uint32_t act_idx = (uint32_t)(uintptr_t)shared->action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) + - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); translated[shared->index].type = RTE_FLOW_ACTION_TYPE_RSS; translated[shared->index].conf = - &shared_action->rss.origin; + &shared_rss->origin; break; default: mlx5_free(translated); @@ -3384,44 +3393,44 @@ struct mlx5_translated_shared_action { /** * Get Shared RSS action from the action list. * + * @param[in] dev + * Pointer to Ethernet device. * @param[in] shared * Pointer to the list of actions. * @param[in] shared_n * Actions list length. * * @return - * Pointer to the MLX5 RSS action if exists, otherwise return NULL. + * The MLX5 RSS action ID if exists, otherwise return 0. 
*/ -static struct mlx5_shared_action_rss * -flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared, +static uint32_t +flow_get_shared_rss_action(struct rte_eth_dev *dev, + struct mlx5_translated_shared_action *shared, int shared_n) { struct mlx5_translated_shared_action *shared_end; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss; - for (shared_end = shared + shared_n; shared < shared_end; shared++) { - struct rte_flow_shared_action *shared_action; - shared_action = shared->action; - switch (shared_action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - __atomic_add_fetch(&shared_action->refcnt, 1, + for (shared_end = shared + shared_n; shared < shared_end; shared++) { + uint32_t act_idx = (uint32_t)(uintptr_t)shared->action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & + ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + idx); + __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); - return &shared_action->rss; + return idx; default: break; } } - return NULL; -} - -struct rte_flow_shared_action * -mlx5_flow_get_shared_rss(struct rte_flow *flow) -{ - if (flow->shared_rss) - return container_of(flow->shared_rss, - struct rte_flow_shared_action, rss); - else - return NULL; + return 0; } static unsigned int @@ -5538,7 +5547,7 @@ struct tunnel_default_miss_ctx { MLX5_ASSERT(wks); rss_desc = &wks->rss_desc[fidx]; - ret = flow_shared_actions_translate(original_actions, + ret = flow_shared_actions_translate(dev, original_actions, shared_actions, &shared_actions_n, &translated_actions, error); @@ -5599,7 +5608,7 @@ struct tunnel_default_miss_ctx { buf->entries = 1; buf->entry[0].pattern = (void *)(uintptr_t)items; } - flow->shared_rss = flow_get_shared_rss_action(shared_actions, + flow->shared_rss = flow_get_shared_rss_action(dev, shared_actions, shared_actions_n); /* * Record the start index when there is a nested call. 
All sub-flows @@ -5787,7 +5796,7 @@ struct rte_flow * int shared_actions_n = MLX5_MAX_SHARED_ACTIONS; const struct rte_flow_action *actions; struct rte_flow_action *translated_actions = NULL; - int ret = flow_shared_actions_translate(original_actions, + int ret = flow_shared_actions_translate(dev, original_actions, shared_actions, &shared_actions_n, &translated_actions, error); @@ -7856,25 +7865,11 @@ struct mlx5_meter_domains_infos * flow_get_drv_ops(flow_get_drv_type(dev, &attr)); int ret; - switch (shared_action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - if (action->type != RTE_FLOW_ACTION_TYPE_RSS) { - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "update action type invalid"); - } - ret = flow_drv_action_validate(dev, NULL, action, fops, error); - if (ret) - return ret; - return flow_drv_action_update(dev, shared_action, action->conf, - fops, error); - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "action type not supported"); - } + ret = flow_drv_action_validate(dev, NULL, action, fops, error); + if (ret) + return ret; + return flow_drv_action_update(dev, shared_action, action->conf, fops, + error); } /** @@ -7906,17 +7901,10 @@ struct mlx5_meter_domains_infos * struct rte_flow_error *error) { (void)dev; - switch (action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - __atomic_load(&action->refcnt, (uint32_t *)data, - __ATOMIC_RELAXED); - return 0; - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, - "action type not supported"); - } + (void)action; + (void)data; + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "action type query not supported"); } /** @@ -7933,12 +7921,14 @@ struct mlx5_meter_domains_infos * { struct rte_flow_error error; struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow_shared_action *action; + struct mlx5_shared_action_rss *action; int ret = 0; + uint32_t idx; - while (!LIST_EMPTY(&priv->shared_actions)) { - action = LIST_FIRST(&priv->shared_actions); - ret = mlx5_shared_action_destroy(dev, action, &error); + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + priv->rss_shared_actions, idx, action, next) { + ret |= mlx5_shared_action_destroy(dev, + (struct rte_flow_shared_action *)(uintptr_t)idx, &error); } return ret; } diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 1b4a9d1..742971c 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -35,10 +35,15 @@ enum mlx5_rte_flow_action_type { MLX5_RTE_FLOW_ACTION_TYPE_MARK, MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, - MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS, MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET, }; +#define MLX5_SHARED_ACTION_TYPE_OFFSET 30 + +enum { + MLX5_SHARED_ACTION_TYPE_RSS, +}; + /* Matches on selected register. */ struct mlx5_rte_flow_item_tag { enum modify_reg id; @@ -1024,7 +1029,7 @@ struct tunnel_tbl_entry { /* Flow structure. */ struct rte_flow { ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */ - struct mlx5_shared_action_rss *shared_rss; /** < Shred RSS action. */ + uint32_t shared_rss; /** < Shared RSS action ID. */ uint32_t dev_handles; /**< Device flow handles that are part of the flow. */ uint32_t drv_type:2; /**< Driver type. 
*/ @@ -1069,10 +1074,10 @@ struct rte_flow { MLX5_RSS_HASH_NONE, }; -#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields) - /* Shared RSS action structure */ struct mlx5_shared_action_rss { + ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */ + uint32_t refcnt; /**< Atomically accessed refcnt. */ struct rte_flow_action_rss origin; /**< Original rte RSS action. */ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ uint16_t *queue; /**< Queue indices to use. */ @@ -1083,15 +1088,7 @@ struct mlx5_shared_action_rss { }; struct rte_flow_shared_action { - LIST_ENTRY(rte_flow_shared_action) next; - /**< Pointer to the next element. */ - uint32_t refcnt; /**< Atomically accessed refcnt. */ - uint64_t type; - /**< Shared action type (see MLX5_FLOW_ACTION_SHARED_*). */ - union { - struct mlx5_shared_action_rss rss; - /**< Shared RSS action. */ - }; + uint32_t id; }; /* Thread specific flow workspace intermediate data. */ @@ -1391,7 +1388,6 @@ int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, int mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error); int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev); -struct rte_flow_shared_action *mlx5_flow_get_shared_rss(struct rte_flow *flow); int mlx5_shared_action_flush(struct rte_eth_dev *dev); void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id); int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh); diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index dcc397d..05f5871 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -10509,8 +10509,10 @@ struct mlx5_cache_entry * * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields) * and tunnel. * - * @param[in] action - * Shred RSS action holding hash RX queue objects. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] idx + * Shared RSS action ID holding hash RX queue objects. * @param[in] hash_fields * Defines combination of packet fields to participate in RX hash. * @param[in] tunnel @@ -10520,11 +10522,15 @@ struct mlx5_cache_entry * * Valid hash RX queue index, otherwise 0. */ static uint32_t -__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action, +__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, const uint64_t hash_fields, const int tunnel) { - const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); + const uint32_t *hrxqs = tunnel ? shared_rss->hrxq : + shared_rss->hrxq_tunnel; switch (hash_fields & ~IBV_RX_HASH_INNER) { case MLX5_RSS_HASH_IPV4: @@ -10551,6 +10557,8 @@ struct mlx5_cache_entry * * If shared action configured for *flow* suitable hash RX queue will be * retrieved from attached shared action. * + * @param[in] dev + * Pointer to the Ethernet device structure. * @param[in] flow * Shred RSS action holding hash RX queue objects. 
* @param[in] dev_flow @@ -10572,7 +10580,7 @@ struct mlx5_cache_entry * if (flow->shared_rss) { hrxq_idx = __flow_dv_action_rss_hrxq_lookup - (flow->shared_rss, dev_flow->hash_fields, + (dev, flow->shared_rss, dev_flow->hash_fields, !!(dev_flow->handle->layers & MLX5_FLOW_LAYER_TUNNEL)); if (hrxq_idx) { @@ -11101,16 +11109,19 @@ struct mlx5_cache_entry * static void flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct rte_flow_shared_action *shared; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; if (!flow) return; flow_dv_remove(dev, flow); - shared = mlx5_flow_get_shared_rss(flow); - if (shared) - __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED); + if (flow->shared_rss) { + struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + flow->shared_rss); + + __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED); + } if (flow->counter) { flow_dv_counter_free(dev, flow->counter); flow->counter = 0; @@ -11281,56 +11292,69 @@ struct mlx5_cache_entry * * error only. * * @return - * A valid shared action handle in case of success, NULL otherwise and + * A valid shared action ID in case of success, 0 otherwise and * rte_errno is set. */ -static struct rte_flow_shared_action * +static uint32_t __flow_dv_action_rss_create(struct rte_eth_dev *dev, const struct rte_flow_shared_action_conf *conf, const struct rte_flow_action_rss *rss, struct rte_flow_error *error) { - struct rte_flow_shared_action *shared_action = NULL; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_action = NULL; void *queue = NULL; - struct mlx5_shared_action_rss *shared_rss; struct rte_flow_action_rss *origin; const uint8_t *rss_key; uint32_t queue_size = rss->queue_num * sizeof(uint16_t); + uint32_t idx; RTE_SET_USED(conf); queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), 0, SOCKET_ID_ANY); - shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0, - SOCKET_ID_ANY); + shared_action = mlx5_ipool_zmalloc + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx); if (!shared_action || !queue) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); goto error_rss_init; } - shared_rss = &shared_action->rss; - shared_rss->queue = queue; - origin = &shared_rss->origin; + if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) { + rte_flow_error_set(error, E2BIG, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "rss action number out of range"); + goto error_rss_init; + } + shared_action->queue = queue; + origin = &shared_action->origin; origin->func = rss->func; origin->level = rss->level; /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ origin->types = !rss->types ? ETH_RSS_IP : rss->types; /* NULL RSS key indicates default RSS key. */ rss_key = !rss->key ? 
rss_hash_default_key : rss->key; - memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN); - origin->key = &shared_rss->key[0]; + memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + origin->key = &shared_action->key[0]; origin->key_len = MLX5_RSS_HASH_KEY_LEN; - memcpy(shared_rss->queue, rss->queue, queue_size); - origin->queue = shared_rss->queue; + memcpy(shared_action->queue, rss->queue, queue_size); + origin->queue = shared_action->queue; origin->queue_num = rss->queue_num; - if (__flow_dv_action_rss_setup(dev, shared_rss, error)) + if (__flow_dv_action_rss_setup(dev, shared_action, error)) goto error_rss_init; - shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS; - return shared_action; + __atomic_add_fetch(&shared_action->refcnt, 2, __ATOMIC_RELAXED); + rte_spinlock_lock(&priv->shared_act_sl); + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + &priv->rss_shared_actions, idx, shared_action, next); + rte_spinlock_unlock(&priv->shared_act_sl); + return idx; error_rss_init: - mlx5_free(shared_action); - mlx5_free(queue); - return NULL; + if (shared_action) + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + idx); + if (queue) + mlx5_free(queue); + return 0; } /** @@ -11339,8 +11363,8 @@ struct mlx5_cache_entry * * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared_rss - * The shared RSS action object to be removed. + * @param[in] idx + * The shared RSS action object ID to be removed. * @param[out] error * Perform verbose error reporting if not NULL. Initialized in case of * error only. @@ -11349,31 +11373,39 @@ struct mlx5_cache_entry * * 0 on success, otherwise negative errno value. */ static int -__flow_dv_action_rss_release(struct rte_eth_dev *dev, - struct mlx5_shared_action_rss *shared_rss, - struct rte_flow_error *error) +__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx, + struct rte_flow_error *error) { - struct rte_flow_shared_action *shared_action = NULL; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); uint32_t old_refcnt = 1; - int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss); + int remaining; - if (remaining) { + if (!shared_rss) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "invalid shared action"); + remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss); + if (remaining) return rte_flow_error_set(error, ETOOMANYREFS, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss hrxq has references"); - } - shared_action = container_of(shared_rss, - struct rte_flow_shared_action, rss); - if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt, - 0, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { + if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt, + 0, 0, __ATOMIC_ACQUIRE, + __ATOMIC_RELAXED)) return rte_flow_error_set(error, ETOOMANYREFS, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "shared rss has references"); - } rte_free(shared_rss->queue); + rte_spinlock_lock(&priv->shared_act_sl); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + &priv->rss_shared_actions, idx, shared_rss, next); + rte_spinlock_unlock(&priv->shared_act_sl); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + idx); return 0; } @@ -11400,30 +11432,23 @@ struct mlx5_cache_entry * flow_dv_action_create(struct rte_eth_dev *dev, const struct rte_flow_shared_action_conf *conf, const struct 
rte_flow_action *action, - struct rte_flow_error *error) + struct rte_flow_error *err) { - struct rte_flow_shared_action *shared_action = NULL; - struct mlx5_priv *priv = dev->data->dev_private; + uint32_t idx = 0; + uint32_t ret = 0; switch (action->type) { case RTE_FLOW_ACTION_TYPE_RSS: - shared_action = __flow_dv_action_rss_create(dev, conf, - action->conf, - error); + ret = __flow_dv_action_rss_create(dev, conf, action->conf, err); + idx = (MLX5_SHARED_ACTION_TYPE_RSS << + MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; break; default: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); break; } - if (shared_action) { - __atomic_add_fetch(&shared_action->refcnt, 1, - __ATOMIC_RELAXED); - rte_spinlock_lock(&priv->shared_act_sl); - LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next); - rte_spinlock_unlock(&priv->shared_act_sl); - } - return shared_action; + return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL; } /** @@ -11448,12 +11473,14 @@ struct mlx5_cache_entry * struct rte_flow_shared_action *action, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); int ret; - switch (action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - ret = __flow_dv_action_rss_release(dev, &action->rss, error); + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + ret = __flow_dv_action_rss_release(dev, idx, error); break; default: return rte_flow_error_set(error, ENOTSUP, @@ -11463,10 +11490,6 @@ struct mlx5_cache_entry * } if (ret) return ret; - rte_spinlock_lock(&priv->shared_act_sl); - LIST_REMOVE(action, next); - rte_spinlock_unlock(&priv->shared_act_sl); - rte_free(action); return 0; } @@ -11475,8 +11498,8 @@ struct mlx5_cache_entry * * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] shared_rss - * The shared RSS action object to be updated. + * @param[in] idx + * The shared RSS action object ID to be updated. * @param[in] action_conf * RSS action specification used to modify *shared_rss*. * @param[out] error @@ -11488,11 +11511,13 @@ struct mlx5_cache_entry * * @note: currently only support update of RSS queues. 
*/ static int -__flow_dv_action_rss_update(struct rte_eth_dev *dev, - struct mlx5_shared_action_rss *shared_rss, +__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx, const struct rte_flow_action_rss *action_conf, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); size_t i; int ret; void *queue = NULL; @@ -11500,6 +11525,10 @@ struct mlx5_cache_entry * uint32_t rss_key_len; uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t); + if (!shared_rss) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "invalid shared action to update"); queue = mlx5_malloc(MLX5_MEM_ZERO, RTE_ALIGN_CEIL(queue_size, sizeof(void *)), 0, SOCKET_ID_ANY); @@ -11522,7 +11551,7 @@ struct mlx5_cache_entry * for (tunnel = 0; tunnel < 2; tunnel++) { hrxq_idx = __flow_dv_action_rss_hrxq_lookup - (shared_rss, hash_fields, tunnel); + (dev, idx, hash_fields, tunnel); MLX5_ASSERT(hrxq_idx); ret = mlx5_hrxq_modify (dev, hrxq_idx, @@ -11569,14 +11598,17 @@ struct mlx5_cache_entry * flow_dv_action_update(struct rte_eth_dev *dev, struct rte_flow_shared_action *action, const void *action_conf, - struct rte_flow_error *error) + struct rte_flow_error *err) { - switch (action->type) { - case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS: - return __flow_dv_action_rss_update(dev, &action->rss, - action_conf, error); + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_RSS: + return __flow_dv_action_rss_update(dev, idx, action_conf, err); default: - return rte_flow_error_set(error, ENOTSUP, + return rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); From patchwork Thu Oct 29 21:58:01 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 82846 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id A3754A04B5; Thu, 29 Oct 2020 23:00:12 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 2BEEFCB7F; Thu, 29 Oct 2020 22:58:22 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id AFB9ECAC5 for ; Thu, 29 Oct 2020 22:58:11 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@nvidia.com) with SMTP; 29 Oct 2020 23:58:05 +0200 Received: from nvidia.com (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09TLw4Tv022832; Thu, 29 Oct 2020 23:58:05 +0200 From: Matan Azrad To: Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Thu, 29 Oct 2020 21:58:01 +0000 Message-Id: <1604008681-414157-9-git-send-email-matan@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1604008681-414157-1-git-send-email-matan@nvidia.com> References: <1604008681-414157-1-git-send-email-matan@nvidia.com> Subject: [dpdk-dev] [PATCH 8/8] net/mlx5: support shared age action X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: 
List-Help: 
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Add support for the rte_flow shared action API for the AGE action.
As a first step, support validate, create, query and destroy.
The support covers only the ASO age mode.

Signed-off-by: Matan Azrad 
---
 drivers/net/mlx5/mlx5.h         |   1 +
 drivers/net/mlx5/mlx5_flow.c    |  61 +++++++++++++++--
 drivers/net/mlx5/mlx5_flow.h    |  11 ++++
 drivers/net/mlx5/mlx5_flow_dv.c | 140 ++++++++++++++++++++++++++++------------
 4 files changed, 165 insertions(+), 48 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9b1e5d5..da994db 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -524,6 +524,7 @@ struct mlx5_aso_sq {
 struct mlx5_aso_age_action {
 	LIST_ENTRY(mlx5_aso_age_action) next;
 	void *dr_action;
+	uint32_t refcnt;
 	/* Following fields relevant only when action is active. */
 	uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
 	struct mlx5_age_param age_params;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 29e67f4..d62d8ff 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3292,6 +3292,29 @@ struct mlx5_flow_tunnel_info {
 	return NULL;
 }
 
+/**
+ * Get ASO age action by index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ *   Index to the ASO age action.
+ *
+ * @return
+ *   The specified ASO age action.
+ */
+struct mlx5_aso_age_action *
+flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+	uint16_t pool_idx = age_idx & UINT16_MAX;
+	uint16_t offset = (age_idx >> 16) & UINT16_MAX;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+
+	return &pool->actions[offset - 1];
+}
+
 /* maps shared action to translated non shared in some actions array */
 struct mlx5_translated_shared_action {
 	struct rte_flow_shared_action *action; /**< Shared action */
@@ -3379,6 +3402,15 @@ struct mlx5_translated_shared_action {
 			translated[shared->index].conf =
 				&shared_rss->origin;
 			break;
+		case MLX5_SHARED_ACTION_TYPE_AGE:
+			if (priv->sh->flow_hit_aso_en) {
+				translated[shared->index].type =
+					MLX5_RTE_FLOW_ACTION_TYPE_AGE;
+				translated[shared->index].conf =
+					(void *)(uintptr_t)idx;
+				break;
+			}
+			/* Fall-through */
 		default:
 			mlx5_free(translated);
 			return rte_flow_error_set
@@ -7798,6 +7830,25 @@ struct mlx5_meter_domains_infos *
 	return fops->action_update(dev, action, action_conf, error);
 }
 
+/* Wrapper for driver action_query op callback */
+static int
+flow_drv_action_query(struct rte_eth_dev *dev,
+		      const struct rte_flow_shared_action *action,
+		      void *data,
+		      const struct mlx5_flow_driver_ops *fops,
+		      struct rte_flow_error *error)
+{
+	static const char err_msg[] = "shared action query unsupported";
+
+	if (!fops->action_query) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return -rte_errno;
+	}
+	return fops->action_query(dev, action, data, error);
+}
+
 /**
  * Create shared action for reuse in multiple flow rules.
* @@ -7900,11 +7951,11 @@ struct mlx5_meter_domains_infos * void *data, struct rte_flow_error *error) { - (void)dev; - (void)action; - (void)data; - return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "action type query not supported"); + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + return flow_drv_action_query(dev, action, data, fops, error); } /** diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 742971c..adb293c 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -36,12 +36,14 @@ enum mlx5_rte_flow_action_type { MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET, + MLX5_RTE_FLOW_ACTION_TYPE_AGE, }; #define MLX5_SHARED_ACTION_TYPE_OFFSET 30 enum { MLX5_SHARED_ACTION_TYPE_RSS, + MLX5_SHARED_ACTION_TYPE_AGE, }; /* Matches on selected register. */ @@ -1173,6 +1175,12 @@ typedef int (*mlx5_flow_action_update_t) struct rte_flow_shared_action *action, const void *action_conf, struct rte_flow_error *error); +typedef int (*mlx5_flow_action_query_t) + (struct rte_eth_dev *dev, + const struct rte_flow_shared_action *action, + void *data, + struct rte_flow_error *error); + typedef int (*mlx5_flow_sync_domain_t) (struct rte_eth_dev *dev, uint32_t domains, @@ -1197,6 +1205,7 @@ struct mlx5_flow_driver_ops { mlx5_flow_action_create_t action_create; mlx5_flow_action_destroy_t action_destroy; mlx5_flow_action_update_t action_update; + mlx5_flow_action_query_t action_query; mlx5_flow_sync_domain_t sync_domain; }; @@ -1465,4 +1474,6 @@ struct mlx5_cache_entry *flow_dv_dest_array_create_cb struct mlx5_cache_entry *entry, void *cb_ctx); void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list, struct mlx5_cache_entry *entry); +struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev, + uint32_t age_idx); #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 05f5871..b587b3f 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -5926,6 +5926,10 @@ struct mlx5_hlist_entry * /* Meter action will add one more TAG action. */ rw_act_num += MLX5_ACT_NUM_SET_TAG; break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: + action_flags |= MLX5_FLOW_ACTION_AGE; + ++actions_n; + break; case RTE_FLOW_ACTION_TYPE_AGE: ret = flow_dv_validate_action_age(action_flags, actions, dev, @@ -9241,29 +9245,6 @@ struct mlx5_cache_entry * } /** - * Get ASO age action by index. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] age_idx - * Index to the ASO age action. - * - * @return - * The specified ASO age action. - */ -static struct mlx5_aso_age_action* -flow_dv_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) -{ - uint16_t pool_idx = age_idx & UINT16_MAX; - uint16_t offset = (age_idx >> 16) & UINT16_MAX; - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; - struct mlx5_aso_age_pool *pool = mng->pools[pool_idx]; - - return &pool->actions[offset - 1]; -} - -/** * Remove a flow counter from aged counter list. * * @param[in] dev @@ -9295,18 +9276,22 @@ struct mlx5_cache_entry * } } -static void +/* Return 0 when age action was removed, otherwise the number of references. 
*/ +static int flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; - struct mlx5_aso_age_action *age = flow_dv_aso_age_get_by_idx(dev, - age_idx); + struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx); + uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED); - flow_dv_aso_age_remove_from_age(dev, age); - rte_spinlock_lock(&mng->free_sl); - LIST_INSERT_HEAD(&mng->free, age, next); - rte_spinlock_unlock(&mng->free_sl); + if (!ret) { + flow_dv_aso_age_remove_from_age(dev, age); + rte_spinlock_lock(&mng->free_sl); + LIST_INSERT_HEAD(&mng->free, age, next); + rte_spinlock_unlock(&mng->free_sl); + } + return ret; } /** @@ -9445,6 +9430,7 @@ struct mlx5_cache_entry * if (!age_free->dr_action) goto err; } + __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED); return pool->index | ((age_free->offset + 1) << 16); err: if (age_free) { @@ -9471,12 +9457,12 @@ struct mlx5_cache_entry * const struct rte_flow_action_age *age) { uint32_t age_idx = 0; - struct mlx5_aso_age_action *aso_age = NULL; + struct mlx5_aso_age_action *aso_age; age_idx = flow_dv_aso_age_alloc(dev); if (!age_idx) return 0; - aso_age = flow_dv_aso_age_get_by_idx(dev, age_idx); + aso_age = flow_aso_age_get_by_idx(dev, age_idx); aso_age->age_params.context = age->context; aso_age->age_params.timeout = age->timeout; aso_age->age_params.port_id = dev->data->port_id; @@ -9640,6 +9626,7 @@ struct mlx5_cache_entry * const uint8_t *rss_key; const struct rte_flow_action_meter *mtr; struct mlx5_flow_tbl_resource *tbl; + struct mlx5_aso_age_action *age_act; uint32_t port_id = 0; struct mlx5_flow_dv_port_id_action_resource port_id_resource; int action_type = actions->type; @@ -9776,6 +9763,14 @@ struct mlx5_cache_entry * action_flags |= MLX5_FLOW_ACTION_RSS; dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; break; + case MLX5_RTE_FLOW_ACTION_TYPE_AGE: + flow->age = (uint32_t)(uintptr_t)(action->conf); + age_act = flow_aso_age_get_by_idx(dev, flow->age); + __atomic_fetch_add(&age_act->refcnt, 1, + __ATOMIC_RELAXED); + dev_flow->dv.actions[actions_n++] = age_act->dr_action; + action_flags |= MLX5_FLOW_ACTION_AGE; + break; case RTE_FLOW_ACTION_TYPE_AGE: if (priv->sh->flow_hit_aso_en) { flow->age = flow_dv_translate_create_aso_age @@ -9787,7 +9782,7 @@ struct mlx5_cache_entry * NULL, "can't create age action"); dev_flow->dv.actions[actions_n++] = - (flow_dv_aso_age_get_by_idx + (flow_aso_age_get_by_idx (dev, flow->age))->dr_action; action_flags |= MLX5_FLOW_ACTION_AGE; break; @@ -11443,6 +11438,19 @@ struct mlx5_cache_entry * idx = (MLX5_SHARED_ACTION_TYPE_RSS << MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; break; + case RTE_FLOW_ACTION_TYPE_AGE: + ret = flow_dv_translate_create_aso_age(dev, action->conf); + idx = (MLX5_SHARED_ACTION_TYPE_AGE << + MLX5_SHARED_ACTION_TYPE_OFFSET) | ret; + if (ret) { + struct mlx5_aso_age_action *aso_age = + flow_aso_age_get_by_idx(dev, ret); + + if (!aso_age->age_params.context) + aso_age->age_params.context = + (void *)(uintptr_t)idx; + } + break; default: rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); @@ -11480,17 +11488,21 @@ struct mlx5_cache_entry * switch (type) { case MLX5_SHARED_ACTION_TYPE_RSS: - ret = __flow_dv_action_rss_release(dev, idx, error); - break; + return __flow_dv_action_rss_release(dev, idx, error); + case MLX5_SHARED_ACTION_TYPE_AGE: + ret = flow_dv_aso_age_release(dev, idx); + if (ret) + 
return rte_flow_error_set(error, ETOOMANYREFS, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "shared age has references"); + return ret; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); } - if (ret) - return ret; - return 0; } /** @@ -11611,9 +11623,41 @@ struct mlx5_cache_entry * return rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "action type not supported"); + "action type update not supported"); } } + +static int +flow_dv_action_query(struct rte_eth_dev *dev, + const struct rte_flow_shared_action *action, void *data, + struct rte_flow_error *error) +{ + struct mlx5_age_param *age_param; + struct rte_flow_query_age *resp; + uint32_t act_idx = (uint32_t)(uintptr_t)action; + uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_SHARED_ACTION_TYPE_AGE: + age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params; + resp = data; + resp->aged = __atomic_load_n(&age_param->state, + __ATOMIC_RELAXED) == AGE_TMOUT ? + 1 : 0; + resp->sec_since_last_hit_valid = !resp->aged; + if (resp->sec_since_last_hit_valid) + resp->sec_since_last_hit = __atomic_load_n + (&age_param->sec_since_last_hit, __ATOMIC_RELAXED); + return 0; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "action type query not supported"); + } +} + /** * Query a dv flow rule for its statistics via devx. * @@ -11694,7 +11738,7 @@ struct mlx5_cache_entry * if (flow->age) { struct mlx5_aso_age_action *act = - flow_dv_aso_age_get_by_idx(dev, flow->age); + flow_aso_age_get_by_idx(dev, flow->age); age_param = &act->age_params; } else if (flow->counter) { @@ -12404,14 +12448,23 @@ struct mlx5_cache_entry * flow_dv_action_validate(struct rte_eth_dev *dev, const struct rte_flow_shared_action_conf *conf, const struct rte_flow_action *action, - struct rte_flow_error *error) + struct rte_flow_error *err) { + struct mlx5_priv *priv = dev->data->dev_private; + RTE_SET_USED(conf); switch (action->type) { case RTE_FLOW_ACTION_TYPE_RSS: - return mlx5_validate_action_rss(dev, action, error); + return mlx5_validate_action_rss(dev, action, err); + case RTE_FLOW_ACTION_TYPE_AGE: + if (!priv->sh->aso_age_mng) + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "shared age action not supported"); + return flow_dv_validate_action_age(0, action, dev, err); default: - return rte_flow_error_set(error, ENOTSUP, + return rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "action type not supported"); @@ -12463,6 +12516,7 @@ struct mlx5_cache_entry * .action_create = flow_dv_action_create, .action_destroy = flow_dv_action_destroy, .action_update = flow_dv_action_update, + .action_query = flow_dv_action_query, .sync_domain = flow_dv_sync_domain, };
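
As an illustration of the application-facing flow enabled by this
series, here is a hedged usage sketch (not part of the patches) that
creates a shared AGE action, queries its state and destroys it. It
assumes the experimental rte_flow shared action API as it exists in
DPDK 20.11; the helper name and the 10-second timeout are invented for
the example.

#include <stdio.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Create a shared AGE action on @port_id, poll it once, destroy it. */
static int
shared_age_example(uint16_t port_id)
{
	struct rte_flow_error error;
	const struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action_age age_conf = {
		/* Report the flow as aged after 10s without a hit. */
		.timeout = 10,
		/* This PMD falls back to the handle itself when NULL. */
		.context = NULL,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
		.conf = &age_conf,
	};
	struct rte_flow_shared_action *handle;
	struct rte_flow_query_age resp = {0};

	handle = rte_flow_shared_action_create(port_id, &conf, &action,
					       &error);
	if (handle == NULL)
		return -rte_errno;
	/* The same handle may now be attached to many flow rules. */
	if (rte_flow_shared_action_query(port_id, handle, &resp, &error) == 0)
		printf("aged=%u sec_since_last_hit=%u\n", resp.aged,
		       resp.sec_since_last_hit_valid ?
		       resp.sec_since_last_hit : 0);
	return rte_flow_shared_action_destroy(port_id, handle, &error);
}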