diff mbox series

net/mlx5: fix indexed pools allocate on Windows

Message ID 20210721083448.14598-1-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers show
Series net/mlx5: fix indexed pools allocate on Windows | expand

Checks

Context Check Description
ci/iol-testing success Testing PASS
ci/iol-abi-testing warning Testing issues
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/intel-Testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/github-robot success github build: passed
ci/checkpatch warning coding style issues

Commit Message

Suanming Mou July 21, 2021, 8:34 a.m. UTC
Currently, the flow indexed pools are allocated per port, but the
allocation was missing in the Windows code.

This commit fixes the issue by allocating the flow indexed pools on
Windows as well.

Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/windows/mlx5_os.c | 47 ++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
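
To make the fix easier to follow: an "indexed pool" hands out small
uint32_t indexes instead of pointers, stores its entries in fixed-size
trunks, and the driver keeps one set of such pools per port for its flow
objects. Below is a simplified, standalone C model of that idea. It is
not the mlx5 implementation (which adds locking, free lists, per-core
caches and trunk growth on top); it is only a sketch of the index/trunk
mechanics that the icfg[] table in the patch configures.

#include <stdint.h>
#include <stdlib.h>

#define TRUNK_SIZE 64                  /* entries per trunk, as in icfg[] */
#define MAX_TRUNKS 1024

struct ipool {
	size_t entry_size;             /* e.g. sizeof(struct rte_flow) */
	uint32_t n_entries;            /* entries handed out so far */
	uint8_t *trunks[MAX_TRUNKS];   /* lazily allocated trunks */
};

struct ipool *
ipool_create(size_t entry_size)
{
	struct ipool *p = calloc(1, sizeof(*p));

	if (p != NULL)
		p->entry_size = entry_size;
	return p;
}

/* Allocate one entry; return its 1-based index, 0 meaning failure. */
uint32_t
ipool_alloc(struct ipool *p)
{
	uint32_t slot = p->n_entries;          /* 0-based slot number */
	uint32_t trunk = slot / TRUNK_SIZE;

	if (trunk >= MAX_TRUNKS)
		return 0;
	if (p->trunks[trunk] == NULL) {
		p->trunks[trunk] = calloc(TRUNK_SIZE, p->entry_size);
		if (p->trunks[trunk] == NULL)
			return 0;
	}
	p->n_entries++;
	return slot + 1;                       /* 0 is reserved as invalid */
}

/* Translate an index returned by ipool_alloc() back into a pointer. */
void *
ipool_get(struct ipool *p, uint32_t idx)
{
	if (idx == 0 || idx > p->n_entries)
		return NULL;
	idx--;
	return p->trunks[idx / TRUNK_SIZE] +
	       (size_t)(idx % TRUNK_SIZE) * p->entry_size;
}

Because each port owns its pools, a flow index is only meaningful
together with the port that created it; that per-port state is exactly
what the Windows spawn path was missing.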

Comments

Tal Shnaiderman July 21, 2021, 8:40 a.m. UTC | #1
> Subject: [PATCH] net/mlx5: fix indexed pools allocate on Windows
> 
> Currently, the flow indexed pools are allocated per port, the allocation was
> missing in Windows code.
> 
> This commit fixes the the Windows flow indexed pools are not allocated issue.
> 
> Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> ---
>  drivers/net/mlx5/windows/mlx5_os.c | 47
> ++++++++++++++++++++++++++++++
>  1 file changed, 47 insertions(+)
> 
> diff --git a/drivers/net/mlx5/windows/mlx5_os.c
> b/drivers/net/mlx5/windows/mlx5_os.c
> index 5da362a9d5..a31fafc90d 100644
> --- a/drivers/net/mlx5/windows/mlx5_os.c
> +++ b/drivers/net/mlx5/windows/mlx5_os.c
> @@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA =
> "mlx5_pmd_shared_data";
>  /* Spinlock for mlx5_shared_data allocation. */  static rte_spinlock_t
> mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> 
> +/* rte flow indexed pool configuration. */ static struct
> +mlx5_indexed_pool_config icfg[] = {
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "ctl_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 1 << 14,
> +		.type = "rte_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "mcp_flow_ipool",
> +	},
> +};
> +
>  /**
>   * Initialize shared data between primary and secondary process.
>   *
> @@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	char name[RTE_ETH_NAME_MAX_LEN];
>  	int own_domain_id = 0;
>  	uint16_t port_id;
> +	int i;
> 
>  	/* Build device name. */
>  	strlcpy(name, dpdk_dev->name, sizeof(name)); @@ -584,6 +623,14
> @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	mlx5_set_min_inline(spawn, config);
>  	/* Store device configuration on private structure. */
>  	priv->config = *config;
> +	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
> +		icfg[i].release_mem_en = !!config->reclaim_mode;
> +		if (config->reclaim_mode)
> +			icfg[i].per_core_cache = 0;
> +		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
> +		if (!priv->flows[i])
> +			goto error;
> +	}
>  	/* Create context for virtual machine VLAN workaround. */
>  	priv->vmwa_context = NULL;
>  	if (config->dv_flow_en) {
> --
> 2.25.1

Acked-by: Tal Shnaiderman <talshn@nvidia.com>
Odi Assli July 21, 2021, 8:42 a.m. UTC | #2
> -----Original Message-----
> From: Tal Shnaiderman <talshn@nvidia.com>
> Sent: Wednesday, July 21, 2021 11:40 AM
> To: Suanming Mou <suanmingm@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>; Odi Assli
> <odia@nvidia.com>
> Cc: Raslan Darawsheh <rasland@nvidia.com>; dev@dpdk.org
> Subject: RE: [PATCH] net/mlx5: fix indexed pools allocate on Windows
> 
> > Subject: [PATCH] net/mlx5: fix indexed pools allocate on Windows
> >
> > Currently, the flow indexed pools are allocated per port, the
> > allocation was missing in Windows code.
> >
> > This commit fixes the the Windows flow indexed pools are not allocated
> issue.
> >
> > Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> >
> > Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> > ---
> >  drivers/net/mlx5/windows/mlx5_os.c | 47
> > ++++++++++++++++++++++++++++++
> >  1 file changed, 47 insertions(+)
> >
> > diff --git a/drivers/net/mlx5/windows/mlx5_os.c
> > b/drivers/net/mlx5/windows/mlx5_os.c
> > index 5da362a9d5..a31fafc90d 100644
> > --- a/drivers/net/mlx5/windows/mlx5_os.c
> > +++ b/drivers/net/mlx5/windows/mlx5_os.c
> > @@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA =
> > "mlx5_pmd_shared_data";
> >  /* Spinlock for mlx5_shared_data allocation. */  static
> > rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> >
> > +/* rte flow indexed pool configuration. */ static struct
> > +mlx5_indexed_pool_config icfg[] = {
> > +	{
> > +		.size = sizeof(struct rte_flow),
> > +		.trunk_size = 64,
> > +		.need_lock = 1,
> > +		.release_mem_en = 0,
> > +		.malloc = mlx5_malloc,
> > +		.free = mlx5_free,
> > +		.per_core_cache = 0,
> > +		.type = "ctl_flow_ipool",
> > +	},
> > +	{
> > +		.size = sizeof(struct rte_flow),
> > +		.trunk_size = 64,
> > +		.grow_trunk = 3,
> > +		.grow_shift = 2,
> > +		.need_lock = 1,
> > +		.release_mem_en = 0,
> > +		.malloc = mlx5_malloc,
> > +		.free = mlx5_free,
> > +		.per_core_cache = 1 << 14,
> > +		.type = "rte_flow_ipool",
> > +	},
> > +	{
> > +		.size = sizeof(struct rte_flow),
> > +		.trunk_size = 64,
> > +		.grow_trunk = 3,
> > +		.grow_shift = 2,
> > +		.need_lock = 1,
> > +		.release_mem_en = 0,
> > +		.malloc = mlx5_malloc,
> > +		.free = mlx5_free,
> > +		.per_core_cache = 0,
> > +		.type = "mcp_flow_ipool",
> > +	},
> > +};
> > +
> >  /**
> >   * Initialize shared data between primary and secondary process.
> >   *
> > @@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
> >  	char name[RTE_ETH_NAME_MAX_LEN];
> >  	int own_domain_id = 0;
> >  	uint16_t port_id;
> > +	int i;
> >
> >  	/* Build device name. */
> >  	strlcpy(name, dpdk_dev->name, sizeof(name)); @@ -584,6 +623,14
> @@
> > mlx5_dev_spawn(struct rte_device *dpdk_dev,
> >  	mlx5_set_min_inline(spawn, config);
> >  	/* Store device configuration on private structure. */
> >  	priv->config = *config;
> > +	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
> > +		icfg[i].release_mem_en = !!config->reclaim_mode;
> > +		if (config->reclaim_mode)
> > +			icfg[i].per_core_cache = 0;
> > +		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
> > +		if (!priv->flows[i])
> > +			goto error;
> > +	}
> >  	/* Create context for virtual machine VLAN workaround. */
> >  	priv->vmwa_context = NULL;
> >  	if (config->dv_flow_en) {
> > --
> > 2.25.1
> 
> Acked-by: Tal Shnaiderman <talshn@nvidia.com>
Tested-by: Odi Assli <odia@nvidia.com>
Matan Azrad July 21, 2021, 8:43 a.m. UTC | #3
Hi

From: Suanming Mou:
> Currently, the flow indexed pools are allocated per port, the allocation was
> missing in Windows code.
> 
> This commit fixes the the Windows flow indexed pools are not allocated

Double "the"

Instead, you can use:
Allocate indexed pool for the Windows case too.

> issue.
> 
> Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>

Better title:
net/mlx5/windows: fix indexed pools allocation

Besides,
Acked-by: Matan Azrad <matan@nvidia.com>

> ---
>  drivers/net/mlx5/windows/mlx5_os.c | 47
> ++++++++++++++++++++++++++++++
>  1 file changed, 47 insertions(+)
> 
> diff --git a/drivers/net/mlx5/windows/mlx5_os.c
> b/drivers/net/mlx5/windows/mlx5_os.c
> index 5da362a9d5..a31fafc90d 100644
> --- a/drivers/net/mlx5/windows/mlx5_os.c
> +++ b/drivers/net/mlx5/windows/mlx5_os.c
> @@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA =
> "mlx5_pmd_shared_data";
>  /* Spinlock for mlx5_shared_data allocation. */  static rte_spinlock_t
> mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> 
> +/* rte flow indexed pool configuration. */ static struct
> +mlx5_indexed_pool_config icfg[] = {
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "ctl_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 1 << 14,
> +		.type = "rte_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "mcp_flow_ipool",
> +	},
> +};
> +
>  /**
>   * Initialize shared data between primary and secondary process.
>   *
> @@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	char name[RTE_ETH_NAME_MAX_LEN];
>  	int own_domain_id = 0;
>  	uint16_t port_id;
> +	int i;
> 
>  	/* Build device name. */
>  	strlcpy(name, dpdk_dev->name, sizeof(name)); @@ -584,6 +623,14
> @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	mlx5_set_min_inline(spawn, config);
>  	/* Store device configuration on private structure. */
>  	priv->config = *config;
> +	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
> +		icfg[i].release_mem_en = !!config->reclaim_mode;
> +		if (config->reclaim_mode)
> +			icfg[i].per_core_cache = 0;
> +		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
> +		if (!priv->flows[i])
> +			goto error;
> +	}
>  	/* Create context for virtual machine VLAN workaround. */
>  	priv->vmwa_context = NULL;
>  	if (config->dv_flow_en) {
> --
> 2.25.1
Thomas Monjalon July 22, 2021, 2:16 p.m. UTC | #4
21/07/2021 10:43, Matan Azrad:
> Better title:
> net/mlx5/windows: fix indexed pools allocation

even better: keep the "on Windows" at the end.
diff mbox series

Patch

diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 5da362a9d5..a31fafc90d 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -35,6 +35,44 @@  static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
 /* Spinlock for mlx5_shared_data allocation. */
 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* rte flow indexed pool configuration. */
+static struct mlx5_indexed_pool_config icfg[] = {
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "ctl_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 1 << 14,
+		.type = "rte_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "mcp_flow_ipool",
+	},
+};
+
 /**
  * Initialize shared data between primary and secondary process.
  *
@@ -317,6 +355,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int own_domain_id = 0;
 	uint16_t port_id;
+	int i;
 
 	/* Build device name. */
 	strlcpy(name, dpdk_dev->name, sizeof(name));
@@ -584,6 +623,14 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	mlx5_set_min_inline(spawn, config);
 	/* Store device configuration on private structure. */
 	priv->config = *config;
+	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+		icfg[i].release_mem_en = !!config->reclaim_mode;
+		if (config->reclaim_mode)
+			icfg[i].per_core_cache = 0;
+		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
+		if (!priv->flows[i])
+			goto error;
+	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = NULL;
 	if (config->dv_flow_en) {
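
The creation loop mirrors the Linux spawn path: release_mem_en follows
the device's reclaim_mode, the per-core cache is disabled when reclaim
is requested, and one pool per flow type is created, bailing out to the
error label on failure. For completeness, the teardown side has to drop
those per-port pools again. A minimal sketch of what that looks like is
given below; it assumes the mlx5_ipool_destroy() helper from
mlx5_utils.h and the priv->flows[] array added by this series, and the
function name is hypothetical; in the driver this cleanup is expected to
sit in the shared close/error path rather than in a new helper.

/*
 * Hypothetical helper, for illustration only: release the per-port flow
 * indexed pools created in mlx5_dev_spawn(). Assumes mlx5_ipool_destroy()
 * with its usual signature; not part of this patch.
 */
static void
flow_ipools_release(struct mlx5_priv *priv)
{
	int i;

	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		if (!priv->flows[i])
			continue;
		mlx5_ipool_destroy(priv->flows[i]);
		priv->flows[i] = NULL;
	}
}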