examples/ipsec-secgw: increase number of qps to lcore_params

Message ID 1578667598-18942-1-git-send-email-anoobj@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series: examples/ipsec-secgw: increase number of qps to lcore_params

Checks

Context                      Check    Description
ci/checkpatch                success  coding style OK
ci/iol-intel-Performance     success  Performance Testing PASS
ci/iol-testing               success  Testing PASS
ci/iol-mellanox-Performance  success  Performance Testing PASS
ci/iol-nxp-Performance       success  Performance Testing PASS
ci/travis-robot              warning  Travis build: failed
ci/Intel-compilation         success  Compilation OK

Commit Message

Anoob Joseph Jan. 10, 2020, 2:46 p.m. UTC
Currently only one qp will be used per core. The number of qps can
be increased to match the number of lcore params.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
 examples/ipsec-secgw/ipsec-secgw.c | 39 +++++++++++++++++++-------------------
 examples/ipsec-secgw/ipsec.h       |  4 +++-
 2 files changed, 23 insertions(+), 20 deletions(-)
  

Comments

Ananyev, Konstantin Jan. 29, 2020, 10:12 p.m. UTC | #1
> 
> Currently only one qp will be used for one core. The number of qps can
> be increased to match the number of lcore params.

I don't really understand the purpose of that patch....
As I understand, all it does - unconditionally increases number of crypto-queues mapped
to the same lcore.
The question is what for?
All these extra queues wouldn't be used by the current poll mode data-path anyway.
Plus, in some cases hash_lookup() would fail to find an existing mapping.
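
To illustrate the lookup concern, a sketch using the patch's cdev_key; the map and the stored value are stand-ins, not code from the app:

#include <rte_hash.h>

/* add_mapping() now stores keys with port_id/queue_id filled in. */
struct cdev_key wr = { .port_id = 1, .queue_id = 0, .lcore_id = 6 };
/* A lookup built only from the lcore (port/queue left at zero)... */
struct cdev_key rd = { .lcore_id = 6 };
void *data;

rte_hash_add_key_data(map, &wr, (void *)(uintptr_t)7);
/* ...misses, although lcore 6 does have a mapping: returns -ENOENT. */
rte_hash_lookup_data(map, &rd, &data);
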
I understand that for your eventdev implementation you need one crypto queue
for each eth device.
But I think it could be done in less intrusive way
(see my previous mail as one possible option).
Konstantin  

> 
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> ---
>  examples/ipsec-secgw/ipsec-secgw.c | 39 +++++++++++++++++++-------------------
>  examples/ipsec-secgw/ipsec.h       |  4 +++-
>  2 files changed, 23 insertions(+), 20 deletions(-)
> 
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index 3b5aaf6..d8c435e 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -1709,6 +1709,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
>  	unsigned long i;
>  	struct cdev_key key = { 0 };
> 
> +	key.port_id = params->port_id;
> +	key.queue_id = params->queue_id;
>  	key.lcore_id = params->lcore_id;
>  	if (cipher)
>  		key.cipher_algo = cipher->sym.cipher.algo;
> @@ -1721,23 +1723,17 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
>  	if (ret != -ENOENT)
>  		return 0;
> 
> -	for (i = 0; i < ipsec_ctx->nb_qps; i++)
> -		if (ipsec_ctx->tbl[i].id == cdev_id)
> -			break;
> -
> -	if (i == ipsec_ctx->nb_qps) {
> -		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
> -			printf("Maximum number of crypto devices assigned to "
> -				"a core, increase MAX_QP_PER_LCORE value\n");
> -			return 0;
> -		}
> -		ipsec_ctx->tbl[i].id = cdev_id;
> -		ipsec_ctx->tbl[i].qp = qp;
> -		ipsec_ctx->nb_qps++;
> -		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
> -				"(cdev_id_qp %lu)\n", str, key.lcore_id,
> -				cdev_id, qp, i);
> +	i = ipsec_ctx->nb_qps;
> +	if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
> +		printf("Maximum number of crypto devices assigned to a core, "
> +		       "increase MAX_QP_PER_LCORE value\n");
> +		return 0;
>  	}
> +	ipsec_ctx->tbl[i].id = cdev_id;
> +	ipsec_ctx->tbl[i].qp = qp;
> +	ipsec_ctx->nb_qps++;
> +	printf("%s cdev mapping: lcore %u using cdev %u qp %u "
> +	       "(cdev_id_qp %lu)\n", str, key.lcore_id, cdev_id, qp, i);
> 
>  	ret = rte_hash_add_key_data(map, &key, (void *)i);
>  	if (ret < 0) {
> @@ -1785,8 +1781,10 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
>  			continue;
> 
>  		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> -			ret |= add_mapping(map, str, cdev_id, qp, params,
> +			ret = add_mapping(map, str, cdev_id, qp, params,
>  					ipsec_ctx, NULL, NULL, i);
> +			if (ret)
> +				return ret;
>  			continue;
>  		}
> 
> @@ -1801,12 +1799,15 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
>  			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
>  				continue;
> 
> -			ret |= add_mapping(map, str, cdev_id, qp, params,
> +			ret = add_mapping(map, str, cdev_id, qp, params,
>  						ipsec_ctx, i, j, NULL);
> +			if (ret)
> +				return ret;
> +			continue;
>  		}
>  	}
> 
> -	return ret;
> +	return 0;
>  }
> 
>  /* Check if the device is enabled by cryptodev_mask */
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index 8e07521..92fd5eb 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -200,7 +200,9 @@ struct ipsec_ctx {
>  };
> 
>  struct cdev_key {
> -	uint16_t lcore_id;
> +	uint16_t port_id;
> +	uint8_t queue_id;
> +	uint8_t lcore_id;
>  	uint8_t cipher_algo;
>  	uint8_t auth_algo;
>  	uint8_t aead_algo;
> --
> 2.7.4
  
Anoob Joseph Jan. 30, 2020, 4:28 p.m. UTC | #2
Hi Konstantin,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Thursday, January 30, 2020 3:43 AM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal <akhil.goyal@nxp.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Lukas Bartosik
> <lbartosik@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH] examples/ipsec-secgw: increase number of qps to
> lcore_params
> 
> > Currently only one qp will be used for one core. The number of qps can
> > be increased to match the number of lcore params.
> 
> I don't really understand the purpose of that patch....
> As I understand, all it does - unconditionally increases number of crypto-queues
> mapped to the same lcore.

[Anoob] With the current code, we will have only one crypto qp mapped to one lcore. So even if you have a large number of crypto qps, you would only be using as many as the number of lcores.

This is an inefficient model, as a fat flow (say, with large packet sizes) on one eth queue hitting one core can starve another flow which happens to hit the same core, because both flows would get queued to the same qp. And we cannot just randomly submit to multiple qps from the same core, as then the ordering would be messed up. So the best possible usage model would be to map one eth queue to one crypto qp. That way, the core wouldn't unnecessarily pipeline the crypto processing.
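
For illustration, a minimal sketch of that one-qp-per-eth-queue mapping; cqp_map, select_crypto_qp and their setup are hypothetical names, not part of this patch:

#include <rte_ethdev.h> /* RTE_MAX_ETHPORTS, RTE_MAX_QUEUES_PER_PORT */

/* Filled once at setup: one crypto qp bound per (eth port, rx queue). */
static uint16_t cqp_map[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

static inline uint16_t
select_crypto_qp(uint16_t port_id, uint16_t rx_queue_id)
{
        /* The same eth queue always submits to the same crypto qp, so
         * per-flow ordering is preserved while flows on different eth
         * queues no longer contend for a single qp. */
        return cqp_map[port_id][rx_queue_id];
}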
 
> The question is what for?
> All these extra queues woulnd't be used by current poll mode data-path anyway.
> Plus in some cases  hash_lookup() would fail to find existing mapping.

[Anoob] It was an issue with v1 that we have addressed with v2. I'll send v2 shortly. Please do check it and see if you have more comments.
 
> I understand that for your eventdev implementation you need one crypto queue
> for each eth device.
> But I think it could be done in less intrusive way (see my previous mail as one
> possible option).

[Anoob] If this suggestion is agreeable, then we can solve both the qp number requirement (for inline) and the default nb_mbuf calculation in a much more sensible way. If this doesn't fly, I'll probably go back to your suggestion, but then there would be more code just to handle these cases. And the nb_mbuf calculation could turn slightly complicated.
 
  
Ananyev, Konstantin Jan. 31, 2020, 4:32 p.m. UTC | #3
> > >
> > > Currently only one qp will be used for one core. The number of qps can
> > > be increased to match the number of lcore params.
> >
> > I don't really understand the purpose of that patch....
> > As I understand, all it does - unconditionally increases number of crypto-queues
> > mapped to the same lcore.
> 
> [Anoob] With the current code, we will have only crypto qp mapped to one lcore. So even if you have large number of crypto qps, you
> would be only using as much as the number of lcores.
> 
> This is an inefficient model as a fat flow(say with large packet sizes) on one eth queue hitting one core can starve another flow which
> happens to hit the same core, because both flows would get queued to the same qp.
> And, we cannot just randomly submit to multiple qps
> from the same core as then the ordering would be messed up. 

No-one is suggesting that, I believe.

> So the best possible usage model would be to map one eth queue to one
> crypto qp. That way, the core wouldn't be unnecessarily pipeline the crypto processing.

I doubt it is really the 'best possible usage model'.
There could be cases where forcing an lcore to use/manage more crypto-queues will lead to
negative effects: perf drop, not enough crypto queues for all eth queues, etc.
If your HW/SW requires matching each eth queue with a separate crypto-queue,
then I think it should be an optional feature, while keeping the default behavior intact.

> 
> > The question is what for?
> > All these extra queues woulnd't be used by current poll mode data-path anyway.
> > Plus in some cases  hash_lookup() would fail to find existing mapping.
> 
> [Anoob] It was an issue with v1 that we have addressed with v2. I'll send v2 shortly. Please do check that see if you have more comments.
> 
> > I understand that for your eventdev implementation you need one crypto queue
> > for each eth device.
> > But I think it could be done in less intrusive way (see my previous mail as one
> > possible option).
> 
> [Anoob] If this suggestion is agreeable, then we can solve both qp number requirement (for inline) and default nb_mbuf calculation in a
> much more sensible way. If this doesn't fly, I'll probably go back to your suggestion, but then there would be more code just to handle these
> cases. And the nb_mbuf calculation could turn slightly complicated.

Don't see how it affects the number of required mbufs...
Wouldn't it just be:
<num_of_eth_queues> * <eth_rxd_num + eth_txd_num> +
<num_of_crypto_queues> * <crq_desc_num> +
<lcore_num> * <lcore_cache + reassemble_table_size> + ...
?
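
As a sketch in code (every identifier here is a placeholder for a value the application already knows; none of them exist under these names in ipsec-secgw):

/* Rough mbuf-count estimate following the formula above. */
uint32_t nb_mbuf =
        nb_eth_queues * (nb_rxd + nb_txd) +      /* eth rx/tx rings  */
        nb_crypto_queues * crq_desc_num +        /* crypto qp rings  */
        nb_lcores * (cache_sz + reass_tbl_sz);   /* per-lcore extras */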
 


  
Anoob Joseph Jan. 31, 2020, 5:04 p.m. UTC | #4
Hi Konstantin,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Friday, January 31, 2020 10:02 PM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal <akhil.goyal@nxp.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Lukas Bartosik
> <lbartosik@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH] examples/ipsec-secgw: increase number of qps to
> lcore_params
> 
> I doubt it is really the 'best possible usage model'.
> There could be cases when forcing lcore to use/manage more crypto-queues
> will lead to negative effects: perf drop, not enough crypto queues for all eth
> queues, etc.
> If your HW/SW requires to match each eth queue with a separate crypto-queue,
> then I think it should be an optional feature, while keeping default behavior
> intact.

[Anoob] I think the question here is more to do with s/w crypto PMDs vs h/w crypto PMDs. For s/w PMDs, having more queues doesn't really make sense, while for h/w PMDs it's better. I'll see how we can make this an optional feature. Would you be okay with allowing such behavior if the underlying PMD can support as many queues as lcore_params?

As in, if the PMD doesn't support enough queues, we do 1 qp per core. Would that work for you? 
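
For illustration, a sketch of that fallback; the rte_cryptodev calls are real API, while the surrounding variables (cdev_id, nb_lcore_params, nb_lcores, nb_qps) are placeholders:

#include <rte_cryptodev.h>

/* Prefer one qp per lcore param; fall back to one qp per lcore when
 * the device cannot supply that many queue pairs. */
struct rte_cryptodev_info dev_info;

rte_cryptodev_info_get(cdev_id, &dev_info);
if (dev_info.max_nb_queue_pairs >= nb_lcore_params)
        nb_qps = nb_lcore_params;
else
        nb_qps = nb_lcores;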

Also, if we want to stick to 1 crypto qp per lcore, I don't understand why we should have the following macro defined as such:
#define MAX_QP_PER_LCORE 256
 
> > [Anoob] If this suggestion is agreeable, then we can solve both qp
> > number requirement (for inline) and default nb_mbuf calculation in a
> > much more sensible way. If this doesn't fly, I'll probably go back to your
> suggestion, but then there would be more code just to handle these cases. And
> the nb_mbuf calculation could turn slightly complicated.
> 
> Don't see how it affects number of required mbufs...
> Wouldn't it just be:
> <num_of_eth_queues> * <eth_rxd_num + eth+txd_num> +
> <num_of_crypto_queues> *<crq_desc_num> +
> <lcore_num>*<lcore_cache+reassemble_table_size>+....

[Anoob] What would be num_of_crypto_queues? 

> ?
  
Ananyev, Konstantin Jan. 31, 2020, 6:49 p.m. UTC | #5
> [Anoob] I think the question here is more to do with s/w crypto PMDs vs h/w crypto PMDs. For s/w PMDs, having more queues doesn't
> really make sense and for h/w PMDs it's better.

Not always.
If these queues belong to the same device, sometimes it is faster to use just one queue per device per core.
HW descriptor status polling, etc. is not free.

> I'll see how we can make this an optional feature. Would you be okay with allowing such
> behavior if the underlying PMD can support as many queues as lcore_params?
> 
> As in, if the PMD doesn't support enough queues, we do 1 qp per core. Would that work for you?

I am not fond of the idea of changing the default mapping method silently.
My preference would be a new command-line option (--cdev-mapping or so).
Another thought: make it more fine-grained and user-controlled
by extending the eth-port-queue-lcore mapping option,
from the current <port>,<queue>,<lcore>
to <port>,<queue>,<lcore>,<cdev-queue>.

So let's say, with 2 cores and 2 eth ports / 2 queues per port,
for the current mapping the user would do:
# use cdev queue 0 on all cdevs for lcore 6
# use cdev queue 1 on all cdevs for lcore 7
--lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,0),(0,1,7,1),(1,1,7,1)"

for the mapping you'd like to have:
--lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,1),(0,1,7,2),(1,1,7,3)"
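
For illustration, a sketch of how such a 4-tuple could be parsed; struct lcore_params mirrors the existing one in ipsec-secgw, while the cdev_qp field and parse_config_entry() are hypothetical:

#include <stdio.h>
#include <stdint.h>

struct lcore_params {
        uint16_t port_id;
        uint8_t queue_id;
        uint8_t lcore_id;
        uint8_t cdev_qp;        /* proposed fourth field: crypto queue */
};

/* "(0,0,6,1)" -> eth port 0, rx queue 0, lcore 6, cdev queue 1 */
static int
parse_config_entry(const char *s, struct lcore_params *p)
{
        unsigned int port, queue, lcore, cqp;

        if (sscanf(s, "(%u,%u,%u,%u)", &port, &queue, &lcore, &cqp) != 4)
                return -1;
        p->port_id = port;
        p->queue_id = queue;
        p->lcore_id = lcore;
        p->cdev_qp = cqp;
        return 0;
}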

  
Anoob Joseph Feb. 3, 2020, 9:05 a.m. UTC | #6
Hi Konstantin, Akhil,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Saturday, February 1, 2020 12:20 AM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal
> <akhil.goyal@nxp.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Lukas Bartosik
> <lbartosik@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH] examples/ipsec-secgw: increase number of qps to
> lcore_params
> 
> I am not fond of idea to change default mapping method silently.
> My preference would be a new command-line option (--cdev-maping or so).
> Another thought, make it more fine-grained and user-controlled by
> extending eth-port-queue-lcore mapping option.
> from current: <port>,<queue>,<lcore>
> to <port>,<queue>,<lcore>,<cdev-queue>.
> 
> So let say with 2 cores , 2 eth ports/2 queues per port for current mapping
> user would do:
> # use cdev queue 0 on all cdevs for lcore 6
> # use cdev queue 1 on all cdevs for lcore 7
> --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,0),(0,1,7,1),(1,1,7,1)"
> 
> for the mapping you'd like to have:
> --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,1),(0,1,7,2),(1,1,7,3)"

[Anoob] I like this idea. This would work for the inline case as well.

@Akhil, do you have any comments?

Also, I think we should make it <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
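
A sketch of the config entry under that 5-tuple, purely illustrative (not from any posted patch):

/* Hypothetical 5-tuple config entry. */
struct lcore_params {
        uint16_t port_id;       /* eth port */
        uint8_t queue_id;       /* eth rx queue */
        uint8_t lcore_id;
        uint8_t cdev_id;        /* explicit crypto device */
        uint8_t cdev_qp;        /* crypto queue on that device */
};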
 
  
Akhil Goyal Feb. 3, 2020, 9:15 a.m. UTC | #7
> > I am not fond of idea to change default mapping method silently.
> > My preference would be a new command-line option (--cdev-maping or so).
> > Another thought, make it more fine-grained and user-controlled by
> > extending eth-port-queue-lcore mapping option.
> > from current: <port>,<queue>,<lcore>
> > to <port>,<queue>,<lcore>,<cdev-queue>.
> >
> > So let say with 2 cores , 2 eth ports/2 queues per port for current mapping
> > user would do:
> > # use cdev queue 0 on all cdevs for lcore 6
> > # use cdev queue 1 on all cdevs for lcore 7
> > --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,0),(0,1,7,1),(1,1,7,1)"
> >
> > for the mapping you'd like to have:
> > --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,1),(0,1,7,2),(1,1,7,3)"
> 
> [Anoob] I like this idea. This would work for inline case as well.
> 
> @Akhil, do you have any comments?
> 
> Also, I think we should make it <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
> 
Looks good to me, but I believe this would need more changes and testing in the event patches.
Also, it does not have any changes for the lookaside cases.
Can we move this to the next release and add the lookaside case as well in a single go?
We need to close RC2 on 5th Feb, and I don't want to push this series to RC3 as it is a massive change in ipsec-secgw.

-Akhil
  
Anoob Joseph Feb. 3, 2020, 9:22 a.m. UTC | #8
Hi Akhil,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Monday, February 3, 2020 2:46 PM
> To: Anoob Joseph <anoobj@marvell.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Lukas Bartosik
> <lbartosik@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: [EXT] RE: [PATCH] examples/ipsec-secgw: increase number of qps to
> lcore_params
> 
> > > I am not fond of idea to change default mapping method silently.
> > > > My preference would be a new command-line option (--cdev-maping or
> > > > so).
> > > Another thought, make it more fine-grained and user-controlled by
> > > extending eth-port-queue-lcore mapping option.
> > > from current: <port>,<queue>,<lcore> to
> > > <port>,<queue>,<lcore>,<cdev-queue>.
> > >
> > > So let say with 2 cores , 2 eth ports/2 queues per port for current
> > > mapping user would do:
> > > > # use cdev queue 0 on all cdevs for lcore 6
> > > > # use cdev queue 1 on all cdevs for lcore 7
> > > > --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,0),(0,1,7,1),(1,1,7,1)"
> > >
> > > for the mapping you'd like to have:
> > > --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,1),(0,1,7,2),(1,1,7,3)"
> >
> > [Anoob] I like this idea. This would work for inline case as well.
> >
> > @Akhil, do you have any comments?
> >
> > Also, I think we should make it
> > <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
> >
> Looks good to me, but I believe this would need more changes and testing in
> event patches.
> Also it does not have any changes for lookaside cases.
> Can we move this to next release and add lookaside case as well in a single
> go.

[Anoob] Not a problem. I'll defer this to next release then.
 
> We need to close the RC2 on 5th Feb, and I don't want to push this series for
> RC3 as it is a massive change in ipsec-secgw.
> 
> -Akhil
  
Anoob Joseph Feb. 6, 2020, 10:46 a.m. UTC | #9
Hi Akhil,

> > > @Akhil, do you have any comments?
> > >
> > > Also, I think we should make it
> > > <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
> > >
> > Looks good to me, but I believe this would need more changes and
> > testing in event patches.
> > Also it does not have any changes for lookaside cases.
> > Can we move this to next release and add lookaside case as well in a
> > single go.
> 
> [Anoob] Not a problem. I'll defer this to next release then.

This patch is not part of the event additions to ipsec-secgw. This patch is required to handle a few corner cases, but is equally applicable to lookaside crypto as well. I had agreed to deferring only this patch.

Lukasz is preparing v4 of the event mode patches with the doc update and addressing one minor comment from Konstantin. If the event ipsec-secgw patches are not getting merged for this release, can you confirm they will be merged immediately after this release? I'm assuming you have finished the reviews, as the patches were submitted early in this release cycle.

Thanks,
Anoob

> -----Original Message-----
> From: Anoob Joseph
> Sent: Monday, February 3, 2020 2:52 PM
> To: Akhil Goyal <akhil.goyal@nxp.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Lukas Bartosik
> <lbartosik@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: RE: [PATCH] examples/ipsec-secgw: increase number of qps to
> lcore_params
> 
> Hi Akhil,
> 
> Please see inline.
> 
> Thanks,
> Anoob
> 
> > -----Original Message-----
> > From: Akhil Goyal <akhil.goyal@nxp.com>
> > Sent: Monday, February 3, 2020 2:46 PM
> > To: Anoob Joseph <anoobj@marvell.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>; Nicolau, Radu
> <radu.nicolau@intel.com>
> > Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Lukas Bartosik
> > <lbartosik@marvell.com>; Narayana Prasad Raju Athreya
> > <pathreya@marvell.com>; dev@dpdk.org
> > Subject: [EXT] RE: [PATCH] examples/ipsec-secgw: increase number of
> > qps to lcore_params
> >
> > External Email
> >
> > ----------------------------------------------------------------------
> > > > > > > > > Currently only one qp will be used for one core. The
> > > > > > > > > number of qps can be increased to match the number of
> > > > > > > > > lcore
> > params.
> > > > > > > >
> > > > > > > > I don't really understand the purpose of that patch....
> > > > > > > > As I understand, all it does - unconditionally increases
> > > > > > > > number of crypto-queues mapped to the same lcore.
> > > > > > >
> > > > > > > [Anoob] With the current code, we will have only crypto qp
> > > > > > > mapped to one lcore. So even if you have large number of
> > > > > > > crypto qps, you would be only
> > > > > > using as much as the number of lcores.
> > > > > > >
> > > > > > > This is an inefficient model as a fat flow(say with large
> > > > > > > packet
> > > > > > > sizes) on one eth queue hitting one core can starve another
> > > > > > > flow which
> > > > > > happens to hit the same core, because both flows would get
> > > > > > queued to the same qp.
> > > > > > > And, we cannot just randomly submit to multiple qps from the
> > > > > > > same core as then the ordering would be messed up.
> > > > > >
> > > > > > No-one suggesting that one I believe.
> > > > > >
> > > > > > So the best possible usage model would be to map one eth queue
> > > > > > to one
> > > > > > > crypto qp. That way, the core wouldn't be unnecessarily
> > > > > > > pipeline the crypto
> > > > > > processing.
> > > > > >
> > > > > > I doubt it is really the 'best possible usage model'.
> > > > > > There could be cases when forcing lcore to use/manage more
> > > > > > crypto-queues will lead to negative effects: perf drop, not
> > > > > > enough crypto queues for all eth queues, etc.
> > > > > > If your HW/SW requires to match each eth queue with a separate
> > > > > > crypto-queue, then I think it should be an optional feature,
> > > > > > while keeping default behavior intact.
> > > > >
> > > > > [Anoob] I think the question here is more to do with s/w crypto
> > > > > PMDs vs h/w crypto PMDs. For s/w PMDs, having more queues
> > > > > doesn't really
> > > > make sense and for h/w PMDs it's better.
> > > >
> > > > Not always.
> > > > If these queues belong to the same device, sometimes it is
> > > > faster to use just one queue per device per core.
> > > > HW descriptor status polling, etc. is not free.
> > > >
> > > > > I'll see how we can make this an optional feature. Would you
> > > > > be okay with allowing such behavior if the underlying PMD can
> > > > > support as many queues as lcore_params?
> > > > >
> > > > > As in, if the PMD doesn't support enough queues, we do 1 qp
> > > > > per core. Would that work for you?
> > > >
> > > > I am not fond of the idea of changing the default mapping
> > > > method silently.
> > > > My preference would be a new command-line option
> > > > (--cdev-mapping or so).
> > > > Another thought: make it more fine-grained and user-controlled
> > > > by extending the eth-port-queue-lcore mapping option,
> > > > from the current <port>,<queue>,<lcore> to
> > > > <port>,<queue>,<lcore>,<cdev-queue>.
> > > >
> > > > So let's say with 2 cores, 2 eth ports / 2 queues per port, for
> > > > the current mapping the user would do:
> > > > # use cdev queue 0 on all cdevs for lcore 6
> > > > # use cdev queue 1 on all cdevs for lcore 7
> > > > --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,0),(0,1,7,1),(1,1,7,1)"
> > > >
> > > > and for the mapping you'd like to have:
> > > > --lcores="6,7" ... -- --config="(0,0,6,0),(1,0,6,1),(0,1,7,2),(1,1,7,3)"
> > >
> > > [Anoob] I like this idea. This would work for the inline case as well.
> > >
> > > @Akhil, do you have any comments?
> > >
> > > Also, I think we should make it
> > > <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
> > >
> > Looks good to me, but I believe this would need more changes and
> > testing in the event patches.
> > Also, it does not have any changes for the lookaside case.
> > Can we move this to the next release and add the lookaside case as
> > well in a single go?
> 
> [Anoob] Not a problem. I'll defer this to the next release, then.
> 
> > We need to close RC2 on 5th Feb, and I don't want to push this
> > series for RC3, as it is a massive change in ipsec-secgw.
> >
> > -Akhil
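
As a concrete illustration of Konstantin's proposal above: below is a
minimal, hypothetical sketch of how the extended
"(port,queue,lcore,cdev-queue)" --config entry could be parsed. The
existing lcore_params in ipsec-secgw carries port_id, queue_id and
lcore_id; the cdev_qp field, the struct name and the
parse_config_entry() helper here are assumptions for illustration only,
not code from this patch or from DPDK.

#include <stdint.h>
#include <stdio.h>

/* Existing fields plus the proposed cdev queue; the struct name and
 * the cdev_qp member are hypothetical. */
struct lcore_params_ext {
	uint16_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
	uint8_t cdev_qp;	/* proposed fourth field */
};

/* Parse one "(port,queue,lcore,cdev-queue)" entry, e.g. "(1,0,6,1)". */
static int
parse_config_entry(const char *str, struct lcore_params_ext *p)
{
	unsigned int port, queue, lcore, cdev_qp;

	if (sscanf(str, "(%u,%u,%u,%u)", &port, &queue, &lcore, &cdev_qp) != 4)
		return -1;

	p->port_id = (uint16_t)port;
	p->queue_id = (uint8_t)queue;
	p->lcore_id = (uint8_t)lcore;
	p->cdev_qp = (uint8_t)cdev_qp;
	return 0;
}

With --config="(0,0,6,0),(1,0,6,1),(0,1,7,2),(1,1,7,3)", lcore 6 would
then poll cdev queue 0 for eth (0,0) and cdev queue 1 for eth (1,0),
giving each eth queue its own crypto qp as discussed in the thread.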
  
Akhil Goyal Feb. 6, 2020, 11:30 a.m. UTC | #10
> 
> Hi Akhil,
> 
> > > > @Akhil, do you have any comments?
> > > >
> > > > Also, I think we should make it
> > > > <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
> > > >
> > > Looks good to me, but I believe this would need more changes and
> > > testing in the event patches.
> > > Also, it does not have any changes for the lookaside case.
> > > Can we move this to the next release and add the lookaside case as
> > > well in a single go?
> >
> > [Anoob] Not a problem. I'll defer this to the next release, then.
> 
> This patch is not part of the event additions to ipsec-secgw. This patch is
> required to handle a few corner cases, but is equally applicable to lookaside
> crypto as well. I had agreed to deferring only this patch.
> 
> Lukasz is preparing v4 of the event mode patches with a doc update, addressing
> one minor comment from Konstantin. If the event ipsec-secgw patches are not
> getting merged for this release, can you confirm they will be merged
> immediately after this release? I'm assuming you have finished the reviews, as
> the patch was submitted early in this release cycle.
> 
I would say Lukasz should wait to send the next version until I complete the review.
I will notify you as soon as possible.
I am not done with the review of the event patches yet. I need more time for that.
Yes, I believe these will be merged early in the next cycle.

-Akhil
  
Thomas Monjalon Feb. 6, 2020, 11:32 a.m. UTC | #11
06/02/2020 11:46, Anoob Joseph:
> Hi Akhil,
> 
> > > > @Akhil, do you have any comments?
> > > >
> > > > Also, I think we should make it
> > > > <port>,<queue>,<lcore>,<cdev_id>,<cdev-queue>
> > > >
> > > Looks good to me, but I believe this would need more changes and
> > > testing in the event patches.
> > > Also, it does not have any changes for the lookaside case.
> > > Can we move this to the next release and add the lookaside case as
> > > well in a single go?
> > 
> > [Anoob] Not a problem. I'll defer this to the next release, then.
> 
> This patch is not part of the event additions to ipsec-secgw. This patch is required to handle a few corner cases, but is equally applicable to lookaside crypto as well. I had agreed to deferring only this patch.
> 
> Lukasz is preparing v4 of the event mode patches with a doc update, addressing one minor comment from Konstantin. If the event ipsec-secgw patches are not getting merged for this release, can you confirm they will be merged immediately after this release? I'm assuming you have finished the reviews, as the patch was submitted early in this release cycle.

Akhil, like all other tree maintainers, is doing his best.
Akhil is doing a great job, and has had a lot of work in this release cycle.
Please be patient.
  

Patch

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 3b5aaf6..d8c435e 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1709,6 +1709,8 @@  add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
 	unsigned long i;
 	struct cdev_key key = { 0 };
 
+	key.port_id = params->port_id;
+	key.queue_id = params->queue_id;
 	key.lcore_id = params->lcore_id;
 	if (cipher)
 		key.cipher_algo = cipher->sym.cipher.algo;
@@ -1721,23 +1723,17 @@  add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
 	if (ret != -ENOENT)
 		return 0;
 
-	for (i = 0; i < ipsec_ctx->nb_qps; i++)
-		if (ipsec_ctx->tbl[i].id == cdev_id)
-			break;
-
-	if (i == ipsec_ctx->nb_qps) {
-		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
-			printf("Maximum number of crypto devices assigned to "
-				"a core, increase MAX_QP_PER_LCORE value\n");
-			return 0;
-		}
-		ipsec_ctx->tbl[i].id = cdev_id;
-		ipsec_ctx->tbl[i].qp = qp;
-		ipsec_ctx->nb_qps++;
-		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
-				"(cdev_id_qp %lu)\n", str, key.lcore_id,
-				cdev_id, qp, i);
+	i = ipsec_ctx->nb_qps;
+	if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
+		printf("Maximum number of crypto devices assigned to a core, "
+		       "increase MAX_QP_PER_LCORE value\n");
+		return 0;
 	}
+	ipsec_ctx->tbl[i].id = cdev_id;
+	ipsec_ctx->tbl[i].qp = qp;
+	ipsec_ctx->nb_qps++;
+	printf("%s cdev mapping: lcore %u using cdev %u qp %u "
+	       "(cdev_id_qp %lu)\n", str, key.lcore_id, cdev_id, qp, i);
 
 	ret = rte_hash_add_key_data(map, &key, (void *)i);
 	if (ret < 0) {
@@ -1785,8 +1781,10 @@  add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 			continue;
 
 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret |= add_mapping(map, str, cdev_id, qp, params,
+			ret = add_mapping(map, str, cdev_id, qp, params,
 					ipsec_ctx, NULL, NULL, i);
+			if (ret)
+				return ret;
 			continue;
 		}
 
@@ -1801,12 +1799,15 @@  add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
 				continue;
 
-			ret |= add_mapping(map, str, cdev_id, qp, params,
+			ret = add_mapping(map, str, cdev_id, qp, params,
 						ipsec_ctx, i, j, NULL);
+			if (ret)
+				return ret;
+			continue;
 		}
 	}
 
-	return ret;
+	return 0;
 }
 
 /* Check if the device is enabled by cryptodev_mask */
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 8e07521..92fd5eb 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -200,7 +200,9 @@  struct ipsec_ctx {
 };
 
 struct cdev_key {
-	uint16_t lcore_id;
+	uint16_t port_id;
+	uint8_t queue_id;
+	uint8_t lcore_id;
 	uint8_t cipher_algo;
 	uint8_t auth_algo;
 	uint8_t aead_algo;
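
For illustration, a minimal sketch of how a consumer of the map built
by add_mapping() could resolve the crypto qp once port_id and queue_id
are part of cdev_key. The helper name and the way the fields are filled
are hypothetical; rte_hash_lookup_data() is the real rte_hash API, and
the value stored by rte_hash_add_key_data() above is the index into
ipsec_ctx->tbl[].

#include <stdint.h>
#include <rte_hash.h>

/* Uses struct cdev_key as modified by this patch (see ipsec.h above). */
static int32_t
get_cdev_qp_idx(const struct rte_hash *map, uint16_t port_id,
		uint8_t queue_id, uint8_t lcore_id, uint8_t aead_algo)
{
	struct cdev_key key = { 0 };
	void *idx;

	key.port_id = port_id;		/* new field added by this patch */
	key.queue_id = queue_id;	/* new field added by this patch */
	key.lcore_id = lcore_id;
	key.aead_algo = aead_algo;

	if (rte_hash_lookup_data(map, &key, &idx) < 0)
		return -1;	/* no qp mapped for this combination */

	/* The stored value is the index i passed to rte_hash_add_key_data(). */
	return (int32_t)(uintptr_t)idx;
}

Because the eth port and queue are now part of the key, two flows that
land on the same lcore but arrive on different eth queues resolve to
different crypto qps, which addresses the starvation case described in
the thread while keeping per-queue ordering intact.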