[v13] eventdev: simplify Rx adapter event vector config

Message ID 20210818065728.1877-1-pbhagavatula@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series [v13] eventdev: simplify Rx adapter event vector config

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-spell-check-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-aarch64-unit-testing fail Testing issues
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-x86_64-unit-testing fail Testing issues
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/intel-Testing success Testing PASS

Commit Message

Pavan Nikhilesh Bhagavatula Aug. 18, 2021, 6:57 a.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Include the vector configuration in the structure
``rte_event_eth_rx_adapter_queue_conf`` that is used to configure
the Rx queue parameters of an ethernet device attached to the Rx adapter.
This simplifies event vector configuration, as it avoids a separate
per-Rx-queue configuration call.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
---
 v13 Changes:
 - Fix cnxk driver compilation.
 v12 Changes:
 - Remove deprecation notice.
 - Remove unnecessary Change-Id.

 app/test-eventdev/test_pipeline_common.c |  16 +-
 doc/guides/rel_notes/deprecation.rst     |   9 --
 drivers/event/cnxk/cn10k_eventdev.c      |  77 ----------
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  41 ++++++
 lib/eventdev/eventdev_pmd.h              |  29 ----
 lib/eventdev/rte_event_eth_rx_adapter.c  | 179 ++++++++---------------
 lib/eventdev/rte_event_eth_rx_adapter.h  |  30 ----
 lib/eventdev/version.map                 |   1 -
 8 files changed, 104 insertions(+), 278 deletions(-)

--
2.17.1
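
As a minimal sketch of the application-side flow after this change, the vector
parameters are set directly in ``rte_event_eth_rx_adapter_queue_conf`` and a
single ``rte_event_eth_rx_adapter_queue_add()`` call configures the queue; no
separate vector-config call remains. The ids, queue id, vector size, timeout
and mempool below are illustrative placeholders, not values from the patch:

#include <string.h>

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Placeholder ids and handles for illustration only. */
static int
add_rx_queue_with_vectors(uint8_t adapter_id, uint8_t evdev_id,
			  uint16_t eth_port, struct rte_mempool *vector_pool)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	uint32_t cap;
	int ret;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = 0;			/* target event queue */
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;

	ret = rte_event_eth_rx_adapter_caps_get(evdev_id, eth_port, &cap);
	if (ret)
		return ret;

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
		/* Vector parameters now live in the queue conf itself. */
		qconf.rx_queue_flags |=
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
		qconf.vector_sz = 64;			/* example size */
		qconf.vector_timeout_ns = 100 * 1000;	/* example 100 us */
		qconf.vector_mp = vector_pool;
	}

	/* A single queue_add call now covers vectorization as well;
	 * rx_queue_id of -1 applies to all Rx queues of the port.
	 */
	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1,
						  &qconf);
}

The updated app/test-eventdev/test_pipeline_common.c hunk in the patch follows
the same pattern.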
  

Comments

Jayatheerthan, Jay Aug. 18, 2021, 8:22 a.m. UTC | #1
Hi Harish,
Could you review this patch ?

-Jay


> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, August 18, 2021 12:27 PM
> To: jerinj@marvell.com; Ray Kinsella <mdr@ashroe.eu>; Pavan Nikhilesh <pbhagavatula@marvell.com>; Shijith Thotton
> <sthotton@marvell.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v13] eventdev: simplify Rx adapter event vector config
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Include vector configuration into the structure
> ``rte_event_eth_rx_adapter_queue_conf`` that is used to configure
> Rx adapter ethernet device Rx queue parameters.
> This simplifies event vector configuration as it avoids splitting
> configuration per Rx queue.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
> ---
>  v13 Changes:
>  - Fix cnxk driver compilation.
>  v12 Changes:
>  - Remove deprecation notice.
>  - Remove unnecessary Change-Id.
> 
>  app/test-eventdev/test_pipeline_common.c |  16 +-
>  doc/guides/rel_notes/deprecation.rst     |   9 --
>  drivers/event/cnxk/cn10k_eventdev.c      |  77 ----------
>  drivers/event/cnxk/cnxk_eventdev_adptr.c |  41 ++++++
>  lib/eventdev/eventdev_pmd.h              |  29 ----
>  lib/eventdev/rte_event_eth_rx_adapter.c  | 179 ++++++++---------------
>  lib/eventdev/rte_event_eth_rx_adapter.h  |  30 ----
>  lib/eventdev/version.map                 |   1 -
>  8 files changed, 104 insertions(+), 278 deletions(-)
> 
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
> index 6ee530d4cd..2697547641 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -332,7 +332,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>  	uint16_t prod;
>  	struct rte_mempool *vector_pool = NULL;
>  	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
> -	struct rte_event_eth_rx_adapter_event_vector_config vec_conf;
> 
>  	memset(&queue_conf, 0,
>  			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
> @@ -398,8 +397,12 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>  			}
> 
>  			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
> +				queue_conf.vector_sz = opt->vector_size;
> +				queue_conf.vector_timeout_ns =
> +					opt->vector_tmo_nsec;
>  				queue_conf.rx_queue_flags |=
>  				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
> +				queue_conf.vector_mp = vector_pool;
>  			} else {
>  				evt_err("Rx adapter doesn't support event vector");
>  				return -EINVAL;
> @@ -419,17 +422,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>  			return ret;
>  		}
> 
> -		if (opt->ena_vector) {
> -			vec_conf.vector_sz = opt->vector_size;
> -			vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
> -			vec_conf.vector_mp = vector_pool;
> -			if (rte_event_eth_rx_adapter_queue_event_vector_config(
> -				    prod, prod, -1, &vec_conf) < 0) {
> -				evt_err("Failed to configure event vectorization for Rx adapter");
> -				return -EINVAL;
> -			}
> -		}
> -
>  		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
>  			uint32_t service_id = -1U;
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
> index 76a4abfd6b..2c37d7222c 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -257,15 +257,6 @@ Deprecation Notices
>    An 8-byte reserved field will be added to the structure ``rte_event_timer`` to
>    support future extensions.
> 
> -* eventdev: The structure ``rte_event_eth_rx_adapter_queue_conf`` will be
> -  extended to include ``rte_event_eth_rx_adapter_event_vector_config`` elements
> -  and the function ``rte_event_eth_rx_adapter_queue_event_vector_config`` will
> -  be removed in DPDK 21.11.
> -
> -  An application can enable event vectorization by passing the desired vector
> -  values to the function ``rte_event_eth_rx_adapter_queue_add`` using
> -  the structure ``rte_event_eth_rx_adapter_queue_add``.
> -
>  * eventdev: Reserved bytes of ``rte_event_crypto_request`` is a space holder
>    for ``response_info``. Both should be decoupled for better clarity.
>    New space for ``response_info`` can be made by changing
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index 6f37c5bd23..160192bb27 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -696,81 +696,6 @@ cn10k_sso_rx_adapter_vector_limits(
>  	return 0;
>  }
> 
> -static int
> -cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
> -				uint16_t port_id, uint16_t rq_id, uint16_t sz,
> -				uint64_t tmo_ns, struct rte_mempool *vmp)
> -{
> -	struct roc_nix_rq *rq;
> -
> -	rq = &cnxk_eth_dev->rqs[rq_id];
> -
> -	if (!rq->sso_ena)
> -		return -EINVAL;
> -	if (rq->flow_tag_width == 0)
> -		return -EINVAL;
> -
> -	rq->vwqe_ena = 1;
> -	rq->vwqe_first_skip = 0;
> -	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
> -	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
> -	rq->vwqe_wait_tmo =
> -		tmo_ns /
> -		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
> -	rq->tag_mask = (port_id & 0xF) << 20;
> -	rq->tag_mask |=
> -		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
> -		<< 24;
> -
> -	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
> -}
> -
> -static int
> -cn10k_sso_rx_adapter_vector_config(
> -	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_event_vector_config *config)
> -{
> -	struct cnxk_eth_dev *cnxk_eth_dev;
> -	struct cnxk_sso_evdev *dev;
> -	int i, rc;
> -
> -	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
> -	if (rc)
> -		return -EINVAL;
> -
> -	dev = cnxk_sso_pmd_priv(event_dev);
> -	cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
> -	if (rx_queue_id < 0) {
> -		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
> -			cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
> -					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
> -			rc = cnxk_sso_xae_reconfigure(
> -				(struct rte_eventdev *)(uintptr_t)event_dev);
> -			rc = cnxk_sso_rx_adapter_vwqe_enable(
> -				cnxk_eth_dev, eth_dev->data->port_id, i,
> -				config->vector_sz, config->vector_timeout_ns,
> -				config->vector_mp);
> -			if (rc)
> -				return -EINVAL;
> -		}
> -	} else {
> -
> -		cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
> -				      RTE_EVENT_TYPE_ETHDEV_VECTOR);
> -		rc = cnxk_sso_xae_reconfigure(
> -			(struct rte_eventdev *)(uintptr_t)event_dev);
> -		rc = cnxk_sso_rx_adapter_vwqe_enable(
> -			cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
> -			config->vector_sz, config->vector_timeout_ns,
> -			config->vector_mp);
> -		if (rc)
> -			return -EINVAL;
> -	}
> -
> -	return 0;
> -}
> -
>  static int
>  cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
>  			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
> @@ -841,8 +766,6 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
>  	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
> 
>  	.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
> -	.eth_rx_adapter_event_vector_config =
> -		cn10k_sso_rx_adapter_vector_config,
> 
>  	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
>  	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
> diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index baf2f2aa6b..80f5602286 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -156,6 +156,35 @@ cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
>  	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
>  }
> 
> +static int
> +cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
> +				uint16_t port_id, uint16_t rq_id, uint16_t sz,
> +				uint64_t tmo_ns, struct rte_mempool *vmp)
> +{
> +	struct roc_nix_rq *rq;
> +
> +	rq = &cnxk_eth_dev->rqs[rq_id];
> +
> +	if (!rq->sso_ena)
> +		return -EINVAL;
> +	if (rq->flow_tag_width == 0)
> +		return -EINVAL;
> +
> +	rq->vwqe_ena = 1;
> +	rq->vwqe_first_skip = 0;
> +	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
> +	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
> +	rq->vwqe_wait_tmo =
> +		tmo_ns /
> +		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
> +	rq->tag_mask = (port_id & 0xF) << 20;
> +	rq->tag_mask |=
> +		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
> +		<< 24;
> +
> +	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
> +}
> +
>  int
>  cnxk_sso_rx_adapter_queue_add(
>  	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
> @@ -183,6 +212,18 @@ cnxk_sso_rx_adapter_queue_add(
>  			&queue_conf->ev,
>  			!!(queue_conf->rx_queue_flags &
>  			   RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
> +		if (queue_conf->rx_queue_flags &
> +		    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> +			cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
> +					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
> +			rc |= cnxk_sso_xae_reconfigure(
> +				(struct rte_eventdev *)(uintptr_t)event_dev);
> +			rc |= cnxk_sso_rx_adapter_vwqe_enable(
> +				cnxk_eth_dev, port, rx_queue_id,
> +				queue_conf->vector_sz,
> +				queue_conf->vector_timeout_ns,
> +				queue_conf->vector_mp);
> +		}
>  		rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
>  				      rxq_sp->qconf.mp->pool_id, true,
>  				      dev->force_ena_bp);
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index 0f724ac85d..63b3bc4b51 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -667,32 +667,6 @@ typedef int (*eventdev_eth_rx_adapter_vector_limits_get_t)(
>  	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
>  	struct rte_event_eth_rx_adapter_vector_limits *limits);
> 
> -struct rte_event_eth_rx_adapter_event_vector_config;
> -/**
> - * Enable event vector on an given Rx queue of a ethernet devices belonging to
> - * the Rx adapter.
> - *
> - * @param dev
> - *   Event device pointer
> - *
> - * @param eth_dev
> - *   Ethernet device pointer
> - *
> - * @param rx_queue_id
> - *   The Rx queue identifier
> - *
> - * @param config
> - *   Pointer to the event vector configuration structure.
> - *
> - * @return
> - *   - 0: Success.
> - *   - <0: Error code returned by the driver function.
> - */
> -typedef int (*eventdev_eth_rx_adapter_event_vector_config_t)(
> -	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_event_vector_config *config);
> -
>  typedef uint32_t rte_event_pmd_selftest_seqn_t;
>  extern int rte_event_pmd_selftest_seqn_dynfield_offset;
> 
> @@ -1118,9 +1092,6 @@ struct rte_eventdev_ops {
>  	eventdev_eth_rx_adapter_vector_limits_get_t
>  		eth_rx_adapter_vector_limits_get;
>  	/**< Get event vector limits for the Rx adapter */
> -	eventdev_eth_rx_adapter_event_vector_config_t
> -		eth_rx_adapter_event_vector_config;
> -	/**< Configure Rx adapter with event vector */
> 
>  	eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
>  	/**< Get timer adapter capabilities */
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index 13dfb28401..2b2dd688fc 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -1895,6 +1895,24 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	} else
>  		qi_ev->flow_id = 0;
> 
> +	if (conf->rx_queue_flags &
> +	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> +		queue_info->ena_vector = 1;
> +		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> +		rxa_set_vector_data(queue_info, conf->vector_sz,
> +				    conf->vector_timeout_ns, conf->vector_mp,
> +				    rx_queue_id, dev_info->dev->data->port_id);
> +		rx_adapter->ena_vector = 1;
> +		rx_adapter->vector_tmo_ticks =
> +			rx_adapter->vector_tmo_ticks ?
> +				      RTE_MIN(queue_info->vector_data
> +							.vector_timeout_ticks >>
> +						1,
> +					rx_adapter->vector_tmo_ticks) :
> +				queue_info->vector_data.vector_timeout_ticks >>
> +					1;
> +	}
> +
>  	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
>  	if (rxa_polled_queue(dev_info, rx_queue_id)) {
>  		rx_adapter->num_rx_polled += !pollq;
> @@ -1920,42 +1938,6 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	}
>  }
> 
> -static void
> -rxa_sw_event_vector_configure(
> -	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> -	int rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_event_vector_config *config)
> -{
> -	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
> -	struct eth_rx_queue_info *queue_info;
> -	struct rte_event *qi_ev;
> -
> -	if (rx_queue_id == -1) {
> -		uint16_t nb_rx_queues;
> -		uint16_t i;
> -
> -		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
> -		for (i = 0; i < nb_rx_queues; i++)
> -			rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
> -						      config);
> -		return;
> -	}
> -
> -	queue_info = &dev_info->rx_queue[rx_queue_id];
> -	qi_ev = (struct rte_event *)&queue_info->event;
> -	queue_info->ena_vector = 1;
> -	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> -	rxa_set_vector_data(queue_info, config->vector_sz,
> -			    config->vector_timeout_ns, config->vector_mp,
> -			    rx_queue_id, dev_info->dev->data->port_id);
> -	rx_adapter->ena_vector = 1;
> -	rx_adapter->vector_tmo_ticks =
> -		rx_adapter->vector_tmo_ticks ?
> -			      RTE_MIN(config->vector_timeout_ns >> 1,
> -				rx_adapter->vector_tmo_ticks) :
> -			      config->vector_timeout_ns >> 1;
> -}
> -
>  static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
>  		uint16_t eth_dev_id,
>  		int rx_queue_id,
> @@ -2270,6 +2252,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  	struct rte_event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> +	struct rte_event_eth_rx_adapter_vector_limits limits;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
>  	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> @@ -2297,13 +2280,46 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  		return -EINVAL;
>  	}
> 
> -	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&
> -	    (queue_conf->rx_queue_flags &
> -	     RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {
> -		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> +	if (queue_conf->rx_queue_flags &
> +	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> +
> +		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> +			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
> +
> +		ret = rte_event_eth_rx_adapter_vector_limits_get(
> +			rx_adapter->eventdev_id, eth_dev_id, &limits);
> +		if (ret < 0) {
> +			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
> +		if (queue_conf->vector_sz < limits.min_sz ||
> +		    queue_conf->vector_sz > limits.max_sz ||
> +		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
> +		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
> +		    queue_conf->vector_mp == NULL) {
> +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
> +		if (queue_conf->vector_mp->elt_size <
> +		    (sizeof(struct rte_event_vector) +
> +		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
> +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
>  	}
> 
>  	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
> @@ -2499,83 +2515,6 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
>  	return ret;
>  }
> 
> -int
> -rte_event_eth_rx_adapter_queue_event_vector_config(
> -	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> -	struct rte_event_eth_rx_adapter_event_vector_config *config)
> -{
> -	struct rte_event_eth_rx_adapter_vector_limits limits;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> -	struct rte_eventdev *dev;
> -	uint32_t cap;
> -	int ret;
> -
> -	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> -	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> -
> -	rx_adapter = rxa_id_to_adapter(id);
> -	if ((rx_adapter == NULL) || (config == NULL))
> -		return -EINVAL;
> -
> -	dev = &rte_eventdevs[rx_adapter->eventdev_id];
> -	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
> -						eth_dev_id, &cap);
> -	if (ret) {
> -		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> -				 "eth port %" PRIu16,
> -				 id, eth_dev_id);
> -		return ret;
> -	}
> -
> -	if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
> -		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> -	}
> -
> -	ret = rte_event_eth_rx_adapter_vector_limits_get(
> -		rx_adapter->eventdev_id, eth_dev_id, &limits);
> -	if (ret) {
> -		RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
> -				 "eth port %" PRIu16,
> -				 rx_adapter->eventdev_id, eth_dev_id);
> -		return ret;
> -	}
> -
> -	if (config->vector_sz < limits.min_sz ||
> -	    config->vector_sz > limits.max_sz ||
> -	    config->vector_timeout_ns < limits.min_timeout_ns ||
> -	    config->vector_timeout_ns > limits.max_timeout_ns ||
> -	    config->vector_mp == NULL) {
> -		RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> -	}
> -	if (config->vector_mp->elt_size <
> -	    (sizeof(struct rte_event_vector) +
> -	     (sizeof(uintptr_t) * config->vector_sz))) {
> -		RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> -	}
> -
> -	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
> -		RTE_FUNC_PTR_OR_ERR_RET(
> -			*dev->dev_ops->eth_rx_adapter_event_vector_config,
> -			-ENOTSUP);
> -		ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
> -			dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
> -	} else {
> -		rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
> -					      rx_queue_id, config);
> -	}
> -
> -	return ret;
> -}
> -
>  int
>  rte_event_eth_rx_adapter_vector_limits_get(
>  	uint8_t dev_id, uint16_t eth_port_id,
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> index 182dd2e5dd..3f8b362295 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -171,9 +171,6 @@ struct rte_event_eth_rx_adapter_queue_conf {
>  	 * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
>  	 * enqueued event.
>  	 */
> -};
> -
> -struct rte_event_eth_rx_adapter_event_vector_config {
>  	uint16_t vector_sz;
>  	/**<
>  	 * Indicates the maximum number for mbufs to combine and form a vector.
> @@ -548,33 +545,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
>  	uint8_t dev_id, uint16_t eth_port_id,
>  	struct rte_event_eth_rx_adapter_vector_limits *limits);
> 
> -/**
> - * Configure event vectorization for a given ethernet device queue, that has
> - * been added to a event eth Rx adapter.
> - *
> - * @param id
> - *  The identifier of the ethernet Rx event adapter.
> - *
> - * @param eth_dev_id
> - *  The identifier of the ethernet device.
> - *
> - * @param rx_queue_id
> - *  Ethernet device receive queue index.
> - *  If rx_queue_id is -1, then all Rx queues configured for the ethernet device
> - *  are configured with event vectorization.
> - *
> - * @param config
> - *  Event vector configuration structure.
> - *
> - * @return
> - *  - 0: Success, Receive queue configured correctly.
> - *  - <0: Error code on failure.
> - */
> -__rte_experimental
> -int rte_event_eth_rx_adapter_queue_event_vector_config(
> -	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> -	struct rte_event_eth_rx_adapter_event_vector_config *config);
> -
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index 88625621ec..cd86d2d908 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -142,7 +142,6 @@ EXPERIMENTAL {
>  	#added in 21.05
>  	rte_event_vector_pool_create;
>  	rte_event_eth_rx_adapter_vector_limits_get;
> -	rte_event_eth_rx_adapter_queue_event_vector_config;
>  	__rte_eventdev_trace_crypto_adapter_enqueue;
>  };
> 
> --
> 2.17.1
  
Naga Harish K, S V Aug. 20, 2021, 7:33 a.m. UTC | #2
-----Original Message-----
From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> 
Sent: Wednesday, August 18, 2021 1:53 PM
To: pbhagavatula@marvell.com; jerinj@marvell.com; Ray Kinsella <mdr@ashroe.eu>; Shijith Thotton <sthotton@marvell.com>; Naga Harish K, S V <s.v.naga.harish.k@intel.com>
Cc: dev@dpdk.org
Subject: RE: [dpdk-dev] [PATCH v13] eventdev: simplify Rx adapter event vector config

Hi Harish,
Could you review this patch ?

-Jay


> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, August 18, 2021 12:27 PM
> To: jerinj@marvell.com; Ray Kinsella <mdr@ashroe.eu>; Pavan Nikhilesh 
> <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>; 
> Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v13] eventdev: simplify Rx adapter event 
> vector config
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Include vector configuration into the structure 
> ``rte_event_eth_rx_adapter_queue_conf`` that is used to configure Rx 
> adapter ethernet device Rx queue parameters.
> This simplifies event vector configuration as it avoids splitting 
> configuration per Rx queue.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
> ---
>  v13 Changes:
>  - Fix cnxk driver compilation.
>  v12 Changes:
>  - Remove deprecation notice.
>  - Remove unnecessary Change-Id.
> 
>  app/test-eventdev/test_pipeline_common.c |  16 +-
>  doc/guides/rel_notes/deprecation.rst     |   9 --
>  drivers/event/cnxk/cn10k_eventdev.c      |  77 ----------
>  drivers/event/cnxk/cnxk_eventdev_adptr.c |  41 ++++++
>  lib/eventdev/eventdev_pmd.h              |  29 ----
>  lib/eventdev/rte_event_eth_rx_adapter.c  | 179 
> ++++++++---------------  lib/eventdev/rte_event_eth_rx_adapter.h  |  30 ----
>  lib/eventdev/version.map                 |   1 -
>  8 files changed, 104 insertions(+), 278 deletions(-)
> 
> diff --git a/app/test-eventdev/test_pipeline_common.c 
> b/app/test-eventdev/test_pipeline_common.c
> index 6ee530d4cd..2697547641 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -332,7 +332,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>  	uint16_t prod;
>  	struct rte_mempool *vector_pool = NULL;
>  	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
> -	struct rte_event_eth_rx_adapter_event_vector_config vec_conf;
> 
>  	memset(&queue_conf, 0,
>  			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
> @@ -398,8 +397,12 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>  			}
> 
>  			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
> +				queue_conf.vector_sz = opt->vector_size;
> +				queue_conf.vector_timeout_ns =
> +					opt->vector_tmo_nsec;
>  				queue_conf.rx_queue_flags |=
>  				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
> +				queue_conf.vector_mp = vector_pool;
>  			} else {
>  				evt_err("Rx adapter doesn't support event vector");
>  				return -EINVAL;
> @@ -419,17 +422,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>  			return ret;
>  		}
> 
> -		if (opt->ena_vector) {
> -			vec_conf.vector_sz = opt->vector_size;
> -			vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
> -			vec_conf.vector_mp = vector_pool;
> -			if (rte_event_eth_rx_adapter_queue_event_vector_config(
> -				    prod, prod, -1, &vec_conf) < 0) {
> -				evt_err("Failed to configure event vectorization for Rx adapter");
> -				return -EINVAL;
> -			}
> -		}
> -
>  		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
>  			uint32_t service_id = -1U;
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst 
> b/doc/guides/rel_notes/deprecation.rst
> index 76a4abfd6b..2c37d7222c 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -257,15 +257,6 @@ Deprecation Notices
>    An 8-byte reserved field will be added to the structure ``rte_event_timer`` to
>    support future extensions.
> 
> -* eventdev: The structure ``rte_event_eth_rx_adapter_queue_conf`` 
> will be
> -  extended to include 
> ``rte_event_eth_rx_adapter_event_vector_config`` elements
> -  and the function 
> ``rte_event_eth_rx_adapter_queue_event_vector_config`` will
> -  be removed in DPDK 21.11.
> -
> -  An application can enable event vectorization by passing the 
> desired vector
> -  values to the function ``rte_event_eth_rx_adapter_queue_add`` using
> -  the structure ``rte_event_eth_rx_adapter_queue_add``.
> -

The above paragraph on how to enable Rx adapter vectorization may need to be
added to the Rx adapter documentation with the correct structure name.
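
For illustration, a rough sketch of the usage such a documentation example
could describe with the correct structure name,
``rte_event_eth_rx_adapter_queue_conf``, combined with
``rte_event_eth_rx_adapter_vector_limits_get()``; the helper name, ids,
requested size and mempool below are hypothetical, not part of the patch:

#include <string.h>

#include <rte_common.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical helper; all parameters are placeholders. */
static int
add_rx_queue_within_vector_limits(uint8_t adapter_id, uint8_t evdev_id,
				  uint16_t eth_port, uint16_t requested_sz,
				  struct rte_mempool *vector_pool)
{
	struct rte_event_eth_rx_adapter_vector_limits limits;
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	int ret;

	memset(&qconf, 0, sizeof(qconf));
	/* ev fields (queue_id, sched_type, ...) omitted for brevity. */

	/* Query the PMD's supported vector size and timeout range. */
	ret = rte_event_eth_rx_adapter_vector_limits_get(evdev_id, eth_port,
							 &limits);
	if (ret)
		return ret;

	qconf.rx_queue_flags |= RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
	/* vector_sz must lie within [limits.min_sz, limits.max_sz]. */
	qconf.vector_sz = RTE_MAX(limits.min_sz,
				  RTE_MIN(requested_sz, limits.max_sz));
	/* vector_timeout_ns must lie within [min_timeout_ns, max_timeout_ns]. */
	qconf.vector_timeout_ns = limits.min_timeout_ns;
	/* The pool's elt_size must fit an rte_event_vector plus
	 * vector_sz mbuf pointers, as checked by queue_add.
	 */
	qconf.vector_mp = vector_pool;

	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1,
						  &qconf);
}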


>  * eventdev: Reserved bytes of ``rte_event_crypto_request`` is a space holder
>    for ``response_info``. Both should be decoupled for better clarity.
>    New space for ``response_info`` can be made by changing diff --git 
> a/drivers/event/cnxk/cn10k_eventdev.c 
> b/drivers/event/cnxk/cn10k_eventdev.c
> index 6f37c5bd23..160192bb27 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -696,81 +696,6 @@ cn10k_sso_rx_adapter_vector_limits(
>  	return 0;
>  }
> 
> -static int
> -cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
> -				uint16_t port_id, uint16_t rq_id, uint16_t sz,
> -				uint64_t tmo_ns, struct rte_mempool *vmp)
> -{
> -	struct roc_nix_rq *rq;
> -
> -	rq = &cnxk_eth_dev->rqs[rq_id];
> -
> -	if (!rq->sso_ena)
> -		return -EINVAL;
> -	if (rq->flow_tag_width == 0)
> -		return -EINVAL;
> -
> -	rq->vwqe_ena = 1;
> -	rq->vwqe_first_skip = 0;
> -	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
> -	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
> -	rq->vwqe_wait_tmo =
> -		tmo_ns /
> -		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
> -	rq->tag_mask = (port_id & 0xF) << 20;
> -	rq->tag_mask |=
> -		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
> -		<< 24;
> -
> -	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
> -}
> -
> -static int
> -cn10k_sso_rx_adapter_vector_config(
> -	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_event_vector_config *config)
> -{
> -	struct cnxk_eth_dev *cnxk_eth_dev;
> -	struct cnxk_sso_evdev *dev;
> -	int i, rc;
> -
> -	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
> -	if (rc)
> -		return -EINVAL;
> -
> -	dev = cnxk_sso_pmd_priv(event_dev);
> -	cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
> -	if (rx_queue_id < 0) {
> -		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
> -			cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
> -					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
> -			rc = cnxk_sso_xae_reconfigure(
> -				(struct rte_eventdev *)(uintptr_t)event_dev);
> -			rc = cnxk_sso_rx_adapter_vwqe_enable(
> -				cnxk_eth_dev, eth_dev->data->port_id, i,
> -				config->vector_sz, config->vector_timeout_ns,
> -				config->vector_mp);
> -			if (rc)
> -				return -EINVAL;
> -		}
> -	} else {
> -
> -		cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
> -				      RTE_EVENT_TYPE_ETHDEV_VECTOR);
> -		rc = cnxk_sso_xae_reconfigure(
> -			(struct rte_eventdev *)(uintptr_t)event_dev);
> -		rc = cnxk_sso_rx_adapter_vwqe_enable(
> -			cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
> -			config->vector_sz, config->vector_timeout_ns,
> -			config->vector_mp);
> -		if (rc)
> -			return -EINVAL;
> -	}
> -
> -	return 0;
> -}
> -
>  static int
>  cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
>  			      const struct rte_eth_dev *eth_dev, uint32_t *caps) @@ -841,8 
> +766,6 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
>  	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
> 
>  	.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
> -	.eth_rx_adapter_event_vector_config =
> -		cn10k_sso_rx_adapter_vector_config,
> 
>  	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
>  	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add, diff 
> --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c 
> b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index baf2f2aa6b..80f5602286 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -156,6 +156,35 @@ cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
>  	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);  }
> 
> +static int
> +cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
> +				uint16_t port_id, uint16_t rq_id, uint16_t sz,
> +				uint64_t tmo_ns, struct rte_mempool *vmp) {
> +	struct roc_nix_rq *rq;
> +
> +	rq = &cnxk_eth_dev->rqs[rq_id];
> +
> +	if (!rq->sso_ena)
> +		return -EINVAL;
> +	if (rq->flow_tag_width == 0)
> +		return -EINVAL;
> +
> +	rq->vwqe_ena = 1;
> +	rq->vwqe_first_skip = 0;
> +	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
> +	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
> +	rq->vwqe_wait_tmo =
> +		tmo_ns /
> +		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
> +	rq->tag_mask = (port_id & 0xF) << 20;
> +	rq->tag_mask |=
> +		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
> +		<< 24;
> +
> +	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0); }
> +
>  int
>  cnxk_sso_rx_adapter_queue_add(
>  	const struct rte_eventdev *event_dev, const struct rte_eth_dev 
> *eth_dev, @@ -183,6 +212,18 @@ cnxk_sso_rx_adapter_queue_add(
>  			&queue_conf->ev,
>  			!!(queue_conf->rx_queue_flags &
>  			   RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
> +		if (queue_conf->rx_queue_flags &
> +		    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> +			cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
> +					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
> +			rc |= cnxk_sso_xae_reconfigure(
> +				(struct rte_eventdev *)(uintptr_t)event_dev);
> +			rc |= cnxk_sso_rx_adapter_vwqe_enable(
> +				cnxk_eth_dev, port, rx_queue_id,
> +				queue_conf->vector_sz,
> +				queue_conf->vector_timeout_ns,
> +				queue_conf->vector_mp);
> +		}
>  		rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
>  				      rxq_sp->qconf.mp->pool_id, true,
>  				      dev->force_ena_bp);
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h 
> index 0f724ac85d..63b3bc4b51 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -667,32 +667,6 @@ typedef int (*eventdev_eth_rx_adapter_vector_limits_get_t)(
>  	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
>  	struct rte_event_eth_rx_adapter_vector_limits *limits);
> 
> -struct rte_event_eth_rx_adapter_event_vector_config;
> -/**
> - * Enable event vector on an given Rx queue of a ethernet devices 
> belonging to
> - * the Rx adapter.
> - *
> - * @param dev
> - *   Event device pointer
> - *
> - * @param eth_dev
> - *   Ethernet device pointer
> - *
> - * @param rx_queue_id
> - *   The Rx queue identifier
> - *
> - * @param config
> - *   Pointer to the event vector configuration structure.
> - *
> - * @return
> - *   - 0: Success.
> - *   - <0: Error code returned by the driver function.
> - */
> -typedef int (*eventdev_eth_rx_adapter_event_vector_config_t)(
> -	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_event_vector_config *config);
> -
>  typedef uint32_t rte_event_pmd_selftest_seqn_t;  extern int 
> rte_event_pmd_selftest_seqn_dynfield_offset;
> 
> @@ -1118,9 +1092,6 @@ struct rte_eventdev_ops {
>  	eventdev_eth_rx_adapter_vector_limits_get_t
>  		eth_rx_adapter_vector_limits_get;
>  	/**< Get event vector limits for the Rx adapter */
> -	eventdev_eth_rx_adapter_event_vector_config_t
> -		eth_rx_adapter_event_vector_config;
> -	/**< Configure Rx adapter with event vector */
> 
>  	eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
>  	/**< Get timer adapter capabilities */ diff --git 
> a/lib/eventdev/rte_event_eth_rx_adapter.c 
> b/lib/eventdev/rte_event_eth_rx_adapter.c
> index 13dfb28401..2b2dd688fc 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -1895,6 +1895,24 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	} else
>  		qi_ev->flow_id = 0;
> 
> +	if (conf->rx_queue_flags &
> +	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> +		queue_info->ena_vector = 1;
> +		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> +		rxa_set_vector_data(queue_info, conf->vector_sz,
> +				    conf->vector_timeout_ns, conf->vector_mp,
> +				    rx_queue_id, dev_info->dev->data->port_id);
> +		rx_adapter->ena_vector = 1;
> +		rx_adapter->vector_tmo_ticks =
> +			rx_adapter->vector_tmo_ticks ?
> +				      RTE_MIN(queue_info->vector_data
> +							.vector_timeout_ticks >>
> +						1,
> +					rx_adapter->vector_tmo_ticks) :
> +				queue_info->vector_data.vector_timeout_ticks >>
> +					1;
> +	}
> +
>  	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
>  	if (rxa_polled_queue(dev_info, rx_queue_id)) {
>  		rx_adapter->num_rx_polled += !pollq; @@ -1920,42 +1938,6 @@ 
> rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	}
>  }
> 
> -static void
> -rxa_sw_event_vector_configure(
> -	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> -	int rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_event_vector_config *config)
> -{
> -	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
> -	struct eth_rx_queue_info *queue_info;
> -	struct rte_event *qi_ev;
> -
> -	if (rx_queue_id == -1) {
> -		uint16_t nb_rx_queues;
> -		uint16_t i;
> -
> -		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
> -		for (i = 0; i < nb_rx_queues; i++)
> -			rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
> -						      config);
> -		return;
> -	}
> -
> -	queue_info = &dev_info->rx_queue[rx_queue_id];
> -	qi_ev = (struct rte_event *)&queue_info->event;
> -	queue_info->ena_vector = 1;
> -	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> -	rxa_set_vector_data(queue_info, config->vector_sz,
> -			    config->vector_timeout_ns, config->vector_mp,
> -			    rx_queue_id, dev_info->dev->data->port_id);
> -	rx_adapter->ena_vector = 1;
> -	rx_adapter->vector_tmo_ticks =
> -		rx_adapter->vector_tmo_ticks ?
> -			      RTE_MIN(config->vector_timeout_ns >> 1,
> -				rx_adapter->vector_tmo_ticks) :
> -			      config->vector_timeout_ns >> 1;
> -}
> -
>  static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
>  		uint16_t eth_dev_id,
>  		int rx_queue_id,
> @@ -2270,6 +2252,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  	struct rte_event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> +	struct rte_event_eth_rx_adapter_vector_limits limits;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
>  	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); @@ -2297,13 
> +2280,46 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  		return -EINVAL;
>  	}
> 
> -	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&
> -	    (queue_conf->rx_queue_flags &
> -	     RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {
> -		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> +	if (queue_conf->rx_queue_flags &
> +	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> +
> +		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> +			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
> +
> +		ret = rte_event_eth_rx_adapter_vector_limits_get(
> +			rx_adapter->eventdev_id, eth_dev_id, &limits);
> +		if (ret < 0) {
> +			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
> +		if (queue_conf->vector_sz < limits.min_sz ||
> +		    queue_conf->vector_sz > limits.max_sz ||
> +		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
> +		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
> +		    queue_conf->vector_mp == NULL) {
> +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
> +		if (queue_conf->vector_mp->elt_size <
> +		    (sizeof(struct rte_event_vector) +
> +		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
> +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> +					 " eth port: %" PRIu16
> +					 " adapter id: %" PRIu8,
> +					 eth_dev_id, id);
> +			return -EINVAL;
> +		}
>  	}
> 
>  	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 && @@ 
> -2499,83 +2515,6 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
>  	return ret;
>  }
> 
> -int
> -rte_event_eth_rx_adapter_queue_event_vector_config(
> -	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> -	struct rte_event_eth_rx_adapter_event_vector_config *config)
> -{
> -	struct rte_event_eth_rx_adapter_vector_limits limits;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> -	struct rte_eventdev *dev;
> -	uint32_t cap;
> -	int ret;
> -
> -	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> -	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> -
> -	rx_adapter = rxa_id_to_adapter(id);
> -	if ((rx_adapter == NULL) || (config == NULL))
> -		return -EINVAL;
> -
> -	dev = &rte_eventdevs[rx_adapter->eventdev_id];
> -	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
> -						eth_dev_id, &cap);
> -	if (ret) {
> -		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> -				 "eth port %" PRIu16,
> -				 id, eth_dev_id);
> -		return ret;
> -	}
> -
> -	if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
> -		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> -	}
> -
> -	ret = rte_event_eth_rx_adapter_vector_limits_get(
> -		rx_adapter->eventdev_id, eth_dev_id, &limits);
> -	if (ret) {
> -		RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
> -				 "eth port %" PRIu16,
> -				 rx_adapter->eventdev_id, eth_dev_id);
> -		return ret;
> -	}
> -
> -	if (config->vector_sz < limits.min_sz ||
> -	    config->vector_sz > limits.max_sz ||
> -	    config->vector_timeout_ns < limits.min_timeout_ns ||
> -	    config->vector_timeout_ns > limits.max_timeout_ns ||
> -	    config->vector_mp == NULL) {
> -		RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> -	}
> -	if (config->vector_mp->elt_size <
> -	    (sizeof(struct rte_event_vector) +
> -	     (sizeof(uintptr_t) * config->vector_sz))) {
> -		RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> -				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
> -				 eth_dev_id, id);
> -		return -EINVAL;
> -	}
> -
> -	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
> -		RTE_FUNC_PTR_OR_ERR_RET(
> -			*dev->dev_ops->eth_rx_adapter_event_vector_config,
> -			-ENOTSUP);
> -		ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
> -			dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
> -	} else {
> -		rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
> -					      rx_queue_id, config);
> -	}
> -
> -	return ret;
> -}
> -
>  int
>  rte_event_eth_rx_adapter_vector_limits_get(
>  	uint8_t dev_id, uint16_t eth_port_id, diff --git 
> a/lib/eventdev/rte_event_eth_rx_adapter.h 
> b/lib/eventdev/rte_event_eth_rx_adapter.h
> index 182dd2e5dd..3f8b362295 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -171,9 +171,6 @@ struct rte_event_eth_rx_adapter_queue_conf {
>  	 * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
>  	 * enqueued event.
>  	 */
> -};
> -
> -struct rte_event_eth_rx_adapter_event_vector_config {
>  	uint16_t vector_sz;
>  	/**<
>  	 * Indicates the maximum number for mbufs to combine and form a vector.
> @@ -548,33 +545,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
>  	uint8_t dev_id, uint16_t eth_port_id,
>  	struct rte_event_eth_rx_adapter_vector_limits *limits);
> 
> -/**
> - * Configure event vectorization for a given ethernet device queue, 
> that has
> - * been added to a event eth Rx adapter.
> - *
> - * @param id
> - *  The identifier of the ethernet Rx event adapter.
> - *
> - * @param eth_dev_id
> - *  The identifier of the ethernet device.
> - *
> - * @param rx_queue_id
> - *  Ethernet device receive queue index.
> - *  If rx_queue_id is -1, then all Rx queues configured for the 
> ethernet device
> - *  are configured with event vectorization.
> - *
> - * @param config
> - *  Event vector configuration structure.
> - *
> - * @return
> - *  - 0: Success, Receive queue configured correctly.
> - *  - <0: Error code on failure.
> - */
> -__rte_experimental
> -int rte_event_eth_rx_adapter_queue_event_vector_config(
> -	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> -	struct rte_event_eth_rx_adapter_event_vector_config *config);
> -
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 
> 88625621ec..cd86d2d908 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -142,7 +142,6 @@ EXPERIMENTAL {
>  	#added in 21.05
>  	rte_event_vector_pool_create;
>  	rte_event_eth_rx_adapter_vector_limits_get;
> -	rte_event_eth_rx_adapter_queue_event_vector_config;
>  	__rte_eventdev_trace_crypto_adapter_enqueue;
>  };
> 
> --
> 2.17.1
  
Jerin Jacob Sept. 7, 2021, 8:30 a.m. UTC | #3
On Fri, Aug 20, 2021 at 1:04 PM Naga Harish K, S V
<s.v.naga.harish.k@intel.com> wrote:
>
>
>
> -----Original Message-----
> From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Sent: Wednesday, August 18, 2021 1:53 PM
> To: pbhagavatula@marvell.com; jerinj@marvell.com; Ray Kinsella <mdr@ashroe.eu>; Shijith Thotton <sthotton@marvell.com>; Naga Harish K, S V <s.v.naga.harish.k@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v13] eventdev: simplify Rx adapter event vector config
>
> Hi Harish,
> Could you review this patch ?
>
> -Jay
>
>
> > -----Original Message-----
> > From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> > Sent: Wednesday, August 18, 2021 12:27 PM
> > To: jerinj@marvell.com; Ray Kinsella <mdr@ashroe.eu>; Pavan Nikhilesh
> > <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> > Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> > Cc: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH v13] eventdev: simplify Rx adapter event
> > vector config
> >
> > From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >
> > Include vector configuration into the structure
> > ``rte_event_eth_rx_adapter_queue_conf`` that is used to configure Rx
> > adapter ethernet device Rx queue parameters.
> > This simplifies event vector configuration as it avoids splitting
> > configuration per Rx queue.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> > Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
> > ---
> >  v13 Changes:
> >  - Fix cnxk driver compilation.
> >  v12 Changes:
> >  - Remove deprecation notice.
> >  - Remove unnecessary Change-Id.
> >
> >  app/test-eventdev/test_pipeline_common.c |  16 +-
> >  doc/guides/rel_notes/deprecation.rst     |   9 --
> >  drivers/event/cnxk/cn10k_eventdev.c      |  77 ----------
> >  drivers/event/cnxk/cnxk_eventdev_adptr.c |  41 ++++++
> >  lib/eventdev/eventdev_pmd.h              |  29 ----
> >  lib/eventdev/rte_event_eth_rx_adapter.c  | 179
> > ++++++++---------------  lib/eventdev/rte_event_eth_rx_adapter.h  |  30 ----
> >  lib/eventdev/version.map                 |   1 -
> >  8 files changed, 104 insertions(+), 278 deletions(-)
> >
> > diff --git a/app/test-eventdev/test_pipeline_common.c
> > b/app/test-eventdev/test_pipeline_common.c
> > index 6ee530d4cd..2697547641 100644
> > --- a/app/test-eventdev/test_pipeline_common.c
> > +++ b/app/test-eventdev/test_pipeline_common.c
> > @@ -332,7 +332,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
> >       uint16_t prod;
> >       struct rte_mempool *vector_pool = NULL;
> >       struct rte_event_eth_rx_adapter_queue_conf queue_conf;
> > -     struct rte_event_eth_rx_adapter_event_vector_config vec_conf;
> >
> >       memset(&queue_conf, 0,
> >                       sizeof(struct rte_event_eth_rx_adapter_queue_conf));
> > @@ -398,8 +397,12 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
> >                       }
> >
> >                       if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
> > +                             queue_conf.vector_sz = opt->vector_size;
> > +                             queue_conf.vector_timeout_ns =
> > +                                     opt->vector_tmo_nsec;
> >                               queue_conf.rx_queue_flags |=
> >                               RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
> > +                             queue_conf.vector_mp = vector_pool;
> >                       } else {
> >                               evt_err("Rx adapter doesn't support event vector");
> >                               return -EINVAL;
> > @@ -419,17 +422,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
> >                       return ret;
> >               }
> >
> > -             if (opt->ena_vector) {
> > -                     vec_conf.vector_sz = opt->vector_size;
> > -                     vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
> > -                     vec_conf.vector_mp = vector_pool;
> > -                     if (rte_event_eth_rx_adapter_queue_event_vector_config(
> > -                                 prod, prod, -1, &vec_conf) < 0) {
> > -                             evt_err("Failed to configure event vectorization for Rx adapter");
> > -                             return -EINVAL;
> > -                     }
> > -             }
> > -
> >               if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
> >                       uint32_t service_id = -1U;
> >
> > diff --git a/doc/guides/rel_notes/deprecation.rst
> > b/doc/guides/rel_notes/deprecation.rst
> > index 76a4abfd6b..2c37d7222c 100644
> > --- a/doc/guides/rel_notes/deprecation.rst
> > +++ b/doc/guides/rel_notes/deprecation.rst
> > @@ -257,15 +257,6 @@ Deprecation Notices
> >    An 8-byte reserved field will be added to the structure ``rte_event_timer`` to
> >    support future extensions.
> >
> > -* eventdev: The structure ``rte_event_eth_rx_adapter_queue_conf``
> > will be
> > -  extended to include
> > ``rte_event_eth_rx_adapter_event_vector_config`` elements
> > -  and the function
> > ``rte_event_eth_rx_adapter_queue_event_vector_config`` will
> > -  be removed in DPDK 21.11.
> > -
> > -  An application can enable event vectorization by passing the
> > desired vector
> > -  values to the function ``rte_event_eth_rx_adapter_queue_add`` using
> > -  the structure ``rte_event_eth_rx_adapter_queue_add``.
> > -
>
> The above paragraph on how to enable Rx adapter vectorization may need to be
> added in rx adapter documentation with correct structure name.


@Pavan Nikhilesh Could you send the next version based on @Naga
Harish K, S V's comments?
Marking as "Changes requested"



>
>
> >  * eventdev: Reserved bytes of ``rte_event_crypto_request`` is a space holder
> >    for ``response_info``. Both should be decoupled for better clarity.
> >    New space for ``response_info`` can be made by changing diff --git
> > a/drivers/event/cnxk/cn10k_eventdev.c
> > b/drivers/event/cnxk/cn10k_eventdev.c
> > index 6f37c5bd23..160192bb27 100644
> > --- a/drivers/event/cnxk/cn10k_eventdev.c
> > +++ b/drivers/event/cnxk/cn10k_eventdev.c
> > @@ -696,81 +696,6 @@ cn10k_sso_rx_adapter_vector_limits(
> >       return 0;
> >  }
> >
> > -static int
> > -cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
> > -                             uint16_t port_id, uint16_t rq_id, uint16_t sz,
> > -                             uint64_t tmo_ns, struct rte_mempool *vmp)
> > -{
> > -     struct roc_nix_rq *rq;
> > -
> > -     rq = &cnxk_eth_dev->rqs[rq_id];
> > -
> > -     if (!rq->sso_ena)
> > -             return -EINVAL;
> > -     if (rq->flow_tag_width == 0)
> > -             return -EINVAL;
> > -
> > -     rq->vwqe_ena = 1;
> > -     rq->vwqe_first_skip = 0;
> > -     rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
> > -     rq->vwqe_max_sz_exp = rte_log2_u32(sz);
> > -     rq->vwqe_wait_tmo =
> > -             tmo_ns /
> > -             ((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
> > -     rq->tag_mask = (port_id & 0xF) << 20;
> > -     rq->tag_mask |=
> > -             (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
> > -             << 24;
> > -
> > -     return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
> > -}
> > -
> > -static int
> > -cn10k_sso_rx_adapter_vector_config(
> > -     const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
> > -     int32_t rx_queue_id,
> > -     const struct rte_event_eth_rx_adapter_event_vector_config *config)
> > -{
> > -     struct cnxk_eth_dev *cnxk_eth_dev;
> > -     struct cnxk_sso_evdev *dev;
> > -     int i, rc;
> > -
> > -     rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
> > -     if (rc)
> > -             return -EINVAL;
> > -
> > -     dev = cnxk_sso_pmd_priv(event_dev);
> > -     cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
> > -     if (rx_queue_id < 0) {
> > -             for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
> > -                     cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
> > -                                           RTE_EVENT_TYPE_ETHDEV_VECTOR);
> > -                     rc = cnxk_sso_xae_reconfigure(
> > -                             (struct rte_eventdev *)(uintptr_t)event_dev);
> > -                     rc = cnxk_sso_rx_adapter_vwqe_enable(
> > -                             cnxk_eth_dev, eth_dev->data->port_id, i,
> > -                             config->vector_sz, config->vector_timeout_ns,
> > -                             config->vector_mp);
> > -                     if (rc)
> > -                             return -EINVAL;
> > -             }
> > -     } else {
> > -
> > -             cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
> > -                                   RTE_EVENT_TYPE_ETHDEV_VECTOR);
> > -             rc = cnxk_sso_xae_reconfigure(
> > -                     (struct rte_eventdev *)(uintptr_t)event_dev);
> > -             rc = cnxk_sso_rx_adapter_vwqe_enable(
> > -                     cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
> > -                     config->vector_sz, config->vector_timeout_ns,
> > -                     config->vector_mp);
> > -             if (rc)
> > -                     return -EINVAL;
> > -     }
> > -
> > -     return 0;
> > -}
> > -
> >  static int
> >  cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
> >                             const struct rte_eth_dev *eth_dev, uint32_t *caps) @@ -841,8
> > +766,6 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
> >       .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
> >
> >       .eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
> > -     .eth_rx_adapter_event_vector_config =
> > -             cn10k_sso_rx_adapter_vector_config,
> >
> >       .eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
> >       .eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add, diff
> > --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> > b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> > index baf2f2aa6b..80f5602286 100644
> > --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> > +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> > @@ -156,6 +156,35 @@ cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
> >       return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);  }
> >
> > +static int
> > +cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
> > +                             uint16_t port_id, uint16_t rq_id, uint16_t sz,
> > +                             uint64_t tmo_ns, struct rte_mempool *vmp) {
> > +     struct roc_nix_rq *rq;
> > +
> > +     rq = &cnxk_eth_dev->rqs[rq_id];
> > +
> > +     if (!rq->sso_ena)
> > +             return -EINVAL;
> > +     if (rq->flow_tag_width == 0)
> > +             return -EINVAL;
> > +
> > +     rq->vwqe_ena = 1;
> > +     rq->vwqe_first_skip = 0;
> > +     rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
> > +     rq->vwqe_max_sz_exp = rte_log2_u32(sz);
> > +     rq->vwqe_wait_tmo =
> > +             tmo_ns /
> > +             ((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
> > +     rq->tag_mask = (port_id & 0xF) << 20;
> > +     rq->tag_mask |=
> > +             (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
> > +             << 24;
> > +
> > +     return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0); }
> > +
> >  int
> >  cnxk_sso_rx_adapter_queue_add(
> >       const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
> > @@ -183,6 +212,18 @@ cnxk_sso_rx_adapter_queue_add(
> >                       &queue_conf->ev,
> >                       !!(queue_conf->rx_queue_flags &
> >                          RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
> > +             if (queue_conf->rx_queue_flags &
> > +                 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> > +                     cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
> > +                                           RTE_EVENT_TYPE_ETHDEV_VECTOR);
> > +                     rc |= cnxk_sso_xae_reconfigure(
> > +                             (struct rte_eventdev *)(uintptr_t)event_dev);
> > +                     rc |= cnxk_sso_rx_adapter_vwqe_enable(
> > +                             cnxk_eth_dev, port, rx_queue_id,
> > +                             queue_conf->vector_sz,
> > +                             queue_conf->vector_timeout_ns,
> > +                             queue_conf->vector_mp);
> > +             }
> >               rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
> >                                     rxq_sp->qconf.mp->pool_id, true,
> >                                     dev->force_ena_bp);
> > diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> > index 0f724ac85d..63b3bc4b51 100644
> > --- a/lib/eventdev/eventdev_pmd.h
> > +++ b/lib/eventdev/eventdev_pmd.h
> > @@ -667,32 +667,6 @@ typedef int (*eventdev_eth_rx_adapter_vector_limits_get_t)(
> >       const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
> >       struct rte_event_eth_rx_adapter_vector_limits *limits);
> >
> > -struct rte_event_eth_rx_adapter_event_vector_config;
> > -/**
> > - * Enable event vector on an given Rx queue of a ethernet devices belonging to
> > - * the Rx adapter.
> > - *
> > - * @param dev
> > - *   Event device pointer
> > - *
> > - * @param eth_dev
> > - *   Ethernet device pointer
> > - *
> > - * @param rx_queue_id
> > - *   The Rx queue identifier
> > - *
> > - * @param config
> > - *   Pointer to the event vector configuration structure.
> > - *
> > - * @return
> > - *   - 0: Success.
> > - *   - <0: Error code returned by the driver function.
> > - */
> > -typedef int (*eventdev_eth_rx_adapter_event_vector_config_t)(
> > -     const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
> > -     int32_t rx_queue_id,
> > -     const struct rte_event_eth_rx_adapter_event_vector_config *config);
> > -
> >  typedef uint32_t rte_event_pmd_selftest_seqn_t;
> >  extern int rte_event_pmd_selftest_seqn_dynfield_offset;
> >
> > @@ -1118,9 +1092,6 @@ struct rte_eventdev_ops {
> >       eventdev_eth_rx_adapter_vector_limits_get_t
> >               eth_rx_adapter_vector_limits_get;
> >       /**< Get event vector limits for the Rx adapter */
> > -     eventdev_eth_rx_adapter_event_vector_config_t
> > -             eth_rx_adapter_event_vector_config;
> > -     /**< Configure Rx adapter with event vector */
> >
> >       eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
> >       /**< Get timer adapter capabilities */
> > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> > index 13dfb28401..2b2dd688fc 100644
> > --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> > @@ -1895,6 +1895,24 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> >       } else
> >               qi_ev->flow_id = 0;
> >
> > +     if (conf->rx_queue_flags &
> > +         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> > +             queue_info->ena_vector = 1;
> > +             qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> > +             rxa_set_vector_data(queue_info, conf->vector_sz,
> > +                                 conf->vector_timeout_ns, conf->vector_mp,
> > +                                 rx_queue_id, dev_info->dev->data->port_id);
> > +             rx_adapter->ena_vector = 1;
> > +             rx_adapter->vector_tmo_ticks =
> > +                     rx_adapter->vector_tmo_ticks ?
> > +                                   RTE_MIN(queue_info->vector_data
> > +                                                     .vector_timeout_ticks >>
> > +                                             1,
> > +                                     rx_adapter->vector_tmo_ticks) :
> > +                             queue_info->vector_data.vector_timeout_ticks >>
> > +                                     1;
> > +     }
> > +
> >       rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
> >       if (rxa_polled_queue(dev_info, rx_queue_id)) {
> >               rx_adapter->num_rx_polled += !pollq;
> > @@ -1920,42 +1938,6 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> >       }
> >  }
> >
> > -static void
> > -rxa_sw_event_vector_configure(
> > -     struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> > -     int rx_queue_id,
> > -     const struct rte_event_eth_rx_adapter_event_vector_config *config)
> > -{
> > -     struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
> > -     struct eth_rx_queue_info *queue_info;
> > -     struct rte_event *qi_ev;
> > -
> > -     if (rx_queue_id == -1) {
> > -             uint16_t nb_rx_queues;
> > -             uint16_t i;
> > -
> > -             nb_rx_queues = dev_info->dev->data->nb_rx_queues;
> > -             for (i = 0; i < nb_rx_queues; i++)
> > -                     rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
> > -                                                   config);
> > -             return;
> > -     }
> > -
> > -     queue_info = &dev_info->rx_queue[rx_queue_id];
> > -     qi_ev = (struct rte_event *)&queue_info->event;
> > -     queue_info->ena_vector = 1;
> > -     qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> > -     rxa_set_vector_data(queue_info, config->vector_sz,
> > -                         config->vector_timeout_ns, config->vector_mp,
> > -                         rx_queue_id, dev_info->dev->data->port_id);
> > -     rx_adapter->ena_vector = 1;
> > -     rx_adapter->vector_tmo_ticks =
> > -             rx_adapter->vector_tmo_ticks ?
> > -                           RTE_MIN(config->vector_timeout_ns >> 1,
> > -                             rx_adapter->vector_tmo_ticks) :
> > -                           config->vector_timeout_ns >> 1;
> > -}
> > -
> >  static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> >               uint16_t eth_dev_id,
> >               int rx_queue_id,
> > @@ -2270,6 +2252,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
> >       struct rte_event_eth_rx_adapter *rx_adapter;
> >       struct rte_eventdev *dev;
> >       struct eth_device_info *dev_info;
> > +     struct rte_event_eth_rx_adapter_vector_limits limits;
> >
> >       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> >       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> > @@ -2297,13 +2280,46 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
> >               return -EINVAL;
> >       }
> >
> > -     if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&
> > -         (queue_conf->rx_queue_flags &
> > -          RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {
> > -             RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> > -                              " eth port: %" PRIu16 " adapter id: %" PRIu8,
> > -                              eth_dev_id, id);
> > -             return -EINVAL;
> > +     if (queue_conf->rx_queue_flags &
> > +         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> > +
> > +             if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> > +                     RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> > +                                      " eth port: %" PRIu16
> > +                                      " adapter id: %" PRIu8,
> > +                                      eth_dev_id, id);
> > +                     return -EINVAL;
> > +             }
> > +
> > +             ret = rte_event_eth_rx_adapter_vector_limits_get(
> > +                     rx_adapter->eventdev_id, eth_dev_id, &limits);
> > +             if (ret < 0) {
> > +                     RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
> > +                                      " eth port: %" PRIu16
> > +                                      " adapter id: %" PRIu8,
> > +                                      eth_dev_id, id);
> > +                     return -EINVAL;
> > +             }
> > +             if (queue_conf->vector_sz < limits.min_sz ||
> > +                 queue_conf->vector_sz > limits.max_sz ||
> > +                 queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
> > +                 queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
> > +                 queue_conf->vector_mp == NULL) {
> > +                     RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > +                                      " eth port: %" PRIu16
> > +                                      " adapter id: %" PRIu8,
> > +                                      eth_dev_id, id);
> > +                     return -EINVAL;
> > +             }
> > +             if (queue_conf->vector_mp->elt_size <
> > +                 (sizeof(struct rte_event_vector) +
> > +                  (sizeof(uintptr_t) * queue_conf->vector_sz))) {
> > +                     RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > +                                      " eth port: %" PRIu16
> > +                                      " adapter id: %" PRIu8,
> > +                                      eth_dev_id, id);
> > +                     return -EINVAL;
> > +             }
> >       }
> >
> >       if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
> > @@ -2499,83 +2515,6 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
> >       return ret;
> >  }
> >
> > -int
> > -rte_event_eth_rx_adapter_queue_event_vector_config(
> > -     uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> > -     struct rte_event_eth_rx_adapter_event_vector_config *config)
> > -{
> > -     struct rte_event_eth_rx_adapter_vector_limits limits;
> > -     struct rte_event_eth_rx_adapter *rx_adapter;
> > -     struct rte_eventdev *dev;
> > -     uint32_t cap;
> > -     int ret;
> > -
> > -     RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> > -     RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> > -
> > -     rx_adapter = rxa_id_to_adapter(id);
> > -     if ((rx_adapter == NULL) || (config == NULL))
> > -             return -EINVAL;
> > -
> > -     dev = &rte_eventdevs[rx_adapter->eventdev_id];
> > -     ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
> > -                                             eth_dev_id, &cap);
> > -     if (ret) {
> > -             RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> > -                              "eth port %" PRIu16,
> > -                              id, eth_dev_id);
> > -             return ret;
> > -     }
> > -
> > -     if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
> > -             RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> > -                              " eth port: %" PRIu16 " adapter id: %" PRIu8,
> > -                              eth_dev_id, id);
> > -             return -EINVAL;
> > -     }
> > -
> > -     ret = rte_event_eth_rx_adapter_vector_limits_get(
> > -             rx_adapter->eventdev_id, eth_dev_id, &limits);
> > -     if (ret) {
> > -             RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
> > -                              "eth port %" PRIu16,
> > -                              rx_adapter->eventdev_id, eth_dev_id);
> > -             return ret;
> > -     }
> > -
> > -     if (config->vector_sz < limits.min_sz ||
> > -         config->vector_sz > limits.max_sz ||
> > -         config->vector_timeout_ns < limits.min_timeout_ns ||
> > -         config->vector_timeout_ns > limits.max_timeout_ns ||
> > -         config->vector_mp == NULL) {
> > -             RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > -                              " eth port: %" PRIu16 " adapter id: %" PRIu8,
> > -                              eth_dev_id, id);
> > -             return -EINVAL;
> > -     }
> > -     if (config->vector_mp->elt_size <
> > -         (sizeof(struct rte_event_vector) +
> > -          (sizeof(uintptr_t) * config->vector_sz))) {
> > -             RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > -                              " eth port: %" PRIu16 " adapter id: %" PRIu8,
> > -                              eth_dev_id, id);
> > -             return -EINVAL;
> > -     }
> > -
> > -     if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
> > -             RTE_FUNC_PTR_OR_ERR_RET(
> > -                     *dev->dev_ops->eth_rx_adapter_event_vector_config,
> > -                     -ENOTSUP);
> > -             ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
> > -                     dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
> > -     } else {
> > -             rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
> > -                                           rx_queue_id, config);
> > -     }
> > -
> > -     return ret;
> > -}
> > -
> >  int
> >  rte_event_eth_rx_adapter_vector_limits_get(
> >       uint8_t dev_id, uint16_t eth_port_id,
> > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> > index 182dd2e5dd..3f8b362295 100644
> > --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> > @@ -171,9 +171,6 @@ struct rte_event_eth_rx_adapter_queue_conf {
> >        * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
> >        * enqueued event.
> >        */
> > -};
> > -
> > -struct rte_event_eth_rx_adapter_event_vector_config {
> >       uint16_t vector_sz;
> >       /**<
> >        * Indicates the maximum number for mbufs to combine and form a vector.
> > @@ -548,33 +545,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
> >       uint8_t dev_id, uint16_t eth_port_id,
> >       struct rte_event_eth_rx_adapter_vector_limits *limits);
> >
> > -/**
> > - * Configure event vectorization for a given ethernet device queue, that has
> > - * been added to a event eth Rx adapter.
> > - *
> > - * @param id
> > - *  The identifier of the ethernet Rx event adapter.
> > - *
> > - * @param eth_dev_id
> > - *  The identifier of the ethernet device.
> > - *
> > - * @param rx_queue_id
> > - *  Ethernet device receive queue index.
> > - *  If rx_queue_id is -1, then all Rx queues configured for the ethernet device
> > - *  are configured with event vectorization.
> > - *
> > - * @param config
> > - *  Event vector configuration structure.
> > - *
> > - * @return
> > - *  - 0: Success, Receive queue configured correctly.
> > - *  - <0: Error code on failure.
> > - */
> > -__rte_experimental
> > -int rte_event_eth_rx_adapter_queue_event_vector_config(
> > -     uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> > -     struct rte_event_eth_rx_adapter_event_vector_config *config);
> > -
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> > index 88625621ec..cd86d2d908 100644
> > --- a/lib/eventdev/version.map
> > +++ b/lib/eventdev/version.map
> > @@ -142,7 +142,6 @@ EXPERIMENTAL {
> >       #added in 21.05
> >       rte_event_vector_pool_create;
> >       rte_event_eth_rx_adapter_vector_limits_get;
> > -     rte_event_eth_rx_adapter_queue_event_vector_config;
> >       __rte_eventdev_trace_crypto_adapter_enqueue;
> >  };
> >
> > --
> > 2.17.1
>
  

Patch

diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 6ee530d4cd..2697547641 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -332,7 +332,6 @@  pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 	uint16_t prod;
 	struct rte_mempool *vector_pool = NULL;
 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
-	struct rte_event_eth_rx_adapter_event_vector_config vec_conf;

 	memset(&queue_conf, 0,
 			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
@@ -398,8 +397,12 @@  pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 			}

 			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+				queue_conf.vector_sz = opt->vector_size;
+				queue_conf.vector_timeout_ns =
+					opt->vector_tmo_nsec;
 				queue_conf.rx_queue_flags |=
 				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+				queue_conf.vector_mp = vector_pool;
 			} else {
 				evt_err("Rx adapter doesn't support event vector");
 				return -EINVAL;
@@ -419,17 +422,6 @@  pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 			return ret;
 		}

-		if (opt->ena_vector) {
-			vec_conf.vector_sz = opt->vector_size;
-			vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
-			vec_conf.vector_mp = vector_pool;
-			if (rte_event_eth_rx_adapter_queue_event_vector_config(
-				    prod, prod, -1, &vec_conf) < 0) {
-				evt_err("Failed to configure event vectorization for Rx adapter");
-				return -EINVAL;
-			}
-		}
-
 		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
 			uint32_t service_id = -1U;

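
The test-app hunks above capture the point of the change: the vector parameters now travel inside ``rte_event_eth_rx_adapter_queue_conf`` itself, so the separate vec_conf fill-in and the extra rte_event_eth_rx_adapter_queue_event_vector_config() call disappear. A minimal sketch of the resulting application flow, assuming the adapter, event queue and vector mempool already exist (all ids and the pool pointer are placeholders):

#include <errno.h>
#include <string.h>

#include <rte_event_eth_rx_adapter.h>
#include <rte_eventdev.h>
#include <rte_mempool.h>

/* Sketch only: add all Rx queues of 'eth_port' to adapter 'adapter_id',
 * enabling event vectorization when the PMD advertises the capability. */
static int
add_vector_rx_queues(uint8_t adapter_id, uint8_t dev_id, uint16_t eth_port,
		     struct rte_mempool *vector_pool)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	uint32_t cap;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = 0;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &cap))
		return -EINVAL;

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
		/* Vector parameters now ride in the same queue_conf. */
		qconf.rx_queue_flags |=
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
		qconf.vector_sz = 64;
		qconf.vector_timeout_ns = 100 * 1000;
		qconf.vector_mp = vector_pool;
	}

	/* One call now covers queue addition and vector configuration. */
	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1,
						  &qconf);
}
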
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 76a4abfd6b..2c37d7222c 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -257,15 +257,6 @@  Deprecation Notices
   An 8-byte reserved field will be added to the structure ``rte_event_timer`` to
   support future extensions.

-* eventdev: The structure ``rte_event_eth_rx_adapter_queue_conf`` will be
-  extended to include ``rte_event_eth_rx_adapter_event_vector_config`` elements
-  and the function ``rte_event_eth_rx_adapter_queue_event_vector_config`` will
-  be removed in DPDK 21.11.
-
-  An application can enable event vectorization by passing the desired vector
-  values to the function ``rte_event_eth_rx_adapter_queue_add`` using
-  the structure ``rte_event_eth_rx_adapter_queue_add``.
-
 * eventdev: Reserved bytes of ``rte_event_crypto_request`` is a space holder
   for ``response_info``. Both should be decoupled for better clarity.
   New space for ``response_info`` can be made by changing
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 6f37c5bd23..160192bb27 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -696,81 +696,6 @@  cn10k_sso_rx_adapter_vector_limits(
 	return 0;
 }

-static int
-cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
-				uint16_t port_id, uint16_t rq_id, uint16_t sz,
-				uint64_t tmo_ns, struct rte_mempool *vmp)
-{
-	struct roc_nix_rq *rq;
-
-	rq = &cnxk_eth_dev->rqs[rq_id];
-
-	if (!rq->sso_ena)
-		return -EINVAL;
-	if (rq->flow_tag_width == 0)
-		return -EINVAL;
-
-	rq->vwqe_ena = 1;
-	rq->vwqe_first_skip = 0;
-	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
-	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
-	rq->vwqe_wait_tmo =
-		tmo_ns /
-		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
-	rq->tag_mask = (port_id & 0xF) << 20;
-	rq->tag_mask |=
-		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
-		<< 24;
-
-	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
-}
-
-static int
-cn10k_sso_rx_adapter_vector_config(
-	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_event_vector_config *config)
-{
-	struct cnxk_eth_dev *cnxk_eth_dev;
-	struct cnxk_sso_evdev *dev;
-	int i, rc;
-
-	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
-	if (rc)
-		return -EINVAL;
-
-	dev = cnxk_sso_pmd_priv(event_dev);
-	cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
-	if (rx_queue_id < 0) {
-		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-			cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
-					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
-			rc = cnxk_sso_xae_reconfigure(
-				(struct rte_eventdev *)(uintptr_t)event_dev);
-			rc = cnxk_sso_rx_adapter_vwqe_enable(
-				cnxk_eth_dev, eth_dev->data->port_id, i,
-				config->vector_sz, config->vector_timeout_ns,
-				config->vector_mp);
-			if (rc)
-				return -EINVAL;
-		}
-	} else {
-
-		cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
-				      RTE_EVENT_TYPE_ETHDEV_VECTOR);
-		rc = cnxk_sso_xae_reconfigure(
-			(struct rte_eventdev *)(uintptr_t)event_dev);
-		rc = cnxk_sso_rx_adapter_vwqe_enable(
-			cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
-			config->vector_sz, config->vector_timeout_ns,
-			config->vector_mp);
-		if (rc)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int
 cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
 			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
@@ -841,8 +766,6 @@  static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

 	.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
-	.eth_rx_adapter_event_vector_config =
-		cn10k_sso_rx_adapter_vector_config,

 	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
 	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
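
With the per-driver vector-config op removed, the only eventdev hooks left for vectorization are the capability flag and the limits query (the callback typedef stays in eventdev_pmd.h); the per-queue programming itself moves into the driver's eth_rx_adapter_queue_add path, as the cnxk hunk below shows. A hedged sketch of a limits callback in a hypothetical PMD; the numbers are illustrative and do not describe cn10k hardware:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_eventdev.h>

/* Illustrative PMD callback reporting event vector limits. */
static int
example_sso_rx_adapter_vector_limits(
	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
	struct rte_event_eth_rx_adapter_vector_limits *limits)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	limits->log2_sz = 1;              /* vector_sz must be a power of two */
	limits->min_sz = 1 << 3;
	limits->max_sz = 1 << 10;
	limits->min_timeout_ns = 1000;
	limits->max_timeout_ns = 10000000;
	return 0;
}
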
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index baf2f2aa6b..80f5602286 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -156,6 +156,35 @@  cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
 	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
 }

+static int
+cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
+				uint16_t port_id, uint16_t rq_id, uint16_t sz,
+				uint64_t tmo_ns, struct rte_mempool *vmp)
+{
+	struct roc_nix_rq *rq;
+
+	rq = &cnxk_eth_dev->rqs[rq_id];
+
+	if (!rq->sso_ena)
+		return -EINVAL;
+	if (rq->flow_tag_width == 0)
+		return -EINVAL;
+
+	rq->vwqe_ena = 1;
+	rq->vwqe_first_skip = 0;
+	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
+	rq->vwqe_wait_tmo =
+		tmo_ns /
+		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
+	rq->tag_mask = (port_id & 0xF) << 20;
+	rq->tag_mask |=
+		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
+		<< 24;
+
+	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
 int
 cnxk_sso_rx_adapter_queue_add(
 	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -183,6 +212,18 @@  cnxk_sso_rx_adapter_queue_add(
 			&queue_conf->ev,
 			!!(queue_conf->rx_queue_flags &
 			   RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
+		if (queue_conf->rx_queue_flags &
+		    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+			cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
+					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
+			rc |= cnxk_sso_xae_reconfigure(
+				(struct rte_eventdev *)(uintptr_t)event_dev);
+			rc |= cnxk_sso_rx_adapter_vwqe_enable(
+				cnxk_eth_dev, port, rx_queue_id,
+				queue_conf->vector_sz,
+				queue_conf->vector_timeout_ns,
+				queue_conf->vector_mp);
+		}
 		rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 				      rxq_sp->qconf.mp->pool_id, true,
 				      dev->force_ena_bp);
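
The helper moved here from cn10k_eventdev.c now runs inside the common queue-add path, so XAE accounting, reconfiguration and VWQE enablement happen in one place. The tag and timeout programming is dense in diff form; the sketch below only restates what cnxk_sso_rx_adapter_vwqe_enable() computes, with the hardware interval passed in as a plain argument in place of roc_nix_get_vwqe_interval():

#include <stdint.h>

#include <rte_common.h>
#include <rte_eventdev.h>

/* Readability aid only: mirrors the values the helper above programs
 * into the RQ. */
static inline void
vwqe_params_sketch(uint16_t port_id, uint16_t sz, uint64_t tmo_ns,
		   uint64_t vwqe_interval, uint64_t *tag_mask,
		   uint64_t *wait_tmo, uint32_t *max_sz_exp)
{
	/* Low nibble of the ethdev port id lands in tag bits [23:20]. */
	*tag_mask = (uint64_t)(port_id & 0xF) << 20;
	/* High nibble of the port id plus RTE_EVENT_TYPE_ETHDEV_VECTOR
	 * occupy tag bits [31:24], marking the work as a vector event. */
	*tag_mask |= (uint64_t)(((port_id >> 4) & 0xF) |
				(RTE_EVENT_TYPE_ETHDEV_VECTOR << 4)) << 24;
	/* The timeout is converted from nanoseconds to hardware units of
	 * (vwqe_interval + 1) * 100 ns. */
	*wait_tmo = tmo_ns / ((vwqe_interval + 1) * 100);
	/* The maximum vector size is programmed as a log2 exponent. */
	*max_sz_exp = rte_log2_u32(sz);
}
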
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 0f724ac85d..63b3bc4b51 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -667,32 +667,6 @@  typedef int (*eventdev_eth_rx_adapter_vector_limits_get_t)(
 	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);

-struct rte_event_eth_rx_adapter_event_vector_config;
-/**
- * Enable event vector on an given Rx queue of a ethernet devices belonging to
- * the Rx adapter.
- *
- * @param dev
- *   Event device pointer
- *
- * @param eth_dev
- *   Ethernet device pointer
- *
- * @param rx_queue_id
- *   The Rx queue identifier
- *
- * @param config
- *   Pointer to the event vector configuration structure.
- *
- * @return
- *   - 0: Success.
- *   - <0: Error code returned by the driver function.
- */
-typedef int (*eventdev_eth_rx_adapter_event_vector_config_t)(
-	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_event_vector_config *config);
-
 typedef uint32_t rte_event_pmd_selftest_seqn_t;
 extern int rte_event_pmd_selftest_seqn_dynfield_offset;

@@ -1118,9 +1092,6 @@  struct rte_eventdev_ops {
 	eventdev_eth_rx_adapter_vector_limits_get_t
 		eth_rx_adapter_vector_limits_get;
 	/**< Get event vector limits for the Rx adapter */
-	eventdev_eth_rx_adapter_event_vector_config_t
-		eth_rx_adapter_event_vector_config;
-	/**< Configure Rx adapter with event vector */

 	eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
 	/**< Get timer adapter capabilities */
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 13dfb28401..2b2dd688fc 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -1895,6 +1895,24 @@  rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	} else
 		qi_ev->flow_id = 0;

+	if (conf->rx_queue_flags &
+	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+		queue_info->ena_vector = 1;
+		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
+		rxa_set_vector_data(queue_info, conf->vector_sz,
+				    conf->vector_timeout_ns, conf->vector_mp,
+				    rx_queue_id, dev_info->dev->data->port_id);
+		rx_adapter->ena_vector = 1;
+		rx_adapter->vector_tmo_ticks =
+			rx_adapter->vector_tmo_ticks ?
+				      RTE_MIN(queue_info->vector_data
+							.vector_timeout_ticks >>
+						1,
+					rx_adapter->vector_tmo_ticks) :
+				queue_info->vector_data.vector_timeout_ticks >>
+					1;
+	}
+
 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
 	if (rxa_polled_queue(dev_info, rx_queue_id)) {
 		rx_adapter->num_rx_polled += !pollq;
@@ -1920,42 +1938,6 @@  rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	}
 }

-static void
-rxa_sw_event_vector_configure(
-	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-	int rx_queue_id,
-	const struct rte_event_eth_rx_adapter_event_vector_config *config)
-{
-	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
-	struct eth_rx_queue_info *queue_info;
-	struct rte_event *qi_ev;
-
-	if (rx_queue_id == -1) {
-		uint16_t nb_rx_queues;
-		uint16_t i;
-
-		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
-		for (i = 0; i < nb_rx_queues; i++)
-			rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
-						      config);
-		return;
-	}
-
-	queue_info = &dev_info->rx_queue[rx_queue_id];
-	qi_ev = (struct rte_event *)&queue_info->event;
-	queue_info->ena_vector = 1;
-	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
-	rxa_set_vector_data(queue_info, config->vector_sz,
-			    config->vector_timeout_ns, config->vector_mp,
-			    rx_queue_id, dev_info->dev->data->port_id);
-	rx_adapter->ena_vector = 1;
-	rx_adapter->vector_tmo_ticks =
-		rx_adapter->vector_tmo_ticks ?
-			      RTE_MIN(config->vector_timeout_ns >> 1,
-				rx_adapter->vector_tmo_ticks) :
-			      config->vector_timeout_ns >> 1;
-}
-
 static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 		uint16_t eth_dev_id,
 		int rx_queue_id,
@@ -2270,6 +2252,7 @@  rte_event_eth_rx_adapter_queue_add(uint8_t id,
 	struct rte_event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
+	struct rte_event_eth_rx_adapter_vector_limits limits;

 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
@@ -2297,13 +2280,46 @@  rte_event_eth_rx_adapter_queue_add(uint8_t id,
 		return -EINVAL;
 	}

-	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&
-	    (queue_conf->rx_queue_flags &
-	     RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {
-		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
-				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
-				 eth_dev_id, id);
-		return -EINVAL;
+	if (queue_conf->rx_queue_flags &
+	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+
+		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
+			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+					 " eth port: %" PRIu16
+					 " adapter id: %" PRIu8,
+					 eth_dev_id, id);
+			return -EINVAL;
+		}
+
+		ret = rte_event_eth_rx_adapter_vector_limits_get(
+			rx_adapter->eventdev_id, eth_dev_id, &limits);
+		if (ret < 0) {
+			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
+					 " eth port: %" PRIu16
+					 " adapter id: %" PRIu8,
+					 eth_dev_id, id);
+			return -EINVAL;
+		}
+		if (queue_conf->vector_sz < limits.min_sz ||
+		    queue_conf->vector_sz > limits.max_sz ||
+		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
+		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
+		    queue_conf->vector_mp == NULL) {
+			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+					 " eth port: %" PRIu16
+					 " adapter id: %" PRIu8,
+					 eth_dev_id, id);
+			return -EINVAL;
+		}
+		if (queue_conf->vector_mp->elt_size <
+		    (sizeof(struct rte_event_vector) +
+		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
+			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+					 " eth port: %" PRIu16
+					 " adapter id: %" PRIu8,
+					 eth_dev_id, id);
+			return -EINVAL;
+		}
 	}

 	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
@@ -2499,83 +2515,6 @@  rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 	return ret;
 }

-int
-rte_event_eth_rx_adapter_queue_event_vector_config(
-	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
-	struct rte_event_eth_rx_adapter_event_vector_config *config)
-{
-	struct rte_event_eth_rx_adapter_vector_limits limits;
-	struct rte_event_eth_rx_adapter *rx_adapter;
-	struct rte_eventdev *dev;
-	uint32_t cap;
-	int ret;
-
-	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
-
-	rx_adapter = rxa_id_to_adapter(id);
-	if ((rx_adapter == NULL) || (config == NULL))
-		return -EINVAL;
-
-	dev = &rte_eventdevs[rx_adapter->eventdev_id];
-	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
-						eth_dev_id, &cap);
-	if (ret) {
-		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
-				 "eth port %" PRIu16,
-				 id, eth_dev_id);
-		return ret;
-	}
-
-	if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
-		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
-				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
-				 eth_dev_id, id);
-		return -EINVAL;
-	}
-
-	ret = rte_event_eth_rx_adapter_vector_limits_get(
-		rx_adapter->eventdev_id, eth_dev_id, &limits);
-	if (ret) {
-		RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
-				 "eth port %" PRIu16,
-				 rx_adapter->eventdev_id, eth_dev_id);
-		return ret;
-	}
-
-	if (config->vector_sz < limits.min_sz ||
-	    config->vector_sz > limits.max_sz ||
-	    config->vector_timeout_ns < limits.min_timeout_ns ||
-	    config->vector_timeout_ns > limits.max_timeout_ns ||
-	    config->vector_mp == NULL) {
-		RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
-				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
-				 eth_dev_id, id);
-		return -EINVAL;
-	}
-	if (config->vector_mp->elt_size <
-	    (sizeof(struct rte_event_vector) +
-	     (sizeof(uintptr_t) * config->vector_sz))) {
-		RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
-				 " eth port: %" PRIu16 " adapter id: %" PRIu8,
-				 eth_dev_id, id);
-		return -EINVAL;
-	}
-
-	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
-		RTE_FUNC_PTR_OR_ERR_RET(
-			*dev->dev_ops->eth_rx_adapter_event_vector_config,
-			-ENOTSUP);
-		ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
-			dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
-	} else {
-		rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
-					      rx_queue_id, config);
-	}
-
-	return ret;
-}
-
 int
 rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
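
The checks that used to live in the removed rte_event_eth_rx_adapter_queue_event_vector_config() now run inside rte_event_eth_rx_adapter_queue_add(): the size and timeout must fall within the PMD-reported limits, and the mempool elements must hold an rte_event_vector plus vector_sz pointers. rte_event_vector_pool_create() (already exported, see version.map below) sizes elements exactly that way; a hedged sketch of a pool that passes the new check, with the name and counts as placeholders:

#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Sketch: element size becomes sizeof(struct rte_event_vector) +
 * nb_elem * sizeof(uintptr_t), which is what queue_add validates. */
static struct rte_mempool *
make_vector_pool(uint16_t vector_sz)
{
	return rte_event_vector_pool_create("rxa_vec_pool",
					    16384,     /* number of vectors */
					    0,         /* cache size */
					    vector_sz, /* mbufs per vector */
					    rte_socket_id());
}
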
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 182dd2e5dd..3f8b362295 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -171,9 +171,6 @@  struct rte_event_eth_rx_adapter_queue_conf {
 	 * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
 	 * enqueued event.
 	 */
-};
-
-struct rte_event_eth_rx_adapter_event_vector_config {
 	uint16_t vector_sz;
 	/**<
 	 * Indicates the maximum number for mbufs to combine and form a vector.
@@ -548,33 +545,6 @@  int rte_event_eth_rx_adapter_vector_limits_get(
 	uint8_t dev_id, uint16_t eth_port_id,
 	struct rte_event_eth_rx_adapter_vector_limits *limits);

-/**
- * Configure event vectorization for a given ethernet device queue, that has
- * been added to a event eth Rx adapter.
- *
- * @param id
- *  The identifier of the ethernet Rx event adapter.
- *
- * @param eth_dev_id
- *  The identifier of the ethernet device.
- *
- * @param rx_queue_id
- *  Ethernet device receive queue index.
- *  If rx_queue_id is -1, then all Rx queues configured for the ethernet device
- *  are configured with event vectorization.
- *
- * @param config
- *  Event vector configuration structure.
- *
- * @return
- *  - 0: Success, Receive queue configured correctly.
- *  - <0: Error code on failure.
- */
-__rte_experimental
-int rte_event_eth_rx_adapter_queue_event_vector_config(
-	uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
-	struct rte_event_eth_rx_adapter_event_vector_config *config);
-
 #ifdef __cplusplus
 }
 #endif
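
With vector_sz, vector_timeout_ns and vector_mp folded into ``rte_event_eth_rx_adapter_queue_conf``, the natural application pattern is to query the limits first and clamp the requested values before calling rte_event_eth_rx_adapter_queue_add(). A sketch under that assumption, with vector_mp filled in separately (for instance from the pool helper shown earlier):

#include <rte_common.h>
#include <rte_event_eth_rx_adapter.h>

/* Sketch: clamp requested vector parameters to the PMD-reported limits. */
static int
fill_vector_conf(uint8_t dev_id, uint16_t eth_port, uint16_t want_sz,
		 uint64_t want_tmo_ns,
		 struct rte_event_eth_rx_adapter_queue_conf *qconf)
{
	struct rte_event_eth_rx_adapter_vector_limits limits;
	int ret;

	ret = rte_event_eth_rx_adapter_vector_limits_get(dev_id, eth_port,
							 &limits);
	if (ret)
		return ret;

	qconf->rx_queue_flags |= RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
	qconf->vector_sz = RTE_MAX(limits.min_sz,
				   RTE_MIN(want_sz, limits.max_sz));
	qconf->vector_timeout_ns =
		RTE_MAX(limits.min_timeout_ns,
			RTE_MIN(want_tmo_ns, limits.max_timeout_ns));
	return 0;
}
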
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 88625621ec..cd86d2d908 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -142,7 +142,6 @@  EXPERIMENTAL {
 	#added in 21.05
 	rte_event_vector_pool_create;
 	rte_event_eth_rx_adapter_vector_limits_get;
-	rte_event_eth_rx_adapter_queue_event_vector_config;
 	__rte_eventdev_trace_crypto_adapter_enqueue;
 };
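
For completeness, the consumer side is untouched by this patch: once vectorization is enabled, the adapter (or the PMD on the internal-port path) delivers events whose payload is an rte_event_vector of mbufs. A hedged sketch of the worker-side handling, where process_mbuf() merely stands in for application logic and the vector is returned to its mempool after use:

#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Stands in for application packet processing. */
static void
process_mbuf(struct rte_mbuf *m)
{
	rte_pktmbuf_free(m);
}

static void
handle_event(struct rte_event *ev)
{
	if (ev->event_type == RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR ||
	    ev->event_type == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
		struct rte_event_vector *vec = ev->vec;
		uint16_t i;

		for (i = 0; i < vec->nb_elem; i++)
			process_mbuf(vec->mbufs[i]);

		/* Return the vector object to the mempool it came from. */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	} else {
		process_mbuf(ev->mbuf);
	}
}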