[RFC,10/15] eventdev: remove rte prefix for internal structs

Message ID 20210823194020.1229-10-pbhagavatula@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Headers
Series [RFC,01/15] eventdev: make driver interface as internal

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Pavan Nikhilesh Bhagavatula Aug. 23, 2021, 7:40 p.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Remove the rte_ prefix from the structs rte_eth_event_enqueue_buffer,
rte_event_eth_rx_adapter and rte_event_crypto_adapter, as they are
only used within rte_event_eth_rx_adapter.c and
rte_event_crypto_adapter.c.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
 lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
 lib/eventdev/rte_eventdev.h             |   2 +-
 3 files changed, 141 insertions(+), 176 deletions(-)
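
For context, the pattern applied throughout the series looks like the
sketch below: the adapter struct never leaves the .c file and the public
API refers to adapters by id, so the rte_ prefix reserved for exported
symbols can be dropped. This is a simplified sketch only; the fields shown
are a small subset of the real definition.

#include <stdint.h>
#include <stddef.h>

/* was: struct rte_event_crypto_adapter */
struct event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
};

/* Per-process array of adapter instances, private to the .c file. */
static struct event_crypto_adapter **event_crypto_adapter;

/* Public entry points take an adapter id and resolve it internally. */
static inline struct event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ? event_crypto_adapter[id] : NULL;
}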
  

Comments

Jayatheerthan, Jay Aug. 30, 2021, 2:42 p.m. UTC | #1
> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Tuesday, August 24, 2021 1:10 AM
> To: jerinj@marvell.com; Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [RFC 10/15] eventdev: remove rte prefix for internal structs
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Remove the rte_ prefix from the structs rte_eth_event_enqueue_buffer,
> rte_event_eth_rx_adapter and rte_event_crypto_adapter, as they are
> only used within rte_event_eth_rx_adapter.c and
> rte_event_crypto_adapter.c.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  lib/eventdev/rte_event_crypto_adapter.c |  66 +++----
>  lib/eventdev/rte_event_eth_rx_adapter.c | 249 ++++++++++--------------
>  lib/eventdev/rte_eventdev.h             |   2 +-
>  3 files changed, 141 insertions(+), 176 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
> index e1d38d383d..8a2a25c02e 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.c
> +++ b/lib/eventdev/rte_event_crypto_adapter.c
> @@ -30,7 +30,7 @@
>   */
>  #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
> 
> -struct rte_event_crypto_adapter {
> +struct event_crypto_adapter {
>  	/* Event device identifier */
>  	uint8_t eventdev_id;
>  	/* Event port identifier */
> @@ -99,7 +99,7 @@ struct crypto_queue_pair_info {
>  	uint8_t len;
>  } __rte_cache_aligned;
> 
> -static struct rte_event_crypto_adapter **event_crypto_adapter;
> +static struct event_crypto_adapter **event_crypto_adapter;
> 
>  /* Macros to check for valid adapter */
>  #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
> @@ -141,7 +141,7 @@ eca_init(void)
>  	return 0;
>  }
> 
> -static inline struct rte_event_crypto_adapter *
> +static inline struct event_crypto_adapter *
>  eca_id_to_adapter(uint8_t id)
>  {
>  	return event_crypto_adapter ?
> @@ -158,7 +158,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
>  	int started;
>  	int ret;
>  	struct rte_event_port_conf *port_conf = arg;
> -	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
> +	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
> 
>  	if (adapter == NULL)
>  		return -EINVAL;
> @@ -202,7 +202,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
>  				enum rte_event_crypto_adapter_mode mode,
>  				void *conf_arg)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
>  	struct rte_event_dev_info dev_info;
>  	int socket_id;
> @@ -304,7 +304,7 @@ rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
>  int
>  rte_event_crypto_adapter_free(uint8_t id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -329,8 +329,8 @@ rte_event_crypto_adapter_free(uint8_t id)
>  }
> 
>  static inline unsigned int
> -eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
> -		 struct rte_event *ev, unsigned int cnt)
> +eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
> +		     unsigned int cnt)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	union rte_event_crypto_metadata *m_data = NULL;
> @@ -420,7 +420,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static unsigned int
> -eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
> +eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	struct crypto_device_info *curr_dev;
> @@ -466,8 +466,8 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
>  }
> 
>  static int
> -eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_enq)
> +eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
> +			   unsigned int max_enq)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	struct rte_event ev[BATCH_SIZE];
> @@ -500,8 +500,8 @@ eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static inline void
> -eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
> -		  struct rte_crypto_op **ops, uint16_t num)
> +eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
> +		      struct rte_crypto_op **ops, uint16_t num)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	union rte_event_crypto_metadata *m_data = NULL;
> @@ -564,8 +564,8 @@ eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static inline unsigned int
> -eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_deq)
> +eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
> +			   unsigned int max_deq)
>  {
>  	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
>  	struct crypto_device_info *curr_dev;
> @@ -627,8 +627,8 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static void
> -eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
> -			unsigned int max_ops)
> +eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
> +		       unsigned int max_ops)
>  {
>  	while (max_ops) {
>  		unsigned int e_cnt, d_cnt;
> @@ -648,7 +648,7 @@ eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
>  static int
>  eca_service_func(void *args)
>  {
> -	struct rte_event_crypto_adapter *adapter = args;
> +	struct event_crypto_adapter *adapter = args;
> 
>  	if (rte_spinlock_trylock(&adapter->lock) == 0)
>  		return 0;
> @@ -659,7 +659,7 @@ eca_service_func(void *args)
>  }
> 
>  static int
> -eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
> +eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
>  {
>  	struct rte_event_crypto_adapter_conf adapter_conf;
>  	struct rte_service_spec service;
> @@ -699,10 +699,9 @@ eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
>  }
> 
>  static void
> -eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
> -			struct crypto_device_info *dev_info,
> -			int32_t queue_pair_id,
> -			uint8_t add)
> +eca_update_qp_info(struct event_crypto_adapter *adapter,
> +		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
> +		   uint8_t add)
>  {
>  	struct crypto_queue_pair_info *qp_info;
>  	int enabled;
> @@ -729,9 +728,8 @@ eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
>  }
> 
>  static int
> -eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
> -		uint8_t cdev_id,
> -		int queue_pair_id)
> +eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
> +		   int queue_pair_id)
>  {
>  	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
>  	struct crypto_queue_pair_info *qpairs;
> @@ -773,7 +771,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
>  			int32_t queue_pair_id,
>  			const struct rte_event *event)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct rte_eventdev *dev;
>  	struct crypto_device_info *dev_info;
>  	uint32_t cap;
> @@ -889,7 +887,7 @@ int
>  rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
>  					int32_t queue_pair_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	int ret;
> @@ -975,7 +973,7 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
>  static int
>  eca_adapter_ctrl(uint8_t id, int start)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	uint32_t i;
> @@ -1017,7 +1015,7 @@ eca_adapter_ctrl(uint8_t id, int start)
>  int
>  rte_event_crypto_adapter_start(uint8_t id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
>  	adapter = eca_id_to_adapter(id);
> @@ -1039,7 +1037,7 @@ int
>  rte_event_crypto_adapter_stats_get(uint8_t id,
>  				struct rte_event_crypto_adapter_stats *stats)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
>  	struct rte_event_crypto_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
> @@ -1083,7 +1081,7 @@ rte_event_crypto_adapter_stats_get(uint8_t id,
>  int
>  rte_event_crypto_adapter_stats_reset(uint8_t id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
>  	struct crypto_device_info *dev_info;
>  	struct rte_eventdev *dev;
>  	uint32_t i;
> @@ -1111,7 +1109,7 @@ rte_event_crypto_adapter_stats_reset(uint8_t id)
>  int
>  rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -1128,7 +1126,7 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
>  int
>  rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
>  {
> -	struct rte_event_crypto_adapter *adapter;
> +	struct event_crypto_adapter *adapter;
> 
>  	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index 13dfb28401..f8225ebd3d 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -78,14 +78,14 @@ struct eth_rx_vector_data {
>  TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
> 
>  /* Instance per adapter */
> -struct rte_eth_event_enqueue_buffer {
> +struct eth_event_enqueue_buffer {
>  	/* Count of events in this buffer */
>  	uint16_t count;
>  	/* Array of events in this buffer */
>  	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
>  };
> 
> -struct rte_event_eth_rx_adapter {
> +struct event_eth_rx_adapter {
>  	/* RSS key */
>  	uint8_t rss_key_be[RSS_KEY_SIZE];
>  	/* Event device identifier */
> @@ -109,7 +109,7 @@ struct rte_event_eth_rx_adapter {
>  	/* Next entry in wrr[] to begin polling */
>  	uint32_t wrr_pos;
>  	/* Event burst buffer */
> -	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer event_enqueue_buffer;
>  	/* Vector enable flag */
>  	uint8_t ena_vector;
>  	/* Timestamp of previous vector expiry list traversal */
> @@ -231,7 +231,7 @@ struct eth_rx_queue_info {
>  	struct eth_rx_vector_data vector_data;
>  };
> 
> -static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
> +static struct event_eth_rx_adapter **event_eth_rx_adapter;
> 
>  static inline int
>  rxa_validate_id(uint8_t id)
> @@ -247,7 +247,7 @@ rxa_validate_id(uint8_t id)
>  } while (0)
> 
>  static inline int
> -rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
>  }
> @@ -265,10 +265,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
>   * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
>   */
>  static int
> -rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
> -	 unsigned int n, int *cw,
> -	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
> -	 uint16_t gcd, int prev)
> +rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
> +	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
> +	     uint16_t gcd, int prev)
>  {
>  	int i = prev;
>  	uint16_t w;
> @@ -373,10 +372,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
>  /* Calculate nb_rx_intr after deleting interrupt mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_intr)
> +rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_intr)
>  {
>  	uint32_t intr_diff;
> 
> @@ -392,12 +390,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
>   * interrupt queues could currently be poll mode Rx queues
>   */
>  static void
> -rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_rx_intr,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +			  uint32_t *nb_wrr)
>  {
>  	uint32_t intr_diff;
>  	uint32_t poll_diff;
> @@ -424,11 +420,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
>   * after deleting poll mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
>  {
>  	uint32_t poll_diff;
>  	uint32_t wrr_len_diff;
> @@ -449,13 +443,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
>  /* Calculate nb_rx_* after adding poll mode rx queues
>   */
>  static void
> -rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			int rx_queue_id,
> -			uint16_t wt,
> -			uint32_t *nb_rx_poll,
> -			uint32_t *nb_rx_intr,
> -			uint32_t *nb_wrr)
> +rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info, int rx_queue_id,
> +			  uint16_t wt, uint32_t *nb_rx_poll,
> +			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
>  {
>  	uint32_t intr_diff;
>  	uint32_t poll_diff;
> @@ -482,13 +473,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Calculate nb_rx_* after adding rx_queue_id */
>  static void
> -rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id,
> -		uint16_t wt,
> -		uint32_t *nb_rx_poll,
> -		uint32_t *nb_rx_intr,
> -		uint32_t *nb_wrr)
> +rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
> +		     struct eth_device_info *dev_info, int rx_queue_id,
> +		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +		     uint32_t *nb_wrr)
>  {
>  	if (wt != 0)
>  		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
> @@ -500,12 +488,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Calculate nb_rx_* after deleting rx_queue_id */
>  static void
> -rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id,
> -		uint32_t *nb_rx_poll,
> -		uint32_t *nb_rx_intr,
> -		uint32_t *nb_wrr)
> +rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
> +		     struct eth_device_info *dev_info, int rx_queue_id,
> +		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
> +		     uint32_t *nb_wrr)
>  {
>  	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
>  				nb_wrr);
> @@ -517,8 +503,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
>   * Allocate the rx_poll array
>   */
>  static struct eth_rx_poll_entry *
> -rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
> -	uint32_t num_rx_polled)
> +rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
>  {
>  	size_t len;
> 
> @@ -534,7 +519,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
>   * Allocate the WRR array
>   */
>  static uint32_t *
> -rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
> +rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
>  {
>  	size_t len;
> 
> @@ -547,11 +532,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
>  }
> 
>  static int
> -rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint32_t nb_poll,
> -		uint32_t nb_wrr,
> -		struct eth_rx_poll_entry **rx_poll,
> -		uint32_t **wrr_sched)
> +rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
> +		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
> +		      uint32_t **wrr_sched)
>  {
> 
>  	if (nb_poll == 0) {
> @@ -576,9 +559,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Precalculate WRR polling sequence for all queues in rx_adapter */
>  static void
> -rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_rx_poll_entry *rx_poll,
> -		uint32_t *rx_wrr)
> +rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
> +		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
>  {
>  	uint16_t d;
>  	uint16_t q;
> @@ -705,13 +687,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
>  }
> 
>  static inline int
> -rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	return !!rx_adapter->enq_block_count;
>  }
> 
>  static inline void
> -rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	if (rx_adapter->rx_enq_block_start_ts)
>  		return;
> @@ -724,8 +706,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static inline void
> -rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
> -		    struct rte_event_eth_rx_adapter_stats *stats)
> +rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
> +		     struct rte_event_eth_rx_adapter_stats *stats)
>  {
>  	if (unlikely(!stats->rx_enq_start_ts))
>  		stats->rx_enq_start_ts = rte_get_tsc_cycles();
> @@ -744,10 +726,10 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Enqueue buffered events to event device */
>  static inline uint16_t
> -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
>  {
> -	struct rte_eth_event_enqueue_buffer *buf =
> -	    &rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
> 
>  	if (!buf->count)
> @@ -774,7 +756,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static inline void
> -rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
> +rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
>  		struct eth_rx_vector_data *vec)
>  {
>  	vec->vector_ev->nb_elem = 0;
> @@ -785,9 +767,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static inline uint16_t
> -rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
> +rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
>  			struct eth_rx_queue_info *queue_info,
> -			struct rte_eth_event_enqueue_buffer *buf,
> +			struct eth_event_enqueue_buffer *buf,
>  			struct rte_mbuf **mbufs, uint16_t num)
>  {
>  	struct rte_event *ev = &buf->events[buf->count];
> @@ -845,19 +827,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static inline void
> -rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint16_t eth_dev_id,
> -		uint16_t rx_queue_id,
> -		struct rte_mbuf **mbufs,
> -		uint16_t num)
> +rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num)
>  {
>  	uint32_t i;
>  	struct eth_device_info *dev_info =
>  					&rx_adapter->eth_devices[eth_dev_id];
>  	struct eth_rx_queue_info *eth_rx_queue_info =
>  					&dev_info->rx_queue[rx_queue_id];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event *ev = &buf->events[buf->count];
>  	uint64_t event = eth_rx_queue_info->event;
>  	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
> @@ -909,16 +888,13 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Enqueue packets from  <port, q>  to event buffer */
>  static inline uint32_t
> -rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
> -	uint16_t port_id,
> -	uint16_t queue_id,
> -	uint32_t rx_count,
> -	uint32_t max_rx,
> -	int *rxq_empty)
> +rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
> +	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
> +	   int *rxq_empty)
>  {
>  	struct rte_mbuf *mbufs[BATCH_SIZE];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter->event_enqueue_buffer;
> +	struct eth_event_enqueue_buffer *buf =
> +		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats =
>  					&rx_adapter->stats;
>  	uint16_t n;
> @@ -953,8 +929,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static inline void
> -rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		void *data)
> +rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
>  {
>  	uint16_t port_id;
>  	uint16_t queue;
> @@ -994,8 +969,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
> -			uint32_t num_intr_vec)
> +rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
> +			  uint32_t num_intr_vec)
>  {
>  	if (rx_adapter->num_intr_vec + num_intr_vec >
>  				RTE_EVENT_ETH_INTR_RING_SIZE) {
> @@ -1010,9 +985,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Delete entries for (dev, queue) from the interrupt ring */
>  static void
> -rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
> -			struct eth_device_info *dev_info,
> -			uint16_t rx_queue_id)
> +rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
> +			  struct eth_device_info *dev_info,
> +			  uint16_t rx_queue_id)
>  {
>  	int i, n;
>  	union queue_data qd;
> @@ -1045,7 +1020,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
>  static void *
>  rxa_intr_thread(void *arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> +	struct event_eth_rx_adapter *rx_adapter = arg;
>  	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
>  	int n, i;
> 
> @@ -1068,12 +1043,12 @@ rxa_intr_thread(void *arg)
>   * mbufs to eventdev
>   */
>  static inline uint32_t
> -rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t n;
>  	uint32_t nb_rx = 0;
>  	int rxq_empty;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct eth_event_enqueue_buffer *buf;
>  	rte_spinlock_t *ring_lock;
>  	uint8_t max_done = 0;
> 
> @@ -1188,11 +1163,11 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
>   * it.
>   */
>  static inline uint32_t
> -rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_poll(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t num_queue;
>  	uint32_t nb_rx = 0;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct eth_event_enqueue_buffer *buf;
>  	uint32_t wrr_pos;
>  	uint32_t max_nb_rx;
> 
> @@ -1233,8 +1208,8 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
>  static void
>  rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> -	struct rte_eth_event_enqueue_buffer *buf =
> +	struct event_eth_rx_adapter *rx_adapter = arg;
> +	struct eth_event_enqueue_buffer *buf =
>  		&rx_adapter->event_enqueue_buffer;
>  	struct rte_event *ev;
> 
> @@ -1257,7 +1232,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  static int
>  rxa_service_func(void *args)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter = args;
> +	struct event_eth_rx_adapter *rx_adapter = args;
>  	struct rte_event_eth_rx_adapter_stats *stats;
> 
>  	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
> @@ -1318,7 +1293,7 @@ rte_event_eth_rx_adapter_init(void)
>  	return 0;
>  }
> 
> -static inline struct rte_event_eth_rx_adapter *
> +static inline struct event_eth_rx_adapter *
>  rxa_id_to_adapter(uint8_t id)
>  {
>  	return event_eth_rx_adapter ?
> @@ -1335,7 +1310,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
>  	int started;
>  	uint8_t port_id;
>  	struct rte_event_port_conf *port_conf = arg;
> -	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
> +	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
> 
>  	dev = &rte_eventdevs[rx_adapter->eventdev_id];
>  	dev_conf = dev->data->dev_conf;
> @@ -1384,7 +1359,7 @@ rxa_epoll_create1(void)
>  }
> 
>  static int
> -rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	if (rx_adapter->epd != INIT_FD)
>  		return 0;
> @@ -1401,7 +1376,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int err;
>  	char thread_name[RTE_MAX_THREAD_NAME_LEN];
> @@ -1445,7 +1420,7 @@ rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int err;
> 
> @@ -1466,7 +1441,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
>  {
>  	int ret;
> 
> @@ -1484,9 +1459,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
>  }
> 
>  static int
> -rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	uint16_t rx_queue_id)
> +rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
> +		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
>  {
>  	int err;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id;
> @@ -1514,9 +1488,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int rx_queue_id)
> +rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
> +		   struct eth_device_info *dev_info, int rx_queue_id)
>  {
>  	int err;
>  	int i;
> @@ -1573,9 +1546,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	uint16_t rx_queue_id)
> +rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
> +		struct eth_device_info *dev_info, uint16_t rx_queue_id)
>  {
>  	int err, err1;
>  	uint16_t eth_dev_id = dev_info->dev->data->port_id;
> @@ -1663,9 +1635,8 @@ rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static int
> -rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int rx_queue_id)
> +rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
> +		   struct eth_device_info *dev_info, int rx_queue_id)
> 
>  {
>  	int i, j, err;
> @@ -1713,9 +1684,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	return err;
>  }
> 
> -
>  static int
> -rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
> +rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
>  {
>  	int ret;
>  	struct rte_service_spec service;
> @@ -1758,10 +1728,9 @@ rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
>  }
> 
>  static void
> -rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		struct eth_device_info *dev_info,
> -		int32_t rx_queue_id,
> -		uint8_t add)
> +rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
> +		 struct eth_device_info *dev_info, int32_t rx_queue_id,
> +		 uint8_t add)
>  {
>  	struct eth_rx_queue_info *queue_info;
>  	int enabled;
> @@ -1811,9 +1780,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
>  }
> 
>  static void
> -rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int32_t rx_queue_id)
> +rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
> +	   struct eth_device_info *dev_info, int32_t rx_queue_id)
>  {
>  	struct eth_rx_vector_data *vec;
>  	int pollq;
> @@ -1854,10 +1822,9 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
> 
>  static void
> -rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -	struct eth_device_info *dev_info,
> -	int32_t rx_queue_id,
> -	const struct rte_event_eth_rx_adapter_queue_conf *conf)
> +rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
> +	      struct eth_device_info *dev_info, int32_t rx_queue_id,
> +	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
>  {
>  	struct eth_rx_queue_info *queue_info;
>  	const struct rte_event *ev = &conf->ev;
> @@ -1922,7 +1889,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  static void
>  rxa_sw_event_vector_configure(
> -	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
>  	int rx_queue_id,
>  	const struct rte_event_eth_rx_adapter_event_vector_config *config)
>  {
> @@ -1956,10 +1923,10 @@ rxa_sw_event_vector_configure(
>  			      config->vector_timeout_ns >> 1;
>  }
> 
> -static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint16_t eth_dev_id,
> -		int rx_queue_id,
> -		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
> +static int
> +rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> +	   int rx_queue_id,
> +	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
>  {
>  	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
>  	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
> @@ -2088,7 +2055,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
>  static int
>  rxa_ctrl(uint8_t id, int start)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
>  	uint32_t i;
> @@ -2135,7 +2102,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
>  				rte_event_eth_rx_adapter_conf_cb conf_cb,
>  				void *conf_arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	int ret;
>  	int socket_id;
>  	uint16_t i;
> @@ -2235,7 +2202,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
>  int
>  rte_event_eth_rx_adapter_free(uint8_t id)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -2267,7 +2234,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  {
>  	int ret;
>  	uint32_t cap;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
> 
> @@ -2385,7 +2352,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
>  {
>  	int ret = 0;
>  	struct rte_eventdev *dev;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct eth_device_info *dev_info;
>  	uint32_t cap;
>  	uint32_t nb_rx_poll = 0;
> @@ -2505,7 +2472,7 @@ rte_event_eth_rx_adapter_queue_event_vector_config(
>  	struct rte_event_eth_rx_adapter_event_vector_config *config)
>  {
>  	struct rte_event_eth_rx_adapter_vector_limits limits;
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	uint32_t cap;
>  	int ret;
> @@ -2632,7 +2599,7 @@ int
>  rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  			       struct rte_event_eth_rx_adapter_stats *stats)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
>  	struct rte_event_eth_rx_adapter_stats dev_stats;
>  	struct rte_eventdev *dev;
> @@ -2673,7 +2640,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
>  int
>  rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct rte_eventdev *dev;
>  	struct eth_device_info *dev_info;
>  	uint32_t i;
> @@ -2701,7 +2668,7 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
>  int
>  rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
> 
>  	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> 
> @@ -2721,7 +2688,7 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
>  					rte_event_eth_rx_adapter_cb_fn cb_fn,
>  					void *cb_arg)
>  {
> -	struct rte_event_eth_rx_adapter *rx_adapter;
> +	struct event_eth_rx_adapter *rx_adapter;
>  	struct eth_device_info *dev_info;
>  	uint32_t cap;
>  	int ret;

Looks good to me.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>

> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> index 7378597846..5853fadb0d 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1193,7 +1193,7 @@ struct rte_event {
>  #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
>  /**< The application can override the adapter generated flow ID in the
>   * event. This flow ID can be specified when adding an ethdev Rx queue
> - * to the adapter using the ev member of struct rte_event_eth_rx_adapter
> + * to the adapter using the ev.flow_id member.
>   * @see struct rte_event_eth_rx_adapter_queue_conf::ev
>   * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
>   */
> --
> 2.17.1
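
Regarding the RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID comment fix in
the hunk above: with that capability, an application supplies its own flow
ID through the ev member of the queue configuration when adding an Rx
queue. A minimal sketch using the public Rx adapter API; the identifiers
rx_adapter_id, eth_port_id, rx_queue_id and ev_queue_id are placeholders
and error handling is omitted.

#include <string.h>
#include <rte_event_eth_rx_adapter.h>

/* Sketch: add an ethdev Rx queue with an application-chosen flow ID. */
static int
add_rx_queue_with_flow_id(uint8_t rx_adapter_id, uint16_t eth_port_id,
			  int32_t rx_queue_id, uint8_t ev_queue_id)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = ev_queue_id;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	/* Overrides the flow ID the adapter would otherwise generate. */
	qconf.ev.flow_id = 0x1234;
	qconf.rx_queue_flags = RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;

	return rte_event_eth_rx_adapter_queue_add(rx_adapter_id, eth_port_id,
						  rx_queue_id, &qconf);
}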
  

Patch

diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index e1d38d383d..8a2a25c02e 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -30,7 +30,7 @@ 
  */
 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
 
-struct rte_event_crypto_adapter {
+struct event_crypto_adapter {
 	/* Event device identifier */
 	uint8_t eventdev_id;
 	/* Event port identifier */
@@ -99,7 +99,7 @@  struct crypto_queue_pair_info {
 	uint8_t len;
 } __rte_cache_aligned;
 
-static struct rte_event_crypto_adapter **event_crypto_adapter;
+static struct event_crypto_adapter **event_crypto_adapter;
 
 /* Macros to check for valid adapter */
 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
@@ -141,7 +141,7 @@  eca_init(void)
 	return 0;
 }
 
-static inline struct rte_event_crypto_adapter *
+static inline struct event_crypto_adapter *
 eca_id_to_adapter(uint8_t id)
 {
 	return event_crypto_adapter ?
@@ -158,7 +158,7 @@  eca_default_config_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	int ret;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
 
 	if (adapter == NULL)
 		return -EINVAL;
@@ -202,7 +202,7 @@  rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				enum rte_event_crypto_adapter_mode mode,
 				void *conf_arg)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
 	struct rte_event_dev_info dev_info;
 	int socket_id;
@@ -304,7 +304,7 @@  rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_crypto_adapter_free(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -329,8 +329,8 @@  rte_event_crypto_adapter_free(uint8_t id)
 }
 
 static inline unsigned int
-eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
-		 struct rte_event *ev, unsigned int cnt)
+eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
+		     unsigned int cnt)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -420,7 +420,7 @@  eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
 }
 
 static unsigned int
-eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -466,8 +466,8 @@  eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
 }
 
 static int
-eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_enq)
+eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_enq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct rte_event ev[BATCH_SIZE];
@@ -500,8 +500,8 @@  eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline void
-eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
-		  struct rte_crypto_op **ops, uint16_t num)
+eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
+		      struct rte_crypto_op **ops, uint16_t num)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	union rte_event_crypto_metadata *m_data = NULL;
@@ -564,8 +564,8 @@  eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
 }
 
 static inline unsigned int
-eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_deq)
+eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
+			   unsigned int max_deq)
 {
 	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
 	struct crypto_device_info *curr_dev;
@@ -627,8 +627,8 @@  eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
 }
 
 static void
-eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
-			unsigned int max_ops)
+eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
+		       unsigned int max_ops)
 {
 	while (max_ops) {
 		unsigned int e_cnt, d_cnt;
@@ -648,7 +648,7 @@  eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
 static int
 eca_service_func(void *args)
 {
-	struct rte_event_crypto_adapter *adapter = args;
+	struct event_crypto_adapter *adapter = args;
 
 	if (rte_spinlock_trylock(&adapter->lock) == 0)
 		return 0;
@@ -659,7 +659,7 @@  eca_service_func(void *args)
 }
 
 static int
-eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
 {
 	struct rte_event_crypto_adapter_conf adapter_conf;
 	struct rte_service_spec service;
@@ -699,10 +699,9 @@  eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
 }
 
 static void
-eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
-			struct crypto_device_info *dev_info,
-			int32_t queue_pair_id,
-			uint8_t add)
+eca_update_qp_info(struct event_crypto_adapter *adapter,
+		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
+		   uint8_t add)
 {
 	struct crypto_queue_pair_info *qp_info;
 	int enabled;
@@ -729,9 +728,8 @@  eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
 }
 
 static int
-eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
-		uint8_t cdev_id,
-		int queue_pair_id)
+eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
+		   int queue_pair_id)
 {
 	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
 	struct crypto_queue_pair_info *qpairs;
@@ -773,7 +771,7 @@  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			int32_t queue_pair_id,
 			const struct rte_event *event)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
 	uint32_t cap;
@@ -889,7 +887,7 @@  int
 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 					int32_t queue_pair_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	int ret;
@@ -975,7 +973,7 @@  rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
 static int
 eca_adapter_ctrl(uint8_t id, int start)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1017,7 +1015,7 @@  eca_adapter_ctrl(uint8_t id, int start)
 int
 rte_event_crypto_adapter_start(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 	adapter = eca_id_to_adapter(id);
@@ -1039,7 +1037,7 @@  int
 rte_event_crypto_adapter_stats_get(uint8_t id,
 				struct rte_event_crypto_adapter_stats *stats)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_crypto_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -1083,7 +1081,7 @@  rte_event_crypto_adapter_stats_get(uint8_t id,
 int
 rte_event_crypto_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 	struct crypto_device_info *dev_info;
 	struct rte_eventdev *dev;
 	uint32_t i;
@@ -1111,7 +1109,7 @@  rte_event_crypto_adapter_stats_reset(uint8_t id)
 int
 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -1128,7 +1126,7 @@  rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 {
-	struct rte_event_crypto_adapter *adapter;
+	struct event_crypto_adapter *adapter;
 
 	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 13dfb28401..f8225ebd3d 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -78,14 +78,14 @@  struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
 	/* Count of events in this buffer */
 	uint16_t count;
 	/* Array of events in this buffer */
 	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
 	/* RSS key */
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
@@ -109,7 +109,7 @@  struct rte_event_eth_rx_adapter {
 	/* Next entry in wrr[] to begin polling */
 	uint32_t wrr_pos;
 	/* Event burst buffer */
-	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+	struct eth_event_enqueue_buffer event_enqueue_buffer;
 	/* Vector enable flag */
 	uint8_t ena_vector;
 	/* Timestamp of previous vector expiry list traversal */
@@ -231,7 +231,7 @@  struct eth_rx_queue_info {
 	struct eth_rx_vector_data vector_data;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 static inline int
 rxa_validate_id(uint8_t id)
@@ -247,7 +247,7 @@  rxa_validate_id(uint8_t id)
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -265,10 +265,9 @@  static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-	 unsigned int n, int *cw,
-	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-	 uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+	     uint16_t gcd, int prev)
 {
 	int i = prev;
 	uint16_t w;
@@ -373,10 +372,9 @@  rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_intr)
 {
 	uint32_t intr_diff;
 
@@ -392,12 +390,10 @@  rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+			  uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -424,11 +420,9 @@  rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
 	uint32_t poll_diff;
 	uint32_t wrr_len_diff;
@@ -449,13 +443,10 @@  rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			int rx_queue_id,
-			uint16_t wt,
-			uint32_t *nb_rx_poll,
-			uint32_t *nb_rx_intr,
-			uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info, int rx_queue_id,
+			  uint16_t wt, uint32_t *nb_rx_poll,
+			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
 	uint32_t intr_diff;
 	uint32_t poll_diff;
@@ -482,13 +473,10 @@  rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint16_t wt,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	if (wt != 0)
 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -500,12 +488,10 @@  rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id,
-		uint32_t *nb_rx_poll,
-		uint32_t *nb_rx_intr,
-		uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+		     struct eth_device_info *dev_info, int rx_queue_id,
+		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+		     uint32_t *nb_wrr)
 {
 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
 				nb_wrr);
@@ -517,8 +503,7 @@  rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
 	size_t len;
 
@@ -534,7 +519,7 @@  rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
 	size_t len;
 
@@ -547,11 +532,9 @@  rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint32_t nb_poll,
-		uint32_t nb_wrr,
-		struct eth_rx_poll_entry **rx_poll,
-		uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+		      uint32_t **wrr_sched)
 {
 
 	if (nb_poll == 0) {
@@ -576,9 +559,8 @@  rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_rx_poll_entry *rx_poll,
-		uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
 	uint16_t d;
 	uint16_t q;
@@ -705,13 +687,13 @@  rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
 	return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->rx_enq_block_start_ts)
 		return;
@@ -724,8 +706,8 @@  rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-		    struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+		     struct rte_event_eth_rx_adapter_stats *stats)
 {
 	if (unlikely(!stats->rx_enq_start_ts))
 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -744,10 +726,10 @@  rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter)
 {
-	struct rte_eth_event_enqueue_buffer *buf =
-	    &rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 
 	if (!buf->count)
@@ -774,7 +756,7 @@  rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
 		struct eth_rx_vector_data *vec)
 {
 	vec->vector_ev->nb_elem = 0;
@@ -785,9 +767,9 @@  rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
 			struct eth_rx_queue_info *queue_info,
-			struct rte_eth_event_enqueue_buffer *buf,
+			struct eth_event_enqueue_buffer *buf,
 			struct rte_mbuf **mbufs, uint16_t num)
 {
 	struct rte_event *ev = &buf->events[buf->count];
@@ -845,19 +827,16 @@  rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		uint16_t rx_queue_id,
-		struct rte_mbuf **mbufs,
-		uint16_t num)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
 					&rx_adapter->eth_devices[eth_dev_id];
 	struct eth_rx_queue_info *eth_rx_queue_info =
 					&dev_info->rx_queue[rx_queue_id];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev = &buf->events[buf->count];
 	uint64_t event = eth_rx_queue_info->event;
 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -909,16 +888,13 @@  rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-	uint16_t port_id,
-	uint16_t queue_id,
-	uint32_t rx_count,
-	uint32_t max_rx,
-	int *rxq_empty)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+	   int *rxq_empty)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
+	struct eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats =
 					&rx_adapter->stats;
 	uint16_t n;
@@ -953,8 +929,7 @@  rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-		void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
 	uint16_t port_id;
 	uint16_t queue;
@@ -994,8 +969,8 @@  rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-			uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+			  uint32_t num_intr_vec)
 {
 	if (rx_adapter->num_intr_vec + num_intr_vec >
 				RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1010,9 +985,9 @@  rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-			struct eth_device_info *dev_info,
-			uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+			  struct eth_device_info *dev_info,
+			  uint16_t rx_queue_id)
 {
 	int i, n;
 	union queue_data qd;
@@ -1045,7 +1020,7 @@  rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
+	struct event_eth_rx_adapter *rx_adapter = arg;
 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
 	int n, i;
 
@@ -1068,12 +1043,12 @@  rxa_intr_thread(void *arg)
  * mbufs to eventdev
  */
 static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t n;
 	uint32_t nb_rx = 0;
 	int rxq_empty;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	rte_spinlock_t *ring_lock;
 	uint8_t max_done = 0;
 
@@ -1188,11 +1163,11 @@  rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
  * it.
  */
 static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct eth_event_enqueue_buffer *buf;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
@@ -1233,8 +1208,8 @@  rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf =
+	struct event_eth_rx_adapter *rx_adapter = arg;
+	struct eth_event_enqueue_buffer *buf =
 		&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev;
 
@@ -1257,7 +1232,7 @@  rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter = args;
+	struct event_eth_rx_adapter *rx_adapter = args;
 	struct rte_event_eth_rx_adapter_stats *stats;
 
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
@@ -1318,7 +1293,7 @@  rte_event_eth_rx_adapter_init(void)
 	return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
 	return event_eth_rx_adapter ?
@@ -1335,7 +1310,7 @@  rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
 	int started;
 	uint8_t port_id;
 	struct rte_event_port_conf *port_conf = arg;
-	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
 	dev_conf = dev->data->dev_conf;
@@ -1384,7 +1359,7 @@  rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
 	if (rx_adapter->epd != INIT_FD)
 		return 0;
@@ -1401,7 +1376,7 @@  rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1445,7 +1420,7 @@  rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
@@ -1466,7 +1441,7 @@  rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
 	int ret;
 
@@ -1484,9 +1459,8 @@  rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1514,9 +1488,8 @@  rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 {
 	int err;
 	int i;
@@ -1573,9 +1546,8 @@  rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+		struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
 	int err, err1;
 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1663,9 +1635,8 @@  rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+		   struct eth_device_info *dev_info, int rx_queue_id)
 
 {
 	int i, j, err;
@@ -1713,9 +1684,8 @@  rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
 	int ret;
 	struct rte_service_spec service;
@@ -1758,10 +1728,9 @@  rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct eth_device_info *dev_info,
-		int32_t rx_queue_id,
-		uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+		 struct eth_device_info *dev_info, int32_t rx_queue_id,
+		 uint8_t add)
 {
 	struct eth_rx_queue_info *queue_info;
 	int enabled;
@@ -1811,9 +1780,8 @@  rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+	   struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
 	struct eth_rx_vector_data *vec;
 	int pollq;
@@ -1854,10 +1822,9 @@  rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static void
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-	struct eth_device_info *dev_info,
-	int32_t rx_queue_id,
-	const struct rte_event_eth_rx_adapter_queue_conf *conf)
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+	      struct eth_device_info *dev_info, int32_t rx_queue_id,
+	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
 	struct eth_rx_queue_info *queue_info;
 	const struct rte_event *ev = &conf->ev;
@@ -1922,7 +1889,7 @@  rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 
 static void
 rxa_sw_event_vector_configure(
-	struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
 	int rx_queue_id,
 	const struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
@@ -1956,10 +1923,10 @@  rxa_sw_event_vector_configure(
 			      config->vector_timeout_ns >> 1;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-		uint16_t eth_dev_id,
-		int rx_queue_id,
-		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+	   int rx_queue_id,
+	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2088,7 +2055,7 @@  static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2135,7 +2102,7 @@  rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 				rte_event_eth_rx_adapter_conf_cb conf_cb,
 				void *conf_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	int ret;
 	int socket_id;
 	uint16_t i;
@@ -2235,7 +2202,7 @@  rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2267,7 +2234,7 @@  rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
 	int ret;
 	uint32_t cap;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 
@@ -2385,7 +2352,7 @@  rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
 	int ret = 0;
 	struct rte_eventdev *dev;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	uint32_t nb_rx_poll = 0;
@@ -2505,7 +2472,7 @@  rte_event_eth_rx_adapter_queue_event_vector_config(
 	struct rte_event_eth_rx_adapter_event_vector_config *config)
 {
 	struct rte_event_eth_rx_adapter_vector_limits limits;
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
@@ -2632,7 +2599,7 @@  int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
 	struct rte_event_eth_rx_adapter_stats dev_stats;
 	struct rte_eventdev *dev;
@@ -2673,7 +2640,7 @@  rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct rte_eventdev *dev;
 	struct eth_device_info *dev_info;
 	uint32_t i;
@@ -2701,7 +2668,7 @@  rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 
 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2721,7 +2688,7 @@  rte_event_eth_rx_adapter_cb_register(uint8_t id,
 					rte_event_eth_rx_adapter_cb_fn cb_fn,
 					void *cb_arg)
 {
-	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct event_eth_rx_adapter *rx_adapter;
 	struct eth_device_info *dev_info;
 	uint32_t cap;
 	int ret;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 7378597846..5853fadb0d 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1193,7 +1193,7 @@  struct rte_event {
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
 /**< The application can override the adapter generated flow ID in the
  * event. This flow ID can be specified when adding an ethdev Rx queue
- * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * to the adapter using the ev.flow_id member.
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
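
For reference, a minimal sketch (not part of this patch) of how an application exercises the flow-ID override documented in the comment above. The rename only touches types that are private to rte_event_eth_rx_adapter.c and rte_event_crypto_adapter.c, so the application-facing calls below are unchanged; the eventdev id ev_dev_id, ethdev id eth_dev_id and adapter id 0 are placeholders assumed to be already configured:

	/* Illustrative sketch only; assumes ev_dev_id/eth_dev_id are set up. */
	#include <string.h>
	#include <rte_eventdev.h>
	#include <rte_event_eth_rx_adapter.h>

	static int
	setup_rx_adapter_with_flow_id(uint8_t ev_dev_id, uint16_t eth_dev_id,
				      struct rte_event_port_conf *port_conf)
	{
		struct rte_event_eth_rx_adapter_queue_conf qconf;
		uint32_t caps;
		uint8_t id = 0;		/* adapter id chosen by the application */
		int ret;

		ret = rte_event_eth_rx_adapter_create(id, ev_dev_id, port_conf);
		if (ret)
			return ret;

		ret = rte_event_eth_rx_adapter_caps_get(ev_dev_id, eth_dev_id, &caps);
		if (ret)
			return ret;

		memset(&qconf, 0, sizeof(qconf));
		qconf.ev.queue_id = 0;	/* target event queue */
		qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

		/* Override the adapter-generated flow ID only when supported. */
		if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
			qconf.rx_queue_flags |=
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
			qconf.ev.flow_id = 1;	/* application-chosen flow ID */
		}

		/* rx_queue_id of -1 adds all Rx queues of the ethdev. */
		return rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, -1, &qconf);
	}

The application only ever holds the adapter id and the public *_queue_conf structures, which is why dropping the rte_ prefix from the file-local adapter and enqueue-buffer structs needs no corresponding change on the API side.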