[v2,1/6] eal/interrupts: implement get set APIs

Message ID 20211005121502.66964-2-hkalra@marvell.com (mailing list archive)
State Changes Requested, archived
Delegated to: David Marchand
Series: make rte_intr_handle internal

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/iol-testing warning apply patch failure

Commit Message

Harman Kalra Oct. 5, 2021, 12:14 p.m. UTC
  Prototype and implement get/set APIs for the interrupt handle fields.
Users will not be able to access any of the interrupt handle fields
directly; they should use these get/set APIs to access and manipulate
them.

The internal interrupt header, i.e. rte_eal_interrupts.h, is rearranged:
the APIs defined there are moved to rte_interrupts.h and the epoll-specific
definitions are moved to a new header, rte_epoll.h.
Later in the series rte_eal_interrupts.h will be removed.
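
A minimal usage example of the new accessors (error handling trimmed):

#include <stdio.h>

#include <rte_errno.h>
#include <rte_interrupts.h>

static int
ext_handle_demo(int fd)
{
	struct rte_intr_handle *handle;

	/* Allocate the instance via the DPDK memory allocator. */
	handle = rte_intr_instance_alloc(RTE_INTR_ALLOC_DPDK_ALLOCATOR);
	if (handle == NULL)
		return -rte_errno;

	/* No direct field access anymore; everything goes through get/set. */
	rte_intr_fd_set(handle, fd);
	rte_intr_type_set(handle, RTE_INTR_HANDLE_EXT);

	printf("fd=%d type=%d\n", rte_intr_fd_get(handle),
	       rte_intr_type_get(handle));

	rte_intr_instance_free(handle);
	return 0;
}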

Signed-off-by: Harman Kalra <hkalra@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
---
 MAINTAINERS                            |   1 +
 lib/eal/common/eal_common_interrupts.c | 470 +++++++++++++++++++
 lib/eal/common/meson.build             |   1 +
 lib/eal/include/meson.build            |   1 +
 lib/eal/include/rte_eal_interrupts.h   | 207 +--------
 lib/eal/include/rte_epoll.h            | 118 +++++
 lib/eal/include/rte_interrupts.h       | 614 ++++++++++++++++++++++++-
 lib/eal/version.map                    |  46 +-
 8 files changed, 1245 insertions(+), 213 deletions(-)
 create mode 100644 lib/eal/common/eal_common_interrupts.c
 create mode 100644 lib/eal/include/rte_epoll.h
  

Comments

Dmitry Kozlyuk Oct. 14, 2021, 12:58 a.m. UTC | #1
2021-10-05 17:44 (UTC+0530), Harman Kalra:
> [...]
> +int rte_intr_instance_copy(struct rte_intr_handle *intr_handle,
> +			   const struct rte_intr_handle *src)
> +{
> +	if (intr_handle == NULL) {
> +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> +		rte_errno = ENOTSUP;
> +		goto fail;
> +	}
> +
> +	if (src == NULL) {
> +		RTE_LOG(ERR, EAL, "Source interrupt instance unallocated\n");
> +		rte_errno = EINVAL;
> +		goto fail;
> +	}
> +
> +	intr_handle->fd = src->fd;
> +	intr_handle->vfio_dev_fd = src->vfio_dev_fd;
> +	intr_handle->type = src->type;
> +	intr_handle->max_intr = src->max_intr;
> +	intr_handle->nb_efd = src->nb_efd;
> +	intr_handle->efd_counter_size = src->efd_counter_size;
> +
> +	memcpy(intr_handle->efds, src->efds, src->nb_intr);
> +	memcpy(intr_handle->elist, src->elist, src->nb_intr);

Buffer overrun if "intr_handle->nb_intr < src->nb_intr"?
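
Something along these lines would avoid it (just a sketch; note that the
memcpy size should also account for the element size):

	if (src->nb_intr > intr_handle->nb_intr) {
		RTE_LOG(ERR, EAL, "Source nb_intr=%d > destination nb_intr=%d\n",
			src->nb_intr, intr_handle->nb_intr);
		rte_errno = EINVAL;
		goto fail;
	}

	memcpy(intr_handle->efds, src->efds,
	       src->nb_intr * sizeof(*src->efds));
	memcpy(intr_handle->elist, src->elist,
	       src->nb_intr * sizeof(*src->elist));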

> +
> +	return 0;
> +fail:
> +	return -rte_errno;
> +}
> +
> +int rte_intr_instance_mem_allocator_get(
> +				const struct rte_intr_handle *intr_handle)
> +{
> +	if (intr_handle == NULL) {
> +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> +		return -ENOTSUP;

ENOTSUP usually means the operation is valid from API standpoint
but not supported by the implementation. EINVAL/EFAULT suits better.

> +	}
> +
> +	return intr_handle->mem_allocator;
> +}

What do you think about having an API to retrieve the entire flags instead?

> +
> +void rte_intr_instance_free(struct rte_intr_handle *intr_handle)
> +{
> +	if (intr_handle == NULL) {
> +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> +		rte_errno = ENOTSUP;
> +	}

APIs are neater when free(NULL) is a no-op.
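
I.e. something like this (just a sketch, keeping the rest of the function
as it is):

void rte_intr_instance_free(struct rte_intr_handle *intr_handle)
{
	/* Behave like free()/rte_free(): silently accept NULL. */
	if (intr_handle == NULL)
		return;

	if (intr_handle->mem_allocator)
		rte_free(intr_handle);
	else
		free(intr_handle);
}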

> +
> +	if (intr_handle->mem_allocator)
> +		rte_free(intr_handle);
> +	else
> +		free(intr_handle);
> +}
> +
> +int rte_intr_fd_set(struct rte_intr_handle *intr_handle, int fd)
> +{
> +	if (intr_handle == NULL) {
> +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> +		rte_errno = ENOTSUP;
> +		goto fail;
> +	}

This piece repeats over and over, how about making it a function or a macro,
like in ethdev?
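
For instance (the helper name below is only an illustration):

/* Illustrative name; any equivalent helper would do. */
#define RTE_INTR_INSTANCE_CHECK(intr_handle) do { \
	if ((intr_handle) == NULL) { \
		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n"); \
		rte_errno = ENOTSUP; \
		goto fail; \
	} \
} while (0)

int rte_intr_fd_set(struct rte_intr_handle *intr_handle, int fd)
{
	RTE_INTR_INSTANCE_CHECK(intr_handle);

	intr_handle->fd = fd;

	return 0;
fail:
	return -rte_errno;
}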

> +
> +	intr_handle->fd = fd;
> +
> +	return 0;
> +fail:
> +	return -rte_errno;
> +}
> +
> +int rte_intr_fd_get(const struct rte_intr_handle *intr_handle)
> +{
> +	if (intr_handle == NULL) {
> +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> +		rte_errno = ENOTSUP;
> +		goto fail;
> +	}
> +
> +	return intr_handle->fd;
> +fail:
> +	return -1;
> +}

Please add a similar pair of experimental API for the "handle" member,
it is needed for Windows interrupt support I'm working on top of these series
(IIUC, API changes should be closed by RC1.)
If you will be doing this and don't like "handle" name, it might be like
"dev_handle" or "windows_device".

> [...]
> +int rte_intr_max_intr_set(struct rte_intr_handle *intr_handle,
> +				 int max_intr)
> +{
> +	if (intr_handle == NULL) {
> +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> +		rte_errno = ENOTSUP;
> +		goto fail;
> +	}
> +
> +	if (max_intr > intr_handle->nb_intr) {
> +		RTE_LOG(ERR, EAL, "Max_intr=%d greater than RTE_MAX_RXTX_INTR_VEC_ID=%d",

The macro is not used in the comparison, nor should the log mention it.

> [...]
> @@ -420,6 +412,14 @@ EXPERIMENTAL {
>  
>  	# added in 21.08
>  	rte_power_monitor_multi; # WINDOWS_NO_EXPORT
> +
> +	# added in 21.11
> +	rte_intr_fd_set;
> +	rte_intr_fd_get;

WINDOWS_NO_EXPORT

> +	rte_intr_type_set;
> +	rte_intr_type_get;
> +	rte_intr_instance_alloc;
> +	rte_intr_instance_free;
>  };

Do I understand correctly that these exports are needed
to allow an application to use DPDK callback facilities
for its own interrupt sources?
If so, I'd suggest that instead we export a simpler set of functions:
1. Create/free a handle instance with automatic fixed type selection.
2. Trigger an interrupt on the specified handle instance.
The flow would be that the application listens on whatever it wants,
probably with OS-specific mechanisms, and just notifies the interrupt thread
about events to trigger callbacks.
Because these APIs are experimental we don't need to change it now,
just my thoughts for the future.
  
David Marchand Oct. 14, 2021, 7:31 a.m. UTC | #2
On Tue, Oct 5, 2021 at 2:17 PM Harman Kalra <hkalra@marvell.com> wrote:
> +struct rte_intr_handle *rte_intr_instance_alloc(uint32_t flags)
> +{
> +       struct rte_intr_handle *intr_handle;
> +       bool mem_allocator;

Regardless of the currently defined flags, we want to have an ABI
ready for future changes, so if there is a "flags" input parameter, it
must be checked against valid values.
You can build a RTE_INTR_ALLOC_KNOWN_FLAGS define that contains all
valid flags either in a private header or only in this .c file if no
other unit needs it.
Next, in this function:

if ((flags & ~RTE_INTR_ALLOC_KNOWN_FLAGS) != 0) {
    rte_errno = EINVAL;
    return NULL;
}

A check in unit tests is then a good thing to add so that developers
adding a new flag get a CI failure.

This is not a blocker as this API is still experimental, but please
let's do this from the start.
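
In app/test, a check as simple as this would do (sketch only; the bit used
here must of course stay outside the set of known flags):

/* E.g. in app/test/test_interrupts.c: an unknown flag must be rejected. */
static int
test_intr_instance_alloc_invalid_flag(void)
{
	struct rte_intr_handle *handle;

	handle = rte_intr_instance_alloc(1U << 31);
	if (handle != NULL) {
		printf("Error: unknown allocation flag was accepted\n");
		rte_intr_instance_free(handle);
		return -1;
	}
	if (rte_errno != EINVAL) {
		printf("Error: unexpected rte_errno %d\n", rte_errno);
		return -1;
	}
	return 0;
}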


> +
> +       mem_allocator = (flags & RTE_INTR_ALLOC_DPDK_ALLOCATOR) != 0;
> +       if (mem_allocator)
> +               intr_handle = rte_zmalloc(NULL, sizeof(struct rte_intr_handle),
> +                                         0);
> +       else
> +               intr_handle = calloc(1, sizeof(struct rte_intr_handle));
  
Harman Kalra Oct. 14, 2021, 5:15 p.m. UTC | #3
Hi Dmitry,

Thanks for your inputs.
Please see inline.

> -----Original Message-----
> From: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
> Sent: Thursday, October 14, 2021 6:29 AM
> To: Harman Kalra <hkalra@marvell.com>
> Cc: dev@dpdk.org; Thomas Monjalon <thomas@monjalon.net>; Ray Kinsella
> <mdr@ashroe.eu>; david.marchand@redhat.com
> Subject: [EXT] Re: [PATCH v2 1/6] eal/interrupts: implement get set APIs
> 
> External Email
> 
> ----------------------------------------------------------------------
> 2021-10-05 17:44 (UTC+0530), Harman Kalra:
> > [...]
> > +int rte_intr_instance_copy(struct rte_intr_handle *intr_handle,
> > +			   const struct rte_intr_handle *src) {
> > +	if (intr_handle == NULL) {
> > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > +		rte_errno = ENOTSUP;
> > +		goto fail;
> > +	}
> > +
> > +	if (src == NULL) {
> > +		RTE_LOG(ERR, EAL, "Source interrupt instance
> unallocated\n");
> > +		rte_errno = EINVAL;
> > +		goto fail;
> > +	}
> > +
> > +	intr_handle->fd = src->fd;
> > +	intr_handle->vfio_dev_fd = src->vfio_dev_fd;
> > +	intr_handle->type = src->type;
> > +	intr_handle->max_intr = src->max_intr;
> > +	intr_handle->nb_efd = src->nb_efd;
> > +	intr_handle->efd_counter_size = src->efd_counter_size;
> > +
> > +	memcpy(intr_handle->efds, src->efds, src->nb_intr);
> > +	memcpy(intr_handle->elist, src->elist, src->nb_intr);
> 
> Buffer overrun if "intr_handle->nb_intr < src->nb_intr"?

Ack, I will add the check.

> 
> > +
> > +	return 0;
> > +fail:
> > +	return -rte_errno;
> > +}
> > +
> > +int rte_intr_instance_mem_allocator_get(
> > +				const struct rte_intr_handle *intr_handle) {
> > +	if (intr_handle == NULL) {
> > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > +		return -ENOTSUP;
> 
> ENOTSUP usually means the operation is valid from API standpoint but not
> supported by the implementation. EINVAL/EFAULT suits better.

Ack, will make it EFAULT.

> 
> > +	}
> > +
> > +	return intr_handle->mem_allocator;
> > +}
> 
> What do you think about having an API to retrieve the entire flags instead?

Since we are planning to remove this flag variable and rely on an
auto-detection mechanism, I will remove this API.


> 
> > +
> > +void rte_intr_instance_free(struct rte_intr_handle *intr_handle) {
> > +	if (intr_handle == NULL) {
> > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > +		rte_errno = ENOTSUP;
> > +	}
> 
> APIs are neater when free(NULL) is a no-op.

Correct.

> 
> > +
> > +	if (intr_handle->mem_allocator)
> > +		rte_free(intr_handle);
> > +	else
> > +		free(intr_handle);
> > +}
> > +
> > +int rte_intr_fd_set(struct rte_intr_handle *intr_handle, int fd) {
> > +	if (intr_handle == NULL) {
> > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > +		rte_errno = ENOTSUP;
> > +		goto fail;
> > +	}
> 
> This piece repeats over and over, how about making it a function or a macro,
> like in ethdev?

Ack, will define a macro for the same.

> 
> > +
> > +	intr_handle->fd = fd;
> > +
> > +	return 0;
> > +fail:
> > +	return -rte_errno;
> > +}
> > +
> > +int rte_intr_fd_get(const struct rte_intr_handle *intr_handle) {
> > +	if (intr_handle == NULL) {
> > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > +		rte_errno = ENOTSUP;
> > +		goto fail;
> > +	}
> > +
> > +	return intr_handle->fd;
> > +fail:
> > +	return -1;
> > +}
> 
> Please add a similar pair of experimental API for the "handle" member, it is
> needed for Windows interrupt support I'm working on top of these series
> (IIUC, API changes should be closed by RC1.) If you will be doing this and
> don't like "handle" name, it might be like "dev_handle" or
> "windows_device".

I will add new APIs to get/set the handle. Let's rename it to "windows_handle".


> 
> > [...]
> > +int rte_intr_max_intr_set(struct rte_intr_handle *intr_handle,
> > +				 int max_intr)
> > +{
> > +	if (intr_handle == NULL) {
> > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > +		rte_errno = ENOTSUP;
> > +		goto fail;
> > +	}
> > +
> > +	if (max_intr > intr_handle->nb_intr) {
> > +		RTE_LOG(ERR, EAL, "Max_intr=%d greater than
> > +RTE_MAX_RXTX_INTR_VEC_ID=%d",
> 
> The macro is not used in the comparison, nor should the log mention it.

I will add the check.


> 
> > [...]
> > @@ -420,6 +412,14 @@ EXPERIMENTAL {
> >
> >  	# added in 21.08
> >  	rte_power_monitor_multi; # WINDOWS_NO_EXPORT
> > +
> > +	# added in 21.11
> > +	rte_intr_fd_set;
> > +	rte_intr_fd_get;
> 
> WINDOWS_NO_EXPORT

Ack.

> 
> > +	rte_intr_type_set;
> > +	rte_intr_type_get;
> > +	rte_intr_instance_alloc;
> > +	rte_intr_instance_free;
> >  };
> 
> Do I understand correctly that these exports are needed to allow an
> application to use DPDK callback facilities for its own interrupt sources?

I exported only those APIs which are currently used by the test suite or
example applications; more APIs can be moved from internal to public later
on a need basis.


> If so, I'd suggest that instead we export a simpler set of functions:
> 1. Create/free a handle instance with automatic fixed type selection.
> 2. Trigger an interrupt on the specified handle instance.
> The flow would be that the application listens on whatever it wants, probably
> with OS-specific mechanisms, and just notifies the interrupt thread about
> events to trigger callbacks.
> Because these APIs are experimental we don't need to change it now, just my
> thoughts for the future.

I am sorry but I did not follow your suggestion; can you please explain?

Thanks
Harman
  
Dmitry Kozlyuk Oct. 14, 2021, 5:53 p.m. UTC | #4
2021-10-14 17:15 (UTC+0000), Harman Kalra:
> [...]
> > > +int rte_intr_fd_get(const struct rte_intr_handle *intr_handle) {
> > > +	if (intr_handle == NULL) {
> > > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > > +		rte_errno = ENOTSUP;
> > > +		goto fail;
> > > +	}
> > > +
> > > +	return intr_handle->fd;
> > > +fail:
> > > +	return -1;
> > > +}  
> > 
> > Please add a similar pair of experimental API for the "handle" member, it is
> > needed for Windows interrupt support I'm working on top of these series
> > (IIUC, API changes should be closed by RC1.) If you will be doing this and
> > don't like "handle" name, it might be like "dev_handle" or
> > "windows_device".  
> 
> I will add new APIs to get/set the handle. Let's rename it to "windows_handle".

The name works for me, thanks.
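
For reference, I would expect prototypes roughly like these (sketch only,
exact signatures up to you):

__rte_experimental
int
rte_intr_instance_windows_handle_set(struct rte_intr_handle *intr_handle,
				     void *windows_handle);

__rte_experimental
void *
rte_intr_instance_windows_handle_get(struct rte_intr_handle *intr_handle);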
 
> > > [...]
> > > +int rte_intr_max_intr_set(struct rte_intr_handle *intr_handle,
> > > +				 int max_intr)
> > > +{
> > > +	if (intr_handle == NULL) {
> > > +		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
> > > +		rte_errno = ENOTSUP;
> > > +		goto fail;
> > > +	}
> > > +
> > > +	if (max_intr > intr_handle->nb_intr) {
> > > +		RTE_LOG(ERR, EAL, "Max_intr=%d greater than
> > > +RTE_MAX_RXTX_INTR_VEC_ID=%d",  
> > 
> > The macro is not used in the comparison, nor should the log mention it.
> 
> I will add the check.

What check? I mean that the condition is `max_intr > intr_handle->nb_intr`,
so `RTE_MAX_RXTX_INTR_VEC_ID` is not relevant, `intr_handle->nb_intr` is
dynamic. Probably it should be like this:

	RTE_LOG(ERR, EAL, "Maximum interrupt vector ID (%d) exceeds "
		"the number of available events (%d)\n",
		max_intr, intr_handle->nb_intr);

> [...] 
> > > +	rte_intr_type_set;
> > > +	rte_intr_type_get;
> > > +	rte_intr_instance_alloc;
> > > +	rte_intr_instance_free;
> > >  };  
> > 
> > Do I understand correctly that these exports are needed to allow an
> > application to use DPDK callback facilities for its own interrupt sources?  
> 
> I exported only those APIs which are currently used by the test suite or
> example applications; more APIs can be moved from internal to public later
> on a need basis.
> 
> 
> > If so, I'd suggest that instead we export a simpler set of functions:
> > 1. Create/free a handle instance with automatic fixed type selection.
> > 2. Trigger an interrupt on the specified handle instance.
> > The flow would be that the application listens on whatever it wants, probably
> > with OS-specific mechanisms, and just notifies the interrupt thread about
> > events to trigger callbacks.
> > Because these APIs are experimental we don't need to change it now, just my
> > thoughts for the future.  
> 
> I am sorry but I did not follow your suggestion; can you please explain?

This API is used as follows. The application has a file descriptor
that becomes readable on some event. The programmer doesn't want to create
another thread like EAL interrupt thread, implement thread-safe callback
registration and invocation. They want to reuse DPDK mechanism instead.
So they create an instance of type EXT and give it the descriptor.
In case of the unit test the descriptor is a pipe read end.
In case of a real application it can be a socket, like in mlx5 PMD.
This is often convenient, but not always. An event may be a signal,
or busy-wait end, or it may be Windows with its completely different IO model
(it's "issue an IO, wait for completion" instead of POSIX
"wait for IO readiness, do a blocking IO").
In all these cases the user needs to create a fake pipe (or whatever)
to fit into how the interrupt thread waits for events.
But what the application really needs is to say "there's an event, please run
the callback on this handle". It's a function call that doesn't require any
explicit file descriptors or handles, doesn't rely on any IO model.
How it is implemented depends on the EAL, for POSIX it will probably be
an internal pipe, Windows can use APC as in eal_intr_thread_schedule().
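
In API terms I imagine something like this (names invented on the spot,
nothing of it exists in this patch):

/* Instance for application-defined events; the EAL picks the type and
 * whatever notification primitive (pipe, APC, ...) suits the platform.
 */
struct rte_intr_handle *rte_intr_instance_alloc_ext(void);

/* Called by the application when its event source fired; the interrupt
 * thread then invokes the callbacks registered on this handle.
 */
int rte_intr_trigger(struct rte_intr_handle *intr_handle);
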
Again, I'm thinking out loud here, nothing of this needs to be done now.
  
Thomas Monjalon Oct. 15, 2021, 7:53 a.m. UTC | #5
14/10/2021 19:53, Dmitry Kozlyuk:
> 2021-10-14 17:15 (UTC+0000), Harman Kalra:
> > > > +	rte_intr_type_set;
> > > > +	rte_intr_type_get;
> > > > +	rte_intr_instance_alloc;
> > > > +	rte_intr_instance_free;
> > > >  };  
> > > 
> > > Do I understand correctly that these exports are needed to allow an
> > > application to use DPDK callback facilities for its own interrupt sources?  
> > 
> > I exported only those APIs which are currently used by the test suite or
> > example applications; more APIs can be moved from internal to public later
> > on a need basis.
> > 
> > > If so, I'd suggest that instead we export a simpler set of functions:
> > > 1. Create/free a handle instance with automatic fixed type selection.
> > > 2. Trigger an interrupt on the specified handle instance.
> > > The flow would be that the application listens on whatever it wants, probably
> > > with OS-specific mechanisms, and just notifies the interrupt thread about
> > > events to trigger callbacks.
> > > Because these APIs are experimental we don't need to change it now, just my
> > > thoughts for the future.  
> > 
> > I am sorry but I did not follow your suggestion; can you please explain?
> 
> This API is used as follows. The application has a file descriptor
> that becomes readable on some event. The programmer doesn't want to create
> another thread like EAL interrupt thread, implement thread-safe callback
> registration and invocation. They want to reuse DPDK mechanism instead.
> So they create an instance of type EXT and give it the descriptor.
> In case of the unit test the descriptor is a pipe read end.
> In case of a real application it can be a socket, like in mlx5 PMD.
> This is often convenient, but not always. An event may be a signal,
> or busy-wait end, or it may be Windows with its completely different IO model
> (it's "issue an IO, wait for completion" instead of POSIX
> "wait for IO readiness, do a blocking IO").
> In all these cases the user needs to create a fake pipe (or whatever)
> to fit into how the interrupt thread waits for events.
> But what the application really needs is to say "there's an event, please run
> the callback on this handle". It's a function call that doesn't require any
> explicit file descriptors or handles, doesn't rely on any IO model.
> How it is implemented depends on the EAL, for POSIX it will probably be
> an internal pipe, Windows can use APC as in eal_intr_thread_schedule().
> Again, I'm thinking out loud here, nothing of this needs to be done now.

I like this way of thinking.
  

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index 278e5b3226..c0e7bba4f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -210,6 +210,7 @@  F: app/test/test_memzone.c
 
 Interrupt Subsystem
 M: Harman Kalra <hkalra@marvell.com>
+F: lib/eal/include/rte_epoll.h
 F: lib/eal/*/*interrupts.*
 F: app/test/test_interrupts.c
 
diff --git a/lib/eal/common/eal_common_interrupts.c b/lib/eal/common/eal_common_interrupts.c
new file mode 100644
index 0000000000..9b572a805f
--- /dev/null
+++ b/lib/eal/common/eal_common_interrupts.c
@@ -0,0 +1,470 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include <rte_interrupts.h>
+
+
+struct rte_intr_handle *rte_intr_instance_alloc(uint32_t flags)
+{
+	struct rte_intr_handle *intr_handle;
+	bool mem_allocator;
+
+	mem_allocator = (flags & RTE_INTR_ALLOC_DPDK_ALLOCATOR) != 0;
+	if (mem_allocator)
+		intr_handle = rte_zmalloc(NULL, sizeof(struct rte_intr_handle),
+					  0);
+	else
+		intr_handle = calloc(1, sizeof(struct rte_intr_handle));
+	if (!intr_handle) {
+		RTE_LOG(ERR, EAL, "Fail to allocate intr_handle\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	intr_handle->nb_intr = RTE_MAX_RXTX_INTR_VEC_ID;
+	intr_handle->mem_allocator = mem_allocator;
+
+	return intr_handle;
+}
+
+int rte_intr_instance_copy(struct rte_intr_handle *intr_handle,
+			   const struct rte_intr_handle *src)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (src == NULL) {
+		RTE_LOG(ERR, EAL, "Source interrupt instance unallocated\n");
+		rte_errno = EINVAL;
+		goto fail;
+	}
+
+	intr_handle->fd = src->fd;
+	intr_handle->vfio_dev_fd = src->vfio_dev_fd;
+	intr_handle->type = src->type;
+	intr_handle->max_intr = src->max_intr;
+	intr_handle->nb_efd = src->nb_efd;
+	intr_handle->efd_counter_size = src->efd_counter_size;
+
+	memcpy(intr_handle->efds, src->efds, src->nb_intr);
+	memcpy(intr_handle->elist, src->elist, src->nb_intr);
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_instance_mem_allocator_get(
+				const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		return -ENOTSUP;
+	}
+
+	return intr_handle->mem_allocator;
+}
+
+void rte_intr_instance_free(struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+	}
+
+	if (intr_handle->mem_allocator)
+		rte_free(intr_handle);
+	else
+		free(intr_handle);
+}
+
+int rte_intr_fd_set(struct rte_intr_handle *intr_handle, int fd)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	intr_handle->fd = fd;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_fd_get(const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	return intr_handle->fd;
+fail:
+	return -1;
+}
+
+int rte_intr_type_set(struct rte_intr_handle *intr_handle,
+		      enum rte_intr_handle_type type)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	intr_handle->type = type;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+enum rte_intr_handle_type rte_intr_type_get(
+				const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		return RTE_INTR_HANDLE_UNKNOWN;
+	}
+
+	return intr_handle->type;
+}
+
+int rte_intr_dev_fd_set(struct rte_intr_handle *intr_handle, int fd)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	intr_handle->vfio_dev_fd = fd;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_dev_fd_get(const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	return intr_handle->vfio_dev_fd;
+fail:
+	return -1;
+}
+
+int rte_intr_max_intr_set(struct rte_intr_handle *intr_handle,
+				 int max_intr)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (max_intr > intr_handle->nb_intr) {
+		RTE_LOG(ERR, EAL, "Max_intr=%d greater than RTE_MAX_RXTX_INTR_VEC_ID=%d",
+			max_intr, intr_handle->nb_intr);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	intr_handle->max_intr = max_intr;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_max_intr_get(const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	return intr_handle->max_intr;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_nb_efd_set(struct rte_intr_handle *intr_handle,
+				 int nb_efd)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	intr_handle->nb_efd = nb_efd;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_nb_efd_get(const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	return intr_handle->nb_efd;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_nb_intr_get(const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	return intr_handle->nb_intr;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_efd_counter_size_set(struct rte_intr_handle *intr_handle,
+				 uint8_t efd_counter_size)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	intr_handle->efd_counter_size = efd_counter_size;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_efd_counter_size_get(const struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	return intr_handle->efd_counter_size;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_efds_index_get(const struct rte_intr_handle *intr_handle,
+				   int index)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (index >= intr_handle->nb_intr) {
+		RTE_LOG(ERR, EAL, "Invalid size %d, max limit %d\n", index,
+			intr_handle->nb_intr);
+		rte_errno = EINVAL;
+		goto fail;
+	}
+
+	return intr_handle->efds[index];
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_efds_index_set(struct rte_intr_handle *intr_handle,
+				   int index, int fd)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (index >= intr_handle->nb_intr) {
+		RTE_LOG(ERR, EAL, "Invalid size %d, max limit %d\n", index,
+			intr_handle->nb_intr);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	intr_handle->efds[index] = fd;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+struct rte_epoll_event *rte_intr_elist_index_get(
+				struct rte_intr_handle *intr_handle, int index)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (index >= intr_handle->nb_intr) {
+		RTE_LOG(ERR, EAL, "Invalid size %d, max limit %d\n", index,
+			intr_handle->nb_intr);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	return &intr_handle->elist[index];
+fail:
+	return NULL;
+}
+
+int rte_intr_elist_index_set(struct rte_intr_handle *intr_handle,
+				   int index, struct rte_epoll_event elist)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (index >= intr_handle->nb_intr) {
+		RTE_LOG(ERR, EAL, "Invalid size %d, max limit %d\n", index,
+			intr_handle->nb_intr);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	intr_handle->elist[index] = elist;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_vec_list_alloc(struct rte_intr_handle *intr_handle,
+				   const char *name, int size)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	/* Vector list already allocated */
+	if (intr_handle->intr_vec)
+		return 0;
+
+	if (size > intr_handle->nb_intr) {
+		RTE_LOG(ERR, EAL, "Invalid size %d, max limit %d\n", size,
+		       intr_handle->nb_intr);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	intr_handle->intr_vec = rte_zmalloc(name, size * sizeof(int), 0);
+	if (!intr_handle->intr_vec) {
+		RTE_LOG(ERR, EAL, "Failed to allocate %d intr_vec", size);
+			rte_errno = ENOMEM;
+			goto fail;
+	}
+
+	intr_handle->vec_list_size = size;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_vec_list_index_get(const struct rte_intr_handle *intr_handle,
+				int index)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (!intr_handle->intr_vec) {
+		RTE_LOG(ERR, EAL, "Intr vector list not allocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (index > intr_handle->vec_list_size) {
+		RTE_LOG(ERR, EAL, "Index %d greater than vec list size %d\n",
+			index, intr_handle->vec_list_size);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	return intr_handle->intr_vec[index];
+fail:
+	return -rte_errno;
+}
+
+int rte_intr_vec_list_index_set(struct rte_intr_handle *intr_handle,
+				   int index, int vec)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (!intr_handle->intr_vec) {
+		RTE_LOG(ERR, EAL, "Intr vector list not allocated\n");
+		rte_errno = ENOTSUP;
+		goto fail;
+	}
+
+	if (index > intr_handle->vec_list_size) {
+		RTE_LOG(ERR, EAL, "Index %d greater than vec list size %d\n",
+			index, intr_handle->vec_list_size);
+		rte_errno = ERANGE;
+		goto fail;
+	}
+
+	intr_handle->intr_vec[index] = vec;
+
+	return 0;
+fail:
+	return -rte_errno;
+}
+
+void rte_intr_vec_list_free(struct rte_intr_handle *intr_handle)
+{
+	if (intr_handle == NULL) {
+		RTE_LOG(ERR, EAL, "Interrupt instance unallocated\n");
+		rte_errno = ENOTSUP;
+	}
+
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
+}
diff --git a/lib/eal/common/meson.build b/lib/eal/common/meson.build
index 6d01b0f072..917758cc65 100644
--- a/lib/eal/common/meson.build
+++ b/lib/eal/common/meson.build
@@ -15,6 +15,7 @@  sources += files(
         'eal_common_errno.c',
         'eal_common_fbarray.c',
         'eal_common_hexdump.c',
+        'eal_common_interrupts.c',
         'eal_common_launch.c',
         'eal_common_lcore.c',
         'eal_common_log.c',
diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build
index 88a9eba12f..8e258607b8 100644
--- a/lib/eal/include/meson.build
+++ b/lib/eal/include/meson.build
@@ -19,6 +19,7 @@  headers += files(
         'rte_eal_memconfig.h',
         'rte_eal_trace.h',
         'rte_errno.h',
+        'rte_epoll.h',
         'rte_fbarray.h',
         'rte_hexdump.h',
         'rte_hypervisor.h',
diff --git a/lib/eal/include/rte_eal_interrupts.h b/lib/eal/include/rte_eal_interrupts.h
index 00bcc19b6d..b01e987898 100644
--- a/lib/eal/include/rte_eal_interrupts.h
+++ b/lib/eal/include/rte_eal_interrupts.h
@@ -39,32 +39,6 @@  enum rte_intr_handle_type {
 	RTE_INTR_HANDLE_MAX           /**< count of elements */
 };
 
-#define RTE_INTR_EVENT_ADD            1UL
-#define RTE_INTR_EVENT_DEL            2UL
-
-typedef void (*rte_intr_event_cb_t)(int fd, void *arg);
-
-struct rte_epoll_data {
-	uint32_t event;               /**< event type */
-	void *data;                   /**< User data */
-	rte_intr_event_cb_t cb_fun;   /**< IN: callback fun */
-	void *cb_arg;	              /**< IN: callback arg */
-};
-
-enum {
-	RTE_EPOLL_INVALID = 0,
-	RTE_EPOLL_VALID,
-	RTE_EPOLL_EXEC,
-};
-
-/** interrupt epoll event obj, taken by epoll_event.ptr */
-struct rte_epoll_event {
-	uint32_t status;           /**< OUT: event status */
-	int fd;                    /**< OUT: event fd */
-	int epfd;       /**< OUT: epoll instance the ev associated with */
-	struct rte_epoll_data epdata;
-};
-
 /** Handle for interrupts. */
 struct rte_intr_handle {
 	RTE_STD_C11
@@ -81,189 +55,18 @@  struct rte_intr_handle {
 		};
 		void *handle; /**< device driver handle (Windows) */
 	};
+	bool mem_allocator;
 	enum rte_intr_handle_type type;  /**< handle type */
 	uint32_t max_intr;             /**< max interrupt requested */
 	uint32_t nb_efd;               /**< number of available efd(event fd) */
 	uint8_t efd_counter_size;      /**< size of efd counter, used for vdev */
+	uint16_t nb_intr;
+		/**< Max vector count, default RTE_MAX_RXTX_INTR_VEC_ID */
 	int efds[RTE_MAX_RXTX_INTR_VEC_ID];  /**< intr vectors/efds mapping */
 	struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID];
-				       /**< intr vector epoll event */
+						/**< intr vector epoll event */
+	uint16_t vec_list_size;
 	int *intr_vec;                 /**< intr vector number array */
 };
 
-#define RTE_EPOLL_PER_THREAD        -1  /**< to hint using per thread epfd */
-
-/**
- * It waits for events on the epoll instance.
- * Retries if signal received.
- *
- * @param epfd
- *   Epoll instance fd on which the caller wait for events.
- * @param events
- *   Memory area contains the events that will be available for the caller.
- * @param maxevents
- *   Up to maxevents are returned, must greater than zero.
- * @param timeout
- *   Specifying a timeout of -1 causes a block indefinitely.
- *   Specifying a timeout equal to zero cause to return immediately.
- * @return
- *   - On success, returns the number of available event.
- *   - On failure, a negative value.
- */
-int
-rte_epoll_wait(int epfd, struct rte_epoll_event *events,
-	       int maxevents, int timeout);
-
-/**
- * It waits for events on the epoll instance.
- * Does not retry if signal received.
- *
- * @param epfd
- *   Epoll instance fd on which the caller wait for events.
- * @param events
- *   Memory area contains the events that will be available for the caller.
- * @param maxevents
- *   Up to maxevents are returned, must greater than zero.
- * @param timeout
- *   Specifying a timeout of -1 causes a block indefinitely.
- *   Specifying a timeout equal to zero cause to return immediately.
- * @return
- *   - On success, returns the number of available event.
- *   - On failure, a negative value.
- */
-__rte_experimental
-int
-rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
-	       int maxevents, int timeout);
-
-/**
- * It performs control operations on epoll instance referred by the epfd.
- * It requests that the operation op be performed for the target fd.
- *
- * @param epfd
- *   Epoll instance fd on which the caller perform control operations.
- * @param op
- *   The operation be performed for the target fd.
- * @param fd
- *   The target fd on which the control ops perform.
- * @param event
- *   Describes the object linked to the fd.
- *   Note: The caller must take care the object deletion after CTL_DEL.
- * @return
- *   - On success, zero.
- *   - On failure, a negative value.
- */
-int
-rte_epoll_ctl(int epfd, int op, int fd,
-	      struct rte_epoll_event *event);
-
-/**
- * The function returns the per thread epoll instance.
- *
- * @return
- *   epfd the epoll instance referred to.
- */
-int
-rte_intr_tls_epfd(void);
-
-/**
- * @param intr_handle
- *   Pointer to the interrupt handle.
- * @param epfd
- *   Epoll instance fd which the intr vector associated to.
- * @param op
- *   The operation be performed for the vector.
- *   Operation type of {ADD, DEL}.
- * @param vec
- *   RX intr vector number added to the epoll instance wait list.
- * @param data
- *   User raw data.
- * @return
- *   - On success, zero.
- *   - On failure, a negative value.
- */
-int
-rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
-		int epfd, int op, unsigned int vec, void *data);
-
-/**
- * It deletes registered eventfds.
- *
- * @param intr_handle
- *   Pointer to the interrupt handle.
- */
-void
-rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle);
-
-/**
- * It enables the packet I/O interrupt event if it's necessary.
- * It creates event fd for each interrupt vector when MSIX is used,
- * otherwise it multiplexes a single event fd.
- *
- * @param intr_handle
- *   Pointer to the interrupt handle.
- * @param nb_efd
- *   Number of interrupt vector trying to enable.
- *   The value 0 is not allowed.
- * @return
- *   - On success, zero.
- *   - On failure, a negative value.
- */
-int
-rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd);
-
-/**
- * It disables the packet I/O interrupt event.
- * It deletes registered eventfds and closes the open fds.
- *
- * @param intr_handle
- *   Pointer to the interrupt handle.
- */
-void
-rte_intr_efd_disable(struct rte_intr_handle *intr_handle);
-
-/**
- * The packet I/O interrupt on datapath is enabled or not.
- *
- * @param intr_handle
- *   Pointer to the interrupt handle.
- */
-int
-rte_intr_dp_is_en(struct rte_intr_handle *intr_handle);
-
-/**
- * The interrupt handle instance allows other causes or not.
- * Other causes stand for any none packet I/O interrupts.
- *
- * @param intr_handle
- *   Pointer to the interrupt handle.
- */
-int
-rte_intr_allow_others(struct rte_intr_handle *intr_handle);
-
-/**
- * The multiple interrupt vector capability of interrupt handle instance.
- * It returns zero if no multiple interrupt vector support.
- *
- * @param intr_handle
- *   Pointer to the interrupt handle.
- */
-int
-rte_intr_cap_multiple(struct rte_intr_handle *intr_handle);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
- * @internal
- * Check if currently executing in interrupt context
- *
- * @return
- *  - non zero in case of interrupt context
- *  - zero in case of process context
- */
-__rte_experimental
-int
-rte_thread_is_intr(void);
-
 #endif /* _RTE_EAL_INTERRUPTS_H_ */
diff --git a/lib/eal/include/rte_epoll.h b/lib/eal/include/rte_epoll.h
new file mode 100644
index 0000000000..56b7b6bad6
--- /dev/null
+++ b/lib/eal/include/rte_epoll.h
@@ -0,0 +1,118 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell International Ltd.
+ */
+
+#ifndef __RTE_EPOLL_H__
+#define __RTE_EPOLL_H__
+
+/**
+ * @file
+ * The rte_epoll provides interfaces functions to add delete events,
+ * wait poll for an event.
+ */
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_INTR_EVENT_ADD            1UL
+#define RTE_INTR_EVENT_DEL            2UL
+
+typedef void (*rte_intr_event_cb_t)(int fd, void *arg);
+
+struct rte_epoll_data {
+	uint32_t event;               /**< event type */
+	void *data;                   /**< User data */
+	rte_intr_event_cb_t cb_fun;   /**< IN: callback fun */
+	void *cb_arg;	              /**< IN: callback arg */
+};
+
+enum {
+	RTE_EPOLL_INVALID = 0,
+	RTE_EPOLL_VALID,
+	RTE_EPOLL_EXEC,
+};
+
+/** interrupt epoll event obj, taken by epoll_event.ptr */
+struct rte_epoll_event {
+	uint32_t status;           /**< OUT: event status */
+	int fd;                    /**< OUT: event fd */
+	int epfd;       /**< OUT: epoll instance the ev associated with */
+	struct rte_epoll_data epdata;
+};
+
+#define RTE_EPOLL_PER_THREAD        -1  /**< to hint using per thread epfd */
+
+/**
+ * It waits for events on the epoll instance.
+ * Retries if signal received.
+ *
+ * @param epfd
+ *   Epoll instance fd on which the caller wait for events.
+ * @param events
+ *   Memory area contains the events that will be available for the caller.
+ * @param maxevents
+ *   Up to maxevents are returned, must greater than zero.
+ * @param timeout
+ *   Specifying a timeout of -1 causes a block indefinitely.
+ *   Specifying a timeout equal to zero cause to return immediately.
+ * @return
+ *   - On success, returns the number of available event.
+ *   - On failure, a negative value.
+ */
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+	       int maxevents, int timeout);
+
+/**
+ * It waits for events on the epoll instance.
+ * Does not retry if signal received.
+ *
+ * @param epfd
+ *   Epoll instance fd on which the caller wait for events.
+ * @param events
+ *   Memory area contains the events that will be available for the caller.
+ * @param maxevents
+ *   Up to maxevents are returned, must greater than zero.
+ * @param timeout
+ *   Specifying a timeout of -1 causes a block indefinitely.
+ *   Specifying a timeout equal to zero cause to return immediately.
+ * @return
+ *   - On success, returns the number of available event.
+ *   - On failure, a negative value.
+ */
+__rte_experimental
+int
+rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
+	       int maxevents, int timeout);
+
+/**
+ * It performs control operations on epoll instance referred by the epfd.
+ * It requests that the operation op be performed for the target fd.
+ *
+ * @param epfd
+ *   Epoll instance fd on which the caller perform control operations.
+ * @param op
+ *   The operation be performed for the target fd.
+ * @param fd
+ *   The target fd on which the control ops perform.
+ * @param event
+ *   Describes the object linked to the fd.
+ *   Note: The caller must take care the object deletion after CTL_DEL.
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+int
+rte_epoll_ctl(int epfd, int op, int fd,
+	      struct rte_epoll_event *event);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EPOLL_H__ */
diff --git a/lib/eal/include/rte_interrupts.h b/lib/eal/include/rte_interrupts.h
index cc3bf45d8c..db830907fb 100644
--- a/lib/eal/include/rte_interrupts.h
+++ b/lib/eal/include/rte_interrupts.h
@@ -5,8 +5,11 @@ 
 #ifndef _RTE_INTERRUPTS_H_
 #define _RTE_INTERRUPTS_H_
 
+#include <stdbool.h>
+
 #include <rte_common.h>
 #include <rte_compat.h>
+#include <rte_epoll.h>
 
 /**
  * @file
@@ -22,6 +25,16 @@  extern "C" {
 /** Interrupt handle */
 struct rte_intr_handle;
 
+/** Interrupt instance allocation flags
+ * @see rte_intr_instance_alloc
+ */
+/** Allocate interrupt instance using DPDK memory management APIs */
+#define RTE_INTR_ALLOC_DPDK_ALLOCATOR	0x00000001
+
+#define RTE_INTR_HANDLE_DEFAULT_SIZE  1
+
+#include "rte_eal_interrupts.h"
+
 /** Function to be registered for the specific interrupt */
 typedef void (*rte_intr_callback_fn)(void *cb_arg);
 
@@ -32,8 +45,6 @@  typedef void (*rte_intr_callback_fn)(void *cb_arg);
 typedef void (*rte_intr_unregister_callback_fn)(struct rte_intr_handle *intr_handle,
 						void *cb_arg);
 
-#include "rte_eal_interrupts.h"
-
 /**
  * It registers the callback for the specific interrupt. Multiple
  * callbacks can be registered at the same time.
@@ -163,6 +174,605 @@  int rte_intr_disable(const struct rte_intr_handle *intr_handle);
 __rte_experimental
 int rte_intr_ack(const struct rte_intr_handle *intr_handle);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if currently executing in interrupt context
+ *
+ * @return
+ *  - non zero in case of interrupt context
+ *  - zero in case of process context
+ */
+__rte_experimental
+int
+rte_thread_is_intr(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * It allocates memory for interrupt instance. API takes flag as an argument
+ * which define from where memory should be allocated i.e. using DPDK memory
+ * management library APIs or normal heap allocation.
+ * Default memory allocation for event fds and event list array is done which
+ * can be realloced later as per the requirement.
+ *
+ * This function should be called from application or driver, before calling any
+ * of the interrupt APIs.
+ *
+ * @param flags
+ *  Memory allocation from DPDK allocator or normal allocation
+ *
+ * @return
+ *  - On success, address of first interrupt handle.
+ *  - On failure, NULL.
+ */
+__rte_experimental
+struct rte_intr_handle *
+rte_intr_instance_alloc(uint32_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API is used to free the memory allocated for event fds. event lists
+ * and interrupt handle array.
+ *
+ * @param intr_handle
+ *  Base address of interrupt handle array.
+ *
+ */
+__rte_experimental
+void
+rte_intr_instance_free(struct rte_intr_handle *intr_handle);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API is used to set the fd field of interrupt handle with user provided
+ * file descriptor.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param fd
+ *  file descriptor value provided by user.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_experimental
+int
+rte_intr_fd_set(struct rte_intr_handle *intr_handle, int fd);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Returns the fd field of the given interrupt handle instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, fd field.
+ *  - On failure, a negative value.
+ */
+__rte_experimental
+int
+rte_intr_fd_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API is used to set the type field of interrupt handle with user provided
+ * interrupt type.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param type
+ *  interrupt type
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_experimental
+int
+rte_intr_type_set(struct rte_intr_handle *intr_handle,
+		  enum rte_intr_handle_type type);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Returns the type field of the given interrupt handle instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, interrupt type
+ *  - On failure, RTE_INTR_HANDLE_UNKNOWN.
+ */
+__rte_experimental
+enum rte_intr_handle_type
+rte_intr_type_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * The function returns the per thread epoll instance.
+ *
+ * @return
+ *   epfd the epoll instance referred to.
+ */
+__rte_internal
+int
+rte_intr_tls_epfd(void);
+
+/**
+ * @internal
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ * @param epfd
+ *   Epoll instance fd which the intr vector associated to.
+ * @param op
+ *   The operation be performed for the vector.
+ *   Operation type of {ADD, DEL}.
+ * @param vec
+ *   RX intr vector number added to the epoll instance wait list.
+ * @param data
+ *   User raw data.
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
+		int epfd, int op, unsigned int vec, void *data);
+
+/**
+ * @internal
+ * It deletes registered eventfds.
+ *
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ */
+__rte_internal
+void
+rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * It enables the packet I/O interrupt event if it's necessary.
+ * It creates event fd for each interrupt vector when MSIX is used,
+ * otherwise it multiplexes a single event fd.
+ *
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ * @param nb_efd
+ *   Number of interrupt vector trying to enable.
+ *   The value 0 is not allowed.
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd);
+
+/**
+ * @internal
+ * It disables the packet I/O interrupt event.
+ * It deletes registered eventfds and closes the open fds.
+ *
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ */
+__rte_internal
+void
+rte_intr_efd_disable(struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * The packet I/O interrupt on datapath is enabled or not.
+ *
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ */
+__rte_internal
+int
+rte_intr_dp_is_en(struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * The interrupt handle instance allows other causes or not.
+ * Other causes stand for any none packet I/O interrupts.
+ *
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ */
+__rte_internal
+int
+rte_intr_allow_others(struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * The multiple interrupt vector capability of interrupt handle instance.
+ * It returns zero if no multiple interrupt vector support.
+ *
+ * @param intr_handle
+ *   Pointer to the interrupt handle.
+ */
+__rte_internal
+int
+rte_intr_cap_multiple(struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * This API is used to populate interrupt handle, with src handler fields.
+ *
+ * @param intr_handle
+ *  Start address of interrupt handles
+ * @param src
+ *  Source interrupt handle to be cloned.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_instance_copy(struct rte_intr_handle *intr_handle,
+		       const struct rte_intr_handle *src);
+
+/**
+ * @internal
+ * This API is used to set the device fd field of interrupt handle with user
+ * provided dev fd. Device fd corresponds to VFIO device fd or UIO config fd.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param fd
+ *  interrupt type
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_dev_fd_set(struct rte_intr_handle *intr_handle, int fd);
+
+/**
+ * @internal
+ * Returns the device fd field of the given interrupt handle instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, dev fd.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_dev_fd_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * This API is used to set the max intr field of interrupt handle with user
+ * provided max intr value.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param max_intr
+ *  interrupt type
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_max_intr_set(struct rte_intr_handle *intr_handle, int max_intr);
+
+/**
+ * @internal
+ * Returns the max intr field of the given interrupt handle instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, max intr.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_max_intr_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * This API is used to set the no of event fd field of interrupt handle with
+ * user provided available event file descriptor value.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param nb_efd
+ *  Available event fd
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_nb_efd_set(struct rte_intr_handle *intr_handle, int nb_efd);
+
+/**
+ * @internal
+ * Returns the no of available event fd field of the given interrupt handle
+ * instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, nb_efd
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_nb_efd_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * Returns the no of interrupt vector field of the given interrupt handle
+ * instance. This field is to configured on device probe time, and based on
+ * this value efds and elist arrays are dynamically allocated. By default
+ * this value is set to RTE_MAX_RXTX_INTR_VEC_ID.
+ * For eg. in case of PCI device, its msix size is queried and efds/elist
+ * arrays are allocated accordingly.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, nb_intr
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_nb_intr_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * This API is used to set the event fd counter size field of interrupt handle
+ * with user provided efd counter size.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param efd_counter_size
+ *  size of efd counter, used for vdev
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_efd_counter_size_set(struct rte_intr_handle *intr_handle,
+			      uint8_t efd_counter_size);
+
+/**
+ * @internal
+ * Returns the event fd counter size field of the given interrupt handle
+ * instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, efd_counter_size
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_efd_counter_size_get(const struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * This API is used to set the event fd array index with the given fd.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param index
+ *  efds array index to be set
+ * @param fd
+ *  event fd
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_efds_index_set(struct rte_intr_handle *intr_handle, int index, int fd);
+
+/**
+ * @internal
+ * Returns the fd value of event fds array at a given index.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param index
+ *  efds array index to be returned
+ *
+ * @return
+ *  - On success, fd
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_efds_index_get(const struct rte_intr_handle *intr_handle, int index);
+
+/**
+ * @internal
+ * This API is used to set the event list array index with the given elist
+ * instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param index
+ *  elist array index to be set
+ * @param elist
+ *  event list instance of struct rte_epoll_event
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_elist_index_set(struct rte_intr_handle *intr_handle, int index,
+			 struct rte_epoll_event elist);
+
+/**
+ * @internal
+ * Returns the address of elist instance of event list array at a given index.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param index
+ *  elist array index to be returned
+ *
+ * @return
+ *  - On success, elist
+ *  - On failure, a negative value.
+ */
+__rte_internal
+struct rte_epoll_event *
+rte_intr_elist_index_get(struct rte_intr_handle *intr_handle, int index);
+
+/**
+ * @internal
+ * Allocates the memory of interrupt vector list array, with size defining the
+ * no of elements required in the array.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param name
+ *  Name assigned to the allocation, or NULL.
+ * @param size
+ * No of element required in the array.
+ *
+ * @return
+ *  - On success, zero
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_vec_list_alloc(struct rte_intr_handle *intr_handle, const char *name,
+			int size);
+
+/**
+ * @internal
+ * Sets the vector value at given index of interrupt vector list field of given
+ * interrupt handle.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param index
+ *  intr_vec array index to be set
+ * @param vec
+ *  Interrupt vector value.
+ *
+ * @return
+ *  - On success, zero
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_vec_list_index_set(struct rte_intr_handle *intr_handle, int index,
+			    int vec);
+
+/**
+ * @internal
+ * Returns the vector value at the given index of interrupt vector list array.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param index
+ *  intr_vec array index to be returned
+ *
+ * @return
+ *  - On success, interrupt vector
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_vec_list_index_get(const struct rte_intr_handle *intr_handle,
+			    int index);
+
+/**
+ * @internal
+ * Freeing the memory allocated for interrupt vector list array.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, zero
+ *  - On failure, a negative value.
+ */
+__rte_internal
+void
+rte_intr_vec_list_free(struct rte_intr_handle *intr_handle);
+
+/**
+ * @internal
+ * Reallocates the size efds and elist array based on size provided by user.
+ * By default efds and elist array are allocated with default size
+ * RTE_MAX_RXTX_INTR_VEC_ID on interrupt handle array creation. Later on device
+ * probe, device may have capability of more interrupts than
+ * RTE_MAX_RXTX_INTR_VEC_ID. Hence using this API, PMDs can reallocate the
+ * arrays as per the max interrupts capability of device.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param size
+ *  efds and elist array size.
+ *
+ * @return
+ *  - On success, zero
+ *  - On failure, a negative value.
+ */
+__rte_internal
+int
+rte_intr_event_list_update(struct rte_intr_handle *intr_handle, int size);
+
+/**
+ * @internal
+ * This API returns the sources from where memory is allocated for interrupt
+ * instance.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, 1 corresponds to memory allocated via DPDK allocator APIs
+ *  - On success, 0 corresponds to memory allocated from traditional heap.
+ *  - On failure, negative value.
+ */
+__rte_internal
+int
+rte_intr_instance_mem_allocator_get(const struct rte_intr_handle *intr_handle);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 38f7de83e1..4c11202faf 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -109,18 +109,10 @@  DPDK_22 {
 	rte_hexdump;
 	rte_hypervisor_get;
 	rte_hypervisor_get_name; # WINDOWS_NO_EXPORT
-	rte_intr_allow_others;
 	rte_intr_callback_register;
 	rte_intr_callback_unregister;
-	rte_intr_cap_multiple;
-	rte_intr_disable;
-	rte_intr_dp_is_en;
-	rte_intr_efd_disable;
-	rte_intr_efd_enable;
 	rte_intr_enable;
-	rte_intr_free_epoll_fd;
-	rte_intr_rx_ctl;
-	rte_intr_tls_epfd;
+	rte_intr_disable;
 	rte_keepalive_create; # WINDOWS_NO_EXPORT
 	rte_keepalive_dispatch_pings; # WINDOWS_NO_EXPORT
 	rte_keepalive_mark_alive; # WINDOWS_NO_EXPORT
@@ -420,6 +412,14 @@  EXPERIMENTAL {
 
 	# added in 21.08
 	rte_power_monitor_multi; # WINDOWS_NO_EXPORT
+
+	# added in 21.11
+	rte_intr_fd_set;
+	rte_intr_fd_get;
+	rte_intr_type_set;
+	rte_intr_type_get;
+	rte_intr_instance_alloc;
+	rte_intr_instance_free;
 };
 
 INTERNAL {
@@ -430,4 +430,32 @@  INTERNAL {
 	rte_mem_map;
 	rte_mem_page_size;
 	rte_mem_unmap;
+	rte_intr_cap_multiple;
+	rte_intr_dp_is_en;
+	rte_intr_efd_disable;
+	rte_intr_efd_enable;
+	rte_intr_free_epoll_fd;
+	rte_intr_rx_ctl;
+	rte_intr_allow_others;
+	rte_intr_tls_epfd;
+	rte_intr_dev_fd_set;
+	rte_intr_dev_fd_get;
+	rte_intr_instance_copy;
+	rte_intr_event_list_update;
+	rte_intr_max_intr_set;
+	rte_intr_max_intr_get;
+	rte_intr_nb_efd_set;
+	rte_intr_nb_efd_get;
+	rte_intr_nb_intr_get;
+	rte_intr_efds_index_set;
+	rte_intr_efds_index_get;
+	rte_intr_elist_index_set;
+	rte_intr_elist_index_get;
+	rte_intr_efd_counter_size_set;
+	rte_intr_efd_counter_size_get;
+	rte_intr_vec_list_alloc;
+	rte_intr_vec_list_index_set;
+	rte_intr_vec_list_index_get;
+	rte_intr_vec_list_free;
+	rte_intr_instance_mem_allocator_get;
 };