[v7,4/5] net/enetfec: add Rx/Tx support

Message ID 20211103192045.22240-5-apeksha.gupta@nxp.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ferruh Yigit
Headers
Series drivers/net: add NXP ENETFEC driver |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Apeksha Gupta Nov. 3, 2021, 7:20 p.m. UTC
  This patch adds burst enqueue and dequeue operations to the enetfec
PMD. Loopback mode is also added, compile time flag 'ENETFEC_LOOPBACK' is
used to enable this feature. By default loopback mode is disabled.
Basic features added like promiscuous enable, basic stats.

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
 doc/guides/nics/enetfec.rst          |   2 +
 doc/guides/nics/features/enetfec.ini |   2 +
 drivers/net/enetfec/enet_ethdev.c    | 189 +++++++++++-
 drivers/net/enetfec/enet_ethdev.h    |  24 ++
 drivers/net/enetfec/enet_rxtx.c      | 445 +++++++++++++++++++++++++++
 drivers/net/enetfec/meson.build      |   3 +-
 6 files changed, 663 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/enetfec/enet_rxtx.c
  

Comments

Ferruh Yigit Nov. 4, 2021, 6:28 p.m. UTC | #1
On 11/3/2021 7:20 PM, Apeksha Gupta wrote:
> This patch adds burst enqueue and dequeue operations to the enetfec
> PMD. Loopback mode is also added, compile time flag 'ENETFEC_LOOPBACK' is
> used to enable this feature. By default loopback mode is disabled.
> Basic features added like promiscuous enable, basic stats.
> 
> Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>

<...>

> +static int
> +enetfec_eth_link_update(struct rte_eth_dev *dev,
> +			int wait_to_complete __rte_unused)
> +{
> +	struct rte_eth_link link;
> +	unsigned int lstatus = 1;
> +
> +	if (dev == NULL) {

'dev' can't be null for a dev_ops,
unless it is called internally in PMD which seems not the case here.

> +		ENETFEC_PMD_ERR("Invalid device in link_update.");
> +		return 0;
> +	}
> +
> +	memset(&link, 0, sizeof(struct rte_eth_link));
> +
> +	link.link_status = lstatus;
> +	link.link_speed = ETH_SPEED_NUM_1G;

Isn't there an actual way to get real link status from device?

> +
> +	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
> +			 "Up");
> +
> +	return rte_eth_linkstatus_set(dev, &link);
> +}
> +

<...>

> @@ -501,6 +658,21 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
>   		fep->bd_addr_p = fep->bd_addr_p + bdsize;
>   	}
>   
> +	/* Copy the station address into the dev structure, */
> +	dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
> +	if (dev->data->mac_addrs == NULL) {
> +		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
> +			ETHER_ADDR_LEN);
> +		rc = -ENOMEM;
> +		goto err;
> +	}
> +
> +	/*
> +	 * Set default mac address
> +	 */
> +	enetfec_set_mac_address(dev, &macaddr);

In each device start, a different MAC address is set by 'enetfec_restart()',
I also put some comment there, but there seems two different MAC address set
in two different parts of the driver.

> +
> +	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
>   	rc = enetfec_eth_init(dev);
>   	if (rc)
>   		goto failed_init;
> @@ -509,6 +681,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
>   
>   failed_init:
>   	ENETFEC_PMD_ERR("Failed to init");
> +err:
> +	rte_eth_dev_release_port(dev);
>   	return rc;
>   }
>   
> @@ -516,6 +690,8 @@ static int
>   pmd_enetfec_remove(struct rte_vdev_device *vdev)
>   {
>   	struct rte_eth_dev *eth_dev = NULL;
> +	struct enetfec_private *fep;
> +	struct enetfec_priv_rx_q *rxq;
>   	int ret;
>   
>   	/* find the ethdev entry */
> @@ -523,11 +699,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
>   	if (eth_dev == NULL)
>   		return -ENODEV;
>   
> +	fep = eth_dev->data->dev_private;
> +	/* Free descriptor base of first RX queue as it was configured
> +	 * first in enetfec_eth_init().
> +	 */
> +	rxq = fep->rx_queues[0];
> +	rte_free(rxq->bd.base);
> +	enet_free_queue(eth_dev);
> +	enetfec_eth_stop(eth_dev);
> +
>   	ret = rte_eth_dev_release_port(eth_dev);
>   	if (ret != 0)
>   		return -EINVAL;
>   
>   	ENETFEC_PMD_INFO("Release enetfec sw device");
> +	munmap(fep->hw_baseaddr_v, fep->cbus_size);

instead of unmap directly here, what about having a function in 'enet_uio.c',
and call that cleanup function from here?

> +
>   	return 0;
>   }
>   
> diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
> index 36202ba6c7..e48f958ad9 100644
> --- a/drivers/net/enetfec/enet_ethdev.h
> +++ b/drivers/net/enetfec/enet_ethdev.h
> @@ -7,6 +7,11 @@
>   
>   #include <rte_ethdev.h>
>   
> +#define ETHER_ADDR_LEN		6

Below defines 'ETH_ALEN', seems for same reason.

And in DPDK we already have 'RTE_ETHER_ADDR_LEN'.

To prevent all this redundancy, why not drop 'ETH_ALEN' & 'ETHER_ADDR_LEN',
and just use 'RTE_ETHER_ADDR_LEN'.

> +#define BD_LEN			49152
> +#define ENETFEC_TX_FR_SIZE	2048
> +#define ETH_HLEN		RTE_ETHER_HDR_LEN
> +
>   /* full duplex or half duplex */
>   #define HALF_DUPLEX             0x00
>   #define FULL_DUPLEX             0x01
> @@ -19,6 +24,20 @@
>   #define ETH_ALEN		RTE_ETHER_ADDR_LEN
>   
>   #define __iomem
> +#if defined(RTE_ARCH_ARM)
> +#if defined(RTE_ARCH_64)
> +#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
> +#define dcbf_64(p) dcbf(p)
> +
> +#else /* RTE_ARCH_32 */
> +#define dcbf(p) RTE_SET_USED(p)
> +#define dcbf_64(p) dcbf(p)
> +#endif
> +
> +#else
> +#define dcbf(p) RTE_SET_USED(p)
> +#define dcbf_64(p) dcbf(p)
> +#endif
>   
>   /*
>    * ENETFEC with AVB IP can support maximum 3 rx and tx queues.
> @@ -142,4 +161,9 @@ enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
>   	return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
>   }
>   
> +uint16_t enetfec_recv_pkts(void *rxq1, __rte_unused struct rte_mbuf **rx_pkts,
> +		uint16_t nb_pkts);
> +uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> +		uint16_t nb_pkts);
> +
>   #endif /*__ENETFEC_ETHDEV_H__*/
> diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
> new file mode 100644
> index 0000000000..6ac4624553
> --- /dev/null
> +++ b/drivers/net/enetfec/enet_rxtx.c
> @@ -0,0 +1,445 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 NXP
> + */
> +
> +#include <signal.h>
> +#include <rte_mbuf.h>
> +#include <rte_io.h>
> +#include "enet_regs.h"
> +#include "enet_ethdev.h"
> +#include "enet_pmd_logs.h"
> +
> +#define ENETFEC_LOOPBACK	0
> +#define ENETFEC_DUMP		0

There was a request to convert them into devargs, again seems
silently ignored, copy/paste from previous version:

Instead of compile time flags, why not convert them to devargs so
they can be updated without recompile?
This also make sure all code is enabled and prevent possible dead
code by time.

> +
> +#if ENETFEC_DUMP
> +static void
> +enet_dump(struct enetfec_priv_tx_q *txq)
> +{
> +	struct bufdesc *bdp;
> +	int index = 0;
> +
> +	ENETFEC_PMD_DEBUG("TX ring dump\n");
> +	ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
> +
> +	bdp = txq->bd.base;
> +	do {
> +		ENETFEC_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
> +			index,
> +			bdp == txq->bd.cur ? 'S' : ' ',
> +			bdp == txq->dirty_tx ? 'H' : ' ',
> +			rte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),
> +			rte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),
> +			rte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),
> +			txq->tx_mbuf[index]);
> +		bdp = enet_get_nextdesc(bdp, &txq->bd);
> +		index++;
> +	} while (bdp != txq->bd.base);
> +}
> +
> +static void
> +enet_dump_rx(struct enetfec_priv_rx_q *rxq)
> +{
> +	struct bufdesc *bdp;
> +	int index = 0;
> +
> +	ENETFEC_PMD_DEBUG("RX ring dump\n");
> +	ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
> +
> +	bdp = rxq->bd.base;
> +	do {
> +		ENETFEC_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
> +			index,
> +			bdp == rxq->bd.cur ? 'S' : ' ',
> +			rte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),
> +			rte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),
> +			rte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),
> +			rxq->rx_mbuf[index]);
> +		rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
> +				rxq->rx_mbuf[index]->pkt_len);
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +		index++;
> +	} while (bdp != rxq->bd.base);
> +}
> +#endif
> +
> +#if ENETFEC_LOOPBACK
> +static volatile bool lb_quit;
> +
> +static void fec_signal_handler(int signum)
> +{
> +	if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
> +		ENETFEC_PMD_INFO("\n\n %s: Signal %d received, preparing to exit...\n",
> +				__func__, signum);
> +		lb_quit = true;
> +	}
> +}
> +

Again, another comment ignored from the previous version: we shouldn't have
signals handled by driver, that is application task.

I am stopping reviewing here, there are too many things from previous
version just ignored, can you please check/answer comments on the
previous version of the set?
  
Apeksha Gupta Nov. 9, 2021, 4:20 p.m. UTC | #2
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Thursday, November 4, 2021 11:58 PM
> To: Apeksha Gupta <apeksha.gupta@nxp.com>; david.marchand@redhat.com;
> andrew.rybchenko@oktetlabs.ru
> Cc: dev@dpdk.org; Sachin Saxena <sachin.saxena@nxp.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>
> Subject: [EXT] Re: [PATCH v7 4/5] net/enetfec: add Rx/Tx support
> 
> Caution: EXT Email
> 
> On 11/3/2021 7:20 PM, Apeksha Gupta wrote:
> > This patch adds burst enqueue and dequeue operations to the enetfec
> > PMD. Loopback mode is also added, compile time flag 'ENETFEC_LOOPBACK'
> is
> > used to enable this feature. By default loopback mode is disabled.
> > Basic features added like promiscuous enable, basic stats.
> >
> > Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> > Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
> 
> <...>
> 
> > +static int
> > +enetfec_eth_link_update(struct rte_eth_dev *dev,
> > +                     int wait_to_complete __rte_unused)
> > +{
> > +     struct rte_eth_link link;
> > +     unsigned int lstatus = 1;
> > +
> > +     if (dev == NULL) {
> 
> 'dev' can't be null for a dev_ops,
> unless it is called internally in PMD which seems not the case here.
[Apeksha] okay.

> 
> > +             ENETFEC_PMD_ERR("Invalid device in link_update.");
> > +             return 0;
> > +     }
> > +
> > +     memset(&link, 0, sizeof(struct rte_eth_link));
> > +
> > +     link.link_status = lstatus;
> > +     link.link_speed = ETH_SPEED_NUM_1G;
> 
> Isn't there an actual way to get real link status from device?
[Apeksha] 'Get link status' feature support is yet to be implemented.

> 
> > +
> > +     ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
> > +                      "Up");
> > +
> > +     return rte_eth_linkstatus_set(dev, &link);
> > +}
> > +
> 
> <...>
> 
> > @@ -501,6 +658,21 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
> >               fep->bd_addr_p = fep->bd_addr_p + bdsize;
> >       }
> >
> > +     /* Copy the station address into the dev structure, */
> > +     dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
> > +     if (dev->data->mac_addrs == NULL) {
> > +             ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC
> addresses",
> > +                     ETHER_ADDR_LEN);
> > +             rc = -ENOMEM;
> > +             goto err;
> > +     }
> > +
> > +     /*
> > +      * Set default mac address
> > +      */
> > +     enetfec_set_mac_address(dev, &macaddr);
> 
> In each device start, a different MAC address is set by 'enetfec_restart()',
> I also put some comment there, but there seems two different MAC address set
> in two different parts of the driver.
> 
> > +
> > +     fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
> >       rc = enetfec_eth_init(dev);
> >       if (rc)
> >               goto failed_init;
> > @@ -509,6 +681,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
> >
> >   failed_init:
> >       ENETFEC_PMD_ERR("Failed to init");
> > +err:
> > +     rte_eth_dev_release_port(dev);
> >       return rc;
> >   }
> >
> > @@ -516,6 +690,8 @@ static int
> >   pmd_enetfec_remove(struct rte_vdev_device *vdev)
> >   {
> >       struct rte_eth_dev *eth_dev = NULL;
> > +     struct enetfec_private *fep;
> > +     struct enetfec_priv_rx_q *rxq;
> >       int ret;
> >
> >       /* find the ethdev entry */
> > @@ -523,11 +699,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
> >       if (eth_dev == NULL)
> >               return -ENODEV;
> >
> > +     fep = eth_dev->data->dev_private;
> > +     /* Free descriptor base of first RX queue as it was configured
> > +      * first in enetfec_eth_init().
> > +      */
> > +     rxq = fep->rx_queues[0];
> > +     rte_free(rxq->bd.base);
> > +     enet_free_queue(eth_dev);
> > +     enetfec_eth_stop(eth_dev);
> > +
> >       ret = rte_eth_dev_release_port(eth_dev);
> >       if (ret != 0)
> >               return -EINVAL;
> >
> >       ENETFEC_PMD_INFO("Release enetfec sw device");
> > +     munmap(fep->hw_baseaddr_v, fep->cbus_size);
> 
> instead of unmap directly here, what about having a function in 'enet_uio.c',
> and call that cleanup function from here?
[Apeksha]  Yes, this can be done. Updated in v8 series.

> 
> > +
> >       return 0;
> >   }
> >
> > diff --git a/drivers/net/enetfec/enet_ethdev.h
> b/drivers/net/enetfec/enet_ethdev.h
> > index 36202ba6c7..e48f958ad9 100644
> > --- a/drivers/net/enetfec/enet_ethdev.h
> > +++ b/drivers/net/enetfec/enet_ethdev.h
> > @@ -7,6 +7,11 @@
> >
> >   #include <rte_ethdev.h>
> >
> > +#define ETHER_ADDR_LEN               6
> 
> Below defines 'ETH_ALEN', seems for same reason.
> 
> And in DPDK we already have 'RTE_ETHER_ADDR_LEN'.
> 
> To prevent all this redundancy, why not drop 'ETH_ALEN' &
> 'ETHER_ADDR_LEN',
> and just use 'RTE_ETHER_ADDR_LEN'.
[Apeksha]  Okay, we will remove these redundancy and use 'RTE_ETHER_ADDR_LEN'.

> 
> > +#define BD_LEN                       49152
> > +#define ENETFEC_TX_FR_SIZE   2048
> > +#define ETH_HLEN             RTE_ETHER_HDR_LEN
> > +
> >   /* full duplex or half duplex */
> >   #define HALF_DUPLEX             0x00
> >   #define FULL_DUPLEX             0x01
> > @@ -19,6 +24,20 @@
> >   #define ETH_ALEN            RTE_ETHER_ADDR_LEN
> >
> >   #define __iomem
> > +#if defined(RTE_ARCH_ARM)
> > +#if defined(RTE_ARCH_64)
> > +#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
> > +#define dcbf_64(p) dcbf(p)
> > +
> > +#else /* RTE_ARCH_32 */
> > +#define dcbf(p) RTE_SET_USED(p)
> > +#define dcbf_64(p) dcbf(p)
> > +#endif
> > +
> > +#else
> > +#define dcbf(p) RTE_SET_USED(p)
> > +#define dcbf_64(p) dcbf(p)
> > +#endif
> >
> >   /*
> >    * ENETFEC with AVB IP can support maximum 3 rx and tx queues.
> > @@ -142,4 +161,9 @@ enet_get_bd_index(struct bufdesc *bdp, struct
> bufdesc_prop *bd)
> >       return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
> >   }
> >
> > +uint16_t enetfec_recv_pkts(void *rxq1, __rte_unused struct rte_mbuf
> **rx_pkts,
> > +             uint16_t nb_pkts);
> > +uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> > +             uint16_t nb_pkts);
> > +
> >   #endif /*__ENETFEC_ETHDEV_H__*/
> > diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
> > new file mode 100644
> > index 0000000000..6ac4624553
> > --- /dev/null
> > +++ b/drivers/net/enetfec/enet_rxtx.c
> > @@ -0,0 +1,445 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright 2021 NXP
> > + */
> > +
> > +#include <signal.h>
> > +#include <rte_mbuf.h>
> > +#include <rte_io.h>
> > +#include "enet_regs.h"
> > +#include "enet_ethdev.h"
> > +#include "enet_pmd_logs.h"
> > +
> > +#define ENETFEC_LOOPBACK     0
> > +#define ENETFEC_DUMP         0
> 
> There was a request to convert them into devargs, again seems
> silently ignored, copy/paste from previous version:
> 
> Instead of compile time flags, why not convert them to devargs so
> they can be updated without recompile?
> This also make sure all code is enabled and prevent possible dead
> code by time.
> 
> > +
> > +#if ENETFEC_DUMP
> > +static void
> > +enet_dump(struct enetfec_priv_tx_q *txq)
> > +{
> > +     struct bufdesc *bdp;
> > +     int index = 0;
> > +
> > +     ENETFEC_PMD_DEBUG("TX ring dump\n");
> > +     ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
> > +
> > +     bdp = txq->bd.base;
> > +     do {
> > +             ENETFEC_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
> > +                     index,
> > +                     bdp == txq->bd.cur ? 'S' : ' ',
> > +                     bdp == txq->dirty_tx ? 'H' : ' ',
> > +                     rte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),
> > +                     rte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),
> > +                     rte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),
> > +                     txq->tx_mbuf[index]);
> > +             bdp = enet_get_nextdesc(bdp, &txq->bd);
> > +             index++;
> > +     } while (bdp != txq->bd.base);
> > +}
> > +
> > +static void
> > +enet_dump_rx(struct enetfec_priv_rx_q *rxq)
> > +{
> > +     struct bufdesc *bdp;
> > +     int index = 0;
> > +
> > +     ENETFEC_PMD_DEBUG("RX ring dump\n");
> > +     ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
> > +
> > +     bdp = rxq->bd.base;
> > +     do {
> > +             ENETFEC_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
> > +                     index,
> > +                     bdp == rxq->bd.cur ? 'S' : ' ',
> > +                     rte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),
> > +                     rte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),
> > +                     rte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),
> > +                     rxq->rx_mbuf[index]);
> > +             rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
> > +                             rxq->rx_mbuf[index]->pkt_len);
> > +             bdp = enet_get_nextdesc(bdp, &rxq->bd);
> > +             index++;
> > +     } while (bdp != rxq->bd.base);
> > +}
> > +#endif
> > +
> > +#if ENETFEC_LOOPBACK
> > +static volatile bool lb_quit;
> > +
> > +static void fec_signal_handler(int signum)
> > +{
> > +     if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
> > +             ENETFEC_PMD_INFO("\n\n %s: Signal %d received, preparing to
> exit...\n",
> > +                             __func__, signum);
> > +             lb_quit = true;
> > +     }
> > +}
> > +
> 
> Again, another comment ignored from the previous version: we shouldn't have
> signals handled by driver, that is application task.
> 
> I am stopping reviewing here, there are too many things from previous
> version just ignored, can you please check/answer comments on the
> previous version of the set?

All above comments are handled in v8 patch series.
  

Patch

diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index dfcd032098..47836630b6 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -82,6 +82,8 @@  driver.
 ENETFEC Features
 ~~~~~~~~~~~~~~~~~
 
+- Basic stats
+- Promiscuous
 - Linux
 - ARMv8
 
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..3d8aa5b627 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@ 
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Promiscuous mode     = Y
+Basic stats	     = Y
 Linux		     = Y
 ARMv8		     = Y
 Usage doc	     = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index aa96093eb8..42b6cac26f 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -41,6 +41,8 @@ 
 #define ENETFEC_RAFL_V			0x8
 #define ENETFEC_OPD_V			0xFFF0
 
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD		0
 #define NUM_OF_BD_QUEUES		6
 
 static uint32_t enetfec_e_cntl;
@@ -179,6 +181,40 @@  enetfec_restart(struct rte_eth_dev *dev)
 	rte_delay_us(10);
 }
 
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i, q;
+	struct rte_mbuf *mbuf;
+	struct bufdesc  *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	struct enetfec_priv_tx_q *txq;
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+		rxq = fep->rx_queues[q];
+		bdp = rxq->bd.base;
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+			mbuf = rxq->rx_mbuf[i];
+			rxq->rx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+			bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		}
+	}
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+		txq = fep->tx_queues[q];
+		bdp = txq->bd.base;
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			mbuf = txq->tx_mbuf[i];
+			txq->tx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+		}
+	}
+}
+
 static int
 enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
 {
@@ -192,6 +228,8 @@  static int
 enetfec_eth_start(struct rte_eth_dev *dev)
 {
 	enetfec_restart(dev);
+	dev->rx_pkt_burst = &enetfec_recv_pkts;
+	dev->tx_pkt_burst = &enetfec_xmit_pkts;
 
 	return 0;
 }
@@ -217,6 +255,105 @@  enetfec_eth_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+	enet_free_buffers(dev);
+	return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+			int wait_to_complete __rte_unused)
+{
+	struct rte_eth_link link;
+	unsigned int lstatus = 1;
+
+	if (dev == NULL) {
+		ENETFEC_PMD_ERR("Invalid device in link_update.");
+		return 0;
+	}
+
+	memset(&link, 0, sizeof(struct rte_eth_link));
+
+	link.link_status = lstatus;
+	link.link_speed = ETH_SPEED_NUM_1G;
+
+	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+			 "Up");
+
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	uint32_t tmp;
+
+	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+	tmp |= 0x8;
+	tmp &= ~0x2;
+	rte_write32(rte_cpu_to_le_32(tmp),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+	return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+	dev->data->all_multicast = 1;
+
+	rte_write32(rte_cpu_to_le_32(0x04400002),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0x10800049),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+	return 0;
+}
+
+/* Set a MAC change in hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+		    struct rte_ether_addr *addr)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+	return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+	      struct rte_eth_stats *stats)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	struct rte_eth_stats *eth_stats = &fep->stats;
+
+	stats->ipackets = eth_stats->ipackets;
+	stats->ibytes = eth_stats->ibytes;
+	stats->ierrors = eth_stats->ierrors;
+	stats->opackets = eth_stats->opackets;
+	stats->obytes = eth_stats->obytes;
+	stats->oerrors = eth_stats->oerrors;
+
+	return 0;
+}
+
 static int
 enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info)
@@ -228,6 +365,18 @@  enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		rte_free(fep->rx_queues[i]);
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		rte_free(fep->rx_queues[i]);
+}
+
 static const unsigned short offset_des_active_rxq[] = {
 	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
 };
@@ -433,6 +582,12 @@  static const struct eth_dev_ops enetfec_ops = {
 	.dev_configure          = enetfec_eth_configure,
 	.dev_start              = enetfec_eth_start,
 	.dev_stop               = enetfec_eth_stop,
+	.dev_close              = enetfec_eth_close,
+	.link_update            = enetfec_eth_link_update,
+	.promiscuous_enable     = enetfec_promiscuous_enable,
+	.allmulticast_enable    = enetfec_multicast_enable,
+	.mac_addr_set           = enetfec_set_mac_address,
+	.stats_get              = enetfec_stats_get,
 	.dev_infos_get          = enetfec_eth_info,
 	.rx_queue_setup         = enetfec_rx_queue_setup,
 	.tx_queue_setup         = enetfec_tx_queue_setup
@@ -459,7 +614,9 @@  pmd_enetfec_probe(struct rte_vdev_device *vdev)
 	int rc;
 	int i;
 	unsigned int bdsize;
-
+	struct rte_ether_addr macaddr = {
+		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
+	};
 	name = rte_vdev_device_name(vdev);
 	if (name == NULL)
 		return -EINVAL;
@@ -501,6 +658,21 @@  pmd_enetfec_probe(struct rte_vdev_device *vdev)
 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
 	}
 
+	/* Copy the station address into the dev structure, */
+	dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+			ETHER_ADDR_LEN);
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/*
+	 * Set default mac address
+	 */
+	enetfec_set_mac_address(dev, &macaddr);
+
+	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
 	rc = enetfec_eth_init(dev);
 	if (rc)
 		goto failed_init;
@@ -509,6 +681,8 @@  pmd_enetfec_probe(struct rte_vdev_device *vdev)
 
 failed_init:
 	ENETFEC_PMD_ERR("Failed to init");
+err:
+	rte_eth_dev_release_port(dev);
 	return rc;
 }
 
@@ -516,6 +690,8 @@  static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_eth_dev *eth_dev = NULL;
+	struct enetfec_private *fep;
+	struct enetfec_priv_rx_q *rxq;
 	int ret;
 
 	/* find the ethdev entry */
@@ -523,11 +699,22 @@  pmd_enetfec_remove(struct rte_vdev_device *vdev)
 	if (eth_dev == NULL)
 		return -ENODEV;
 
+	fep = eth_dev->data->dev_private;
+	/* Free descriptor base of first RX queue as it was configured
+	 * first in enetfec_eth_init().
+	 */
+	rxq = fep->rx_queues[0];
+	rte_free(rxq->bd.base);
+	enet_free_queue(eth_dev);
+	enetfec_eth_stop(eth_dev);
+
 	ret = rte_eth_dev_release_port(eth_dev);
 	if (ret != 0)
 		return -EINVAL;
 
 	ENETFEC_PMD_INFO("Release enetfec sw device");
+	munmap(fep->hw_baseaddr_v, fep->cbus_size);
+
 	return 0;
 }
 
diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
index 36202ba6c7..e48f958ad9 100644
--- a/drivers/net/enetfec/enet_ethdev.h
+++ b/drivers/net/enetfec/enet_ethdev.h
@@ -7,6 +7,11 @@ 
 
 #include <rte_ethdev.h>
 
+#define ETHER_ADDR_LEN		6
+#define BD_LEN			49152
+#define ENETFEC_TX_FR_SIZE	2048
+#define ETH_HLEN		RTE_ETHER_HDR_LEN
+
 /* full duplex or half duplex */
 #define HALF_DUPLEX             0x00
 #define FULL_DUPLEX             0x01
@@ -19,6 +24,20 @@ 
 #define ETH_ALEN		RTE_ETHER_ADDR_LEN
 
 #define __iomem
+#if defined(RTE_ARCH_ARM)
+#if defined(RTE_ARCH_64)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+
+#else /* RTE_ARCH_32 */
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
+#else
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
 
 /*
  * ENETFEC with AVB IP can support maximum 3 rx and tx queues.
@@ -142,4 +161,9 @@  enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
 	return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
 }
 
+uint16_t enetfec_recv_pkts(void *rxq1, __rte_unused struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 #endif /*__ENETFEC_ETHDEV_H__*/
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..6ac4624553
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,445 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <signal.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+#define ENETFEC_LOOPBACK	0
+#define ENETFEC_DUMP		0
+
+#if ENETFEC_DUMP
+static void
+enet_dump(struct enetfec_priv_tx_q *txq)
+{
+	struct bufdesc *bdp;
+	int index = 0;
+
+	ENETFEC_PMD_DEBUG("TX ring dump\n");
+	ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
+
+	/* Walk every descriptor in the TX ring once, starting at base. */
+	bdp = txq->bd.base;
+	do {
+		/* Read the raw little-endian descriptor fields first, then
+		 * convert to CPU order. The previous code nested the calls
+		 * the other way round and converted the *pointer* value.
+		 */
+		ENETFEC_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == txq->bd.cur ? 'S' : ' ',
+			bdp == txq->dirty_tx ? 'H' : ' ',
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+			rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+			txq->tx_mbuf[index]);
+		bdp = enet_get_nextdesc(bdp, &txq->bd);
+		index++;
+	} while (bdp != txq->bd.base);
+}
+
+static void
+enet_dump_rx(struct enetfec_priv_rx_q *rxq)
+{
+	struct bufdesc *bdp;
+	int index = 0;
+
+	ENETFEC_PMD_DEBUG("RX ring dump\n");
+	ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
+
+	/* Walk every descriptor in the RX ring once, starting at base. */
+	bdp = rxq->bd.base;
+	do {
+		/* Read the raw little-endian descriptor fields first, then
+		 * convert to CPU order. The previous code nested the calls
+		 * the other way round and converted the *pointer* value.
+		 */
+		ENETFEC_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == rxq->bd.cur ? 'S' : ' ',
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+			rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+			rxq->rx_mbuf[index]);
+		rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
+				rxq->rx_mbuf[index]->pkt_len);
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		index++;
+	} while (bdp != rxq->bd.base);
+}
+#endif
+
+#if ENETFEC_LOOPBACK
+static volatile bool lb_quit;
+
+/* Signal handler for the loopback loop: request a clean exit on
+ * SIGINT/SIGTSTP/SIGTERM by setting the shared lb_quit flag.
+ */
+static void fec_signal_handler(int signum)
+{
+	switch (signum) {
+	case SIGINT:
+	case SIGTSTP:
+	case SIGTERM:
+		ENETFEC_PMD_INFO("\n\n %s: Signal %d received, preparing to exit...\n",
+				__func__, signum);
+		lb_quit = true;
+		break;
+	default:
+		/* Not one of the handled signals; ignore. */
+		break;
+	}
+}
+
+/* Loopback worker: busy-polls the first RX ring and retransmits each
+ * received frame on the first TX ring until a signal sets lb_quit.
+ * Only used when the compile-time ENETFEC_LOOPBACK flag is enabled.
+ */
+static void
+enetfec_lb_rxtx(void *rxq1)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
+	struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len = 0;
+	int index_r = 0, index_t = 0;
+	u8 *data;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	unsigned int i;
+	struct enetfec_private *fep;
+	struct enetfec_priv_tx_q *txq;
+	fep = rxq->fep->dev->data->dev_private;
+	txq = fep->tx_queues[0];
+
+	pool = rxq->pool;
+	rx_bdp = rxq->bd.cur;
+	tx_bdp = txq->bd.cur;
+
+	signal(SIGTSTP, fec_signal_handler);
+	while (!lb_quit) {
+chk_again:
+		status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
+		if (status & RX_BD_EMPTY) {
+			/* Nothing received yet: spin until a frame arrives
+			 * or a signal asks us to stop.
+			 */
+			if (!lb_quit)
+				goto chk_again;
+			rxq->bd.cur = rx_bdp;
+			txq->bd.cur = tx_bdp;
+			return;
+		}
+
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short.
+				 * (Log level moved out of the format string;
+				 * it was previously quoted by mistake.)
+				 */
+				ENETFEC_DP_LOG(DEBUG, "rx_length_error");
+				if (status & RX_BD_LAST)
+					ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
+			}
+			/* CRC Error */
+			if (status & RX_BD_CR)
+				ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
+
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
+			mbuf = NULL;
+			goto rx_processing_done;
+		}
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(!new_mbuf)) {
+			stats->ierrors++;
+			break;
+		}
+		/* Process the incoming frame. */
+		pkt_len = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_datlen));
+
+		/* shows data with respect to the data_off field. */
+		index_r = enet_get_bd_index(rx_bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index_r];
+
+		/* adjust pkt_len: drop the 4-byte FCS appended by hardware */
+		rte_pktmbuf_append((struct rte_mbuf *)mbuf, pkt_len - 4);
+		if (rxq->fep->quirks & QUIRK_RACC)
+			rte_pktmbuf_adj(mbuf, 2);
+
+		/* Replace Buffer in BD */
+		rxq->rx_mbuf[index_r] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&rx_bdp->bd_bufaddr);
+
+rx_processing_done:
+		/* when rx_processing_done clear the status flags
+		 * for this buffer
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		/* Make sure the updates to rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &rx_bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		rx_bdp = enet_get_nextdesc(rx_bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+
+		/* TX begins: First clean the ring then process packet */
+		index_t = enet_get_bd_index(tx_bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&tx_bdp->bd_sc));
+		/* Braces added: the unbraced form made the break
+		 * unconditional and exited after a single packet.
+		 */
+		if (status & TX_BD_READY) {
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index_t]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index_t]);
+			txq->tx_mbuf[index_t] = NULL;
+		}
+
+		if (mbuf == NULL)
+			continue;
+
+		/* Fill in a Tx ring entry */
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		pkt_len = rte_pktmbuf_pkt_len(mbuf);
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+
+		/* Flush the payload to memory so the DMA engine sees it.
+		 * NOTE(review): "<=" flushes one line past the end - looks
+		 * intentional (covers a partial tail line) but confirm.
+		 */
+		for (i = 0; i <= pkt_len; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			&tx_bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(pkt_len), &tx_bdp->bd_datlen);
+
+		/* Make sure the updates to rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &tx_bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+
+		/* Save mbuf pointer to clean later */
+		txq->tx_mbuf[index_t] = mbuf;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		tx_bdp = enet_get_nextdesc(tx_bdp, &txq->bd);
+	}
+}
+#endif
+
+/* This function does enetfec_rx_queue processing. Dequeue packet from Rx queue
+ * When update through the ring, just set the empty indicator.
+ */
+/* RX burst: dequeue up to nb_pkts frames from the queue's descriptor
+ * ring into rx_pkts. Each consumed descriptor gets a freshly allocated
+ * replacement mbuf and is handed back to hardware marked EMPTY.
+ * Returns the number of packets received.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, __rte_unused struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *bdp;
+	struct rte_mbuf *mbuf, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len;
+	int pkt_received = 0, index = 0;
+	void *data;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	pool = rxq->pool;
+	bdp = rxq->bd.cur;
+#if ENETFEC_LOOPBACK
+	enetfec_lb_rxtx(rxq1);
+#endif
+	/* Process the incoming packet */
+	status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	while ((status & RX_BD_EMPTY) == 0) {
+		if (pkt_received >= nb_pkts)
+			break;
+
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				/* enet_dump_rx(rxq); */
+				ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENETFEC_DP_LOG(DEBUG, "rx_length_error");
+				if (status & RX_BD_LAST)
+					ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
+			}
+			if (status & RX_BD_CR) {     /* CRC Error */
+				ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
+			}
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
+			goto rx_processing_done;
+		}
+
+		/* Allocate the replacement buffer only after the error
+		 * checks: allocating first leaked one mbuf per errored
+		 * frame, since the error path never freed or used it.
+		 */
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(new_mbuf == NULL)) {
+			stats->ierrors++;
+			break;
+		}
+
+		/* Process the incoming frame. */
+		stats->ipackets++;
+		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+		stats->ibytes += pkt_len;
+
+		/* shows data with respect to the data_off field. */
+		index = enet_get_bd_index(bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index];
+
+		data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		rte_prefetch0(data);
+		/* Strip the 4-byte FCS appended by hardware */
+		rte_pktmbuf_append((struct rte_mbuf *)mbuf,
+				pkt_len - 4);
+
+		if (rxq->fep->quirks & QUIRK_RACC)
+			data = rte_pktmbuf_adj(mbuf, 2);
+
+		rx_pkts[pkt_received] = mbuf;
+		pkt_received++;
+		rxq->rx_mbuf[index] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&bdp->bd_bufaddr);
+rx_processing_done:
+		/* when rx_processing_done clear the status flags
+		 * for this buffer
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		if (rxq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+				    &ebdp->bd_esc);
+			rte_write32(0, &ebdp->bd_prot);
+			rte_write32(0, &ebdp->bd_bdu);
+		}
+
+		/* Make sure the updates to rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	}
+	rxq->bd.cur = bdp;
+	return pkt_received;
+}
+
+/* TX burst: enqueue up to nb_pkts single-segment mbufs onto the TX
+ * descriptor ring. Stops early when the ring is full or a multi-segment
+ * mbuf is seen (scatter-gather unsupported); untransmitted mbufs remain
+ * owned by the caller, per the rte_eth_tx_burst contract.
+ * Returns the number of packets actually queued for transmission.
+ */
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct enetfec_priv_tx_q *txq  =
+			(struct enetfec_priv_tx_q *)tx_queue;
+	struct rte_eth_stats *stats = &txq->fep->stats;
+	struct bufdesc *bdp, *last_bdp;
+	struct rte_mbuf *mbuf;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int index, estatus = 0;
+	unsigned int i, pkt_transmitted = 0;
+	uint8_t *data;
+
+	while (pkt_transmitted < nb_pkts) {
+		bdp = txq->bd.cur;
+		/* First clean the ring */
+		index = enet_get_bd_index(bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+		if (status & TX_BD_READY) {
+			/* Descriptor still owned by hardware: ring full */
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index]);
+			txq->tx_mbuf[index] = NULL;
+		}
+
+		mbuf = *(tx_pkts);
+
+		/* Scatter-gather is not supported. Stop without consuming
+		 * the mbuf or bumping stats; the old code returned -1,
+		 * which wraps to 65535 in a uint16_t return value.
+		 */
+		if (mbuf->nb_segs > 1) {
+			ENETFEC_PMD_DEBUG("SG not supported");
+			break;
+		}
+		tx_pkts++;
+
+		/* Fill in a Tx ring entry */
+		last_bdp = bdp;
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		buflen = rte_pktmbuf_pkt_len(mbuf);
+		stats->opackets++;
+		stats->obytes += buflen;
+
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+		/* Flush the payload to memory so the DMA engine sees it */
+		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			    &bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+		if (txq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(0, &ebdp->bd_bdu);
+			rte_write32(rte_cpu_to_le_32(estatus),
+				    &ebdp->bd_esc);
+		}
+
+		index = enet_get_bd_index(last_bdp, &txq->bd);
+		/* Save mbuf pointer */
+		txq->tx_mbuf[index] = mbuf;
+
+		/* Make sure the updates to rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+		pkt_transmitted++;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+		/* Make sure the update to bdp and tx_skbuff are performed
+		 * before txq->bd.cur.
+		 */
+		txq->bd.cur = bdp;
+	}
+	/* Report the real count; returning nb_pkts here mislead callers
+	 * into treating unsent packets as transmitted.
+	 */
+	return pkt_transmitted;
+}
diff --git a/drivers/net/enetfec/meson.build b/drivers/net/enetfec/meson.build
index 57f316b8a5..79dca58dea 100644
--- a/drivers/net/enetfec/meson.build
+++ b/drivers/net/enetfec/meson.build
@@ -7,4 +7,5 @@  if not is_linux
 endif
 
 sources = files('enet_ethdev.c',
-		'enet_uio.c')
+		'enet_uio.c',
+		'enet_rxtx.c')