get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (replaces all writable fields).
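
For example, the exchange shown below can be reproduced with any HTTP client. A minimal sketch in Python, assuming the third-party requests package is available (the URL and field names are taken from the response that follows; Patchwork serves JSON to programmatic clients by default, while "?format=api" selects this browsable rendering):

import requests

# Fetch patch 92457 from the DPDK Patchwork REST API as JSON.
resp = requests.get("http://patches.dpdk.org/api/patches/92457/")
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[4/4] drivers/net/enetfec: add enqueue and dequeue support"
print(patch["state"])  # "changes-requested"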

GET /api/patches/92457/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92457,
    "url": "http://patches.dpdk.org/api/patches/92457/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210430043424.19752-5-apeksha.gupta@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210430043424.19752-5-apeksha.gupta@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210430043424.19752-5-apeksha.gupta@nxp.com",
    "date": "2021-04-30T04:34:24",
    "name": "[4/4] drivers/net/enetfec: add enqueue and dequeue support",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "311f60826cf9071bec37acb132da6e62fb9e0d61",
    "submitter": {
        "id": 1570,
        "url": "http://patches.dpdk.org/api/people/1570/?format=api",
        "name": "Apeksha Gupta",
        "email": "apeksha.gupta@nxp.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210430043424.19752-5-apeksha.gupta@nxp.com/mbox/",
    "series": [
        {
            "id": 16767,
            "url": "http://patches.dpdk.org/api/series/16767/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16767",
            "date": "2021-04-30T04:34:20",
            "name": "drivers/net: add NXP ENETFEC driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/16767/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/92457/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/92457/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A335FA0546;\n\tFri, 30 Apr 2021 06:35:20 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C5C7441196;\n\tFri, 30 Apr 2021 06:35:08 +0200 (CEST)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n by mails.dpdk.org (Postfix) with ESMTP id AEBE0410FA\n for <dev@dpdk.org>; Fri, 30 Apr 2021 06:35:03 +0200 (CEST)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 89A5B1A00CD;\n Fri, 30 Apr 2021 06:35:03 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n [165.114.16.14])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id DD0281A0068;\n Fri, 30 Apr 2021 06:35:00 +0200 (CEST)",
            "from lsv03186.swis.in-blr01.nxp.com (lsv03186.swis.in-blr01.nxp.com\n [92.120.146.182])\n by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id 1F6F3402F6;\n Fri, 30 Apr 2021 06:34:58 +0200 (CEST)"
        ],
        "From": "Apeksha Gupta <apeksha.gupta@nxp.com>",
        "To": "ferruh.yigit@intel.com",
        "Cc": "dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com,\n Apeksha Gupta <apeksha.gupta@nxp.com>",
        "Date": "Fri, 30 Apr 2021 10:04:24 +0530",
        "Message-Id": "<20210430043424.19752-5-apeksha.gupta@nxp.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210430043424.19752-1-apeksha.gupta@nxp.com>",
        "References": "<20210430043424.19752-1-apeksha.gupta@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH 4/4] drivers/net/enetfec: add enqueue and dequeue\n support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch supported checksum offloads and add burst enqueue and\ndequeue operations to the enetfec PMD.\n\nLoopback mode is added, compile time flag 'ENETFEC_LOOPBACK' is\nused to enable this feature. By default loopback mode is disabled.\nBasic features added like promiscuous enable, basic stats.\n\nSigned-off-by: Sachin Saxena <sachin.saxena@nxp.com>\nSigned-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>\n---\n doc/guides/nics/enetfec.rst          |   4 +\n doc/guides/nics/features/enetfec.ini |   5 +\n drivers/net/enetfec/enet_ethdev.c    | 212 +++++++++++-\n drivers/net/enetfec/enet_rxtx.c      | 499 +++++++++++++++++++++++++++\n 4 files changed, 719 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/enetfec/enet_rxtx.c",
    "diff": "diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst\nindex 10f495fb9..adbb52392 100644\n--- a/doc/guides/nics/enetfec.rst\n+++ b/doc/guides/nics/enetfec.rst\n@@ -75,6 +75,10 @@ ENETFEC driver.\n ENETFEC Features\n ~~~~~~~~~~~~~~~~~\n \n+- Basic stats\n+- Promiscuous\n+- VLAN offload\n+- L3/L4 checksum offload\n - ARMv8\n \n Supported ENETFEC SoCs\ndiff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini\nindex 570069798..fcc217773 100644\n--- a/doc/guides/nics/features/enetfec.ini\n+++ b/doc/guides/nics/features/enetfec.ini\n@@ -4,5 +4,10 @@\n ; Refer to default.ini for the full list of available PMD features.\n ;\n [Features]\n+Basic stats          = Y\n+Promiscuous mode     = Y\n+VLAN offload         = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n ARMv8                = Y\n Usage doc            = Y\ndiff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c\nindex b4816179a..ca2cf929f 100644\n--- a/drivers/net/enetfec/enet_ethdev.c\n+++ b/drivers/net/enetfec/enet_ethdev.c\n@@ -46,6 +46,9 @@\n #define ENET_ENET_OPD_V\t\t0xFFF0\n #define ENET_MDIO_PM_TIMEOUT\t100 /* ms */\n \n+/* Extended buffer descriptor */\n+#define ENETFEC_EXTENDED_BD\t0\n+\n int enetfec_logtype_pmd;\n \n /* Supported Rx offloads */\n@@ -61,6 +64,50 @@ static uint64_t dev_tx_offloads_sup =\n \t\tDEV_TX_OFFLOAD_UDP_CKSUM |\n \t\tDEV_TX_OFFLOAD_TCP_CKSUM;\n \n+static void enet_free_buffers(struct rte_eth_dev *dev)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tunsigned int i, q;\n+\tstruct rte_mbuf *mbuf;\n+\tstruct bufdesc  *bdp;\n+\tstruct enetfec_priv_rx_q *rxq;\n+\tstruct enetfec_priv_tx_q *txq;\n+\n+\tfor (q = 0; q < dev->data->nb_rx_queues; q++) {\n+\t\trxq = fep->rx_queues[q];\n+\t\tbdp = rxq->bd.base;\n+\t\tfor (i = 0; i < rxq->bd.ring_size; i++) {\n+\t\t\tmbuf = rxq->rx_mbuf[i];\n+\t\t\trxq->rx_mbuf[i] = NULL;\n+\t\t\tif (mbuf)\n+\t\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\t\t}\n+\t}\n+\n+\tfor (q = 0; q < dev->data->nb_tx_queues; q++) {\n+\t\ttxq = fep->tx_queues[q];\n+\t\tbdp = txq->bd.base;\n+\t\tfor (i = 0; i < txq->bd.ring_size; i++) {\n+\t\t\tmbuf = txq->tx_mbuf[i];\n+\t\t\ttxq->tx_mbuf[i] = NULL;\n+\t\t\tif (mbuf)\n+\t\t\t\trte_pktmbuf_free(mbuf);\n+\t\t}\n+\t}\n+}\n+\n+static void enet_free_queue(struct rte_eth_dev *dev)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++)\n+\t\trte_free(fep->rx_queues[i]);\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++)\n+\t\trte_free(fep->rx_queues[i]);\n+}\n+\n /*\n  * This function is called to start or restart the FEC during a link\n  * change, transmit timeout or to reconfigure the FEC. 
The network\n@@ -189,7 +236,6 @@ enetfec_eth_open(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n-\n static int\n enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)\n {\n@@ -395,12 +441,137 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev,\n \treturn -1;\n }\n \n+static int\n+enetfec_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tuint32_t tmp;\n+\n+\ttmp = rte_read32(fep->hw_baseaddr_v + ENET_RCR);\n+\ttmp |= 0x8;\n+\ttmp &= ~0x2;\n+\trte_write32(tmp, fep->hw_baseaddr_v + ENET_RCR);\n+\n+\treturn 0;\n+}\n+\n+static int\n+enetfec_eth_link_update(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t__rte_unused int wait_to_complete)\n+{\n+\treturn 0;\n+}\n+\n+static int\n+enetfec_stats_get(struct rte_eth_dev *dev,\n+\t      struct rte_eth_stats *stats)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tstruct rte_eth_stats *eth_stats = &fep->stats;\n+\n+\tif (stats == NULL)\n+\t\treturn -1;\n+\n+\tmemset(stats, 0, sizeof(struct rte_eth_stats));\n+\n+\tstats->ipackets = eth_stats->ipackets;\n+\tstats->ibytes = eth_stats->ibytes;\n+\tstats->ierrors = eth_stats->ierrors;\n+\tstats->opackets = eth_stats->opackets;\n+\tstats->obytes = eth_stats->obytes;\n+\tstats->oerrors = eth_stats->oerrors;\n+\n+\treturn 0;\n+}\n+\n+static void\n+enetfec_stop(__rte_unused struct rte_eth_dev *dev)\n+{\n+/*TODO*/\n+}\n+\n+static int\n+enetfec_eth_close(__rte_unused struct rte_eth_dev *dev)\n+{\n+\t/* phy_stop(ndev->phydev); */\n+\tenetfec_stop(dev);\n+\t/* phy_disconnect(ndev->phydev); */\n+\n+\tenet_free_buffers(dev);\n+\treturn 0;\n+}\n+\n+static uint16_t\n+enetfec_dummy_xmit_pkts(__rte_unused void *tx_queue,\n+\t\t__rte_unused struct rte_mbuf **tx_pkts,\n+\t\t__rte_unused uint16_t nb_pkts)\n+{\n+\treturn 0;\n+}\n+\n+static uint16_t\n+enetfec_dummy_recv_pkts(__rte_unused void *rxq,\n+\t\t__rte_unused struct rte_mbuf **rx_pkts,\n+\t\t__rte_unused uint16_t nb_pkts)\n+{\n+\treturn 0;\n+}\n+\n+static int\n+enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)\n+{\n+\tdev->rx_pkt_burst = &enetfec_dummy_recv_pkts;\n+\tdev->tx_pkt_burst = &enetfec_dummy_xmit_pkts;\n+\n+\treturn 0;\n+}\n+\n+static int\n+enetfec_multicast_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\n+\trte_write32(0xffffffff, fep->hw_baseaddr_v + ENET_GAUR);\n+\trte_write32(0xffffffff, fep->hw_baseaddr_v + ENET_GALR);\n+\tdev->data->all_multicast = 1;\n+\n+\trte_write32(0x04400002, fep->hw_baseaddr_v + ENET_GAUR);\n+\trte_write32(0x10800049, fep->hw_baseaddr_v + ENET_GALR);\n+\n+\treturn 0;\n+}\n+\n+/* Set a MAC change in hardware. 
*/\n+static int\n+enetfec_set_mac_address(struct rte_eth_dev *dev,\n+\t\t    struct rte_ether_addr *addr)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\n+\twritel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |\n+\t\t(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),\n+\t\tfep->hw_baseaddr_v + ENET_PALR);\n+\twritel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),\n+\t\tfep->hw_baseaddr_v + ENET_PAUR);\n+\n+\trte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);\n+\n+\treturn 0;\n+}\n+\n static const struct eth_dev_ops ops = {\n \t.dev_start = enetfec_eth_open,\n+\t.dev_stop = enetfec_eth_stop,\n+\t.dev_close = enetfec_eth_close,\n \t.dev_configure = enetfec_eth_configure,\n \t.dev_infos_get = enetfec_eth_info,\n \t.rx_queue_setup = enetfec_rx_queue_setup,\n \t.tx_queue_setup = enetfec_tx_queue_setup,\n+\t.link_update  = enetfec_eth_link_update,\n+\t.mac_addr_set\t      = enetfec_set_mac_address,\n+\t.stats_get  = enetfec_stats_get,\n+\t.promiscuous_enable   = enetfec_promiscuous_enable,\n+\t.allmulticast_enable = enetfec_multicast_enable\n };\n \n static int\n@@ -434,6 +605,7 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)\n \tstruct enetfec_private *fep;\n \tconst char *name;\n \tint rc = -1;\n+\tstruct rte_ether_addr macaddr;\n \tint i;\n \tunsigned int bdsize;\n \n@@ -474,12 +646,37 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)\n \t\tfep->bd_addr_p = fep->bd_addr_p + bdsize;\n \t}\n \n+\t/* Copy the station address into the dev structure, */\n+\tdev->data->mac_addrs = rte_zmalloc(\"mac_addr\", ETHER_ADDR_LEN, 0);\n+\tif (dev->data->mac_addrs == NULL) {\n+\t\tENET_PMD_ERR(\"Failed to allocate mem %d to store MAC addresses\",\n+\t\t\tETHER_ADDR_LEN);\n+\t\trc = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\t/* TODO get mac address from device tree or get random addr.\n+\t * Currently setting default as 1,1,1,1,1,1\n+\t */\n+\tmacaddr.addr_bytes[0] = 1;\n+\tmacaddr.addr_bytes[1] = 1;\n+\tmacaddr.addr_bytes[2] = 1;\n+\tmacaddr.addr_bytes[3] = 1;\n+\tmacaddr.addr_bytes[4] = 1;\n+\tmacaddr.addr_bytes[5] = 1;\n+\n+\tenetfec_set_mac_address(dev, &macaddr);\n+\t/* enable the extended buffer mode */\n+\tfep->bufdesc_ex = ENETFEC_EXTENDED_BD;\n+\n \trc = enetfec_eth_init(dev);\n \tif (rc)\n \t\tgoto failed_init;\n \treturn 0;\n failed_init:\n \tENET_PMD_ERR(\"Failed to init\");\n+err:\n+\trte_eth_dev_release_port(dev);\n \treturn rc;\n }\n \n@@ -487,15 +684,28 @@ static int\n pmd_enetfec_remove(struct rte_vdev_device *vdev)\n {\n \tstruct rte_eth_dev *eth_dev = NULL;\n+\tstruct enetfec_private *fep;\n+\tstruct enetfec_priv_rx_q *rxq;\n \n \t/* find the ethdev entry */\n \teth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));\n \tif (!eth_dev)\n \t\treturn -ENODEV;\n \n+\tfep = eth_dev->data->dev_private;\n+\t/* Free descriptor base of first RX queue as it was configured\n+\t * first in enetfec_eth_init().\n+\t */\n+\trxq = fep->rx_queues[0];\n+\trte_free(rxq->bd.base);\n+\tenet_free_queue(eth_dev);\n+\n+\tenetfec_eth_stop(eth_dev);\n \trte_eth_dev_release_port(eth_dev);\n \n \tENET_PMD_INFO(\"Closing sw device\\n\");\n+\tmunmap(fep->hw_baseaddr_v, fep->cbus_size);\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c\nnew file mode 100644\nindex 000000000..1b9b86c35\n--- /dev/null\n+++ b/drivers/net/enetfec/enet_rxtx.c\n@@ -0,0 +1,499 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2020 NXP\n+ */\n+\n+#include <signal.h>\n+#include <rte_mbuf.h>\n+#include <rte_io.h>\n+#include 
\"enet_regs.h\"\n+#include \"enet_ethdev.h\"\n+#include \"enet_pmd_logs.h\"\n+\n+#define ENETFEC_LOOPBACK\t0\n+#define ENETFEC_DUMP\t\t0\n+\n+static volatile bool lb_quit;\n+\n+#if ENETFEC_DUMP\n+static void\n+enet_dump(struct enetfec_priv_tx_q *txq)\n+{\n+\tstruct bufdesc *bdp;\n+\tint index = 0;\n+\n+\tENET_PMD_DEBUG(\"TX ring dump\\n\");\n+\tENET_PMD_DEBUG(\"Nr     SC     addr       len  MBUF\\n\");\n+\n+\tbdp = txq->bd.base;\n+\tdo {\n+\t\tENET_PMD_DEBUG(\"%3u %c%c 0x%04x 0x%08x %4u %p\\n\",\n+\t\t\tindex,\n+\t\t\tbdp == txq->bd.cur ? 'S' : ' ',\n+\t\t\tbdp == txq->dirty_tx ? 'H' : ' ',\n+\t\t\trte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),\n+\t\t\trte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),\n+\t\t\trte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),\n+\t\t\ttxq->tx_mbuf[index]);\n+\t\tbdp = enet_get_nextdesc(bdp, &txq->bd);\n+\t\tindex++;\n+\t} while (bdp != txq->bd.base);\n+}\n+\n+static void\n+enet_dump_rx(struct enetfec_priv_rx_q *rxq)\n+{\n+\tstruct bufdesc *bdp;\n+\tint index = 0;\n+\n+\tENET_PMD_DEBUG(\"RX ring dump\\n\");\n+\tENET_PMD_DEBUG(\"Nr     SC     addr       len  MBUF\\n\");\n+\n+\tbdp = rxq->bd.base;\n+\tdo {\n+\t\tENET_PMD_DEBUG(\"%3u %c 0x%04x 0x%08x %4u %p\\n\",\n+\t\t\tindex,\n+\t\t\tbdp == rxq->bd.cur ? 'S' : ' ',\n+\t\t\trte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),\n+\t\t\trte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),\n+\t\t\trte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),\n+\t\t\trxq->rx_mbuf[index]);\n+\t\trte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],\n+\t\t\t\trxq->rx_mbuf[index]->pkt_len);\n+\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\t\tindex++;\n+\t} while (bdp != rxq->bd.base);\n+}\n+#endif\n+\n+#if ENETFEC_LOOPBACK\n+static void fec_signal_handler(int signum)\n+{\n+\tif (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {\n+\t\tprintf(\"\\n\\n %s: Signal %d received, preparing to exit...\\n\",\n+\t\t\t\t__func__, signum);\n+\t\tlb_quit = true;\n+\t}\n+}\n+\n+static void\n+enetfec_lb_rxtx(void *rxq1)\n+{\n+\tstruct rte_mempool *pool;\n+\tstruct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;\n+\tstruct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;\n+\tunsigned short status;\n+\tunsigned short pkt_len = 0;\n+\tint index_r = 0, index_t = 0;\n+\tu8 *data;\n+\tstruct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;\n+\tstruct rte_eth_stats *stats = &rxq->fep->stats;\n+\tunsigned int i;\n+\tstruct enetfec_private *fep;\n+\tstruct enetfec_priv_tx_q *txq;\n+\tfep = rxq->fep->dev->data->dev_private;\n+\ttxq = fep->tx_queues[0];\n+\n+\tpool = rxq->pool;\n+\trx_bdp = rxq->bd.cur;\n+\ttx_bdp = txq->bd.cur;\n+\n+\tsignal(SIGTSTP, fec_signal_handler);\n+\twhile (!lb_quit) {\n+chk_again:\n+\t\tstatus = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));\n+\t\tif (status & RX_BD_EMPTY) {\n+\t\t\tif (!lb_quit)\n+\t\t\t\tgoto chk_again;\n+\t\t\trxq->bd.cur = rx_bdp;\n+\t\t\ttxq->bd.cur = tx_bdp;\n+\t\t\treturn;\n+\t\t}\n+\n+\t\t/* Check for errors. */\n+\t\tstatus ^= RX_BD_LAST;\n+\t\tif (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |\n+\t\t\tRX_BD_CR | RX_BD_OV | RX_BD_LAST |\n+\t\t\tRX_BD_TR)) {\n+\t\t\tstats->ierrors++;\n+\t\t\tif (status & RX_BD_OV) {\n+\t\t\t\t/* FIFO overrun */\n+\t\t\t\tENET_PMD_ERR(\"rx_fifo_error\\n\");\n+\t\t\t\tgoto rx_processing_done;\n+\t\t\t}\n+\t\t\tif (status & (RX_BD_LG | RX_BD_SH\n+\t\t\t\t\t\t| RX_BD_LAST)) {\n+\t\t\t\t/* Frame too long or too short. 
*/\n+\t\t\t\tENET_PMD_ERR(\"rx_length_error\\n\");\n+\t\t\t\tif (status & RX_BD_LAST)\n+\t\t\t\t\tENET_PMD_ERR(\"rcv is not +last\\n\");\n+\t\t\t}\n+\t\t\t/* CRC Error */\n+\t\t\tif (status & RX_BD_CR)\n+\t\t\t\tENET_PMD_ERR(\"rx_crc_errors\\n\");\n+\n+\t\t\t/* Report late collisions as a frame error. */\n+\t\t\tif (status & (RX_BD_NO | RX_BD_TR))\n+\t\t\t\tENET_PMD_ERR(\"rx_frame_error\\n\");\n+\t\t\tmbuf = NULL;\n+\t\t\tgoto rx_processing_done;\n+\t\t}\n+\n+\t\tnew_mbuf = rte_pktmbuf_alloc(pool);\n+\t\tif (unlikely(!new_mbuf)) {\n+\t\t\tstats->ierrors++;\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* Process the incoming frame. */\n+\t\tpkt_len = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_datlen));\n+\n+\t\t/* shows data with respect to the data_off field. */\n+\t\tindex_r = enet_get_bd_index(rx_bdp, &rxq->bd);\n+\t\tmbuf = rxq->rx_mbuf[index_r];\n+\n+\t\t/* adjust pkt_len */\n+\t\trte_pktmbuf_append((struct rte_mbuf *)mbuf, pkt_len - 4);\n+\t\tif (rxq->fep->quirks & QUIRK_RACC)\n+\t\t\trte_pktmbuf_adj(mbuf, 2);\n+\n+\t\t/* Replace Buffer in BD */\n+\t\trxq->rx_mbuf[index_r] = new_mbuf;\n+\t\trte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),\n+\t\t\t\t&rx_bdp->bd_bufaddr);\n+\n+rx_processing_done:\n+\t\t/* when rx_processing_done clear the status flags\n+\t\t * for this buffer\n+\t\t */\n+\t\tstatus &= ~RX_BD_STATS;\n+\n+\t\t/* Mark the buffer empty */\n+\t\tstatus |= RX_BD_EMPTY;\n+\n+\t\t/* Make sure the updates to rest of the descriptor are\n+\t\t * performed before transferring ownership.\n+\t\t */\n+\t\trte_wmb();\n+\t\trte_write16(rte_cpu_to_le_16(status), &rx_bdp->bd_sc);\n+\n+\t\t/* Update BD pointer to next entry */\n+\t\trx_bdp = enet_get_nextdesc(rx_bdp, &rxq->bd);\n+\n+\t\t/* Doing this here will keep the FEC running while we process\n+\t\t * incoming frames.\n+\t\t */\n+\t\trte_write32(0, rxq->bd.active_reg_desc);\n+\n+\t\t/* TX begins: First clean the ring then process packet */\n+\t\tindex_t = enet_get_bd_index(tx_bdp, &txq->bd);\n+\t\tstatus = rte_le_to_cpu_16(rte_read16(&tx_bdp->bd_sc));\n+\t\tif (status & TX_BD_READY)\n+\t\t\tstats->oerrors++;\n+\t\t\tbreak;\n+\t\tif (txq->tx_mbuf[index_t]) {\n+\t\t\trte_pktmbuf_free(txq->tx_mbuf[index_t]);\n+\t\t\ttxq->tx_mbuf[index_t] = NULL;\n+\t\t}\n+\n+\t\tif (mbuf == NULL)\n+\t\t\tcontinue;\n+\n+\t\t/* Fill in a Tx ring entry */\n+\t\tstatus &= ~TX_BD_STATS;\n+\n+\t\t/* Set buffer length and buffer pointer */\n+\t\tpkt_len = rte_pktmbuf_pkt_len(mbuf);\n+\t\tstatus |= (TX_BD_LAST);\n+\t\tdata = rte_pktmbuf_mtod(mbuf, void *);\n+\n+\t\tfor (i = 0; i <= pkt_len; i += RTE_CACHE_LINE_SIZE)\n+\t\t\tdcbf(data + i);\n+\t\trte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),\n+\t\t\t&tx_bdp->bd_bufaddr);\n+\t\trte_write16(rte_cpu_to_le_16(pkt_len), &tx_bdp->bd_datlen);\n+\n+\t\t/* Make sure the updates to rest of the descriptor are performed\n+\t\t * before transferring ownership.\n+\t\t */\n+\t\tstatus |= (TX_BD_READY | TX_BD_TC);\n+\t\trte_wmb();\n+\t\trte_write16(rte_cpu_to_le_16(status), &tx_bdp->bd_sc);\n+\n+\t\t/* Trigger transmission start */\n+\t\trte_write32(0, txq->bd.active_reg_desc);\n+\n+\t\t/* Save mbuf pointer to clean later */\n+\t\ttxq->tx_mbuf[index_t] = mbuf;\n+\n+\t\t/* If this was the last BD in the ring, start at the\n+\t\t * beginning again.\n+\t\t */\n+\t\ttx_bdp = enet_get_nextdesc(tx_bdp, &txq->bd);\n+\t}\n+}\n+#endif\n+\n+/* This function does enetfec_rx_queue processing. 
Dequeue packet from Rx queue\n+ * When update through the ring, just set the empty indicator.\n+ */\n+uint16_t\n+enetfec_recv_pkts(void *rxq1, __rte_unused struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct rte_mempool *pool;\n+\tstruct bufdesc *bdp;\n+\tstruct rte_mbuf *mbuf, *new_mbuf = NULL;\n+\tunsigned short status;\n+\tunsigned short pkt_len;\n+\tint pkt_received = 0, index = 0;\n+\tvoid *data, *mbuf_data;\n+\tuint16_t vlan_tag;\n+\tstruct  bufdesc_ex *ebdp = NULL;\n+\tbool    vlan_packet_rcvd = false;\n+\tstruct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;\n+\tstruct rte_eth_stats *stats = &rxq->fep->stats;\n+\tstruct rte_eth_conf *eth_conf = &rxq->fep->dev->data->dev_conf;\n+\tuint64_t rx_offloads = eth_conf->rxmode.offloads;\n+\tpool = rxq->pool;\n+\tbdp = rxq->bd.cur;\n+#if ENETFEC_LOOPBACK\n+\tenetfec_lb_rxtx(rxq1);\n+#endif\n+\t/* Process the incoming packet */\n+\tstatus = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));\n+\twhile (!(status & RX_BD_EMPTY)) {\n+\t\tif (pkt_received >= nb_pkts)\n+\t\t\tbreak;\n+\n+\t\tnew_mbuf = rte_pktmbuf_alloc(pool);\n+\t\tif (unlikely(!new_mbuf)) {\n+\t\t\tstats->ierrors++;\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* Check for errors. */\n+\t\tstatus ^= RX_BD_LAST;\n+\t\tif (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |\n+\t\t\tRX_BD_CR | RX_BD_OV | RX_BD_LAST |\n+\t\t\tRX_BD_TR)) {\n+\t\t\tstats->ierrors++;\n+\t\t\tif (status & RX_BD_OV) {\n+\t\t\t\t/* FIFO overrun */\n+\t\t\t\t/* enet_dump_rx(rxq); */\n+\t\t\t\tENET_PMD_ERR(\"rx_fifo_error\\n\");\n+\t\t\t\tgoto rx_processing_done;\n+\t\t\t}\n+\t\t\tif (status & (RX_BD_LG | RX_BD_SH\n+\t\t\t\t\t\t| RX_BD_LAST)) {\n+\t\t\t\t/* Frame too long or too short. */\n+\t\t\t\tENET_PMD_ERR(\"rx_length_error\\n\");\n+\t\t\t\tif (status & RX_BD_LAST)\n+\t\t\t\t\tENET_PMD_ERR(\"rcv is not +last\\n\");\n+\t\t\t}\n+\t\t\tif (status & RX_BD_CR) {     /* CRC Error */\n+\t\t\t\tENET_PMD_ERR(\"rx_crc_errors\\n\");\n+\t\t\t}\n+\t\t\t/* Report late collisions as a frame error. */\n+\t\t\tif (status & (RX_BD_NO | RX_BD_TR))\n+\t\t\t\tENET_PMD_ERR(\"rx_frame_error\\n\");\n+\t\t\tgoto rx_processing_done;\n+\t\t}\n+\n+\t\t/* Process the incoming frame. */\n+\t\tstats->ipackets++;\n+\t\tpkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));\n+\t\tstats->ibytes += pkt_len;\n+\n+\t\t/* shows data with respect to the data_off field. 
*/\n+\t\tindex = enet_get_bd_index(bdp, &rxq->bd);\n+\t\tmbuf = rxq->rx_mbuf[index];\n+\n+\t\tdata = rte_pktmbuf_mtod(mbuf, uint8_t *);\n+\t\tmbuf_data = data;\n+\t\trte_prefetch0(data);\n+\t\trte_pktmbuf_append((struct rte_mbuf *)mbuf,\n+\t\t\t\tpkt_len - 4);\n+\n+\t\tif (rxq->fep->quirks & QUIRK_RACC)\n+\t\t\tdata = rte_pktmbuf_adj(mbuf, 2);\n+\n+\t\trx_pkts[pkt_received] = mbuf;\n+\t\tpkt_received++;\n+\n+\t\t/* Extract the enhanced buffer descriptor */\n+\t\tebdp = NULL;\n+\t\tif (rxq->fep->bufdesc_ex)\n+\t\t\tebdp = (struct bufdesc_ex *)bdp;\n+\n+\t\t/* If this is a VLAN packet remove the VLAN Tag */\n+\t\tvlan_packet_rcvd = false;\n+\t\tif ((rx_offloads & DEV_RX_OFFLOAD_VLAN) &&\n+\t\t\t\trxq->fep->bufdesc_ex &&\n+\t\t\t\t(rte_read32(&ebdp->bd_esc) &\n+\t\t\t\trte_cpu_to_le_32(BD_ENET_RX_VLAN))) {\n+\t\t\t/* Push and remove the vlan tag */\n+\t\t\tstruct rte_vlan_hdr *vlan_header =\n+\t\t\t\t(struct rte_vlan_hdr *)(data + ETH_HLEN);\n+\t\t\tvlan_tag = rte_be_to_cpu_16(vlan_header->vlan_tci);\n+\n+\t\t\tvlan_packet_rcvd = true;\n+\t\t\tmemmove(mbuf_data + VLAN_HLEN, data, ETH_ALEN * 2);\n+\t\t\trte_pktmbuf_adj(mbuf, VLAN_HLEN);\n+\t\t}\n+\n+\t\t/* Get receive timestamp from the mbuf */\n+\t\tif (rxq->fep->hw_ts_rx_en && rxq->fep->bufdesc_ex)\n+\t\t\tmbuf->timestamp =\n+\t\t\t\trte_le_to_cpu_32(rte_read32(&ebdp->ts));\n+\n+\t\tif (rxq->fep->bufdesc_ex &&\n+\t\t\t\t(rxq->fep->flag_csum & RX_FLAG_CSUM_EN)) {\n+\t\t\tif (!(rte_read32(&ebdp->bd_esc) &\n+\t\t\t\t\trte_cpu_to_le_32(RX_FLAG_CSUM_ERR))) {\n+\t\t\t\t/* don't check it */\n+\t\t\t\tmbuf->ol_flags = PKT_RX_IP_CKSUM_BAD;\n+\t\t\t} else {\n+\t\t\t\tmbuf->ol_flags = PKT_RX_IP_CKSUM_GOOD;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Handle received VLAN packets */\n+\t\tif (vlan_packet_rcvd) {\n+\t\t\tmbuf->vlan_tci = vlan_tag;\n+\t\t\tmbuf->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;\n+\t\t}\n+\n+\t\trxq->rx_mbuf[index] = new_mbuf;\n+\t\trte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),\n+\t\t\t\t&bdp->bd_bufaddr);\n+rx_processing_done:\n+\t\t/* when rx_processing_done clear the status flags\n+\t\t * for this buffer\n+\t\t */\n+\t\tstatus &= ~RX_BD_STATS;\n+\n+\t\t/* Mark the buffer empty */\n+\t\tstatus |= RX_BD_EMPTY;\n+\n+\t\tif (rxq->fep->bufdesc_ex) {\n+\t\t\tstruct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;\n+\t\t\trte_write32(rte_cpu_to_le_32(RX_BD_INT),\n+\t\t\t\t    &ebdp->bd_esc);\n+\t\t\trte_write32(0, &ebdp->bd_prot);\n+\t\t\trte_write32(0, &ebdp->bd_bdu);\n+\t\t}\n+\n+\t\t/* Make sure the updates to rest of the descriptor are\n+\t\t * performed before transferring ownership.\n+\t\t */\n+\t\trte_wmb();\n+\t\trte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);\n+\n+\t\t/* Update BD pointer to next entry */\n+\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\n+\t\t/* Doing this here will keep the FEC running while we process\n+\t\t * incoming frames.\n+\t\t */\n+\t\trte_write32(0, rxq->bd.active_reg_desc);\n+\t\tstatus = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));\n+\t}\n+\trxq->bd.cur = bdp;\n+\treturn pkt_received;\n+}\n+\n+uint16_t\n+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct enetfec_priv_tx_q *txq  =\n+\t\t\t(struct enetfec_priv_tx_q *)tx_queue;\n+\tstruct rte_eth_stats *stats = &txq->fep->stats;\n+\tstruct bufdesc *bdp, *last_bdp;\n+\tstruct rte_mbuf *mbuf;\n+\tunsigned short status;\n+\tunsigned short buflen;\n+\tunsigned int index, estatus = 0;\n+\tunsigned int i, pkt_transmitted = 0;\n+\tu8 *data;\n+\tint tx_st = 1;\n+\n+\twhile (tx_st) {\n+\t\tif (pkt_transmitted >= 
nb_pkts) {\n+\t\t\ttx_st = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t\tbdp = txq->bd.cur;\n+\t\t/* First clean the ring */\n+\t\tindex = enet_get_bd_index(bdp, &txq->bd);\n+\t\tstatus = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));\n+\n+\t\tif (status & TX_BD_READY) {\n+\t\t\tstats->oerrors++;\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (txq->tx_mbuf[index]) {\n+\t\t\trte_pktmbuf_free(txq->tx_mbuf[index]);\n+\t\t\ttxq->tx_mbuf[index] = NULL;\n+\t\t}\n+\n+\t\tmbuf = *(tx_pkts);\n+\t\ttx_pkts++;\n+\n+\t\t/* Fill in a Tx ring entry */\n+\t\tlast_bdp = bdp;\n+\t\tstatus &= ~TX_BD_STATS;\n+\n+\t\t/* Set buffer length and buffer pointer */\n+\t\tbuflen = rte_pktmbuf_pkt_len(mbuf);\n+\t\tstats->opackets++;\n+\t\tstats->obytes += buflen;\n+\n+\t\tif (mbuf->nb_segs > 1) {\n+\t\t\tENET_PMD_DEBUG(\"SG not supported\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tstatus |= (TX_BD_LAST);\n+\t\tdata = rte_pktmbuf_mtod(mbuf, void *);\n+\t\tfor (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)\n+\t\t\tdcbf(data + i);\n+\n+\t\trte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),\n+\t\t\t    &bdp->bd_bufaddr);\n+\t\trte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);\n+\n+\t\tif (txq->fep->bufdesc_ex) {\n+\t\t\tstruct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;\n+\n+\t\t\tif (mbuf->ol_flags == PKT_RX_IP_CKSUM_GOOD)\n+\t\t\t\testatus |= TX_BD_PINS | TX_BD_IINS;\n+\n+\t\t\trte_write32(0, &ebdp->bd_bdu);\n+\t\t\trte_write32(rte_cpu_to_le_32(estatus),\n+\t\t\t\t    &ebdp->bd_esc);\n+\t\t}\n+\n+\t\tindex = enet_get_bd_index(last_bdp, &txq->bd);\n+\t\t/* Save mbuf pointer */\n+\t\ttxq->tx_mbuf[index] = mbuf;\n+\n+\t\t/* Make sure the updates to rest of the descriptor are performed\n+\t\t * before transferring ownership.\n+\t\t */\n+\t\tstatus |= (TX_BD_READY | TX_BD_TC);\n+\t\trte_wmb();\n+\t\trte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);\n+\n+\t\t/* Trigger transmission start */\n+\t\trte_write32(0, txq->bd.active_reg_desc);\n+\t\tpkt_transmitted++;\n+\n+\t\t/* If this was the last BD in the ring, start at the\n+\t\t * beginning again.\n+\t\t */\n+\t\tbdp = enet_get_nextdesc(last_bdp, &txq->bd);\n+\n+\t\t/* Make sure the update to bdp and tx_skbuff are performed\n+\t\t * before txq->bd.cur.\n+\t\t */\n+\t\ttxq->bd.cur = bdp;\n+\t}\n+\treturn nb_pkts;\n+}\n",
    "prefixes": [
        "4/4"
    ]
}
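
The patch and put operations shown at the top require an authenticated request. A minimal sketch of a partial update follows, assuming an API token with maintainer rights on the project; PATCHWORK_TOKEN is a hypothetical environment variable, and the writable fields and state name used here are assumptions based on typical Patchwork deployments, not confirmed by this response:

import os
import requests

# Partially update the same patch via HTTP PATCH (the "patch" operation).
# The Authorization header carries a Patchwork API token; PATCHWORK_TOKEN
# is a hypothetical environment variable holding it, and "superseded" is
# one plausible target state.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/92457/",
    headers={"Authorization": "Token " + os.environ["PATCHWORK_TOKEN"]},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])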