get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch, replacing the full set of writable fields.
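
The three methods above map onto plain HTTP requests, so this resource can be scripted directly. Below is a minimal sketch in Python using the requests library; it assumes a standard Patchwork REST deployment. The API token placeholder and the choice of writable fields ("state", "archived") are assumptions based on typical Patchwork configurations, not shown on this page; unauthenticated clients get read-only access.

    import requests

    BASE = "http://patches.dpdk.org/api"

    # get: fetch the patch as JSON (reads need no authentication)
    patch = requests.get(f"{BASE}/patches/44997/").json()
    print(patch["name"], "->", patch["state"])

    # patch: partial update; only the fields in the body are changed.
    # "<api-token>" is a placeholder -- updating a patch requires a
    # token for a user with maintainer rights on the project.
    resp = requests.patch(
        f"{BASE}/patches/44997/",
        headers={"Authorization": "Token <api-token>"},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()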

GET /api/patches/44997/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 44997,
    "url": "http://patches.dpdk.org/api/patches/44997/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1537434339-22570-3-git-send-email-amo@semihalf.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1537434339-22570-3-git-send-email-amo@semihalf.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1537434339-22570-3-git-send-email-amo@semihalf.com",
    "date": "2018-09-20T09:05:33",
    "name": "[v5,2/8] net/mvneta: add Rx/Tx support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "344f519cfd40466c1b66aab3b4b58f65521c3ca4",
    "submitter": {
        "id": 1112,
        "url": "http://patches.dpdk.org/api/people/1112/?format=api",
        "name": "Andrzej Ostruszka",
        "email": "amo@semihalf.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1537434339-22570-3-git-send-email-amo@semihalf.com/mbox/",
    "series": [
        {
            "id": 1410,
            "url": "http://patches.dpdk.org/api/series/1410/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1410",
            "date": "2018-09-20T09:05:31",
            "name": "Add Marvell NETA PMD",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/1410/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/44997/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/44997/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 157D66833;\n\tThu, 20 Sep 2018 11:05:49 +0200 (CEST)",
            "from mail-lj1-f196.google.com (mail-lj1-f196.google.com\n\t[209.85.208.196]) by dpdk.org (Postfix) with ESMTP id 599F85F2E\n\tfor <dev@dpdk.org>; Thu, 20 Sep 2018 11:05:45 +0200 (CEST)",
            "by mail-lj1-f196.google.com with SMTP id l15-v6so7677306lji.6\n\tfor <dev@dpdk.org>; Thu, 20 Sep 2018 02:05:45 -0700 (PDT)",
            "from amok.semihalf.local (31-172-191-173.noc.fibertech.net.pl.\n\t[31.172.191.173]) by smtp.googlemail.com with ESMTPSA id\n\tg16-v6sm4338106lje.1.2018.09.20.02.05.43\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tThu, 20 Sep 2018 02:05:43 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=semihalf-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=txTUYtBXRhJEXIKPOW8Jrq2y7ZwvV76FumJzTNSKzwI=;\n\tb=djPU7PqDXlZ8E0CyDbfXoW7Zt3c9dbYW5s58Ypc9nkB7j0ZVyDCdcOfaX0lDR0czQ7\n\tY6MWNsSmL8ARQb9nQi2jWQPTARVxWkwhOhyqyzMPiGPsgMOKBRXFizDtZfvpjMJJKLbz\n\tc1g4c1g0kXq78nLTpxhHalYvWh/VRAafswY2p1E5GckVYzY/KuTdi5c23duwYe5yl0ih\n\tR6+X9MW+j+0/JVhz3QPSlEON9cfyQ8rI+rVmdeQV7/DrvapMn4UrLxW1dC4unLoyEFoD\n\tt1FVqAcFCL1FXG49EU0gVqpBdCQNo/qbES5lVtO1BRNinKDRX5pwxwH1ENKy+uLMOS8S\n\tFm1A==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=txTUYtBXRhJEXIKPOW8Jrq2y7ZwvV76FumJzTNSKzwI=;\n\tb=Z5cHF5G06VF0iJNDRSFsp0m5HPOMlgg7kGXqUH/gPfDDdvLQsXqujYSmob8a5AX4wn\n\to8G7nTS1QFrU3CvXWKX4G7yZVB/lzNXew6yDzTN6avWnIFcQGynYiJArnGBTXHcVdZiC\n\t24K4+K02tdG9LfMkbAC5OtonjAcPs/22WhH0wYd3TQcQoIjX4xayxx37HusjCAO2z4XV\n\tz1/MmpSx67JSiswzU/kwwHE6JcI7UCW88DK7hF7i+y4v9Xw7eJ31rA6LnGuRpR1m0LR2\n\tqGw78CtDNwC2kopPvJOHIbOEZh0woqs1WUGB77BEJlVnRTPj4Wzi6z9QKF7k0sVpbWwV\n\t4r6A==",
        "X-Gm-Message-State": "APzg51AeKTjqCifob/SuRsCx0J/wcGHPqy7CJZRBy0kFzQyg+EKaTBcS\n\t3gRCDVwGuAodM2JLsSUfRGszVviXm4Q=",
        "X-Google-Smtp-Source": "ANB0VdamQos1B5XvcitufMUQAUAeQD2eBGoLIxWKD1M3XXqblGBZjNUr18cfF1i/mh1b+rAiGVXqRw==",
        "X-Received": "by 2002:a2e:9d45:: with SMTP id\n\ty5-v6mr26431846ljj.136.1537434344306; \n\tThu, 20 Sep 2018 02:05:44 -0700 (PDT)",
        "From": "Andrzej Ostruszka <amo@semihalf.com>",
        "To": "dev@dpdk.org",
        "Cc": "mw@semihalf.com, zr@semihalf.com, tdu@semihalf.com, nadavh@marvell.com",
        "Date": "Thu, 20 Sep 2018 11:05:33 +0200",
        "Message-Id": "<1537434339-22570-3-git-send-email-amo@semihalf.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1537434339-22570-1-git-send-email-amo@semihalf.com>",
        "References": "<1537369294-17099-1-git-send-email-amo@semihalf.com>\n\t<1537434339-22570-1-git-send-email-amo@semihalf.com>",
        "Subject": "[dpdk-dev] [PATCH v5 2/8] net/mvneta: add Rx/Tx support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Zyta Szpak <zr@semihalf.com>\n\nAdd part of PMD for actual reception/transmission.\n\nSigned-off-by: Yelena Krivosheev <yelena@marvell.com>\nSigned-off-by: Dmitri Epshtein <dima@marvell.com>\nSigned-off-by: Zyta Szpak <zr@semihalf.com>\n---\n doc/guides/nics/features/mvneta.ini |   3 +\n doc/guides/nics/mvneta.rst          |   4 +\n drivers/net/mvneta/Makefile         |   2 +-\n drivers/net/mvneta/meson.build      |   3 +-\n drivers/net/mvneta/mvneta_ethdev.c  |  51 ++-\n drivers/net/mvneta/mvneta_ethdev.h  |   4 +\n drivers/net/mvneta/mvneta_rxtx.c    | 850 ++++++++++++++++++++++++++++++++++++\n drivers/net/mvneta/mvneta_rxtx.h    | 168 +++++++\n 8 files changed, 1080 insertions(+), 5 deletions(-)\n create mode 100644 drivers/net/mvneta/mvneta_rxtx.c\n create mode 100644 drivers/net/mvneta/mvneta_rxtx.h",
    "diff": "diff --git a/doc/guides/nics/features/mvneta.ini b/doc/guides/nics/features/mvneta.ini\nindex ba6fe4b..0a89e2f 100644\n--- a/doc/guides/nics/features/mvneta.ini\n+++ b/doc/guides/nics/features/mvneta.ini\n@@ -7,5 +7,8 @@\n Speed capabilities   = Y\n Jumbo frame          = Y\n CRC offload          = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n+Packet type parsing  = Y\n ARMv8                = Y\n Usage doc            = Y\ndiff --git a/doc/guides/nics/mvneta.rst b/doc/guides/nics/mvneta.rst\nindex bf08417..9d25c40 100644\n--- a/doc/guides/nics/mvneta.rst\n+++ b/doc/guides/nics/mvneta.rst\n@@ -27,9 +27,13 @@ Features of the MVNETA PMD are:\n \n - Start/stop\n - tx/rx_queue_setup\n+- tx/rx_burst\n - Speed capabilities\n - Jumbo frame\n - CRC offload\n+- L3 checksum offload\n+- L4 checksum offload\n+- Packet type parsing\n \n \n Limitations\ndiff --git a/drivers/net/mvneta/Makefile b/drivers/net/mvneta/Makefile\nindex 149992e..349f550 100644\n--- a/drivers/net/mvneta/Makefile\n+++ b/drivers/net/mvneta/Makefile\n@@ -37,6 +37,6 @@ LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile\n LDLIBS += -lrte_bus_vdev\n \n # library source files\n-SRCS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta_ethdev.c mvneta_rxtx.c\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/mvneta/meson.build b/drivers/net/mvneta/meson.build\nindex 2f31954..c0b1bce 100644\n--- a/drivers/net/mvneta/meson.build\n+++ b/drivers/net/mvneta/meson.build\n@@ -21,7 +21,8 @@ else\n endif\n \n sources = files(\n-\t'mvneta_ethdev.c'\n+\t'mvneta_ethdev.c',\n+\t'mvneta_rxtx.c'\n )\n \n deps += ['cfgfile', 'common_mvep']\ndiff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c\nindex 9ee197a..331cd1d 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.c\n+++ b/drivers/net/mvneta/mvneta_ethdev.c\n@@ -6,8 +6,6 @@\n \n #include <rte_ethdev_driver.h>\n #include <rte_kvargs.h>\n-#include <rte_log.h>\n-#include <rte_malloc.h>\n #include <rte_bus_vdev.h>\n \n #include <stdio.h>\n@@ -23,7 +21,7 @@\n \n #include <rte_mvep_common.h>\n \n-#include \"mvneta_ethdev.h\"\n+#include \"mvneta_rxtx.h\"\n \n \n #define MVNETA_IFACE_NAME_ARG \"iface\"\n@@ -308,6 +306,18 @@ mvneta_dev_start(struct rte_eth_dev *dev)\n \t\tpriv->uc_mc_flushed = 1;\n \t}\n \n+\t/* Allocate buffers */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct mvneta_rxq *rxq = dev->data->rx_queues[i];\n+\t\tint num = rxq->size;\n+\n+\t\tret = mvneta_buffs_alloc(priv, rxq, &num);\n+\t\tif (ret || num != rxq->size) {\n+\t\t\trte_free(rxq);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n \tret = mvneta_dev_set_link_up(dev);\n \tif (ret) {\n \t\tMVNETA_LOG(ERR, \"Failed to set link up\");\n@@ -318,6 +328,8 @@ mvneta_dev_start(struct rte_eth_dev *dev)\n \tfor (i = 0; i < dev->data->nb_tx_queues; i++)\n \t\tdev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;\n \n+\tmvneta_set_tx_function(dev);\n+\n \treturn 0;\n \n out:\n@@ -336,11 +348,25 @@ static void\n mvneta_dev_stop(struct rte_eth_dev *dev)\n {\n \tstruct mvneta_priv *priv = dev->data->dev_private;\n+\tint i;\n \n \tif (!priv->ppio)\n \t\treturn;\n \n \tmvneta_dev_set_link_down(dev);\n+\tMVNETA_LOG(INFO, \"Flushing rx queues\");\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct mvneta_rxq *rxq = dev->data->rx_queues[i];\n+\n+\t\tmvneta_rx_queue_flush(rxq);\n+\t}\n+\n+\tMVNETA_LOG(INFO, \"Flushing tx queues\");\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tstruct mvneta_txq *txq 
= dev->data->tx_queues[i];\n+\n+\t\tmvneta_tx_queue_flush(txq);\n+\t}\n \n \tneta_ppio_deinit(priv->ppio);\n \n@@ -357,9 +383,20 @@ static void\n mvneta_dev_close(struct rte_eth_dev *dev)\n {\n \tstruct mvneta_priv *priv = dev->data->dev_private;\n+\tint i;\n \n \tif (priv->ppio)\n \t\tmvneta_dev_stop(dev);\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tmvneta_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tdev->data->rx_queues[i] = NULL;\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tmvneta_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tdev->data->tx_queues[i] = NULL;\n+\t}\n }\n \n /**\n@@ -398,6 +435,12 @@ static const struct eth_dev_ops mvneta_ops = {\n \t.mac_addr_set = mvneta_mac_addr_set,\n \t.dev_infos_get = mvneta_dev_infos_get,\n \t.dev_supported_ptypes_get = mvneta_dev_supported_ptypes_get,\n+\t.rxq_info_get = mvneta_rxq_info_get,\n+\t.txq_info_get = mvneta_txq_info_get,\n+\t.rx_queue_setup = mvneta_rx_queue_setup,\n+\t.rx_queue_release = mvneta_rx_queue_release,\n+\t.tx_queue_setup = mvneta_tx_queue_setup,\n+\t.tx_queue_release = mvneta_tx_queue_release,\n };\n \n /**\n@@ -448,6 +491,8 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)\n \teth_dev->data->kdrv = RTE_KDRV_NONE;\n \teth_dev->data->dev_private = priv;\n \teth_dev->device = &vdev->device;\n+\teth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;\n+\tmvneta_set_tx_function(eth_dev);\n \teth_dev->dev_ops = &mvneta_ops;\n \n \treturn 0;\ndiff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h\nindex 8b8d726..1a78a41 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.h\n+++ b/drivers/net/mvneta/mvneta_ethdev.h\n@@ -7,6 +7,10 @@\n #ifndef _MVNETA_ETHDEV_H_\n #define _MVNETA_ETHDEV_H_\n \n+#include <rte_ethdev.h>\n+#include <rte_malloc.h>\n+#include <rte_log.h>\n+\n /*\n  * container_of is defined by both DPDK and MUSDK,\n  * we'll declare only one version.\ndiff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c\nnew file mode 100644\nindex 0000000..d5ea5a8\n--- /dev/null\n+++ b/drivers/net/mvneta/mvneta_rxtx.c\n@@ -0,0 +1,850 @@\n+#include \"mvneta_rxtx.h\"\n+\n+uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID;\n+uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN;\n+\n+static inline void\n+mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf)\n+{\n+\tsq->ent[sq->head].cookie = (uint64_t)buf;\n+\tsq->ent[sq->head].addr = buf ?\n+\t\trte_mbuf_data_iova_default(buf) : 0;\n+\n+\tsq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\tsq->size++;\n+}\n+\n+static inline void\n+mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf)\n+{\n+\tneta_ppio_outq_desc_reset(desc);\n+\tneta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));\n+\tneta_ppio_outq_desc_set_pkt_offset(desc, 0);\n+\tneta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));\n+}\n+\n+/**\n+ * Release already sent buffers to mempool.\n+ *\n+ * @param ppio\n+ *   Pointer to the port structure.\n+ * @param sq\n+ *   Pointer to the shadow queue.\n+ * @param qid\n+ *   Queue id number.\n+ * @param force\n+ *   Force releasing packets.\n+ */\n+static inline void\n+mvneta_sent_buffers_free(struct neta_ppio *ppio,\n+\t\t\t struct mvneta_shadow_txq *sq, int qid)\n+{\n+\tstruct neta_buff_inf *entry;\n+\tuint16_t nb_done = 0;\n+\tint i;\n+\tint tail = sq->tail;\n+\n+\tneta_ppio_get_num_outq_done(ppio, qid, &nb_done);\n+\n+\tif (nb_done > sq->size) {\n+\t\tMVNETA_LOG(ERR, \"nb_done: %d, sq->size 
%d\",\n+\t\t\t   nb_done, sq->size);\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < nb_done; i++) {\n+\t\tentry = &sq->ent[tail];\n+\n+\t\tif (unlikely(!entry->addr)) {\n+\t\t\tMVNETA_LOG(DEBUG,\n+\t\t\t\t\"Shadow memory @%d: cookie(%lx), pa(%lx)!\",\n+\t\t\t\ttail, (u64)entry->cookie,\n+\t\t\t\t(u64)entry->addr);\n+\t\t\ttail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tstruct rte_mbuf *mbuf;\n+\n+\t\tmbuf = (struct rte_mbuf *)\n+\t\t\t   (cookie_addr_high | entry->cookie);\n+\t\trte_pktmbuf_free(mbuf);\n+\t\ttail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\t}\n+\n+\tsq->tail = tail;\n+\tsq->size -= nb_done;\n+}\n+\n+/**\n+ * Return packet type information and l3/l4 offsets.\n+ *\n+ * @param desc\n+ *   Pointer to the received packet descriptor.\n+ * @param l3_offset\n+ *   l3 packet offset.\n+ * @param l4_offset\n+ *   l4 packet offset.\n+ *\n+ * @return\n+ *   Packet type information.\n+ */\n+static inline uint64_t\n+mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc,\n+\t\t\t\t    uint8_t *l3_offset, uint8_t *l4_offset)\n+{\n+\tenum neta_inq_l3_type l3_type;\n+\tenum neta_inq_l4_type l4_type;\n+\tuint64_t packet_type;\n+\n+\tneta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);\n+\tneta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);\n+\n+\tpacket_type = RTE_PTYPE_L2_ETHER;\n+\n+\tif (NETA_RXD_GET_VLAN_INFO(desc))\n+\t\tpacket_type |= RTE_PTYPE_L2_ETHER_VLAN;\n+\n+\tswitch (l3_type) {\n+\tcase NETA_INQ_L3_TYPE_IPV4_BAD:\n+\tcase NETA_INQ_L3_TYPE_IPV4_OK:\n+\t\tpacket_type |= RTE_PTYPE_L3_IPV4;\n+\t\tbreak;\n+\tcase NETA_INQ_L3_TYPE_IPV6:\n+\t\tpacket_type |= RTE_PTYPE_L3_IPV6;\n+\t\tbreak;\n+\tdefault:\n+\t\tpacket_type |= RTE_PTYPE_UNKNOWN;\n+\t\tMVNETA_LOG(DEBUG, \"Failed to recognize l3 packet type\");\n+\t\tbreak;\n+\t}\n+\n+\tswitch (l4_type) {\n+\tcase NETA_INQ_L4_TYPE_TCP:\n+\t\tpacket_type |= RTE_PTYPE_L4_TCP;\n+\t\tbreak;\n+\tcase NETA_INQ_L4_TYPE_UDP:\n+\t\tpacket_type |= RTE_PTYPE_L4_UDP;\n+\t\tbreak;\n+\tdefault:\n+\t\tpacket_type |= RTE_PTYPE_UNKNOWN;\n+\t\tMVNETA_LOG(DEBUG, \"Failed to recognize l4 packet type\");\n+\t\tbreak;\n+\t}\n+\n+\treturn packet_type;\n+}\n+\n+/**\n+ * Prepare offload information.\n+ *\n+ * @param ol_flags\n+ *   Offload flags.\n+ * @param packet_type\n+ *   Packet type bitfield.\n+ * @param l3_type\n+ *   Pointer to the neta_ouq_l3_type structure.\n+ * @param l4_type\n+ *   Pointer to the neta_outq_l4_type structure.\n+ * @param gen_l3_cksum\n+ *   Will be set to 1 in case l3 checksum is computed.\n+ * @param l4_cksum\n+ *   Will be set to 1 in case l4 checksum is computed.\n+ *\n+ * @return\n+ *   0 on success, negative error value otherwise.\n+ */\n+static inline int\n+mvneta_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,\n+\t\t\tenum neta_outq_l3_type *l3_type,\n+\t\t\tenum neta_outq_l4_type *l4_type,\n+\t\t\tint *gen_l3_cksum,\n+\t\t\tint *gen_l4_cksum)\n+{\n+\t/*\n+\t * Based on ol_flags prepare information\n+\t * for neta_ppio_outq_desc_set_proto_info() which setups descriptor\n+\t * for offloading.\n+\t */\n+\tif (ol_flags & PKT_TX_IPV4) {\n+\t\t*l3_type = NETA_OUTQ_L3_TYPE_IPV4;\n+\t\t*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 
1 : 0;\n+\t} else if (ol_flags & PKT_TX_IPV6) {\n+\t\t*l3_type = NETA_OUTQ_L3_TYPE_IPV6;\n+\t\t/* no checksum for ipv6 header */\n+\t\t*gen_l3_cksum = 0;\n+\t} else {\n+\t\t/* if something different then stop processing */\n+\t\treturn -1;\n+\t}\n+\n+\tol_flags &= PKT_TX_L4_MASK;\n+\tif ((packet_type & RTE_PTYPE_L4_TCP) &&\n+\t    ol_flags == PKT_TX_TCP_CKSUM) {\n+\t\t*l4_type = NETA_OUTQ_L4_TYPE_TCP;\n+\t\t*gen_l4_cksum = 1;\n+\t} else if ((packet_type & RTE_PTYPE_L4_UDP) &&\n+\t\t   ol_flags == PKT_TX_UDP_CKSUM) {\n+\t\t*l4_type = NETA_OUTQ_L4_TYPE_UDP;\n+\t\t*gen_l4_cksum = 1;\n+\t} else {\n+\t\t*l4_type = NETA_OUTQ_L4_TYPE_OTHER;\n+\t\t/* no checksum for other type */\n+\t\t*gen_l4_cksum = 0;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Get offload information from the received packet descriptor.\n+ *\n+ * @param desc\n+ *   Pointer to the received packet descriptor.\n+ *\n+ * @return\n+ *   Mbuf offload flags.\n+ */\n+static inline uint64_t\n+mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc)\n+{\n+\tuint64_t flags;\n+\tenum neta_inq_desc_status status;\n+\n+\tstatus = neta_ppio_inq_desc_get_l3_pkt_error(desc);\n+\tif (unlikely(status != NETA_DESC_ERR_OK))\n+\t\tflags = PKT_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags = PKT_RX_IP_CKSUM_GOOD;\n+\n+\tstatus = neta_ppio_inq_desc_get_l4_pkt_error(desc);\n+\tif (unlikely(status != NETA_DESC_ERR_OK))\n+\t\tflags |= PKT_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= PKT_RX_L4_CKSUM_GOOD;\n+\n+\treturn flags;\n+}\n+\n+/**\n+ * DPDK callback for transmit.\n+ *\n+ * @param txq\n+ *   Generic pointer transmit queue.\n+ * @param tx_pkts\n+ *   Packets to transmit.\n+ * @param nb_pkts\n+ *   Number of packets in array.\n+ *\n+ * @return\n+ *   Number of packets successfully transmitted.\n+ */\n+static uint16_t\n+mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct mvneta_txq *q = txq;\n+\tstruct mvneta_shadow_txq *sq;\n+\tstruct neta_ppio_desc descs[nb_pkts];\n+\n+\tint i, ret, bytes_sent = 0;\n+\tuint16_t num, sq_free_size;\n+\tuint64_t addr;\n+\n+\tsq = &q->shadow_txq;\n+\tif (unlikely(!nb_pkts || !q->priv->ppio))\n+\t\treturn 0;\n+\n+\tif (sq->size)\n+\t\tmvneta_sent_buffers_free(q->priv->ppio,\n+\t\t\t\t\t sq, q->queue_id);\n+\n+\tsq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;\n+\tif (unlikely(nb_pkts > sq_free_size)) {\n+\t\tMVNETA_LOG(DEBUG,\n+\t\t\t\"No room in shadow queue for %d packets! 
%d packets will be sent.\",\n+\t\t\tnb_pkts, sq_free_size);\n+\t\tnb_pkts = sq_free_size;\n+\t}\n+\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tstruct rte_mbuf *mbuf = tx_pkts[i];\n+\t\tint gen_l3_cksum, gen_l4_cksum;\n+\t\tenum neta_outq_l3_type l3_type;\n+\t\tenum neta_outq_l4_type l4_type;\n+\n+\t\t/* Fill first mbuf info in shadow queue */\n+\t\tmvneta_fill_shadowq(sq, mbuf);\n+\t\tmvneta_fill_desc(&descs[i], mbuf);\n+\n+\t\tbytes_sent += rte_pktmbuf_pkt_len(mbuf);\n+\n+\t\tret = mvneta_prepare_proto_info(mbuf->ol_flags,\n+\t\t\t\t\t\tmbuf->packet_type,\n+\t\t\t\t\t\t&l3_type, &l4_type,\n+\t\t\t\t\t\t&gen_l3_cksum,\n+\t\t\t\t\t\t&gen_l4_cksum);\n+\t\tif (unlikely(ret))\n+\t\t\tcontinue;\n+\n+\t\tneta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,\n+\t\t\t\t\t\t   mbuf->l2_len,\n+\t\t\t\t\t\t   mbuf->l2_len + mbuf->l3_len,\n+\t\t\t\t\t\t   gen_l3_cksum, gen_l4_cksum);\n+\t}\n+\tnum = nb_pkts;\n+\tneta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts);\n+\n+\n+\t/* number of packets that were not sent */\n+\tif (unlikely(num > nb_pkts)) {\n+\t\tfor (i = nb_pkts; i < num; i++) {\n+\t\t\tsq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &\n+\t\t\t\tMRVL_NETA_TX_SHADOWQ_MASK;\n+\t\t\taddr = cookie_addr_high | sq->ent[sq->head].cookie;\n+\t\t\tbytes_sent -=\n+\t\t\t\trte_pktmbuf_pkt_len((struct rte_mbuf *)addr);\n+\t\t}\n+\t\tsq->size -= num - nb_pkts;\n+\t}\n+\n+\tq->bytes_sent += bytes_sent;\n+\n+\treturn nb_pkts;\n+}\n+\n+/** DPDK callback for S/G transmit.\n+ *\n+ * @param txq\n+ *   Generic pointer transmit queue.\n+ * @param tx_pkts\n+ *   Packets to transmit.\n+ * @param nb_pkts\n+ *   Number of packets in array.\n+ *\n+ * @return\n+ *   Number of packets successfully transmitted.\n+ */\n+static uint16_t\n+mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct mvneta_txq *q = txq;\n+\tstruct mvneta_shadow_txq *sq;\n+\tstruct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS];\n+\tstruct neta_ppio_sg_pkts pkts;\n+\tuint8_t frags[nb_pkts];\n+\tint i, j, ret, bytes_sent = 0;\n+\tint tail, tail_first;\n+\tuint16_t num, sq_free_size;\n+\tuint16_t nb_segs, total_descs = 0;\n+\tuint64_t addr;\n+\n+\tsq = &q->shadow_txq;\n+\tpkts.frags = frags;\n+\tpkts.num = 0;\n+\n+\tif (unlikely(!q->priv->ppio))\n+\t\treturn 0;\n+\n+\tif (sq->size)\n+\t\tmvneta_sent_buffers_free(q->priv->ppio,\n+\t\t\t\t\t sq, q->queue_id);\n+\t/* Save shadow queue free size */\n+\tsq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;\n+\n+\ttail = 0;\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tstruct rte_mbuf *mbuf = tx_pkts[i];\n+\t\tstruct rte_mbuf *seg = NULL;\n+\t\tint gen_l3_cksum, gen_l4_cksum;\n+\t\tenum neta_outq_l3_type l3_type;\n+\t\tenum neta_outq_l4_type l4_type;\n+\n+\t\tnb_segs = mbuf->nb_segs;\n+\t\ttotal_descs += nb_segs;\n+\n+\t\t/*\n+\t\t * Check if total_descs does not exceed\n+\t\t * shadow queue free size\n+\t\t */\n+\t\tif (unlikely(total_descs > sq_free_size)) {\n+\t\t\ttotal_descs -= nb_segs;\n+\t\t\tMVNETA_LOG(DEBUG,\n+\t\t\t\t\"No room in shadow queue for %d packets! \"\n+\t\t\t\t\"%d packets will be sent.\",\n+\t\t\t\tnb_pkts, i);\n+\t\t\tbreak;\n+\t\t}\n+\n+\n+\t\t/* Check if nb_segs does not exceed the max nb of desc per\n+\t\t * fragmented packet\n+\t\t */\n+\t\tif (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) {\n+\t\t\ttotal_descs -= nb_segs;\n+\t\t\tMVNETA_LOG(ERR,\n+\t\t\t\t\"Too many segments. 
Packet won't be sent.\");\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tpkts.frags[pkts.num] = nb_segs;\n+\t\tpkts.num++;\n+\t\ttail_first = tail;\n+\n+\t\tseg = mbuf;\n+\t\tfor (j = 0; j < nb_segs - 1; j++) {\n+\t\t\t/* For the subsequent segments, set shadow queue\n+\t\t\t * buffer to NULL\n+\t\t\t */\n+\t\t\tmvneta_fill_shadowq(sq, NULL);\n+\t\t\tmvneta_fill_desc(&descs[tail], seg);\n+\n+\t\t\ttail++;\n+\t\t\tseg = seg->next;\n+\t\t}\n+\t\t/* Put first mbuf info in last shadow queue entry */\n+\t\tmvneta_fill_shadowq(sq, mbuf);\n+\t\t/* Update descriptor with last segment */\n+\t\tmvneta_fill_desc(&descs[tail++], seg);\n+\n+\t\tbytes_sent += rte_pktmbuf_pkt_len(mbuf);\n+\n+\t\tret = mvneta_prepare_proto_info(mbuf->ol_flags,\n+\t\t\t\t\t\tmbuf->packet_type,\n+\t\t\t\t\t\t&l3_type, &l4_type,\n+\t\t\t\t\t\t&gen_l3_cksum,\n+\t\t\t\t\t\t&gen_l4_cksum);\n+\t\tif (unlikely(ret))\n+\t\t\tcontinue;\n+\n+\t\tneta_ppio_outq_desc_set_proto_info(&descs[tail_first],\n+\t\t\t\t\t\t   l3_type, l4_type,\n+\t\t\t\t\t\t   mbuf->l2_len,\n+\t\t\t\t\t\t   mbuf->l2_len + mbuf->l3_len,\n+\t\t\t\t\t\t   gen_l3_cksum, gen_l4_cksum);\n+\t}\n+\tnum = total_descs;\n+\tneta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs,\n+\t\t\t  &pkts);\n+\n+\t/* number of packets that were not sent */\n+\tif (unlikely(num > total_descs)) {\n+\t\tfor (i = total_descs; i < num; i++) {\n+\t\t\tsq->head = (MRVL_NETA_TX_SHADOWQ_SIZE +\n+\t\t\t\t\tsq->head - 1) &\n+\t\t\t\t\tMRVL_NETA_TX_SHADOWQ_MASK;\n+\t\t\taddr = sq->ent[sq->head].cookie;\n+\t\t\tif (addr) {\n+\t\t\t\tstruct rte_mbuf *mbuf;\n+\n+\t\t\t\tmbuf = (struct rte_mbuf *)\n+\t\t\t\t\t\t(cookie_addr_high | addr);\n+\t\t\t\tbytes_sent -= rte_pktmbuf_pkt_len(mbuf);\n+\t\t\t}\n+\t\t}\n+\t\tsq->size -= num - total_descs;\n+\t\tnb_pkts = pkts.num;\n+\t}\n+\n+\tq->bytes_sent += bytes_sent;\n+\n+\treturn nb_pkts;\n+}\n+\n+/**\n+ * Set tx burst function according to offload flag\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ */\n+void\n+mvneta_set_tx_function(struct rte_eth_dev *dev)\n+{\n+\tstruct mvneta_priv *priv = dev->data->dev_private;\n+\n+\t/* Use a simple Tx queue (no offloads, no multi segs) if possible */\n+\tif (priv->multiseg) {\n+\t\tMVNETA_LOG(INFO, \"Using multi-segment tx callback\");\n+\t\tdev->tx_pkt_burst = mvneta_tx_sg_pkt_burst;\n+\t} else {\n+\t\tMVNETA_LOG(INFO, \"Using single-segment tx callback\");\n+\t\tdev->tx_pkt_burst = mvneta_tx_pkt_burst;\n+\t}\n+}\n+\n+/**\n+ * DPDK callback for receive.\n+ *\n+ * @param rxq\n+ *   Generic pointer to the receive queue.\n+ * @param rx_pkts\n+ *   Array to store received packets.\n+ * @param nb_pkts\n+ *   Maximum number of packets in array.\n+ *\n+ * @return\n+ *   Number of packets successfully received.\n+ */\n+uint16_t\n+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct mvneta_rxq *q = rxq;\n+\tstruct neta_ppio_desc descs[nb_pkts];\n+\tint i, ret, rx_done = 0, rx_dropped = 0;\n+\n+\tif (unlikely(!q || !q->priv->ppio))\n+\t\treturn 0;\n+\n+\tret = neta_ppio_recv(q->priv->ppio, q->queue_id,\n+\t\t\tdescs, &nb_pkts);\n+\n+\tif (unlikely(ret < 0)) {\n+\t\tMVNETA_LOG(ERR, \"Failed to receive packets\");\n+\t\treturn 0;\n+\t}\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tstruct rte_mbuf *mbuf;\n+\t\tuint8_t l3_offset, l4_offset;\n+\t\tenum neta_inq_desc_status status;\n+\t\tuint64_t addr;\n+\n+\t\taddr = cookie_addr_high |\n+\t\t\tneta_ppio_inq_desc_get_cookie(&descs[i]);\n+\t\tmbuf = (struct rte_mbuf *)addr;\n+\n+\t\trte_pktmbuf_reset(mbuf);\n+\n+\t\t/* drop 
packet in case of mac, overrun or resource error */\n+\t\tstatus = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]);\n+\t\tif (unlikely(status != NETA_DESC_ERR_OK)) {\n+\t\t\t/* Release the mbuf to the mempool since\n+\t\t\t * it won't be transferred to tx path\n+\t\t\t */\n+\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\tq->drop_mac++;\n+\t\t\trx_dropped++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tmbuf->data_off += MVNETA_PKT_EFFEC_OFFS;\n+\t\tmbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]);\n+\t\tmbuf->data_len = mbuf->pkt_len;\n+\t\tmbuf->port = q->port_id;\n+\t\tmbuf->packet_type =\n+\t\t\tmvneta_desc_to_packet_type_and_offset(&descs[i],\n+\t\t\t\t\t\t\t\t&l3_offset,\n+\t\t\t\t\t\t\t\t&l4_offset);\n+\t\tmbuf->l2_len = l3_offset;\n+\t\tmbuf->l3_len = l4_offset - l3_offset;\n+\n+\t\tif (likely(q->cksum_enabled))\n+\t\t\tmbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]);\n+\n+\t\trx_pkts[rx_done++] = mbuf;\n+\t\tq->bytes_recv += mbuf->pkt_len;\n+\t}\n+\tq->pkts_processed += rx_done + rx_dropped;\n+\n+\tif (q->pkts_processed > rx_desc_free_thresh) {\n+\t\tint buf_to_refill = rx_desc_free_thresh;\n+\n+\t\tret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill);\n+\t\tif (ret)\n+\t\t\tMVNETA_LOG(ERR, \"Refill failed\");\n+\t\tq->pkts_processed -= buf_to_refill;\n+\t}\n+\n+\treturn rx_done;\n+}\n+\n+/**\n+ * DPDK callback to configure the receive queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param idx\n+ *   RX queue index.\n+ * @param desc\n+ *   Number of descriptors to configure in queue.\n+ * @param socket\n+ *   NUMA socket on which memory must be allocated.\n+ * @param conf\n+ *   Thresholds parameters (unused_).\n+ * @param mp\n+ *   Memory pool for buffer allocations.\n+ *\n+ * @return\n+ *   0 on success, negative error value otherwise.\n+ */\n+int\n+mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+\t\t      unsigned int socket,\n+\t\t      const struct rte_eth_rxconf *conf __rte_unused,\n+\t\t      struct rte_mempool *mp)\n+{\n+\tstruct mvneta_priv *priv = dev->data->dev_private;\n+\tstruct mvneta_rxq *rxq;\n+\tuint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);\n+\tuint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\n+\tframe_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;\n+\n+\tif (frame_size < max_rx_pkt_len) {\n+\t\tMVNETA_LOG(ERR,\n+\t\t\t\"Mbuf size must be increased to %u bytes to hold up \"\n+\t\t\t\"to %u bytes of data.\",\n+\t\t\tbuf_size + max_rx_pkt_len - frame_size,\n+\t\t\tmax_rx_pkt_len);\n+\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;\n+\t\tMVNETA_LOG(INFO, \"Setting max rx pkt len to %u\",\n+\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len);\n+\t}\n+\n+\tif (dev->data->rx_queues[idx]) {\n+\t\trte_free(dev->data->rx_queues[idx]);\n+\t\tdev->data->rx_queues[idx] = NULL;\n+\t}\n+\n+\trxq = rte_zmalloc_socket(\"rxq\", sizeof(*rxq), 0, socket);\n+\tif (!rxq)\n+\t\treturn -ENOMEM;\n+\n+\trxq->priv = priv;\n+\trxq->mp = mp;\n+\trxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &\n+\t\t\t     DEV_RX_OFFLOAD_IPV4_CKSUM;\n+\trxq->queue_id = idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->size = desc;\n+\trx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2));\n+\tpriv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size =\n+\t\tdesc;\n+\n+\tdev->data->rx_queues[idx] = rxq;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * DPDK callback to release the receive queue.\n+ *\n+ * @param rxq\n+ *   Generic receive queue pointer.\n+ 
*/\n+void\n+mvneta_rx_queue_release(void *rxq)\n+{\n+\tstruct mvneta_rxq *q = rxq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\t/* If dev_stop was called already, mbufs are already\n+\t * returned to mempool and ppio is deinitialized.\n+\t * Skip this step.\n+\t */\n+\n+\tif (q->priv->ppio)\n+\t\tmvneta_rx_queue_flush(q);\n+\n+\trte_free(rxq);\n+}\n+\n+/**\n+ * DPDK callback to configure the transmit queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param idx\n+ *   Transmit queue index.\n+ * @param desc\n+ *   Number of descriptors to configure in the queue.\n+ * @param socket\n+ *   NUMA socket on which memory must be allocated.\n+ * @param conf\n+ *   Tx queue configuration parameters.\n+ *\n+ * @return\n+ *   0 on success, negative error value otherwise.\n+ */\n+int\n+mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+\t\t      unsigned int socket, const struct rte_eth_txconf *conf)\n+{\n+\tstruct mvneta_priv *priv = dev->data->dev_private;\n+\tstruct mvneta_txq *txq;\n+\n+\tif (dev->data->tx_queues[idx]) {\n+\t\trte_free(dev->data->tx_queues[idx]);\n+\t\tdev->data->tx_queues[idx] = NULL;\n+\t}\n+\n+\ttxq = rte_zmalloc_socket(\"txq\", sizeof(*txq), 0, socket);\n+\tif (!txq)\n+\t\treturn -ENOMEM;\n+\n+\ttxq->priv = priv;\n+\ttxq->queue_id = idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->tx_deferred_start = conf->tx_deferred_start;\n+\tdev->data->tx_queues[idx] = txq;\n+\n+\tpriv->ppio_params.outqs_params.outqs_params[idx].size = desc;\n+\tpriv->ppio_params.outqs_params.outqs_params[idx].weight = 1;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * DPDK callback to release the transmit queue.\n+ *\n+ * @param txq\n+ *   Generic transmit queue pointer.\n+ */\n+void\n+mvneta_tx_queue_release(void *txq)\n+{\n+\tstruct mvneta_txq *q = txq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\trte_free(q);\n+}\n+\n+/**\n+ * Return mbufs to mempool.\n+ *\n+ * @param rxq\n+ *    Pointer to rx queue structure\n+ * @param desc\n+ *    Array of rx descriptors\n+ */\n+static void\n+mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num)\n+{\n+\tuint64_t addr;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tif (desc) {\n+\t\t\taddr = cookie_addr_high |\n+\t\t\t\t\tneta_ppio_inq_desc_get_cookie(desc);\n+\t\t\tif (addr)\n+\t\t\t\trte_pktmbuf_free((struct rte_mbuf *)addr);\n+\t\t\tdesc++;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Flush single receive queue.\n+ *\n+ * @param rxq\n+ *   Pointer to rx queue structure.\n+ * @param descs\n+ *   Array of rx descriptors\n+ */\n+void\n+mvneta_rx_queue_flush(struct mvneta_rxq *rxq)\n+{\n+\tstruct neta_ppio_desc *descs;\n+\tstruct neta_buff_inf *bufs;\n+\tuint16_t num;\n+\tint ret, i;\n+\n+\tdescs = rte_malloc(\"rxdesc\", MRVL_NETA_RXD_MAX * sizeof(*descs), 0);\n+\tbufs = rte_malloc(\"buffs\", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0);\n+\n+\tdo {\n+\t\tnum = MRVL_NETA_RXD_MAX;\n+\t\tret = neta_ppio_recv(rxq->priv->ppio,\n+\t\t\t\t     rxq->queue_id,\n+\t\t\t\t     descs, &num);\n+\t\tmvneta_recv_buffs_free(descs, num);\n+\t} while (ret == 0 && num);\n+\n+\trxq->pkts_processed = 0;\n+\n+\tnum = MRVL_NETA_RXD_MAX;\n+\n+\tneta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num);\n+\tMVNETA_LOG(INFO, \"freeing %u unused bufs.\", num);\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tuint64_t addr;\n+\t\tif (bufs[i].cookie) {\n+\t\t\taddr = cookie_addr_high | bufs[i].cookie;\n+\t\t\trte_pktmbuf_free((struct rte_mbuf *)addr);\n+\t\t}\n+\t}\n+\n+\trte_free(descs);\n+\trte_free(bufs);\n+}\n+\n+/**\n+ * Flush single transmit queue.\n+ *\n+ 
* @param txq\n+ *     Pointer to tx queue structure\n+ */\n+void\n+mvneta_tx_queue_flush(struct mvneta_txq *txq)\n+{\n+\tstruct mvneta_shadow_txq *sq = &txq->shadow_txq;\n+\n+\tif (sq->size)\n+\t\tmvneta_sent_buffers_free(txq->priv->ppio, sq,\n+\t\t\t\t\t txq->queue_id);\n+\n+\t/* free the rest of them */\n+\twhile (sq->tail != sq->head) {\n+\t\tuint64_t addr = cookie_addr_high |\n+\t\t\tsq->ent[sq->tail].cookie;\n+\t\trte_pktmbuf_free((struct rte_mbuf *)addr);\n+\t\tsq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\t}\n+\tmemset(sq, 0, sizeof(*sq));\n+}\n+\n+/**\n+ * DPDK callback to get information about specific receive queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param rx_queue_id\n+ *   Receive queue index.\n+ * @param qinfo\n+ *   Receive queue information structure.\n+ */\n+void\n+mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\t    struct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id];\n+\n+\tqinfo->mp = q->mp;\n+\tqinfo->nb_desc = q->size;\n+}\n+\n+/**\n+ * DPDK callback to get information about specific transmit queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param tx_queue_id\n+ *   Transmit queue index.\n+ * @param qinfo\n+ *   Transmit queue information structure.\n+ */\n+void\n+mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\t    struct rte_eth_txq_info *qinfo)\n+{\n+\tstruct mvneta_priv *priv = dev->data->dev_private;\n+\n+\tqinfo->nb_desc =\n+\t\tpriv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;\n+}\ndiff --git a/drivers/net/mvneta/mvneta_rxtx.h b/drivers/net/mvneta/mvneta_rxtx.h\nnew file mode 100644\nindex 0000000..7867c18\n--- /dev/null\n+++ b/drivers/net/mvneta/mvneta_rxtx.h\n@@ -0,0 +1,168 @@\n+#ifndef _MVNETA_RXTX_H_\n+#define _MVNETA_RXTX_H_\n+\n+#include \"mvneta_ethdev.h\"\n+\n+#define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE)\n+\n+#define MRVL_NETA_DEFAULT_TC 0\n+\n+/** Maximum number of descriptors in shadow queue. Must be power of 2 */\n+#define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX\n+\n+/** Shadow queue size mask (since shadow queue size is power of 2) */\n+#define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1)\n+\n+/** Minimum number of sent buffers to release from shadow queue to BM */\n+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN\t16\n+\n+/** Maximum number of sent buffers to release from shadow queue to BM */\n+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX\t64\n+\n+#define MVNETA_COOKIE_ADDR_INVALID ~0ULL\n+#define MVNETA_COOKIE_HIGH_ADDR_SHIFT\t(sizeof(neta_cookie_t) * 8)\n+#define MVNETA_COOKIE_HIGH_ADDR_MASK\t(~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT)\n+\n+#define MVNETA_SET_COOKIE_HIGH_ADDR(addr) {\t\t\t\t\\\n+\tif (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID))\t\\\n+\t\tcookie_addr_high =\t\t\t\t\t\\\n+\t\t\t(uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\\\n+}\n+\n+#define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr)\t\t\t\\\n+\t((likely(cookie_addr_high ==\t\t\t\t\\\n+\t((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 
1 : 0)\n+\n+/*\n+ * To use buffer harvesting based on loopback port shadow queue structure\n+ * was introduced for buffers information bookkeeping.\n+ */\n+struct mvneta_shadow_txq {\n+\tint head;           /* write index - used when sending buffers */\n+\tint tail;           /* read index - used when releasing buffers */\n+\tu16 size;           /* queue occupied size */\n+\tstruct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */\n+};\n+\n+struct mvneta_rxq {\n+\tstruct mvneta_priv *priv;\n+\tstruct rte_mempool *mp;\n+\tint queue_id;\n+\tint port_id;\n+\tint size;\n+\tint cksum_enabled;\n+\tuint64_t bytes_recv;\n+\tuint64_t drop_mac;\n+\tuint64_t pkts_processed;\n+};\n+\n+\n+struct mvneta_txq {\n+\tstruct mvneta_priv *priv;\n+\tint queue_id;\n+\tint port_id;\n+\tuint64_t bytes_sent;\n+\tstruct mvneta_shadow_txq shadow_txq;\n+\tint tx_deferred_start;\n+};\n+\n+extern uint64_t cookie_addr_high;\n+extern uint16_t rx_desc_free_thresh;\n+\n+static inline int\n+mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num)\n+{\n+\tstruct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];\n+\tstruct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];\n+\tint i, ret;\n+\tuint16_t nb_desc = *num;\n+\n+\tret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc);\n+\tif (ret) {\n+\t\tMVNETA_LOG(ERR, \"Failed to allocate %u mbufs.\", nb_desc);\n+\t\t*num = 0;\n+\t\treturn -1;\n+\t}\n+\n+\tMVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]);\n+\n+\tfor (i = 0; i < nb_desc; i++) {\n+\t\tif (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) {\n+\t\t\tMVNETA_LOG(ERR,\n+\t\t\t\t\"mbuf virt high addr 0x%lx out of range 0x%lx\",\n+\t\t\t\t(uint64_t)mbufs[i] >> 32,\n+\t\t\t\tcookie_addr_high >> 32);\n+\t\t\t*num = 0;\n+\t\t\tgoto out;\n+\t\t}\n+\t\tentries[i].addr = rte_mbuf_data_iova_default(mbufs[i]);\n+\t\tentries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i];\n+\t}\n+\tneta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num);\n+\n+out:\n+\tfor (i = *num; i < nb_desc; i++)\n+\t\trte_pktmbuf_free(mbufs[i]);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Allocate buffers from mempool\n+ * and store addresses in rx descriptors.\n+ *\n+ * @return\n+ *   0 on success, negative error value otherwise.\n+ */\n+static inline int\n+mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num)\n+{\n+\tuint16_t nb_desc, nb_desc_burst, sent = 0;\n+\tint ret = 0;\n+\n+\tnb_desc = *num;\n+\n+\tdo {\n+\t\tnb_desc_burst =\n+\t\t\t(nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ?\n+\t\t\tnb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX;\n+\n+\t\tret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst);\n+\t\tif (unlikely(ret || !nb_desc_burst))\n+\t\t\tbreak;\n+\n+\t\tsent += nb_desc_burst;\n+\t\tnb_desc -= nb_desc_burst;\n+\n+\t} while (nb_desc);\n+\n+\t*num = sent;\n+\n+\treturn ret;\n+}\n+\n+void mvneta_rx_queue_flush(struct mvneta_rxq *rxq);\n+void mvneta_tx_queue_flush(struct mvneta_txq *txq);\n+\n+void mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\t\t struct rte_eth_rxq_info *qinfo);\n+void mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\t\t struct rte_eth_txq_info *qinfo);\n+\n+void mvneta_set_tx_function(struct rte_eth_dev *dev);\n+uint16_t\n+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n+\n+int\n+mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+\t\t      unsigned int socket,\n+\t\t      const struct rte_eth_rxconf *conf __rte_unused,\n+\t\t      struct rte_mempool 
*mp);\n+int\n+mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+\t\t      unsigned int socket, const struct rte_eth_txconf *conf);\n+\n+void mvneta_rx_queue_release(void *rxq);\n+void mvneta_tx_queue_release(void *txq);\n+\n+#endif /* _MVNETA_RXTX_H_ */\n",
    "prefixes": [
        "v5",
        "2/8"
    ]
}
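
Several fields in the response are themselves URLs that can be dereferenced. As another hedged sketch (same assumptions as above), the "mbox" link returns the raw patch email, which git am accepts directly, and the "series" entry leads back to the full v5 series; the "patches" list on the series detail and the local filename are assumptions for illustration, based on Patchwork's usual series schema.

    import requests

    patch = requests.get("http://patches.dpdk.org/api/patches/44997/").json()

    # Save the raw mbox of this patch; it can be applied with `git am`.
    with open("mvneta-rxtx.mbox", "w") as f:
        f.write(requests.get(patch["mbox"]).text)

    # Walk the parent series to list every patch of "Add Marvell NETA PMD" v5.
    series = requests.get(patch["series"][0]["url"]).json()
    for p in series.get("patches", []):
        print(p["name"])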