get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
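
These methods can be scripted directly against the endpoint. The following is a minimal sketch, not part of the API description: it assumes the Python requests library and Patchwork's token authentication for write access, and the token and state values are placeholders.

import requests

url = "http://patches.dpdk.org/api/patches/44102/"

# GET needs no authentication and returns the JSON document shown below.
patch = requests.get(url).json()
print(patch["name"], "->", patch["state"])

# PATCH updates only the fields supplied; PUT replaces the writable
# fields wholesale. Both require authentication; the token below is
# a placeholder for a real maintainer API token.
headers = {"Authorization": "Token 0123456789abcdef"}
resp = requests.patch(url, headers=headers, json={"state": "accepted"})
resp.raise_for_status()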

GET /api/patches/44102/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 44102,
    "url": "http://patches.dpdk.org/api/patches/44102/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1535718368-15803-6-git-send-email-amo@semihalf.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1535718368-15803-6-git-send-email-amo@semihalf.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1535718368-15803-6-git-send-email-amo@semihalf.com",
    "date": "2018-08-31T12:26:01",
    "name": "[v2,3/8] net/mvneta: add Rx/Tx support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e546b9d47fe4aaedeefdc3d338a28a26f278844d",
    "submitter": {
        "id": 1112,
        "url": "http://patches.dpdk.org/api/people/1112/?format=api",
        "name": "Andrzej Ostruszka",
        "email": "amo@semihalf.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1535718368-15803-6-git-send-email-amo@semihalf.com/mbox/",
    "series": [
        {
            "id": 1131,
            "url": "http://patches.dpdk.org/api/series/1131/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1131",
            "date": "2018-08-31T12:25:56",
            "name": "Add Marvell NETA PMD",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/1131/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/44102/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/44102/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id BFDA75B12;\n\tFri, 31 Aug 2018 14:26:22 +0200 (CEST)",
            "from mail-lj1-f196.google.com (mail-lj1-f196.google.com\n\t[209.85.208.196]) by dpdk.org (Postfix) with ESMTP id 40C5C559A\n\tfor <dev@dpdk.org>; Fri, 31 Aug 2018 14:26:17 +0200 (CEST)",
            "by mail-lj1-f196.google.com with SMTP id p10-v6so9948512ljg.2\n\tfor <dev@dpdk.org>; Fri, 31 Aug 2018 05:26:17 -0700 (PDT)",
            "from amok.semihalf.local (31-172-191-173.noc.fibertech.net.pl.\n\t[31.172.191.173]) by smtp.googlemail.com with ESMTPSA id\n\tt4-v6sm1825422lfd.13.2018.08.31.05.26.15\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tFri, 31 Aug 2018 05:26:15 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=semihalf-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=0RblTTTEBlWMrbWXIG35nIhleMH/EKkR2JVa3AbcS8I=;\n\tb=lxozzkoJNpfmKBMP41VYVgXheq8NugcxSoco9y8406NCknAkfQR9tye0Q7y4fRepRW\n\tf5aLpUD5M7D6IazsPSfvZsQgg115lbA4vQUXiWdyAX5QAKvdDt7TVaZ4AsQQFluu8yON\n\tkm4gSswECRDFzkMVefcGfPzwiiInOwqwiGggG01KFhs3Si5hD/qxS1EzEBypojN3NQPD\n\tg5zjZdbPmPJ26ZF2Ud9Qx/lX9GhFlGy7D8NCmFGZpJY30MaUdHA3AcSLARVvu07bnyNl\n\tgogw9BOwcbOPiOvyERySIJ/9Qs1NxhTdWPWxE218zt/wh1cTmoSV3JKr/KdVVoKvNZCj\n\tYp3A==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=0RblTTTEBlWMrbWXIG35nIhleMH/EKkR2JVa3AbcS8I=;\n\tb=dOX3FoBZiegpQ1gx3nXZjUjTjvyTAFUTCg50xmF3Nw4dmhQ/NB9Rqy3CDrz/gGTSPc\n\tKJEqfq5dfUMaDISYT6busQ7ltGMiJkxaAGHsds7bXsPGADp0U3SjgASA18DoD7Y0mZXW\n\t4ccVBUT55wUKW5xEtsNl/pX1FcFYbOWcf8XS6d4hhl7LDskAc1WgWHJJQVWaapa2sg50\n\tRTQksAoAxwCZqRQQD7tBRJVSiVke+Q9a6BOZ61HFJql4/Xfn+8hmiUwC64A19erYsMVt\n\trxiSRYTgd977Fk+JZ4o7tZV/9MiUulEP+uQGb/1qJ82ecGfD4ygkfiG3+hx8Xf4DJgTe\n\tqtkA==",
        "X-Gm-Message-State": "APzg51AaHLvtWrHW18kZ8GGh8MVtp8QICK36ZEJAY8ZExhqy3TE/LUPi\n\tTQoTz+Go0xsVdvCWzryQWg7z2/M6JEw=",
        "X-Google-Smtp-Source": "ANB0VdZVhDD+Q8OKR83VJ0bYDS2OHE1mE7OPXAzmnomUTGVlqvyePKSptgRBwf6hVVP/IAVbV2s3QQ==",
        "X-Received": "by 2002:a2e:99c3:: with SMTP id\n\tl3-v6mr10925691ljj.48.1535718376370; \n\tFri, 31 Aug 2018 05:26:16 -0700 (PDT)",
        "From": "Andrzej Ostruszka <amo@semihalf.com>",
        "To": "dev@dpdk.org",
        "Cc": "mw@semihalf.com, zr@semihalf.com, tdu@semihalf.com, nsamsono@marvell.com",
        "Date": "Fri, 31 Aug 2018 14:26:01 +0200",
        "Message-Id": "<1535718368-15803-6-git-send-email-amo@semihalf.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1535718368-15803-1-git-send-email-amo@semihalf.com>",
        "References": "<1535469030-18647-1-git-send-email-amo@semihalf.com>\n\t<1535718368-15803-1-git-send-email-amo@semihalf.com>",
        "Subject": "[dpdk-dev] [PATCH v2 3/8] net/mvneta: add Rx/Tx support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Zyta Szpak <zr@semihalf.com>\n\nAdd part of PMD for actual reception/transmission.\n\nSigned-off-by: Yelena Krivosheev <yelena@marvell.com>\nSigned-off-by: Dmitri Epshtein <dima@marvell.com>\nSigned-off-by: Zyta Szpak <zr@semihalf.com>\n---\n drivers/net/mvneta/mvneta_ethdev.c | 791 +++++++++++++++++++++++++++++++++++++\n drivers/net/mvneta/mvneta_ethdev.h |  11 +\n 2 files changed, 802 insertions(+)",
    "diff": "diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c\nindex 621f38a..968f920 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.c\n+++ b/drivers/net/mvneta/mvneta_ethdev.c\n@@ -27,6 +27,11 @@\n \n #define MVNETA_IFACE_NAME_ARG \"iface\"\n \n+#define MVNETA_COOKIE_ADDR_INVALID ~0ULL\n+\n+#define MVNETA_COOKIE_HIGH_ADDR_SHIFT\t(sizeof(neta_cookie_t) * 8)\n+#define MVNETA_COOKIE_HIGH_ADDR_MASK\t(~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT)\n+\n #define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \\\n \t\t\t  DEV_RX_OFFLOAD_CRC_STRIP | \\\n \t\t\t  DEV_RX_OFFLOAD_CHECKSUM)\n@@ -46,6 +51,19 @@\n \n #define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE)\n \n+static uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID;\n+static uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN;\n+\n+#define MVNETA_SET_COOKIE_HIGH_ADDR(addr) {\t\t\t\t\\\n+\tif (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID))\t\\\n+\t\tcookie_addr_high =\t\t\t\t\t\\\n+\t\t\t(uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\\\n+}\n+\n+#define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr)\t\t\t\\\n+\t((likely(cookie_addr_high ==\t\t\t\t\\\n+\t((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 1 : 0)\n+\n int mvneta_logtype;\n \n static const char * const valid_args[] = {\n@@ -58,6 +76,17 @@ struct mvneta_ifnames {\n \tint idx;\n };\n \n+/*\n+ * To use buffer harvesting based on loopback port shadow queue structure\n+ * was introduced for buffers information bookkeeping.\n+ */\n+struct mvneta_shadow_txq {\n+\tint head;           /* write index - used when sending buffers */\n+\tint tail;           /* read index - used when releasing buffers */\n+\tu16 size;           /* queue occupied size */\n+\tstruct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */\n+};\n+\n \n struct mvneta_rxq {\n \tstruct mvneta_priv *priv;\n@@ -77,6 +106,7 @@ struct mvneta_txq {\n \tint queue_id;\n \tint port_id;\n \tuint64_t bytes_sent;\n+\tstruct mvneta_shadow_txq shadow_txq;\n \tint tx_deferred_start;\n };\n \n@@ -84,6 +114,247 @@ static int mvneta_dev_num;\n static int mvneta_lcore_first;\n static int mvneta_lcore_last;\n \n+static inline void\n+mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf)\n+{\n+\tsq->ent[sq->head].cookie = (uint64_t)buf;\n+\tsq->ent[sq->head].addr = buf ?\n+\t\trte_mbuf_data_iova_default(buf) : 0;\n+\n+\tsq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\tsq->size++;\n+}\n+\n+static inline void\n+mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf)\n+{\n+\tneta_ppio_outq_desc_reset(desc);\n+\tneta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));\n+\tneta_ppio_outq_desc_set_pkt_offset(desc, 0);\n+\tneta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));\n+}\n+\n+static inline int\n+mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num)\n+{\n+\tstruct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];\n+\tstruct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];\n+\tint i, ret;\n+\tuint16_t nb_desc = *num;\n+\n+\tret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc);\n+\tif (ret) {\n+\t\tMVNETA_LOG(ERR, \"Failed to allocate %u mbufs.\", nb_desc);\n+\t\t*num = 0;\n+\t\treturn -1;\n+\t}\n+\n+\tMVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]);\n+\n+\tfor (i = 0; i < nb_desc; i++) {\n+\t\tif (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) {\n+\t\t\tMVNETA_LOG(ERR,\n+\t\t\t\t\"mbuf virt high addr 0x%lx out of range 0x%lx\",\n+\t\t\t\t(uint64_t)mbufs[i] >> 
32,\n+\t\t\t\tcookie_addr_high >> 32);\n+\t\t\t*num = 0;\n+\t\t\tgoto out;\n+\t\t}\n+\t\tentries[i].addr = rte_mbuf_data_iova_default(mbufs[i]);\n+\t\tentries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i];\n+\t}\n+\tneta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num);\n+\n+out:\n+\tfor (i = *num; i < nb_desc; i++)\n+\t\trte_pktmbuf_free(mbufs[i]);\n+\n+\treturn 0;\n+}\n+\n+\n+/**\n+ * Allocate buffers from mempool\n+ * and store addresses in rx descriptors.\n+ *\n+ * @return\n+ *   0 on success, negative error value otherwise.\n+ */\n+static inline int\n+mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num)\n+{\n+\tuint16_t nb_desc, nb_desc_burst, sent = 0;\n+\tint ret = 0;\n+\n+\tnb_desc = *num;\n+\n+\tdo {\n+\t\tnb_desc_burst =\n+\t\t\t(nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ?\n+\t\t\tnb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX;\n+\n+\t\tret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst);\n+\t\tif (unlikely(ret || !nb_desc_burst))\n+\t\t\tbreak;\n+\n+\t\tsent += nb_desc_burst;\n+\t\tnb_desc -= nb_desc_burst;\n+\n+\t} while (nb_desc);\n+\n+\t*num = sent;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * Return mbufs to mempool.\n+ *\n+ * @param rxq\n+ *    Pointer to rx queue structure\n+ * @param desc\n+ *    Array of rx descriptors\n+ */\n+static void\n+mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num)\n+{\n+\tuint64_t addr;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tif (desc) {\n+\t\t\taddr = cookie_addr_high |\n+\t\t\t\t\tneta_ppio_inq_desc_get_cookie(desc);\n+\t\t\tif (addr)\n+\t\t\t\trte_pktmbuf_free((struct rte_mbuf *)addr);\n+\t\t\tdesc++;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Release already sent buffers to mempool.\n+ *\n+ * @param ppio\n+ *   Pointer to the port structure.\n+ * @param sq\n+ *   Pointer to the shadow queue.\n+ * @param qid\n+ *   Queue id number.\n+ * @param force\n+ *   Force releasing packets.\n+ */\n+static inline void\n+mvneta_sent_buffers_free(struct neta_ppio *ppio,\n+\t\t\t struct mvneta_shadow_txq *sq, int qid)\n+{\n+\tstruct neta_buff_inf *entry;\n+\tuint16_t nb_done = 0;\n+\tint i;\n+\tint tail = sq->tail;\n+\n+\tneta_ppio_get_num_outq_done(ppio, qid, &nb_done);\n+\n+\tif (nb_done > sq->size) {\n+\t\tMVNETA_LOG(ERR, \"nb_done: %d, sq->size %d\",\n+\t\t\t   nb_done, sq->size);\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < nb_done; i++) {\n+\t\tentry = &sq->ent[tail];\n+\n+\t\tif (unlikely(!entry->addr)) {\n+\t\t\tMVNETA_LOG(DEBUG,\n+\t\t\t\t\"Shadow memory @%d: cookie(%lx), pa(%lx)!\",\n+\t\t\t\ttail, (u64)entry->cookie,\n+\t\t\t\t(u64)entry->addr);\n+\t\t\ttail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tstruct rte_mbuf *mbuf;\n+\n+\t\tmbuf = (struct rte_mbuf *)\n+\t\t\t   (cookie_addr_high | entry->cookie);\n+\t\trte_pktmbuf_free(mbuf);\n+\t\ttail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\t}\n+\n+\tsq->tail = tail;\n+\tsq->size -= nb_done;\n+}\n+\n+/**\n+ * Flush single receive queue.\n+ *\n+ * @param rxq\n+ *   Pointer to rx queue structure.\n+ * @param descs\n+ *   Array of rx descriptors\n+ */\n+static void\n+mvneta_rx_queue_flush(struct mvneta_rxq *rxq)\n+{\n+\tstruct neta_ppio_desc *descs;\n+\tstruct neta_buff_inf *bufs;\n+\tuint16_t num;\n+\tint ret, i;\n+\n+\tdescs = rte_malloc(\"rxdesc\", MRVL_NETA_RXD_MAX * sizeof(*descs), 0);\n+\tbufs = rte_malloc(\"buffs\", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0);\n+\n+\tdo {\n+\t\tnum = MRVL_NETA_RXD_MAX;\n+\t\tret = neta_ppio_recv(rxq->priv->ppio,\n+\t\t\t\t     rxq->queue_id,\n+\t\t\t\t     descs, 
&num);\n+\t\tmvneta_recv_buffs_free(descs, num);\n+\t} while (ret == 0 && num);\n+\n+\trxq->pkts_processed = 0;\n+\n+\tnum = MRVL_NETA_RXD_MAX;\n+\n+\tneta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num);\n+\tMVNETA_LOG(INFO, \"freeing %u unused bufs.\", num);\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tuint64_t addr;\n+\t\tif (bufs[i].cookie) {\n+\t\t\taddr = cookie_addr_high | bufs[i].cookie;\n+\t\t\trte_pktmbuf_free((struct rte_mbuf *)addr);\n+\t\t}\n+\t}\n+\n+\trte_free(descs);\n+\trte_free(bufs);\n+}\n+\n+/**\n+ * Flush single transmit queue.\n+ *\n+ * @param txq\n+ *     Pointer to tx queue structure\n+ */\n+static void\n+mvneta_tx_queue_flush(struct mvneta_txq *txq)\n+{\n+\tstruct mvneta_shadow_txq *sq = &txq->shadow_txq;\n+\n+\tif (sq->size)\n+\t\tmvneta_sent_buffers_free(txq->priv->ppio, sq,\n+\t\t\t\t\t txq->queue_id);\n+\n+\t/* free the rest of them */\n+\twhile (sq->tail != sq->head) {\n+\t\tuint64_t addr = cookie_addr_high |\n+\t\t\tsq->ent[sq->tail].cookie;\n+\t\trte_pktmbuf_free((struct rte_mbuf *)addr);\n+\t\tsq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;\n+\t}\n+\tmemset(sq, 0, sizeof(*sq));\n+}\n \n /**\n  * Deinitialize packet processor.\n@@ -132,6 +403,467 @@ mvneta_ifnames_get(const char *key __rte_unused, const char *value,\n }\n \n /**\n+ * Return packet type information and l3/l4 offsets.\n+ *\n+ * @param desc\n+ *   Pointer to the received packet descriptor.\n+ * @param l3_offset\n+ *   l3 packet offset.\n+ * @param l4_offset\n+ *   l4 packet offset.\n+ *\n+ * @return\n+ *   Packet type information.\n+ */\n+static inline uint64_t\n+mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc,\n+\t\t\t\t    uint8_t *l3_offset, uint8_t *l4_offset)\n+{\n+\tenum neta_inq_l3_type l3_type;\n+\tenum neta_inq_l4_type l4_type;\n+\tuint64_t packet_type;\n+\n+\tneta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);\n+\tneta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);\n+\n+\tpacket_type = RTE_PTYPE_L2_ETHER;\n+\n+\tif (NETA_RXD_GET_VLAN_INFO(desc))\n+\t\tpacket_type |= RTE_PTYPE_L2_ETHER_VLAN;\n+\n+\tswitch (l3_type) {\n+\tcase NETA_INQ_L3_TYPE_IPV4_BAD:\n+\tcase NETA_INQ_L3_TYPE_IPV4_OK:\n+\t\tpacket_type |= RTE_PTYPE_L3_IPV4;\n+\t\tbreak;\n+\tcase NETA_INQ_L3_TYPE_IPV6:\n+\t\tpacket_type |= RTE_PTYPE_L3_IPV6;\n+\t\tbreak;\n+\tdefault:\n+\t\tpacket_type |= RTE_PTYPE_UNKNOWN;\n+\t\tMVNETA_LOG(DEBUG, \"Failed to recognize l3 packet type\");\n+\t\tbreak;\n+\t}\n+\n+\tswitch (l4_type) {\n+\tcase NETA_INQ_L4_TYPE_TCP:\n+\t\tpacket_type |= RTE_PTYPE_L4_TCP;\n+\t\tbreak;\n+\tcase NETA_INQ_L4_TYPE_UDP:\n+\t\tpacket_type |= RTE_PTYPE_L4_UDP;\n+\t\tbreak;\n+\tdefault:\n+\t\tpacket_type |= RTE_PTYPE_UNKNOWN;\n+\t\tMVNETA_LOG(DEBUG, \"Failed to recognize l4 packet type\");\n+\t\tbreak;\n+\t}\n+\n+\treturn packet_type;\n+}\n+\n+/**\n+ * Prepare offload information.\n+ *\n+ * @param ol_flags\n+ *   Offload flags.\n+ * @param packet_type\n+ *   Packet type bitfield.\n+ * @param l3_type\n+ *   Pointer to the neta_ouq_l3_type structure.\n+ * @param l4_type\n+ *   Pointer to the neta_outq_l4_type structure.\n+ * @param gen_l3_cksum\n+ *   Will be set to 1 in case l3 checksum is computed.\n+ * @param l4_cksum\n+ *   Will be set to 1 in case l4 checksum is computed.\n+ *\n+ * @return\n+ *   0 on success, negative error value otherwise.\n+ */\n+static inline int\n+mvneta_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,\n+\t\t\tenum neta_outq_l3_type *l3_type,\n+\t\t\tenum neta_outq_l4_type *l4_type,\n+\t\t\tint *gen_l3_cksum,\n+\t\t\tint 
*gen_l4_cksum)\n+{\n+\t/*\n+\t * Based on ol_flags prepare information\n+\t * for neta_ppio_outq_desc_set_proto_info() which setups descriptor\n+\t * for offloading.\n+\t */\n+\tif (ol_flags & PKT_TX_IPV4) {\n+\t\t*l3_type = NETA_OUTQ_L3_TYPE_IPV4;\n+\t\t*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;\n+\t} else if (ol_flags & PKT_TX_IPV6) {\n+\t\t*l3_type = NETA_OUTQ_L3_TYPE_IPV6;\n+\t\t/* no checksum for ipv6 header */\n+\t\t*gen_l3_cksum = 0;\n+\t} else {\n+\t\t/* if something different then stop processing */\n+\t\treturn -1;\n+\t}\n+\n+\tol_flags &= PKT_TX_L4_MASK;\n+\tif ((packet_type & RTE_PTYPE_L4_TCP) &&\n+\t    ol_flags == PKT_TX_TCP_CKSUM) {\n+\t\t*l4_type = NETA_OUTQ_L4_TYPE_TCP;\n+\t\t*gen_l4_cksum = 1;\n+\t} else if ((packet_type & RTE_PTYPE_L4_UDP) &&\n+\t\t   ol_flags == PKT_TX_UDP_CKSUM) {\n+\t\t*l4_type = NETA_OUTQ_L4_TYPE_UDP;\n+\t\t*gen_l4_cksum = 1;\n+\t} else {\n+\t\t*l4_type = NETA_OUTQ_L4_TYPE_OTHER;\n+\t\t/* no checksum for other type */\n+\t\t*gen_l4_cksum = 0;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Get offload information from the received packet descriptor.\n+ *\n+ * @param desc\n+ *   Pointer to the received packet descriptor.\n+ *\n+ * @return\n+ *   Mbuf offload flags.\n+ */\n+static inline uint64_t\n+mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc)\n+{\n+\tuint64_t flags;\n+\tenum neta_inq_desc_status status;\n+\n+\tstatus = neta_ppio_inq_desc_get_l3_pkt_error(desc);\n+\tif (unlikely(status != NETA_DESC_ERR_OK))\n+\t\tflags = PKT_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags = PKT_RX_IP_CKSUM_GOOD;\n+\n+\tstatus = neta_ppio_inq_desc_get_l4_pkt_error(desc);\n+\tif (unlikely(status != NETA_DESC_ERR_OK))\n+\t\tflags |= PKT_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= PKT_RX_L4_CKSUM_GOOD;\n+\n+\treturn flags;\n+}\n+\n+/**\n+ * DPDK callback for transmit.\n+ *\n+ * @param txq\n+ *   Generic pointer transmit queue.\n+ * @param tx_pkts\n+ *   Packets to transmit.\n+ * @param nb_pkts\n+ *   Number of packets in array.\n+ *\n+ * @return\n+ *   Number of packets successfully transmitted.\n+ */\n+static uint16_t\n+mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct mvneta_txq *q = txq;\n+\tstruct mvneta_shadow_txq *sq;\n+\tstruct neta_ppio_desc descs[nb_pkts];\n+\n+\tint i, ret, bytes_sent = 0;\n+\tuint16_t num, sq_free_size;\n+\tuint64_t addr;\n+\n+\tsq = &q->shadow_txq;\n+\tif (unlikely(!nb_pkts || !q->priv->ppio))\n+\t\treturn 0;\n+\n+\tif (sq->size)\n+\t\tmvneta_sent_buffers_free(q->priv->ppio,\n+\t\t\t\t\t sq, q->queue_id);\n+\n+\tsq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;\n+\tif (unlikely(nb_pkts > sq_free_size)) {\n+\t\tMVNETA_LOG(DEBUG,\n+\t\t\t\"No room in shadow queue for %d packets! 
%d packets will be sent.\",\n+\t\t\tnb_pkts, sq_free_size);\n+\t\tnb_pkts = sq_free_size;\n+\t}\n+\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tstruct rte_mbuf *mbuf = tx_pkts[i];\n+\t\tint gen_l3_cksum, gen_l4_cksum;\n+\t\tenum neta_outq_l3_type l3_type;\n+\t\tenum neta_outq_l4_type l4_type;\n+\n+\t\t/* Fill first mbuf info in shadow queue */\n+\t\tmvneta_fill_shadowq(sq, mbuf);\n+\t\tmvneta_fill_desc(&descs[i], mbuf);\n+\n+\t\tbytes_sent += rte_pktmbuf_pkt_len(mbuf);\n+\n+\t\tret = mvneta_prepare_proto_info(mbuf->ol_flags,\n+\t\t\t\t\t\tmbuf->packet_type,\n+\t\t\t\t\t\t&l3_type, &l4_type,\n+\t\t\t\t\t\t&gen_l3_cksum,\n+\t\t\t\t\t\t&gen_l4_cksum);\n+\t\tif (unlikely(ret))\n+\t\t\tcontinue;\n+\n+\t\tneta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,\n+\t\t\t\t\t\t   mbuf->l2_len,\n+\t\t\t\t\t\t   mbuf->l2_len + mbuf->l3_len,\n+\t\t\t\t\t\t   gen_l3_cksum, gen_l4_cksum);\n+\t}\n+\tnum = nb_pkts;\n+\tneta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts);\n+\n+\n+\t/* number of packets that were not sent */\n+\tif (unlikely(num > nb_pkts)) {\n+\t\tfor (i = nb_pkts; i < num; i++) {\n+\t\t\tsq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &\n+\t\t\t\tMRVL_NETA_TX_SHADOWQ_MASK;\n+\t\t\taddr = cookie_addr_high | sq->ent[sq->head].cookie;\n+\t\t\tbytes_sent -=\n+\t\t\t\trte_pktmbuf_pkt_len((struct rte_mbuf *)addr);\n+\t\t}\n+\t\tsq->size -= num - nb_pkts;\n+\t}\n+\n+\tq->bytes_sent += bytes_sent;\n+\n+\treturn nb_pkts;\n+}\n+\n+/** DPDK callback for S/G transmit.\n+ *\n+ * @param txq\n+ *   Generic pointer transmit queue.\n+ * @param tx_pkts\n+ *   Packets to transmit.\n+ * @param nb_pkts\n+ *   Number of packets in array.\n+ *\n+ * @return\n+ *   Number of packets successfully transmitted.\n+ */\n+static uint16_t\n+mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct mvneta_txq *q = txq;\n+\tstruct mvneta_shadow_txq *sq;\n+\tstruct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS];\n+\tstruct neta_ppio_sg_pkts pkts;\n+\tuint8_t frags[nb_pkts];\n+\tint i, j, ret, bytes_sent = 0;\n+\tint tail, tail_first;\n+\tuint16_t num, sq_free_size;\n+\tuint16_t nb_segs, total_descs = 0;\n+\tuint64_t addr;\n+\n+\tsq = &q->shadow_txq;\n+\tpkts.frags = frags;\n+\tpkts.num = 0;\n+\n+\tif (unlikely(!q->priv->ppio))\n+\t\treturn 0;\n+\n+\tif (sq->size)\n+\t\tmvneta_sent_buffers_free(q->priv->ppio,\n+\t\t\t\t\t sq, q->queue_id);\n+\t/* Save shadow queue free size */\n+\tsq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;\n+\n+\ttail = 0;\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tstruct rte_mbuf *mbuf = tx_pkts[i];\n+\t\tstruct rte_mbuf *seg = NULL;\n+\t\tint gen_l3_cksum, gen_l4_cksum;\n+\t\tenum neta_outq_l3_type l3_type;\n+\t\tenum neta_outq_l4_type l4_type;\n+\n+\t\tnb_segs = mbuf->nb_segs;\n+\t\ttotal_descs += nb_segs;\n+\n+\t\t/*\n+\t\t * Check if total_descs does not exceed\n+\t\t * shadow queue free size\n+\t\t */\n+\t\tif (unlikely(total_descs > sq_free_size)) {\n+\t\t\ttotal_descs -= nb_segs;\n+\t\t\tMVNETA_LOG(DEBUG,\n+\t\t\t\t\"No room in shadow queue for %d packets! \"\n+\t\t\t\t\"%d packets will be sent.\",\n+\t\t\t\tnb_pkts, i);\n+\t\t\tbreak;\n+\t\t}\n+\n+\n+\t\t/* Check if nb_segs does not exceed the max nb of desc per\n+\t\t * fragmented packet\n+\t\t */\n+\t\tif (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) {\n+\t\t\ttotal_descs -= nb_segs;\n+\t\t\tMVNETA_LOG(ERR,\n+\t\t\t\t\"Too many segments. 
Packet won't be sent.\");\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tpkts.frags[pkts.num] = nb_segs;\n+\t\tpkts.num++;\n+\t\ttail_first = tail;\n+\n+\t\tseg = mbuf;\n+\t\tfor (j = 0; j < nb_segs - 1; j++) {\n+\t\t\t/* For the subsequent segments, set shadow queue\n+\t\t\t * buffer to NULL\n+\t\t\t */\n+\t\t\tmvneta_fill_shadowq(sq, NULL);\n+\t\t\tmvneta_fill_desc(&descs[tail], seg);\n+\n+\t\t\ttail++;\n+\t\t\tseg = seg->next;\n+\t\t}\n+\t\t/* Put first mbuf info in last shadow queue entry */\n+\t\tmvneta_fill_shadowq(sq, mbuf);\n+\t\t/* Update descriptor with last segment */\n+\t\tmvneta_fill_desc(&descs[tail++], seg);\n+\n+\t\tbytes_sent += rte_pktmbuf_pkt_len(mbuf);\n+\n+\t\tret = mvneta_prepare_proto_info(mbuf->ol_flags,\n+\t\t\t\t\t\tmbuf->packet_type,\n+\t\t\t\t\t\t&l3_type, &l4_type,\n+\t\t\t\t\t\t&gen_l3_cksum,\n+\t\t\t\t\t\t&gen_l4_cksum);\n+\t\tif (unlikely(ret))\n+\t\t\tcontinue;\n+\n+\t\tneta_ppio_outq_desc_set_proto_info(&descs[tail_first],\n+\t\t\t\t\t\t   l3_type, l4_type,\n+\t\t\t\t\t\t   mbuf->l2_len,\n+\t\t\t\t\t\t   mbuf->l2_len + mbuf->l3_len,\n+\t\t\t\t\t\t   gen_l3_cksum, gen_l4_cksum);\n+\t}\n+\tnum = total_descs;\n+\tneta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs,\n+\t\t\t  &pkts);\n+\n+\t/* number of packets that were not sent */\n+\tif (unlikely(num > total_descs)) {\n+\t\tfor (i = total_descs; i < num; i++) {\n+\t\t\tsq->head = (MRVL_NETA_TX_SHADOWQ_SIZE +\n+\t\t\t\t\tsq->head - 1) &\n+\t\t\t\t\tMRVL_NETA_TX_SHADOWQ_MASK;\n+\t\t\taddr = sq->ent[sq->head].cookie;\n+\t\t\tif (addr) {\n+\t\t\t\tstruct rte_mbuf *mbuf;\n+\n+\t\t\t\tmbuf = (struct rte_mbuf *)\n+\t\t\t\t\t\t(cookie_addr_high | addr);\n+\t\t\t\tbytes_sent -= rte_pktmbuf_pkt_len(mbuf);\n+\t\t\t}\n+\t\t}\n+\t\tsq->size -= num - total_descs;\n+\t\tnb_pkts = pkts.num;\n+\t}\n+\n+\tq->bytes_sent += bytes_sent;\n+\n+\treturn nb_pkts;\n+}\n+\n+/**\n+ * DPDK callback for receive.\n+ *\n+ * @param rxq\n+ *   Generic pointer to the receive queue.\n+ * @param rx_pkts\n+ *   Array to store received packets.\n+ * @param nb_pkts\n+ *   Maximum number of packets in array.\n+ *\n+ * @return\n+ *   Number of packets successfully received.\n+ */\n+static uint16_t\n+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct mvneta_rxq *q = rxq;\n+\tstruct neta_ppio_desc descs[nb_pkts];\n+\tint i, ret, rx_done = 0, rx_dropped = 0;\n+\n+\tif (unlikely(!q || !q->priv->ppio))\n+\t\treturn 0;\n+\n+\tret = neta_ppio_recv(q->priv->ppio, q->queue_id,\n+\t\t\tdescs, &nb_pkts);\n+\n+\tif (unlikely(ret < 0)) {\n+\t\tMVNETA_LOG(ERR, \"Failed to receive packets\");\n+\t\treturn 0;\n+\t}\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tstruct rte_mbuf *mbuf;\n+\t\tuint8_t l3_offset, l4_offset;\n+\t\tenum neta_inq_desc_status status;\n+\t\tuint64_t addr;\n+\n+\t\taddr = cookie_addr_high |\n+\t\t\tneta_ppio_inq_desc_get_cookie(&descs[i]);\n+\t\tmbuf = (struct rte_mbuf *)addr;\n+\n+\t\trte_pktmbuf_reset(mbuf);\n+\n+\t\t/* drop packet in case of mac, overrun or resource error */\n+\t\tstatus = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]);\n+\t\tif (unlikely(status != NETA_DESC_ERR_OK)) {\n+\t\t\t/* Release the mbuf to the mempool since\n+\t\t\t * it won't be transferred to tx path\n+\t\t\t */\n+\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\tq->drop_mac++;\n+\t\t\trx_dropped++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tmbuf->data_off += MVNETA_PKT_EFFEC_OFFS;\n+\t\tmbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]);\n+\t\tmbuf->data_len = mbuf->pkt_len;\n+\t\tmbuf->port = q->port_id;\n+\t\tmbuf->packet_type 
=\n+\t\t\tmvneta_desc_to_packet_type_and_offset(&descs[i],\n+\t\t\t\t\t\t\t\t&l3_offset,\n+\t\t\t\t\t\t\t\t&l4_offset);\n+\t\tmbuf->l2_len = l3_offset;\n+\t\tmbuf->l3_len = l4_offset - l3_offset;\n+\n+\t\tif (likely(q->cksum_enabled))\n+\t\t\tmbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]);\n+\n+\t\trx_pkts[rx_done++] = mbuf;\n+\t\tq->bytes_recv += mbuf->pkt_len;\n+\t}\n+\tq->pkts_processed += rx_done + rx_dropped;\n+\n+\tif (q->pkts_processed > rx_desc_free_thresh) {\n+\t\tint buf_to_refill = rx_desc_free_thresh;\n+\n+\t\tret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill);\n+\t\tif (ret)\n+\t\t\tMVNETA_LOG(ERR, \"Refill failed\");\n+\t\tq->pkts_processed -= buf_to_refill;\n+\t}\n+\n+\treturn rx_done;\n+}\n+\n+/**\n  * Ethernet device configuration.\n  *\n  * Prepare the driver for a given number of TX and RX queues and\n@@ -391,6 +1123,7 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \trxq->queue_id = idx;\n \trxq->port_id = dev->data->port_id;\n \trxq->size = desc;\n+\trx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2));\n \tpriv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size =\n \t\tdesc;\n \n@@ -413,6 +1146,14 @@ mvneta_rx_queue_release(void *rxq)\n \tif (!q)\n \t\treturn;\n \n+\t/* If dev_stop was called already, mbufs are already\n+\t * returned to mempool and ppio is deinitialized.\n+\t * Skip this step.\n+\t */\n+\n+\tif (q->priv->ppio)\n+\t\tmvneta_rx_queue_flush(q);\n+\n \trte_free(rxq);\n }\n \n@@ -478,6 +1219,26 @@ mvneta_tx_queue_release(void *txq)\n \trte_free(q);\n }\n \n+/**\n+ * Set tx burst function according to offload flag\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ */\n+static void\n+mvneta_set_tx_function(struct rte_eth_dev *dev)\n+{\n+\tstruct mvneta_priv *priv = dev->data->dev_private;\n+\n+\t/* Use a simple Tx queue (no offloads, no multi segs) if possible */\n+\tif (priv->multiseg) {\n+\t\tMVNETA_LOG(INFO, \"Using multi-segment tx callback\");\n+\t\tdev->tx_pkt_burst = mvneta_tx_sg_pkt_burst;\n+\t} else {\n+\t\tMVNETA_LOG(INFO, \"Using single-segment tx callback\");\n+\t\tdev->tx_pkt_burst = mvneta_tx_pkt_burst;\n+\t}\n+}\n \n /**\n  * DPDK callback to start the device.\n@@ -525,6 +1286,18 @@ mvneta_dev_start(struct rte_eth_dev *dev)\n \t\tpriv->uc_mc_flushed = 1;\n \t}\n \n+\t/* Allocate buffers */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct mvneta_rxq *rxq = dev->data->rx_queues[i];\n+\t\tint num = rxq->size;\n+\n+\t\tret = mvneta_buffs_alloc(priv, rxq, &num);\n+\t\tif (ret || num != rxq->size) {\n+\t\t\trte_free(rxq);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n \tret = mvneta_dev_set_link_up(dev);\n \tif (ret) {\n \t\tMVNETA_LOG(ERR, \"Failed to set link up\");\n@@ -535,6 +1308,8 @@ mvneta_dev_start(struct rte_eth_dev *dev)\n \tfor (i = 0; i < dev->data->nb_tx_queues; i++)\n \t\tdev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;\n \n+\tmvneta_set_tx_function(dev);\n+\n \treturn 0;\n \n out:\n@@ -553,11 +1328,25 @@ static void\n mvneta_dev_stop(struct rte_eth_dev *dev)\n {\n \tstruct mvneta_priv *priv = dev->data->dev_private;\n+\tint i;\n \n \tif (!priv->ppio)\n \t\treturn;\n \n \tmvneta_dev_set_link_down(dev);\n+\tMVNETA_LOG(INFO, \"Flushing rx queues\");\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct mvneta_rxq *rxq = dev->data->rx_queues[i];\n+\n+\t\tmvneta_rx_queue_flush(rxq);\n+\t}\n+\n+\tMVNETA_LOG(INFO, \"Flushing tx queues\");\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tstruct mvneta_txq *txq = 
dev->data->tx_queues[i];\n+\n+\t\tmvneta_tx_queue_flush(txq);\n+\t}\n \n \tneta_ppio_deinit(priv->ppio);\n \n@@ -704,6 +1493,8 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)\n \teth_dev->data->kdrv = RTE_KDRV_NONE;\n \teth_dev->data->dev_private = priv;\n \teth_dev->device = &vdev->device;\n+\teth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;\n+\tmvneta_set_tx_function(eth_dev);\n ;\teth_dev->dev_ops = &mvneta_ops;\n \n \treturn 0;\ndiff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h\nindex 8957034..a05566d 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.h\n+++ b/drivers/net/mvneta/mvneta_ethdev.h\n@@ -43,6 +43,17 @@\n \n #define MRVL_NETA_DEFAULT_TC 0\n \n+/** Maximum number of descriptors in shadow queue. Must be power of 2 */\n+#define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX\n+\n+/** Shadow queue size mask (since shadow queue size is power of 2) */\n+#define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1)\n+\n+/** Minimum number of sent buffers to release from shadow queue to BM */\n+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN\t16\n+\n+/** Maximum number of sent buffers to release from shadow queue to BM */\n+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX\t64\n \n #define MRVL_NETA_VLAN_TAG_LEN\t\t4\n #define MRVL_NETA_ETH_HDRS_LEN\t\t(ETHER_HDR_LEN + ETHER_CRC_LEN + \\\n",
    "prefixes": [
        "v2",
        "3/8"
    ]
}
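
The URLs embedded in the response are themselves API entry points. As a follow-on sketch under the same assumptions as above (field names are taken from the response shown here; the output filename is arbitrary, and each entry returned by the "checks" URL is assumed to carry "context" and "state" fields), a client might download the series mbox and list the individual CI runs behind the aggregate "check" value:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/44102/").json()

# Fetch the mbox of the whole series this patch belongs to
# ("Add Marvell NETA PMD", series 1131 in the response above).
mbox = requests.get(patch["series"][0]["mbox"]).content
with open("series-1131.mbox", "wb") as f:
    f.write(mbox)

# "check" is the aggregate CI result ("success" above); the "checks"
# URL lists the runs behind it.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])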