get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
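
For illustration, a minimal sketch of driving this endpoint from Python with the requests library. Reads need no authentication; PUT/PATCH require a project maintainer account, and the token value, the state change, and the DRF-style "Authorization: Token ..." header used here are assumptions, not taken from this page. The exchange below shows the GET case for patch 80524.

# Minimal sketch, assuming the requests library and a valid Patchwork API token.
import requests

BASE = "http://patches.dpdk.org/api"
PATCH_ID = 80524
AUTH = {"Authorization": "Token <your-api-token>"}  # hypothetical token value

# GET: show a patch (no authentication needed for reads)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partial update of writable fields such as "state" (maintainers only)
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/", headers=AUTH,
                      json={"state": "accepted"})
resp.raise_for_status()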

GET /api/patches/80524/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 80524,
    "url": "http://patches.dpdk.org/api/patches/80524/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1602596753-32282-17-git-send-email-arybchenko@solarflare.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1602596753-32282-17-git-send-email-arybchenko@solarflare.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1602596753-32282-17-git-send-email-arybchenko@solarflare.com",
    "date": "2020-10-13T13:45:33",
    "name": "[16/36] net/sfc: implement EF100 native Rx datapath",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "cce92007b26365b493f39b74f5cdbb188b00c470",
    "submitter": {
        "id": 607,
        "url": "http://patches.dpdk.org/api/people/607/?format=api",
        "name": "Andrew Rybchenko",
        "email": "arybchenko@solarflare.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1602596753-32282-17-git-send-email-arybchenko@solarflare.com/mbox/",
    "series": [
        {
            "id": 12916,
            "url": "http://patches.dpdk.org/api/series/12916/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12916",
            "date": "2020-10-13T13:45:18",
            "name": "net/sfc: add EF100 support",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12916/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/80524/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/80524/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8EC38A04B7;\n\tTue, 13 Oct 2020 15:51:28 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 112651DBF2;\n\tTue, 13 Oct 2020 15:46:29 +0200 (CEST)",
            "from dispatch1-us1.ppe-hosted.com (dispatch1-us1.ppe-hosted.com\n [67.231.154.164]) by dpdk.org (Postfix) with ESMTP id 0C8161DB86\n for <dev@dpdk.org>; Tue, 13 Oct 2020 15:46:05 +0200 (CEST)",
            "from mx1-us1.ppe-hosted.com (unknown [10.110.50.150])\n by dispatch1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTP id\n D6FBF20117 for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:02 +0000 (UTC)",
            "from us4-mdac16-56.at1.mdlocal (unknown [10.110.48.199])\n by mx1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTP id D02F5800AD\n for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:02 +0000 (UTC)",
            "from mx1-us1.ppe-hosted.com (unknown [10.110.49.106])\n by mx1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTPS id\n 37D7010007F\n for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:02 +0000 (UTC)",
            "from webmail.solarflare.com (uk.solarflare.com [193.34.186.16])\n (using TLSv1.2 with cipher ECDHE-RSA-AES256-SHA384 (256/256 bits))\n (No client certificate requested)\n by mx1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTPS id\n DE864B4007F\n for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:01 +0000 (UTC)",
            "from ukex01.SolarFlarecom.com (10.17.10.4) by\n ukex01.SolarFlarecom.com (10.17.10.4) with Microsoft SMTP Server (TLS) id\n 15.0.1497.2; Tue, 13 Oct 2020 14:45:56 +0100",
            "from opal.uk.solarflarecom.com (10.17.10.1) by\n ukex01.SolarFlarecom.com (10.17.10.4) with Microsoft SMTP Server id\n 15.0.1497.2 via Frontend Transport; Tue, 13 Oct 2020 14:45:56 +0100",
            "from ukv-loginhost.uk.solarflarecom.com\n (ukv-loginhost.uk.solarflarecom.com [10.17.10.39])\n by opal.uk.solarflarecom.com (8.13.8/8.13.8) with ESMTP id 09DDjuF9006046\n for <dev@dpdk.org>; Tue, 13 Oct 2020 14:45:56 +0100",
            "from ukv-loginhost.uk.solarflarecom.com (localhost [127.0.0.1])\n by ukv-loginhost.uk.solarflarecom.com (Postfix) with ESMTP id 180061613AB\n for <dev@dpdk.org>; Tue, 13 Oct 2020 14:45:56 +0100 (BST)"
        ],
        "X-Virus-Scanned": "Proofpoint Essentials engine",
        "From": "Andrew Rybchenko <arybchenko@solarflare.com>",
        "To": "<dev@dpdk.org>",
        "Date": "Tue, 13 Oct 2020 14:45:33 +0100",
        "Message-ID": "<1602596753-32282-17-git-send-email-arybchenko@solarflare.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1602596753-32282-1-git-send-email-arybchenko@solarflare.com>",
        "References": "<1602596753-32282-1-git-send-email-arybchenko@solarflare.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-TM-AS-Product-Ver": "SMEX-12.5.0.1300-8.6.1012-25722.003",
        "X-TM-AS-Result": "No-4.221000-8.000000-10",
        "X-TMASE-MatchedRID": "EXK6mOKXkKJbbYRuf3nrh7sHVDDM5xAPSoCG4sefl8QGmHr1eMxt2YB5\n w6KBECW1rdoLblq9S5oO1mVtY5XA0CRUxBx2BTap4M3l0EeXvQ1ReWnUUdhI9bjOUXWmQ3OWCT2\n og8mlUDuSEtmXcPvHCmqRpbgWk2Z7FT1JxtePq0RdDcY5R0jT7trtxg4b+wrsUx4Up3qvMKeDye\n MsiRKLO/wvQWrwRKFG2kt1wREKTsQsO+kVEfVuQg97mDMXdNW3ce/io1zKJlVjsW/dcHO/4crU3\n m4wf5VQ/l6vas/Ugqvv2WoTNgecbHI/MxNRI7Uk/NOUkr6ADzc9/b618SpbHR546Z08FdluXWjv\n A8TpWFiWFTv/8h1cqGdMSbFl0ohwyw0dUT70SVEyIyttzvQ99/ngX/aL8PCN4PRrWDwT3UtolGI\n WaLPyOie5LrMlxEsldYklTdZ7hantzSKzUmDUVyhJ5tvbfbyLjhdrcmlB7cPWXfwzppZ8SEqkiq\n c7rRQWMH85i2Gfh19tjB2EBubuvTEMBympXzDhI0cHLI6lhgIvV5f7P0HVDL53x71IIIES/+9HB\n w3oLFxcnc861BmfZ4e9sL8Z7e1zjNea/HVv9rygpIKfOvr6D7qGBW9J0YqjIFvLuPOiwdc+UBbB\n 9UDbDgDIbN6P3NBYlQ/yYOpZUjZYKI4f2iLMS/UwiX15l0tvBGvINcfHqhdpsnGGIgWMmXgj0Xr\n zki4tVRRcFyJSgQHnaeNu7AozaDHyZ/lLOvopTNDKZy5/hvR+tO36GYDlskoPLn6eZ90+eXxtDc\n Y/PUwvT1VdaBgq6SvTbcAONBtJ3Z0VjXe2WZqeAiCmPx4NwJuJ+Pb8n/VxSnQ4MjwaO9cqtq5d3\n cxkNVbgjcL9hurQjOcU9Vogeew2uiT/NvJn891DQ4TT+z2+GiRBNGK71nM=",
        "X-TM-AS-User-Approved-Sender": "Yes",
        "X-TM-AS-User-Blocked-Sender": "No",
        "X-TMASE-Result": "10--4.221000-8.000000",
        "X-TMASE-Version": "SMEX-12.5.0.1300-8.6.1012-25722.003",
        "X-MDID": "1602596762-VWrnD4z9kObz",
        "X-PPE-DISP": "1602596762;VWrnD4z9kObz",
        "Subject": "[dpdk-dev] [PATCH 16/36] net/sfc: implement EF100 native Rx datapath",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>\n---\n doc/guides/nics/sfc_efx.rst    |   3 +\n drivers/net/sfc/meson.build    |   3 +-\n drivers/net/sfc/sfc_dp_rx.h    |   1 +\n drivers/net/sfc/sfc_ef100.h    |  35 ++\n drivers/net/sfc/sfc_ef100_rx.c | 612 +++++++++++++++++++++++++++++++++\n drivers/net/sfc/sfc_ethdev.c   |   1 +\n drivers/net/sfc/sfc_kvargs.h   |   4 +-\n 7 files changed, 657 insertions(+), 2 deletions(-)\n create mode 100644 drivers/net/sfc/sfc_ef100.h\n create mode 100644 drivers/net/sfc/sfc_ef100_rx.c",
    "diff": "diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst\nindex 84b9b56ddb..c05c565275 100644\n--- a/doc/guides/nics/sfc_efx.rst\n+++ b/doc/guides/nics/sfc_efx.rst\n@@ -301,12 +301,15 @@ boolean parameters value.\n   **auto** allows the driver itself to make a choice based on firmware\n   features available and required by the datapath implementation.\n   **efx** chooses libefx-based datapath which supports Rx scatter.\n+  Supported for SFN7xxx, SFN8xxx and X2xxx family adapters only.\n   **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is\n   more efficient than libefx-based and provides richer packet type\n   classification.\n   **ef10_essb** chooses SFNX2xxx equal stride super-buffer datapath\n   which may be used on DPDK firmware variant only\n   (see notes about its limitations above).\n+  **ef100** chooses EF100 native datapath which is the only supported\n+  Rx datapath for EF100 architecture based NICs.\n \n - ``tx_datapath`` [auto|efx|ef10|ef10_simple] (default **auto**)\n \ndiff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build\nindex 304e8686e5..604c67cddd 100644\n--- a/drivers/net/sfc/meson.build\n+++ b/drivers/net/sfc/meson.build\n@@ -51,5 +51,6 @@ sources = files(\n \t'sfc_dp.c',\n \t'sfc_ef10_rx.c',\n \t'sfc_ef10_essb_rx.c',\n-\t'sfc_ef10_tx.c'\n+\t'sfc_ef10_tx.c',\n+\t'sfc_ef100_rx.c',\n )\ndiff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h\nindex 2101fd7547..3aba39658e 100644\n--- a/drivers/net/sfc/sfc_dp_rx.h\n+++ b/drivers/net/sfc/sfc_dp_rx.h\n@@ -266,6 +266,7 @@ const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);\n extern struct sfc_dp_rx sfc_efx_rx;\n extern struct sfc_dp_rx sfc_ef10_rx;\n extern struct sfc_dp_rx sfc_ef10_essb_rx;\n+extern struct sfc_dp_rx sfc_ef100_rx;\n \n #ifdef __cplusplus\n }\ndiff --git a/drivers/net/sfc/sfc_ef100.h b/drivers/net/sfc/sfc_ef100.h\nnew file mode 100644\nindex 0000000000..6da6cfabdb\n--- /dev/null\n+++ b/drivers/net/sfc/sfc_ef100.h\n@@ -0,0 +1,35 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2019-2020 Xilinx, Inc.\n+ * Copyright(c) 2018-2019 Solarflare Communications Inc.\n+ *\n+ * This software was jointly developed between OKTET Labs (under contract\n+ * for Solarflare) and Solarflare Communications, Inc.\n+ */\n+\n+#ifndef _SFC_EF100_H\n+#define _SFC_EF100_H\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+static inline bool\n+sfc_ef100_ev_present(const efx_qword_t *ev, bool phase_bit)\n+{\n+\treturn !((ev->eq_u64[0] &\n+\t\t  EFX_INPLACE_MASK64(0, 63, ESF_GZ_EV_EVQ_PHASE)) ^\n+\t\t ((uint64_t)phase_bit << ESF_GZ_EV_EVQ_PHASE_LBN));\n+}\n+\n+static inline bool\n+sfc_ef100_ev_type_is(const efx_qword_t *ev, unsigned int type)\n+{\n+\treturn (ev->eq_u64[0] & EFX_INPLACE_MASK64(0, 63, ESF_GZ_E_TYPE)) ==\n+\t\tEFX_INSERT_FIELD64(0, 63, ESF_GZ_E_TYPE, type);\n+}\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+#endif /* _SFC_EF100_H */\ndiff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c\nnew file mode 100644\nindex 0000000000..c0e70c9943\n--- /dev/null\n+++ b/drivers/net/sfc/sfc_ef100_rx.c\n@@ -0,0 +1,612 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2019-2020 Xilinx, Inc.\n+ * Copyright(c) 2018-2019 Solarflare Communications Inc.\n+ *\n+ * This software was jointly developed between OKTET Labs (under contract\n+ * for Solarflare) and Solarflare Communications, Inc.\n+ */\n+\n+/* EF100 native datapath implementation */\n+\n+#include <stdbool.h>\n+\n+#include 
<rte_byteorder.h>\n+#include <rte_mbuf_ptype.h>\n+#include <rte_mbuf.h>\n+#include <rte_io.h>\n+\n+#include \"efx_types.h\"\n+#include \"efx_regs_ef100.h\"\n+\n+#include \"sfc_debug.h\"\n+#include \"sfc_tweak.h\"\n+#include \"sfc_dp_rx.h\"\n+#include \"sfc_kvargs.h\"\n+#include \"sfc_ef100.h\"\n+\n+\n+#define sfc_ef100_rx_err(_rxq, ...) \\\n+\tSFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_rxq)->dp.dpq, __VA_ARGS__)\n+\n+#define sfc_ef100_rx_debug(_rxq, ...) \\\n+\tSFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_rxq)->dp.dpq, \\\n+\t\t   __VA_ARGS__)\n+\n+/**\n+ * Maximum number of descriptors/buffers in the Rx ring.\n+ * It should guarantee that corresponding event queue never overfill.\n+ * EF10 native datapath uses event queue of the same size as Rx queue.\n+ * Maximum number of events on datapath can be estimated as number of\n+ * Rx queue entries (one event per Rx buffer in the worst case) plus\n+ * Rx error and flush events.\n+ */\n+#define SFC_EF100_RXQ_LIMIT(_ndesc) \\\n+\t((_ndesc) - 1 /* head must not step on tail */ - \\\n+\t 1 /* Rx error */ - 1 /* flush */)\n+\n+struct sfc_ef100_rx_sw_desc {\n+\tstruct rte_mbuf\t\t\t*mbuf;\n+};\n+\n+struct sfc_ef100_rxq {\n+\t/* Used on data path */\n+\tunsigned int\t\t\tflags;\n+#define SFC_EF100_RXQ_STARTED\t\t0x1\n+#define SFC_EF100_RXQ_NOT_RUNNING\t0x2\n+#define SFC_EF100_RXQ_EXCEPTION\t\t0x4\n+\tunsigned int\t\t\tptr_mask;\n+\tunsigned int\t\t\tevq_phase_bit_shift;\n+\tunsigned int\t\t\tready_pkts;\n+\tunsigned int\t\t\tcompleted;\n+\tunsigned int\t\t\tevq_read_ptr;\n+\tvolatile efx_qword_t\t\t*evq_hw_ring;\n+\tstruct sfc_ef100_rx_sw_desc\t*sw_ring;\n+\tuint64_t\t\t\trearm_data;\n+\tuint16_t\t\t\tbuf_size;\n+\tuint16_t\t\t\tprefix_size;\n+\n+\t/* Used on refill */\n+\tunsigned int\t\t\tadded;\n+\tunsigned int\t\t\tmax_fill_level;\n+\tunsigned int\t\t\trefill_threshold;\n+\tstruct rte_mempool\t\t*refill_mb_pool;\n+\tefx_qword_t\t\t\t*rxq_hw_ring;\n+\tvolatile void\t\t\t*doorbell;\n+\n+\t/* Datapath receive queue anchor */\n+\tstruct sfc_dp_rxq\t\tdp;\n+};\n+\n+static inline struct sfc_ef100_rxq *\n+sfc_ef100_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)\n+{\n+\treturn container_of(dp_rxq, struct sfc_ef100_rxq, dp);\n+}\n+\n+static inline void\n+sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)\n+{\n+\tefx_dword_t dword;\n+\n+\tEFX_POPULATE_DWORD_1(dword, ERF_GZ_RX_RING_PIDX, added & rxq->ptr_mask);\n+\n+\t/* DMA sync to device is not required */\n+\n+\t/*\n+\t * rte_write32() has rte_io_wmb() which guarantees that the STORE\n+\t * operations (i.e. Rx and event descriptor updates) that precede\n+\t * the rte_io_wmb() call are visible to NIC before the STORE\n+\t * operations that follow it (i.e. 
doorbell write).\n+\t */\n+\trte_write32(dword.ed_u32[0], rxq->doorbell);\n+\n+\tsfc_ef100_rx_debug(rxq, \"RxQ pushed doorbell at pidx %u (added=%u)\",\n+\t\t\t   EFX_DWORD_FIELD(dword, ERF_GZ_RX_RING_PIDX),\n+\t\t\t   added);\n+}\n+\n+static void\n+sfc_ef100_rx_qrefill(struct sfc_ef100_rxq *rxq)\n+{\n+\tconst unsigned int ptr_mask = rxq->ptr_mask;\n+\tunsigned int free_space;\n+\tunsigned int bulks;\n+\tvoid *objs[SFC_RX_REFILL_BULK];\n+\tunsigned int added = rxq->added;\n+\n+\tfree_space = rxq->max_fill_level - (added - rxq->completed);\n+\n+\tif (free_space < rxq->refill_threshold)\n+\t\treturn;\n+\n+\tbulks = free_space / RTE_DIM(objs);\n+\t/* refill_threshold guarantees that bulks is positive */\n+\tSFC_ASSERT(bulks > 0);\n+\n+\tdo {\n+\t\tunsigned int id;\n+\t\tunsigned int i;\n+\n+\t\tif (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,\n+\t\t\t\t\t\t  RTE_DIM(objs)) < 0)) {\n+\t\t\tstruct rte_eth_dev_data *dev_data =\n+\t\t\t\trte_eth_devices[rxq->dp.dpq.port_id].data;\n+\n+\t\t\t/*\n+\t\t\t * It is hardly a safe way to increment counter\n+\t\t\t * from different contexts, but all PMDs do it.\n+\t\t\t */\n+\t\t\tdev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);\n+\t\t\t/* Return if we have posted nothing yet */\n+\t\t\tif (added == rxq->added)\n+\t\t\t\treturn;\n+\t\t\t/* Push posted */\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tfor (i = 0, id = added & ptr_mask;\n+\t\t     i < RTE_DIM(objs);\n+\t\t     ++i, ++id) {\n+\t\t\tstruct rte_mbuf *m = objs[i];\n+\t\t\tstruct sfc_ef100_rx_sw_desc *rxd;\n+\t\t\trte_iova_t phys_addr;\n+\n+\t\t\tMBUF_RAW_ALLOC_CHECK(m);\n+\n+\t\t\tSFC_ASSERT((id & ~ptr_mask) == 0);\n+\t\t\trxd = &rxq->sw_ring[id];\n+\t\t\trxd->mbuf = m;\n+\n+\t\t\t/*\n+\t\t\t * Avoid writing to mbuf. It is cheaper to do it\n+\t\t\t * when we receive packet and fill in nearby\n+\t\t\t * structure members.\n+\t\t\t */\n+\n+\t\t\tphys_addr = rte_mbuf_data_iova_default(m);\n+\t\t\tEFX_POPULATE_QWORD_1(rxq->rxq_hw_ring[id],\n+\t\t\t    ESF_GZ_RX_BUF_ADDR, phys_addr);\n+\t\t}\n+\n+\t\tadded += RTE_DIM(objs);\n+\t} while (--bulks > 0);\n+\n+\tSFC_ASSERT(rxq->added != added);\n+\trxq->added = added;\n+\tsfc_ef100_rx_qpush(rxq, added);\n+}\n+\n+static bool\n+sfc_ef100_rx_prefix_to_offloads(const efx_oword_t *rx_prefix,\n+\t\t\t\tstruct rte_mbuf *m)\n+{\n+\tconst efx_word_t *class;\n+\tuint64_t ol_flags = 0;\n+\n+\tRTE_BUILD_BUG_ON(EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);\n+\tRTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);\n+\tRTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT !=\n+\t\t\t sizeof(*class));\n+\tclass = (const efx_word_t *)((const uint8_t *)rx_prefix +\n+\t\tEFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT);\n+\tif (unlikely(EFX_WORD_FIELD(*class,\n+\t\t\t\t    ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS) !=\n+\t\t     ESE_GZ_RH_HCLASS_L2_STATUS_OK))\n+\t\treturn false;\n+\n+\tm->ol_flags = ol_flags;\n+\treturn true;\n+}\n+\n+static const uint8_t *\n+sfc_ef100_rx_pkt_prefix(const struct rte_mbuf *m)\n+{\n+\treturn (const uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;\n+}\n+\n+static struct rte_mbuf *\n+sfc_ef100_rx_next_mbuf(struct sfc_ef100_rxq *rxq)\n+{\n+\tstruct rte_mbuf *m;\n+\tunsigned int id;\n+\n+\t/* mbuf associated with current Rx descriptor */\n+\tm = rxq->sw_ring[rxq->completed++ & rxq->ptr_mask].mbuf;\n+\n+\t/* completed is already moved to the next one */\n+\tif (unlikely(rxq->completed == rxq->added))\n+\t\tgoto done;\n+\n+\t/*\n+\t * Prefetch Rx prefix of the next packet.\n+\t * Current packet is scattered and the next mbuf is 
its fragment\n+\t * it simply prefetches some data - no harm since packet rate\n+\t * should not be high if scatter is used.\n+\t */\n+\tid = rxq->completed & rxq->ptr_mask;\n+\trte_prefetch0(sfc_ef100_rx_pkt_prefix(rxq->sw_ring[id].mbuf));\n+\n+\tif (unlikely(rxq->completed + 1 == rxq->added))\n+\t\tgoto done;\n+\n+\t/*\n+\t * Prefetch mbuf control structure of the next after next Rx\n+\t * descriptor.\n+\t */\n+\tid = (id == rxq->ptr_mask) ? 0 : (id + 1);\n+\trte_mbuf_prefetch_part1(rxq->sw_ring[id].mbuf);\n+\n+\t/*\n+\t * If the next time we'll need SW Rx descriptor from the next\n+\t * cache line, try to make sure that we have it in cache.\n+\t */\n+\tif ((id & 0x7) == 0x7)\n+\t\trte_prefetch0(&rxq->sw_ring[(id + 1) & rxq->ptr_mask]);\n+\n+done:\n+\treturn m;\n+}\n+\n+static struct rte_mbuf **\n+sfc_ef100_rx_process_ready_pkts(struct sfc_ef100_rxq *rxq,\n+\t\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\t\tstruct rte_mbuf ** const rx_pkts_end)\n+{\n+\twhile (rxq->ready_pkts > 0 && rx_pkts != rx_pkts_end) {\n+\t\tstruct rte_mbuf *pkt;\n+\t\tstruct rte_mbuf *lastseg;\n+\t\tconst efx_oword_t *rx_prefix;\n+\t\tuint16_t pkt_len;\n+\t\tuint16_t seg_len;\n+\t\tbool deliver;\n+\n+\t\trxq->ready_pkts--;\n+\n+\t\tpkt = sfc_ef100_rx_next_mbuf(rxq);\n+\t\tMBUF_RAW_ALLOC_CHECK(pkt);\n+\n+\t\tRTE_BUILD_BUG_ON(sizeof(pkt->rearm_data[0]) !=\n+\t\t\t\t sizeof(rxq->rearm_data));\n+\t\tpkt->rearm_data[0] = rxq->rearm_data;\n+\n+\t\t/* data_off already moved past Rx prefix */\n+\t\trx_prefix = (const efx_oword_t *)sfc_ef100_rx_pkt_prefix(pkt);\n+\n+\t\tpkt_len = EFX_OWORD_FIELD(rx_prefix[0],\n+\t\t\t\t\t  ESF_GZ_RX_PREFIX_LENGTH);\n+\t\tSFC_ASSERT(pkt_len > 0);\n+\t\trte_pktmbuf_pkt_len(pkt) = pkt_len;\n+\n+\t\tseg_len = RTE_MIN(pkt_len, rxq->buf_size - rxq->prefix_size);\n+\t\trte_pktmbuf_data_len(pkt) = seg_len;\n+\n+\t\tdeliver = sfc_ef100_rx_prefix_to_offloads(rx_prefix, pkt);\n+\n+\t\tlastseg = pkt;\n+\t\twhile ((pkt_len -= seg_len) > 0) {\n+\t\t\tstruct rte_mbuf *seg;\n+\n+\t\t\tseg = sfc_ef100_rx_next_mbuf(rxq);\n+\t\t\tMBUF_RAW_ALLOC_CHECK(seg);\n+\n+\t\t\tseg->data_off = RTE_PKTMBUF_HEADROOM;\n+\n+\t\t\tseg_len = RTE_MIN(pkt_len, rxq->buf_size);\n+\t\t\trte_pktmbuf_data_len(seg) = seg_len;\n+\t\t\trte_pktmbuf_pkt_len(seg) = seg_len;\n+\n+\t\t\tpkt->nb_segs++;\n+\t\t\tlastseg->next = seg;\n+\t\t\tlastseg = seg;\n+\t\t}\n+\n+\t\tif (likely(deliver))\n+\t\t\t*rx_pkts++ = pkt;\n+\t\telse\n+\t\t\trte_pktmbuf_free(pkt);\n+\t}\n+\n+\treturn rx_pkts;\n+}\n+\n+static bool\n+sfc_ef100_rx_get_event(struct sfc_ef100_rxq *rxq, efx_qword_t *ev)\n+{\n+\t*ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];\n+\n+\tif (!sfc_ef100_ev_present(ev,\n+\t\t\t(rxq->evq_read_ptr >> rxq->evq_phase_bit_shift) & 1))\n+\t\treturn false;\n+\n+\tif (unlikely(!sfc_ef100_ev_type_is(ev, ESE_GZ_EF100_EV_RX_PKTS))) {\n+\t\t/*\n+\t\t * Do not move read_ptr to keep the event for exception\n+\t\t * handling by the control path.\n+\t\t */\n+\t\trxq->flags |= SFC_EF100_RXQ_EXCEPTION;\n+\t\tsfc_ef100_rx_err(rxq,\n+\t\t\t\"RxQ exception at EvQ ptr %u(%#x), event %08x:%08x\",\n+\t\t\trxq->evq_read_ptr, rxq->evq_read_ptr & rxq->ptr_mask,\n+\t\t\tEFX_QWORD_FIELD(*ev, EFX_DWORD_1),\n+\t\t\tEFX_QWORD_FIELD(*ev, EFX_DWORD_0));\n+\t\treturn false;\n+\t}\n+\n+\tsfc_ef100_rx_debug(rxq, \"RxQ got event %08x:%08x at %u (%#x)\",\n+\t\t\t   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),\n+\t\t\t   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),\n+\t\t\t   rxq->evq_read_ptr,\n+\t\t\t   rxq->evq_read_ptr & rxq->ptr_mask);\n+\n+\trxq->evq_read_ptr++;\n+\treturn 
true;\n+}\n+\n+static uint16_t\n+sfc_ef100_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(rx_queue);\n+\tstruct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];\n+\tefx_qword_t rx_ev;\n+\n+\trx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts, rx_pkts_end);\n+\n+\tif (unlikely(rxq->flags &\n+\t\t     (SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION)))\n+\t\tgoto done;\n+\n+\twhile (rx_pkts != rx_pkts_end && sfc_ef100_rx_get_event(rxq, &rx_ev)) {\n+\t\trxq->ready_pkts =\n+\t\t\tEFX_QWORD_FIELD(rx_ev, ESF_GZ_EV_RXPKTS_NUM_PKT);\n+\t\trx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts,\n+\t\t\t\t\t\t\t  rx_pkts_end);\n+\t}\n+\n+\t/* It is not a problem if we refill in the case of exception */\n+\tsfc_ef100_rx_qrefill(rxq);\n+\n+done:\n+\treturn nb_pkts - (rx_pkts_end - rx_pkts);\n+}\n+\n+static const uint32_t *\n+sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)\n+{\n+\tstatic const uint32_t ef100_native_ptypes[] = {\n+\t\tRTE_PTYPE_UNKNOWN\n+\t};\n+\n+\treturn ef100_native_ptypes;\n+}\n+\n+static sfc_dp_rx_qdesc_npending_t sfc_ef100_rx_qdesc_npending;\n+static unsigned int\n+sfc_ef100_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)\n+{\n+\treturn 0;\n+}\n+\n+static sfc_dp_rx_qdesc_status_t sfc_ef100_rx_qdesc_status;\n+static int\n+sfc_ef100_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,\n+\t\t\t  __rte_unused uint16_t offset)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+\n+static sfc_dp_rx_get_dev_info_t sfc_ef100_rx_get_dev_info;\n+static void\n+sfc_ef100_rx_get_dev_info(struct rte_eth_dev_info *dev_info)\n+{\n+\t/*\n+\t * Number of descriptors just defines maximum number of pushed\n+\t * descriptors (fill level).\n+\t */\n+\tdev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;\n+\tdev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;\n+}\n+\n+\n+static sfc_dp_rx_qsize_up_rings_t sfc_ef100_rx_qsize_up_rings;\n+static int\n+sfc_ef100_rx_qsize_up_rings(uint16_t nb_rx_desc,\n+\t\t\t   struct sfc_dp_rx_hw_limits *limits,\n+\t\t\t   __rte_unused struct rte_mempool *mb_pool,\n+\t\t\t   unsigned int *rxq_entries,\n+\t\t\t   unsigned int *evq_entries,\n+\t\t\t   unsigned int *rxq_max_fill_level)\n+{\n+\t/*\n+\t * rte_ethdev API guarantees that the number meets min, max and\n+\t * alignment requirements.\n+\t */\n+\tif (nb_rx_desc <= limits->rxq_min_entries)\n+\t\t*rxq_entries = limits->rxq_min_entries;\n+\telse\n+\t\t*rxq_entries = rte_align32pow2(nb_rx_desc);\n+\n+\t*evq_entries = *rxq_entries;\n+\n+\t*rxq_max_fill_level = RTE_MIN(nb_rx_desc,\n+\t\t\t\t      SFC_EF100_RXQ_LIMIT(*evq_entries));\n+\treturn 0;\n+}\n+\n+\n+static uint64_t\n+sfc_ef100_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)\n+{\n+\tstruct rte_mbuf m;\n+\n+\tmemset(&m, 0, sizeof(m));\n+\n+\trte_mbuf_refcnt_set(&m, 1);\n+\tm.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;\n+\tm.nb_segs = 1;\n+\tm.port = port_id;\n+\n+\t/* rearm_data covers structure members filled in above */\n+\trte_compiler_barrier();\n+\tRTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));\n+\treturn m.rearm_data[0];\n+}\n+\n+static sfc_dp_rx_qcreate_t sfc_ef100_rx_qcreate;\n+static int\n+sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id,\n+\t\t    const struct rte_pci_addr *pci_addr, int socket_id,\n+\t\t    const struct sfc_dp_rx_qcreate_info *info,\n+\t\t    struct sfc_dp_rxq **dp_rxqp)\n+{\n+\tstruct sfc_ef100_rxq *rxq;\n+\tint rc;\n+\n+\trc = EINVAL;\n+\tif (info->rxq_entries != info->evq_entries)\n+\t\tgoto 
fail_rxq_args;\n+\n+\trc = ENOMEM;\n+\trxq = rte_zmalloc_socket(\"sfc-ef100-rxq\", sizeof(*rxq),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq == NULL)\n+\t\tgoto fail_rxq_alloc;\n+\n+\tsfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);\n+\n+\trc = ENOMEM;\n+\trxq->sw_ring = rte_calloc_socket(\"sfc-ef100-rxq-sw_ring\",\n+\t\t\t\t\t info->rxq_entries,\n+\t\t\t\t\t sizeof(*rxq->sw_ring),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq->sw_ring == NULL)\n+\t\tgoto fail_desc_alloc;\n+\n+\trxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;\n+\trxq->ptr_mask = info->rxq_entries - 1;\n+\trxq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);\n+\trxq->evq_hw_ring = info->evq_hw_ring;\n+\trxq->max_fill_level = info->max_fill_level;\n+\trxq->refill_threshold = info->refill_threshold;\n+\trxq->rearm_data =\n+\t\tsfc_ef100_mk_mbuf_rearm_data(port_id, info->prefix_size);\n+\trxq->prefix_size = info->prefix_size;\n+\trxq->buf_size = info->buf_size;\n+\trxq->refill_mb_pool = info->refill_mb_pool;\n+\trxq->rxq_hw_ring = info->rxq_hw_ring;\n+\trxq->doorbell = (volatile uint8_t *)info->mem_bar +\n+\t\t\tER_GZ_RX_RING_DOORBELL_OFST +\n+\t\t\t(info->hw_index << info->vi_window_shift);\n+\n+\tsfc_ef100_rx_debug(rxq, \"RxQ doorbell is %p\", rxq->doorbell);\n+\n+\t*dp_rxqp = &rxq->dp;\n+\treturn 0;\n+\n+fail_desc_alloc:\n+\trte_free(rxq);\n+\n+fail_rxq_alloc:\n+fail_rxq_args:\n+\treturn rc;\n+}\n+\n+static sfc_dp_rx_qdestroy_t sfc_ef100_rx_qdestroy;\n+static void\n+sfc_ef100_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)\n+{\n+\tstruct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);\n+\n+\trte_free(rxq->sw_ring);\n+\trte_free(rxq);\n+}\n+\n+static sfc_dp_rx_qstart_t sfc_ef100_rx_qstart;\n+static int\n+sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)\n+{\n+\tstruct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);\n+\n+\tSFC_ASSERT(rxq->completed == 0);\n+\tSFC_ASSERT(rxq->added == 0);\n+\n+\tsfc_ef100_rx_qrefill(rxq);\n+\n+\trxq->evq_read_ptr = evq_read_ptr;\n+\n+\trxq->flags |= SFC_EF100_RXQ_STARTED;\n+\trxq->flags &= ~(SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION);\n+\n+\treturn 0;\n+}\n+\n+static sfc_dp_rx_qstop_t sfc_ef100_rx_qstop;\n+static void\n+sfc_ef100_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)\n+{\n+\tstruct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);\n+\n+\trxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;\n+\n+\t*evq_read_ptr = rxq->evq_read_ptr;\n+}\n+\n+static sfc_dp_rx_qrx_ev_t sfc_ef100_rx_qrx_ev;\n+static bool\n+sfc_ef100_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)\n+{\n+\t__rte_unused struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);\n+\n+\tSFC_ASSERT(rxq->flags & SFC_EF100_RXQ_NOT_RUNNING);\n+\n+\t/*\n+\t * It is safe to ignore Rx event since we free all mbufs on\n+\t * queue purge anyway.\n+\t */\n+\n+\treturn false;\n+}\n+\n+static sfc_dp_rx_qpurge_t sfc_ef100_rx_qpurge;\n+static void\n+sfc_ef100_rx_qpurge(struct sfc_dp_rxq *dp_rxq)\n+{\n+\tstruct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);\n+\tunsigned int i;\n+\tstruct sfc_ef100_rx_sw_desc *rxd;\n+\n+\tfor (i = rxq->completed; i != rxq->added; ++i) {\n+\t\trxd = &rxq->sw_ring[i & rxq->ptr_mask];\n+\t\trte_mbuf_raw_free(rxd->mbuf);\n+\t\trxd->mbuf = NULL;\n+\t}\n+\n+\trxq->completed = rxq->added = 0;\n+\trxq->ready_pkts = 0;\n+\n+\trxq->flags &= ~SFC_EF100_RXQ_STARTED;\n+}\n+\n+struct sfc_dp_rx sfc_ef100_rx = {\n+\t.dp = {\n+\t\t.name\t\t= SFC_KVARG_DATAPATH_EF100,\n+\t\t.type\t\t= SFC_DP_RX,\n+\t\t.hw_fw_caps\t= 
SFC_DP_HW_FW_CAP_EF100,\n+\t},\n+\t.features\t\t= SFC_DP_RX_FEAT_MULTI_PROCESS,\n+\t.dev_offload_capa\t= 0,\n+\t.queue_offload_capa\t= DEV_RX_OFFLOAD_SCATTER,\n+\t.get_dev_info\t\t= sfc_ef100_rx_get_dev_info,\n+\t.qsize_up_rings\t\t= sfc_ef100_rx_qsize_up_rings,\n+\t.qcreate\t\t= sfc_ef100_rx_qcreate,\n+\t.qdestroy\t\t= sfc_ef100_rx_qdestroy,\n+\t.qstart\t\t\t= sfc_ef100_rx_qstart,\n+\t.qstop\t\t\t= sfc_ef100_rx_qstop,\n+\t.qrx_ev\t\t\t= sfc_ef100_rx_qrx_ev,\n+\t.qpurge\t\t\t= sfc_ef100_rx_qpurge,\n+\t.supported_ptypes_get\t= sfc_ef100_supported_ptypes_get,\n+\t.qdesc_npending\t\t= sfc_ef100_rx_qdesc_npending,\n+\t.qdesc_status\t\t= sfc_ef100_rx_qdesc_status,\n+\t.pkt_burst\t\t= sfc_ef100_recv_pkts,\n+};\ndiff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c\nindex ae668face0..e1db9236e9 100644\n--- a/drivers/net/sfc/sfc_ethdev.c\n+++ b/drivers/net/sfc/sfc_ethdev.c\n@@ -2151,6 +2151,7 @@ sfc_register_dp(void)\n \t/* Register once */\n \tif (TAILQ_EMPTY(&sfc_dp_head)) {\n \t\t/* Prefer EF10 datapath */\n+\t\tsfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);\n \t\tsfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);\n \t\tsfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);\n \t\tsfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);\ndiff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h\nindex f9d10e71cf..cc3f4a353e 100644\n--- a/drivers/net/sfc/sfc_kvargs.h\n+++ b/drivers/net/sfc/sfc_kvargs.h\n@@ -34,12 +34,14 @@ extern \"C\" {\n #define SFC_KVARG_DATAPATH_EF10\t\t\"ef10\"\n #define SFC_KVARG_DATAPATH_EF10_SIMPLE\t\"ef10_simple\"\n #define SFC_KVARG_DATAPATH_EF10_ESSB\t\"ef10_essb\"\n+#define SFC_KVARG_DATAPATH_EF100\t\"ef100\"\n \n #define SFC_KVARG_RX_DATAPATH\t\t\"rx_datapath\"\n #define SFC_KVARG_VALUES_RX_DATAPATH \\\n \t\"[\" SFC_KVARG_DATAPATH_EFX \"|\" \\\n \t    SFC_KVARG_DATAPATH_EF10 \"|\" \\\n-\t    SFC_KVARG_DATAPATH_EF10_ESSB \"]\"\n+\t    SFC_KVARG_DATAPATH_EF10_ESSB \"|\" \\\n+\t    SFC_KVARG_DATAPATH_EF100 \"]\"\n \n #define SFC_KVARG_TX_DATAPATH\t\t\"tx_datapath\"\n #define SFC_KVARG_VALUES_TX_DATAPATH \\\n",
    "prefixes": [
        "16/36"
    ]
}
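
As a usage note, the response links to related resources (mbox, comments, checks, series) rather than inlining them. Below is a small sketch of following the "mbox" field above to save the raw patch, again assuming the requests library; the output filename and the follow-up git command are illustrative only.

# Sketch: download the mbox referenced by the "mbox" field of the response above.
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/80524/").json()
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("80524.mbox", "wb") as f:
    f.write(mbox.content)
# The saved file can then be applied to a DPDK tree with, e.g., "git am 80524.mbox".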