get:
Show a patch.

patch:
Partially update a patch; only the fields included in the request body are changed.

put:
Update a patch, replacing all writable fields.
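
As an illustration of these methods, here is a minimal Python sketch using the requests library. The URL matches the example exchange below; the API token and the "accepted" state value are hypothetical placeholders, since writes require an authenticated Patchwork account with suitable permissions on the project.

import requests

url = "http://patches.dpdk.org/api/patches/11505/"
# Hypothetical token: writes require an API token for an
# account with maintainer rights on the project.
headers = {"Authorization": "Token 0123456789abcdef"}

# GET: show a patch (no authentication needed for reads).
patch = requests.get(url).json()
print(patch["name"], patch["state"])

# PATCH: change only the fields included in the request body.
resp = requests.patch(url, headers=headers, json={"state": "accepted"})
resp.raise_for_status()

# PUT works the same way but expects the full set of writable
# fields rather than a partial update.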

GET /api/patches/11505/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 11505,
    "url": "http://patches.dpdk.org/api/patches/11505/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1457982741-11047-5-git-send-email-jan@semihalf.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1457982741-11047-5-git-send-email-jan@semihalf.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1457982741-11047-5-git-send-email-jan@semihalf.com",
    "date": "2016-03-14T19:12:21",
    "name": "[dpdk-dev,v6,4/4] ena: DPDK polling-mode driver for Amazon Elastic Network Adapters (ENA)",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "413ffe771ec8af04308889b0cfa2a766e720376e",
    "submitter": {
        "id": 421,
        "url": "http://patches.dpdk.org/api/people/421/?format=api",
        "name": "Jan Medala",
        "email": "jan@semihalf.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1457982741-11047-5-git-send-email-jan@semihalf.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/11505/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/11505/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id D97CB5595;\n\tMon, 14 Mar 2016 20:13:17 +0100 (CET)",
            "from mail-wm0-f49.google.com (mail-wm0-f49.google.com\n\t[74.125.82.49]) by dpdk.org (Postfix) with ESMTP id 478BD5583\n\tfor <dev@dpdk.org>; Mon, 14 Mar 2016 20:13:14 +0100 (CET)",
            "by mail-wm0-f49.google.com with SMTP id p65so116096270wmp.0\n\tfor <dev@dpdk.org>; Mon, 14 Mar 2016 12:13:14 -0700 (PDT)",
            "from anpa-dpdk-2.lab.semihalf.com (cardhu.semihalf.com.\n\t[213.17.239.108]) by smtp.gmail.com with ESMTPSA id\n\tq139sm17489094wmd.2.2016.03.14.12.13.12\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tMon, 14 Mar 2016 12:13:13 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=semihalf-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=YKkxQajgiySp6kv17Q1gDFauk8j7SWJ6ZG1cG6hZD2w=;\n\tb=R/Hb+s8IAlB7+HXxgfyG4n4wrJIglJumzpIxsqhJBHrs6ICe7n4aEw1HlfoQYgFcPJ\n\tUtg1thGtaCSxyKB66iyEFDNnBLTNO+h2/B/r3NY/h8Zmf9nlTy8efYAvDnW5qtV7YVTT\n\tiNByVjoJAJjAnBdek48S/1Mr/qeQ1c3p+UsCLtFk0cCflDx3WIL6Zdqc3HaMRgJtV0P1\n\twVEsydVeiQztzTePRRopEpk2nUe+SfmEWmNfaSHgS0nmQxlUeymTGGvz/A/8kYgf5ddt\n\trO8Kkos9Hc3AJD7xeHGvygT3OMu2CZyIcGB5GyFKOpgL8UU9DnxreEKNmU1ylnbD9GrO\n\tomcg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=YKkxQajgiySp6kv17Q1gDFauk8j7SWJ6ZG1cG6hZD2w=;\n\tb=az3mdwWJIWN0cBtXmB8IrH2gtXNkwJNNA0qIfrvUAbDwNq8M5lrzk4gLepaFcovG+6\n\tX5ho08eGeZ3mfizFwkYvbiRXizv81wvbUMIaAPFe9I9E5EUbWtA8p/IRN2/OGz5YRdoo\n\tNFJKVjZn/cc0B/4EDP4zlzhppPf3fH6PDLa136VzCZkI7SNBvLsw4VEnAMD6gCzIb0N7\n\tNP691x3FC+EVHENgyGSdZagtvDct0MZvpOPHHgAIIev2MhOHFe9v9o0RJ7kUUa4/qscP\n\tsm1cNAaCw+jONuLDlWY8ksemaUOsZwT+GBPzj0UQMLe00DCfMgExPtt7jUoZXKMINMXR\n\t+Ltw==",
        "X-Gm-Message-State": "AD7BkJKXbK6UVfg5xbD8Jqg7pOq7dKPHfswv7K3JdilTdKeg/9Eh/cNj353x1q6EeWqvNw==",
        "X-Received": "by 10.28.172.194 with SMTP id v185mr18910846wme.21.1457982794103;\n\tMon, 14 Mar 2016 12:13:14 -0700 (PDT)",
        "From": "Jan Medala <jan@semihalf.com>",
        "To": "dev@dpdk.org",
        "Cc": "matua@amazon.com, Jan Medala <jan@semihalf.com>,\n\tEvgeny Schemeilin <evgenys@amazon.com>, Jakub Palider <jpa@semihalf.com>",
        "Date": "Mon, 14 Mar 2016 20:12:21 +0100",
        "Message-Id": "<1457982741-11047-5-git-send-email-jan@semihalf.com>",
        "X-Mailer": "git-send-email 1.9.1",
        "In-Reply-To": "<1457982741-11047-1-git-send-email-jan@semihalf.com>",
        "References": "<1457982741-11047-1-git-send-email-jan@semihalf.com>",
        "Subject": "[dpdk-dev] [PATCH v6 4/4] ena: DPDK polling-mode driver for Amazon\n\tElastic Network Adapters (ENA)",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This is a PMD for the Amazon ethernet ENA family.\nThe driver operates variety of ENA adapters through feature negotiation\nwith the adapter and upgradable commands set.\nENA driver handles PCI Physical and Virtual ENA functions.\n\nSigned-off-by: Evgeny Schemeilin <evgenys@amazon.com>\nSigned-off-by: Jan Medala <jan@semihalf.com>\nSigned-off-by: Jakub Palider <jpa@semihalf.com>\n---\n config/common_base                      |   11 +\n drivers/net/Makefile                    |    1 +\n drivers/net/ena/Makefile                |   61 ++\n drivers/net/ena/ena_ethdev.c            | 1445 +++++++++++++++++++++++++++++++\n drivers/net/ena/ena_ethdev.h            |  160 ++++\n drivers/net/ena/ena_logs.h              |   74 ++\n drivers/net/ena/ena_platform.h          |   59 ++\n drivers/net/ena/rte_pmd_ena_version.map |    4 +\n mk/rte.app.mk                           |    1 +\n 9 files changed, 1816 insertions(+)\n create mode 100644 drivers/net/ena/Makefile\n create mode 100644 drivers/net/ena/ena_ethdev.c\n create mode 100644 drivers/net/ena/ena_ethdev.h\n create mode 100644 drivers/net/ena/ena_logs.h\n create mode 100644 drivers/net/ena/ena_platform.h\n create mode 100644 drivers/net/ena/rte_pmd_ena_version.map",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 52bd34f..472a9e9 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -135,6 +135,17 @@ CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y\n CONFIG_RTE_NIC_BYPASS=n\n \n #\n+# Compile burst-oriented Amazon ENA PMD driver\n+#\n+CONFIG_RTE_LIBRTE_ENA_PMD=y\n+CONFIG_RTE_LIBRTE_ENA_DEBUG_INIT=y\n+CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n\n+CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n\n+CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n\n+CONFIG_RTE_LIBRTE_ENA_DEBUG_DRIVER=n\n+CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n\n+\n+#\n # Compile burst-oriented IGB & EM PMD drivers\n #\n CONFIG_RTE_LIBRTE_EM_PMD=y\ndiff --git a/drivers/net/Makefile b/drivers/net/Makefile\nindex 0c3393f..612e85e 100644\n--- a/drivers/net/Makefile\n+++ b/drivers/net/Makefile\n@@ -36,6 +36,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x\n DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding\n DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe\n DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000\n+DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena\n DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic\n DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k\n DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e\ndiff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile\nnew file mode 100644\nindex 0000000..ac2b55d\n--- /dev/null\n+++ b/drivers/net/ena/Makefile\n@@ -0,0 +1,61 @@\n+#\n+# BSD LICENSE\n+#\n+# Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.\n+# All rights reserved.\n+#\n+# Redistribution and use in source and binary forms, with or without\n+# modification, are permitted provided that the following conditions\n+# are met:\n+#\n+# * Redistributions of source code must retain the above copyright\n+# notice, this list of conditions and the following disclaimer.\n+# * Redistributions in binary form must reproduce the above copyright\n+# notice, this list of conditions and the following disclaimer in\n+# the documentation and/or other materials provided with the\n+# distribution.\n+# * Neither the name of copyright holder nor the names of its\n+# contributors may be used to endorse or promote products derived\n+# from this software without specific prior written permission.\n+#\n+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+#\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+#\n+# library name\n+#\n+LIB = librte_pmd_ena.a\n+CFLAGS += $(WERROR_FLAGS) -O2\n+INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base\n+\n+EXPORT_MAP := rte_pmd_ena_version.map\n+LIBABIVER := 1\n+\n+VPATH += $(SRCDIR)/base\n+#\n+# all source are stored in SRCS-y\n+#\n+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c\n+\n+# this lib depends upon:\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net lib/librte_malloc\n+\n+CFLAGS += $(INCLUDES)\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c\nnew file mode 100644\nindex 0000000..6e04957\n--- /dev/null\n+++ b/drivers/net/ena/ena_ethdev.c\n@@ -0,0 +1,1445 @@\n+/*-\n+* BSD LICENSE\n+*\n+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.\n+* All rights reserved.\n+*\n+* Redistribution and use in source and binary forms, with or without\n+* modification, are permitted provided that the following conditions\n+* are met:\n+*\n+* * Redistributions of source code must retain the above copyright\n+* notice, this list of conditions and the following disclaimer.\n+* * Redistributions in binary form must reproduce the above copyright\n+* notice, this list of conditions and the following disclaimer in\n+* the documentation and/or other materials provided with the\n+* distribution.\n+* * Neither the name of copyright holder nor the names of its\n+* contributors may be used to endorse or promote products derived\n+* from this software without specific prior written permission.\n+*\n+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+* \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+* A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+*/\n+\n+#include <rte_ether.h>\n+#include <rte_ethdev.h>\n+#include <rte_tcp.h>\n+#include <rte_atomic.h>\n+#include <rte_dev.h>\n+#include <rte_errno.h>\n+\n+#include \"ena_ethdev.h\"\n+#include \"ena_logs.h\"\n+#include \"ena_platform.h\"\n+#include \"ena_com.h\"\n+#include \"ena_eth_com.h\"\n+\n+#include <ena_common_defs.h>\n+#include <ena_regs_defs.h>\n+#include <ena_admin_defs.h>\n+#include <ena_eth_io_defs.h>\n+\n+#define ENA_IO_TXQ_IDX(q)\t(2 * (q))\n+#define ENA_IO_RXQ_IDX(q)\t(2 * (q) + 1)\n+/*reverse version of ENA_IO_RXQ_IDX*/\n+#define ENA_IO_RXQ_IDX_REV(q)\t((q - 1) / 2)\n+\n+/* While processing submitted and completed descriptors (rx and tx path\n+ * respectively) in a loop it is desired to:\n+ *  - perform batch submissions while populating sumbissmion queue\n+ *  - avoid blocking transmission of other packets during cleanup phase\n+ * Hence the utilization ratio of 1/8 of a queue size.\n+ */\n+#define ENA_RING_DESCS_RATIO(ring_size)\t(ring_size / 8)\n+\n+#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)\n+#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))\n+\n+#define GET_L4_HDR_LEN(mbuf)\t\t\t\t\t\\\n+\t((rte_pktmbuf_mtod_offset(mbuf,\tstruct tcp_hdr *,\t\\\n+\t\tmbuf->l3_len + mbuf->l2_len)->data_off) >> 4)\n+\n+#define ENA_RX_RSS_TABLE_LOG_SIZE  7\n+#define ENA_RX_RSS_TABLE_SIZE\t(1 << ENA_RX_RSS_TABLE_LOG_SIZE)\n+#define ENA_HASH_KEY_SIZE\t40\n+\n+/** Vendor ID used by Amazon devices */\n+#define PCI_VENDOR_ID_AMAZON 0x1D0F\n+/** Amazon devices */\n+#define PCI_DEVICE_ID_ENA_VF\t0xEC20\n+#define PCI_DEVICE_ID_ENA_LLQ_VF\t0xEC21\n+\n+static struct rte_pci_id pci_id_ena_map[] = {\n+#define RTE_PCI_DEV_ID_DECL_ENA(vend, dev) {RTE_PCI_DEVICE(vend, dev)},\n+\n+\tRTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF)\n+\tRTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF)\n+\t{.device_id = 0},\n+};\n+\n+static int ena_device_init(struct ena_com_dev *ena_dev,\n+\t\t\t   struct ena_com_dev_get_features_ctx *get_feat_ctx);\n+static int ena_dev_configure(struct rte_eth_dev *dev);\n+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t\t  uint16_t nb_pkts);\n+static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t      uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t      const struct rte_eth_txconf *tx_conf);\n+static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t      uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t      const struct rte_eth_rxconf *rx_conf,\n+\t\t\t      struct rte_mempool *mp);\n+static uint16_t eth_ena_recv_pkts(void *rx_queue,\n+\t\t\t\t  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n+static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);\n+static void ena_init_rings(struct ena_adapter *adapter);\n+static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);\n+static int ena_start(struct rte_eth_dev *dev);\n+static void ena_close(struct rte_eth_dev 
*dev);\n+static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);\n+static void ena_rx_queue_release_all(struct rte_eth_dev *dev);\n+static void ena_tx_queue_release_all(struct rte_eth_dev *dev);\n+static void ena_rx_queue_release(void *queue);\n+static void ena_tx_queue_release(void *queue);\n+static void ena_rx_queue_release_bufs(struct ena_ring *ring);\n+static void ena_tx_queue_release_bufs(struct ena_ring *ring);\n+static int ena_link_update(struct rte_eth_dev *dev,\n+\t\t\t   __rte_unused int wait_to_complete);\n+static int ena_queue_restart(struct ena_ring *ring);\n+static int ena_queue_restart_all(struct rte_eth_dev *dev,\n+\t\t\t\t enum ena_ring_type ring_type);\n+static void ena_stats_restart(struct rte_eth_dev *dev);\n+static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t  struct rte_eth_dev_info *dev_info);\n+static int ena_rss_reta_update(struct rte_eth_dev *dev,\n+\t\t\t       struct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t\t       uint16_t reta_size);\n+static int ena_rss_reta_query(struct rte_eth_dev *dev,\n+\t\t\t      struct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t\t      uint16_t reta_size);\n+\n+static struct eth_dev_ops ena_dev_ops = {\n+\t.dev_configure        = ena_dev_configure,\n+\t.dev_infos_get        = ena_infos_get,\n+\t.rx_queue_setup       = ena_rx_queue_setup,\n+\t.tx_queue_setup       = ena_tx_queue_setup,\n+\t.dev_start            = ena_start,\n+\t.link_update          = ena_link_update,\n+\t.stats_get            = ena_stats_get,\n+\t.mtu_set              = ena_mtu_set,\n+\t.rx_queue_release     = ena_rx_queue_release,\n+\t.tx_queue_release     = ena_tx_queue_release,\n+\t.dev_close            = ena_close,\n+\t.reta_update          = ena_rss_reta_update,\n+\t.reta_query           = ena_rss_reta_query,\n+};\n+\n+static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,\n+\t\t\t\t       struct ena_com_rx_ctx *ena_rx_ctx)\n+{\n+\tuint64_t ol_flags = 0;\n+\n+\tif (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)\n+\t\tol_flags |= PKT_TX_TCP_CKSUM;\n+\telse if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)\n+\t\tol_flags |= PKT_TX_UDP_CKSUM;\n+\n+\tif (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)\n+\t\tol_flags |= PKT_TX_IPV4;\n+\telse if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)\n+\t\tol_flags |= PKT_TX_IPV6;\n+\n+\tif (unlikely(ena_rx_ctx->l4_csum_err))\n+\t\tol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\tif (unlikely(ena_rx_ctx->l3_csum_err))\n+\t\tol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\n+\tmbuf->ol_flags = ol_flags;\n+}\n+\n+static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,\n+\t\t\t\t       struct ena_com_tx_ctx *ena_tx_ctx)\n+{\n+\tstruct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;\n+\n+\tif (mbuf->ol_flags &\n+\t    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {\n+\t\t/* check if TSO is required */\n+\t\tif (mbuf->ol_flags & PKT_TX_TCP_SEG) {\n+\t\t\tena_tx_ctx->tso_enable = true;\n+\n+\t\t\tena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);\n+\t\t}\n+\n+\t\t/* check if L3 checksum is needed */\n+\t\tif (mbuf->ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\tena_tx_ctx->l3_csum_enable = true;\n+\n+\t\tif (mbuf->ol_flags & PKT_TX_IPV6) {\n+\t\t\tena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;\n+\t\t} else {\n+\t\t\tena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;\n+\n+\t\t\t/* set don't fragment (DF) flag */\n+\t\t\tif (mbuf->packet_type &\n+\t\t\t\t(RTE_PTYPE_L4_NONFRAG\n+\t\t\t\t | RTE_PTYPE_INNER_L4_NONFRAG))\n+\t\t\t\tena_tx_ctx->df = true;\n+\t\t}\n+\n+\t\t/* check if L4 checksum is 
needed */\n+\t\tswitch (mbuf->ol_flags & PKT_TX_L4_MASK) {\n+\t\tcase PKT_TX_TCP_CKSUM:\n+\t\t\tena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;\n+\t\t\tena_tx_ctx->l4_csum_enable = true;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_UDP_CKSUM:\n+\t\t\tena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;\n+\t\t\tena_tx_ctx->l4_csum_enable = true;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;\n+\t\t\tena_tx_ctx->l4_csum_enable = false;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tena_meta->mss = mbuf->tso_segsz;\n+\t\tena_meta->l3_hdr_len = mbuf->l3_len;\n+\t\tena_meta->l3_hdr_offset = mbuf->l2_len;\n+\t\t/* this param needed only for TSO */\n+\t\tena_meta->l3_outer_hdr_len = 0;\n+\t\tena_meta->l3_outer_hdr_offset = 0;\n+\n+\t\tena_tx_ctx->meta_valid = true;\n+\t} else {\n+\t\tena_tx_ctx->meta_valid = false;\n+\t}\n+}\n+\n+static void ena_close(struct rte_eth_dev *dev)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\n+\tadapter->state = ENA_ADAPTER_STATE_STOPPED;\n+\n+\tena_rx_queue_release_all(dev);\n+\tena_tx_queue_release_all(dev);\n+}\n+\n+static int ena_rss_reta_update(struct rte_eth_dev *dev,\n+\t\t\t       struct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t\t       uint16_t reta_size)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\tint ret, i;\n+\tu16 entry_value;\n+\tint conf_idx;\n+\tint idx;\n+\n+\tif ((reta_size == 0) || (reta_conf == NULL))\n+\t\treturn -EINVAL;\n+\n+\tif (reta_size > ENA_RX_RSS_TABLE_SIZE) {\n+\t\tRTE_LOG(WARNING, PMD,\n+\t\t\t\"indirection table %d is bigger than supported (%d)\\n\",\n+\t\t\treta_size, ENA_RX_RSS_TABLE_SIZE);\n+\t\tret = -EINVAL;\n+\t\tgoto err;\n+\t}\n+\n+\tfor (i = 0 ; i < reta_size ; i++) {\n+\t\t/* each reta_conf is for 64 entries.\n+\t\t * to support 128 we use 2 conf of 64\n+\t\t */\n+\t\tconf_idx = i / RTE_RETA_GROUP_SIZE;\n+\t\tidx = i % RTE_RETA_GROUP_SIZE;\n+\t\tif (TEST_BIT(reta_conf[conf_idx].mask, idx)) {\n+\t\t\tentry_value =\n+\t\t\t\tENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);\n+\t\t\tret = ena_com_indirect_table_fill_entry(ena_dev,\n+\t\t\t\t\t\t\t\ti,\n+\t\t\t\t\t\t\t\tentry_value);\n+\t\t\tif (unlikely(ret && (ret != ENA_COM_PERMISSION))) {\n+\t\t\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\t\t\"Cannot fill indirect table\\n\");\n+\t\t\t\tret = -ENOTSUP;\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tret = ena_com_indirect_table_set(ena_dev);\n+\tif (unlikely(ret && (ret != ENA_COM_PERMISSION))) {\n+\t\tRTE_LOG(ERR, PMD, \"Cannot flush the indirect table\\n\");\n+\t\tret = -ENOTSUP;\n+\t\tgoto err;\n+\t}\n+\n+\tRTE_LOG(DEBUG, PMD, \"%s(): RSS configured %d entries  for port %d\\n\",\n+\t\t__func__, reta_size, adapter->rte_dev->data->port_id);\n+err:\n+\treturn ret;\n+}\n+\n+/* Query redirection table. 
*/\n+static int ena_rss_reta_query(struct rte_eth_dev *dev,\n+\t\t\t      struct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t\t      uint16_t reta_size)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\tint ret;\n+\tint i;\n+\tu32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};\n+\tint reta_conf_idx;\n+\tint reta_idx;\n+\n+\tif (reta_size == 0 || reta_conf == NULL ||\n+\t    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))\n+\t\treturn -EINVAL;\n+\n+\tret = ena_com_indirect_table_get(ena_dev, indirect_table);\n+\tif (unlikely(ret && (ret != ENA_COM_PERMISSION))) {\n+\t\tRTE_LOG(ERR, PMD, \"cannot get indirect table\\n\");\n+\t\tret = -ENOTSUP;\n+\t\tgoto err;\n+\t}\n+\n+\tfor (i = 0 ; i < reta_size ; i++) {\n+\t\treta_conf_idx = i / RTE_RETA_GROUP_SIZE;\n+\t\treta_idx = i % RTE_RETA_GROUP_SIZE;\n+\t\tif (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))\n+\t\t\treta_conf[reta_conf_idx].reta[reta_idx] =\n+\t\t\t\tENA_IO_RXQ_IDX_REV(indirect_table[i]);\n+\t}\n+err:\n+\treturn ret;\n+}\n+\n+static int ena_rss_init_default(struct ena_adapter *adapter)\n+{\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\tuint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;\n+\tint rc, i;\n+\tu32 val;\n+\n+\trc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);\n+\tif (unlikely(rc)) {\n+\t\tRTE_LOG(ERR, PMD, \"Cannot init indirect table\\n\");\n+\t\tgoto err_rss_init;\n+\t}\n+\n+\tfor (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {\n+\t\tval = i % nb_rx_queues;\n+\t\trc = ena_com_indirect_table_fill_entry(ena_dev, i,\n+\t\t\t\t\t\t       ENA_IO_RXQ_IDX(val));\n+\t\tif (unlikely(rc && (rc != ENA_COM_PERMISSION))) {\n+\t\t\tRTE_LOG(ERR, PMD, \"Cannot fill indirect table\\n\");\n+\t\t\tgoto err_fill_indir;\n+\t\t}\n+\t}\n+\n+\trc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,\n+\t\t\t\t\tENA_HASH_KEY_SIZE, 0xFFFFFFFF);\n+\tif (unlikely(rc && (rc != ENA_COM_PERMISSION))) {\n+\t\tRTE_LOG(INFO, PMD, \"Cannot fill hash function\\n\");\n+\t\tgoto err_fill_indir;\n+\t}\n+\n+\trc = ena_com_set_default_hash_ctrl(ena_dev);\n+\tif (unlikely(rc && (rc != ENA_COM_PERMISSION))) {\n+\t\tRTE_LOG(INFO, PMD, \"Cannot fill hash control\\n\");\n+\t\tgoto err_fill_indir;\n+\t}\n+\n+\trc = ena_com_indirect_table_set(ena_dev);\n+\tif (unlikely(rc && (rc != ENA_COM_PERMISSION))) {\n+\t\tRTE_LOG(ERR, PMD, \"Cannot flush the indirect table\\n\");\n+\t\tgoto err_fill_indir;\n+\t}\n+\tRTE_LOG(DEBUG, PMD, \"RSS configured for port %d\\n\",\n+\t\tadapter->rte_dev->data->port_id);\n+\n+\treturn 0;\n+\n+err_fill_indir:\n+\tena_com_rss_destroy(ena_dev);\n+err_rss_init:\n+\n+\treturn rc;\n+}\n+\n+static void ena_rx_queue_release_all(struct rte_eth_dev *dev)\n+{\n+\tstruct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;\n+\tint nb_queues = dev->data->nb_rx_queues;\n+\tint i;\n+\n+\tfor (i = 0; i < nb_queues; i++)\n+\t\tena_rx_queue_release(queues[i]);\n+}\n+\n+static void ena_tx_queue_release_all(struct rte_eth_dev *dev)\n+{\n+\tstruct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;\n+\tint nb_queues = dev->data->nb_tx_queues;\n+\tint i;\n+\n+\tfor (i = 0; i < nb_queues; i++)\n+\t\tena_tx_queue_release(queues[i]);\n+}\n+\n+static void ena_rx_queue_release(void *queue)\n+{\n+\tstruct ena_ring *ring = (struct ena_ring *)queue;\n+\tstruct ena_adapter *adapter = ring->adapter;\n+\tint ena_qid;\n+\n+\tena_assert_msg(ring->configured,\n+\t\t       \"API violation - releasing not 
configured queue\");\n+\tena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,\n+\t\t       \"API violation\");\n+\n+\t/* Destroy HW queue */\n+\tena_qid = ENA_IO_RXQ_IDX(ring->id);\n+\tena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);\n+\n+\t/* Free all bufs */\n+\tena_rx_queue_release_bufs(ring);\n+\n+\t/* Free ring resources */\n+\tif (ring->rx_buffer_info)\n+\t\trte_free(ring->rx_buffer_info);\n+\tring->rx_buffer_info = NULL;\n+\n+\tring->configured = 0;\n+\n+\tRTE_LOG(NOTICE, PMD, \"RX Queue %d:%d released\\n\",\n+\t\tring->port_id, ring->id);\n+}\n+\n+static void ena_tx_queue_release(void *queue)\n+{\n+\tstruct ena_ring *ring = (struct ena_ring *)queue;\n+\tstruct ena_adapter *adapter = ring->adapter;\n+\tint ena_qid;\n+\n+\tena_assert_msg(ring->configured,\n+\t\t       \"API violation. Releasing not configured queue\");\n+\tena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,\n+\t\t       \"API violation\");\n+\n+\t/* Destroy HW queue */\n+\tena_qid = ENA_IO_TXQ_IDX(ring->id);\n+\tena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);\n+\n+\t/* Free all bufs */\n+\tena_tx_queue_release_bufs(ring);\n+\n+\t/* Free ring resources */\n+\tif (ring->tx_buffer_info)\n+\t\trte_free(ring->tx_buffer_info);\n+\n+\tif (ring->empty_tx_reqs)\n+\t\trte_free(ring->empty_tx_reqs);\n+\n+\tring->empty_tx_reqs = NULL;\n+\tring->tx_buffer_info = NULL;\n+\n+\tring->configured = 0;\n+\n+\tRTE_LOG(NOTICE, PMD, \"TX Queue %d:%d released\\n\",\n+\t\tring->port_id, ring->id);\n+}\n+\n+static void ena_rx_queue_release_bufs(struct ena_ring *ring)\n+{\n+\tunsigned int ring_mask = ring->ring_size - 1;\n+\n+\twhile (ring->next_to_clean != ring->next_to_use) {\n+\t\tstruct rte_mbuf *m =\n+\t\t\tring->rx_buffer_info[ring->next_to_clean & ring_mask];\n+\n+\t\tif (m)\n+\t\t\t__rte_mbuf_raw_free(m);\n+\n+\t\tring->next_to_clean =\n+\t\t\tENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);\n+\t}\n+}\n+\n+static void ena_tx_queue_release_bufs(struct ena_ring *ring)\n+{\n+\tunsigned int ring_mask = ring->ring_size - 1;\n+\n+\twhile (ring->next_to_clean != ring->next_to_use) {\n+\t\tstruct ena_tx_buffer *tx_buf =\n+\t\t\t&ring->tx_buffer_info[ring->next_to_clean & ring_mask];\n+\n+\t\tif (tx_buf->mbuf)\n+\t\t\trte_pktmbuf_free(tx_buf->mbuf);\n+\n+\t\tring->next_to_clean =\n+\t\t\tENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);\n+\t}\n+}\n+\n+static int ena_link_update(struct rte_eth_dev *dev,\n+\t\t\t   __rte_unused int wait_to_complete)\n+{\n+\tstruct rte_eth_link *link = &dev->data->dev_link;\n+\n+\tlink->link_status = 1;\n+\tlink->link_speed = ETH_LINK_SPEED_10G;\n+\tlink->link_duplex = ETH_LINK_FULL_DUPLEX;\n+\n+\treturn 0;\n+}\n+\n+static int ena_queue_restart_all(struct rte_eth_dev *dev,\n+\t\t\t\t enum ena_ring_type ring_type)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tstruct ena_ring *queues = NULL;\n+\tint i = 0;\n+\tint rc = 0;\n+\n+\tqueues = (ring_type == ENA_RING_TYPE_RX) ?\n+\t\tadapter->rx_ring : adapter->tx_ring;\n+\n+\tfor (i = 0; i < adapter->num_queues; i++) {\n+\t\tif (queues[i].configured) {\n+\t\t\tif (ring_type == ENA_RING_TYPE_RX) {\n+\t\t\t\tena_assert_msg(\n+\t\t\t\t\tdev->data->rx_queues[i] == &queues[i],\n+\t\t\t\t\t\"Inconsistent state of rx queues\\n\");\n+\t\t\t} else {\n+\t\t\t\tena_assert_msg(\n+\t\t\t\t\tdev->data->tx_queues[i] == &queues[i],\n+\t\t\t\t\t\"Inconsistent state of tx queues\\n\");\n+\t\t\t}\n+\n+\t\t\trc = ena_queue_restart(&queues[i]);\n+\n+\t\t\tif (rc) 
{\n+\t\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t\t     \"failed to restart queue %d type(%d)\\n\",\n+\t\t\t\t\t     i, ring_type);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)\n+{\n+\tuint32_t max_frame_len = adapter->max_mtu;\n+\n+\tif (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)\n+\t\tmax_frame_len =\n+\t\t\tadapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;\n+\n+\treturn max_frame_len;\n+}\n+\n+static int ena_check_valid_conf(struct ena_adapter *adapter)\n+{\n+\tuint32_t max_frame_len = ena_get_mtu_conf(adapter);\n+\n+\tif (max_frame_len > adapter->max_mtu) {\n+\t\tPMD_INIT_LOG(ERR, \"Unsupported MTU of %d\\n\", max_frame_len);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ena_calc_queue_size(struct ena_com_dev *ena_dev,\n+\t\t    struct ena_com_dev_get_features_ctx *get_feat_ctx)\n+{\n+\tuint32_t queue_size = ENA_DEFAULT_RING_SIZE;\n+\n+\tqueue_size = min_t(uint32_t, queue_size,\n+\t\t\t   get_feat_ctx->max_queues.max_cq_depth);\n+\tqueue_size = min_t(uint32_t, queue_size,\n+\t\t\t   get_feat_ctx->max_queues.max_sq_depth);\n+\n+\tif (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)\n+\t\tqueue_size = min_t(uint32_t, queue_size,\n+\t\t\t\t   get_feat_ctx->max_queues.max_llq_depth);\n+\n+\t/* Round down to power of 2 */\n+\tif (!rte_is_power_of_2(queue_size))\n+\t\tqueue_size = rte_align32pow2(queue_size >> 1);\n+\n+\tif (queue_size == 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid queue size\\n\");\n+\t\treturn -EFAULT;\n+\t}\n+\n+\treturn queue_size;\n+}\n+\n+static void ena_stats_restart(struct rte_eth_dev *dev)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\n+\trte_atomic64_init(&adapter->drv_stats->ierrors);\n+\trte_atomic64_init(&adapter->drv_stats->oerrors);\n+\trte_atomic64_init(&adapter->drv_stats->imcasts);\n+\trte_atomic64_init(&adapter->drv_stats->rx_nombuf);\n+}\n+\n+static void ena_stats_get(struct rte_eth_dev *dev,\n+\t\t\t  struct rte_eth_stats *stats)\n+{\n+\tstruct ena_admin_basic_stats ena_stats;\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\tint rc;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn;\n+\n+\tmemset(&ena_stats, 0, sizeof(ena_stats));\n+\trc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);\n+\tif (unlikely(rc)) {\n+\t\tRTE_LOG(ERR, PMD, \"Could not retrieve statistics from ENA\");\n+\t\treturn;\n+\t}\n+\n+\t/* Set of basic statistics from ENA */\n+\tstats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,\n+\t\t\t\t\t  ena_stats.rx_pkts_low);\n+\tstats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,\n+\t\t\t\t\t  ena_stats.tx_pkts_low);\n+\tstats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,\n+\t\t\t\t\tena_stats.rx_bytes_low);\n+\tstats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,\n+\t\t\t\t\tena_stats.tx_bytes_low);\n+\tstats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,\n+\t\t\t\t\t ena_stats.rx_drops_low);\n+\n+\t/* Driver related stats */\n+\tstats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);\n+\tstats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);\n+\tstats->imcasts = rte_atomic64_read(&adapter->drv_stats->imcasts);\n+\tstats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);\n+}\n+\n+static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n+{\n+\tstruct ena_adapter *adapter;\n+\tstruct 
ena_com_dev *ena_dev;\n+\tint rc = 0;\n+\n+\tena_assert_msg(dev->data != NULL, \"Uninitialized device\");\n+\tena_assert_msg(dev->data->dev_private != NULL, \"Uninitialized device\");\n+\tadapter = (struct ena_adapter *)(dev->data->dev_private);\n+\n+\tena_dev = &adapter->ena_dev;\n+\tena_assert_msg(ena_dev != NULL, \"Uninitialized device\");\n+\n+\tif (mtu > ena_get_mtu_conf(adapter)) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"Given MTU (%d) exceeds maximum MTU supported (%d)\\n\",\n+\t\t\tmtu, ena_get_mtu_conf(adapter));\n+\t\trc = -EINVAL;\n+\t\tgoto err;\n+\t}\n+\n+\trc = ena_com_set_dev_mtu(ena_dev, mtu);\n+\tif (rc)\n+\t\tRTE_LOG(ERR, PMD, \"Could not set MTU: %d\\n\", mtu);\n+\telse\n+\t\tRTE_LOG(NOTICE, PMD, \"Set MTU: %d\\n\", mtu);\n+\n+err:\n+\treturn rc;\n+}\n+\n+static int ena_start(struct rte_eth_dev *dev)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tint rc = 0;\n+\n+\tif (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||\n+\t      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {\n+\t\tPMD_INIT_LOG(ERR, \"API violation\");\n+\t\treturn -1;\n+\t}\n+\n+\trc = ena_check_valid_conf(adapter);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &\n+\t    ETH_MQ_RX_RSS_FLAG) {\n+\t\trc = ena_rss_init_default(adapter);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\n+\tena_stats_restart(dev);\n+\n+\tadapter->state = ENA_ADAPTER_STATE_RUNNING;\n+\n+\treturn 0;\n+}\n+\n+static int ena_queue_restart(struct ena_ring *ring)\n+{\n+\tint rc;\n+\n+\tena_assert_msg(ring->configured == 1,\n+\t\t       \"Trying to restart unconfigured queue\\n\");\n+\n+\tring->next_to_clean = 0;\n+\tring->next_to_use = 0;\n+\n+\tif (ring->type == ENA_RING_TYPE_TX)\n+\t\treturn 0;\n+\n+\trc = ena_populate_rx_queue(ring, ring->ring_size - 1);\n+\tif ((unsigned int)rc != ring->ring_size - 1) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to populate rx ring !\\n\");\n+\t\treturn (-1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int ena_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t      uint16_t queue_idx,\n+\t\t\t      uint16_t nb_desc,\n+\t\t\t      __rte_unused unsigned int socket_id,\n+\t\t\t      __rte_unused const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct ena_ring *txq = NULL;\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tunsigned int i;\n+\tint ena_qid;\n+\tint rc;\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\n+\ttxq = &adapter->tx_ring[queue_idx];\n+\n+\tif (txq->configured) {\n+\t\tRTE_LOG(CRIT, PMD,\n+\t\t\t\"API violation. 
Queue %d is already configured\\n\",\n+\t\t\tqueue_idx);\n+\t\treturn -1;\n+\t}\n+\n+\tif (nb_desc > adapter->tx_ring_size) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"Unsupported size of TX queue (max size: %d)\\n\",\n+\t\t\tadapter->tx_ring_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tena_qid = ENA_IO_TXQ_IDX(queue_idx);\n+\trc = ena_com_create_io_queue(ena_dev, ena_qid,\n+\t\t\t\t     ENA_COM_IO_QUEUE_DIRECTION_TX,\n+\t\t\t\t     ena_dev->tx_mem_queue_type,\n+\t\t\t\t     -1 /* admin interrupts is not used */,\n+\t\t\t\t     nb_desc);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"failed to create io TX queue #%d (qid:%d) rc: %d\\n\",\n+\t\t\tqueue_idx, ena_qid, rc);\n+\t}\n+\ttxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];\n+\ttxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];\n+\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->next_to_clean = 0;\n+\ttxq->next_to_use = 0;\n+\ttxq->ring_size = nb_desc;\n+\n+\ttxq->tx_buffer_info = rte_zmalloc(\"txq->tx_buffer_info\",\n+\t\t\t\t\t  sizeof(struct ena_tx_buffer) *\n+\t\t\t\t\t  txq->ring_size,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE);\n+\tif (!txq->tx_buffer_info) {\n+\t\tRTE_LOG(ERR, PMD, \"failed to alloc mem for tx buffer info\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->empty_tx_reqs = rte_zmalloc(\"txq->empty_tx_reqs\",\n+\t\t\t\t\t sizeof(u16) * txq->ring_size,\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE);\n+\tif (!txq->empty_tx_reqs) {\n+\t\tRTE_LOG(ERR, PMD, \"failed to alloc mem for tx reqs\\n\");\n+\t\trte_free(txq->tx_buffer_info);\n+\t\treturn -ENOMEM;\n+\t}\n+\tfor (i = 0; i < txq->ring_size; i++)\n+\t\ttxq->empty_tx_reqs[i] = i;\n+\n+\t/* Store pointer to this queue in upper layer */\n+\ttxq->configured = 1;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\treturn rc;\n+}\n+\n+static int ena_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t      uint16_t queue_idx,\n+\t\t\t      uint16_t nb_desc,\n+\t\t\t      __rte_unused unsigned int socket_id,\n+\t\t\t      __rte_unused const struct rte_eth_rxconf *rx_conf,\n+\t\t\t      struct rte_mempool *mp)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\tstruct ena_ring *rxq = NULL;\n+\tuint16_t ena_qid = 0;\n+\tint rc = 0;\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\n+\trxq = &adapter->rx_ring[queue_idx];\n+\tif (rxq->configured) {\n+\t\tRTE_LOG(CRIT, PMD,\n+\t\t\t\"API violation. 
Queue %d is already configured\\n\",\n+\t\t\tqueue_idx);\n+\t\treturn -1;\n+\t}\n+\n+\tif (nb_desc > adapter->rx_ring_size) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"Unsupported size of RX queue (max size: %d)\\n\",\n+\t\t\tadapter->rx_ring_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tena_qid = ENA_IO_RXQ_IDX(queue_idx);\n+\trc = ena_com_create_io_queue(ena_dev, ena_qid,\n+\t\t\t\t     ENA_COM_IO_QUEUE_DIRECTION_RX,\n+\t\t\t\t     ENA_ADMIN_PLACEMENT_POLICY_HOST,\n+\t\t\t\t     -1 /* admin interrupts not used */,\n+\t\t\t\t     nb_desc);\n+\tif (rc)\n+\t\tRTE_LOG(ERR, PMD, \"failed to create io RX queue #%d rc: %d\\n\",\n+\t\t\tqueue_idx, rc);\n+\n+\trxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];\n+\trxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];\n+\n+\trxq->port_id = dev->data->port_id;\n+\trxq->next_to_clean = 0;\n+\trxq->next_to_use = 0;\n+\trxq->ring_size = nb_desc;\n+\trxq->mb_pool = mp;\n+\n+\trxq->rx_buffer_info = rte_zmalloc(\"rxq->buffer_info\",\n+\t\t\t\t\t  sizeof(struct rte_mbuf *) * nb_desc,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE);\n+\tif (!rxq->rx_buffer_info) {\n+\t\tRTE_LOG(ERR, PMD, \"failed to alloc mem for rx buffer info\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Store pointer to this queue in upper layer */\n+\trxq->configured = 1;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\treturn rc;\n+}\n+\n+static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)\n+{\n+\tunsigned int i;\n+\tint rc;\n+\tunsigned int ring_size = rxq->ring_size;\n+\tunsigned int ring_mask = ring_size - 1;\n+\tint next_to_use = rxq->next_to_use & ring_mask;\n+\tstruct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];\n+\n+\tif (unlikely(!count))\n+\t\treturn 0;\n+\n+\tena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean,\n+\t\t\t\t\t rxq->ring_size)) +\n+\t\t\t count) < rxq->ring_size), \"bad ring state\");\n+\n+\tcount = RTE_MIN(count, ring_size - next_to_use);\n+\n+\t/* get resources for incoming packets */\n+\trc = rte_mempool_get_bulk(rxq->mb_pool,\n+\t\t\t\t  (void **)(&mbufs[next_to_use]), count);\n+\tif (unlikely(rc < 0)) {\n+\t\trte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);\n+\t\tPMD_RX_LOG(DEBUG, \"there are no enough free buffers\");\n+\t\treturn 0;\n+\t}\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tstruct rte_mbuf *mbuf = mbufs[next_to_use];\n+\t\tstruct ena_com_buf ebuf;\n+\n+\t\trte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);\n+\t\t/* prepare physical address for DMA transaction */\n+\t\tebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;\n+\t\tebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;\n+\t\t/* pass resource to device */\n+\t\trc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,\n+\t\t\t\t\t\t&ebuf, next_to_use);\n+\t\tif (unlikely(rc)) {\n+\t\t\tRTE_LOG(WARNING, PMD, \"failed adding rx desc\\n\");\n+\t\t\tbreak;\n+\t\t}\n+\t\tnext_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size);\n+\t}\n+\n+\trte_wmb();\n+\trxq->next_to_use = next_to_use;\n+\t/* let HW know that it can fill buffers with data */\n+\tena_com_write_sq_doorbell(rxq->ena_com_io_sq);\n+\n+\treturn i;\n+}\n+\n+static int ena_device_init(struct ena_com_dev *ena_dev,\n+\t\t\t   struct ena_com_dev_get_features_ctx *get_feat_ctx)\n+{\n+\tint rc;\n+\n+\t/* Initialize mmio registers */\n+\trc = ena_com_mmio_reg_read_request_init(ena_dev);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD, \"failed to init mmio read less\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\t/* reset device */\n+\trc = ena_com_dev_reset(ena_dev);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD, \"cannot reset 
device\\n\");\n+\t\tgoto err_mmio_read_less;\n+\t}\n+\n+\t/* check FW version */\n+\trc = ena_com_validate_version(ena_dev);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD, \"device version is too low\\n\");\n+\t\tgoto err_mmio_read_less;\n+\t}\n+\n+\tena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);\n+\n+\t/* ENA device administration layer init */\n+\trc = ena_com_admin_init(ena_dev, NULL, true);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"cannot initialize ena admin queue with device\\n\");\n+\t\tgoto err_mmio_read_less;\n+\t}\n+\n+\t/* To enable the msix interrupts the driver needs to know the number\n+\t * of queues. So the driver uses polling mode to retrieve this\n+\t * information.\n+\t */\n+\tena_com_set_admin_polling_mode(ena_dev, true);\n+\n+\t/* Get Device Attributes and features */\n+\trc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"cannot get attribute for ena device rc= %d\\n\", rc);\n+\t\tgoto err_admin_init;\n+\t}\n+\n+\treturn 0;\n+\n+err_admin_init:\n+\tena_com_admin_destroy(ena_dev);\n+\n+err_mmio_read_less:\n+\tena_com_mmio_reg_read_request_destroy(ena_dev);\n+\n+\treturn rc;\n+}\n+\n+static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct rte_pci_device *pci_dev;\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(eth_dev->data->dev_private);\n+\tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n+\tstruct ena_com_dev_get_features_ctx get_feat_ctx;\n+\tint queue_size, rc;\n+\n+\tstatic int adapters_found;\n+\n+\tmemset(adapter, 0, sizeof(struct ena_adapter));\n+\tena_dev = &adapter->ena_dev;\n+\n+\teth_dev->dev_ops = &ena_dev_ops;\n+\teth_dev->rx_pkt_burst = &eth_ena_recv_pkts;\n+\teth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;\n+\tadapter->rte_eth_dev_data = eth_dev->data;\n+\tadapter->rte_dev = eth_dev;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\tpci_dev = eth_dev->pci_dev;\n+\tadapter->pdev = pci_dev;\n+\n+\tPMD_INIT_LOG(INFO, \"Initializing %x:%x:%x.%d\\n\",\n+\t\t     pci_dev->addr.domain,\n+\t\t     pci_dev->addr.bus,\n+\t\t     pci_dev->addr.devid,\n+\t\t     pci_dev->addr.function);\n+\n+\tadapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;\n+\tadapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;\n+\n+\t/* Present ENA_MEM_BAR indicates available LLQ mode.\n+\t * Use corresponding policy\n+\t */\n+\tif (adapter->dev_mem_base)\n+\t\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;\n+\telse if (adapter->regs)\n+\t\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\telse\n+\t\tPMD_INIT_LOG(CRIT, \"Failed to access registers BAR(%d)\\n\",\n+\t\t\t     ENA_REGS_BAR);\n+\n+\tena_dev->reg_bar = adapter->regs;\n+\tena_dev->dmadev = adapter->pdev;\n+\n+\tadapter->id_number = adapters_found;\n+\n+\tsnprintf(adapter->name, ENA_NAME_MAX_LEN, \"ena_%d\",\n+\t\t adapter->id_number);\n+\n+\t/* device specific initialization routine */\n+\trc = ena_device_init(ena_dev, &get_feat_ctx);\n+\tif (rc) {\n+\t\tPMD_INIT_LOG(CRIT, \"Failed to init ENA device\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {\n+\t\tif (get_feat_ctx.max_queues.max_llq_num == 0) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t     \"Trying to use LLQ but llq_num is 0.\\n\"\n+\t\t\t\t     \"Fall back into regular queues.\\n\");\n+\t\t\tena_dev->tx_mem_queue_type =\n+\t\t\t\tENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\t\t\tadapter->num_queues =\n+\t\t\t\tget_feat_ctx.max_queues.max_sq_num;\n+\t\t} else 
{\n+\t\t\tadapter->num_queues =\n+\t\t\t\tget_feat_ctx.max_queues.max_llq_num;\n+\t\t}\n+\t} else {\n+\t\tadapter->num_queues = get_feat_ctx.max_queues.max_sq_num;\n+\t}\n+\n+\tqueue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);\n+\tif ((queue_size <= 0) || (adapter->num_queues <= 0))\n+\t\treturn -EFAULT;\n+\n+\tadapter->tx_ring_size = queue_size;\n+\tadapter->rx_ring_size = queue_size;\n+\n+\t/* prepare ring structures */\n+\tena_init_rings(adapter);\n+\n+\t/* Set max MTU for this device */\n+\tadapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;\n+\n+\t/* Copy MAC address and point DPDK to it */\n+\teth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;\n+\tether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,\n+\t\t\t(struct ether_addr *)adapter->mac_addr);\n+\n+\tadapter->drv_stats = rte_zmalloc(\"adapter stats\",\n+\t\t\t\t\t sizeof(*adapter->drv_stats),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE);\n+\tif (!adapter->drv_stats) {\n+\t\tRTE_LOG(ERR, PMD, \"failed to alloc mem for adapter stats\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tadapters_found++;\n+\tadapter->state = ENA_ADAPTER_STATE_INIT;\n+\n+\treturn 0;\n+}\n+\n+static int ena_dev_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct ena_adapter *adapter =\n+\t\t(struct ena_adapter *)(dev->data->dev_private);\n+\n+\tif (!(adapter->state == ENA_ADAPTER_STATE_INIT ||\n+\t      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {\n+\t\tPMD_INIT_LOG(ERR, \"Illegal adapter state: %d\\n\",\n+\t\t\t     adapter->state);\n+\t\treturn -1;\n+\t}\n+\n+\tswitch (adapter->state) {\n+\tcase ENA_ADAPTER_STATE_INIT:\n+\tcase ENA_ADAPTER_STATE_STOPPED:\n+\t\tadapter->state = ENA_ADAPTER_STATE_CONFIG;\n+\t\tbreak;\n+\tcase ENA_ADAPTER_STATE_CONFIG:\n+\t\tRTE_LOG(WARNING, PMD,\n+\t\t\t\"Ivalid driver state while trying to configure device\\n\");\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void ena_init_rings(struct ena_adapter *adapter)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < adapter->num_queues; i++) {\n+\t\tstruct ena_ring *ring = &adapter->tx_ring[i];\n+\n+\t\tring->configured = 0;\n+\t\tring->type = ENA_RING_TYPE_TX;\n+\t\tring->adapter = adapter;\n+\t\tring->id = i;\n+\t\tring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;\n+\t\tring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;\n+\t}\n+\n+\tfor (i = 0; i < adapter->num_queues; i++) {\n+\t\tstruct ena_ring *ring = &adapter->rx_ring[i];\n+\n+\t\tring->configured = 0;\n+\t\tring->type = ENA_RING_TYPE_RX;\n+\t\tring->adapter = adapter;\n+\t\tring->id = i;\n+\t}\n+}\n+\n+static void ena_infos_get(struct rte_eth_dev *dev,\n+\t\t\t  struct rte_eth_dev_info *dev_info)\n+{\n+\tstruct ena_adapter *adapter;\n+\tstruct ena_com_dev *ena_dev;\n+\tstruct ena_com_dev_get_features_ctx feat;\n+\tuint32_t rx_feat = 0, tx_feat = 0;\n+\tint rc = 0;\n+\n+\tena_assert_msg(dev->data != NULL, \"Uninitialized device\");\n+\tena_assert_msg(dev->data->dev_private != NULL, \"Uninitialized device\");\n+\tadapter = (struct ena_adapter *)(dev->data->dev_private);\n+\n+\tena_dev = &adapter->ena_dev;\n+\tena_assert_msg(ena_dev != NULL, \"Uninitialized device\");\n+\n+\t/* Get supported features from HW */\n+\trc = ena_com_get_dev_attr_feat(ena_dev, &feat);\n+\tif (unlikely(rc)) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"Cannot get attribute for ena device rc= %d\\n\", rc);\n+\t\treturn;\n+\t}\n+\n+\t/* Set Tx & Rx features available for device */\n+\tif (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)\n+\t\ttx_feat\t|= 
DEV_TX_OFFLOAD_TCP_TSO;\n+\n+\tif (feat.offload.tx &\n+\t    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)\n+\t\ttx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |\n+\t\t\tDEV_TX_OFFLOAD_UDP_CKSUM |\n+\t\t\tDEV_TX_OFFLOAD_TCP_CKSUM;\n+\n+\tif (feat.offload.tx &\n+\t    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)\n+\t\trx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |\n+\t\t\tDEV_RX_OFFLOAD_UDP_CKSUM  |\n+\t\t\tDEV_RX_OFFLOAD_TCP_CKSUM;\n+\n+\t/* Inform framework about available features */\n+\tdev_info->rx_offload_capa = rx_feat;\n+\tdev_info->tx_offload_capa = tx_feat;\n+\n+\tdev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;\n+\tdev_info->max_rx_pktlen  = adapter->max_mtu;\n+\tdev_info->max_mac_addrs = 1;\n+\n+\tdev_info->max_rx_queues = adapter->num_queues;\n+\tdev_info->max_tx_queues = adapter->num_queues;\n+\tdev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;\n+}\n+\n+static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t\t  uint16_t nb_pkts)\n+{\n+\tstruct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);\n+\tunsigned int ring_size = rx_ring->ring_size;\n+\tunsigned int ring_mask = ring_size - 1;\n+\tuint16_t next_to_clean = rx_ring->next_to_clean;\n+\tint desc_in_use = 0;\n+\tunsigned int recv_idx = 0;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tstruct rte_mbuf *mbuf_head = NULL;\n+\tstruct rte_mbuf *mbuf_prev = NULL;\n+\tstruct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;\n+\tunsigned int completed;\n+\n+\tstruct ena_com_rx_ctx ena_rx_ctx;\n+\tint rc = 0;\n+\n+\t/* Check adapter state */\n+\tif (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {\n+\t\tRTE_LOG(ALERT, PMD,\n+\t\t\t\"Trying to receive pkts while device is NOT running\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\tdesc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use,\n+\t\t\t\t     next_to_clean, ring_size);\n+\tif (unlikely(nb_pkts > desc_in_use))\n+\t\tnb_pkts = desc_in_use;\n+\n+\tfor (completed = 0; completed < nb_pkts; completed++) {\n+\t\tint segments = 0;\n+\n+\t\tena_rx_ctx.max_bufs = rx_ring->ring_size;\n+\t\tena_rx_ctx.ena_bufs = rx_ring->ena_bufs;\n+\t\tena_rx_ctx.descs = 0;\n+\t\t/* receive packet context */\n+\t\trc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,\n+\t\t\t\t    rx_ring->ena_com_io_sq,\n+\t\t\t\t    &ena_rx_ctx);\n+\t\tif (unlikely(rc)) {\n+\t\t\tRTE_LOG(ERR, PMD, \"ena_com_rx_pkt error %d\\n\", rc);\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\tif (unlikely(ena_rx_ctx.descs == 0))\n+\t\t\tbreak;\n+\n+\t\twhile (segments < ena_rx_ctx.descs) {\n+\t\t\tmbuf = rx_buff_info[next_to_clean & ring_mask];\n+\t\t\tmbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;\n+\t\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\tmbuf->refcnt = 1;\n+\t\t\tmbuf->next = NULL;\n+\t\t\tif (segments == 0) {\n+\t\t\t\tmbuf->nb_segs = ena_rx_ctx.descs;\n+\t\t\t\tmbuf->port = rx_ring->port_id;\n+\t\t\t\tmbuf->pkt_len = 0;\n+\t\t\t\tmbuf_head = mbuf;\n+\t\t\t} else {\n+\t\t\t\t/* for multi-segment pkts create mbuf chain */\n+\t\t\t\tmbuf_prev->next = mbuf;\n+\t\t\t}\n+\t\t\tmbuf_head->pkt_len += mbuf->data_len;\n+\n+\t\t\tmbuf_prev = mbuf;\n+\t\t\tsegments++;\n+\t\t\tnext_to_clean =\n+\t\t\t\tENA_RX_RING_IDX_NEXT(next_to_clean, ring_size);\n+\t\t}\n+\n+\t\t/* fill mbuf attributes if any */\n+\t\tena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);\n+\t\tmbuf_head->hash.rss = (uint32_t)rx_ring->id;\n+\n+\t\t/* pass to DPDK application head mbuf */\n+\t\trx_pkts[recv_idx] = mbuf_head;\n+\t\trecv_idx++;\n+\t}\n+\n+\t/* Burst refill to save doorbells, memory barriers, const interval */\n+\tif (ring_size - desc_in_use - 1 > 
ENA_RING_DESCS_RATIO(ring_size))\n+\t\tena_populate_rx_queue(rx_ring, ring_size - desc_in_use - 1);\n+\n+\trx_ring->next_to_clean = next_to_clean & ring_mask;\n+\n+\treturn recv_idx;\n+}\n+\n+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t\t  uint16_t nb_pkts)\n+{\n+\tstruct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);\n+\tunsigned int next_to_use = tx_ring->next_to_use;\n+\tstruct rte_mbuf *mbuf;\n+\tunsigned int ring_size = tx_ring->ring_size;\n+\tunsigned int ring_mask = ring_size - 1;\n+\tstruct ena_com_tx_ctx ena_tx_ctx;\n+\tstruct ena_tx_buffer *tx_info;\n+\tstruct ena_com_buf *ebuf;\n+\tuint16_t rc, req_id, total_tx_descs = 0;\n+\tint sent_idx = 0;\n+\tint nb_hw_desc;\n+\n+\t/* Check adapter state */\n+\tif (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {\n+\t\tRTE_LOG(ALERT, PMD,\n+\t\t\t\"Trying to xmit pkts while device is NOT running\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\tfor (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {\n+\t\tmbuf = tx_pkts[sent_idx];\n+\n+\t\treq_id = tx_ring->empty_tx_reqs[next_to_use];\n+\t\ttx_info = &tx_ring->tx_buffer_info[req_id];\n+\t\ttx_info->mbuf = mbuf;\n+\t\ttx_info->num_of_bufs = 0;\n+\t\tebuf = tx_info->bufs;\n+\n+\t\t/* Prepare TX context */\n+\t\tmemset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));\n+\t\tmemset(&ena_tx_ctx.ena_meta, 0x0,\n+\t\t       sizeof(struct ena_com_tx_meta));\n+\t\tena_tx_ctx.ena_bufs = ebuf;\n+\t\tena_tx_ctx.req_id = req_id;\n+\t\tif (tx_ring->tx_mem_queue_type ==\n+\t\t\t\tENA_ADMIN_PLACEMENT_POLICY_DEV) {\n+\t\t\t/* prepare the push buffer with\n+\t\t\t * virtual address of the data\n+\t\t\t */\n+\t\t\tena_tx_ctx.header_len =\n+\t\t\t\tRTE_MIN(mbuf->data_len,\n+\t\t\t\t\ttx_ring->tx_max_header_size);\n+\t\t\tena_tx_ctx.push_header =\n+\t\t\t\t(void *)((char *)mbuf->buf_addr +\n+\t\t\t\t\t mbuf->data_off);\n+\t\t} /* there's no else as we take advantage of memset zeroing */\n+\n+\t\t/* Set TX offloads flags, if applicable */\n+\t\tena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);\n+\n+\t\tif (unlikely(mbuf->ol_flags &\n+\t\t\t     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))\n+\t\t\trte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);\n+\n+\t\trte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);\n+\n+\t\t/* Process first segment taking into\n+\t\t * consideration pushed header\n+\t\t */\n+\t\tif (mbuf->data_len > ena_tx_ctx.header_len) {\n+\t\t\tebuf->paddr = mbuf->buf_physaddr +\n+\t\t\t\t      mbuf->data_off +\n+\t\t\t\t      ena_tx_ctx.header_len;\n+\t\t\tebuf->len = mbuf->data_len - ena_tx_ctx.header_len;\n+\t\t\tebuf++;\n+\t\t\ttx_info->num_of_bufs++;\n+\t\t}\n+\n+\t\twhile ((mbuf = mbuf->next) != NULL) {\n+\t\t\tebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;\n+\t\t\tebuf->len = mbuf->data_len;\n+\t\t\tebuf++;\n+\t\t\ttx_info->num_of_bufs++;\n+\t\t}\n+\n+\t\tena_tx_ctx.num_bufs = tx_info->num_of_bufs;\n+\n+\t\t/* Write data to device */\n+\t\trc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,\n+\t\t\t\t\t&ena_tx_ctx, &nb_hw_desc);\n+\t\tif (unlikely(rc))\n+\t\t\tbreak;\n+\n+\t\ttx_info->tx_descs = nb_hw_desc;\n+\n+\t\tnext_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size);\n+\t}\n+\n+\t/* Let HW do it's best :-) */\n+\trte_wmb();\n+\tena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);\n+\n+\t/* Clear complete packets  */\n+\twhile (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {\n+\t\t/* Get Tx info & store how many descs were processed  */\n+\t\ttx_info = &tx_ring->tx_buffer_info[req_id];\n+\t\ttotal_tx_descs += 
tx_info->tx_descs;\n+\n+\t\t/* Free whole mbuf chain  */\n+\t\tmbuf = tx_info->mbuf;\n+\t\trte_pktmbuf_free(mbuf);\n+\n+\t\t/* Put back descriptor to the ring for reuse */\n+\t\ttx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id;\n+\t\ttx_ring->next_to_clean =\n+\t\t\tENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean,\n+\t\t\t\t\t     tx_ring->ring_size);\n+\n+\t\t/* If too many descs to clean, leave it for another run */\n+\t\tif (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))\n+\t\t\tbreak;\n+\t}\n+\n+\t/* acknowledge completion of sent packets */\n+\tena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);\n+\ttx_ring->next_to_use = next_to_use;\n+\treturn sent_idx;\n+}\n+\n+static struct eth_driver rte_ena_pmd = {\n+\t{\n+\t\t.name = \"rte_ena_pmd\",\n+\t\t.id_table = pci_id_ena_map,\n+\t\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING,\n+\t},\n+\t.eth_dev_init = eth_ena_dev_init,\n+\t.dev_private_size = sizeof(struct ena_adapter),\n+};\n+\n+static int\n+rte_ena_pmd_init(const char *name __rte_unused,\n+\t\t const char *params __rte_unused)\n+{\n+\trte_eth_driver_register(&rte_ena_pmd);\n+\treturn 0;\n+};\n+\n+struct rte_driver ena_pmd_drv = {\n+\t.name = \"ena_driver\",\n+\t.type = PMD_PDEV,\n+\t.init = rte_ena_pmd_init,\n+};\n+\n+PMD_REGISTER_DRIVER(ena_pmd_drv);\ndiff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h\nnew file mode 100644\nindex 0000000..ba6f01e\n--- /dev/null\n+++ b/drivers/net/ena/ena_ethdev.h\n@@ -0,0 +1,160 @@\n+/*-\n+* BSD LICENSE\n+*\n+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.\n+* All rights reserved.\n+*\n+* Redistribution and use in source and binary forms, with or without\n+* modification, are permitted provided that the following conditions\n+* are met:\n+*\n+* * Redistributions of source code must retain the above copyright\n+* notice, this list of conditions and the following disclaimer.\n+* * Redistributions in binary form must reproduce the above copyright\n+* notice, this list of conditions and the following disclaimer in\n+* the documentation and/or other materials provided with the\n+* distribution.\n+* * Neither the name of copyright holder nor the names of its\n+* contributors may be used to endorse or promote products derived\n+* from this software without specific prior written permission.\n+*\n+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+* \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+* A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+*/\n+\n+#ifndef _ENA_ETHDEV_H_\n+#define _ENA_ETHDEV_H_\n+\n+#include <rte_pci.h>\n+\n+#include \"ena_com.h\"\n+\n+#define ENA_REGS_BAR\t0\n+#define ENA_MEM_BAR\t2\n+\n+#define ENA_MAX_NUM_QUEUES\t128\n+\n+#define ENA_DEFAULT_TX_SW_DESCS\t(1024)\n+#define ENA_DEFAULT_TX_HW_DESCS\t(1024)\n+#define ENA_DEFAULT_RING_SIZE\t(1024)\n+\n+#define ENA_MIN_FRAME_LEN\t64\n+\n+#define ENA_NAME_MAX_LEN     20\n+#define ENA_IRQNAME_SIZE     40\n+\n+#define ENA_PKT_MAX_BUFS     17\n+\n+#define\tENA_CIRC_COUNT(head, tail, size)\t\t\t\t\\\n+\t(((uint16_t)((uint16_t)(head) - (uint16_t)(tail))) & ((size) - 1))\n+\n+#define ENA_CIRC_INC(index, step, size)\t\t\t\t\t\\\n+\t((uint16_t)(index) + (uint16_t)(step))\n+#define\tENA_CIRC_INC_WRAP(index, step, size)\t\t\t\t\\\n+\t(((uint16_t)(index) + (uint16_t)(step))\t& ((size) - 1))\n+\n+#define\tENA_TX_RING_IDX_NEXT(idx, ring_size)\t\t\t\t\\\n+\t\tENA_CIRC_INC_WRAP(idx, 1, ring_size)\n+#define\tENA_RX_RING_IDX_NEXT(idx, ring_size)\t\t\t\t\\\n+\t\tENA_CIRC_INC_WRAP(idx, 1, ring_size)\n+\n+struct ena_adapter;\n+\n+enum ena_ring_type {\n+\tENA_RING_TYPE_RX = 1,\n+\tENA_RING_TYPE_TX = 2,\n+};\n+\n+struct ena_tx_buffer {\n+\tstruct rte_mbuf *mbuf;\n+\tunsigned int tx_descs;\n+\tunsigned int num_of_bufs;\n+\tstruct ena_com_buf bufs[ENA_PKT_MAX_BUFS];\n+};\n+\n+struct ena_ring {\n+\tu16 next_to_use;\n+\tu16 next_to_clean;\n+\n+\tenum ena_ring_type type;\n+\tenum ena_admin_placement_policy_type tx_mem_queue_type;\n+\t/* Holds the empty requests for TX OOO completions */\n+\tuint16_t *empty_tx_reqs;\n+\tunion {\n+\t\tstruct ena_tx_buffer *tx_buffer_info; /* context of tx packet */\n+\t\tstruct rte_mbuf **rx_buffer_info; /* context of rx packet */\n+\t};\n+\tunsigned int ring_size; /* number of tx/rx_buffer_info entries */\n+\n+\tstruct ena_com_io_cq *ena_com_io_cq;\n+\tstruct ena_com_io_sq *ena_com_io_sq;\n+\n+\tstruct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]\n+\t\t\t\t\t\t__rte_cache_aligned;\n+\n+\tstruct rte_mempool *mb_pool;\n+\tunsigned int port_id;\n+\tunsigned int id;\n+\t/* Max length PMD can push to device for LLQ */\n+\tuint8_t tx_max_header_size;\n+\tint configured;\n+\tstruct ena_adapter *adapter;\n+} __rte_cache_aligned;\n+\n+enum ena_adapter_state {\n+\tENA_ADAPTER_STATE_FREE    = 0,\n+\tENA_ADAPTER_STATE_INIT    = 1,\n+\tENA_ADAPTER_STATE_RUNNING = 2,\n+\tENA_ADAPTER_STATE_STOPPED = 3,\n+\tENA_ADAPTER_STATE_CONFIG  = 4,\n+};\n+\n+struct ena_driver_stats {\n+\trte_atomic64_t ierrors;\n+\trte_atomic64_t oerrors;\n+\trte_atomic64_t imcasts;\n+\trte_atomic64_t rx_nombuf;\n+};\n+\n+/* board-specific private data structure */\n+struct ena_adapter {\n+\t/* OS defined structs */\n+\tstruct rte_pci_device *pdev;\n+\tstruct rte_eth_dev_data *rte_eth_dev_data;\n+\tstruct rte_eth_dev *rte_dev;\n+\n+\tstruct ena_com_dev ena_dev __rte_cache_aligned;\n+\n+\t/* TX */\n+\tstruct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;\n+\tint tx_ring_size;\n+\n+\t/* RX */\n+\tstruct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] 
__rte_cache_aligned;\n+\tint rx_ring_size;\n+\n+\tu16 num_queues;\n+\tu16 max_mtu;\n+\n+\tint id_number;\n+\tchar name[ENA_NAME_MAX_LEN];\n+\tu8 mac_addr[ETHER_ADDR_LEN];\n+\n+\tvoid *regs;\n+\tvoid *dev_mem_base;\n+\n+\tstruct ena_driver_stats *drv_stats;\n+\tenum ena_adapter_state state;\n+\n+};\n+\n+#endif /* _ENA_ETHDEV_H_ */\ndiff --git a/drivers/net/ena/ena_logs.h b/drivers/net/ena/ena_logs.h\nnew file mode 100644\nindex 0000000..a059058\n--- /dev/null\n+++ b/drivers/net/ena/ena_logs.h\n@@ -0,0 +1,74 @@\n+/*-\n+* BSD LICENSE\n+*\n+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.\n+* All rights reserved.\n+*\n+* Redistribution and use in source and binary forms, with or without\n+* modification, are permitted provided that the following conditions\n+* are met:\n+*\n+* * Redistributions of source code must retain the above copyright\n+* notice, this list of conditions and the following disclaimer.\n+* * Redistributions in binary form must reproduce the above copyright\n+* notice, this list of conditions and the following disclaimer in\n+* the documentation and/or other materials provided with the\n+* distribution.\n+* * Neither the name of copyright holder nor the names of its\n+* contributors may be used to endorse or promote products derived\n+* from this software without specific prior written permission.\n+*\n+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+* \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+*/\n+\n+#ifndef _ENA_LOGS_H_\n+#define _ENA_LOGS_H_\n+\n+#define RTE_LOGTYPE_ENA RTE_LOGTYPE_USER1\n+\n+#ifdef RTE_LIBRTE_ENA_DEBUG_INIT\n+#define PMD_INIT_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_ENA_DEBUG_RX\n+#define PMD_RX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_ENA_DEBUG_TX\n+#define PMD_TX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE\n+#define PMD_TX_FREE_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_ENA_DEBUG_DRIVER\n+#define PMD_DRV_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_DRV_LOG(level, fmt, args...) 
do { } while (0)\n+#endif\n+\n+#endif /* _ENA_LOGS_H_ */\ndiff --git a/drivers/net/ena/ena_platform.h b/drivers/net/ena/ena_platform.h\nnew file mode 100644\nindex 0000000..0df82d6\n--- /dev/null\n+++ b/drivers/net/ena/ena_platform.h\n@@ -0,0 +1,59 @@\n+/*-\n+* BSD LICENSE\n+*\n+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.\n+* All rights reserved.\n+*\n+* Redistribution and use in source and binary forms, with or without\n+* modification, are permitted provided that the following conditions\n+* are met:\n+*\n+* * Redistributions of source code must retain the above copyright\n+* notice, this list of conditions and the following disclaimer.\n+* * Redistributions in binary form must reproduce the above copyright\n+* notice, this list of conditions and the following disclaimer in\n+* the documentation and/or other materials provided with the\n+* distribution.\n+* * Neither the name of copyright holder nor the names of its\n+* contributors may be used to endorse or promote products derived\n+* from this software without specific prior written permission.\n+*\n+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+* \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+*/\n+\n+#ifndef __ENA_PLATFORM_H__\n+#define __ENA_PLATFORM_H__\n+\n+#define swap16_to_le(x)\t\t(x)\n+\n+#define swap32_to_le(x)\t\t(x)\n+\n+#define swap64_to_le(x)\t\t(x)\n+\n+#define swap16_from_le(x)       (x)\n+\n+#define swap32_from_le(x)\t(x)\n+\n+#define swap64_from_le(x)\t(x)\n+\n+#define ena_assert_msg(cond, msg)\t\t\\\n+\tdo {\t\t\t\t\t\\\n+\t\tif (unlikely(!(cond))) {\t\\\n+\t\t\tRTE_LOG(ERR, ENA,\t\\\n+\t\t\t\t\"Assert failed on %s:%s:%d: \",\t\\\n+\t\t\t\t__FILE__, __func__, __LINE__);\t\\\n+\t\t\trte_panic(msg);\t\t\\\n+\t\t}\t\t\t\t\\\n+\t} while (0)\n+\n+#endif /* __ENA_PLATFORM_H__ */\ndiff --git a/drivers/net/ena/rte_pmd_ena_version.map b/drivers/net/ena/rte_pmd_ena_version.map\nnew file mode 100644\nindex 0000000..349c6e1\n--- /dev/null\n+++ b/drivers/net/ena/rte_pmd_ena_version.map\n@@ -0,0 +1,4 @@\n+DPDK_16.04 {\n+\n+\tlocal: *;\n+};\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex a1cd9a3..72adc26 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -138,6 +138,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD)       += -lrte_pmd_i40e\n _LDLIBS-$(CONFIG_RTE_LIBRTE_FM10K_PMD)      += -lrte_pmd_fm10k\n _LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)      += -lrte_pmd_ixgbe\n _LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD)      += -lrte_pmd_e1000\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD)        += -lrte_pmd_ena\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -lrte_pmd_mlx4\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5\n _LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD)        += -lrte_pmd_nfp\n",
    "prefixes": [
        "dpdk-dev",
        "v6",
        "4/4"
    ]
}