get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/1462/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1462,
    "url": "https://patches.dpdk.org/api/patches/1462/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1416758899-1351-5-git-send-email-ssujith@cisco.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1416758899-1351-5-git-send-email-ssujith@cisco.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1416758899-1351-5-git-send-email-ssujith@cisco.com",
    "date": "2014-11-23T16:08:17",
    "name": "[dpdk-dev,v3,4/6] ENIC PMD specific code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "627b36ae1e87b770fe875bce240d353a5007fd66",
    "submitter": {
        "id": 110,
        "url": "https://patches.dpdk.org/api/people/110/?format=api",
        "name": "Sujith Sankar",
        "email": "ssujith@cisco.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1416758899-1351-5-git-send-email-ssujith@cisco.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/1462/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/1462/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 306E77FDD;\n\tSun, 23 Nov 2014 07:29:13 +0100 (CET)",
            "from bgl-iport-1.cisco.com (bgl-iport-1.cisco.com [72.163.197.25])\n\tby dpdk.org (Postfix) with ESMTP id 6D95D7FD2\n\tfor <dev@dpdk.org>; Sun, 23 Nov 2014 07:28:53 +0100 (CET)",
            "from vla196-nat.cisco.com (HELO bgl-core-4.cisco.com)\n\t([72.163.197.24])\n\tby bgl-iport-1.cisco.com with ESMTP; 23 Nov 2014 06:39:35 +0000",
            "from localhost ([10.106.186.168]) (authenticated bits=0)\n\tby bgl-core-4.cisco.com (8.14.5/8.14.5) with ESMTP id sAN6dX8O008501\n\t(version=TLSv1/SSLv3 cipher=DHE-RSA-AES128-SHA bits=128 verify=NO);\n\tSun, 23 Nov 2014 06:39:35 GMT"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n\td=cisco.com; i=@cisco.com; l=65960; q=dns/txt;\n\ts=iport; t=1416724776; x=1417934376;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=ryk5HFL9dxptiGejOv8E7fybE4ZL0Q/hwdzBtsHHukc=;\n\tb=nF3vBNA/xLlElolP7cYO+bAOAAJ1bq5K7WwGMsvmtU8G7Ina0uXYOls+\n\tRglUUt770ytRRaYZXXrJQuA7Onq8lg0nl3A+YvpJXsfOtO0lmF0QDYdEz\n\tpA3C1JHjagNquhOdUEZ6T4j6pGxD5lTnt5BG+vvQ2PQsRhhCX6ueQk10l o=;",
        "X-IronPort-AV": "E=Sophos;i=\"5.07,442,1413244800\"; d=\"scan'208\";a=\"47738599\"",
        "From": "Sujith Sankar <ssujith@cisco.com>",
        "To": "dev@dpdk.org",
        "Date": "Sun, 23 Nov 2014 21:38:17 +0530",
        "Message-Id": "<1416758899-1351-5-git-send-email-ssujith@cisco.com>",
        "X-Mailer": "git-send-email 1.9.1",
        "In-Reply-To": "<1416758899-1351-1-git-send-email-ssujith@cisco.com>",
        "References": "<1416758899-1351-1-git-send-email-ssujith@cisco.com>",
        "X-Authenticated-User": "ssujith@cisco.com",
        "Cc": "prrao@cisco.com",
        "Subject": "[dpdk-dev] [PATCH v3 4/6] ENIC PMD specific code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Sujith Sankar <ssujith@cisco.com>\n---\n lib/librte_pmd_enic/enic.h        |  158 +++++\n lib/librte_pmd_enic/enic_clsf.c   |  244 +++++++\n lib/librte_pmd_enic/enic_compat.h |  142 ++++\n lib/librte_pmd_enic/enic_main.c   | 1328 +++++++++++++++++++++++++++++++++++++\n lib/librte_pmd_enic/enic_res.c    |  221 ++++++\n lib/librte_pmd_enic/enic_res.h    |  168 +++++\n 6 files changed, 2261 insertions(+)\n create mode 100644 lib/librte_pmd_enic/enic.h\n create mode 100644 lib/librte_pmd_enic/enic_clsf.c\n create mode 100644 lib/librte_pmd_enic/enic_compat.h\n create mode 100644 lib/librte_pmd_enic/enic_main.c\n create mode 100644 lib/librte_pmd_enic/enic_res.c\n create mode 100644 lib/librte_pmd_enic/enic_res.h",
    "diff": "diff --git a/lib/librte_pmd_enic/enic.h b/lib/librte_pmd_enic/enic.h\nnew file mode 100644\nindex 0000000..b72c048\n--- /dev/null\n+++ b/lib/librte_pmd_enic/enic.h\n@@ -0,0 +1,158 @@\n+/*\n+ * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.\n+ *\n+ * Copyright (c) 2014, Cisco Systems, Inc. \n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in\n+ * the documentation and/or other materials provided with the\n+ * distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ */\n+#ident \"$Id$\"\n+\n+#ifndef _ENIC_H_\n+#define _ENIC_H_\n+\n+#include \"vnic_enet.h\"\n+#include \"vnic_dev.h\"\n+#include \"vnic_wq.h\"\n+#include \"vnic_rq.h\"\n+#include \"vnic_cq.h\"\n+#include \"vnic_intr.h\"\n+#include \"vnic_stats.h\"\n+#include \"vnic_nic.h\"\n+#include \"vnic_rss.h\"\n+#include \"enic_res.h\"\n+\n+#define DRV_NAME\t\t\"enic_pmd\"\n+#define DRV_DESCRIPTION\t\t\"Cisco VIC Ethernet NIC Poll-mode Driver\"\n+#define DRV_VERSION\t\t\"1.0.0.4\"\n+#define DRV_COPYRIGHT\t\t\"Copyright 2008-2014 Cisco Systems, Inc\"\n+\n+#define ENIC_WQ_MAX\t\t8\n+#define ENIC_RQ_MAX\t\t8\n+#define ENIC_CQ_MAX\t\t(ENIC_WQ_MAX + ENIC_RQ_MAX)\n+#define ENIC_INTR_MAX\t\t(ENIC_CQ_MAX + 2)\n+\n+#define VLAN_ETH_HLEN           18\n+\n+#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0)\n+\n+#define ENICPMD_BDF_LENGTH      13   /* 0000:00:00.0'\\0' */\n+#define PKT_TX_TCP_UDP_CKSUM    0x6000\n+#define ENIC_CALC_IP_CKSUM      1\n+#define ENIC_CALC_TCP_UDP_CKSUM 2\n+#define ENIC_MAX_MTU            9000\n+#define PAGE_SIZE               4096\n+#define PAGE_ROUND_UP(x) \\\n+\t((((unsigned long)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))\n+\n+#define ENICPMD_VFIO_PATH          \"/dev/vfio/vfio\"\n+/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/\n+\n+#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */\n+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */\n+\n+\n+#define ENICPMD_FDIR_MAX           64\n+\n+struct enic_fdir_node {\n+\tstruct rte_fdir_filter filter;\n+\tu16 fltr_id;\n+\tu16 rq_index;\n+};\n+\n+struct enic_fdir {\n+\tstruct rte_eth_fdir stats;\n+\tstruct rte_hash *hash;\n+\tstruct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];\n+};\n+\n+/* Per-instance private data structure */\n+struct enic {\n+\tstruct enic *next;\n+\tstruct rte_pci_device *pdev;\n+\tstruct vnic_enet_config config;\n+\tstruct vnic_dev_bar bar0;\n+\tstruct vnic_dev *vdev;\n+\n+\tstruct rte_eth_dev *rte_dev;\n+\tstruct enic_fdir fdir;\n+\tchar bdf_name[ENICPMD_BDF_LENGTH];\n+\tint dev_fd;\n+\tint iommu_group_fd;\n+\tint vfio_fd;\n+\tint iommu_groupid;\n+\tint eventfd;\n+\tu_int8_t mac_addr[ETH_ALEN];\n+\tpthread_t err_intr_thread;\n+\tint promisc;\n+\tint allmulti;\n+\tint ig_vlan_strip_en;\n+\tint link_status;\n+\tu8 hw_ip_checksum;\n+\n+\tunsigned int flags;\n+\tunsigned int priv_flags;\n+\n+\t/* work queue */\n+\tstruct vnic_wq wq[ENIC_WQ_MAX];\n+\tunsigned int wq_count;\n+\n+\t/* receive queue */\n+\tstruct vnic_rq rq[ENIC_RQ_MAX];\n+\tunsigned int rq_count;\n+\n+\t/* completion queue */\n+\tstruct vnic_cq cq[ENIC_CQ_MAX];\n+\tunsigned int cq_count;\n+\n+\t/* interrupt resource */\n+\tstruct vnic_intr intr;\n+\tunsigned int intr_count;\n+};\n+\n+static inline unsigned int enic_cq_rq(struct enic 
*enic, unsigned int rq)\n+{\n+\treturn rq;\n+}\n+\n+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)\n+{\n+\treturn enic->rq_count + wq;\n+}\n+\n+static inline unsigned int enic_msix_err_intr(struct enic *enic)\n+{\n+\treturn 0;\n+}\n+\n+static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)\n+{\n+\treturn (struct enic *)eth_dev->data->dev_private;\n+}\n+\n+#endif /* _ENIC_H_ */\ndiff --git a/lib/librte_pmd_enic/enic_clsf.c b/lib/librte_pmd_enic/enic_clsf.c\nnew file mode 100644\nindex 0000000..3ee87c3\n--- /dev/null\n+++ b/lib/librte_pmd_enic/enic_clsf.c\n@@ -0,0 +1,244 @@\n+/*\n+ * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.\n+ *\n+ * Copyright (c) 2014, Cisco Systems, Inc. \n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in\n+ * the documentation and/or other materials provided with the\n+ * distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ */\n+#ident \"$Id$\"\n+\n+#include <libgen.h>\n+\n+#include <rte_ethdev.h>\n+#include <rte_malloc.h>\n+#include <rte_hash.h>\n+#include <rte_byteorder.h>\n+\n+#include \"enic_compat.h\"\n+#include \"enic.h\"\n+#include \"wq_enet_desc.h\"\n+#include \"rq_enet_desc.h\"\n+#include \"cq_enet_desc.h\"\n+#include \"vnic_enet.h\"\n+#include \"vnic_dev.h\"\n+#include \"vnic_wq.h\"\n+#include \"vnic_rq.h\"\n+#include \"vnic_cq.h\"\n+#include \"vnic_intr.h\"\n+#include \"vnic_nic.h\"\n+\n+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2\n+#include <rte_hash_crc.h>\n+#define DEFAULT_HASH_FUNC       rte_hash_crc\n+#else\n+#include <rte_jhash.h>\n+#define DEFAULT_HASH_FUNC       rte_jhash\n+#endif\n+\n+#define SOCKET_0                0\n+#define ENICPMD_CLSF_HASH_ENTRIES       ENICPMD_FDIR_MAX\n+#define ENICPMD_CLSF_BUCKET_ENTRIES     4\n+\n+int enic_fdir_del_fltr(struct enic *enic, struct rte_fdir_filter *params)\n+{\n+\tint32_t pos;\n+\tstruct enic_fdir_node *key;\n+\t/* See if the key is in the table */\n+\tpos = rte_hash_del_key(enic->fdir.hash, params);\n+\tswitch (pos) {\n+\tcase -EINVAL:\n+\tcase -ENOENT:\n+\t\tenic->fdir.stats.f_remove++;\n+\t\treturn -EINVAL;\n+\tdefault:\n+\t\t/* The entry is present in the table */\n+\t\tkey = enic->fdir.nodes[pos];\n+\n+\t\t/* Delete the filter */\n+\t\tvnic_dev_classifier(enic->vdev, CLSF_DEL,\n+\t\t\t&key->fltr_id, NULL);\n+\t\trte_free(key);\n+\t\tenic->fdir.nodes[pos] = 
NULL;\n+\t\tenic->fdir.stats.free++;\n+\t\tenic->fdir.stats.remove++;\n+\t\tbreak;\n+\t}\n+\treturn 0;\n+}\n+\n+int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,\n+\tu16 queue, u8 drop)\n+{\n+\tstruct enic_fdir_node *key;\n+\tstruct filter fltr = {0};\n+\tint32_t pos;\n+\tu8 do_free = 0;\n+\tu16 old_fltr_id = 0;\n+\n+\tif (!enic->fdir.hash || params->vlan_id || !params->l4type ||\n+\t\t(RTE_FDIR_IPTYPE_IPV6 == params->iptype) ||\n+\t\t(RTE_FDIR_L4TYPE_SCTP == params->l4type) ||\n+\t\tparams->flex_bytes || drop) {\n+\t\tenic->fdir.stats.f_add++;\n+\t\treturn -ENOTSUP;\n+\t}\n+\n+\t/* See if the key is already there in the table */\n+\tpos = rte_hash_del_key(enic->fdir.hash, params);\n+\tswitch (pos) {\n+\tcase -EINVAL:\n+\t\tenic->fdir.stats.f_add++;\n+\t\treturn -EINVAL;\n+\tcase -ENOENT:\n+\t\t/* Add a new classifier entry */\n+\t\tif (!enic->fdir.stats.free) {\n+\t\t\tenic->fdir.stats.f_add++;\n+\t\t\treturn -ENOSPC;\n+\t\t}\n+\t\tkey = (struct enic_fdir_node *)rte_zmalloc(\n+\t\t\t\"enic_fdir_node\",\n+\t\t\tsizeof(struct enic_fdir_node), 0);\n+\t\tif (!key) {\n+\t\t\tenic->fdir.stats.f_add++;\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\t/* The entry is already present in the table.\n+\t\t * Check if there is a change in queue\n+\t\t */\n+\t\tkey = enic->fdir.nodes[pos];\n+\t\tenic->fdir.nodes[pos] = NULL;\n+\t\tif (unlikely(key->rq_index == queue)) {\n+\t\t\t/* Nothing to be done */\n+\t\t\tpos = rte_hash_add_key(enic->fdir.hash, params);\n+\t\t\tenic->fdir.nodes[pos] = key;\n+\t\t\tenic->fdir.stats.f_add++;\n+\t\t\tdev_warning(enic,\n+\t\t\t\t\"FDIR rule is already present\\n\");\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\tif (likely(enic->fdir.stats.free)) {\n+\t\t\t/* Add the filter and then delete the old one.\n+\t\t\t * This is to avoid packets from going into the\n+\t\t\t * default queue during the window between\n+\t\t\t * delete and add\n+\t\t\t */\n+\t\t\tdo_free = 1;\n+\t\t\told_fltr_id = key->fltr_id;\n+\t\t} else 
{\n+\t\t\t/* No free slots in the classifier.\n+\t\t\t * Delete the filter and add the modified one later\n+\t\t\t */\n+\t\t\tvnic_dev_classifier(enic->vdev, CLSF_DEL,\n+\t\t\t\t&key->fltr_id, NULL);\n+\t\t\tenic->fdir.stats.free++;\n+\t\t}\n+\n+\t\tbreak;\n+\t}\n+\n+\tkey->filter = *params;\n+\tkey->rq_index = queue;\n+\n+\tfltr.type = FILTER_IPV4_5TUPLE;\n+\tfltr.u.ipv4.src_addr = rte_be_to_cpu_32(params->ip_src.ipv4_addr);\n+\tfltr.u.ipv4.dst_addr = rte_be_to_cpu_32(params->ip_dst.ipv4_addr);\n+\tfltr.u.ipv4.src_port = rte_be_to_cpu_16(params->port_src);\n+\tfltr.u.ipv4.dst_port = rte_be_to_cpu_16(params->port_dst);\n+\n+\tif (RTE_FDIR_L4TYPE_TCP == params->l4type)\n+\t\tfltr.u.ipv4.protocol = PROTO_TCP;\n+\telse\n+\t\tfltr.u.ipv4.protocol = PROTO_UDP;\n+\n+\tfltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;\n+\n+\tif (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {\n+\t\tkey->fltr_id = queue;\n+\t} else {\n+\t\tdev_err(enic, \"Add classifier entry failed\\n\");\n+\t\tenic->fdir.stats.f_add++;\n+\t\trte_free(key);\n+\t\treturn -1;\n+\t}\n+\n+\tif (do_free)\n+\t\tvnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);\n+\telse{\n+\t\tenic->fdir.stats.free--;\n+\t\tenic->fdir.stats.add++;\n+\t}\n+\n+\tpos = rte_hash_add_key(enic->fdir.hash, (void *)key);\n+\tenic->fdir.nodes[pos] = key;\n+\treturn 0;\n+}\n+\n+void enic_clsf_destroy(struct enic *enic)\n+{\n+\tu32 index;\n+\tstruct enic_fdir_node *key;\n+\t/* delete classifier entries */\n+\tfor (index = 0; index < ENICPMD_FDIR_MAX; index++) {\n+\t\tkey = enic->fdir.nodes[index];\n+\t\tif (key) {\n+\t\t\tvnic_dev_classifier(enic->vdev, CLSF_DEL,\n+\t\t\t\t&key->fltr_id, NULL);\n+\t\t\trte_free(key);\n+\t\t}\n+\t}\n+\n+\tif (enic->fdir.hash) {\n+\t\trte_hash_free(enic->fdir.hash);\n+\t\tenic->fdir.hash = NULL;\n+\t}\n+}\n+\n+int enic_clsf_init(struct enic *enic)\n+{\n+\tstruct rte_hash_parameters hash_params = {\n+\t\t.name = \"enicpmd_clsf_hash\",\n+\t\t.entries = 
ENICPMD_CLSF_HASH_ENTRIES,\n+\t\t.bucket_entries = ENICPMD_CLSF_BUCKET_ENTRIES,\n+\t\t.key_len = sizeof(struct rte_fdir_filter),\n+\t\t.hash_func = DEFAULT_HASH_FUNC,\n+\t\t.hash_func_init_val = 0,\n+\t\t.socket_id = SOCKET_0,\n+\t};\n+\n+\tenic->fdir.hash = rte_hash_create(&hash_params);\n+\tmemset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));\n+\tenic->fdir.stats.free = ENICPMD_FDIR_MAX;\n+\treturn (NULL == enic->fdir.hash);\n+}\n+\n+\n+\ndiff --git a/lib/librte_pmd_enic/enic_compat.h b/lib/librte_pmd_enic/enic_compat.h\nnew file mode 100644\nindex 0000000..d962904\n--- /dev/null\n+++ b/lib/librte_pmd_enic/enic_compat.h\n@@ -0,0 +1,142 @@\n+/*\n+ * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.\n+ *\n+ * Copyright (c) 2014, Cisco Systems, Inc. \n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in\n+ * the documentation and/or other materials provided with the\n+ * distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ */\n+#ident \"$Id$\"\n+\n+#ifndef _ENIC_COMPAT_H_\n+#define _ENIC_COMPAT_H_\n+\n+#include <stdio.h>\n+\n+#include <rte_atomic.h>\n+#include <rte_malloc.h>\n+\n+#define ENIC_PAGE_ALIGN 4096ULL\n+#define ENIC_ALIGN      ENIC_PAGE_ALIGN\n+#define NAME_MAX        255\n+#define ETH_ALEN        6\n+\n+#define __iomem\n+\n+#define rmb()     rte_rmb() /* dpdk rte provided rmb */\n+#define wmb()     rte_wmb() /* dpdk rte provided wmb */\n+\n+#define le16_to_cpu\n+#define le32_to_cpu\n+#define le64_to_cpu\n+#define cpu_to_le16\n+#define cpu_to_le32\n+#define cpu_to_le64\n+\n+#ifndef offsetof\n+#define offsetof(t, m) ((size_t) &((t *)0)->m)\n+#endif\n+\n+#define pr_err(y, args...) dev_err(0, y, ##args)\n+#define pr_warn(y, args...) dev_warning(0, y, ##args)\n+#define BUG() pr_err(\"BUG at %s:%d\", __func__, __LINE__)\n+\n+#define ALIGN(x, a)              __ALIGN_MASK(x, (typeof(x))(a)-1)\n+#define __ALIGN_MASK(x, mask)    (((x)+(mask))&~(mask))\n+#define udelay usleep\n+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))\n+\n+#define kzalloc(size, flags) calloc(1, size)\n+#define kfree(x) free(x)\n+\n+#define dev_err(x, args...) printf(\"rte_enic_pmd : Error - \" args)\n+#define dev_info(x, args...) printf(\"rte_enic_pmd: Info - \" args)\n+#define dev_warning(x, args...) printf(\"rte_enic_pmd: Warning - \" args)\n+#define dev_trace(x, args...) 
printf(\"rte_enic_pmd: Trace - \" args)\n+\n+#define __le16 u16\n+#define __le32 u32\n+#define __le64 u64\n+\n+typedef\t\tunsigned char       u8;\n+typedef\t\tunsigned short      u16;\n+typedef\t\tunsigned int        u32;\n+typedef         unsigned long long  u64;\n+typedef         unsigned long long  dma_addr_t;\n+\n+static inline u_int32_t ioread32(volatile void *addr)\n+{\n+\treturn *(volatile u_int32_t *)addr;\n+}\n+\n+static inline u16 ioread16(volatile void *addr)\n+{\n+\treturn *(volatile u16 *)addr;\n+}\n+\n+static inline u_int8_t ioread8(volatile void *addr)\n+{\n+\treturn *(volatile u_int8_t *)addr;\n+}\n+\n+static inline void iowrite32(u_int32_t val, volatile void *addr)\n+{\n+\t*(volatile u_int32_t *)addr = val;\n+}\n+\n+static inline void iowrite16(u16 val, volatile void *addr)\n+{\n+\t*(volatile u16 *)addr = val;\n+}\n+\n+static inline void iowrite8(u_int8_t val, volatile void *addr)\n+{\n+\t*(volatile u_int8_t *)addr = val;\n+}\n+\n+static inline unsigned int readl(volatile void __iomem *addr)\n+{\n+\treturn *(volatile unsigned int *)addr;\n+}\n+\n+static inline void writel(unsigned int val, volatile void __iomem *addr)\n+{\n+\t*(volatile unsigned int *)addr = val;\n+}\n+\n+#define min_t(type, x, y) ({                    \\\n+\ttype __min1 = (x);                      \\\n+\ttype __min2 = (y);                      \\\n+\t__min1 < __min2 ? __min1 : __min2; })\n+\n+#define max_t(type, x, y) ({                    \\\n+\ttype __max1 = (x);                      \\\n+\ttype __max2 = (y);                      \\\n+\t__max1 > __max2 ? __max1 : __max2; })\n+\n+#endif /* _ENIC_COMPAT_H_ */\ndiff --git a/lib/librte_pmd_enic/enic_main.c b/lib/librte_pmd_enic/enic_main.c\nnew file mode 100644\nindex 0000000..e1f81bd\n--- /dev/null\n+++ b/lib/librte_pmd_enic/enic_main.c\n@@ -0,0 +1,1328 @@\n+/*\n+ * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  
All rights reserved.\n+ *\n+ * Copyright (c) 2014, Cisco Systems, Inc. \n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in\n+ * the documentation and/or other materials provided with the\n+ * distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ */\n+#ident \"$Id$\"\n+\n+#include <stdio.h>\n+\n+#include <sys/stat.h>\n+#include <sys/mman.h>\n+#include <fcntl.h>\n+#include <libgen.h>\n+#ifdef RTE_EAL_VFIO\n+#include <linux/vfio.h>\n+#endif\n+\n+#include <rte_pci.h>\n+#include <rte_memzone.h>\n+#include <rte_malloc.h>\n+#include <rte_mbuf.h>\n+#include <rte_string_fns.h>\n+#include <rte_ethdev.h>\n+\n+#include \"enic_compat.h\"\n+#include \"enic.h\"\n+#include \"wq_enet_desc.h\"\n+#include \"rq_enet_desc.h\"\n+#include \"cq_enet_desc.h\"\n+#include 
\"vnic_enet.h\"\n+#include \"vnic_dev.h\"\n+#include \"vnic_wq.h\"\n+#include \"vnic_rq.h\"\n+#include \"vnic_cq.h\"\n+#include \"vnic_intr.h\"\n+#include \"vnic_nic.h\"\n+\n+static inline int enic_is_sriov_vf(struct enic *enic)\n+{\n+\treturn enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;\n+}\n+\n+static int is_zero_addr(char *addr)\n+{\n+\treturn !(addr[0] |  addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);\n+}\n+\n+static int is_mcast_addr(char *addr)\n+{\n+\treturn addr[0] & 1;\n+}\n+\n+static int is_eth_addr_valid(char *addr)\n+{\n+\treturn !is_mcast_addr(addr) && !is_zero_addr(addr);\n+}\n+\n+static inline struct rte_mbuf *\n+enic_rxmbuf_alloc(struct rte_mempool *mp)\n+{\n+\tstruct rte_mbuf *m;\n+\n+\tm = __rte_mbuf_raw_alloc(mp);\n+\t__rte_mbuf_sanity_check_raw(m, 0);\n+\treturn m;\n+}\n+\n+static const struct rte_memzone *ring_dma_zone_reserve(\n+\tstruct rte_eth_dev *dev, const char *ring_name,\n+\tuint16_t queue_id, uint32_t ring_size, int socket_id)\n+{\n+\tchar z_name[RTE_MEMZONE_NAMESIZE];\n+\tconst struct rte_memzone *mz;\n+\n+\tsnprintf(z_name, sizeof(z_name), \"%s_%s_%d_%d\",\n+\t\tdev->driver->pci_drv.name, ring_name,\n+\t\tdev->data->port_id, queue_id);\n+\n+\tmz = rte_memzone_lookup(z_name);\n+\tif (mz)\n+\t\treturn mz;\n+\n+\treturn rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,\n+\t\tsocket_id, RTE_MEMZONE_1GB, ENIC_ALIGN);\n+}\n+\n+void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)\n+{\n+\tvnic_set_hdr_split_size(enic->vdev, split_hdr_size);\n+}\n+\n+static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)\n+{\n+\tstruct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;\n+\n+\trte_mempool_put(mbuf->pool, mbuf);\n+\tbuf->os_buf = NULL;\n+}\n+\n+static void enic_wq_free_buf(struct vnic_wq *wq,\n+\tstruct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)\n+{\n+\tenic_free_wq_buf(wq, buf);\n+}\n+\n+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,\n+\tu8 
type, u16 q_number, u16 completed_index, void *opaque)\n+{\n+\tstruct enic *enic = vnic_dev_priv(vdev);\n+\n+\tvnic_wq_service(&enic->wq[q_number], cq_desc,\n+\t\tcompleted_index, enic_wq_free_buf,\n+\t\topaque);\n+\n+\treturn 0;\n+}\n+\n+static void enic_log_q_error(struct enic *enic)\n+{\n+\tunsigned int i;\n+\tu32 error_status;\n+\n+\tfor (i = 0; i < enic->wq_count; i++) {\n+\t\terror_status = vnic_wq_error_status(&enic->wq[i]);\n+\t\tif (error_status)\n+\t\t\tdev_err(enic, \"WQ[%d] error_status %d\\n\", i,\n+\t\t\t\terror_status);\n+\t}\n+\n+\tfor (i = 0; i < enic->rq_count; i++) {\n+\t\terror_status = vnic_rq_error_status(&enic->rq[i]);\n+\t\tif (error_status)\n+\t\t\tdev_err(enic, \"RQ[%d] error_status %d\\n\", i,\n+\t\t\t\terror_status);\n+\t}\n+}\n+\n+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)\n+{\n+\tunsigned int cq = enic_cq_wq(enic, wq->index);\n+\n+\t/* Return the work done */\n+\treturn vnic_cq_service(&enic->cq[cq],\n+\t\t-1 /*wq_work_to_do*/, enic_wq_service, NULL);\n+}\n+\n+\n+int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,\n+\tstruct rte_mbuf *tx_pkt, unsigned short len,\n+\tu_int8_t sop, u_int8_t eop,\n+\tu_int16_t ol_flags, u_int16_t vlan_tag)\n+{\n+\tstruct wq_enet_desc *desc = vnic_wq_next_desc(wq);\n+\tu_int16_t mss = 0;\n+\tu_int16_t header_length = 0;\n+\tu_int8_t cq_entry = eop;\n+\tu_int8_t vlan_tag_insert = 0;\n+\tunsigned char *buf = (unsigned char *)(tx_pkt->buf_addr) +\n+\t    RTE_PKTMBUF_HEADROOM;\n+#ifdef RTE_EAL_VFIO\n+\tu_int64_t bus_addr = (unsigned long)buf;  /* must have IOMMU */\n+#else\n+\tu_int64_t bus_addr = (dma_addr_t)\n+\t    (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);\n+#endif\n+\n+\tif (sop) {\n+\t\tif (ol_flags & PKT_TX_VLAN_PKT)\n+\t\t\tvlan_tag_insert = 1;\n+\n+\t\tif (enic->hw_ip_checksum) {\n+\t\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\t\tmss |= ENIC_CALC_IP_CKSUM;\n+\n+\t\t\tif (ol_flags & PKT_TX_TCP_UDP_CKSUM)\n+\t\t\t\tmss |= 
ENIC_CALC_TCP_UDP_CKSUM;\n+\t\t}\n+\t}\n+\n+\twq_enet_desc_enc(desc,\n+\t\tbus_addr,\n+\t\tlen,\n+\t\tmss,\n+\t\t0 /* header_length */,\n+\t\t0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,\n+\t\teop,\n+\t\tcq_entry,\n+\t\t0 /* fcoe_encap */,\n+\t\tvlan_tag_insert,\n+\t\tvlan_tag,\n+\t\t0 /* loopback */);\n+\n+\tvnic_wq_post(wq, (void *)tx_pkt, bus_addr, len,\n+\t\tsop, eop,\n+\t\t1 /*desc_skip_cnt*/,\n+\t\tcq_entry,\n+\t\t0 /*compressed send*/,\n+\t\t0 /*wrid*/);\n+\n+\treturn 0;\n+}\n+\n+void enic_dev_stats_clear(struct enic *enic)\n+{\n+\tif (vnic_dev_stats_clear(enic->vdev))\n+\t\tdev_err(enic, \"Error in clearing stats\\n\");\n+}\n+\n+void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)\n+{\n+\tstruct vnic_stats *stats;\n+\n+\tmemset(r_stats, 0, sizeof(*r_stats));\n+\tif (vnic_dev_stats_dump(enic->vdev, &stats)) {\n+\t\tdev_err(enic, \"Error in getting stats\\n\");\n+\t\treturn;\n+\t}\n+\n+\tr_stats->ipackets = stats->rx.rx_frames_ok;\n+\tr_stats->opackets = stats->tx.tx_frames_ok;\n+\n+\tr_stats->ibytes = stats->rx.rx_bytes_ok;\n+\tr_stats->obytes = stats->tx.tx_bytes_ok;\n+\n+\tr_stats->ierrors = stats->rx.rx_errors;\n+\tr_stats->oerrors = stats->tx.tx_errors;\n+\n+\tr_stats->imcasts = stats->rx.rx_multicast_frames_ok;\n+\tr_stats->rx_nombuf = stats->rx.rx_no_bufs;\n+}\n+\n+void enic_del_mac_address(struct enic *enic)\n+{\n+\tif (vnic_dev_del_addr(enic->vdev, enic->mac_addr))\n+\t\tdev_err(enic, \"del mac addr failed\\n\");\n+}\n+\n+void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)\n+{\n+\tint err;\n+\n+\tif (!is_eth_addr_valid(mac_addr)) {\n+\t\tdev_err(enic, \"invalid mac address\\n\");\n+\t\treturn;\n+\t}\n+\n+\terr = vnic_dev_del_addr(enic->vdev, mac_addr);\n+\tif (err) {\n+\t\tdev_err(enic, \"del mac addr failed\\n\");\n+\t\treturn;\n+\t}\n+\n+\tether_addr_copy((struct ether_addr *)mac_addr,\n+\t\t(struct ether_addr *)enic->mac_addr);\n+\n+\terr = vnic_dev_add_addr(enic->vdev, mac_addr);\n+\tif (err) 
{\n+\t\tdev_err(enic, \"add mac addr failed\\n\");\n+\t\treturn;\n+\t}\n+}\n+\n+static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)\n+{\n+\tstruct enic *enic = vnic_dev_priv(rq->vdev);\n+\n+\tif (!buf->os_buf)\n+\t\treturn;\n+\n+\trte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);\n+\tbuf->os_buf = NULL;\n+}\n+\n+void enic_init_vnic_resources(struct enic *enic)\n+{\n+\tunsigned int error_interrupt_enable = 1;\n+\tunsigned int error_interrupt_offset = 0;\n+\tint index = 0;\n+\tunsigned int cq_index = 0;\n+\n+\tfor (index = 0; index < enic->rq_count; index++) {\n+\t\tvnic_rq_init(&enic->rq[index],\n+\t\t\tenic_cq_rq(enic, index),\n+\t\t\terror_interrupt_enable,\n+\t\t\terror_interrupt_offset);\n+\t}\n+\n+\tfor (index = 0; index < enic->wq_count; index++) {\n+\t\tvnic_wq_init(&enic->wq[index],\n+\t\t\tenic_cq_wq(enic, index),\n+\t\t\terror_interrupt_enable,\n+\t\t\terror_interrupt_offset);\n+\t}\n+\n+\tvnic_dev_stats_clear(enic->vdev);\n+\n+\tfor (index = 0; index < enic->cq_count; index++) {\n+\t\tvnic_cq_init(&enic->cq[index],\n+\t\t\t0 /* flow_control_enable */,\n+\t\t\t1 /* color_enable */,\n+\t\t\t0 /* cq_head */,\n+\t\t\t0 /* cq_tail */,\n+\t\t\t1 /* cq_tail_color */,\n+\t\t\t0 /* interrupt_enable */,\n+\t\t\t1 /* cq_entry_enable */,\n+\t\t\t0 /* cq_message_enable */,\n+\t\t\t0 /* interrupt offset */,\n+\t\t\t0 /* cq_message_addr */);\n+\t}\n+\n+\tvnic_intr_init(&enic->intr,\n+\t\tenic->config.intr_timer_usec,\n+\t\tenic->config.intr_timer_type,\n+\t\t/*mask_on_assertion*/1);\n+}\n+\n+\n+static int enic_rq_alloc_buf(struct vnic_rq *rq)\n+{\n+\tstruct enic *enic = vnic_dev_priv(rq->vdev);\n+\tvoid *buf;\n+\tdma_addr_t dma_addr;\n+\tstruct rq_enet_desc *desc = vnic_rq_next_desc(rq);\n+\tu_int8_t type = RQ_ENET_TYPE_ONLY_SOP;\n+\tu_int16_t len = ENIC_MAX_MTU + VLAN_ETH_HLEN;\n+\tu16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);\n+\tstruct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);\n+\tstruct rte_mbuf *hdr_mbuf = NULL;\n+\n+\tif (!mbuf) 
{\n+\t\tdev_err(enic, \"mbuf alloc in enic_rq_alloc_buf failed\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (unlikely(split_hdr_size)) {\n+\t\tif (vnic_rq_desc_avail(rq) < 2) {\n+\t\t\trte_mempool_put(mbuf->pool, mbuf);\n+\t\t\treturn -1;\n+\t\t}\n+\t\thdr_mbuf = enic_rxmbuf_alloc(rq->mp);\n+\t\tif (!hdr_mbuf) {\n+\t\t\trte_mempool_put(mbuf->pool, mbuf);\n+\t\t\tdev_err(enic,\n+\t\t\t\t\"hdr_mbuf alloc in enic_rq_alloc_buf failed\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\thdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tbuf = rte_pktmbuf_mtod(hdr_mbuf, void *);\n+\n+\t\thdr_mbuf->nb_segs = 2;\n+\t\thdr_mbuf->port = rq->index;\n+\t\thdr_mbuf->next = mbuf;\n+\n+#ifdef RTE_EAL_VFIO\n+\t\tdma_addr = (dma_addr_t)buf;\n+#else\n+\t\tdma_addr = (dma_addr_t)\n+\t\t    (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);\n+#endif\n+\n+\t\trq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);\n+\n+\t\tvnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,\n+\t\t\t(unsigned int)split_hdr_size, 0 /*wrid*/);\n+\n+\t\tdesc = vnic_rq_next_desc(rq);\n+\t\ttype = RQ_ENET_TYPE_NOT_SOP;\n+\t} else {\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rq->index;\n+\t}\n+\n+\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\tbuf = rte_pktmbuf_mtod(mbuf, void *);\n+\tmbuf->next = NULL;\n+\n+#ifdef RTE_EAL_VFIO\n+\tdma_addr = (dma_addr_t)buf;\n+#else\n+\tdma_addr = (dma_addr_t)\n+\t    (mbuf->buf_physaddr + mbuf->data_off);\n+#endif\n+\n+\trq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);\n+\n+\tvnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,\n+\t\t(unsigned int)mbuf->buf_len, 0 /*wrid*/);\n+\n+\treturn 0;\n+}\n+\n+static int enic_rq_indicate_buf(struct vnic_rq *rq,\n+\tstruct cq_desc *cq_desc, struct vnic_rq_buf *buf,\n+\tint skipped, void *opaque)\n+{\n+\tstruct enic *enic = vnic_dev_priv(rq->vdev);\n+\tstruct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;\n+\tstruct rte_mbuf *rx_pkt = NULL;\n+\tstruct rte_mbuf *hdr_rx_pkt = NULL;\n+\n+\tu8 type, color, eop, sop, 
ingress_port, vlan_stripped;\n+\tu8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;\n+\tu8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;\n+\tu8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;\n+\tu8 packet_error;\n+\tu16 q_number, completed_index, bytes_written, vlan_tci, checksum;\n+\tu32 rss_hash;\n+\n+\tcq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,\n+\t\t&type, &color, &q_number, &completed_index,\n+\t\t&ingress_port, &fcoe, &eop, &sop, &rss_type,\n+\t\t&csum_not_calc, &rss_hash, &bytes_written,\n+\t\t&packet_error, &vlan_stripped, &vlan_tci, &checksum,\n+\t\t&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,\n+\t\t&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,\n+\t\t&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,\n+\t\t&fcs_ok);\n+\n+\tif (packet_error) {\n+\t\tdev_err(enic, \"packet error\\n\");\n+\t\treturn;\n+\t}\n+\n+\trx_pkt = (struct rte_mbuf *)buf->os_buf;\n+\tbuf->os_buf = NULL;\n+\n+\tif (unlikely(skipped)) {\n+\t\trx_pkt->data_len = 0;\n+\t\treturn 0;\n+\t}\n+\n+\tif (likely(!vnic_get_hdr_split_size(enic->vdev))) {\n+\t\t/* No header split configured */\n+\t\t*rx_pkt_bucket = rx_pkt;\n+\t\trx_pkt->pkt_len = bytes_written;\n+\n+\t\tif (ipv4) {\n+\t\t\trx_pkt->ol_flags |= PKT_RX_IPV4_HDR;\n+\t\t\tif (!csum_not_calc) {\n+\t\t\t\tif (unlikely(!ipv4_csum_ok))\n+\t\t\t\t\trx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\n+\t\t\t\tif ((tcp || udp) && (!tcp_udp_csum_ok))\n+\t\t\t\t\trx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\t\t}\n+\t\t} else if (ipv6)\n+\t\t\trx_pkt->ol_flags |= PKT_RX_IPV6_HDR;\n+\t} else {\n+\t\t/* Header split */\n+\t\tif (sop && !eop) {\n+\t\t\t/* This piece is header */\n+\t\t\t*rx_pkt_bucket = rx_pkt;\n+\t\t\trx_pkt->pkt_len = bytes_written;\n+\t\t} else {\n+\t\t\tif (sop && eop) {\n+\t\t\t\t/* The packet is smaller than split_hdr_size */\n+\t\t\t\t*rx_pkt_bucket = rx_pkt;\n+\t\t\t\trx_pkt->pkt_len = bytes_written;\n+\t\t\t\tif (ipv4) {\n+\t\t\t\t\trx_pkt->ol_flags |= PKT_RX_IPV4_HDR;\n+\t\t\t\t\tif (!csum_not_calc) 
{\n+\t\t\t\t\t\tif (unlikely(!ipv4_csum_ok))\n+\t\t\t\t\t\t\trx_pkt->ol_flags |=\n+\t\t\t\t\t\t\t    PKT_RX_IP_CKSUM_BAD;\n+\n+\t\t\t\t\t\tif ((tcp || udp) &&\n+\t\t\t\t\t\t    (!tcp_udp_csum_ok))\n+\t\t\t\t\t\t\trx_pkt->ol_flags |=\n+\t\t\t\t\t\t\t    PKT_RX_L4_CKSUM_BAD;\n+\t\t\t\t\t}\n+\t\t\t\t} else if (ipv6)\n+\t\t\t\t\trx_pkt->ol_flags |= PKT_RX_IPV6_HDR;\n+\t\t\t} else {\n+\t\t\t\t/* Payload */\n+\t\t\t\thdr_rx_pkt = *rx_pkt_bucket;\n+\t\t\t\thdr_rx_pkt->pkt_len += bytes_written;\n+\t\t\t\tif (ipv4) {\n+\t\t\t\t\thdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;\n+\t\t\t\t\tif (!csum_not_calc) {\n+\t\t\t\t\t\tif (unlikely(!ipv4_csum_ok))\n+\t\t\t\t\t\t\thdr_rx_pkt->ol_flags |=\n+\t\t\t\t\t\t\t    PKT_RX_IP_CKSUM_BAD;\n+\n+\t\t\t\t\t\tif ((tcp || udp) &&\n+\t\t\t\t\t\t    (!tcp_udp_csum_ok))\n+\t\t\t\t\t\t\thdr_rx_pkt->ol_flags |=\n+\t\t\t\t\t\t\t    PKT_RX_L4_CKSUM_BAD;\n+\t\t\t\t\t}\n+\t\t\t\t} else if (ipv6)\n+\t\t\t\t\thdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;\n+\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\trx_pkt->data_len = bytes_written;\n+\n+\tif (rss_hash) {\n+\t\trx_pkt->ol_flags |= PKT_RX_RSS_HASH;\n+\t\trx_pkt->hash.rss = rss_hash;\n+\t}\n+\n+\tif (vlan_tci) {\n+\t\trx_pkt->ol_flags |= PKT_RX_VLAN_PKT;\n+\t\trx_pkt->vlan_tci = vlan_tci;\n+\t}\n+\n+\treturn eop;\n+}\n+\n+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,\n+\tu8 type, u16 q_number, u16 completed_index, void *opaque)\n+{\n+\tstruct enic *enic = vnic_dev_priv(vdev);\n+\n+\treturn vnic_rq_service(&enic->rq[q_number], cq_desc,\n+\t\tcompleted_index, VNIC_RQ_RETURN_DESC,\n+\t\tenic_rq_indicate_buf, opaque);\n+\n+}\n+\n+int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,\n+\tunsigned int budget, unsigned int *work_done)\n+{\n+\tstruct enic *enic = vnic_dev_priv(rq->vdev);\n+\tunsigned int cq = enic_cq_rq(enic, rq->index);\n+\tint err = 0;\n+\n+\t*work_done = vnic_cq_service(&enic->cq[cq],\n+\t\tbudget, enic_rq_service, (void *)rx_pkts);\n+\n+\tif (*work_done) 
{\n+\t\tvnic_rq_fill(rq, enic_rq_alloc_buf);\n+\n+\t\t/* Need at least one buffer on ring to get going */\n+\t\tif (vnic_rq_desc_used(rq) == 0) {\n+\t\t\tdev_err(enic, \"Unable to alloc receive buffers\\n\");\n+\t\t\terr = -1;\n+\t\t}\n+\t}\n+\treturn err;\n+}\n+\n+#ifdef RTE_EAL_VFIO\n+void *enic_buf_map(void *priv, void *addr, size_t size)\n+{\n+\tstruct enic *enic = (struct enic *)priv;\n+\tstruct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };\n+\n+\tsize_t r_size;\n+\n+\tdma_map.vaddr = (__u64)addr;\n+\tdma_map.vaddr &= ~(ENIC_PAGE_ALIGN - 1);\n+\tr_size = (size_t)PAGE_ROUND_UP(\n+\t\tsize + ((__u64)addr & (ENIC_PAGE_ALIGN - 1)));\n+\tdma_map.size = r_size;\n+\tdma_map.iova = dma_map.vaddr;\n+\tdma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;\n+\n+\tioctl(enic->vfio_fd, VFIO_IOMMU_MAP_DMA, &dma_map);\n+\t/* Ignore the return value of this ioctl as this fails\n+\t * only because of duplicate entries.\n+\t */\n+\n+\treturn (void *)addr;\n+}\n+#endif\n+\n+void *enic_alloc_consistent(void *priv, size_t size,\n+\tdma_addr_t *dma_handle, u8 *name)\n+{\n+\tstruct enic *enic = (struct enic *)priv;\n+\tvoid *vaddr;\n+\tconst struct rte_memzone *rz;\n+\t*dma_handle = 0;\n+\n+\trz = rte_memzone_reserve_aligned(name, size, 0, 0, ENIC_ALIGN);\n+\tif (!rz) {\n+\t\tpr_err(\"%s : Failed to allocate memory requested for %s\",\n+\t\t\t__func__, name);\n+\t\treturn NULL;\n+\t}\n+\n+#ifdef RTE_EAL_VFIO\n+\tvaddr = enic_buf_map(enic, rz->addr, rz->len);\n+\tif (!vaddr) {\n+\t\tpr_err(\"%s : Failed to map allocated memory requested for %s\",\n+\t\t\t__func__, name);\n+\t\treturn NULL;\n+\t}\n+\n+\t*dma_handle = (dma_addr_t)vaddr;\n+#else\n+\tvaddr = rz->addr;\n+\t*dma_handle = (dma_addr_t)rz->phys_addr;\n+#endif\n+\treturn vaddr;\n+}\n+\n+void enic_free_consistent(struct rte_pci_device *hwdev, size_t size,\n+\tvoid *vaddr, dma_addr_t dma_handle)\n+{\n+\t/* Nothing to be done */\n+}\n+\n+void enic_intr_handler(__rte_unused struct rte_intr_handle 
*handle,\n+\tvoid *arg)\n+{\n+\tstruct enic *enic = pmd_priv((struct rte_eth_dev *)arg);\n+\n+\tdev_err(enic, \"Err intr.\\n\");\n+\tvnic_intr_return_all_credits(&enic->intr);\n+\n+\tenic_log_q_error(enic);\n+}\n+\n+int enic_enable(struct enic *enic)\n+{\n+\tint index;\n+\tvoid *res;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tconst struct rte_memzone *rmz;\n+\tstruct rte_eth_dev *eth_dev = enic->rte_dev;\n+\n+\teth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);\n+\teth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;\n+\tvnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */\n+\n+#ifdef RTE_EAL_VFIO\n+\tfor (index = 0; index < enic->rq_count; index++) {\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"MP_%s\",\n+\t\t\tenic->rq[index].mp->name);\n+\t\trmz = rte_memzone_lookup(mz_name);\n+\t\tenic_buf_map((void *)enic, rmz->addr, (size_t)rmz->len);\n+\t}\n+#endif\n+\n+\tif (enic_clsf_init(enic))\n+\t\tdev_warning(enic, \"Init of hash table for clsf failed.\"\\\n+\t\t\t\"Flow director feature will not work\\n\");\n+\n+\t/* Fill RQ bufs */\n+\tfor (index = 0; index < enic->rq_count; index++) {\n+\t\tvnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);\n+\n+\t\t/* Need at least one buffer on ring to get going\n+\t\t*/\n+\t\tif (vnic_rq_desc_used(&enic->rq[index]) == 0) {\n+\t\t\tdev_err(enic, \"Unable to alloc receive buffers\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tfor (index = 0; index < enic->wq_count; index++)\n+\t\tvnic_wq_enable(&enic->wq[index]);\n+\tfor (index = 0; index < enic->rq_count; index++)\n+\t\tvnic_rq_enable(&enic->rq[index]);\n+\n+\tvnic_dev_enable_wait(enic->vdev);\n+\n+#ifndef RTE_EAL_VFIO\n+\t/* Register and enable error interrupt */\n+\trte_intr_callback_register(&(enic->pdev->intr_handle),\n+\t\tenic_intr_handler, (void *)enic->rte_dev);\n+\n+\trte_intr_enable(&(enic->pdev->intr_handle));\n+#endif\n+\tvnic_intr_unmask(&enic->intr);\n+\n+\treturn 0;\n+}\n+\n+int enic_alloc_intr_resources(struct enic 
*enic)\n+{\n+\tint err;\n+\n+\tdev_info(enic, \"vNIC resources used:  \"\\\n+\t\t\"wq %d rq %d cq %d intr %d\\n\",\n+\t\tenic->wq_count, enic->rq_count,\n+\t\tenic->cq_count, enic->intr_count);\n+\n+\terr = vnic_intr_alloc(enic->vdev, &enic->intr, 0);\n+\tif (err)\n+\t\tenic_free_vnic_resources(enic);\n+\n+\treturn err;\n+}\n+\n+void enic_free_rq(void *rxq)\n+{\n+\tstruct vnic_rq *rq = (struct vnic_rq *)rxq;\n+\tstruct enic *enic = vnic_dev_priv(rq->vdev);\n+\n+\tvnic_rq_free(rq);\n+\tvnic_cq_free(&enic->cq[rq->index]);\n+}\n+\n+void enic_start_wq(struct enic *enic, uint16_t queue_idx)\n+{\n+\tvnic_wq_enable(&enic->wq[queue_idx]);\n+}\n+\n+int enic_stop_wq(struct enic *enic, uint16_t queue_idx)\n+{\n+\treturn vnic_wq_disable(&enic->wq[queue_idx]);\n+}\n+\n+void enic_start_rq(struct enic *enic, uint16_t queue_idx)\n+{\n+\tvnic_rq_enable(&enic->rq[queue_idx]);\n+}\n+\n+int enic_stop_rq(struct enic *enic, uint16_t queue_idx)\n+{\n+\treturn vnic_rq_disable(&enic->rq[queue_idx]);\n+}\n+\n+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,\n+\tunsigned int socket_id, struct rte_mempool *mp,\n+\tuint16_t nb_desc)\n+{\n+\tint err;\n+\tstruct vnic_rq *rq = &enic->rq[queue_idx];\n+\n+\trq->socket_id = socket_id;\n+\trq->mp = mp;\n+\n+\tif (nb_desc) {\n+\t\tif (nb_desc > enic->config.rq_desc_count) {\n+\t\t\tdev_warning(enic,\n+\t\t\t\t\"RQ %d - number of rx desc in cmd line (%d)\"\\\n+\t\t\t\t\"is greater than that in the UCSM/CIMC adapter\"\\\n+\t\t\t\t\"policy.  
Applying the value in the adapter \"\\\n+\t\t\t\t\"policy (%d).\\n\",\n+\t\t\t\tqueue_idx, nb_desc, enic->config.rq_desc_count);\n+\t\t} else if (nb_desc != enic->config.rq_desc_count) {\n+\t\t\tenic->config.rq_desc_count = nb_desc;\n+\t\t\tdev_info(enic,\n+\t\t\t\t\"RX Queues - effective number of descs:%d\\n\",\n+\t\t\t\tnb_desc);\n+\t\t}\n+\t}\n+\n+\t/* Allocate queue resources */\n+\terr = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,\n+\t\tenic->config.rq_desc_count,\n+\t\tsizeof(struct rq_enet_desc));\n+\tif (err) {\n+\t\tdev_err(enic, \"error in allocation of rq\\n\");\n+\t\treturn err;\n+\t}\n+\n+\terr = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,\n+\t\tsocket_id, enic->config.rq_desc_count,\n+\t\tsizeof(struct cq_enet_rq_desc));\n+\tif (err) {\n+\t\tvnic_rq_free(rq);\n+\t\tdev_err(enic, \"error in allocation of cq for rq\\n\");\n+\t}\n+\n+\treturn err;\n+}\n+\n+void enic_free_wq(void *txq)\n+{\n+\tstruct vnic_wq *wq = (struct vnic_wq *)txq;\n+\tstruct enic *enic = vnic_dev_priv(wq->vdev);\n+\n+\tvnic_wq_free(wq);\n+\tvnic_cq_free(&enic->cq[enic->rq_count + wq->index]);\n+}\n+\n+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,\n+\tunsigned int socket_id, uint16_t nb_desc)\n+{\n+\tint err;\n+\tstruct vnic_wq *wq = &enic->wq[queue_idx];\n+\tunsigned int cq_index = enic_cq_wq(enic, queue_idx);\n+\n+\twq->socket_id = socket_id;\n+\tif (nb_desc) {\n+\t\tif (nb_desc > enic->config.wq_desc_count) {\n+\t\t\tdev_warning(enic,\n+\t\t\t\t\"WQ %d - number of tx desc in cmd line (%d)\"\\\n+\t\t\t\t\"is greater than that in the UCSM/CIMC adapter\"\\\n+\t\t\t\t\"policy.  
Applying the value in the adapter \"\\\n+\t\t\t\t\"policy (%d)\\n\",\n+\t\t\t\tqueue_idx, nb_desc, enic->config.wq_desc_count);\n+\t\t} else if (nb_desc != enic->config.wq_desc_count) {\n+\t\t\tenic->config.wq_desc_count = nb_desc;\n+\t\t\tdev_info(enic,\n+\t\t\t\t\"TX Queues - effective number of descs:%d\\n\",\n+\t\t\t\tnb_desc);\n+\t\t}\n+\t}\n+\n+\t/* Allocate queue resources */\n+\terr = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,\n+\t\tenic->config.wq_desc_count,\n+\t\tsizeof(struct wq_enet_desc));\n+\tif (err) {\n+\t\tdev_err(enic, \"error in allocation of wq\\n\");\n+\t\treturn err;\n+\t}\n+\n+\terr = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,\n+\t\tsocket_id, enic->config.wq_desc_count,\n+\t\tsizeof(struct cq_enet_wq_desc));\n+\tif (err) {\n+\t\tvnic_wq_free(wq);\n+\t\tdev_err(enic, \"error in allocation of cq for wq\\n\");\n+\t}\n+\n+\treturn err;\n+}\n+\n+int enic_disable(struct enic *enic)\n+{\n+\tunsigned int i;\n+\tint err;\n+\n+\tvnic_intr_mask(&enic->intr);\n+\t(void)vnic_intr_masked(&enic->intr); /* flush write */\n+\n+\tvnic_dev_disable(enic->vdev);\n+\n+\tenic_clsf_destroy(enic);\n+\n+\tif (!enic_is_sriov_vf(enic))\n+\t\tvnic_dev_del_addr(enic->vdev, enic->mac_addr);\n+\n+\tfor (i = 0; i < enic->wq_count; i++) {\n+\t\terr = vnic_wq_disable(&enic->wq[i]);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\tfor (i = 0; i < enic->rq_count; i++) {\n+\t\terr = vnic_rq_disable(&enic->rq[i]);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\tvnic_dev_set_reset_flag(enic->vdev, 1);\n+\tvnic_dev_notify_unset(enic->vdev);\n+\n+\tfor (i = 0; i < enic->wq_count; i++)\n+\t\tvnic_wq_clean(&enic->wq[i], enic_free_wq_buf);\n+\tfor (i = 0; i < enic->rq_count; i++)\n+\t\tvnic_rq_clean(&enic->rq[i], enic_free_rq_buf);\n+\tfor (i = 0; i < enic->cq_count; i++)\n+\t\tvnic_cq_clean(&enic->cq[i]);\n+\tvnic_intr_clean(&enic->intr);\n+\n+\treturn 0;\n+}\n+\n+static int enic_dev_wait(struct vnic_dev *vdev,\n+\tint (*start)(struct vnic_dev *, 
int),\n+\tint (*finished)(struct vnic_dev *, int *),\n+\tint arg)\n+{\n+\tint done;\n+\tint err;\n+\tint i;\n+\n+\terr = start(vdev, arg);\n+\tif (err)\n+\t\treturn err;\n+\n+\t/* Wait for func to complete...2 seconds max */\n+\tfor (i = 0; i < 2000; i++) {\n+\t\terr = finished(vdev, &done);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t\tif (done)\n+\t\t\treturn 0;\n+\t\tusleep(1000);\n+\t}\n+\treturn -ETIMEDOUT;\n+}\n+\n+static int enic_dev_open(struct enic *enic)\n+{\n+\tint err;\n+\n+\terr = enic_dev_wait(enic->vdev, vnic_dev_open,\n+\t\tvnic_dev_open_done, 0);\n+\tif (err)\n+\t\tdev_err(enic_get_dev(enic),\n+\t\t\t\"vNIC device open failed, err %d\\n\", err);\n+\n+\treturn err;\n+}\n+\n+static int enic_set_rsskey(struct enic *enic)\n+{\n+\tdma_addr_t rss_key_buf_pa;\n+\tunion vnic_rss_key *rss_key_buf_va = NULL;\n+\tunion vnic_rss_key rss_key = {\n+\t\t.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},\n+\t\t.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},\n+\t\t.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},\n+\t\t.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},\n+\t};\n+\tint err;\n+\tchar name[NAME_MAX];\n+\n+\tsnprintf(name, NAME_MAX, \"rss_key-%s\", enic->bdf_name);\n+\trss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),\n+\t\t&rss_key_buf_pa, name);\n+\tif (!rss_key_buf_va)\n+\t\treturn -ENOMEM;\n+\n+\trte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));\n+\n+\terr = enic_set_rss_key(enic,\n+\t\trss_key_buf_pa,\n+\t\tsizeof(union vnic_rss_key));\n+\n+\tenic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),\n+\t\trss_key_buf_va, rss_key_buf_pa);\n+\n+\treturn err;\n+}\n+\n+static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)\n+{\n+\tdma_addr_t rss_cpu_buf_pa;\n+\tunion vnic_rss_cpu *rss_cpu_buf_va = NULL;\n+\tunsigned int i;\n+\tint err;\n+\tchar name[NAME_MAX];\n+\n+\tsnprintf(name, NAME_MAX, \"rss_cpu-%s\", enic->bdf_name);\n+\trss_cpu_buf_va = 
enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),\n+\t\t&rss_cpu_buf_pa, name);\n+\tif (!rss_cpu_buf_va)\n+\t\treturn -ENOMEM;\n+\n+\tfor (i = 0; i < (1 << rss_hash_bits); i++)\n+\t\t(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;\n+\n+\terr = enic_set_rss_cpu(enic,\n+\t\trss_cpu_buf_pa,\n+\t\tsizeof(union vnic_rss_cpu));\n+\n+\tenic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),\n+\t\trss_cpu_buf_va, rss_cpu_buf_pa);\n+\n+\treturn err;\n+}\n+\n+static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,\n+\tu8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)\n+{\n+\tconst u8 tso_ipid_split_en = 0;\n+\tint err;\n+\n+\t/* Enable VLAN tag stripping */\n+\n+\terr = enic_set_nic_cfg(enic,\n+\t\trss_default_cpu, rss_hash_type,\n+\t\trss_hash_bits, rss_base_cpu,\n+\t\trss_enable, tso_ipid_split_en,\n+\t\tenic->ig_vlan_strip_en);\n+\n+\treturn err;\n+}\n+\n+int enic_set_rss_nic_cfg(struct enic *enic)\n+{\n+\tconst u8 rss_default_cpu = 0;\n+\tconst u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |\n+\t    NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |\n+\t    NIC_CFG_RSS_HASH_TYPE_IPV6 |\n+\t    NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;\n+\tconst u8 rss_hash_bits = 7;\n+\tconst u8 rss_base_cpu = 0;\n+\tu8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);\n+\n+\tif (rss_enable) {\n+\t\tif (!enic_set_rsskey(enic)) {\n+\t\t\tif (enic_set_rsscpu(enic, rss_hash_bits)) {\n+\t\t\t\trss_enable = 0;\n+\t\t\t\tdev_warning(enic, \"RSS disabled, \"\\\n+\t\t\t\t\t\"Failed to set RSS cpu indirection table.\");\n+\t\t\t}\n+\t\t} else {\n+\t\t\trss_enable = 0;\n+\t\t\tdev_warning(enic,\n+\t\t\t\t\"RSS disabled, Failed to set RSS key.\\n\");\n+\t\t}\n+\t}\n+\n+\treturn enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,\n+\t\trss_hash_bits, rss_base_cpu, rss_enable);\n+}\n+\n+int enic_setup_finish(struct enic *enic)\n+{\n+\tint ret;\n+\n+\tret = enic_set_rss_nic_cfg(enic);\n+\tif (ret) {\n+\t\tdev_err(enic, \"Failed to config nic, 
aborting.\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tvnic_dev_add_addr(enic->vdev, enic->mac_addr);\n+\n+\t/* Default conf */\n+\tvnic_dev_packet_filter(enic->vdev,\n+\t\t1 /* directed  */,\n+\t\t1 /* multicast */,\n+\t\t1 /* broadcast */,\n+\t\t0 /* promisc   */,\n+\t\t1 /* allmulti  */);\n+\n+\tenic->promisc = 0;\n+\tenic->allmulti = 1;\n+\n+\treturn 0;\n+}\n+\n+#ifdef RTE_EAL_VFIO\n+static void enic_eventfd_init(struct enic *enic)\n+{\n+\tenic->eventfd = enic->pdev->intr_handle.fd;\n+}\n+\n+void *enic_err_intr_handler(void *arg)\n+{\n+\tstruct enic *enic = (struct enic *)arg;\n+\tunsigned int intr = enic_msix_err_intr(enic);\n+\tssize_t size;\n+\tuint64_t data;\n+\n+\twhile (1) {\n+\t\tsize = read(enic->eventfd, &data, sizeof(data));\n+\t\tdev_err(enic, \"Err intr.\\n\");\n+\t\tvnic_intr_return_all_credits(&enic->intr);\n+\n+\t\tenic_log_q_error(enic);\n+\t}\n+\n+\treturn NULL;\n+}\n+#endif\n+\n+void enic_add_packet_filter(struct enic *enic)\n+{\n+\t/* Args -> directed, multicast, broadcast, promisc, allmulti */\n+\tvnic_dev_packet_filter(enic->vdev, 1, 1, 1,\n+\t\tenic->promisc, enic->allmulti);\n+}\n+\n+int enic_get_link_status(struct enic *enic)\n+{\n+\treturn vnic_dev_link_status(enic->vdev);\n+}\n+\n+\n+#ifdef RTE_EAL_VFIO\n+static int enic_create_err_intr_thread(struct enic *enic)\n+{\n+\tpthread_attr_t intr_attr;\n+\n+\t/* create threads for error interrupt handling */\n+\tpthread_attr_init(&intr_attr);\n+\tpthread_attr_setstacksize(&intr_attr, 0x100000);\n+\n+\t/* ERR */\n+\tif (pthread_create(&enic->err_intr_thread, &intr_attr,\n+\t\t    enic_err_intr_handler, (void *)enic)) {\n+\t\tdev_err(enic, \"Failed to create err interrupt handler threads\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tpthread_attr_destroy(&intr_attr);\n+\n+\treturn 0;\n+}\n+\n+\n+static int enic_set_intr_mode(struct enic *enic)\n+{\n+\tstruct vfio_irq_set *irq_set;\n+\tint *fds;\n+\tint size;\n+\tint ret = -1;\n+\tint index;\n+\n+\tif (enic->intr_count < 1) {\n+\t\tdev_err(enic, \"Unsupported 
resource conf.\\n\");\n+\t\treturn -1;\n+\t}\n+\tvnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);\n+\n+\tenic->intr_count = 1;\n+\n+\tenic_eventfd_init(enic);\n+\tsize = sizeof(*irq_set) + (sizeof(int));\n+\n+\tirq_set = rte_zmalloc(\"enic_vfio_irq\", size, 0);\n+\tirq_set->argsz = size;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = 0;\n+\tirq_set->count = 1; /* For error interrupt only */\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |\n+\t    VFIO_IRQ_SET_ACTION_TRIGGER;\n+\tfds = (int *)&irq_set->data;\n+\n+\tfds[0] = enic->eventfd;\n+\n+\tret = ioctl(enic->pdev->intr_handle.vfio_dev_fd,\n+\t\tVFIO_DEVICE_SET_IRQS, irq_set);\n+\trte_free(irq_set);\n+\tif (ret) {\n+\t\tdev_err(enic, \"Failed to set eventfds for interrupts\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tenic_create_err_intr_thread(enic);\n+\treturn 0;\n+}\n+\n+static void enic_clear_intr_mode(struct enic *enic)\n+{\n+\tvnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);\n+}\n+#endif\n+\n+static void enic_dev_deinit(struct enic *enic)\n+{\n+\tunsigned int i;\n+\tstruct rte_eth_dev *eth_dev = enic->rte_dev;\n+\n+\tif (eth_dev->data->mac_addrs)\n+\t\trte_free(eth_dev->data->mac_addrs);\n+\n+#ifdef RTE_EAL_VFIO\n+\tenic_clear_intr_mode(enic);\n+#endif\n+}\n+\n+\n+int enic_set_vnic_res(struct enic *enic)\n+{\n+\tstruct rte_eth_dev *eth_dev = enic->rte_dev;\n+\n+\tif ((enic->rq_count < eth_dev->data->nb_rx_queues) ||\n+\t\t(enic->wq_count < eth_dev->data->nb_tx_queues)) {\n+\t\tdev_err(dev, \"Not enough resources configured, aborting\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tenic->rq_count = eth_dev->data->nb_rx_queues;\n+\tenic->wq_count = eth_dev->data->nb_tx_queues;\n+\tif (enic->cq_count < (enic->rq_count + enic->wq_count)) {\n+\t\tdev_err(dev, \"Not enough resources configured, aborting\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tenic->cq_count = enic->rq_count + enic->wq_count;\n+\treturn 0;\n+}\n+\n+static int enic_dev_init(struct enic *enic)\n+{\n+\tunsigned int i;\n+\tint 
err;\n+\tstruct rte_eth_dev *eth_dev = enic->rte_dev;\n+\n+\tvnic_dev_intr_coal_timer_info_default(enic->vdev);\n+\n+\t/* Get vNIC configuration\n+\t*/\n+\terr = enic_get_vnic_config(enic);\n+\tif (err) {\n+\t\tdev_err(dev, \"Get vNIC configuration failed, aborting\\n\");\n+\t\treturn err;\n+\t}\n+\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"enic_mac_addr\", ETH_ALEN, 0);\n+\tif (!eth_dev->data->mac_addrs) {\n+\t\tdev_err(enic, \"mac addr storage alloc failed, aborting.\\n\");\n+\t\treturn -1;\n+\t}\n+\tether_addr_copy((struct ether_addr *) enic->mac_addr,\n+\t\t&eth_dev->data->mac_addrs[0]);\n+\n+\n+\t/* Get available resource counts\n+\t*/\n+\tenic_get_res_counts(enic);\n+\n+#ifdef RTE_EAL_VFIO\n+\t/* Set interrupt mode based on resource counts and system\n+\t * capabilities\n+\t */\n+\terr = enic_set_intr_mode(enic);\n+\tif (err) {\n+\t\trte_free(eth_dev->data->mac_addrs);\n+\t\tenic_clear_intr_mode(enic);\n+\t\tdev_err(dev, \"Failed to set intr mode based on resource \"\\\n+\t\t\t\"counts and system capabilities, aborting\\n\");\n+\t\treturn err;\n+\t}\n+#endif\n+\n+\tvnic_dev_set_reset_flag(enic->vdev, 0);\n+\n+\treturn 0;\n+\n+}\n+\n+int enic_probe(struct enic *enic)\n+{\n+\tconst char *bdf = enic->bdf_name;\n+\tstruct rte_pci_device *pdev = enic->pdev;\n+\tstruct rte_eth_dev *eth_dev = enic->rte_dev;\n+\tunsigned int i;\n+\tint err = -1;\n+\n+\tdev_info(enic, \" Initializing ENIC PMD version %s\\n\", DRV_VERSION);\n+\n+\tenic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;\n+\tenic->bar0.len = pdev->mem_resource[0].len;\n+#ifdef RTE_EAL_VFIO\n+\tenic->vfio_fd = pci_vfio_container_fd();\n+#endif\n+\n+\t/* Register vNIC device */\n+\tenic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);\n+\tif (!enic->vdev) {\n+\t\tdev_err(enic, \"vNIC registration failed, aborting\\n\");\n+\t\tgoto err_out;\n+\t}\n+\n+\tvnic_register_cbacks(enic->vdev,\n+#ifdef 
RTE_EAL_VFIO\n+\t\tenic_buf_map,\n+#endif\n+\t\tenic_alloc_consistent,\n+\t\tenic_free_consistent);\n+\n+\t/* Issue device open to get device in known state */\n+\terr = enic_dev_open(enic);\n+\tif (err) {\n+\t\tdev_err(enic, \"vNIC dev open failed, aborting\\n\");\n+\t\tgoto err_out_unregister;\n+\t}\n+\n+\t/* Set ingress vlan rewrite mode before vnic initialization */\n+\terr = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,\n+\t\tIG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);\n+\tif (err) {\n+\t\tdev_err(enic,\n+\t\t\t\"Failed to set ingress vlan rewrite mode, aborting.\\n\");\n+\t\tgoto err_out_dev_close;\n+\t}\n+\n+\t/* Issue device init to initialize the vnic-to-switch link.\n+\t * We'll start with carrier off and wait for link UP\n+\t * notification later to turn on carrier.  We don't need\n+\t * to wait here for the vnic-to-switch link initialization\n+\t * to complete; link UP notification is the indication that\n+\t * the process is complete.\n+\t */\n+\n+\terr = vnic_dev_init(enic->vdev, 0);\n+\tif (err) {\n+\t\tdev_err(enic, \"vNIC dev init failed, aborting\\n\");\n+\t\tgoto err_out_dev_close;\n+\t}\n+\n+\terr = enic_dev_init(enic);\n+\tif (err) {\n+\t\tdev_err(enic, \"Device initialization failed, aborting\\n\");\n+\t\tgoto err_out_dev_close;\n+\t}\n+\n+\treturn 0;\n+\n+err_out_dev_close:\n+\tvnic_dev_close(enic->vdev);\n+err_out_unregister:\n+\tvnic_dev_unregister(enic->vdev);\n+err_out:\n+\treturn err;\n+}\n+\n+void enic_remove(struct enic *enic)\n+{\n+\tenic_dev_deinit(enic);\n+\tvnic_dev_close(enic->vdev);\n+\tvnic_dev_unregister(enic->vdev);\n+}\n+\ndiff --git a/lib/librte_pmd_enic/enic_res.c b/lib/librte_pmd_enic/enic_res.c\nnew file mode 100644\nindex 0000000..b38201d\n--- /dev/null\n+++ b/lib/librte_pmd_enic/enic_res.c\n@@ -0,0 +1,221 @@\n+/*\n+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.\n+ *\n+ * Copyright (c) 2014, Cisco Systems, Inc. 
\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in\n+ * the documentation and/or other materials provided with the\n+ * distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ */\n+#ident \"$Id: enic_res.c 171146 2014-05-02 07:08:20Z ssujith $\"\n+\n+#include \"enic_compat.h\"\n+#include \"rte_ethdev.h\"\n+#include \"wq_enet_desc.h\"\n+#include \"rq_enet_desc.h\"\n+#include \"cq_enet_desc.h\"\n+#include \"vnic_resource.h\"\n+#include \"vnic_enet.h\"\n+#include \"vnic_dev.h\"\n+#include \"vnic_wq.h\"\n+#include \"vnic_rq.h\"\n+#include \"vnic_cq.h\"\n+#include \"vnic_intr.h\"\n+#include \"vnic_stats.h\"\n+#include \"vnic_nic.h\"\n+#include \"vnic_rss.h\"\n+#include \"enic_res.h\"\n+#include \"enic.h\"\n+\n+int enic_get_vnic_config(struct enic *enic)\n+{\n+\tstruct 
vnic_enet_config *c = &enic->config;\n+\tint err;\n+\n+\terr = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);\n+\tif (err) {\n+\t\tdev_err(enic_get_dev(enic),\n+\t\t\t\"Error getting MAC addr, %d\\n\", err);\n+\t\treturn err;\n+\t}\n+\n+#define GET_CONFIG(m) \\\n+\tdo { \\\n+\t\terr = vnic_dev_spec(enic->vdev, \\\n+\t\t\toffsetof(struct vnic_enet_config, m), \\\n+\t\t\tsizeof(c->m), &c->m); \\\n+\t\tif (err) { \\\n+\t\t\tdev_err(enic_get_dev(enic), \\\n+\t\t\t\t\"Error getting %s, %d\\n\", #m, err); \\\n+\t\t\treturn err; \\\n+\t\t} \\\n+\t} while (0)\n+\n+\tGET_CONFIG(flags);\n+\tGET_CONFIG(wq_desc_count);\n+\tGET_CONFIG(rq_desc_count);\n+\tGET_CONFIG(mtu);\n+\tGET_CONFIG(intr_timer_type);\n+\tGET_CONFIG(intr_mode);\n+\tGET_CONFIG(intr_timer_usec);\n+\tGET_CONFIG(loop_tag);\n+\tGET_CONFIG(num_arfs);\n+\n+\tc->wq_desc_count =\n+\t\tmin_t(u32, ENIC_MAX_WQ_DESCS,\n+\t\tmax_t(u32, ENIC_MIN_WQ_DESCS,\n+\t\tc->wq_desc_count));\n+\tc->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */\n+\n+\tc->rq_desc_count =\n+\t\tmin_t(u32, ENIC_MAX_RQ_DESCS,\n+\t\tmax_t(u32, ENIC_MIN_RQ_DESCS,\n+\t\tc->rq_desc_count));\n+\tc->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */\n+\n+\tif (c->mtu == 0)\n+\t\tc->mtu = 1500;\n+\tc->mtu = min_t(u16, ENIC_MAX_MTU,\n+\t\tmax_t(u16, ENIC_MIN_MTU,\n+\t\tc->mtu));\n+\n+\tc->intr_timer_usec = min_t(u32, c->intr_timer_usec,\n+\t\tvnic_dev_get_intr_coal_timer_max(enic->vdev));\n+\n+\tdev_info(enic_get_dev(enic),\n+\t\t\"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x \"\n+\t\t\"wq/rq %d/%d mtu %d\\n\",\n+\t\tenic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],\n+\t\tenic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],\n+\t\tc->wq_desc_count, c->rq_desc_count, c->mtu);\n+\tdev_info(enic_get_dev(enic), \"vNIC csum tx/rx %s/%s \"\n+\t\t\"rss %s intr mode %s type %s timer %d usec \"\n+\t\t\"loopback tag 0x%04x\\n\",\n+\t\tENIC_SETTING(enic, TXCSUM) ? \"yes\" : \"no\",\n+\t\tENIC_SETTING(enic, RXCSUM) ? 
\"yes\" : \"no\",\n+\t\tENIC_SETTING(enic, RSS) ? \"yes\" : \"no\",\n+\t\tc->intr_mode == VENET_INTR_MODE_INTX ? \"INTx\" :\n+\t\tc->intr_mode == VENET_INTR_MODE_MSI ? \"MSI\" :\n+\t\tc->intr_mode == VENET_INTR_MODE_ANY ? \"any\" :\n+\t\t\"unknown\",\n+\t\tc->intr_timer_type == VENET_INTR_TYPE_MIN ? \"min\" :\n+\t\tc->intr_timer_type == VENET_INTR_TYPE_IDLE ? \"idle\" :\n+\t\t\"unknown\",\n+\t\tc->intr_timer_usec,\n+\t\tc->loop_tag);\n+\n+\treturn 0;\n+}\n+\n+int enic_add_vlan(struct enic *enic, u16 vlanid)\n+{\n+\tu64 a0 = vlanid, a1 = 0;\n+\tint wait = 1000;\n+\tint err;\n+\n+\terr = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);\n+\tif (err)\n+\t\tdev_err(enic_get_dev(enic), \"Can't add vlan id, %d\\n\", err);\n+\n+\treturn err;\n+}\n+\n+int enic_del_vlan(struct enic *enic, u16 vlanid)\n+{\n+\tu64 a0 = vlanid, a1 = 0;\n+\tint wait = 1000;\n+\tint err;\n+\n+\terr = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);\n+\tif (err)\n+\t\tdev_err(enic_get_dev(enic), \"Can't delete vlan id, %d\\n\", err);\n+\n+\treturn err;\n+}\n+\n+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,\n+\tu8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,\n+\tu8 ig_vlan_strip_en)\n+{\n+\tu64 a0, a1;\n+\tu32 nic_cfg;\n+\tint wait = 1000;\n+\n+\tvnic_set_nic_cfg(&nic_cfg, rss_default_cpu,\n+\t\trss_hash_type, rss_hash_bits, rss_base_cpu,\n+\t\trss_enable, tso_ipid_split_en, ig_vlan_strip_en);\n+\n+\ta0 = nic_cfg;\n+\ta1 = 0;\n+\n+\treturn vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);\n+}\n+\n+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)\n+{\n+\tu64 a0 = (u64)key_pa, a1 = len;\n+\tint wait = 1000;\n+\n+\treturn vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);\n+}\n+\n+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)\n+{\n+\tu64 a0 = (u64)cpu_pa, a1 = len;\n+\tint wait = 1000;\n+\n+\treturn vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);\n+}\n+\n+void 
enic_free_vnic_resources(struct enic *enic)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < enic->wq_count; i++)\n+\t\tvnic_wq_free(&enic->wq[i]);\n+\tfor (i = 0; i < enic->rq_count; i++)\n+\t\tvnic_rq_free(&enic->rq[i]);\n+\tfor (i = 0; i < enic->cq_count; i++)\n+\t\tvnic_cq_free(&enic->cq[i]);\n+        vnic_intr_free(&enic->intr);\n+}\n+\n+void enic_get_res_counts(struct enic *enic)\n+{\n+\tenic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);\n+\tenic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);\n+\tenic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);\n+\tenic->intr_count = vnic_dev_get_res_count(enic->vdev,\n+\t\tRES_TYPE_INTR_CTRL);\n+\n+\tdev_info(enic_get_dev(enic),\n+\t\t\"vNIC resources avail: wq %d rq %d cq %d intr %d\\n\",\n+\t\tenic->wq_count, enic->rq_count,\n+\t\tenic->cq_count, enic->intr_count);\n+}\n+\n+\ndiff --git a/lib/librte_pmd_enic/enic_res.h b/lib/librte_pmd_enic/enic_res.h\nnew file mode 100644\nindex 0000000..76e0a28\n--- /dev/null\n+++ b/lib/librte_pmd_enic/enic_res.h\n@@ -0,0 +1,168 @@\n+/*\n+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.\n+ *\n+ * Copyright (c) 2014, Cisco Systems, Inc. \n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ *\n+ * 2. 
Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in\n+ * the documentation and/or other materials provided with the\n+ * distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ */\n+#ident \"$Id: enic_res.h 173137 2014-05-16 03:27:22Z sanpilla $\"\n+\n+#ifndef _ENIC_RES_H_\n+#define _ENIC_RES_H_\n+\n+#include \"wq_enet_desc.h\"\n+#include \"rq_enet_desc.h\"\n+#include \"vnic_wq.h\"\n+#include \"vnic_rq.h\"\n+\n+#define ENIC_MIN_WQ_DESCS\t\t64\n+#define ENIC_MAX_WQ_DESCS\t\t4096\n+#define ENIC_MIN_RQ_DESCS\t\t64\n+#define ENIC_MAX_RQ_DESCS\t\t4096\n+\n+#define ENIC_MIN_MTU\t\t\t68\n+#define ENIC_MAX_MTU\t\t\t9000\n+\n+#define ENIC_MULTICAST_PERFECT_FILTERS\t32\n+#define ENIC_UNICAST_PERFECT_FILTERS\t32\n+\n+#define ENIC_NON_TSO_MAX_DESC\t\t16\n+\n+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0)\n+\n+static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,\n+\tvoid *os_buf, dma_addr_t dma_addr, unsigned int len,\n+\tunsigned int mss_or_csum_offset, unsigned int hdr_len,\n+\tint vlan_tag_insert, unsigned int vlan_tag,\n+\tint offload_mode, int cq_entry, int sop, int eop, int loopback)\n+{\n+\tstruct wq_enet_desc *desc = vnic_wq_next_desc(wq);\n+\tu8 desc_skip_cnt = 1;\n+\tu8 compressed_send = 0;\n+\tu64 wrid = 0;\n+\n+\twq_enet_desc_enc(desc,\n+\t\t(u64)dma_addr | VNIC_PADDR_TARGET,\n+\t\t(u16)len,\n+\t\t(u16)mss_or_csum_offset,\n+\t\t(u16)hdr_len, (u8)offload_mode,\n+\t\t(u8)eop, (u8)cq_entry,\n+\t\t0, /* fcoe_encap */\n+\t\t(u8)vlan_tag_insert,\n+\t\t(u16)vlan_tag,\n+\t\t(u8)loopback);\n+\n+\tvnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,\n+\t\t\t(u8)cq_entry, compressed_send, wrid);\n+}\n+\n+static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,\n+\tvoid *os_buf, dma_addr_t dma_addr, unsigned int len,\n+\tint eop, int loopback)\n+{\n+\tenic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,\n+\t\t0, 0, 0, 0, 0,\n+\t\teop, 0 /* !SOP */, eop, loopback);\n+}\n+\n+static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,\n+\tdma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,\n+\tunsigned int vlan_tag, int eop, int loopback)\n+{\n+\tenic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,\n+\t\t0, 0, vlan_tag_insert, vlan_tag,\n+\t\tWQ_ENET_OFFLOAD_MODE_CSUM,\n+\t\teop, 1 /* SOP */, eop, loopback);\n+}\n+\n+static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,\n+\tvoid *os_buf, dma_addr_t dma_addr, unsigned int len,\n+\tint ip_csum, int tcpudp_csum, int vlan_tag_insert,\n+\tunsigned int vlan_tag, int eop, int loopback)\n+{\n+\tenic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,\n+\t\t(ip_csum ? 1 : 0) + (tcpudp_csum ? 
2 : 0),\n+\t\t0, vlan_tag_insert, vlan_tag,\n+\t\tWQ_ENET_OFFLOAD_MODE_CSUM,\n+\t\teop, 1 /* SOP */, eop, loopback);\n+}\n+\n+static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,\n+\tvoid *os_buf, dma_addr_t dma_addr, unsigned int len,\n+\tunsigned int csum_offset, unsigned int hdr_len,\n+\tint vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)\n+{\n+\tenic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,\n+\t\tcsum_offset, hdr_len, vlan_tag_insert, vlan_tag,\n+\t\tWQ_ENET_OFFLOAD_MODE_CSUM_L4,\n+\t\teop, 1 /* SOP */, eop, loopback);\n+}\n+\n+static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,\n+\tvoid *os_buf, dma_addr_t dma_addr, unsigned int len,\n+\tunsigned int mss, unsigned int hdr_len, int vlan_tag_insert,\n+\tunsigned int vlan_tag, int eop, int loopback)\n+{\n+\tenic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,\n+\t\tmss, hdr_len, vlan_tag_insert, vlan_tag,\n+\t\tWQ_ENET_OFFLOAD_MODE_TSO,\n+\t\teop, 1 /* SOP */, eop, loopback);\n+}\n+static inline void enic_queue_rq_desc(struct vnic_rq *rq,\n+\tvoid *os_buf, unsigned int os_buf_index,\n+\tdma_addr_t dma_addr, unsigned int len)\n+{\n+\tstruct rq_enet_desc *desc = vnic_rq_next_desc(rq);\n+\tu64 wrid = 0;\n+\tu8 type = os_buf_index ?\n+\t\tRQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;\n+\n+\trq_enet_desc_enc(desc,\n+\t\t(u64)dma_addr | VNIC_PADDR_TARGET,\n+\t\ttype, (u16)len);\n+\n+\tvnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);\n+}\n+\n+struct enic;\n+\n+int enic_get_vnic_config(struct enic *);\n+int enic_add_vlan(struct enic *enic, u16 vlanid);\n+int enic_del_vlan(struct enic *enic, u16 vlanid);\n+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,\n+\tu8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,\n+\tu8 ig_vlan_strip_en);\n+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);\n+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);\n+void enic_get_res_counts(struct enic 
*enic);\n+void enic_init_vnic_resources(struct enic *enic);\n+int enic_alloc_vnic_resources(struct enic *);\n+void enic_free_vnic_resources(struct enic *);\n+\n+#endif /* _ENIC_RES_H_ */\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "4/6"
    ]
}