get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces all writable fields).

GET /api/patches/22979/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 22979,
    "url": "http://patches.dpdk.org/api/patches/22979/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20170331020622.25498-4-johndale@cisco.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20170331020622.25498-4-johndale@cisco.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20170331020622.25498-4-johndale@cisco.com",
    "date": "2017-03-31T02:06:18",
    "name": "[dpdk-dev,v2,3/7] net/enic: flow API for NICs with advanced filters enabled",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "dba99e1a975e192f8bb21f5978f93c19b3fa94d0",
    "submitter": {
        "id": 359,
        "url": "http://patches.dpdk.org/api/people/359/?format=api",
        "name": "John Daley (johndale)",
        "email": "johndale@cisco.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20170331020622.25498-4-johndale@cisco.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/22979/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/22979/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id F14142C55;\n\tFri, 31 Mar 2017 04:06:44 +0200 (CEST)",
            "from rcdn-iport-8.cisco.com (rcdn-iport-8.cisco.com [173.37.86.79])\n\tby dpdk.org (Postfix) with ESMTP id 698172B92\n\tfor <dev@dpdk.org>; Fri, 31 Mar 2017 04:06:27 +0200 (CEST)",
            "from alln-core-12.cisco.com ([173.36.13.134])\n\tby rcdn-iport-8.cisco.com with ESMTP/TLS/DHE-RSA-AES256-SHA;\n\t31 Mar 2017 02:06:26 +0000",
            "from cisco.com (savbu-usnic-a.cisco.com [10.193.184.48])\n\tby alln-core-12.cisco.com (8.14.5/8.14.5) with ESMTP id\n\tv2V26Qiv002747; Fri, 31 Mar 2017 02:06:26 GMT",
            "by cisco.com (Postfix, from userid 392789)\n\tid 16D233FAAF17; Thu, 30 Mar 2017 19:06:26 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n\td=cisco.com; i=@cisco.com; l=32236; q=dns/txt;\n\ts=iport; t=1490925987; x=1492135587;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=mDvk/fa1EvkGuwo19SNSyePzY/yVu83nOtH9X6rLOjg=;\n\tb=Ntvkt/B6fsr89NX0DY3S0MW4MlMJml1oPXvc3RJQEQxM+rNndAfXP+/7\n\tVXCC8xejkMuiMBOOWh9geALhDME7owZ4InzZL2kpC+/zTMS5jI+jaLJkd\n\tKrL6fJZ4GFAO6fXhuu+v2BkxaDXRsgFtUhpBcSp7PxhVg1AI1GtqPiwm3 g=;",
        "X-IronPort-AV": "E=Sophos;i=\"5.36,250,1486425600\"; d=\"scan'208\";a=\"225122784\"",
        "From": "John Daley <johndale@cisco.com>",
        "To": "ferruh.yigit@intel.com, john.mcnamara@intel.com",
        "Cc": "dev@dpdk.org, John Daley <johndale@cisco.com>",
        "Date": "Thu, 30 Mar 2017 19:06:18 -0700",
        "Message-Id": "<20170331020622.25498-4-johndale@cisco.com>",
        "X-Mailer": "git-send-email 2.12.0",
        "In-Reply-To": "<20170331020622.25498-1-johndale@cisco.com>",
        "References": "<20170330212838.31291-1-johndale@cisco.com>\n\t<20170331020622.25498-1-johndale@cisco.com>",
        "Subject": "[dpdk-dev] [PATCH v2 3/7] net/enic: flow API for NICs with advanced\n\tfilters enabled",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Flow support for 1300 series adapters with the 'Advanced Filter'\nmode enabled via the UCS management interface. This enables:\nAttributes: ingress\nItems: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth (full hdr masking)\nActions: queue, mark, flag and void\nSelectors: 'is', 'spec' and 'mask'. 'last' is not supported\n\nSigned-off-by: John Daley <johndale@cisco.com>\nReviewed-by: Nelson Escobar <neescoba@cisco.com>\n---\n drivers/net/enic/enic.h      |  14 +-\n drivers/net/enic/enic_flow.c | 924 ++++++++++++++++++++++++++++++++++++++++++-\n drivers/net/enic/enic_main.c |   3 +\n drivers/net/enic/enic_res.c  |  15 +\n drivers/net/enic/enic_rxtx.c |  16 +-\n 5 files changed, 953 insertions(+), 19 deletions(-)",
    "diff": "diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h\nindex af1098b05..41dadb0ba 100644\n--- a/drivers/net/enic/enic.h\n+++ b/drivers/net/enic/enic.h\n@@ -80,6 +80,8 @@\n #define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */\n #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */\n \n+/* Special Filter id for non-specific packet flagging. Don't change value */\n+#define ENIC_MAGIC_FILTER_ID 0xffff\n \n #define ENICPMD_FDIR_MAX           64\n \n@@ -111,6 +113,12 @@ struct enic_memzone_entry {\n \tLIST_ENTRY(enic_memzone_entry) entries;\n };\n \n+struct rte_flow {\n+\tLIST_ENTRY(rte_flow) next;\n+\tu16 enic_filter_id;\n+\tstruct filter_v2 enic_filter;\n+};\n+\n /* Per-instance private data structure */\n struct enic {\n \tstruct enic *next;\n@@ -135,7 +143,9 @@ struct enic {\n \tint link_status;\n \tu8 hw_ip_checksum;\n \tu16 max_mtu;\n-\tu16 adv_filters;\n+\tu8 adv_filters;\n+\tu32 flow_filter_mode;\n+\tu8 filter_tags;\n \n \tunsigned int flags;\n \tunsigned int priv_flags;\n@@ -170,6 +180,8 @@ struct enic {\n \trte_spinlock_t memzone_list_lock;\n \trte_spinlock_t mtu_lock;\n \n+\tLIST_HEAD(enic_flows, rte_flow) flows;\n+\trte_spinlock_t flows_lock;\n };\n \n /* Get the CQ index from a Start of Packet(SOP) RQ index */\ndiff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c\nindex d25390f8a..e4c043665 100644\n--- a/drivers/net/enic/enic_flow.c\n+++ b/drivers/net/enic/enic_flow.c\n@@ -55,6 +55,875 @@\n #define FLOW_LOG(level, fmt, args...) do { } while (0)\n #endif\n \n+/** Info about how to copy items into enic filters. */\n+struct enic_items {\n+\t/** Function for copying and validating an item. */\n+\tint (*copy_item)(const struct rte_flow_item *item,\n+\t\t\t struct filter_v2 *enic_filter, u8 *inner_ofst);\n+\t/** List of valid previous items. */\n+\tconst enum rte_flow_item_type * const prev_items;\n+\t/** True if it's OK for this item to be the first item. 
For some NIC\n+\t * versions, it's invalid to start the stack above layer 3.\n+\t */\n+\tconst u8 valid_start_item;\n+};\n+\n+/** Filtering capabilities for various NIC and firmware versions. */\n+struct enic_filter_cap {\n+\t/** list of valid items and their handlers and attributes. */\n+\tconst struct enic_items *item_info;\n+};\n+\n+/* functions for copying flow actions into enic actions */\n+typedef int (copy_action_fn)(const struct rte_flow_action actions[],\n+\t\t\t     struct filter_action_v2 *enic_action);\n+\n+/* functions for copying items into enic filters */\n+typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,\n+\t\t\t  struct filter_v2 *enic_filter, u8 *inner_ofst);\n+\n+/** Action capabilities for various NICs. */\n+struct enic_action_cap {\n+\t/** list of valid actions */\n+\tconst enum rte_flow_action_type *actions;\n+\t/** copy function for a particular NIC */\n+\tint (*copy_fn)(const struct rte_flow_action actions[],\n+\t\t       struct filter_action_v2 *enic_action);\n+};\n+\n+/* Forward declarations */\n+static enic_copy_item_fn enic_copy_item_eth_v2;\n+static enic_copy_item_fn enic_copy_item_vlan_v2;\n+static enic_copy_item_fn enic_copy_item_ipv4_v2;\n+static enic_copy_item_fn enic_copy_item_ipv6_v2;\n+static enic_copy_item_fn enic_copy_item_udp_v2;\n+static enic_copy_item_fn enic_copy_item_tcp_v2;\n+static enic_copy_item_fn enic_copy_item_sctp_v2;\n+static enic_copy_item_fn enic_copy_item_sctp_v2;\n+static enic_copy_item_fn enic_copy_item_vxlan_v2;\n+static copy_action_fn enic_copy_action_v2;\n+\n+/** NICs with Advanced filters enabled */\n+static const struct enic_items enic_items_v3[] = {\n+\t[RTE_FLOW_ITEM_TYPE_ETH] = {\n+\t\t.copy_item = enic_copy_item_eth_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_VXLAN,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_VLAN] = {\n+\t\t.copy_item = 
enic_copy_item_vlan_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_IPV4] = {\n+\t\t.copy_item = enic_copy_item_ipv4_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_VLAN,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_IPV6] = {\n+\t\t.copy_item = enic_copy_item_ipv6_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_VLAN,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_UDP] = {\n+\t\t.copy_item = enic_copy_item_udp_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_TCP] = {\n+\t\t.copy_item = enic_copy_item_tcp_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_SCTP] = {\n+\t\t.copy_item = enic_copy_item_sctp_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+\t[RTE_FLOW_ITEM_TYPE_VXLAN] = {\n+\t\t.copy_item = enic_copy_item_vxlan_v2,\n+\t\t.valid_start_item = 1,\n+\t\t.prev_items = (const enum rte_flow_item_type[]) {\n+\t\t\t       RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t\t       RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t},\n+};\n+\n+/** Filtering capabilites indexed this NICs 
supported filter type. */\n+static const struct enic_filter_cap enic_filter_cap[] = {\n+\t[FILTER_DPDK_1] = {\n+\t\t.item_info = enic_items_v3,\n+\t},\n+};\n+\n+/** Supported actions for newer NICs */\n+static const enum rte_flow_action_type enic_supported_actions_v2[] = {\n+\tRTE_FLOW_ACTION_TYPE_QUEUE,\n+\tRTE_FLOW_ACTION_TYPE_MARK,\n+\tRTE_FLOW_ACTION_TYPE_FLAG,\n+\tRTE_FLOW_ACTION_TYPE_END,\n+};\n+\n+/** Action capabilites indexed by NIC version information */\n+static const struct enic_action_cap enic_action_cap[] = {\n+\t[FILTER_ACTION_V2_ALL] = {\n+\t\t.actions = enic_supported_actions_v2,\n+\t\t.copy_fn = enic_copy_action_v2,\n+\t},\n+};\n+/**\n+ * Copy ETH item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   If zero, this is an outer header. If non-zero, this is the offset into L5\n+ *   where the header begins.\n+ */\n+static int\n+enic_copy_item_eth_v2(const struct rte_flow_item *item,\n+\t\t      struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tstruct ether_hdr enic_spec;\n+\tstruct ether_hdr enic_mask;\n+\tconst struct rte_flow_item_eth *spec = item->spec;\n+\tconst struct rte_flow_item_eth *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_eth_mask;\n+\n+\tmemcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,\n+\t       ETHER_ADDR_LEN);\n+\tmemcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,\n+\t       ETHER_ADDR_LEN);\n+\n+\tmemcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,\n+\t       ETHER_ADDR_LEN);\n+\tmemcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,\n+\t       ETHER_ADDR_LEN);\n+\tenic_spec.ether_type = spec->type;\n+\tenic_mask.ether_type = mask->type;\n+\n+\tif (!*inner_ofst) {\n+\t\t/* outer header 
*/\n+\t\tmemcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,\n+\t\t       sizeof(struct ether_hdr));\n+\t\tmemcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,\n+\t\t       sizeof(struct ether_hdr));\n+\t} else {\n+\t\t/* inner header */\n+\t\tif ((*inner_ofst + sizeof(struct ether_hdr)) >\n+\t\t     FILTER_GENERIC_1_KEY_LEN)\n+\t\t\treturn ENOTSUP;\n+\t\t/* Offset into L5 where inner Ethernet header goes */\n+\t\tmemcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],\n+\t\t       &enic_mask, sizeof(struct ether_hdr));\n+\t\tmemcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],\n+\t\t       &enic_spec, sizeof(struct ether_hdr));\n+\t\t*inner_ofst += sizeof(struct ether_hdr);\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy VLAN item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   If zero, this is an outer header. If non-zero, this is the offset into L5\n+ *   where the header begins.\n+ */\n+static int\n+enic_copy_item_vlan_v2(const struct rte_flow_item *item,\n+\t\t       struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_vlan *spec = item->spec;\n+\tconst struct rte_flow_item_vlan *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\t/* Don't support filtering in tpid */\n+\tif (mask) {\n+\t\tif (mask->tpid != 0)\n+\t\t\treturn ENOTSUP;\n+\t} else {\n+\t\tmask = &rte_flow_item_vlan_mask;\n+\t\tRTE_ASSERT(mask->tpid == 0);\n+\t}\n+\n+\tif (!*inner_ofst) {\n+\t\t/* Outer header. Use the vlan mask/val fields */\n+\t\tgp->mask_vlan = mask->tci;\n+\t\tgp->val_vlan = spec->tci;\n+\t} else {\n+\t\t/* Inner header. 
Mask/Val start at *inner_ofst into L5 */\n+\t\tif ((*inner_ofst + sizeof(struct vlan_hdr)) >\n+\t\t     FILTER_GENERIC_1_KEY_LEN)\n+\t\t\treturn ENOTSUP;\n+\t\tmemcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],\n+\t\t       mask, sizeof(struct vlan_hdr));\n+\t\tmemcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],\n+\t\t       spec, sizeof(struct vlan_hdr));\n+\t\t*inner_ofst += sizeof(struct vlan_hdr);\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy IPv4 item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   Must be 0. Don't support inner IPv4 filtering.\n+ */\n+static int\n+enic_copy_item_ipv4_v2(const struct rte_flow_item *item,\n+\t\t       struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_ipv4 *spec = item->spec;\n+\tconst struct rte_flow_item_ipv4 *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\tif (*inner_ofst)\n+\t\treturn ENOTSUP;\n+\n+\t/* Match IPv4 */\n+\tgp->mask_flags |= FILTER_GENERIC_1_IPV4;\n+\tgp->val_flags |= FILTER_GENERIC_1_IPV4;\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_ipv4_mask;\n+\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,\n+\t       sizeof(struct ipv4_hdr));\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,\n+\t       sizeof(struct ipv4_hdr));\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy IPv6 item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   Must be 0. 
Don't support inner IPv6 filtering.\n+ */\n+static int\n+enic_copy_item_ipv6_v2(const struct rte_flow_item *item,\n+\t\t       struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_ipv6 *spec = item->spec;\n+\tconst struct rte_flow_item_ipv6 *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\tif (*inner_ofst)\n+\t\treturn ENOTSUP;\n+\n+\t/* Match IPv6 */\n+\tgp->mask_flags |= FILTER_GENERIC_1_IPV6;\n+\tgp->val_flags |= FILTER_GENERIC_1_IPV6;\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_ipv6_mask;\n+\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,\n+\t       sizeof(struct ipv6_hdr));\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,\n+\t       sizeof(struct ipv6_hdr));\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy UDP item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   Must be 0. 
Don't support inner UDP filtering.\n+ */\n+static int\n+enic_copy_item_udp_v2(const struct rte_flow_item *item,\n+\t\t      struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_udp *spec = item->spec;\n+\tconst struct rte_flow_item_udp *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\tif (*inner_ofst)\n+\t\treturn ENOTSUP;\n+\n+\t/* Match UDP */\n+\tgp->mask_flags |= FILTER_GENERIC_1_UDP;\n+\tgp->val_flags |= FILTER_GENERIC_1_UDP;\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_udp_mask;\n+\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,\n+\t       sizeof(struct udp_hdr));\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,\n+\t       sizeof(struct udp_hdr));\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy TCP item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   Must be 0. 
Don't support inner TCP filtering.\n+ */\n+static int\n+enic_copy_item_tcp_v2(const struct rte_flow_item *item,\n+\t\t      struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_tcp *spec = item->spec;\n+\tconst struct rte_flow_item_tcp *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\tif (*inner_ofst)\n+\t\treturn ENOTSUP;\n+\n+\t/* Match TCP */\n+\tgp->mask_flags |= FILTER_GENERIC_1_TCP;\n+\tgp->val_flags |= FILTER_GENERIC_1_TCP;\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\treturn ENOTSUP;\n+\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,\n+\t       sizeof(struct tcp_hdr));\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,\n+\t       sizeof(struct tcp_hdr));\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy SCTP item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   Must be 0. 
Don't support inner SCTP filtering.\n+ */\n+static int\n+enic_copy_item_sctp_v2(const struct rte_flow_item *item,\n+\t\t       struct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_sctp *spec = item->spec;\n+\tconst struct rte_flow_item_sctp *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\tif (*inner_ofst)\n+\t\treturn ENOTSUP;\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_sctp_mask;\n+\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,\n+\t       sizeof(struct sctp_hdr));\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,\n+\t       sizeof(struct sctp_hdr));\n+\treturn 0;\n+}\n+\n+/**\n+ * Copy UDP item into version 2 NIC filter.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param enic_filter[out]\n+ *   Partially filled in NIC filter structure.\n+ * @param inner_ofst[in]\n+ *   Must be 0. VxLAN headers always start at the beginning of L5.\n+ */\n+static int\n+enic_copy_item_vxlan_v2(const struct rte_flow_item *item,\n+\t\t\tstruct filter_v2 *enic_filter, u8 *inner_ofst)\n+{\n+\tconst struct rte_flow_item_vxlan *spec = item->spec;\n+\tconst struct rte_flow_item_vxlan *mask = item->mask;\n+\tstruct filter_generic_1 *gp = &enic_filter->u.generic_1;\n+\n+\tFLOW_TRACE();\n+\n+\tif (*inner_ofst)\n+\t\treturn EINVAL;\n+\n+\t/* Match all if no spec */\n+\tif (!spec)\n+\t\treturn 0;\n+\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_vxlan_mask;\n+\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,\n+\t       sizeof(struct vxlan_hdr));\n+\tmemcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,\n+\t       sizeof(struct vxlan_hdr));\n+\n+\t*inner_ofst = sizeof(struct vxlan_hdr);\n+\treturn 0;\n+}\n+\n+/**\n+ * Return 1 if current item is valid on top of the previous one.\n+ *\n+ * @param prev_item[in]\n+ *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this\n+ *   is the first 
item.\n+ * @param item_info[in]\n+ *   Info about this item, like valid previous items.\n+ * @param is_first[in]\n+ *   True if this the first item in the pattern.\n+ */\n+static int\n+item_stacking_valid(enum rte_flow_item_type prev_item,\n+\t\t    const struct enic_items *item_info, u8 is_first_item)\n+{\n+\tenum rte_flow_item_type const *allowed_items = item_info->prev_items;\n+\n+\tFLOW_TRACE();\n+\n+\tfor (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {\n+\t\tif (prev_item == *allowed_items)\n+\t\t\treturn 1;\n+\t}\n+\n+\t/* This is the first item in the stack. Check if that's cool */\n+\tif (is_first_item && item_info->valid_start_item)\n+\t\treturn 1;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Build the intenal enic filter structure from the provided pattern. The\n+ * pattern is validated as the items are copied.\n+ *\n+ * @param pattern[in]\n+ * @param items_info[in]\n+ *   Info about this NICs item support, like valid previous items.\n+ * @param enic_filter[out]\n+ *   NIC specfilc filters derived from the pattern.\n+ * @param error[out]\n+ */\n+static int\n+enic_copy_filter(const struct rte_flow_item pattern[],\n+\t\t const struct enic_items *items_info,\n+\t\t struct filter_v2 *enic_filter,\n+\t\t struct rte_flow_error *error)\n+{\n+\tint ret;\n+\tconst struct rte_flow_item *item = pattern;\n+\tu8 inner_ofst = 0; /* If encapsulated, ofst into L5 */\n+\tenum rte_flow_item_type prev_item;\n+\tconst struct enic_items *item_info;\n+\n+\tenic_filter->type = FILTER_DPDK_1;\n+\tu8 is_first_item = 1;\n+\n+\tFLOW_TRACE();\n+\n+\tprev_item = 0;\n+\n+\tfor (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {\n+\t\t/* Get info about how to validate and copy the item. 
If NULL\n+\t\t * is returned the nic does not support the item.\n+\t\t */\n+\t\tif (item->type == RTE_FLOW_ITEM_TYPE_VOID)\n+\t\t\tcontinue;\n+\n+\t\titem_info = &items_info[item->type];\n+\n+\t\t/* check to see if item stacking is valid */\n+\t\tif (!item_stacking_valid(prev_item, item_info, is_first_item))\n+\t\t\tgoto stacking_error;\n+\n+\t\tret = item_info->copy_item(item, enic_filter, &inner_ofst);\n+\t\tif (ret)\n+\t\t\tgoto item_not_supported;\n+\t\tprev_item = item->type;\n+\t\tis_first_item = 0;\n+\t}\n+\treturn 0;\n+\n+item_not_supported:\n+\trte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t   NULL, \"enic type error\");\n+\treturn -rte_errno;\n+\n+stacking_error:\n+\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t   item, \"stacking error\");\n+\treturn -rte_errno;\n+}\n+/**\n+ * Build the intenal version 2 NIC action structure from the provided pattern.\n+ * The pattern is validated as the items are copied.\n+ *\n+ * @param actions[in]\n+ * @param enic_action[out]\n+ *   NIC specfilc actions derived from the actions.\n+ * @param error[out]\n+ */\n+static int\n+enic_copy_action_v2(const struct rte_flow_action actions[],\n+\t\t    struct filter_action_v2 *enic_action)\n+{\n+\tFLOW_TRACE();\n+\n+\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\n+\t\tswitch (actions->type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE: {\n+\t\t\tconst struct rte_flow_action_queue *queue =\n+\t\t\t\t(const struct rte_flow_action_queue *)\n+\t\t\t\tactions->conf;\n+\t\t\tenic_action->rq_idx =\n+\t\t\t\tenic_rte_rq_idx_to_sop_idx(queue->index);\n+\t\t\tenic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;\n+\t\t\tbreak;\n+\t\t}\n+\t\tcase RTE_FLOW_ACTION_TYPE_MARK: {\n+\t\t\tconst struct rte_flow_action_mark *mark =\n+\t\t\t\t(const struct rte_flow_action_mark *)\n+\t\t\t\tactions->conf;\n+\n+\t\t\t/* ENIC_MAGIC_FILTER_ID is reserved and is the highest\n+\t\t\t * in the range of allows mark ids.\n+\t\t\t */\n+\t\t\tif (mark->id 
>= ENIC_MAGIC_FILTER_ID)\n+\t\t\t\treturn EINVAL;\n+\t\t\tenic_action->filter_id = mark->id;\n+\t\t\tenic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;\n+\t\t\tbreak;\n+\t\t}\n+\t\tcase RTE_FLOW_ACTION_TYPE_FLAG: {\n+\t\t\tenic_action->filter_id = ENIC_MAGIC_FILTER_ID;\n+\t\t\tenic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;\n+\t\t\tbreak;\n+\t\t}\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\t\tcontinue;\n+\t\tdefault:\n+\t\t\tRTE_ASSERT(0);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\tenic_action->type = FILTER_ACTION_V2;\n+\treturn 0;\n+}\n+\n+/** Check if the action is supported */\n+static int\n+enic_match_action(const struct rte_flow_action *action,\n+\t\t  const enum rte_flow_action_type *supported_actions)\n+{\n+\tfor (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;\n+\t     supported_actions++) {\n+\t\tif (action->type == *supported_actions)\n+\t\t\treturn 1;\n+\t}\n+\treturn 0;\n+}\n+\n+/** Get the NIC filter capabilties structure */\n+static const struct enic_filter_cap *\n+enic_get_filter_cap(struct enic *enic)\n+{\n+\t/* FIXME: only support advanced filters for now */\n+\tif (enic->flow_filter_mode != FILTER_DPDK_1)\n+\t\treturn (const struct enic_filter_cap *)NULL;\n+\n+\tif (enic->flow_filter_mode)\n+\t\treturn &enic_filter_cap[enic->flow_filter_mode];\n+\n+\treturn (const struct enic_filter_cap *)NULL;\n+}\n+\n+/** Get the actions for this NIC version. 
*/\n+static const struct enic_action_cap *\n+enic_get_action_cap(struct enic *enic)\n+{\n+\tstatic const struct enic_action_cap *ea;\n+\n+\tif (enic->filter_tags)\n+\t\tea = &enic_action_cap[FILTER_ACTION_V2_ALL];\n+\treturn ea;\n+}\n+/**\n+ * Internal flow parse/validate function.\n+ *\n+ * @param dev[in]\n+ *   This device pointer.\n+ * @param pattern[in]\n+ * @param actions[in]\n+ * @param error[out]\n+ * @param enic_filter[out]\n+ *   Internal NIC filter structure pointer.\n+ * @param enic_action[out]\n+ *   Internal NIC action structure pointer.\n+ */\n+static int\n+enic_flow_parse(struct rte_eth_dev *dev,\n+\t\tconst struct rte_flow_attr *attrs,\n+\t\tconst struct rte_flow_item pattern[],\n+\t\tconst struct rte_flow_action actions[],\n+\t\tstruct rte_flow_error *error,\n+\t\tstruct filter_v2 *enic_filter,\n+\t\tstruct filter_action_v2 *enic_action)\n+{\n+\tunsigned int ret = 0;\n+\tstruct enic *enic = pmd_priv(dev);\n+\tconst struct enic_filter_cap *enic_filter_cap;\n+\tconst struct enic_action_cap *enic_action_cap;\n+\tconst struct rte_flow_action *action;\n+\n+\tFLOW_TRACE();\n+\n+\tmemset(enic_filter, 0, sizeof(*enic_filter));\n+\tmemset(enic_action, 0, sizeof(*enic_action));\n+\n+\tif (!pattern) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\t\t   NULL, \"No pattern specified\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION_NUM,\n+\t\t\t\t   NULL, \"No action specified\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attrs) {\n+\t\tif (attrs->group) {\n+\t\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"priority groups are not supported\");\n+\t\t\treturn -rte_errno;\n+\t\t} else if (attrs->priority) {\n+\t\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"priorities are not 
supported\");\n+\t\t\treturn -rte_errno;\n+\t\t} else if (attrs->egress) {\n+\t\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"egress is not supported\");\n+\t\t\treturn -rte_errno;\n+\t\t} else if (!attrs->ingress) {\n+\t\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"only ingress is supported\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t} else {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL, \"No attribute specified\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Verify Actions. */\n+\tenic_action_cap =  enic_get_action_cap(enic);\n+\tfor (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;\n+\t     action++) {\n+\t\tif (action->type == RTE_FLOW_ACTION_TYPE_VOID)\n+\t\t\tcontinue;\n+\t\telse if (!enic_match_action(action, enic_action_cap->actions))\n+\t\t\tbreak;\n+\t}\n+\tif (action->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\trte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   action, \"Invalid action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\tret = enic_action_cap->copy_fn(actions, enic_action);\n+\tif (ret) {\n+\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t   NULL, \"Unsupported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Verify Flow items. 
If copying the filter from flow format to enic\n+\t * format fails, the flow is not supported\n+\t */\n+\tenic_filter_cap =  enic_get_filter_cap(enic);\n+\tif (enic_filter_cap == NULL) {\n+\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t   NULL, \"Flow API not available\");\n+\t\treturn -rte_errno;\n+\t}\n+\tret = enic_copy_filter(pattern, enic_filter_cap->item_info,\n+\t\t\t\t       enic_filter, error);\n+\treturn ret;\n+}\n+\n+/**\n+ * Push filter/action to the NIC.\n+ *\n+ * @param enic[in]\n+ *   Device structure pointer.\n+ * @param enic_filter[in]\n+ *   Internal NIC filter structure pointer.\n+ * @param enic_action[in]\n+ *   Internal NIC action structure pointer.\n+ * @param error[out]\n+ */\n+static struct rte_flow *\n+enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,\n+\t\t   struct filter_action_v2 *enic_action,\n+\t\t   struct rte_flow_error *error)\n+{\n+\tstruct rte_flow *flow;\n+\tint ret;\n+\tu16 entry;\n+\n+\tFLOW_TRACE();\n+\n+\tflow = rte_calloc(__func__, 1, sizeof(*flow), 0);\n+\tif (!flow) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t   NULL, \"cannot allocate flow memory\");\n+\t\treturn NULL;\n+\t}\n+\n+\t/* entry[in] is the queue id, entry[out] is the filter Id for delete */\n+\tentry = enic_action->rq_idx;\n+\tret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,\n+\t\t\t\t  enic_action);\n+\tif (!ret) {\n+\t\tflow->enic_filter_id = entry;\n+\t\tflow->enic_filter = *enic_filter;\n+\t} else {\n+\t\trte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t   NULL, \"vnic_dev_classifier error\");\n+\t\trte_free(flow);\n+\t\treturn NULL;\n+\t}\n+\treturn flow;\n+}\n+\n+/**\n+ * Remove filter/action from the NIC.\n+ *\n+ * @param enic[in]\n+ *   Device structure pointer.\n+ * @param filter_id[in]\n+ *   Id of NIC filter.\n+ * @param enic_action[in]\n+ *   Internal NIC action structure pointer.\n+ * @param error[out]\n+ */\n+static 
int\n+enic_flow_del_filter(struct enic *enic, u16 filter_id,\n+\t\t   struct rte_flow_error *error)\n+{\n+\tint ret;\n+\n+\tFLOW_TRACE();\n+\n+\tret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);\n+\tif (!ret)\n+\t\trte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t   NULL, \"vnic_dev_classifier failed\");\n+\treturn ret;\n+}\n+\n /*\n  * The following functions are callbacks for Generic flow API.\n  */\n@@ -71,15 +940,15 @@ enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,\n \t\t   const struct rte_flow_action actions[],\n \t\t   struct rte_flow_error *error)\n {\n-\t(void)dev;\n-\t(void)attrs;\n-\t(void)pattern;\n-\t(void)actions;\n-\t(void)error;\n+\tstruct filter_v2 enic_filter;\n+\tstruct filter_action_v2 enic_action;\n+\tint ret;\n \n \tFLOW_TRACE();\n \n-\treturn 0;\n+\tret = enic_flow_parse(dev, attrs, pattern, actions, error,\n+\t\t\t       &enic_filter, &enic_action);\n+\treturn ret;\n }\n \n /**\n@@ -95,15 +964,27 @@ enic_flow_create(struct rte_eth_dev *dev,\n \t\t const struct rte_flow_action actions[],\n \t\t struct rte_flow_error *error)\n {\n-\t(void)dev;\n-\t(void)attrs;\n-\t(void)pattern;\n-\t(void)actions;\n-\t(void)error;\n+\tint ret;\n+\tstruct filter_v2 enic_filter;\n+\tstruct filter_action_v2 enic_action;\n+\tstruct rte_flow *flow;\n+\tstruct enic *enic = pmd_priv(dev);\n \n \tFLOW_TRACE();\n \n-\treturn NULL;\n+\tret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,\n+\t\t\t      &enic_action);\n+\tif (ret < 0)\n+\t\treturn NULL;\n+\n+\trte_spinlock_lock(&enic->flows_lock);\n+\tflow = enic_flow_add_filter(enic, &enic_filter, &enic_action,\n+\t\t\t\t    error);\n+\tif (flow)\n+\t\tLIST_INSERT_HEAD(&enic->flows, flow, next);\n+\trte_spinlock_unlock(&enic->flows_lock);\n+\n+\treturn flow;\n }\n \n /**\n@@ -116,11 +997,14 @@ static int\n enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t  __rte_unused struct rte_flow_error *error)\n 
{\n-\t(void)dev;\n-\t(void)flow;\n+\tstruct enic *enic = pmd_priv(dev);\n \n \tFLOW_TRACE();\n \n+\trte_spinlock_lock(&enic->flows_lock);\n+\tenic_flow_del_filter(enic, flow->enic_filter_id, error);\n+\tLIST_REMOVE(flow, next);\n+\trte_spinlock_unlock(&enic->flows_lock);\n \treturn 0;\n }\n \n@@ -133,11 +1017,19 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,\n static int\n enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)\n {\n-\t(void)dev;\n-\t(void)error;\n+\tstruct rte_flow *flow;\n+\tstruct enic *enic = pmd_priv(dev);\n \n \tFLOW_TRACE();\n \n+\trte_spinlock_lock(&enic->flows_lock);\n+\n+\twhile (!LIST_EMPTY(&enic->flows)) {\n+\t\tflow = LIST_FIRST(&enic->flows);\n+\t\tenic_flow_del_filter(enic, flow->enic_filter_id, error);\n+\t\tLIST_REMOVE(flow, next);\n+\t}\n+\trte_spinlock_unlock(&enic->flows_lock);\n \treturn 0;\n }\n \ndiff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c\nindex 570b7b69a..4c515f601 100644\n--- a/drivers/net/enic/enic_main.c\n+++ b/drivers/net/enic/enic_main.c\n@@ -1316,6 +1316,9 @@ static int enic_dev_init(struct enic *enic)\n \n \tvnic_dev_set_reset_flag(enic->vdev, 0);\n \n+\tLIST_INIT(&enic->flows);\n+\trte_spinlock_init(&enic->flows_lock);\n+\n \t/* set up link status checking */\n \tvnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */\n \ndiff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c\nindex 867bd25c4..e4b80d49c 100644\n--- a/drivers/net/enic/enic_res.c\n+++ b/drivers/net/enic/enic_res.c\n@@ -104,6 +104,21 @@ int enic_get_vnic_config(struct enic *enic)\n \tdev_info(enic, \"Advanced Filters %savailable\\n\", ((enic->adv_filters)\n \t\t ? 
\"\" : \"not \"));\n \n+\terr = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,\n+\t\t\t\t\t   &enic->filter_tags);\n+\tif (err) {\n+\t\tdev_err(enic_get_dev(enic),\n+\t\t\t\"Error getting filter modes, %d\\n\", err);\n+\t\treturn err;\n+\t}\n+\n+\tdev_info(enic, \"Flow api filter mode: %s, Filter tagging %savailable\\n\",\n+\t\t((enic->flow_filter_mode == FILTER_DPDK_1) ? \"DPDK\" :\n+\t\t((enic->flow_filter_mode == FILTER_USNIC_IP) ? \"USNIC\" :\n+\t\t((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? \"5TUPLE\" :\n+\t\t\"NONE\"))),\n+\t\t((enic->filter_tags) ? \"\" : \"not \"));\n+\n \tc->wq_desc_count =\n \t\tmin_t(u32, ENIC_MAX_WQ_DESCS,\n \t\tmax_t(u32, ENIC_MIN_WQ_DESCS,\ndiff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c\nindex 343dabc64..5f59e3726 100644\n--- a/drivers/net/enic/enic_rxtx.c\n+++ b/drivers/net/enic/enic_rxtx.c\n@@ -253,8 +253,20 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)\n \t}\n \tmbuf->vlan_tci = vlan_tci;\n \n-\t/* RSS flag */\n-\tif (enic_cq_rx_desc_rss_type(cqrd)) {\n+\tif ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {\n+\t\tstruct cq_enet_rq_clsf_desc *clsf_cqd;\n+\t\tuint16_t filter_id;\n+\t\tclsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;\n+\t\tfilter_id = clsf_cqd->filter_id;\n+\t\tif (filter_id) {\n+\t\t\tpkt_flags |= PKT_RX_FDIR;\n+\t\t\tif (filter_id != ENIC_MAGIC_FILTER_ID) {\n+\t\t\t\tmbuf->hash.fdir.hi = clsf_cqd->filter_id;\n+\t\t\t\tpkt_flags |= PKT_RX_FDIR_ID;\n+\t\t\t}\n+\t\t}\n+\t} else if (enic_cq_rx_desc_rss_type(cqrd)) {\n+\t\t/* RSS flag */\n \t\tpkt_flags |= PKT_RX_RSS_HASH;\n \t\tmbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);\n \t}\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "3/7"
    ]
}