From patchwork Sat Mar 2 10:42:49 2019
X-Patchwork-Submitter: "Hyong Youb Kim (hyonkim)"
X-Patchwork-Id: 50757
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Hyong Youb Kim <hyonkim@cisco.com>
To: Ferruh Yigit
Cc: dev@dpdk.org, John Daley, Hyong Youb Kim, stable@dpdk.org
Date: Sat, 2 Mar 2019 02:42:49 -0800
Message-Id: <20190302104251.32565-12-hyonkim@cisco.com>
X-Mailer: git-send-email 2.16.2
In-Reply-To: <20190302104251.32565-1-hyonkim@cisco.com>
References: <20190302104251.32565-1-hyonkim@cisco.com>
Subject: [dpdk-dev] [PATCH v2 11/13] net/enic: fix a couple issues with VXLAN match

The filter API does not have flags for "match VXLAN". Explicitly set
the UDP destination port and mask in the L4 pattern. Otherwise, UDP
packets with non-VXLAN ports may be falsely reported as VXLAN.

1400 series VIC adapters have hardware VXLAN parsing. The L5 buffer on
the NIC starts with the inner Ethernet header, and the VXLAN header is
now in the L4 buffer following the UDP header. So the VXLAN spec/mask
needs to be in the L4 pattern, not L5. Older models still expect the
VXLAN spec/mask in the L5 pattern. Fix up the L4/L5 patterns
accordingly.
Fixes: 6ced137607d0 ("net/enic: flow API for NICs with advanced filters enabled")
Cc: stable@dpdk.org
Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
---
 drivers/net/enic/enic_flow.c | 46 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index ffc6ce1da..da43b31dc 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -830,12 +830,23 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
 	const struct rte_flow_item_vxlan *spec = item->spec;
 	const struct rte_flow_item_vxlan *mask = item->mask;
 	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+	struct udp_hdr *udp;
 
 	FLOW_TRACE();
 
 	if (*inner_ofst)
 		return EINVAL;
 
+	/*
+	 * The NIC filter API has no flags for "match vxlan". Set UDP port to
+	 * avoid false positives.
+	 */
+	gp->mask_flags |= FILTER_GENERIC_1_UDP;
+	gp->val_flags |= FILTER_GENERIC_1_UDP;
+	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
+	udp->dst_port = 0xffff;
+	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
+	udp->dst_port = RTE_BE16(4789);
 	/* Match all if no spec */
 	if (!spec)
 		return 0;
@@ -931,6 +942,36 @@ item_stacking_valid(enum rte_flow_item_type prev_item,
 	return 0;
 }
 
+/*
+ * Fix up the L5 layer.. HW vxlan parsing removes vxlan header from L5.
+ * Instead it is in L4 following the UDP header. Append the vxlan
+ * pattern to L4 (udp) and shift any inner packet pattern in L5.
+ */
+static void
+fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
+	       uint8_t inner_ofst)
+{
+	uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
+	uint8_t inner;
+	uint8_t vxlan;
+
+	if (!(inner_ofst > 0 && enic->vxlan))
+		return;
+	FLOW_TRACE();
+	vxlan = sizeof(struct vxlan_hdr);
+	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+	       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
+	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+	       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
+	inner = inner_ofst - vxlan;
+	memset(layer, 0, sizeof(layer));
+	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
+	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
+	memset(layer, 0, sizeof(layer));
+	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
+	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
+}
+
 /**
  * Build the intenal enic filter structure from the provided pattern. The
  * pattern is validated as the items are copied.
@@ -945,6 +986,7 @@ item_stacking_valid(enum rte_flow_item_type prev_item,
 static int
 enic_copy_filter(const struct rte_flow_item pattern[],
 		 const struct enic_filter_cap *cap,
+		 struct enic *enic,
 		 struct filter_v2 *enic_filter,
 		 struct rte_flow_error *error)
 {
@@ -989,6 +1031,8 @@ enic_copy_filter(const struct rte_flow_item pattern[],
 		prev_item = item->type;
 		is_first_item = 0;
 	}
+	fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);
+
 	return 0;
 
 item_not_supported:
@@ -1481,7 +1525,7 @@ enic_flow_parse(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 	enic_filter->type = enic->flow_filter_mode;
-	ret = enic_copy_filter(pattern, enic_filter_cap,
+	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
 			       enic_filter, error);
 	return ret;
 }
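
---
Note (not part of the patch): for readers less familiar with the rte_flow side
of this, a minimal pattern that exercises the fixed code path could look like
the sketch below. The VNI value and variable names are arbitrary illustrations,
not something taken from this series; only the public rte_flow API of this
release is assumed.

/*
 * Illustrative rte_flow pattern: outer eth/ipv4/udp/vxlan plus an inner
 * eth item. With this fix, the PMD forces UDP dst_port 4789 into the L4
 * match, and on 1400 series VICs moves the VXLAN spec/mask from L5 into
 * L4 behind the UDP header before programming the filter.
 */
#include <rte_flow.h>

static const struct rte_flow_item_vxlan vxlan_spec = {
	.vni = { 0x00, 0x00, 0x2a },	/* arbitrary VNI 42 */
};
static const struct rte_flow_item_vxlan vxlan_mask = {
	.vni = { 0xff, 0xff, 0xff },
};

static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.spec = &vxlan_spec,
		.mask = &vxlan_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner Ethernet header */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};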