From patchwork Wed Jun 27 15:07:50 2018
X-Patchwork-Submitter: Nélio Laranjeiro
X-Patchwork-Id: 41692
X-Patchwork-Delegate: shahafs@mellanox.com
From: Nelio Laranjeiro
To: dev@dpdk.org, Adrien Mazarguil, Yongseok Koh
Date: Wed, 27 Jun 2018 17:07:50 +0200
Message-Id: <6190b2d787d3a9d2095e2eed17db1c20b2e3c4e1.1530111623.git.nelio.laranjeiro@6wind.com>
X-Mailer: git-send-email 2.18.0
Subject: [dpdk-dev] [PATCH v2 18/20] net/mlx5: add flow GRE item

Signed-off-by: Nelio Laranjeiro
---
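Note (not part of the commit message): below is a minimal, illustrative
sketch of how an application could use the GRE pattern item enabled by
this patch, assuming the port and its RX queues are already configured.
The function name, port id, queue index and the matched GRE protocol
value are made up for the example; only the rte_flow calls and
structures come from the public API. The outer IPv4 item is present
because this patch rejects a GRE item without a preceding outer L3
layer, and IPv6 inside the GRE tunnel is refused as explained in the
diff below.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative only: steer GRE-encapsulated IPv4 traffic to RX queue 1. */
static int
gre_steering_example(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* The GRE "protocol" field carries an EtherType; 0x0800 = inner IPv4. */
	struct rte_flow_item_gre gre_spec = { .protocol = RTE_BE16(0x0800) };
	struct rte_flow_item_gre gre_mask = { .protocol = RTE_BE16(0xffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE,
		  .spec = &gre_spec, .mask = &gre_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Let the PMD validate the rule first, then create it. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return -1;
	return rte_flow_create(port_id, &attr, pattern, actions, err) ? 0 : -1;
}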
 drivers/net/mlx5/mlx5_flow.c | 191 ++++++++++++++++++++++++++++++++++-
 1 file changed, 186 insertions(+), 5 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 47c55b426..636aaabe8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -54,6 +54,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 /* Pattern tunnel Layer bits. */
 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
 
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
@@ -66,7 +67,8 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 /* Tunnel masks. */
 #define MLX5_FLOW_LAYER_TUNNEL \
-	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
+	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+	 MLX5_FLOW_LAYER_GRE)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -215,6 +217,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 	},
 };
 
+/* Tunnel protocol values. */
+#define MLX5_PROTOCOL_GRE 47
+
 /** Handles information leading to a drop fate. */
 struct mlx5_flow_verbs {
 	LIST_ENTRY(mlx5_flow_verbs) next;
@@ -1005,12 +1010,23 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 					  item,
 					  "L3 cannot follow an L4"
 					  " layer");
+	/*
+	 * IPv6 is not recognised by the NIC inside a GRE tunnel.
+	 * Such support has to be disabled as the rule will be
+	 * accepted.  Tested with Mellanox OFED 4.3-3.0.2.1
+	 */
+	if (tunnel && layers & MLX5_FLOW_LAYER_GRE)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "IPv6 inside a GRE tunnel is"
+					  " not recognised.");
 	if (!mask)
 		mask = &rte_flow_item_ipv6_mask;
-	ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
-				      (const uint8_t *)&nic_mask,
-				      sizeof(struct rte_flow_item_ipv6),
-				      error);
+	ret = mlx5_flow_item_validate
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&nic_mask,
+		 sizeof(struct rte_flow_item_ipv6), error);
 	if (ret < 0)
 		return ret;
 	}
@@ -1411,6 +1427,168 @@ mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
 	return size;
 }
 
+/**
+ * Update the protocol in Verbs IPv4 spec.
+ *
+ * @param attr[in, out]
+ *   Pointer to Verbs attributes structure.
+ * @param protocol[in]
+ *   Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ipv4_protocol_update(struct ibv_flow_attr *attr,
+					uint8_t protocol)
+{
+	unsigned int i;
+	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV4_EXT;
+	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+	if (!attr)
+		return;
+	for (i = 0; i != attr->num_of_specs; ++i) {
+		if (hdr->type == search) {
+			struct ibv_flow_spec_ipv4_ext *ip =
+				(struct ibv_flow_spec_ipv4_ext *)hdr;
+
+			if (!ip->val.proto) {
+				ip->val.proto = protocol;
+				ip->mask.proto = 0xff;
+			}
+			break;
+		}
+		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+	}
+}
+
+/**
+ * Update the protocol in Verbs IPv6 spec.
+ *
+ * @param attr[in, out]
+ *   Pointer to Verbs attributes structure.
+ * @param protocol[in]
+ *   Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ipv6_protocol_update(struct ibv_flow_attr *attr,
+					uint8_t protocol)
+{
+	unsigned int i;
+	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV6;
+	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+	if (!attr)
+		return;
+	for (i = 0; i != attr->num_of_specs; ++i) {
+		if (hdr->type == search) {
+			struct ibv_flow_spec_ipv6 *ip =
+				(struct ibv_flow_spec_ipv6 *)hdr;
+
+			if (!ip->val.next_hdr) {
+				ip->val.next_hdr = protocol;
+				ip->mask.next_hdr = 0xff;
+			}
+			break;
+		}
+		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+	}
+}
+
+/**
+ * Validate GRE layer and possibly create the Verbs specification.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param item[in]
+ *   Item specification.
+ * @param flow[in, out]
+ *   Pointer to flow structure.
+ * @param flow_size[in]
+ *   Size in bytes of the available space to store the flow information.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Size in bytes necessary for the conversion, a negative errno value
+ *   otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+		   struct rte_flow *flow, const size_t flow_size,
+		   struct rte_flow_error *error)
+{
+	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+	const struct rte_flow_item_gre *spec = item->spec;
+	const struct rte_flow_item_gre *mask = item->mask;
+	const uint32_t layers = mlx5_flow_layers(flow);
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	unsigned int size = sizeof(struct ibv_flow_spec_gre);
+	struct ibv_flow_spec_gre tunnel = {
+		.type = IBV_FLOW_SPEC_GRE,
+		.size = size,
+	};
+#else
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel tunnel = {
+		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+#endif
+	int ret;
+
+	if (layers & MLX5_FLOW_LAYER_TUNNEL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "a tunnel is already present");
+	if (!(layers & MLX5_FLOW_LAYER_OUTER_L3))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L3 Layer is missing");
+	if (!mask)
+		mask = &rte_flow_item_gre_mask;
+	ret = mlx5_flow_item_validate
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_gre_mask,
+		 sizeof(struct rte_flow_item_gre), error);
+	if (ret < 0)
+		return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	if (spec) {
+		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+		tunnel.val.protocol = spec->protocol;
+		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+		tunnel.mask.protocol = mask->protocol;
+		/* Remove unwanted bits from values. */
+		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+		tunnel.val.protocol &= tunnel.mask.protocol;
+		tunnel.val.key &= tunnel.mask.key;
+	}
+#else
+	if (spec && (spec->protocol & mask->protocol))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "without MPLS support the"
+					  " specification cannot be used for"
+					  " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+	if (size <= flow_size) {
+		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+			mlx5_flow_item_gre_ipv4_protocol_update
+				(verbs->attr, MLX5_PROTOCOL_GRE);
+		else
+			mlx5_flow_item_gre_ipv6_protocol_update
+				(verbs->attr, MLX5_PROTOCOL_GRE);
+		mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+	}
+	mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_GRE);
+	flow->ptype = RTE_PTYPE_TUNNEL_GRE;
+	return size;
+}
+
 /**
  * Validate items provided by the user.
  *
@@ -1469,6 +1647,9 @@ mlx5_flow_items(struct rte_eth_dev *dev,
 			ret = mlx5_flow_item_vxlan_gpe(dev, items, flow,
 						       remain, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_GRE:
+			ret = mlx5_flow_item_gre(items, flow, remain, error);
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,