From patchwork Thu Apr 12 14:31:45 2018
X-Patchwork-Submitter: Nélio Laranjeiro
X-Patchwork-Id: 37978
X-Patchwork-Delegate: shahafs@mellanox.com
From: Nelio Laranjeiro
To: dev@dpdk.org, Yongseok Koh
Cc: Adrien Mazarguil
Date: Thu, 12 Apr 2018 16:31:45 +0200
Message-Id: <38648562ed1e7063dbd687edadac96e39bdcac31.1523543393.git.nelio.laranjeiro@6wind.com>
Subject: [dpdk-dev] [PATCH 1/2] net/mlx5: split L3/L4 in flow director

This will help to bring back the mask handler which was removed when this
feature was rewritten on top of rte_flow.
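In short, the single switch on fdir_filter->input.flow_type is split into two
consecutive switches: the first groups all IPv4 (respectively IPv6) flow types
to fill the L3 item, the second fills the L4 ports only for the UDP/TCP flow
types. The self-contained sketch below illustrates that control-flow shape
only; the names in it (flow_type, conv_attr, convert, ...) are simplified
stand-ins invented for this illustration, not the mlx5 driver's types, and the
UDP/TCP items are collapsed into a single has_l4 flag for brevity.

/*
 * Illustrative only: the same L3/L4 split as the diff below, applied to
 * simplified stand-in types rather than the mlx5 driver's structures.
 */
#include <stdint.h>
#include <stdio.h>

enum flow_type {
	FLOW_IPV4_UDP, FLOW_IPV4_TCP, FLOW_IPV4_OTHER,
	FLOW_IPV6_UDP, FLOW_IPV6_TCP, FLOW_IPV6_OTHER,
};

struct conv_attr {
	int l3_is_ipv6;    /* stands in for the IPv4/IPv6 flow item */
	int has_l4;        /* stands in for the UDP/TCP flow item */
	uint16_t src_port;
	uint16_t dst_port;
};

static int
convert(enum flow_type type, uint16_t sport, uint16_t dport,
	struct conv_attr *attr)
{
	/* Handle L3: one case label per flow type, grouped by IP version. */
	switch (type) {
	case FLOW_IPV4_UDP:
	case FLOW_IPV4_TCP:
	case FLOW_IPV4_OTHER:
		attr->l3_is_ipv6 = 0;
		break;
	case FLOW_IPV6_UDP:
	case FLOW_IPV6_TCP:
	case FLOW_IPV6_OTHER:
		attr->l3_is_ipv6 = 1;
		break;
	default:
		return -1;
	}
	/* Handle L4: only UDP/TCP flow types carry ports, *_OTHER do not. */
	switch (type) {
	case FLOW_IPV4_UDP:
	case FLOW_IPV4_TCP:
	case FLOW_IPV6_UDP:
	case FLOW_IPV6_TCP:
		attr->has_l4 = 1;
		attr->src_port = sport;
		attr->dst_port = dport;
		break;
	default:
		attr->has_l4 = 0;
		break;
	}
	return 0;
}

int
main(void)
{
	struct conv_attr attr = { 0 };

	convert(FLOW_IPV6_TCP, 1234, 80, &attr);
	printf("ipv6=%d has_l4=%d ports=%u->%u\n", attr.l3_is_ipv6,
	       attr.has_l4, (unsigned)attr.src_port, (unsigned)attr.dst_port);
	return 0;
}

Keeping the L3 and L4 handling in separate switches should make it possible to
attach the flow director masks to each item independently, which is what the
mask handler mentioned above needs.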
Signed-off-by: Nelio Laranjeiro
Acked-by: Adrien Mazarguil
---
 drivers/net/mlx5/mlx5_flow.c | 112 ++++++++++++-----------------------
 1 file changed, 37 insertions(+), 75 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7ef68de49..7ba643b83 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2695,8 +2695,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 	attributes->queue.index = fdir_filter->action.rx_queue;
+	/* Handle L3. */
 	switch (fdir_filter->input.flow_type) {
 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
 		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
 			.src_addr = input->flow.udp4_flow.ip.src_ip,
 			.dst_addr = input->flow.udp4_flow.ip.dst_ip,
@@ -2704,15 +2707,44 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 			.type_of_service = input->flow.udp4_flow.ip.tos,
 			.next_proto_id = input->flow.udp4_flow.ip.proto,
 		};
-		attributes->l4.udp.hdr = (struct udp_hdr){
-			.src_port = input->flow.udp4_flow.src_port,
-			.dst_port = input->flow.udp4_flow.dst_port,
-		};
 		attributes->items[1] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_IPV4,
 			.spec = &attributes->l3,
 			.mask = &attributes->l3,
 		};
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+			.hop_limits = input->flow.udp6_flow.ip.hop_limits,
+			.proto = input->flow.udp6_flow.ip.proto,
+		};
+		memcpy(attributes->l3.ipv6.hdr.src_addr,
+		       input->flow.udp6_flow.ip.src_ip,
+		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+		memcpy(attributes->l3.ipv6.hdr.dst_addr,
+		       input->flow.udp6_flow.ip.dst_ip,
+		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+		attributes->items[1] = (struct rte_flow_item){
+			.type = RTE_FLOW_ITEM_TYPE_IPV6,
+			.spec = &attributes->l3,
+			.mask = &attributes->l3,
+		};
+		break;
+	default:
+		DRV_LOG(ERR, "port %u invalid flow type%d",
+			dev->data->port_id, fdir_filter->input.flow_type);
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	/* Handle L4. */
+	switch (fdir_filter->input.flow_type) {
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+		attributes->l4.udp.hdr = (struct udp_hdr){
+			.src_port = input->flow.udp4_flow.src_port,
+			.dst_port = input->flow.udp4_flow.dst_port,
+		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_UDP,
 			.spec = &attributes->l4,
@@ -2720,62 +2752,21 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
-			.src_addr = input->flow.tcp4_flow.ip.src_ip,
-			.dst_addr = input->flow.tcp4_flow.ip.dst_ip,
-			.time_to_live = input->flow.tcp4_flow.ip.ttl,
-			.type_of_service = input->flow.tcp4_flow.ip.tos,
-			.next_proto_id = input->flow.tcp4_flow.ip.proto,
-		};
 		attributes->l4.tcp.hdr = (struct tcp_hdr){
 			.src_port = input->flow.tcp4_flow.src_port,
 			.dst_port = input->flow.tcp4_flow.dst_port,
 		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV4,
-			.spec = &attributes->l3,
-			.mask = &attributes->l3,
-		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_TCP,
 			.spec = &attributes->l4,
 			.mask = &attributes->l4,
 		};
 		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
-			.src_addr = input->flow.ip4_flow.src_ip,
-			.dst_addr = input->flow.ip4_flow.dst_ip,
-			.time_to_live = input->flow.ip4_flow.ttl,
-			.type_of_service = input->flow.ip4_flow.tos,
-			.next_proto_id = input->flow.ip4_flow.proto,
-		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV4,
-			.spec = &attributes->l3,
-			.mask = &attributes->l3,
-		};
-		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.udp6_flow.ip.hop_limits,
-			.proto = input->flow.udp6_flow.ip.proto,
-		};
-		memcpy(attributes->l3.ipv6.hdr.src_addr,
-		       input->flow.udp6_flow.ip.src_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		memcpy(attributes->l3.ipv6.hdr.dst_addr,
-		       input->flow.udp6_flow.ip.dst_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
 		attributes->l4.udp.hdr = (struct udp_hdr){
 			.src_port = input->flow.udp6_flow.src_port,
 			.dst_port = input->flow.udp6_flow.dst_port,
 		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &attributes->l3,
-			.mask = &attributes->l3,
-		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_UDP,
 			.spec = &attributes->l4,
@@ -2783,47 +2774,18 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.tcp6_flow.ip.hop_limits,
-			.proto = input->flow.tcp6_flow.ip.proto,
-		};
-		memcpy(attributes->l3.ipv6.hdr.src_addr,
-		       input->flow.tcp6_flow.ip.src_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		memcpy(attributes->l3.ipv6.hdr.dst_addr,
-		       input->flow.tcp6_flow.ip.dst_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
 		attributes->l4.tcp.hdr = (struct tcp_hdr){
 			.src_port = input->flow.tcp6_flow.src_port,
 			.dst_port = input->flow.tcp6_flow.dst_port,
 		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &attributes->l3,
-			.mask = &attributes->l3,
-		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_TCP,
 			.spec = &attributes->l4,
 			.mask = &attributes->l4,
 		};
 		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.ipv6_flow.hop_limits,
-			.proto = input->flow.ipv6_flow.proto,
-		};
-		memcpy(attributes->l3.ipv6.hdr.src_addr,
-		       input->flow.ipv6_flow.src_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		memcpy(attributes->l3.ipv6.hdr.dst_addr,
-		       input->flow.ipv6_flow.dst_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &attributes->l3,
-			.mask = &attributes->l3,
-		};
 		break;
 	default:
 		DRV_LOG(ERR, "port %u invalid flow type%d",