From patchwork Tue Nov 13 09:51:44 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Dekel Peled
X-Patchwork-Id: 48020
X-Patchwork-Delegate: shahafs@mellanox.com
From: Dekel Peled
To: yskoh@mellanox.com, shahafs@mellanox.com
Cc: dev@dpdk.org, orika@mellanox.com
Date: Tue, 13 Nov 2018 11:51:44 +0200
Message-Id: <1542102705-55243-2-git-send-email-dekelp@mellanox.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1542014408-12853-1-git-send-email-dekelp@mellanox.com>
References: <1542014408-12853-1-git-send-email-dekelp@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 1/2] net/mlx5: add MPLS to Direct Verbs flow engine

From: Shahaf Shuler

Support for MPLS was overlooked in this flow engine. Its absence is
critical, because some of the actions required for MPLS can be done
only with the DV engine.

To set the MPLS filter correctly, we need to know the flow item that
precedes the MPLS item (UDP, GRE or other). To do that, a new variable,
last_item, is added and updated after each item translation; the full
item flags are updated at the end of each item iteration.

Signed-off-by: Shahaf Shuler
---
 drivers/net/mlx5/mlx5_flow_dv.c | 111 +++++++++++++++++++++++++++++++++-------
 1 file changed, 92 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index a2edd16..41d5765 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -890,6 +890,14 @@
 			return ret;
 		item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
 		break;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		ret = mlx5_flow_validate_item_mpls(items, item_flags,
+						   next_protocol,
+						   error);
+		if (ret < 0)
+			return ret;
+		item_flags |= MLX5_FLOW_LAYER_MPLS;
+		break;
 	case RTE_FLOW_ITEM_TYPE_META:
 		ret = flow_dv_validate_item_meta(dev, items, attr,
 						 error);
@@ -1609,6 +1617,64 @@
 }

 /**
+ * Add MPLS item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in previous item.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_mpls(void *matcher, void *key,
+			    const struct rte_flow_item *item,
+			    uint64_t prev_layer,
+			    int inner)
+{
+	const struct rte_flow_item_mpls *mpls_m = item->mask;
+	const struct rte_flow_item_mpls *mpls_v = item->spec;
+
+	if (!mpls_v)
+		return;
+	if (!mpls_m)
+		mpls_m = &rte_flow_item_mpls_mask;
+
+	switch (prev_layer) {
+	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+		memcpy(MLX5_ADDR_OF(fte_match_set_misc2, key,
+				    outer_first_mpls_over_udp),
+		       mpls_v, sizeof(struct rte_flow_item_mpls));
+		memcpy(MLX5_ADDR_OF(fte_match_set_misc2, matcher,
+				    outer_first_mpls_over_udp),
+		       mpls_m, sizeof(struct rte_flow_item_mpls));
+		break;
+	case MLX5_FLOW_LAYER_GRE:
+		memcpy(MLX5_ADDR_OF(fte_match_set_misc2, key,
+				    outer_first_mpls_over_gre),
+		       mpls_v, sizeof(struct rte_flow_item_mpls));
+		memcpy(MLX5_ADDR_OF(fte_match_set_misc2, matcher,
+				    outer_first_mpls_over_gre),
+		       mpls_m, sizeof(struct rte_flow_item_mpls));
+		break;
+	default:
+		/* Inner MPLS is not supported. */
+		if (!inner) {
+			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, key,
+					    outer_first_mpls),
+			       mpls_v, sizeof(struct rte_flow_item_mpls));
+			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, matcher,
+					    outer_first_mpls),
+			       mpls_m, sizeof(struct rte_flow_item_mpls));
+		}
+	}
+}
+
+/**
  * Add META item to matcher
  *
  * @param[in, out] matcher
@@ -1786,6 +1852,7 @@
 	struct priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
 	uint64_t item_flags = 0;
+	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
 	uint64_t priority = attr->priority;
 	struct mlx5_flow_dv_matcher matcher = {
@@ -1940,17 +2007,17 @@
 			flow_dv_translate_item_eth(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L2;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-					       MLX5_FLOW_LAYER_OUTER_L2;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+					     MLX5_FLOW_LAYER_OUTER_L2;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			flow_dv_translate_item_vlan(match_mask, match_value,
 						    items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L2;
-			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
-						MLX5_FLOW_LAYER_INNER_VLAN) :
-					       (MLX5_FLOW_LAYER_OUTER_L2 |
-						MLX5_FLOW_LAYER_OUTER_VLAN);
+			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+					      MLX5_FLOW_LAYER_INNER_VLAN) :
+					     (MLX5_FLOW_LAYER_OUTER_L2 |
+					      MLX5_FLOW_LAYER_OUTER_VLAN);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			flow_dv_translate_item_ipv4(match_mask, match_value,
@@ -1961,8 +2028,8 @@
 					(dev_flow, tunnel,
 					 MLX5_IPV4_LAYER_TYPES,
 					 MLX5_IPV4_IBV_RX_HASH);
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			flow_dv_translate_item_ipv6(match_mask, match_value,
@@ -1973,8 +2040,8 @@
 					(dev_flow, tunnel,
 					 MLX5_IPV6_LAYER_TYPES,
 					 MLX5_IPV6_IBV_RX_HASH);
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			flow_dv_translate_item_tcp(match_mask, match_value,
@@ -1985,8 +2052,8 @@
 					(dev_flow, tunnel, ETH_RSS_TCP,
 					 IBV_RX_HASH_SRC_PORT_TCP |
 					 IBV_RX_HASH_DST_PORT_TCP);
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			flow_dv_translate_item_udp(match_mask, match_value,
@@ -1997,37 +2064,43 @@
 					(dev_flow, tunnel, ETH_RSS_UDP,
 					 IBV_RX_HASH_SRC_PORT_UDP |
 					 IBV_RX_HASH_DST_PORT_UDP);
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			flow_dv_translate_item_gre(match_mask, match_value,
 						   items, tunnel);
-			item_flags |= MLX5_FLOW_LAYER_GRE;
+			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
 			flow_dv_translate_item_nvgre(match_mask, match_value,
 						     items, tunnel);
-			item_flags |= MLX5_FLOW_LAYER_GRE;
+			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			flow_dv_translate_item_vxlan(match_mask, match_value,
 						     items, tunnel);
-			item_flags |= MLX5_FLOW_LAYER_VXLAN;
+			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
 			flow_dv_translate_item_vxlan(match_mask, match_value,
 						     items, tunnel);
-			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+			break;
+		case RTE_FLOW_ITEM_TYPE_MPLS:
+			flow_dv_translate_item_mpls(match_mask, match_value,
+						    items, last_item, tunnel);
+			last_item = MLX5_FLOW_LAYER_MPLS;
 			break;
 		case RTE_FLOW_ITEM_TYPE_META:
 			flow_dv_translate_item_meta(match_mask, match_value,
 						    items);
-			item_flags |= MLX5_FLOW_ITEM_METADATA;
+			last_item = MLX5_FLOW_ITEM_METADATA;
 			break;
 		default:
 			break;
 		}
+		item_flags |= last_item;
 	}
 	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
 					 dev_flow->dv.value.buf));
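
For reference, below is a minimal, hypothetical rte_flow usage sketch (not
part of the patch) showing the kind of pattern that exercises the new code
path. With an ETH / IPV4 / UDP / MPLS pattern, UDP is the layer recorded in
last_item, so flow_dv_translate_item_mpls() copies the MPLS spec/mask into
the outer_first_mpls_over_udp field of the DV matcher. The function name,
port ID, queue index and MPLS label value are illustrative only.

#include <stdint.h>
#include <rte_flow.h>

/* Create a flow rule steering MPLS-over-UDP packets with label 3 to Rx
 * queue 0. Assumes the port is started and uses the mlx5 PMD with the
 * DV flow engine.
 */
static struct rte_flow *
create_mpls_over_udp_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Label 3: the 20-bit label occupies the upper bits of label_tc_s. */
	struct rte_flow_item_mpls mpls_spec = {
		.label_tc_s = { 0x00, 0x00, 0x30 },
	};
	/* Match the label only; ignore TC, bottom-of-stack bit and TTL. */
	struct rte_flow_item_mpls mpls_mask = {
		.label_tc_s = { 0xff, 0xff, 0xf0 },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{
			.type = RTE_FLOW_ITEM_TYPE_MPLS,
			.spec = &mpls_spec,
			.mask = &mpls_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

An MPLS item preceded by a GRE item would instead be written into
outer_first_mpls_over_gre, per the switch on prev_layer in the patch above.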