From patchwork Wed May 27 13:23:26 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hemant Agrawal
X-Patchwork-Id: 70643
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Hemant Agrawal
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: Nipun Gupta
Date: Wed, 27 May 2020 18:53:26 +0530
Message-Id: <20200527132326.1382-38-hemant.agrawal@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200527132326.1382-1-hemant.agrawal@nxp.com>
References: <20200527132326.1382-1-hemant.agrawal@nxp.com>
Subject: [dpdk-dev] [PATCH 37/37] net/dpaa2: support raw flow classification

From: Nipun Gupta

Add support for raw flow rules, which can be used to match any protocol.

Signed-off-by: Nipun Gupta
---
 drivers/net/dpaa2/dpaa2_ethdev.h |   3 +-
 drivers/net/dpaa2/dpaa2_flow.c   | 135 +++++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+), 1 deletion(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 52faeeefe..2bc0f3f5a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2019 NXP
+ *   Copyright 2016-2020 NXP
  *
  */
 
@@ -99,6 +99,7 @@ extern enum pmd_dpaa2_ts dpaa2_enable_ts;
 
 #define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
 #define DPAA2_FS_TABLE_IPADDR_EXTRACT 8
+#define DPAA2_FLOW_MAX_KEY_SIZE		16
 
 /*Externaly defined*/
 extern const struct rte_flow_ops dpaa2_flow_ops;
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index cc789346a..136bdd5fa 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -493,6 +493,42 @@ static int dpaa2_flow_extract_add(
 	return 0;
 }
 
+static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
+				      int size)
+{
+	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
+	struct dpaa2_key_info *key_info = &key_extract->key_info;
+	int last_extract_size, index;
+
+	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
+	    DPKG_EXTRACT_FROM_DATA) {
+		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
+		return -1;
+	}
+
+	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
+	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
+	if (last_extract_size)
+		dpkg->num_extracts++;
+	else
+		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
+
+	for (index = 0; index < dpkg->num_extracts; index++) {
+		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
+		if (index == dpkg->num_extracts - 1)
+			dpkg->extracts[index].extract.from_data.size =
+				last_extract_size;
+		else
+			dpkg->extracts[index].extract.from_data.size =
+				DPAA2_FLOW_MAX_KEY_SIZE;
+		dpkg->extracts[index].extract.from_data.offset =
+			DPAA2_FLOW_MAX_KEY_SIZE * index;
+	}
+
+	key_info->key_total_size = size;
+	return 0;
+}
+
 /* Protocol discrimination.
  * Discriminate IPv4/IPv6/vLan by Eth type.
  * Discriminate UDP/TCP/ICMP by next proto of IP.
@@ -674,6 +710,18 @@ dpaa2_flow_rule_data_set(
 	return 0;
 }
 
+static inline int
+dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
+			     const void *key, const void *mask, int size)
+{
+	int offset = 0;
+
+	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
+	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
+
+	return 0;
+}
+
 static inline int
 _dpaa2_flow_rule_move_ipaddr_tail(
 	struct dpaa2_key_extract *key_extract,
@@ -2814,6 +2862,83 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
 	return 0;
 }
 
+static int
+dpaa2_configure_flow_raw(struct rte_flow *flow,
+			 struct rte_eth_dev *dev,
+			 const struct rte_flow_attr *attr,
+			 const struct rte_flow_item *pattern,
+			 const struct rte_flow_action actions[] __rte_unused,
+			 struct rte_flow_error *error __rte_unused,
+			 int *device_configured)
+{
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item_raw *spec = pattern->spec;
+	const struct rte_flow_item_raw *mask = pattern->mask;
+	int prev_key_size =
+		priv->extract.qos_key_extract.key_info.key_total_size;
+	int local_cfg = 0, ret;
+	uint32_t group;
+
+	/* Need both spec and mask */
+	if (!spec || !mask) {
+		DPAA2_PMD_ERR("spec or mask not present.");
+		return -EINVAL;
+	}
+	/* Only supports non-relative with offset 0 */
+	if (spec->relative || spec->offset != 0 ||
+	    spec->search || spec->limit) {
+		DPAA2_PMD_ERR("relative and non zero offset not supported.");
+		return -EINVAL;
+	}
+	/* Spec len and mask len should be same */
+	if (spec->length != mask->length) {
+		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
+		return -EINVAL;
+	}
+
+	/* Get traffic class index and flow id to be configured */
+	group = attr->group;
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (prev_key_size < spec->length) {
+		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
+						 spec->length);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
+			return -1;
+		}
+		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+
+		ret = dpaa2_flow_extract_add_raw(
+					&priv->extract.tc_key_extract[group],
+					spec->length);
+		if (ret) {
+			DPAA2_PMD_ERR("FS Extract RAW add failed.");
+			return -1;
+		}
+		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+	}
+
+	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
+					   mask->pattern, spec->length);
+	if (ret) {
+		DPAA2_PMD_ERR("QoS RAW rule data set failed");
+		return -1;
+	}
+
+	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
+					   mask->pattern, spec->length);
+	if (ret) {
+		DPAA2_PMD_ERR("FS RAW rule data set failed");
+		return -1;
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
 /* The existing QoS/FS entry with IP address(es)
  * needs update after
  * new extract(s) are inserted before IP
@@ -3297,6 +3422,16 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 				return ret;
 			}
 			break;
+		case RTE_FLOW_ITEM_TYPE_RAW:
+			ret = dpaa2_configure_flow_raw(flow,
+					dev, attr, &pattern[i],
+					actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("RAW flow configuration failed!");
+				return ret;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_END:
 			end_of_list = 1;
 			break; /*End of List*/
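
Note for readers, not part of the patch: the sketch below shows how an application
could exercise this new path through the generic rte_flow API. The port id, queue
index, byte pattern and the helper name create_raw_flow() are made-up example
values; the constraints it respects (both spec and mask present, non-relative match
anchored at offset 0, no search/limit, spec and mask lengths equal) mirror the
checks in dpaa2_configure_flow_raw() above.

#include <stdint.h>
#include <stdio.h>

#include <rte_flow.h>

/* Illustrative only: steer frames whose first 16 bytes match a raw pattern. */
static int
create_raw_flow(uint16_t port_id, uint16_t rx_queue)
{
	/* dst MAC, src MAC, EtherType and first two IPv4 bytes (example data) */
	static const uint8_t raw_spec[16] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55,	/* dst MAC */
		0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,	/* src MAC */
		0x08, 0x00,				/* EtherType = IPv4 */
		0x45, 0x00,				/* don't care, see mask */
	};
	static const uint8_t raw_mask[16] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff,
		0x00, 0x00,				/* ignore trailing bytes */
	};
	const struct rte_flow_attr attr = {
		.group = 0,
		.priority = 0,
		.ingress = 1,
	};
	/* PMD requirements: non-relative, offset 0, no search/limit,
	 * spec->length == mask->length.
	 */
	const struct rte_flow_item_raw raw_item_spec = {
		.relative = 0,
		.search = 0,
		.offset = 0,
		.limit = 0,
		.length = sizeof(raw_spec),
		.pattern = raw_spec,
	};
	const struct rte_flow_item_raw raw_item_mask = {
		.length = sizeof(raw_mask),
		.pattern = raw_mask,
	};
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_RAW,
			.spec = &raw_item_spec,
			.mask = &raw_item_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = rx_queue };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
	if (flow == NULL) {
		printf("raw flow creation failed: %s\n",
		       flow_err.message ? flow_err.message : "(no message)");
		return -1;
	}
	return 0;
}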
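
As a worked example of the key-extract handling added here:
dpaa2_flow_extract_add_raw() splits the requested key into
DPKG_EXTRACT_FROM_DATA extracts of at most DPAA2_FLOW_MAX_KEY_SIZE (16) bytes
each, so a 40-byte pattern becomes three extracts of 16, 16 and 8 bytes at
offsets 0, 16 and 32, while the 16-byte pattern in the sketch above fits in a
single extract at offset 0. Since a dpkg profile only holds a limited number of
extracts (DPKG_MAX_NUM_OF_EXTRACTS), very long raw patterns will not fit.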