From patchwork Wed May 27 13:23:16 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Hemant Agrawal X-Patchwork-Id: 70633 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6C1B1A034E; Wed, 27 May 2020 15:32:07 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id F1A911DAD2; Wed, 27 May 2020 15:27:15 +0200 (CEST) Received: from inva020.nxp.com (inva020.nxp.com [92.121.34.13]) by dpdk.org (Postfix) with ESMTP id F18251DA55 for ; Wed, 27 May 2020 15:26:44 +0200 (CEST) Received: from inva020.nxp.com (localhost [127.0.0.1]) by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id B0D4F1A07EE; Wed, 27 May 2020 15:26:44 +0200 (CEST) Received: from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com [165.114.16.14]) by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id D6C511A07F6; Wed, 27 May 2020 15:26:42 +0200 (CEST) Received: from bf-netperf1.ap.freescale.net (bf-netperf1.ap.freescale.net [10.232.133.63]) by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id 0E2D340334; Wed, 27 May 2020 21:26:37 +0800 (SGT) From: Hemant Agrawal To: dev@dpdk.org, ferruh.yigit@intel.com Cc: Jun Yang Date: Wed, 27 May 2020 18:53:16 +0530 Message-Id: <20200527132326.1382-28-hemant.agrawal@nxp.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200527132326.1382-1-hemant.agrawal@nxp.com> References: <20200527132326.1382-1-hemant.agrawal@nxp.com> X-Virus-Scanned: ClamAV using ClamSMTP Subject: [dpdk-dev] [PATCH 27/37] net/dpaa2: flow QoS or FS table entry indexing X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Jun Yang 
Calculate the QoS/FS entry index from the group and priority of the flow. 1) The lower the entry index, the higher the priority of the flow. 2) Before creating a flow, verify that no flow with the same group and priority has already been added. Signed-off-by: Jun Yang --- drivers/net/dpaa2/dpaa2_ethdev.c | 4 + drivers/net/dpaa2/dpaa2_ethdev.h | 5 +- drivers/net/dpaa2/dpaa2_flow.c | 127 +++++++++++++------------------ 3 files changed, 59 insertions(+), 77 deletions(-) diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index cd8555246..401a75cca 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -2394,6 +2394,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) } priv->num_rx_tc = attr.num_rx_tcs; + priv->qos_entries = attr.qos_entries; + priv->fs_entries = attr.fs_entries; + priv->dist_queues = attr.num_queues; + /* only if the custom CG is enabled */ if (attr.options & DPNI_OPT_CUSTOM_CG) priv->max_cgs = attr.num_cgs; diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h index 030c625e3..b49b88a2d 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.h +++ b/drivers/net/dpaa2/dpaa2_ethdev.h @@ -145,6 +145,9 @@ struct dpaa2_dev_priv { uint8_t max_mac_filters; uint8_t max_vlan_filters; uint8_t num_rx_tc; + uint16_t qos_entries; + uint16_t fs_entries; + uint8_t dist_queues; uint8_t flags; /*dpaa2 config flags */ uint8_t en_ordered; uint8_t en_loose_ordered; @@ -152,8 +155,6 @@ struct dpaa2_dev_priv { uint8_t cgid_in_use[MAX_RX_QUEUES]; struct extract_s extract; - uint8_t *qos_index; - uint8_t *fs_index; uint16_t ss_offset; uint64_t ss_iova; diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c index 941d62b80..760a8a793 100644 --- a/drivers/net/dpaa2/dpaa2_flow.c +++ b/drivers/net/dpaa2/dpaa2_flow.c @@ -47,11 +47,8 @@ struct rte_flow { LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. 
*/ struct dpni_rule_cfg qos_rule; struct dpni_rule_cfg fs_rule; - uint16_t qos_index; - uint16_t fs_index; uint8_t key_size; uint8_t tc_id; /** Traffic Class ID. */ - uint8_t flow_type; uint8_t tc_index; /** index within this Traffic Class. */ enum rte_flow_action_type action; uint16_t flow_id; @@ -2645,6 +2642,7 @@ dpaa2_flow_entry_update( char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE]; char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE]; int extend = -1, extend1, size; + uint16_t qos_index; while (curr) { if (curr->ipaddr_rule.ipaddr_type == @@ -2676,6 +2674,9 @@ dpaa2_flow_entry_update( size = NH_FLD_IPV6_ADDR_SIZE; } + qos_index = curr->tc_id * priv->fs_entries + + curr->tc_index; + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, &curr->qos_rule); if (ret) { @@ -2769,7 +2770,7 @@ dpaa2_flow_entry_update( ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token, &curr->qos_rule, - curr->tc_id, curr->qos_index, + curr->tc_id, qos_index, 0, 0); if (ret) { DPAA2_PMD_ERR("Qos entry update failed."); @@ -2875,7 +2876,7 @@ dpaa2_flow_entry_update( curr->fs_rule.key_size += extend; ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, - priv->token, curr->tc_id, curr->fs_index, + priv->token, curr->tc_id, curr->tc_index, &curr->fs_rule, &curr->action_cfg); if (ret) { DPAA2_PMD_ERR("FS entry update failed."); @@ -2888,6 +2889,28 @@ dpaa2_flow_entry_update( return 0; } +static inline int +dpaa2_flow_verify_attr( + struct dpaa2_dev_priv *priv, + const struct rte_flow_attr *attr) +{ + struct rte_flow *curr = LIST_FIRST(&priv->flows); + + while (curr) { + if (curr->tc_id == attr->group && + curr->tc_index == attr->priority) { + DPAA2_PMD_ERR( + "Flow with group %d and priority %d already exists.", + attr->group, attr->priority); + + return -1; + } + curr = LIST_NEXT(curr, next); + } + + return 0; +} + static int dpaa2_generic_flow_set(struct rte_flow *flow, struct rte_eth_dev *dev, @@ -2898,10 +2921,8 @@ dpaa2_generic_flow_set(struct rte_flow *flow, { const struct rte_flow_action_queue 
*dest_queue; const struct rte_flow_action_rss *rss_conf; - uint16_t index; int is_keycfg_configured = 0, end_of_list = 0; int ret = 0, i = 0, j = 0; - struct dpni_attr nic_attr; struct dpni_rx_tc_dist_cfg tc_cfg; struct dpni_qos_tbl_cfg qos_cfg; struct dpni_fs_action_cfg action; @@ -2909,6 +2930,11 @@ dpaa2_generic_flow_set(struct rte_flow *flow, struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; size_t param; struct rte_flow *curr = LIST_FIRST(&priv->flows); + uint16_t qos_index; + + ret = dpaa2_flow_verify_attr(priv, attr); + if (ret) + return ret; /* Parse pattern list to get the matching parameters */ while (!end_of_list) { @@ -3056,31 +3082,15 @@ dpaa2_generic_flow_set(struct rte_flow *flow, } } /* Configure QoS table first */ - memset(&nic_attr, 0, sizeof(struct dpni_attr)); - ret = dpni_get_attributes(dpni, CMD_PRI_LOW, - priv->token, &nic_attr); - if (ret < 0) { - DPAA2_PMD_ERR( - "Failure to get attribute. dpni@%p err code(%d)\n", - dpni, ret); - return ret; - } - action.flow_id = action.flow_id % nic_attr.num_rx_tcs; + action.flow_id = action.flow_id % priv->num_rx_tc; - if (!priv->qos_index) { - priv->qos_index = rte_zmalloc(0, - nic_attr.qos_entries, 64); - } - for (index = 0; index < nic_attr.qos_entries; index++) { - if (!priv->qos_index[index]) { - priv->qos_index[index] = 1; - break; - } - } - if (index >= nic_attr.qos_entries) { + qos_index = flow->tc_id * priv->fs_entries + + flow->tc_index; + + if (qos_index >= priv->qos_entries) { DPAA2_PMD_ERR("QoS table with %d entries full", - nic_attr.qos_entries); + priv->qos_entries); return -1; } flow->qos_rule.key_size = priv->extract @@ -3110,30 +3120,18 @@ dpaa2_generic_flow_set(struct rte_flow *flow, } ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token, &flow->qos_rule, - flow->tc_id, index, + flow->tc_id, qos_index, 0, 0); if (ret < 0) { DPAA2_PMD_ERR( "Error in addnig entry to QoS table(%d)", ret); - priv->qos_index[index] = 0; return ret; } - flow->qos_index = index; /* Then Configure FS 
table */ - if (!priv->fs_index) { - priv->fs_index = rte_zmalloc(0, - nic_attr.fs_entries, 64); - } - for (index = 0; index < nic_attr.fs_entries; index++) { - if (!priv->fs_index[index]) { - priv->fs_index[index] = 1; - break; - } - } - if (index >= nic_attr.fs_entries) { + if (flow->tc_index >= priv->fs_entries) { DPAA2_PMD_ERR("FS table with %d entries full", - nic_attr.fs_entries); + priv->fs_entries); return -1; } flow->fs_rule.key_size = priv->extract @@ -3164,31 +3162,23 @@ dpaa2_generic_flow_set(struct rte_flow *flow, } } ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token, - flow->tc_id, index, + flow->tc_id, flow->tc_index, &flow->fs_rule, &action); if (ret < 0) { DPAA2_PMD_ERR( "Error in adding entry to FS table(%d)", ret); - priv->fs_index[index] = 0; return ret; } - flow->fs_index = index; memcpy(&flow->action_cfg, &action, sizeof(struct dpni_fs_action_cfg)); break; case RTE_FLOW_ACTION_TYPE_RSS: - ret = dpni_get_attributes(dpni, CMD_PRI_LOW, - priv->token, &nic_attr); - if (ret < 0) { - DPAA2_PMD_ERR( - "Failure to get attribute. 
dpni@%p err code(%d)\n", - dpni, ret); - return ret; - } rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf); for (i = 0; i < (int)rss_conf->queue_num; i++) { - if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) || - rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) { + if (rss_conf->queue[i] < + (attr->group * priv->dist_queues) || + rss_conf->queue[i] >= + ((attr->group + 1) * priv->dist_queues)) { DPAA2_PMD_ERR( "Queue/Group combination are not supported\n"); return -ENOTSUP; @@ -3262,34 +3252,24 @@ dpaa2_generic_flow_set(struct rte_flow *flow, } /* Add Rule into QoS table */ - if (!priv->qos_index) { - priv->qos_index = rte_zmalloc(0, - nic_attr.qos_entries, 64); - } - for (index = 0; index < nic_attr.qos_entries; index++) { - if (!priv->qos_index[index]) { - priv->qos_index[index] = 1; - break; - } - } - if (index >= nic_attr.qos_entries) { + qos_index = flow->tc_id * priv->fs_entries + + flow->tc_index; + if (qos_index >= priv->qos_entries) { DPAA2_PMD_ERR("QoS table with %d entries full", - nic_attr.qos_entries); + priv->qos_entries); return -1; } flow->qos_rule.key_size = priv->extract.qos_key_extract.key_info.key_total_size; ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token, &flow->qos_rule, flow->tc_id, - index, 0, 0); + qos_index, 0, 0); if (ret < 0) { DPAA2_PMD_ERR( "Error in entry addition in QoS table(%d)", ret); - priv->qos_index[index] = 0; return ret; } - flow->qos_index = index; break; case RTE_FLOW_ACTION_TYPE_END: end_of_list = 1; @@ -3574,7 +3554,6 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev, "Error in adding entry to QoS table(%d)", ret); goto error; } - priv->qos_index[flow->qos_index] = 0; /* Then remove entry from FS table */ ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token, @@ -3584,7 +3563,6 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev, "Error in entry addition in FS table(%d)", ret); goto error; } - priv->fs_index[flow->fs_index] = 0; break; case RTE_FLOW_ACTION_TYPE_RSS: 
ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, @@ -3594,7 +3572,6 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev, "Error in entry addition in QoS table(%d)", ret); goto error; } - priv->qos_index[flow->qos_index] = 0; break; default: DPAA2_PMD_ERR(