From patchwork Fri Oct 23 07:38:38 2015
X-Patchwork-Submitter: mablexidana
X-Patchwork-Id: 7933
Date: Fri, 23 Oct 2015 15:38:38 +0800 (CST)
From: mablexidana
To: dev@dpdk.org
Message-ID: <29f00a47.e2cb.15093a29920.Coremail.mablexidana@163.com>
Subject: [dpdk-dev] [PATCH 1/2] lpm: add strict if control etc. on version 1.2.3
List-Id: patches and discussions about DPDK

Hi,

This patch was tested on DPDK version 1.2.3.
Fixes: 2a2174801fa4 ("fix lpm bugs")

Add strict if control: do not let tbl24 processing run into tbl8 processing.
Add valid_group = VALID, in case the valid_group flag gets written as invalid.

Regards,
yuerxin
---
 lib/librte_lpm/rte_lpm.c | 89 +++++++++++++++++++++++++++---------------------
 1 file changed, 50 insertions(+), 39 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index bb1ec48..8fabf30 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -423,33 +423,35 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 			continue;
 		}
-
-		/* If tbl24 entry is valid and extended calculate the index
-		 * into tbl8. */
-		tbl8_index = lpm->tbl24[tbl24_index].tbl8_gindex *
-				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
-		for (j = tbl8_index; j < tbl8_group_end; j++) {
-			if (!lpm->tbl8[j].valid ||
-					lpm->tbl8[j].depth <= depth) {
-				struct rte_lpm_tbl8_entry new_tbl8_entry = {
-					.valid = VALID,
-					.valid_group = VALID,
-					.depth = depth,
-					.next_hop = next_hop,
-				};
-
-				/*
-				 * Setting tbl8 entry in one go to avoid race
-				 * conditions
-				 */
-				lpm->tbl8[j] = new_tbl8_entry;
-
-				continue;
-			}
-		}
-	}
+		if (lpm->tbl24[i].ext_entry == 1) {
+
+			/* If tbl24 entry is valid and extended calculate the index
+			 * into tbl8. */
+			tbl8_index = lpm->tbl24[i].tbl8_gindex *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+			for (j = tbl8_index; j < tbl8_group_end; j++) {
+				if (!lpm->tbl8[j].valid ||
+						lpm->tbl8[j].depth <= depth) {
+					struct rte_lpm_tbl8_entry new_tbl8_entry = {
+						.valid = VALID,
+						.valid_group = VALID,
+						.depth = depth,
+						.next_hop = next_hop,
+					};
+
+					/*
+					 * Setting tbl8 entry in one go to avoid race
+					 * conditions
+					 */
+					lpm->tbl8[j] = new_tbl8_entry;
+
+					continue;
+				}
+			}
+		}
+	}
 
 	return 0;
 }
@@ -569,6 +571,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 			.valid = VALID,
 			.depth = depth,
 			.next_hop = next_hop,
+			.valid_group = lpm->tbl8[i].valid_group, /* preserve the old group's valid_group flag */
 		};
 
 		/*
@@ -634,7 +637,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 }
 
 static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
 {
 	int32_t rule_index;
 	uint32_t ip_masked;
@@ -645,8 +648,10 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 		rule_index = rule_find(lpm, ip_masked, prev_depth);
 
-		if (rule_index >= 0)
+		if (rule_index >= 0) {
+			*sub_rule_depth = prev_depth;
 			return rule_index;
+		}
 	}
 
 	return -1;
@@ -654,7 +659,7 @@
 static inline int32_t
 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
-		uint8_t depth, int32_t sub_rule_index)
+		uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
 	uint8_t new_depth;
@@ -677,7 +682,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 				lpm->tbl24[i].depth <= depth) {
 			lpm->tbl24[i].valid = INVALID;
 		}
-		else {
+		else if (lpm->tbl24[i].ext_entry == 1) {
 			/*
 			 * If TBL24 entry is extended, then there has
 			 * to be a rule with depth >= 25 in the
@@ -703,19 +708,21 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 
 		/* Calculate depth of sub_rule. */
+		new_depth = (uint8_t) (sub_rule_index / lpm->max_rules_per_depth);
 
 		struct rte_lpm_tbl24_entry new_tbl24_entry = {
 			.valid = VALID,
 			.ext_entry = 0,
-			.depth = new_depth,
+			.depth = sub_rule_depth,
 			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}
 		};
 
 		struct rte_lpm_tbl8_entry new_tbl8_entry = {
 			.valid = VALID,
-			.depth = new_depth,
+			.valid_group = VALID,
+			.depth = sub_rule_depth,
 			.next_hop = lpm->rules_tbl
 					[sub_rule_index].next_hop,
 		};
@@ -726,7 +733,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 				lpm->tbl24[i].depth <= depth) {
 			lpm->tbl24[i] = new_tbl24_entry;
 		}
-		else {
+		else if (lpm->tbl24[i].ext_entry == 1) {
 			/*
 			 * If TBL24 entry is extended, then there has
 			 * to be a rule with depth >= 25 in the
@@ -807,7 +814,7 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 
 static inline int32_t
 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
-		uint8_t depth, int32_t sub_rule_index)
+		uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
 			tbl8_range, i;
@@ -843,7 +850,8 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		/* Set new tbl8 entry. */
 		struct rte_lpm_tbl8_entry new_tbl8_entry = {
 			.valid = VALID,
-			.depth = new_depth,
+			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
+			.depth = sub_rule_depth,
 			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
 		};
@@ -900,6 +908,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 {
 	int32_t rule_to_delete_index, sub_rule_index;
 	uint32_t ip_masked;
+	uint8_t sub_rule_depth;
 
 	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
@@ -931,7 +940,8 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	 * replace the rule_to_delete we return -1 and invalidate the table
 	 * entries associated with this rule.
 	 */
-	sub_rule_index = find_previous_rule(lpm, ip, depth);
+	sub_rule_depth = 0;
+	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
 
 	/*
 	 * If the input depth value is less than 25 use function
@@ -939,10 +949,11 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	 */
 	if (depth <= MAX_DEPTH_TBL24) {
 		return delete_depth_small(lpm, ip_masked, depth,
-				sub_rule_index);
+				sub_rule_index, sub_rule_depth);
 	}
 	else { /* If depth > MAX_DEPTH_TBL24 */
-		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
+		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
+				sub_rule_depth);
 	}
 }
-- 
1.8.5.2 (Apple Git-48)
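For readers not familiar with the rte_lpm layout, the standalone sketch below illustrates the behaviour the patch enforces in add_depth_small(): a tbl8 group is only walked when the covering tbl24 entry is actually marked extended (ext_entry == 1), and a rewritten tbl8 entry keeps the group's valid_group flag. The struct layouts, field names and table sizes are simplified stand-ins chosen for the example, not the real DPDK 1.2.3 definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL8_GROUP_NUM_ENTRIES 256
#define TBL24_NUM_ENTRIES      16   /* tiny table, just for the demo */
#define TBL8_NUM_GROUPS        4

/* Simplified stand-ins for rte_lpm_tbl24_entry / rte_lpm_tbl8_entry;
 * the real structs are packed bitfields with a union. */
struct tbl24_entry {
	uint8_t valid;      /* entry carries a route or extends into a tbl8 group */
	uint8_t ext_entry;  /* 1: group_idx below is a tbl8 group index */
	uint8_t depth;
	uint8_t next_hop;
	uint8_t group_idx;
};

struct tbl8_entry {
	uint8_t valid;
	uint8_t valid_group; /* "group allocated" flag: must survive entry rewrites */
	uint8_t depth;
	uint8_t next_hop;
};

static struct tbl24_entry tbl24[TBL24_NUM_ENTRIES];
static struct tbl8_entry tbl8[TBL8_NUM_GROUPS * TBL8_GROUP_NUM_ENTRIES];

/* Install a route of depth <= 24 over tbl24[first..last]. The point of the
 * patch: only descend into a tbl8 group when ext_entry == 1, instead of
 * treating every entry that cannot be overwritten directly as extended. */
static void
add_depth_small_range(uint32_t first, uint32_t last, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t i, j;

	for (i = first; i <= last; i++) {
		if (!tbl24[i].valid ||
				(tbl24[i].ext_entry == 0 && tbl24[i].depth <= depth)) {
			struct tbl24_entry e = {
				.valid = 1, .ext_entry = 0,
				.depth = depth, .next_hop = next_hop,
			};
			tbl24[i] = e;
			continue;
		}

		if (tbl24[i].ext_entry == 1) {
			uint32_t start = (uint32_t)tbl24[i].group_idx *
					TBL8_GROUP_NUM_ENTRIES;

			for (j = start; j < start + TBL8_GROUP_NUM_ENTRIES; j++) {
				if (!tbl8[j].valid || tbl8[j].depth <= depth) {
					struct tbl8_entry e = {
						.valid = 1,
						/* keep the group's allocation flag */
						.valid_group = tbl8[j].valid_group,
						.depth = depth,
						.next_hop = next_hop,
					};
					tbl8[j] = e;
				}
			}
		}
		/* A valid, deeper, non-extended entry is simply left alone. */
	}
}

int main(void)
{
	memset(tbl24, 0, sizeof(tbl24));
	memset(tbl8, 0, sizeof(tbl8));

	/* tbl24[2] extends into tbl8 group 1; tbl24[3] holds a deeper route. */
	tbl24[2] = (struct tbl24_entry){ .valid = 1, .ext_entry = 1, .group_idx = 1 };
	tbl24[3] = (struct tbl24_entry){ .valid = 1, .depth = 20, .next_hop = 9 };

	add_depth_small_range(0, TBL24_NUM_ENTRIES - 1, 8, 7);

	printf("tbl24[2].ext_entry=%u tbl24[3].next_hop=%u tbl24[0].next_hop=%u\n",
			(unsigned)tbl24[2].ext_entry, (unsigned)tbl24[3].next_hop,
			(unsigned)tbl24[0].next_hop);
	return 0;
}

Built with any C99 compiler, this leaves the extended entry and the deeper /20 entry untouched while the empty slots take the new /8 route, which is exactly the distinction the added ext_entry checks make.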
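The depth handed back through the new sub_rule_depth out-parameter is what delete_depth_small()/delete_depth_big() write into the replacement entries instead of deriving it from the rule index. The minimal sketch below shows that interface shape; the flat rules[] array, the masking arithmetic and all names are illustrative assumptions, not the actual rules_tbl organisation in librte_lpm.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flat rule store used only for this illustration. */
struct rule {
	uint32_t ip_masked;
	uint8_t depth;
	uint8_t next_hop;
	uint8_t in_use;
};

#define MAX_RULES 64
static struct rule rules[MAX_RULES];

/* Longest shorter prefix covering ip at the given depth; on success the
 * covering rule's depth is reported through *sub_rule_depth, mirroring
 * the extra out-parameter the patch adds to find_previous_rule(). */
static int32_t
find_previous_rule(uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
{
	uint8_t prev_depth;
	int32_t i;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		uint32_t mask = ~(UINT32_MAX >> prev_depth);

		for (i = 0; i < MAX_RULES; i++) {
			if (rules[i].in_use && rules[i].depth == prev_depth &&
					rules[i].ip_masked == (ip & mask)) {
				*sub_rule_depth = prev_depth; /* caller gets the real depth */
				return i;
			}
		}
	}
	return -1;
}

int main(void)
{
	uint8_t sub_rule_depth = 0;
	int32_t idx;

	/* One covering /8 rule for 10.0.0.0. */
	rules[5] = (struct rule){ .ip_masked = 0x0A000000, .depth = 8,
			.next_hop = 3, .in_use = 1 };

	idx = find_previous_rule(0x0A010100, 24, &sub_rule_depth);
	printf("covering rule index=%d depth=%u\n", (int)idx,
			(unsigned)sub_rule_depth);
	return 0;
}

Returning the depth alongside the index keeps delete working even when the rule's position in the store says nothing reliable about its prefix length.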