From patchwork Tue May 23 07:13:00 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Zhao1, Wei"
X-Patchwork-Id: 24466
X-Patchwork-Delegate: ferruh.yigit@amd.com
Return-Path:
X-Original-To: patchwork@dpdk.org
Delivered-To: patchwork@dpdk.org
Received: from [92.243.14.124] (localhost [IPv6:::1])
	by dpdk.org (Postfix) with ESMTP id 516CC7CBA;
	Tue, 23 May 2017 09:22:06 +0200 (CEST)
Received: from mga11.intel.com (mga11.intel.com [192.55.52.93])
	by dpdk.org (Postfix) with ESMTP id E260B388F
	for ; Tue, 23 May 2017 09:21:45 +0200 (CEST)
Received: from fmsmga002.fm.intel.com ([10.253.24.26])
	by fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
	23 May 2017 00:21:42 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos; i="5.38,381,1491289200"; d="scan'208"; a="1173081848"
Received: from dpdk1.bj.intel.com ([172.16.182.84])
	by fmsmga002.fm.intel.com with ESMTP; 23 May 2017 00:21:41 -0700
From: Wei Zhao
To: dev@dpdk.org
Cc: Wei Zhao
Date: Tue, 23 May 2017 15:13:00 +0800
Message-Id: <1495523581-56027-11-git-send-email-wei.zhao1@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1495523581-56027-1-git-send-email-wei.zhao1@intel.com>
References: <1495523581-56027-1-git-send-email-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 10/11] net/e1000: destroy consistent filter
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

This patch adds a function to destroy a flow filter. The corresponding
hardware filter is removed and the rule is deleted from the driver's
filter lists.

Signed-off-by: Wei Zhao
---
 drivers/net/e1000/igb_flow.c | 100 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 99 insertions(+), 1 deletion(-)

diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index 05665ec..af1f909 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -1468,10 +1468,108 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Destroy a flow rule on igb. */
+static int
+igb_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_flow *pmd_flow = flow;
+	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct rte_eth_flex_filter flex_filter;
+	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+	struct igb_eth_syn_filter_ele *syn_filter_ptr;
+	struct igb_flex_filter_ele *flex_filter_ptr;
+	struct igb_flow_mem *igb_flow_mem_ptr;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
+					pmd_flow->rule;
+		(void)rte_memcpy(&ntuple_filter,
+			&ntuple_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ntuple_filter));
+		ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&igb_filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			rte_free(ntuple_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
+					pmd_flow->rule;
+		(void)rte_memcpy(&ethertype_filter,
+			&ethertype_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ethertype_filter));
+		ret = igb_add_del_ethertype_filter(dev,
+				&ethertype_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&igb_filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			rte_free(ethertype_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_SYN:
+		syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
+				pmd_flow->rule;
+		(void)rte_memcpy(&syn_filter,
+			&syn_filter_ptr->filter_info,
+			sizeof(struct rte_eth_syn_filter));
+		ret = eth_igb_syn_filter_set(dev, &syn_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&igb_filter_syn_list,
+				syn_filter_ptr, entries);
+			rte_free(syn_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_FLEXIBLE:
+		flex_filter_ptr = (struct igb_flex_filter_ele *)
+				pmd_flow->rule;
+		(void)rte_memcpy(&flex_filter, &flex_filter_ptr->filter_info,
+			sizeof(struct rte_eth_flex_filter));
+		ret = eth_igb_add_del_flex_filter(dev, &flex_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&igb_filter_flex_list,
+				flex_filter_ptr, entries);
+			rte_free(flex_filter_ptr);
+		}
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+			    filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to destroy flow");
+		return ret;
+	}
+
+	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+		if (igb_flow_mem_ptr->flow == pmd_flow) {
+			TAILQ_REMOVE(&igb_flow_list,
+				igb_flow_mem_ptr, entries);
+			rte_free(igb_flow_mem_ptr);
+		}
+	}
+	rte_free(flow);
+
+	return ret;
+}
+
 const struct rte_flow_ops igb_flow_ops = {
 	igb_flow_validate,
 	igb_flow_create,
-	NULL,
+	igb_flow_destroy,
 	NULL,
 	NULL,
 };
\ No newline at end of file
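
For context, a rule handled by this new op is normally torn down through the
generic rte_flow API rather than by calling the driver directly:
rte_flow_destroy() looks up the port's rte_flow_ops and dispatches to the
destroy callback wired in above. Below is a minimal sketch of that call path
from an application; it is illustrative only and not part of the patch. The
helper name remove_flow is made up, and port_id is shown as uint16_t (it was
uint8_t in the DPDK 17.05 timeframe of this series).

#include <stdio.h>
#include <string.h>
#include <rte_flow.h>

/* Tear down one previously created rule on the given port.
 * "flow" must be a handle returned earlier by rte_flow_create() on the
 * same port; on an igb device this ends up in igb_flow_destroy(), which
 * removes the hardware filter and the driver's bookkeeping entry before
 * freeing the handle.
 */
static int
remove_flow(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error error;
	int ret;

	memset(&error, 0, sizeof(error));
	ret = rte_flow_destroy(port_id, flow, &error);
	if (ret)
		printf("flow destroy failed: %s\n",
		       error.message ? error.message : "(no error message)");
	return ret;
}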