[6/7] net/hns3: fix FDIR lock bug

Message ID 1618653299-40380-7-git-send-email-humin29@huawei.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: features and bugfix for hns3 PMD

Checks

ci/checkpatch: success (coding style OK)

Commit Message

humin (Q) April 17, 2021, 9:54 a.m. UTC
  From: Chengwen Feng <fengchengwen@huawei.com>

Currently, the fdir lock is used to protect concurrent access from
multiple processes, but it has the following problems:
1) There is no protection for the fdir reset recovery.
2) Only part of the data is protected; e.g. the filter list is not
protected.

We use the following scheme (see the sketch after this list):
1) Delete the fdir lock.
2) Add a flow lock that provides protection at the rte flow driver
ops API level.
3) Declare support for RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE.
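
As a rough, standalone illustration of the scheme (not part of the
patch), the idea is to initialize one process-shared pthread mutex in
the primary process and wrap every rte flow driver op with
lock/unlock around the real handler. The sketch below uses a
hypothetical dummy_flow_create() and a file-scope mutex for brevity;
in the patch the mutex lives in the shared hw structure so secondary
processes can take it as well.

    /* Build with: gcc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t flows_lock;

    /* Primary process only: create the mutex as PTHREAD_PROCESS_SHARED
     * so that secondary processes mapping the same memory can take it.
     */
    static void flows_lock_init(void)
    {
            pthread_mutexattr_t attr;

            pthread_mutexattr_init(&attr);
            pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
            pthread_mutex_init(&flows_lock, &attr);
            pthread_mutexattr_destroy(&attr);
    }

    /* Every rte flow driver op is wrapped the same way: take the lock,
     * call the real handler, release the lock.
     */
    static int dummy_flow_create(void)
    {
            int ret;

            pthread_mutex_lock(&flows_lock);
            ret = 0; /* the real flow creation would happen here */
            pthread_mutex_unlock(&flows_lock);

            return ret;
    }

    int main(void)
    {
            flows_lock_init();
            printf("flow create returned %d\n", dummy_flow_create());
            return 0;
    }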

Fixes: fcba820d9b9e ("net/hns3: support flow director")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    |  4 +-
 drivers/net/hns3/hns3_ethdev.h    |  4 ++
 drivers/net/hns3/hns3_ethdev_vf.c |  3 +-
 drivers/net/hns3/hns3_fdir.c      | 28 ++++++------
 drivers/net/hns3/hns3_fdir.h      |  3 +-
 drivers/net/hns3/hns3_flow.c      | 96 ++++++++++++++++++++++++++++++++++++---
 6 files changed, 111 insertions(+), 27 deletions(-)
  

Patch

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index c276c6b..bcbebd0 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -7334,8 +7334,8 @@  hns3_dev_init(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
 		return -ENOMEM;
 	}
-	/* initialize flow filter lists */
-	hns3_filterlist_init(eth_dev);
+
+	hns3_flow_init(eth_dev);
 
 	hns3_set_rxtx_function(eth_dev);
 	eth_dev->dev_ops = &hns3_eth_dev_ops;
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 7b7d359..b1360cb 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -5,6 +5,7 @@ 
 #ifndef _HNS3_ETHDEV_H_
 #define _HNS3_ETHDEV_H_
 
+#include <pthread.h>
 #include <sys/time.h>
 #include <ethdev_driver.h>
 #include <rte_byteorder.h>
@@ -613,6 +614,9 @@  struct hns3_hw {
 	uint8_t udp_cksum_mode;
 
 	struct hns3_port_base_vlan_config port_base_vlan_cfg;
+
+	pthread_mutex_t flows_lock; /* rte_flow ops lock */
+
 	/*
 	 * PMD setup and configuration is not thread safe. Since it is not
 	 * performance sensitive, it is better to guarantee thread-safety
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 58809fb..cf2b74e 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2910,8 +2910,7 @@  hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 		return -ENOMEM;
 	}
 
-	/* initialize flow filter lists */
-	hns3_filterlist_init(eth_dev);
+	hns3_flow_init(eth_dev);
 
 	hns3_set_rxtx_function(eth_dev);
 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c
index 603cc82..87c1aef 100644
--- a/drivers/net/hns3/hns3_fdir.c
+++ b/drivers/net/hns3/hns3_fdir.c
@@ -830,7 +830,6 @@  int hns3_fdir_filter_init(struct hns3_adapter *hns)
 
 	fdir_hash_params.socket_id = rte_socket_id();
 	TAILQ_INIT(&fdir_info->fdir_list);
-	rte_spinlock_init(&fdir_info->flows_lock);
 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, "%s", hns->hw.data->name);
 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
 	if (fdir_info->hash_handle == NULL) {
@@ -856,7 +855,6 @@  void hns3_fdir_filter_uninit(struct hns3_adapter *hns)
 	struct hns3_fdir_info *fdir_info = &pf->fdir;
 	struct hns3_fdir_rule_ele *fdir_filter;
 
-	rte_spinlock_lock(&fdir_info->flows_lock);
 	if (fdir_info->hash_map) {
 		rte_free(fdir_info->hash_map);
 		fdir_info->hash_map = NULL;
@@ -865,7 +863,6 @@  void hns3_fdir_filter_uninit(struct hns3_adapter *hns)
 		rte_hash_free(fdir_info->hash_handle);
 		fdir_info->hash_handle = NULL;
 	}
-	rte_spinlock_unlock(&fdir_info->flows_lock);
 
 	fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
 	while (fdir_filter) {
@@ -891,10 +888,8 @@  static int hns3_fdir_filter_lookup(struct hns3_fdir_info *fdir_info,
 	hash_sig_t sig;
 	int ret;
 
-	rte_spinlock_lock(&fdir_info->flows_lock);
 	sig = rte_hash_crc(key, sizeof(*key), 0);
 	ret = rte_hash_lookup_with_hash(fdir_info->hash_handle, key, sig);
-	rte_spinlock_unlock(&fdir_info->flows_lock);
 
 	return ret;
 }
@@ -908,11 +903,9 @@  static int hns3_insert_fdir_filter(struct hns3_hw *hw,
 	int ret;
 
 	key = &fdir_filter->fdir_conf.key_conf;
-	rte_spinlock_lock(&fdir_info->flows_lock);
 	sig = rte_hash_crc(key, sizeof(*key), 0);
 	ret = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig);
 	if (ret < 0) {
-		rte_spinlock_unlock(&fdir_info->flows_lock);
 		hns3_err(hw, "Hash table full? err:%d(%s)!", ret,
 			 strerror(-ret));
 		return ret;
@@ -920,7 +913,6 @@  static int hns3_insert_fdir_filter(struct hns3_hw *hw,
 
 	fdir_info->hash_map[ret] = fdir_filter;
 	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
-	rte_spinlock_unlock(&fdir_info->flows_lock);
 
 	return ret;
 }
@@ -933,11 +925,9 @@  static int hns3_remove_fdir_filter(struct hns3_hw *hw,
 	hash_sig_t sig;
 	int ret;
 
-	rte_spinlock_lock(&fdir_info->flows_lock);
 	sig = rte_hash_crc(key, sizeof(*key), 0);
 	ret = rte_hash_del_key_with_hash(fdir_info->hash_handle, key, sig);
 	if (ret < 0) {
-		rte_spinlock_unlock(&fdir_info->flows_lock);
 		hns3_err(hw, "Delete hash key fail ret=%d", ret);
 		return ret;
 	}
@@ -945,7 +935,6 @@  static int hns3_remove_fdir_filter(struct hns3_hw *hw,
 	fdir_filter = fdir_info->hash_map[ret];
 	fdir_info->hash_map[ret] = NULL;
 	TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
-	rte_spinlock_unlock(&fdir_info->flows_lock);
 
 	rte_free(fdir_filter);
 
@@ -1000,11 +989,9 @@  int hns3_fdir_filter_program(struct hns3_adapter *hns,
 	rule->location = ret;
 	node->fdir_conf.location = ret;
 
-	rte_spinlock_lock(&fdir_info->flows_lock);
 	ret = hns3_config_action(hw, rule);
 	if (!ret)
 		ret = hns3_config_key(hns, rule);
-	rte_spinlock_unlock(&fdir_info->flows_lock);
 	if (ret) {
 		hns3_err(hw, "Failed to config fdir: %u src_ip:%x dst_ip:%x "
 			 "src_port:%u dst_port:%u ret = %d",
@@ -1029,9 +1016,7 @@  int hns3_clear_all_fdir_filter(struct hns3_adapter *hns)
 	int ret = 0;
 
 	/* flush flow director */
-	rte_spinlock_lock(&fdir_info->flows_lock);
 	rte_hash_reset(fdir_info->hash_handle);
-	rte_spinlock_unlock(&fdir_info->flows_lock);
 
 	fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
 	while (fdir_filter) {
@@ -1059,6 +1044,17 @@  int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
 	bool err = false;
 	int ret;
 
+	/*
+	 * This API is called in the reset recovery process; the caller must
+	 * hold hw->lock.
+	 * Acquiring hw->flows_lock directly here may deadlock because the rte
+	 * flow driver ops first acquire hw->flows_lock and then may acquire
+	 * hw->lock.
+	 * So first release hw->lock and then acquire hw->flows_lock to avoid
+	 * deadlock.
+	 */
+	rte_spinlock_unlock(&hw->lock);
+	pthread_mutex_lock(&hw->flows_lock);
 	TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) {
 		ret = hns3_config_action(hw, &fdir_filter->fdir_conf);
 		if (!ret)
@@ -1069,6 +1065,8 @@  int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
 				break;
 		}
 	}
+	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 
 	if (err) {
 		hns3_err(hw, "Fail to restore FDIR filter, ret = %d", ret);
diff --git a/drivers/net/hns3/hns3_fdir.h b/drivers/net/hns3/hns3_fdir.h
index 266d37c..fc62daa 100644
--- a/drivers/net/hns3/hns3_fdir.h
+++ b/drivers/net/hns3/hns3_fdir.h
@@ -199,7 +199,6 @@  struct hns3_process_private {
  *  A structure used to define fields of a FDIR related info.
  */
 struct hns3_fdir_info {
-	rte_spinlock_t flows_lock;
 	struct hns3_fdir_rule_list fdir_list;
 	struct hns3_fdir_rule_ele **hash_map;
 	struct rte_hash *hash_handle;
@@ -220,7 +219,7 @@  int hns3_fdir_filter_program(struct hns3_adapter *hns,
 			     struct hns3_fdir_rule *rule, bool del);
 int hns3_clear_all_fdir_filter(struct hns3_adapter *hns);
 int hns3_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value);
-void hns3_filterlist_init(struct rte_eth_dev *dev);
+void hns3_flow_init(struct rte_eth_dev *dev);
 int hns3_restore_all_fdir_filter(struct hns3_adapter *hns);
 
 #endif /* _HNS3_FDIR_H_ */
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 098b191..4511a49 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1214,9 +1214,18 @@  hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 void
-hns3_filterlist_init(struct rte_eth_dev *dev)
+hns3_flow_init(struct rte_eth_dev *dev)
 {
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_process_private *process_list = dev->process_private;
+	pthread_mutexattr_t attr;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		pthread_mutexattr_init(&attr);
+		pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+		pthread_mutex_init(&hw->flows_lock, &attr);
+		dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+	}
 
 	TAILQ_INIT(&process_list->fdir_list);
 	TAILQ_INIT(&process_list->filter_rss_list);
@@ -2002,12 +2011,87 @@  hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	return 0;
 }
 
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_flow *flow;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	flow = hns3_flow_create(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_destroy(dev, flow, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_flush(dev, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		     const struct rte_flow_action *actions, void *data,
+		     struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_query(dev, flow, actions, data, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
 static const struct rte_flow_ops hns3_flow_ops = {
-	.validate = hns3_flow_validate,
-	.create = hns3_flow_create,
-	.destroy = hns3_flow_destroy,
-	.flush = hns3_flow_flush,
-	.query = hns3_flow_query,
+	.validate = hns3_flow_validate_wrap,
+	.create = hns3_flow_create_wrap,
+	.destroy = hns3_flow_destroy_wrap,
+	.flush = hns3_flow_flush_wrap,
+	.query = hns3_flow_query_wrap,
 	.isolate = NULL,
 };