From patchwork Thu Dec 7 01:42:02 2023
X-Patchwork-Submitter: Jie Hai
X-Patchwork-Id: 134895
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jie Hai
To: dev@dpdk.org, Yisen Zhuang, Ferruh Yigit, "Min Hu (Connor)",
 Chengchang Tang, Hao Chen, "Wei Hu (Xavier)"
Subject: [PATCH v2 1/4] net/hns3: fix VF multiple count on one reset
Date: Thu, 7 Dec 2023 09:42:02 +0800
Message-ID: <20231207014205.4002558-2-haijie1@huawei.com>
In-Reply-To: <20231207014205.4002558-1-haijie1@huawei.com>
References: <20231111015915.2776769-1-haijie1@huawei.com>
 <20231207014205.4002558-1-haijie1@huawei.com>

From: Dengdui Huang

The hns3 VF driver learns of a reset event in two ways: from the
interrupt task and from the periodic detection task. For the latter,
the actual reset process is executed with a delay of several
microseconds. Each path increments the reset count by one, and the
periodic detection task can detect a reset event A again after the
interrupt task has already received that same event A, so a single
reset ends up counted twice. This patch adds a reset level comparison
for the VF so that one reset is counted only once.
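Condensed, the guard this patch introduces works as sketched below. The
types and names are simplified stand-ins for illustration, not the
driver's real definitions: a detected event is recorded and counted only
when it outranks the level already pending, so the second task to observe
the same event leaves the count untouched.

    enum reset_level { NONE_RESET = 0, VF_RESET, GLOBAL_RESET }; /* ascending severity */

    struct reset_state {
            enum reset_level pending; /* highest level already recorded */
            unsigned long count;      /* reset statistics counter */
    };

    /* Returns 1 when the caller should go on to schedule the (delayed) reset. */
    static int on_reset_observed(struct reset_state *st, enum reset_level new_req)
    {
            if (new_req == NONE_RESET)
                    return 0; /* nothing detected */
            if (st->pending != NONE_RESET && st->pending >= new_req)
                    return 0; /* same or lower event already seen: do not count again */
            st->pending = new_req; /* record once ... */
            st->count++;           /* ... and count once */
            return 1;
    }

Both the interrupt task and the periodic detection task can funnel
through a check of this shape; whichever observes the event second falls
into the early return and the count stays at one.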
Fixes: a5475d61fa34 ("net/hns3: support VF")
Cc: stable@dpdk.org

Signed-off-by: Dengdui Huang
Signed-off-by: Jie Hai
---
 drivers/net/hns3/hns3_ethdev_vf.c | 44 ++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 916cc0fb1b62..089df146f76e 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -563,13 +563,8 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
-		if (clearval) {
-			hw->reset.stats.global_cnt++;
-			hns3_warn(hw, "Global reset detected, clear reset status");
-		} else {
-			hns3_schedule_delayed_reset(hns);
-			hns3_warn(hw, "Global reset detected, don't clear reset status");
-		}
+		hw->reset.stats.global_cnt++;
+		hns3_warn(hw, "Global reset detected, clear reset status");

 		ret = HNS3VF_VECTOR0_EVENT_RST;
 		goto out;
@@ -584,9 +579,9 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)

 	val = 0;
 	ret = HNS3VF_VECTOR0_EVENT_OTHER;
+
 out:
-	if (clearval)
-		*clearval = val;
+	*clearval = val;
 	return ret;
 }

@@ -1709,11 +1704,25 @@ is_vf_reset_done(struct hns3_hw *hw)
 	return true;
 }

+static enum hns3_reset_level
+hns3vf_detect_reset_event(struct hns3_hw *hw)
+{
+	enum hns3_reset_level reset = HNS3_NONE_RESET;
+	uint32_t cmdq_stat_reg;
+
+	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
+	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg)
+		reset = HNS3_VF_RESET;
+
+	return reset;
+}
+
 bool
 hns3vf_is_reset_pending(struct hns3_adapter *hns)
 {
+	enum hns3_reset_level last_req;
 	struct hns3_hw *hw = &hns->hw;
-	enum hns3_reset_level reset;
+	enum hns3_reset_level new_req;

 	/*
 	 * According to the protocol of PCIe, FLR to a PF device resets the PF
@@ -1736,13 +1745,18 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return false;

-	hns3vf_check_event_cause(hns, NULL);
-	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
-	if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
-	    hw->reset.level < reset) {
-		hns3_warn(hw, "High level reset %d is pending", reset);
+	new_req = hns3vf_detect_reset_event(hw);
+	if (new_req == HNS3_NONE_RESET)
+		return false;
+
+	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
+	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
+		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		hns3_schedule_delayed_reset(hns);
+		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
 	}
+
 	return false;
 }
From patchwork Thu Dec 7 01:42:03 2023
X-Patchwork-Submitter: Jie Hai
X-Patchwork-Id: 134896
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jie Hai
To: dev@dpdk.org, Yisen Zhuang, Dengdui Huang
Subject: [PATCH v2 2/4] net/hns3: fix disable command with firmware
Date: Thu, 7 Dec 2023 09:42:03 +0800
Message-ID: <20231207014205.4002558-3-haijie1@huawei.com>
In-Reply-To: <20231207014205.4002558-1-haijie1@huawei.com>
References: <20231111015915.2776769-1-haijie1@huawei.com>
 <20231207014205.4002558-1-haijie1@huawei.com>

From: Dengdui Huang

Commands to the firmware should be disabled only when handling of the
reset needs to be delayed. This patch fixes it by setting disable_cmd
on the delayed-reset path instead of on every detected event.

Fixes: 5be38fc6c0fc ("net/hns3: fix multiple reset detected log")
Cc: stable@dpdk.org

Signed-off-by: Dengdui Huang
Signed-off-by: Jie Hai
---
 drivers/net/hns3/hns3_ethdev.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index ae81368f68ae..76fc401bd62c 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5552,18 +5552,16 @@ hns3_detect_reset_event(struct hns3_hw *hw)

 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
 	vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
-	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state)
 		new_req = HNS3_IMP_RESET;
-	} else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state)
 		new_req = HNS3_GLOBAL_RESET;
-	}

 	if (new_req == HNS3_NONE_RESET)
 		return HNS3_NONE_RESET;

 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
+		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 	}
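A minimal model of the rule this patch enforces, using illustrative names
(hw_state and on_reset_event are not the driver's definitions): observing
a reset interrupt bit must leave the command channel usable, and only the
path that actually defers reset handling cuts it off.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct hw_state {
            atomic_uint disable_cmd; /* 1: stop sending commands to firmware */
    };

    static void on_reset_event(struct hw_state *hw, bool delay_handling)
    {
            /* Detection alone keeps the firmware command channel open. */
            if (!delay_handling)
                    return;
            /* Only a deferred reset disables the command interface. */
            atomic_store_explicit(&hw->disable_cmd, 1, memory_order_relaxed);
            /* ...after which the delayed reset would be scheduled. */
    }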
From patchwork Thu Dec 7 01:42:04 2023
X-Patchwork-Submitter: Jie Hai
X-Patchwork-Id: 134897
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jie Hai
To: dev@dpdk.org, Yisen Zhuang, Dengdui Huang
Subject: [PATCH v2 3/4] net/hns3: fix incorrect reset level comparison
Date: Thu, 7 Dec 2023 09:42:04 +0800
Message-ID: <20231207014205.4002558-4-haijie1@huawei.com>
In-Reply-To: <20231207014205.4002558-1-haijie1@huawei.com>
References: <20231111015915.2776769-1-haijie1@huawei.com>
 <20231207014205.4002558-1-haijie1@huawei.com>

From: Dengdui Huang

Currently, hns3vf_is_reset_pending() has two problems:
1. When the newly detected reset level is not HNS3_NONE_RESET but the
   last reset level is HNS3_NONE_RESET, the function returns false.
2. The comparison between last_req and new_req is reversed.

In addition, the reset level comparison in hns3_detect_reset_event()
is similar to the one in hns3vf_is_reset_pending(). So this patch fixes
the problems above and merges the reset level comparison logic.

Fixes: 5be38fc6c0fc ("net/hns3: fix multiple reset detected log")
Cc: stable@dpdk.org

Signed-off-by: Dengdui Huang
Signed-off-by: Jie Hai
---
 drivers/net/hns3/hns3_ethdev.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 76fc401bd62c..b8f7e408d1e0 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5545,27 +5545,15 @@ is_pf_reset_done(struct hns3_hw *hw)
 static enum hns3_reset_level
 hns3_detect_reset_event(struct hns3_hw *hw)
 {
-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	enum hns3_reset_level new_req = HNS3_NONE_RESET;
-	enum hns3_reset_level last_req;
 	uint32_t vector0_intr_state;

-	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
 	vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
 	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state)
 		new_req = HNS3_IMP_RESET;
 	else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state)
 		new_req = HNS3_GLOBAL_RESET;

-	if (new_req == HNS3_NONE_RESET)
-		return HNS3_NONE_RESET;
-
-	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
-		hns3_schedule_delayed_reset(hns);
-		hns3_warn(hw, "High level reset detected, delay do reset");
-	}
-
 	return new_req;
 }

@@ -5584,10 +5572,14 @@ hns3_is_reset_pending(struct hns3_adapter *hns)
 		return false;

 	new_req = hns3_detect_reset_event(hw);
+	if (new_req == HNS3_NONE_RESET)
+		return false;
+
 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
-	if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET &&
-	    new_req < last_req) {
-		hns3_warn(hw, "High level reset %d is pending", last_req);
+	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
+		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		hns3_schedule_delayed_reset(hns);
+		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
 	}

 	last_req = hns3_get_reset_level(hns, &hw->reset.request);
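Condensed into a standalone predicate (simplified names, not the driver's
types), the corrected comparison reads as below; the buggy form returned
false whenever last_req was HNS3_NONE_RESET, and it also compared the two
levels the wrong way round.

    #include <stdbool.h>

    enum reset_level { NONE_RESET = 0, FUNC_RESET, GLOBAL_RESET, IMP_RESET };

    static bool should_defer(enum reset_level last_req, enum reset_level new_req)
    {
            if (new_req == NONE_RESET)
                    return false; /* nothing newly detected */
            /* Buggy form was: last_req != NONE_RESET && new_req < last_req */
            return last_req == NONE_RESET || last_req < new_req;
    }

After this series, the PF path (hns3_is_reset_pending) and the VF path
(hns3vf_is_reset_pending) both reduce to a check of this shape.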
hns3_warn(hw, "High level reset detected, delay do reset"); return true; } last_req = hns3_get_reset_level(hns, &hw->reset.request); From patchwork Thu Dec 7 01:42:05 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jie Hai X-Patchwork-Id: 134898 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 51A3643692; Thu, 7 Dec 2023 02:46:34 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id BD36C42EBB; Thu, 7 Dec 2023 02:46:12 +0100 (CET) Received: from szxga08-in.huawei.com (szxga08-in.huawei.com [45.249.212.255]) by mails.dpdk.org (Postfix) with ESMTP id 1279342E96 for ; Thu, 7 Dec 2023 02:46:07 +0100 (CET) Received: from kwepemd100004.china.huawei.com (unknown [172.30.72.55]) by szxga08-in.huawei.com (SkyGuard) with ESMTP id 4Slxpc709Tz1Q6MD for ; Thu, 7 Dec 2023 09:42:16 +0800 (CST) Received: from localhost.localdomain (10.67.165.2) by kwepemd100004.china.huawei.com (7.221.188.31) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.2.1258.28; Thu, 7 Dec 2023 09:46:03 +0800 From: Jie Hai To: , Yisen Zhuang CC: , , , Subject: [PATCH v2 4/4] net/hns3: use stdatomic API Date: Thu, 7 Dec 2023 09:42:05 +0800 Message-ID: <20231207014205.4002558-5-haijie1@huawei.com> X-Mailer: git-send-email 2.30.0 In-Reply-To: <20231207014205.4002558-1-haijie1@huawei.com> References: <20231111015915.2776769-1-haijie1@huawei.com> <20231207014205.4002558-1-haijie1@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.2] X-ClientProxiedBy: dggems706-chm.china.huawei.com (10.3.19.183) To kwepemd100004.china.huawei.com (7.221.188.31) X-CFilter-Loop: Reflected X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding rte_atomic_xxx optional stdatomic API. 
Signed-off-by: Jie Hai
---
 drivers/net/hns3/hns3_cmd.c       | 24 ++++++++-----
 drivers/net/hns3/hns3_dcb.c       |  3 +-
 drivers/net/hns3/hns3_ethdev.c    | 50 ++++++++++++++---------
 drivers/net/hns3/hns3_ethdev.h    | 36 +++++++++--------
 drivers/net/hns3/hns3_ethdev_vf.c | 66 +++++++++++++++++--------------
 drivers/net/hns3/hns3_intr.c      | 47 ++++++++++++----------
 drivers/net/hns3/hns3_intr.h      |  4 +-
 drivers/net/hns3/hns3_mbx.c       |  6 ++-
 drivers/net/hns3/hns3_mp.c        |  9 +++--
 drivers/net/hns3/hns3_rxtx.c      | 15 ++++---
 drivers/net/hns3/hns3_tm.c        |  6 ++-
 11 files changed, 156 insertions(+), 110 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 2c1664485bef..49cb2cc3dacf 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,13 @@ static int
 hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
 		      uint64_t size, uint32_t alignment)
 {
-	static uint64_t hns3_dma_memzone_id;
+	static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
 	const struct rte_memzone *mz = NULL;
 	char z_name[RTE_MEMZONE_NAMESIZE];

 	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
-		 __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+		 rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1,
+					       rte_memory_order_relaxed));
 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
 					 RTE_MEMZONE_IOVA_CONTIG, alignment,
 					 RTE_PGSIZE_2M);
@@ -198,8 +199,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
 		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
 			 csq->next_to_use, csq->next_to_clean);
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			__atomic_store_n(&hw->reset.disable_cmd, 1,
-					 __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+						  rte_memory_order_relaxed);
 			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
 		}

@@ -313,7 +314,8 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 		if (hns3_cmd_csq_done(hw))
 			return 0;

-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			hns3_err(hw,
 				 "Don't wait for reply because of disable_cmd");
 			return -EBUSY;
@@ -360,7 +362,8 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
 	int retval;
 	uint32_t ntc;

-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed))
 		return -EBUSY;

 	rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -745,7 +748,8 @@ hns3_cmd_init(struct hns3_hw *hw)
 		ret = -EBUSY;
 		goto err_cmd_init;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
+				  rte_memory_order_relaxed);

 	ret = hns3_cmd_query_firmware_version_and_capability(hw);
 	if (ret) {
@@ -788,7 +792,8 @@ hns3_cmd_init(struct hns3_hw *hw)
 	return 0;

 err_cmd_init:
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	return ret;
 }

@@ -817,7 +822,8 @@ hns3_cmd_uninit(struct hns3_hw *hw)
 	if (!hns->is_vf)
 		(void)hns3_firmware_compat_config(hw, false);

-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);

 	/*
 	 * A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 2831d3dc6205..08c77e04857d 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,8 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
 	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
 	 * stage of the reset process.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		for (i = 0; i < hw->rss_ind_tbl_size; i++)
 			rss_cfg->rss_indirection_tbl[i] =
 							i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index b8f7e408d1e0..457247609811 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
 };

 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
-						  uint64_t *levels);
+						  RTE_ATOMIC(uint64_t) *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
@@ -134,7 +134,8 @@ hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
 {
 	struct hns3_hw *hw = &hns->hw;

-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
 	hw->reset.stats.imp_cnt++;
@@ -148,7 +149,8 @@ hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
 {
 	struct hns3_hw *hw = &hns->hw;

-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
 	hw->reset.stats.global_cnt++;
@@ -1151,7 +1153,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * ensure that the hardware configuration remains unchanged before and
 	 * after reset.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
 		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
 	}
@@ -1175,7 +1178,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * we will restore configurations to hardware in hns3_restore_vlan_table
 	 * and hns3_restore_vlan_conf later.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
 		if (ret) {
 			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5063,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	int ret;

 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed))
 		return -EBUSY;

 	rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5155,8 @@ hns3_do_stop(struct hns3_adapter *hns)
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);

 	ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5164,8 @@ hns3_do_stop(struct hns3_adapter *hns)
 		return ret;
 	hw->mac.link_status = RTE_ETH_LINK_DOWN;

-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -5184,7 +5191,8 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	hns3_stop_rxtx_datapath(dev);

 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		hns3_tm_dev_stop_proc(hw);
 		hns3_config_mac_tnl_int(hw, false);
 		hns3_stop_tqps(hw);
@@ -5577,7 +5585,8 @@ hns3_is_reset_pending(struct hns3_adapter *hns)

 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
@@ -5737,7 +5746,8 @@ hns3_prepare_reset(struct hns3_adapter *hns)
 		 * any mailbox handling or command to firmware is only valid
 		 * after hns3_cmd_init is called.
 		 */
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		hw->reset.stats.request_cnt++;
 		break;
 	case HNS3_IMP_RESET:
@@ -5792,7 +5802,8 @@ hns3_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);

@@ -5913,10 +5924,10 @@ hns3_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-	    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) == SCHEDULE_DEFERRED) {
+		rte_atomic_store_explicit(&hw->reset.schedule,
+					  SCHEDULE_REQUESTED, rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5936,8 @@ hns3_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE,
+				  rte_memory_order_relaxed);

 	/*
 	 * Check if there is any ongoing reset in the hardware. This status can
@@ -6575,8 +6587,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)

 	hw->adapter_state = HNS3_NIC_INITIALIZED;

-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-	    SCHEDULE_PENDING) {
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) == SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 12d8299def39..1062cb846368 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -399,17 +399,17 @@ enum hns3_schedule {

 struct hns3_reset_data {
 	enum hns3_reset_stage stage;
-	uint16_t schedule;
+	RTE_ATOMIC(uint16_t) schedule;
 	/* Reset flag, covering the entire reset process */
-	uint16_t resetting;
+	RTE_ATOMIC(uint16_t) resetting;
 	/* Used to disable sending cmds during reset */
-	uint16_t disable_cmd;
+	RTE_ATOMIC(uint16_t) disable_cmd;
 	/* The reset level being processed */
 	enum hns3_reset_level level;
 	/* Reset level set, each bit represents a reset level */
-	uint64_t pending;
+	RTE_ATOMIC(uint64_t) pending;
 	/* Request reset level set, from interrupt or mailbox */
-	uint64_t request;
+	RTE_ATOMIC(uint64_t) request;
 	int attempts; /* Reset failure retry */
 	int retries;  /* Timeout failure retry in reset_post */
 	/*
@@ -497,7 +497,7 @@ struct hns3_hw {
 	 * by dev_set_link_up() or dev_start().
 	 */
 	bool set_link_down;
-	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+	RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
 	struct hns3_tqp_stats tqp_stats;
 	/* Include Mac stats | Rx stats | Tx stats */
 	struct hns3_mac_stats mac_stats;
@@ -842,7 +842,7 @@ struct hns3_vf {
 	struct hns3_adapter *adapter;

 	/* Whether PF support push link status change to VF */
-	uint16_t pf_push_lsc_cap;
+	RTE_ATOMIC(uint16_t) pf_push_lsc_cap;

 	/*
 	 * If PF support push link status change, VF still need send request to
@@ -851,7 +851,7 @@ struct hns3_vf {
 	 */
 	uint16_t req_link_info_cnt;

-	uint16_t poll_job_started; /* whether poll job is started */
+	RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
 };

 struct hns3_adapter {
@@ -995,32 +995,36 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
 	hns3_read_reg((a)->io_base, (reg))

 static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, RTE_ATOMIC(uint64_t) *addr)
 {
 	uint64_t res;

-	res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+	res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) &
+	       (1UL << nr)) != 0;
 	return res;
 }

 static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, RTE_ATOMIC(uint64_t) *addr)
 {
-	__atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_or_explicit(addr, (1UL << nr),
+				     rte_memory_order_relaxed);
 }

 static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
-	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_and_explicit(addr, ~(1UL << nr),
+				      rte_memory_order_relaxed);
 }

 static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
 	uint64_t mask = (1UL << nr);

-	return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+	return rte_atomic_fetch_and_explicit(addr, ~mask,
+					     rte_memory_order_relaxed) & mask;
 }

 int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 089df146f76e..449285dc9805 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
 };

 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
-						    uint64_t *levels);
+						    RTE_ATOMIC(uint64_t) *levels);
 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);

@@ -478,7 +478,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	 * MTU value issued by hns3 VF PMD must be less than or equal to
 	 * PF's MTU.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
 	}
@@ -559,7 +559,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -628,8 +628,8 @@ hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);

 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
-		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+		rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap,
+			&exp, val, rte_memory_order_acquire, rte_memory_order_acquire);
 }

 static void
@@ -643,8 +643,8 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);

-	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
-			 __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&vf->pf_push_lsc_cap,
+		HNS3_PF_PUSH_LSC_CAP_UNKNOWN, rte_memory_order_release);

 	(void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
 				NULL, 0);
@@ -659,7 +659,8 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 		 * mailbox from PF driver to get this capability.
 		 */
 		hns3_dev_handle_mbx_msg(hw);
-		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+		if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap,
+					     rte_memory_order_acquire) !=
 		    HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
 			break;
 		remain_ms--;
@@ -670,10 +671,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 	 * state: unknown (means pf not ack), not_supported, supported.
 	 * Here config it as 'not_supported' when it's 'unknown' state.
 	 */
-	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+	rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp,
+		val, rte_memory_order_acquire, rte_memory_order_acquire);

-	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+	if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
 	    HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
 		hns3_info(hw, "detect PF support push link status change!");
 	} else {
@@ -907,7 +908,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
 	bool send_req;
 	int ret;

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return;

 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -943,7 +944,7 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
 	 * sending request to PF kernel driver, then could update link status by
 	 * process PF kernel driver's link status mailbox message.
 	 */
-	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+	if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
 		return;

 	if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -982,7 +983,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw,
 			 "vf set vlan id failed during resetting, vlan_id =%u",
 			 vlan_id);
@@ -1042,7 +1043,8 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	unsigned int tmp_mask;
 	int ret = 0;

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "vf set vlan offload failed during resetting, "
 			     "mask = 0x%x", mask);
 		return -EIO;
 	}
@@ -1232,7 +1234,7 @@ hns3vf_start_poll_job(struct rte_eth_dev *dev)

 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;

-	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);

 	hns3vf_service_handler(dev);
 }
@@ -1244,7 +1246,7 @@ hns3vf_stop_poll_job(struct rte_eth_dev *dev)

 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);

-	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
 }

 static int
@@ -1478,10 +1480,10 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);

-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -1506,7 +1508,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	hns3_stop_rxtx_datapath(dev);

 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		hns3_stop_tqps(hw);
 		hns3vf_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
@@ -1621,7 +1623,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	int ret;

 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return -EBUSY;

 	rte_spinlock_lock(&hw->lock);
@@ -1751,7 +1753,8 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns)

 	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
@@ -1824,7 +1827,8 @@ hns3vf_prepare_reset(struct hns3_adapter *hns)
 		if (ret)
 			return ret;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);

 	return 0;
 }
@@ -1865,7 +1869,8 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);

@@ -2047,10 +2052,10 @@ hns3vf_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 	    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+					  rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2059,7 +2064,7 @@ hns3vf_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);

 	/*
 	 * Hardware reset has been notified, we now have to poll & check if
@@ -2254,8 +2259,9 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)

 	hw->adapter_state = HNS3_NIC_INITIALIZED;

-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-	    SCHEDULE_PENDING) {
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) == SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30dcb56..8701fc861e8b 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ hns3_get_hw_error_status(struct hns3_cmd_desc *desc, uint8_t desc_offset,

 static int
 hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
-		     int num, uint64_t *levels,
+		     int num, RTE_ATOMIC(uint64_t) *levels,
 		     enum hns3_hw_err_report_type err_type)
 {
 	const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
 }

 void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
 	struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
 }

 void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
 	struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,8 @@ hns3_reset_init(struct hns3_hw *hw)
 	hw->reset.request = 0;
 	hw->reset.pending = 0;
 	hw->reset.resetting = 0;
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
+				  rte_memory_order_relaxed);
 	hw->reset.wait_data = rte_zmalloc("wait_data",
 					 sizeof(struct hns3_wait_data), 0);
 	if (!hw->reset.wait_data) {
@@ -2419,8 +2420,8 @@ hns3_schedule_reset(struct hns3_adapter *hns)

 	/* Reschedule the reset process after successful initialization */
 	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+					  rte_memory_order_relaxed);
 		return;
 	}

@@ -2428,15 +2429,15 @@ hns3_schedule_reset(struct hns3_adapter *hns)
 		return;

 	/* Schedule restart alarm if it is not scheduled yet */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-	    SCHEDULE_REQUESTED)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) == SCHEDULE_REQUESTED)
 		return;
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-	    SCHEDULE_DEFERRED)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) == SCHEDULE_DEFERRED)
 		rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-			 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				  rte_memory_order_relaxed);
 	rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
 }

@@ -2453,11 +2454,11 @@ hns3_schedule_delayed_reset(struct hns3_adapter *hns)
 		return;
 	}

-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
-	    SCHEDULE_NONE)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) != SCHEDULE_NONE)
 		return;
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
-			 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+				  rte_memory_order_relaxed);
 	rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
 }

@@ -2537,7 +2538,7 @@ hns3_reset_req_hw_reset(struct hns3_adapter *hns)
 }

 static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint64_t merge_cnt = hw->reset.stats.merge_cnt;
 	uint64_t tmp;
@@ -2633,7 +2634,8 @@ hns3_reset_err_handle(struct hns3_adapter *hns)
 	 * Regardless of whether the execution is successful or not, the
 	 * flow after execution must be continued.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed))
 		(void)hns3_cmd_init(hw);
 reset_fail:
 	hw->reset.attempts = 0;
@@ -2661,7 +2663,8 @@ hns3_reset_pre(struct hns3_adapter *hns)
 	int ret;

 	if (hw->reset.stage == RESET_STAGE_NONE) {
-		__atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 1,
+					  rte_memory_order_relaxed);
 		hw->reset.stage = RESET_STAGE_DOWN;
 		hns3_report_reset_begin(hw);
 		ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2753,8 @@ hns3_reset_post(struct hns3_adapter *hns)
 		hns3_notify_reset_ready(hw, false);
 		hns3_clear_reset_level(hw, &hw->reset.pending);
 		hns3_clear_reset_status(hw);
-		__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
+					  rte_memory_order_relaxed);
 		hw->reset.attempts = 0;
 		hw->reset.stats.success_cnt++;
 		hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2816,8 @@ hns3_reset_fail_handle(struct hns3_adapter *hns)
 		hw->reset.mbuf_deferred_free = false;
 	}
 	rte_spinlock_unlock(&hw->lock);
-	__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
+				  rte_memory_order_relaxed);
 	hw->reset.stage = RESET_STAGE_NONE;
 	hns3_clock_gettime(&tv);
 	timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c0722c67..dce404cc26e3 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
 };

 int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);

 void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
 void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index f1743c195efa..7af56ff23deb 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -59,7 +59,8 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
 	while (wait_time < mbx_time_limit) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			hns3_err(hw, "Don't wait for mbx response because of "
 				 "disable_cmd");
 			return -EBUSY;
@@ -425,7 +426,8 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 	}

 	while (!hns3_cmd_crq_empty(hw)) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
 			return;
 		}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f1941c6b2..8ee97a7c598a 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type)
 	int i;

 	if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
-	    __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+	    rte_atomic_load_explicit(&hw->secondary_cnt,
+				     rte_memory_order_relaxed) == 0)
 		return;

 	if (!mp_req_type_is_valid(type)) {
@@ -277,7 +278,8 @@ hns3_mp_init(struct rte_eth_dev *dev)
 			     ret);
 			return ret;
 		}
-		__atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1,
+					      rte_memory_order_relaxed);
 	} else {
 		ret = hns3_mp_init_primary();
 		if (ret) {
@@ -297,7 +299,8 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		__atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1,
+					      rte_memory_order_relaxed);

 	process_data.eth_dev_cnt--;
 	if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 09b7e90c7000..bb600475e91e 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4465,7 +4465,8 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;

 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+	    rte_atomic_load_explicit(&hns->hw.reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
 		eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4531,7 +4532,8 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)

 	rte_spinlock_lock(&hw->lock);

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4587,7 +4589,8 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)

 	rte_spinlock_lock(&hw->lock);

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4616,7 +4619,8 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)

 	rte_spinlock_lock(&hw->lock);

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4649,7 +4653,8 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)

 	rte_spinlock_lock(&hw->lock);

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d9691640140b..656db9b170b2 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,8 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
 	if (error == NULL)
 		return -EINVAL;

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		/* don't goto fail_clear, user may try later */
@@ -1141,7 +1142,8 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
 	if (error == NULL)
 		return -EINVAL;

-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		return -EBUSY;
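One pattern recurs across the rxtx and tm hunks above: an early-out guard
on hw->reset.resetting at the top of control-path entry points. Condensed
with simplified stand-in types (struct hw and control_path_op are
illustrative, not the driver's definitions), it looks like this:

    #include <errno.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    struct hw {
            RTE_ATOMIC(uint16_t) resetting; /* nonzero while a reset is in flight */
    };

    static int
    control_path_op(struct hw *hw)
    {
            /* Refuse the operation during reset; the caller may retry later. */
            if (rte_atomic_load_explicit(&hw->resetting, rte_memory_order_relaxed))
                    return -EBUSY;
            /* ... perform the queue start/stop or TM update ... */
            return 0;
    }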