[4/6] net/hns3: separate Tx prepare from getting Tx function
From: Huisong Li <lihuisong@huawei.com>
Separate the selection of the Tx prepare function from
hns3_get_tx_function() by extracting it into an independent function,
hns3_get_tx_prepare(). This lets callers obtain the Tx prepare handler
on its own wherever the Tx burst function is (re)selected.
Fixes: d7ec2c076579 ("net/hns3: select Tx prepare based on Tx offload")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Dongdong Liu <liudongdong3@huawei.com>
---
drivers/net/hns3/hns3_rxtx.c | 32 ++++++++++++++------------------
drivers/net/hns3/hns3_rxtx.h | 3 +--
2 files changed, 15 insertions(+), 20 deletions(-)
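
Note for reviewers (illustration only, not additional code in the
patch): the call sites change from threading an eth_tx_prep_t
out-parameter through hns3_get_tx_function(), i.e.

	eth_tx_prep_t prep = NULL;

	dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
	dev->tx_pkt_prepare = prep;

to selecting the two handlers independently:

	dev->tx_pkt_burst = hns3_get_tx_function(dev);
	dev->tx_pkt_prepare = hns3_get_tx_prepare(dev);
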
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4324,26 +4324,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
 
 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
+
 	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
 		return true;
 
 	return false;
 }
 
+static eth_tx_prep_t
+hns3_get_tx_prepare(struct rte_eth_dev *dev)
+{
+	return hns3_get_tx_prep_needed(dev) ? hns3_prep_pkts : NULL;
+}
+
 eth_tx_burst_t
-hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
+hns3_get_tx_function(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	bool vec_allowed, sve_allowed, simple_allowed;
-	bool vec_support, tx_prepare_needed;
+	bool vec_support;
 
 	vec_support = hns3_tx_check_vec_support(dev) == 0;
 	vec_allowed = vec_support && hns3_get_default_vec_support();
 	sve_allowed = vec_support && hns3_get_sve_support();
 	simple_allowed = hns3_tx_check_simple_support(dev);
-	tx_prepare_needed = hns3_get_tx_prep_needed(dev);
-
-	*prep = NULL;
 
 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
 		return hns3_xmit_pkts_vec;
@@ -4351,19 +4355,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 		return hns3_xmit_pkts_vec_sve;
 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
 		return hns3_xmit_pkts_simple;
-	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
-		if (tx_prepare_needed)
-			*prep = hns3_prep_pkts;
+	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
 		return hns3_xmit_pkts;
-	}
 
 	if (vec_allowed)
 		return hns3_xmit_pkts_vec;
 	if (simple_allowed)
 		return hns3_xmit_pkts_simple;
 
-	if (tx_prepare_needed)
-		*prep = hns3_prep_pkts;
 	return hns3_xmit_pkts;
 }
 
@@ -4403,7 +4402,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	eth_tx_prep_t prep = NULL;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
 	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
@@ -4411,8 +4409,8 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
 		eth_dev->tx_pkt_burst = hw->set_link_down ?
 					rte_eth_pkt_burst_dummy :
-					hns3_get_tx_function(eth_dev, &prep);
-		eth_dev->tx_pkt_prepare = prep;
+					hns3_get_tx_function(eth_dev);
+		eth_dev->tx_pkt_prepare = hns3_get_tx_prepare(eth_dev);
 		eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
 	} else {
 		eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
@@ -4758,10 +4756,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev)
 void
 hns3_start_tx_datapath(struct rte_eth_dev *dev)
 {
-	eth_tx_prep_t prep = NULL;
-
-	dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
-	dev->tx_pkt_prepare = prep;
+	dev->tx_pkt_burst = hns3_get_tx_function(dev);
+	dev->tx_pkt_prepare = hns3_get_tx_prepare(dev);
 	hns3_eth_dev_fp_ops_config(dev);
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -740,8 +740,7 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
-eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev,
-				    eth_tx_prep_t *prep);
+eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev);
 uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
 void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,