@@ -4304,9 +4304,8 @@ txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
sizeof(struct txgbe_5tuple_filter), 0);
if (filter == NULL)
return -ENOMEM;
- rte_memcpy(&filter->filter_info,
- &filter_5tuple,
- sizeof(struct txgbe_5tuple_filter_info));
+ memcpy(&filter->filter_info, &filter_5tuple,
+ sizeof(struct txgbe_5tuple_filter_info));
filter->queue = ntuple_filter->queue;
ret = txgbe_add_5tuple_filter(dev, filter);
if (ret < 0) {
@@ -5109,9 +5108,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
if (!node)
return -ENOMEM;
- rte_memcpy(&node->key,
- &key,
- sizeof(struct txgbe_l2_tn_key));
+ memcpy(&node->key, &key, sizeof(struct txgbe_l2_tn_key));
node->pool = l2_tunnel->pool;
ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
if (ret < 0) {
@@ -42,7 +42,7 @@
else \
ipv6_addr[i] = 0; \
} \
- rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+ memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
/**
@@ -858,8 +858,8 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
sizeof(struct txgbe_fdir_filter), 0);
if (!node)
return -ENOMEM;
- rte_memcpy(&node->input, &rule->input,
- sizeof(struct txgbe_atr_input));
+ memcpy(&node->input, &rule->input,
+ sizeof(struct txgbe_atr_input));
node->fdirflags = rule->fdirflags;
node->fdirhash = fdirhash;
node->queue = queue;
@@ -1834,10 +1834,10 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
if (item->spec) {
rule->b_spec = TRUE;
ipv6_spec = item->spec;
- rte_memcpy(rule->input.src_ip,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(rule->input.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
+ memcpy(rule->input.src_ip, ipv6_spec->hdr.src_addr,
+ 16);
+ memcpy(rule->input.dst_ip, ipv6_spec->hdr.dst_addr,
+ 16);
}
/**
@@ -2756,9 +2756,9 @@ txgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&ntuple_filter_ptr->filter_info,
- &ntuple_filter,
- sizeof(struct rte_eth_ntuple_filter));
+ memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
ntuple_filter_ptr, entries);
flow->rule = ntuple_filter_ptr;
@@ -2782,9 +2782,9 @@ txgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&ethertype_filter_ptr->filter_info,
- &ethertype_filter,
- sizeof(struct rte_eth_ethertype_filter));
+ memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
ethertype_filter_ptr, entries);
flow->rule = ethertype_filter_ptr;
@@ -2806,9 +2806,8 @@ txgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&syn_filter_ptr->filter_info,
- &syn_filter,
- sizeof(struct rte_eth_syn_filter));
+ memcpy(&syn_filter_ptr->filter_info, &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
syn_filter_ptr,
entries);
@@ -2827,9 +2826,8 @@ txgbe_flow_create(struct rte_eth_dev *dev,
if (fdir_rule.b_mask) {
if (!fdir_info->mask_added) {
/* It's the first time the mask is set. */
- rte_memcpy(&fdir_info->mask,
- &fdir_rule.mask,
- sizeof(struct txgbe_hw_fdir_mask));
+ memcpy(&fdir_info->mask, &fdir_rule.mask,
+ sizeof(struct txgbe_hw_fdir_mask));
fdir_info->flex_bytes_offset =
fdir_rule.flex_bytes_offset;
@@ -2873,9 +2871,9 @@ txgbe_flow_create(struct rte_eth_dev *dev,
"failed to allocate memory");
goto out;
}
- rte_memcpy(&fdir_rule_ptr->filter_info,
- &fdir_rule,
- sizeof(struct txgbe_fdir_rule));
+ memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct txgbe_fdir_rule));
TAILQ_INSERT_TAIL(&filter_fdir_list,
fdir_rule_ptr, entries);
flow->rule = fdir_rule_ptr;
@@ -2910,9 +2908,8 @@ txgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&l2_tn_filter_ptr->filter_info,
- &l2_tn_filter,
- sizeof(struct txgbe_l2_tunnel_conf));
+ memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter,
+ sizeof(struct txgbe_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
flow->rule = l2_tn_filter_ptr;
@@ -3038,9 +3035,8 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_NTUPLE:
ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
pmd_flow->rule;
- rte_memcpy(&ntuple_filter,
- &ntuple_filter_ptr->filter_info,
- sizeof(struct rte_eth_ntuple_filter));
+ memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_ntuple_list,
@@ -3051,9 +3047,8 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
pmd_flow->rule;
- rte_memcpy(&ethertype_filter,
- &ethertype_filter_ptr->filter_info,
- sizeof(struct rte_eth_ethertype_filter));
+ memcpy(&ethertype_filter, &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
ret = txgbe_add_del_ethertype_filter(dev,
&ethertype_filter, FALSE);
if (!ret) {
@@ -3065,9 +3060,8 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_SYN:
syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
pmd_flow->rule;
- rte_memcpy(&syn_filter,
- &syn_filter_ptr->filter_info,
- sizeof(struct rte_eth_syn_filter));
+ memcpy(&syn_filter, &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_syn_list,
@@ -3077,9 +3071,8 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
break;
case RTE_ETH_FILTER_FDIR:
fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
- rte_memcpy(&fdir_rule,
- &fdir_rule_ptr->filter_info,
- sizeof(struct txgbe_fdir_rule));
+ memcpy(&fdir_rule, &fdir_rule_ptr->filter_info,
+ sizeof(struct txgbe_fdir_rule));
ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_fdir_list,
@@ -3092,8 +3085,8 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_L2_TUNNEL:
l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
- rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
- sizeof(struct txgbe_l2_tunnel_conf));
+ memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct txgbe_l2_tunnel_conf));
ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
if (!ret) {
TAILQ_REMOVE(&filter_l2_tunnel_list,
@@ -658,10 +658,10 @@ txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
ic_session->src_ip.type = IPv6;
ic_session->dst_ip.type = IPv6;
- rte_memcpy(ic_session->src_ip.ipv6,
- ipv6->hdr.src_addr, 16);
- rte_memcpy(ic_session->dst_ip.ipv6,
- ipv6->hdr.dst_addr, 16);
+ memcpy(ic_session->src_ip.ipv6, ipv6->hdr.src_addr,
+ 16);
+ memcpy(ic_session->dst_ip.ipv6, ipv6->hdr.dst_addr,
+ 16);
} else {
const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
ic_session->src_ip.type = IPv4;
@@ -435,7 +435,7 @@ txgbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK;
- rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
+ memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
@@ -457,7 +457,7 @@ txgbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev,
struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
if (rte_is_valid_assigned_ether_addr(ea)) {
- rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+ memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
}
return -1;
@@ -280,8 +280,8 @@ txgbe_shaper_profile_add(struct rte_eth_dev *dev,
if (!shaper_profile)
return -ENOMEM;
shaper_profile->shaper_profile_id = shaper_profile_id;
- rte_memcpy(&shaper_profile->profile, profile,
- sizeof(struct rte_tm_shaper_params));
+ memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
shaper_profile, node);
@@ -625,8 +625,8 @@ txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->no = 0;
tm_node->parent = NULL;
tm_node->shaper_profile = shaper_profile;
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
tm_conf->root = tm_node;
/* increase the reference counter of the shaper profile */
@@ -706,8 +706,7 @@ txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->reference_count = 0;
tm_node->parent = parent_node;
tm_node->shaper_profile = shaper_profile;
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
tm_node->no = parent_node->reference_count;
TAILQ_INSERT_TAIL(&tm_conf->tc_list,