@@ -6597,9 +6597,8 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
sizeof(struct ixgbe_5tuple_filter), 0);
if (filter == NULL)
return -ENOMEM;
- rte_memcpy(&filter->filter_info,
- &filter_5tuple,
- sizeof(struct ixgbe_5tuple_filter_info));
+ memcpy(&filter->filter_info, &filter_5tuple,
+ sizeof(struct ixgbe_5tuple_filter_info));
filter->queue = ntuple_filter->queue;
ret = ixgbe_add_5tuple_filter(dev, filter);
if (ret < 0) {
@@ -7596,9 +7595,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
if (!node)
return -ENOMEM;
- rte_memcpy(&node->key,
- &key,
- sizeof(struct ixgbe_l2_tn_key));
+ memcpy(&node->key, &key, sizeof(struct ixgbe_l2_tn_key));
node->pool = l2_tunnel->pool;
ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
if (ret < 0) {
@@ -74,7 +74,7 @@
else \
ipv6_addr[i] = 0; \
} \
- rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+ memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
@@ -1217,9 +1217,8 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
0);
if (!node)
return -ENOMEM;
- rte_memcpy(&node->ixgbe_fdir,
- &rule->ixgbe_fdir,
- sizeof(union ixgbe_atr_input));
+ memcpy(&node->ixgbe_fdir, &rule->ixgbe_fdir,
+ sizeof(union ixgbe_atr_input));
node->fdirflags = fdircmd_flags;
node->fdirhash = fdirhash;
node->queue = queue;
@@ -1944,10 +1944,10 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
ipv6_spec = item->spec;
- rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
+ memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
}
/**
@@ -3070,9 +3070,9 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&ntuple_filter_ptr->filter_info,
- &ntuple_filter,
- sizeof(struct rte_eth_ntuple_filter));
+ memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
ntuple_filter_ptr, entries);
flow->rule = ntuple_filter_ptr;
@@ -3096,9 +3096,9 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&ethertype_filter_ptr->filter_info,
- &ethertype_filter,
- sizeof(struct rte_eth_ethertype_filter));
+ memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
ethertype_filter_ptr, entries);
flow->rule = ethertype_filter_ptr;
@@ -3120,9 +3120,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&syn_filter_ptr->filter_info,
- &syn_filter,
- sizeof(struct rte_eth_syn_filter));
+ memcpy(&syn_filter_ptr->filter_info, &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
syn_filter_ptr,
entries);
@@ -3141,9 +3140,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
if (fdir_rule.b_mask) {
if (!fdir_info->mask_added) {
/* It's the first time the mask is set. */
- rte_memcpy(&fdir_info->mask,
- &fdir_rule.mask,
- sizeof(struct ixgbe_hw_fdir_mask));
+ memcpy(&fdir_info->mask, &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
if (fdir_rule.mask.flex_bytes_mask) {
ret = ixgbe_fdir_set_flexbytes_offset(dev,
@@ -3185,9 +3183,9 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&fdir_rule_ptr->filter_info,
- &fdir_rule,
- sizeof(struct ixgbe_fdir_rule));
+ memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct ixgbe_fdir_rule));
TAILQ_INSERT_TAIL(&filter_fdir_list,
fdir_rule_ptr, entries);
flow->rule = fdir_rule_ptr;
@@ -3222,9 +3220,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&l2_tn_filter_ptr->filter_info,
- &l2_tn_filter,
- sizeof(struct ixgbe_l2_tunnel_conf));
+ memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter,
+ sizeof(struct ixgbe_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
flow->rule = l2_tn_filter_ptr;
@@ -3351,9 +3348,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_NTUPLE:
ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
pmd_flow->rule;
- rte_memcpy(&ntuple_filter,
- &ntuple_filter_ptr->filter_info,
- sizeof(struct rte_eth_ntuple_filter));
+ memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_ntuple_list,
@@ -3364,9 +3360,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
pmd_flow->rule;
- rte_memcpy(&ethertype_filter,
- &ethertype_filter_ptr->filter_info,
- sizeof(struct rte_eth_ethertype_filter));
+ memcpy(&ethertype_filter, &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_add_del_ethertype_filter(dev,
&ethertype_filter, FALSE);
if (!ret) {
@@ -3378,9 +3373,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_SYN:
syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
pmd_flow->rule;
- rte_memcpy(&syn_filter,
- &syn_filter_ptr->filter_info,
- sizeof(struct rte_eth_syn_filter));
+ memcpy(&syn_filter, &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_syn_list,
@@ -3390,9 +3384,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
break;
case RTE_ETH_FILTER_FDIR:
fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
- rte_memcpy(&fdir_rule,
- &fdir_rule_ptr->filter_info,
- sizeof(struct ixgbe_fdir_rule));
+ memcpy(&fdir_rule, &fdir_rule_ptr->filter_info,
+ sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_fdir_list,
@@ -3405,8 +3398,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_L2_TUNNEL:
l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
- rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
- sizeof(struct ixgbe_l2_tunnel_conf));
+ memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct ixgbe_l2_tunnel_conf));
ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
if (!ret) {
TAILQ_REMOVE(&filter_l2_tunnel_list,
@@ -680,10 +680,10 @@ ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
ic_session->src_ip.type = IPv6;
ic_session->dst_ip.type = IPv6;
- rte_memcpy(ic_session->src_ip.ipv6,
- ipv6->hdr.src_addr, 16);
- rte_memcpy(ic_session->dst_ip.ipv6,
- ipv6->hdr.dst_addr, 16);
+ memcpy(ic_session->src_ip.ipv6, ipv6->hdr.src_addr,
+ 16);
+ memcpy(ic_session->dst_ip.ipv6, ipv6->hdr.dst_addr,
+ 16);
} else {
const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
ic_session->src_ip.type = IPv4;
@@ -450,7 +450,7 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
+ memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
@@ -472,7 +472,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
if (rte_is_valid_assigned_ether_addr(
(struct rte_ether_addr *)new_mac)) {
- rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+ memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
return -1;
@@ -289,8 +289,8 @@ ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
if (!shaper_profile)
return -ENOMEM;
shaper_profile->shaper_profile_id = shaper_profile_id;
- rte_memcpy(&shaper_profile->profile, profile,
- sizeof(struct rte_tm_shaper_params));
+ memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
shaper_profile, node);
@@ -637,8 +637,8 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->no = 0;
tm_node->parent = NULL;
tm_node->shaper_profile = shaper_profile;
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
tm_conf->root = tm_node;
/* increase the reference counter of the shaper profile */
@@ -718,8 +718,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->reference_count = 0;
tm_node->parent = parent_node;
tm_node->shaper_profile = shaper_profile;
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
tm_node->no = parent_node->reference_count;
TAILQ_INSERT_TAIL(&tm_conf->tc_list,
@@ -37,8 +37,8 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
if (rte_is_valid_assigned_ether_addr(
(struct rte_ether_addr *)new_mac)) {
- rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
- RTE_ETHER_ADDR_LEN);
+ memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ RTE_ETHER_ADDR_LEN);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
IXGBE_RAH_AV);
}
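
Note for reviewers: every call rewritten above copies a compile-time-constant
number of bytes (a sizeof() of a filter struct, a 16-byte IPv6 address,
RTE_ETHER_ADDR_LEN, or a literal 6), which is exactly the case where plain
memcpy() loses nothing against rte_memcpy(): compilers typically expand a
constant-size memcpy() inline, and the standard call is visible to fortify
and static analyzers. A minimal sketch of the substitution pattern, using a
hypothetical struct that is not part of the ixgbe driver:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-in for the filter-info structs touched above. */
    struct example_filter {
        uint16_t queue;
        uint8_t addr[16];
    };

    static void
    copy_example(struct example_filter *dst, const struct example_filter *src)
    {
        /* Before: rte_memcpy(dst, src, sizeof(*dst));
         * After: with a constant size the libc call is equivalent, and
         * the compiler can inline it just as aggressively.
         */
        memcpy(dst, src, sizeof(*dst));
    }

For variable-length bulk copies on the datapath, rte_memcpy may still be
preferable; none of the sites touched by this patch are of that kind.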