@@ -731,7 +731,7 @@ dcf_get_vlan_offload_caps_v2(struct ice_dcf_hw *hw)
return ret;
}
- rte_memcpy(&hw->vlan_v2_caps, &vlan_v2_caps, sizeof(vlan_v2_caps));
+ memcpy(&hw->vlan_v2_caps, &vlan_v2_caps, sizeof(vlan_v2_caps));
return 0;
}
@@ -1407,8 +1407,7 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw,
return -ENOMEM;
}
- rte_memcpy(list->list[0].addr, addr->addr_bytes,
- sizeof(addr->addr_bytes));
+ memcpy(list->list[0].addr, addr->addr_bytes, sizeof(addr->addr_bytes));
PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
RTE_ETHER_ADDR_BYTES(addr));
@@ -407,7 +407,7 @@ ice_dcf_load_pkg(struct ice_adapter *adapter)
use_dsn = ice_dcf_execute_virtchnl_cmd(&dcf_adapter->real_hw, &vc_cmd) == 0;
if (use_dsn)
- rte_memcpy(&dsn, pkg_info.dsn, sizeof(dsn));
+ memcpy(&dsn, pkg_info.dsn, sizeof(dsn));
return ice_load_pkg(adapter, use_dsn, dsn);
}
@@ -308,8 +308,8 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->id = node_id;
tm_node->parent = NULL;
tm_node->reference_count = 0;
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
hw->tm_conf.root = tm_node;
return 0;
@@ -373,8 +373,7 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->shaper_profile = shaper_profile;
tm_node->reference_count = 0;
tm_node->parent = parent_node;
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
TAILQ_INSERT_TAIL(&hw->tm_conf.tc_list,
tm_node, node);
@@ -520,8 +519,8 @@ ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
if (!shaper_profile)
return -ENOMEM;
shaper_profile->shaper_profile_id = shaper_profile_id;
- rte_memcpy(&shaper_profile->profile, profile,
- sizeof(struct rte_tm_shaper_params));
+ memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
TAILQ_INSERT_TAIL(&hw->tm_conf.shaper_profile_list,
shaper_profile, node);
@@ -362,13 +362,13 @@ ice_dump_pkg(struct rte_eth_dev *dev, uint8_t **buff, uint32_t *size)
count = *size / ICE_PKG_BUF_SIZE;
for (i = 0; i < count; i++) {
next_buff = (uint8_t *)(*buff) + i * ICE_PKG_BUF_SIZE;
- rte_memcpy(pkg_buff.buf, next_buff, ICE_PKG_BUF_SIZE);
+ memcpy(pkg_buff.buf, next_buff, ICE_PKG_BUF_SIZE);
if (ice_aq_upload_section(hw,
(struct ice_buf_hdr *)&pkg_buff.buf[0],
ICE_PKG_BUF_SIZE,
NULL))
return -EINVAL;
- rte_memcpy(next_buff, pkg_buff.buf, ICE_PKG_BUF_SIZE);
+ memcpy(next_buff, pkg_buff.buf, ICE_PKG_BUF_SIZE);
}
cache_size = sizeof(struct ice_package_header) + *size;
@@ -3387,11 +3387,11 @@ static int ice_init_rss(struct ice_pf *pf)
RTE_MIN(rss_conf->rss_key_len,
vsi->rss_key_size));
- rte_memcpy(key.standard_rss_key, vsi->rss_key,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- rte_memcpy(key.extended_hash_key,
- &vsi->rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE],
- ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE);
+ memcpy(key.standard_rss_key, vsi->rss_key,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(key.extended_hash_key,
+ &vsi->rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE],
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE);
ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
if (ret)
goto out;
@@ -4483,7 +4483,7 @@ ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
vsi->info.sw_flags2 &= ~sw_flags2;
vsi->info.sw_id = hw->port_info->sw_id;
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.info.valid_sections =
rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
ICE_AQ_VSI_PROP_SECURITY_VALID);
@@ -5301,7 +5301,7 @@ ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
ICE_AQ_VSI_INNER_VLAN_EMODE_M);
vsi->info.inner_vlan_flags |= vlan_flags;
memset(&ctxt, 0, sizeof(ctxt));
- rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.info.valid_sections =
rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_id;
@@ -1224,13 +1224,13 @@ ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
memset(key, 0, sizeof(*key));
key->flow_type = input->flow_type;
- rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
- rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
- rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
- rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
+ memcpy(&key->ip, &input->ip, sizeof(key->ip));
+ memcpy(&key->mask, &input->mask, sizeof(key->mask));
+ memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
+ memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
- rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
- rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
+ memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
+ memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
key->tunnel_type = filter->tunnel_type;
}
@@ -1358,7 +1358,7 @@ ice_fdir_create_filter(struct ice_adapter *ad,
if (!entry)
goto error;
- rte_memcpy(entry, filter, sizeof(*filter));
+ memcpy(entry, filter, sizeof(*filter));
flow->rule = entry;
@@ -1419,7 +1419,7 @@ ice_fdir_create_filter(struct ice_adapter *ad,
if (filter->mark_flag == 1)
ice_fdir_rx_parsing_enable(ad, 1);
- rte_memcpy(entry, filter, sizeof(*entry));
+ memcpy(entry, filter, sizeof(*entry));
ret = ice_fdir_entry_insert(pf, entry, &key);
if (ret) {
rte_flow_error_set(error, -ret,
@@ -1720,8 +1720,8 @@ ice_fdir_parse_action(struct ice_adapter *ad,
act_count = actions->conf;
filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
- rte_memcpy(&filter->act_count, act_count,
- sizeof(filter->act_count));
+ memcpy(&filter->act_count, act_count,
+ sizeof(filter->act_count));
break;
default:
@@ -1978,12 +1978,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
p_ext_data = (tunnel_type && is_outer) ?
&filter->input.ext_data_outer :
&filter->input.ext_data;
- rte_memcpy(&p_ext_data->src_mac,
- &eth_spec->hdr.src_addr, RTE_ETHER_ADDR_LEN);
- rte_memcpy(&p_ext_data->dst_mac,
- &eth_spec->hdr.dst_addr, RTE_ETHER_ADDR_LEN);
- rte_memcpy(&p_ext_data->ether_type,
- &eth_spec->hdr.ether_type, sizeof(eth_spec->hdr.ether_type));
+ memcpy(&p_ext_data->src_mac, &eth_spec->hdr.src_addr,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(&p_ext_data->dst_mac, &eth_spec->hdr.dst_addr,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(&p_ext_data->ether_type,
+ &eth_spec->hdr.ether_type,
+ sizeof(eth_spec->hdr.ether_type));
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
@@ -2108,8 +2109,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
*input_set |= ICE_INSET_IPV6_HOP_LIMIT;
- rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
- rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
+ memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
+ memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
p_v6->proto = ipv6_spec->hdr.proto;
@@ -1969,7 +1969,7 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
pb = pe + 1;
}
/* Copy the END item. */
- rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+ memcpy(items, pe, sizeof(struct rte_flow_item));
}
/* Check if the pattern matches a supported item type array */
@@ -715,7 +715,7 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad,
pkt_len, ICE_BLK_RSS, true, &prof))
return -rte_errno;
- rte_memcpy(&meta->raw.prof, &prof, sizeof(prof));
+ memcpy(&meta->raw.prof, &prof, sizeof(prof));
rte_free(pkt_buf);
rte_free(msk_buf);
@@ -293,8 +293,8 @@ ice_shaper_profile_add(struct rte_eth_dev *dev,
if (!shaper_profile)
return -ENOMEM;
shaper_profile->shaper_profile_id = shaper_profile_id;
- rte_memcpy(&shaper_profile->profile, profile,
- sizeof(struct rte_tm_shaper_params));
+ memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
shaper_profile, node);
@@ -403,8 +403,8 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->shaper_profile = shaper_profile;
tm_node->children =
(void *)((uint8_t *)tm_node + sizeof(struct ice_tm_node));
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
pf->tm_conf.root = tm_node;
return 0;
}
@@ -480,8 +480,7 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
PMD_DRV_LOG(WARNING, "weight != 1 not supported in level %d",
level_id);
- rte_memcpy(&tm_node->params, params,
- sizeof(struct rte_tm_node_params));
+ memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
tm_node->parent->reference_count++;
return 0;