@@ -78,8 +78,8 @@ add_ipv6(struct rte_flow_item *items,
for (i = 0; i < 16; i++) {
/* Currently src_ip is limited to 32 bit */
if (i < 4)
- ipv6_specs[ti].hdr.src_addr[15 - i] = para.src_ip >> (i * 8);
- ipv6_masks[ti].hdr.src_addr[15 - i] = 0xff;
+ ipv6_specs[ti].hdr.src_addr.a[15 - i] = para.src_ip >> (i * 8);
+ ipv6_masks[ti].hdr.src_addr.a[15 - i] = 0xff;
}
items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
@@ -432,7 +432,6 @@ app_main_loop_rx_metadata(void) {
struct rte_ipv4_hdr *ip_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
uint32_t ip_dst;
- uint8_t *ipv6_dst;
uint32_t *signature, *k32;
m = app.mbuf_rx.array[j];
@@ -452,9 +451,8 @@ app_main_loop_rx_metadata(void) {
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
ipv6_hdr = (struct rte_ipv6_hdr *)
&m_data[sizeof(struct rte_ether_hdr)];
- ipv6_dst = ipv6_hdr->dst_addr;
- memcpy(key, ipv6_dst, 16);
+ memcpy(key, &ipv6_hdr->dst_addr, 16);
} else
continue;
@@ -149,8 +149,8 @@ initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
ip_hdr->proto = IPPROTO_UDP;
ip_hdr->hop_limits = IP_DEFTTL;
- rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
- rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
+ rte_memcpy(&ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
+ rte_memcpy(&ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
}
@@ -238,8 +238,8 @@ v6_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s, uint8_t ttl,
hdr->proto = proto;
hdr->hop_limits = ttl;
- memset(hdr->src_addr, 0x08, sizeof(hdr->src_addr));
- memset(hdr->dst_addr, 0x04, sizeof(hdr->src_addr));
+ memset(hdr->src_addr.a, 0x08, sizeof(hdr->src_addr));
+ memset(hdr->dst_addr.a, 0x04, sizeof(hdr->dst_addr));
}
static inline void
@@ -340,17 +340,17 @@ ipv6_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id,
rte_cpu_to_be_16(pkt_len - sizeof(struct rte_ipv6_hdr));
ip_hdr->proto = IPPROTO_FRAGMENT;
ip_hdr->hop_limits = IP_DEFTTL;
- memcpy(ip_hdr->src_addr, ip6_addr, sizeof(ip_hdr->src_addr));
- memcpy(ip_hdr->dst_addr, ip6_addr, sizeof(ip_hdr->dst_addr));
- ip_hdr->src_addr[7] = (flow_id >> 16) & 0xf;
- ip_hdr->src_addr[7] |= 0x10;
- ip_hdr->src_addr[8] = (flow_id >> 8) & 0xff;
- ip_hdr->src_addr[9] = flow_id & 0xff;
+ memcpy(&ip_hdr->src_addr, ip6_addr, sizeof(ip_hdr->src_addr));
+ memcpy(&ip_hdr->dst_addr, ip6_addr, sizeof(ip_hdr->dst_addr));
+ ip_hdr->src_addr.a[7] = (flow_id >> 16) & 0xf;
+ ip_hdr->src_addr.a[7] |= 0x10;
+ ip_hdr->src_addr.a[8] = (flow_id >> 8) & 0xff;
+ ip_hdr->src_addr.a[9] = flow_id & 0xff;
- ip_hdr->dst_addr[7] = (flow_id >> 16) & 0xf;
- ip_hdr->dst_addr[7] |= 0x20;
- ip_hdr->dst_addr[8] = (flow_id >> 8) & 0xff;
- ip_hdr->dst_addr[9] = flow_id & 0xff;
+ ip_hdr->dst_addr.a[7] = (flow_id >> 16) & 0xf;
+ ip_hdr->dst_addr.a[7] |= 0x20;
+ ip_hdr->dst_addr.a[8] = (flow_id >> 8) & 0xff;
+ ip_hdr->dst_addr.a[9] = flow_id & 0xff;
frag_hdr->next_header = IPPROTO_UDP;
frag_hdr->reserved = 0;
@@ -145,10 +145,10 @@ test_toeplitz_hash_calc(void)
}
for (i = 0; i < RTE_DIM(v6_tbl); i++) {
/*Fill ipv6 hdr*/
- for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr); j++)
- ipv6_hdr.src_addr[j] = v6_tbl[i].src_ip[j];
- for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr); j++)
- ipv6_hdr.dst_addr[j] = v6_tbl[i].dst_ip[j];
+ for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr.a); j++)
+ ipv6_hdr.src_addr.a[j] = v6_tbl[i].src_ip[j];
+ for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr.a); j++)
+ ipv6_hdr.dst_addr.a[j] = v6_tbl[i].dst_ip[j];
/*Load and convert ipv6 address into tuple*/
rte_thash_load_v6_addrs(&ipv6_hdr, &tuple);
tuple.v6.sport = v6_tbl[i].src_port;
@@ -87,8 +87,6 @@ Deprecation Notices
- ``rte_lpm6_delete_bulk_func()``
- ``rte_lpm6_lookup()``
- ``rte_lpm6_lookup_bulk_func()``
- net
- - ``struct rte_ipv6_hdr``
node
- ``rte_node_ip6_route_add()``
pipeline
@@ -85,6 +85,8 @@ API Changes
=======================================================
* net: IPv6 related symbols were moved from ``<rte_ip.h>`` to the new ``<rte_ip6.h>`` header.
+* net: The ``src_addr`` and ``dst_addr`` fields of ``rte_ipv6_hdr`` were changed from
+  ``uint8_t[16]`` arrays to ``struct rte_ipv6_addr``.
ABI Changes
-----------
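Below is a minimal, illustrative migration sketch (not part of the patch; the helper name
``example_fill`` is hypothetical). It assumes ``struct rte_ipv6_addr`` wraps a 16-byte array
named ``a``, as the hunks above use::

    #include <stdint.h>
    #include <string.h>
    #include <rte_ip6.h>

    static void
    example_fill(struct rte_ipv6_hdr *hdr, const uint8_t bytes[16])
    {
        /* Old: memcpy(hdr->src_addr, bytes, 16);  (src_addr was uint8_t[16]) */
        memcpy(&hdr->src_addr, bytes, sizeof(hdr->src_addr));

        /* Old: hdr->dst_addr[15] = 0x01;  (direct byte indexing) */
        hdr->dst_addr.a[15] = 0x01;
    }

Whole-address copies and comparisons simply take the address of the struct field, while
per-byte accesses go through the ``.a`` member.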
@@ -424,22 +424,22 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
rte_memcpy(filter->src_ipaddr,
- ipv6_spec->hdr.src_addr, 16);
+ &ipv6_spec->hdr.src_addr, 16);
rte_memcpy(filter->dst_ipaddr,
- ipv6_spec->hdr.dst_addr, 16);
+ &ipv6_spec->hdr.dst_addr, 16);
- if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr.a,
16)) {
rte_memcpy(filter->src_ipaddr_mask,
- ipv6_mask->hdr.src_addr, 16);
+ &ipv6_mask->hdr.src_addr, 16);
en |= !use_ntuple ? 0 :
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
}
- if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr.a,
16)) {
rte_memcpy(filter->dst_ipaddr_mask,
- ipv6_mask->hdr.dst_addr, 16);
+ &ipv6_mask->hdr.dst_addr, 16);
en |= !use_ntuple ? 0 :
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
}
@@ -690,9 +690,9 @@ static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
unaligned_uint32_t *word_src_addr =
- (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
+ (unaligned_uint32_t *)&ipv6_hdr->src_addr;
unaligned_uint32_t *word_dst_addr =
- (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
+ (unaligned_uint32_t *)&ipv6_hdr->dst_addr;
return (word_src_addr[0] ^ word_dst_addr[0]) ^
(word_src_addr[1] ^ word_dst_addr[1]) ^
@@ -411,15 +411,15 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
RTE_IPV6_HDR_TC_SHIFT,
tos);
- if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
+ if (memcmp(&val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
(umask &&
- memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
+ memcmp(&umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
lip);
- if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
+ if (memcmp(&val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
(umask &&
- memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
+ memcmp(&umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
fip);
@@ -918,12 +918,14 @@ static struct chrte_fparse parseitem[] = {
.fptr = ch_rte_parsetype_ipv6,
.dmask = &(const struct rte_flow_item_ipv6) {
.hdr = {
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
.vtc_flow = RTE_BE32(0xff000000),
},
},
@@ -117,12 +117,14 @@ static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
.hdr = {
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
.proto = 0xff
},
};
@@ -1480,16 +1482,16 @@ dpaa2_configure_flow_generic_ip(
mask_ipv4->hdr.dst_addr)) {
flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
} else if (mask_ipv6 &&
- (memcmp((const char *)mask_ipv6->hdr.src_addr,
+ (memcmp(&mask_ipv6->hdr.src_addr,
zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
- memcmp((const char *)mask_ipv6->hdr.dst_addr,
+ memcmp(&mask_ipv6->hdr.dst_addr,
zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
}
if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
(mask_ipv6 &&
- memcmp((const char *)mask_ipv6->hdr.src_addr,
+ memcmp(&mask_ipv6->hdr.src_addr,
zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
index = dpaa2_flow_extract_search(
&priv->extract.qos_key_extract.dpkg,
@@ -1528,13 +1530,13 @@ dpaa2_configure_flow_generic_ip(
if (spec_ipv4)
key = &spec_ipv4->hdr.src_addr;
else
- key = &spec_ipv6->hdr.src_addr[0];
+ key = &spec_ipv6->hdr.src_addr;
if (mask_ipv4) {
mask = &mask_ipv4->hdr.src_addr;
size = NH_FLD_IPV4_ADDR_SIZE;
prot = NET_PROT_IPV4;
} else {
- mask = &mask_ipv6->hdr.src_addr[0];
+ mask = &mask_ipv6->hdr.src_addr;
size = NH_FLD_IPV6_ADDR_SIZE;
prot = NET_PROT_IPV6;
}
@@ -1571,7 +1573,7 @@ dpaa2_configure_flow_generic_ip(
if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
(mask_ipv6 &&
- memcmp((const char *)mask_ipv6->hdr.dst_addr,
+ memcmp(&mask_ipv6->hdr.dst_addr,
zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
index = dpaa2_flow_extract_search(
&priv->extract.qos_key_extract.dpkg,
@@ -1618,13 +1620,13 @@ dpaa2_configure_flow_generic_ip(
if (spec_ipv4)
key = &spec_ipv4->hdr.dst_addr;
else
- key = spec_ipv6->hdr.dst_addr;
+ key = &spec_ipv6->hdr.dst_addr;
if (mask_ipv4) {
mask = &mask_ipv4->hdr.dst_addr;
size = NH_FLD_IPV4_ADDR_SIZE;
prot = NET_PROT_IPV4;
} else {
- mask = &mask_ipv6->hdr.dst_addr[0];
+ mask = &mask_ipv6->hdr.dst_addr;
size = NH_FLD_IPV6_ADDR_SIZE;
prot = NET_PROT_IPV6;
}
@@ -962,7 +962,7 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
/* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
for (i = 0; i < 16; i++) {
- if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr.a[i] == UINT8_MAX) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Not supported by fdir filter, do not support src ipv6");
@@ -978,13 +978,13 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
}
for (i = 0; i < 16; i++) {
- if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
+ if (ipv6_mask->hdr.dst_addr.a[i] == UINT8_MAX)
rule->mask.dst_ipv6_mask |= 1 << i;
}
ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
rte_memcpy(rule->hinic_fdir.dst_ipv6,
- ipv6_spec->hdr.dst_addr, 16);
+ &ipv6_spec->hdr.dst_addr, 16);
/*
* Check if the next not void item is TCP or UDP or ICMP.
@@ -743,7 +743,7 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
else
psd_hdr.len = ipv6_hdr->payload_len;
- sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+ sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr), 0);
sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
return __rte_raw_cksum_reduce(sum);
@@ -822,10 +822,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
"Only support src & dst ip,proto in IPV6");
}
net_addr_to_host(rule->key_conf.mask.src_ip,
- (const rte_be32_t *)ipv6_mask->hdr.src_addr,
+ (const rte_be32_t *)&ipv6_mask->hdr.src_addr,
IP_ADDR_LEN);
net_addr_to_host(rule->key_conf.mask.dst_ip,
- (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
+ (const rte_be32_t *)&ipv6_mask->hdr.dst_addr,
IP_ADDR_LEN);
rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
@@ -838,10 +838,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
ipv6_spec = item->spec;
net_addr_to_host(rule->key_conf.spec.src_ip,
- (const rte_be32_t *)ipv6_spec->hdr.src_addr,
+ (const rte_be32_t *)&ipv6_spec->hdr.src_addr,
IP_ADDR_LEN);
net_addr_to_host(rule->key_conf.spec.dst_ip,
- (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
+ (const rte_be32_t *)&ipv6_spec->hdr.dst_addr,
IP_ADDR_LEN);
rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
@@ -1953,13 +1953,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (!memcmp(ipv6_mask->hdr.src_addr,
+ if (!memcmp(&ipv6_mask->hdr.src_addr,
ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
+ sizeof(ipv6_mask->hdr.src_addr)))
input_set |= I40E_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
+ if (!memcmp(&ipv6_mask->hdr.dst_addr,
ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ sizeof(ipv6_mask->hdr.dst_addr)))
input_set |= I40E_INSET_IPV6_DST;
if ((ipv6_mask->hdr.vtc_flow &
@@ -1987,9 +1987,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
I40E_FDIR_IPTYPE_IPV6;
rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
- ipv6_spec->hdr.src_addr, 16);
+ &ipv6_spec->hdr.src_addr, 16);
rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
+ &ipv6_spec->hdr.dst_addr, 16);
/* Check if it is fragment. */
if (ipv6_spec->hdr.proto ==
@@ -1048,14 +1048,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
HOP_LIMIT);
}
- if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr))) {
+ if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+ sizeof(ipv6_mask->hdr.src_addr))) {
input_set |= IAVF_INSET_IPV6_SRC;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
SRC);
}
- if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+ if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+ sizeof(ipv6_mask->hdr.dst_addr))) {
input_set |= IAVF_INSET_IPV6_DST;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
DST);
@@ -354,23 +354,23 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
}
for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j]) {
+ if (ipv6_mask->hdr.src_addr.a[j]) {
*input |= IAVF_INSET_IPV6_SRC;
break;
}
}
for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.dst_addr[j]) {
+ if (ipv6_mask->hdr.dst_addr.a[j]) {
*input |= IAVF_INSET_IPV6_DST;
break;
}
}
for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j])
+ if (ipv6_mask->hdr.src_addr.a[j])
input_set_byte++;
- if (ipv6_mask->hdr.dst_addr[j])
+ if (ipv6_mask->hdr.dst_addr.a[j])
input_set_byte++;
}
@@ -1738,8 +1738,8 @@ static void
parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
struct rte_ipv6_hdr *ipv6)
{
- memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
- memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+ memcpy(&ipv6->src_addr, &item->hdr.src_addr, 16);
+ memcpy(&ipv6->dst_addr, &item->hdr.dst_addr, 16);
}
static void
@@ -1904,7 +1904,7 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad,
ipsec_flow->spi,
0,
0,
- ipsec_flow->ipv6_hdr.dst_addr,
+ ipsec_flow->ipv6_hdr.dst_addr.a,
0,
ipsec_flow->is_udp,
ipsec_flow->udp_hdr.dst_port);
@@ -2097,11 +2097,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
return -rte_errno;
}
- if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
+ if (!memcmp(&ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+ sizeof(ipv6_mask->hdr.src_addr)))
*input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ if (!memcmp(&ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+ sizeof(ipv6_mask->hdr.dst_addr)))
*input_set |= ICE_INSET_IPV6_DST;
if ((ipv6_mask->hdr.vtc_flow &
@@ -2113,8 +2113,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
*input_set |= ICE_INSET_IPV6_HOP_LIMIT;
- rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
- rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(&p_v6->dst_ip, &ipv6_spec->hdr.dst_addr, 16);
+ rte_memcpy(&p_v6->src_ip, &ipv6_spec->hdr.src_addr, 16);
vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
p_v6->proto = ipv6_spec->hdr.proto;
@@ -665,13 +665,13 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
}
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j]) {
+ if (ipv6_mask->hdr.src_addr.a[j]) {
*input |= ICE_INSET_IPV6_SRC;
break;
}
}
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.dst_addr[j]) {
+ if (ipv6_mask->hdr.dst_addr.a[j]) {
*input |= ICE_INSET_IPV6_DST;
break;
}
@@ -691,18 +691,18 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
f = &list[t].h_u.ipv6_hdr;
s = &list[t].m_u.ipv6_hdr;
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j]) {
+ if (ipv6_mask->hdr.src_addr.a[j]) {
f->src_addr[j] =
- ipv6_spec->hdr.src_addr[j];
+ ipv6_spec->hdr.src_addr.a[j];
s->src_addr[j] =
- ipv6_mask->hdr.src_addr[j];
+ ipv6_mask->hdr.src_addr.a[j];
input_set_byte++;
}
- if (ipv6_mask->hdr.dst_addr[j]) {
+ if (ipv6_mask->hdr.dst_addr.a[j]) {
f->dst_addr[j] =
- ipv6_spec->hdr.dst_addr[j];
+ ipv6_spec->hdr.dst_addr.a[j];
s->dst_addr[j] =
- ipv6_mask->hdr.dst_addr[j];
+ ipv6_mask->hdr.dst_addr.a[j];
input_set_byte++;
}
}
@@ -435,8 +435,8 @@ igc_parse_pattern_ipv6(const struct rte_flow_item *item,
if (mask->hdr.vtc_flow ||
mask->hdr.payload_len ||
mask->hdr.hop_limits ||
- !igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
- !igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
+ !igc_is_zero_ipv6_addr(&mask->hdr.src_addr) ||
+ !igc_is_zero_ipv6_addr(&mask->hdr.dst_addr))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"IPv6 only support protocol");
@@ -1917,9 +1917,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
/* check src addr mask */
for (j = 0; j < 16; j++) {
- if (ipv6_mask->hdr.src_addr[j] == 0) {
+ if (ipv6_mask->hdr.src_addr.a[j] == 0) {
rule->mask.src_ipv6_mask &= ~(1 << j);
- } else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) {
+ } else if (ipv6_mask->hdr.src_addr.a[j] != UINT8_MAX) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1930,9 +1930,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
/* check dst addr mask */
for (j = 0; j < 16; j++) {
- if (ipv6_mask->hdr.dst_addr[j] == 0) {
+ if (ipv6_mask->hdr.dst_addr.a[j] == 0) {
rule->mask.dst_ipv6_mask &= ~(1 << j);
- } else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ } else if (ipv6_mask->hdr.dst_addr.a[j] != UINT8_MAX) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1945,9 +1945,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
rule->b_spec = TRUE;
ipv6_spec = item->spec;
rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
- ipv6_spec->hdr.src_addr, 16);
+ &ipv6_spec->hdr.src_addr, 16);
rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
+ &ipv6_spec->hdr.dst_addr, 16);
}
/**
@@ -681,9 +681,9 @@ ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
ic_session->src_ip.type = IPv6;
ic_session->dst_ip.type = IPv6;
rte_memcpy(ic_session->src_ip.ipv6,
- ipv6->hdr.src_addr, 16);
+ &ipv6->hdr.src_addr, 16);
rte_memcpy(ic_session->dst_ip.ipv6,
- ipv6->hdr.dst_addr, 16);
+ &ipv6->hdr.dst_addr, 16);
} else {
const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
ic_session->src_ip.type = IPv4;
@@ -176,14 +176,14 @@ struct mlx5dr_definer_conv_data {
X(SET, ipv6_proto, v->hdr.proto, rte_flow_item_ipv6) \
X(SET, ipv6_routing_hdr, IPPROTO_ROUTING, rte_flow_item_ipv6) \
X(SET, ipv6_hop_limits, v->hdr.hop_limits, rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_src_addr_127_96, &v->hdr.src_addr[0], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_src_addr_95_64, &v->hdr.src_addr[4], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_src_addr_63_32, &v->hdr.src_addr[8], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_src_addr_31_0, &v->hdr.src_addr[12], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_dst_addr_127_96, &v->hdr.dst_addr[0], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_dst_addr_95_64, &v->hdr.dst_addr[4], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_dst_addr_63_32, &v->hdr.dst_addr[8], rte_flow_item_ipv6) \
- X(SET_BE32P, ipv6_dst_addr_31_0, &v->hdr.dst_addr[12], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_src_addr_127_96, &v->hdr.src_addr.a[0], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_src_addr_95_64, &v->hdr.src_addr.a[4], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_src_addr_63_32, &v->hdr.src_addr.a[8], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_src_addr_31_0, &v->hdr.src_addr.a[12], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_dst_addr_127_96, &v->hdr.dst_addr.a[0], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_dst_addr_95_64, &v->hdr.dst_addr.a[4], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_dst_addr_63_32, &v->hdr.dst_addr.a[8], rte_flow_item_ipv6) \
+ X(SET_BE32P, ipv6_dst_addr_31_0, &v->hdr.dst_addr.a[12], rte_flow_item_ipv6) \
X(SET, ipv6_version, STE_IPV6, rte_flow_item_ipv6) \
X(SET, ipv6_frag, v->has_frag_ext, rte_flow_item_ipv6) \
X(SET, icmp_protocol, STE_ICMP, rte_flow_item_icmp) \
@@ -1161,8 +1161,8 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
m->has_hip_ext || m->has_shim6_ext ||
(l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
- !is_mem_zero(l->hdr.src_addr, 16) ||
- !is_mem_zero(l->hdr.dst_addr, 16)))) {
+ !is_mem_zero(l->hdr.src_addr.a, 16) ||
+ !is_mem_zero(l->hdr.dst_addr.a, 16)))) {
rte_errno = ENOTSUP;
return rte_errno;
}
@@ -1219,56 +1219,56 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
}
- if (!is_mem_zero(m->hdr.src_addr, 4)) {
+ if (!is_mem_zero(m->hdr.src_addr.a, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;
DR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);
}
- if (!is_mem_zero(m->hdr.src_addr + 4, 4)) {
+ if (!is_mem_zero(m->hdr.src_addr.a + 4, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;
DR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);
}
- if (!is_mem_zero(m->hdr.src_addr + 8, 4)) {
+ if (!is_mem_zero(m->hdr.src_addr.a + 8, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;
DR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);
}
- if (!is_mem_zero(m->hdr.src_addr + 12, 4)) {
+ if (!is_mem_zero(m->hdr.src_addr.a + 12, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;
DR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);
}
- if (!is_mem_zero(m->hdr.dst_addr, 4)) {
+ if (!is_mem_zero(m->hdr.dst_addr.a, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;
DR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);
}
- if (!is_mem_zero(m->hdr.dst_addr + 4, 4)) {
+ if (!is_mem_zero(m->hdr.dst_addr.a + 4, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;
DR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);
}
- if (!is_mem_zero(m->hdr.dst_addr + 8, 4)) {
+ if (!is_mem_zero(m->hdr.dst_addr.a + 8, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;
DR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);
}
- if (!is_mem_zero(m->hdr.dst_addr + 12, 4)) {
+ if (!is_mem_zero(m->hdr.dst_addr.a + 12, 4)) {
fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;
@@ -2933,12 +2933,14 @@ mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 nic_mask = {
.hdr = {
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
.vtc_flow = RTE_BE32(0xffffffff),
.proto = 0xff,
},
@@ -7697,12 +7697,14 @@ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
const struct rte_flow_item_ipv6 nic_ipv6_mask = {
.hdr = {
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
.vtc_flow = RTE_BE32(0xffffffff),
.proto = 0xff,
.hop_limits = 0xff,
@@ -9549,12 +9551,14 @@ flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,
const struct rte_flow_item_ipv6 *ipv6_v;
const struct rte_flow_item_ipv6 nic_mask = {
.hdr = {
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
.vtc_flow = RTE_BE32(0xffffffff),
.proto = 0xff,
.hop_limits = 0xff,
@@ -9577,11 +9581,11 @@ flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
for (i = 0; i < size; ++i)
- l24_v[i] = ipv6_m->hdr.dst_addr[i] & ipv6_v->hdr.dst_addr[i];
+ l24_v[i] = ipv6_m->hdr.dst_addr.a[i] & ipv6_v->hdr.dst_addr.a[i];
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6);
for (i = 0; i < size; ++i)
- l24_v[i] = ipv6_m->hdr.src_addr[i] & ipv6_v->hdr.src_addr[i];
+ l24_v[i] = ipv6_m->hdr.src_addr.a[i] & ipv6_v->hdr.src_addr.a[i];
/* TOS. */
vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
@@ -8250,12 +8250,14 @@ const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
.payload_len = RTE_BE16(0xffff),
.proto = 0xff,
.hop_limits = 0xff,
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
},
.has_frag_ext = 1,
};
@@ -14645,10 +14647,10 @@ flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
memcpy(data.dst.ipv6_addr,
- ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
+ &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
sizeof(data.dst.ipv6_addr));
memcpy(data.src.ipv6_addr,
- ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
+ &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
sizeof(data.src.ipv6_addr));
break;
case RTE_FLOW_ITEM_TYPE_UDP:
@@ -600,13 +600,13 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
uint32_t vtc_flow_val;
uint32_t vtc_flow_mask;
- memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ memcpy(&ipv6.val.src_ip, &spec->hdr.src_addr,
RTE_DIM(ipv6.val.src_ip));
- memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ memcpy(&ipv6.val.dst_ip, &spec->hdr.dst_addr,
RTE_DIM(ipv6.val.dst_ip));
- memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ memcpy(&ipv6.mask.src_ip, &mask->hdr.src_addr,
RTE_DIM(ipv6.mask.src_ip));
- memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ memcpy(&ipv6.mask.dst_ip, &mask->hdr.dst_addr,
RTE_DIM(ipv6.mask.dst_ip));
vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
@@ -2027,18 +2027,18 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param)
ipv6_gre_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
ipv6_gre_tun->ip_ext.ttl = hdr->hop_limits;
- memcpy(ipv6_gre_tun->ipv6.ipv6_src, hdr->src_addr,
+ memcpy(ipv6_gre_tun->ipv6.ipv6_src, &hdr->src_addr,
sizeof(ipv6_gre_tun->ipv6.ipv6_src));
- memcpy(ipv6_gre_tun->ipv6.ipv6_dst, hdr->dst_addr,
+ memcpy(ipv6_gre_tun->ipv6.ipv6_dst, &hdr->dst_addr,
sizeof(ipv6_gre_tun->ipv6.ipv6_dst));
} else {
ipv6_udp_tun = (struct nfp_flower_ipv6_udp_tun *)(*param->mbuf_off);
ipv6_udp_tun->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
ipv6_udp_tun->ip_ext.ttl = hdr->hop_limits;
- memcpy(ipv6_udp_tun->ipv6.ipv6_src, hdr->src_addr,
+ memcpy(ipv6_udp_tun->ipv6.ipv6_src, &hdr->src_addr,
sizeof(ipv6_udp_tun->ipv6.ipv6_src));
- memcpy(ipv6_udp_tun->ipv6.ipv6_dst, hdr->dst_addr,
+ memcpy(ipv6_udp_tun->ipv6.ipv6_dst, &hdr->dst_addr,
sizeof(ipv6_udp_tun->ipv6.ipv6_dst));
}
} else {
@@ -2061,8 +2061,8 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param)
ipv6->ip_ext.tos = vtc_flow >> RTE_IPV6_HDR_TC_SHIFT;
ipv6->ip_ext.proto = hdr->proto;
ipv6->ip_ext.ttl = hdr->hop_limits;
- memcpy(ipv6->ipv6_src, hdr->src_addr, sizeof(ipv6->ipv6_src));
- memcpy(ipv6->ipv6_dst, hdr->dst_addr, sizeof(ipv6->ipv6_dst));
+ memcpy(ipv6->ipv6_src, &hdr->src_addr, sizeof(ipv6->ipv6_src));
+ memcpy(ipv6->ipv6_dst, &hdr->dst_addr, sizeof(ipv6->ipv6_dst));
ipv6_end:
*param->mbuf_off += sizeof(struct nfp_flower_ipv6);
@@ -2518,10 +2518,14 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
.vtc_flow = RTE_BE32(0x0ff00000),
.proto = 0xff,
.hop_limits = 0xff,
- .src_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ .src_addr = { .a =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ },
+ .dst_addr = { .a =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
},
.has_frag_ext = 1,
},
@@ -3324,8 +3328,8 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
struct nfp_flower_cmsg_tun_neigh_v6 payload;
tun->payload.v6_flag = 1;
- memcpy(tun->payload.dst.dst_ipv6, ipv6->hdr.dst_addr, sizeof(tun->payload.dst.dst_ipv6));
- memcpy(tun->payload.src.src_ipv6, ipv6->hdr.src_addr, sizeof(tun->payload.src.src_ipv6));
+ memcpy(tun->payload.dst.dst_ipv6, &ipv6->hdr.dst_addr, sizeof(tun->payload.dst.dst_ipv6));
+ memcpy(tun->payload.src.src_ipv6, &ipv6->hdr.src_addr, sizeof(tun->payload.src.src_ipv6));
memcpy(tun->payload.dst_addr, eth->dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
memcpy(tun->payload.src_addr, eth->src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
@@ -3345,8 +3349,8 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
sizeof(struct nfp_flower_meta_tci));
memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v6));
- memcpy(payload.dst_ipv6, ipv6->hdr.dst_addr, sizeof(payload.dst_ipv6));
- memcpy(payload.src_ipv6, ipv6->hdr.src_addr, sizeof(payload.src_ipv6));
+ memcpy(payload.dst_ipv6, &ipv6->hdr.dst_addr, sizeof(payload.dst_ipv6));
+ memcpy(payload.src_ipv6, &ipv6->hdr.src_addr, sizeof(payload.src_ipv6));
memcpy(payload.common.dst_mac, eth->dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
memcpy(payload.common.src_mac, eth->src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
payload.common.port_id = port->in_port;
@@ -3573,7 +3577,7 @@ nfp_flow_action_vxlan_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
pre_tun = (struct nfp_fl_act_pre_tun *)actions;
memset(pre_tun, 0, act_pre_size);
- nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+ nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
memset(set_tun, 0, act_set_size);
@@ -3944,7 +3948,7 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
pre_tun = (struct nfp_fl_act_pre_tun *)actions;
memset(pre_tun, 0, act_pre_size);
- nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+ nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
memset(set_tun, 0, act_set_size);
@@ -4021,7 +4025,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower,
pre_tun = (struct nfp_fl_act_pre_tun *)actions;
memset(pre_tun, 0, act_pre_size);
- nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr);
+ nfp_flow_pre_tun_v6_process(pre_tun, ipv6->hdr.dst_addr.a);
set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size);
memset(set_tun, 0, act_set_size);
@@ -294,28 +294,28 @@ nfp_net_flow_merge_ipv6(struct rte_flow *nfp_flow,
ipv6->l4_protocol_mask = mask->hdr.proto;
for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
- ipv6->src_ipv6_mask[i] = mask->hdr.src_addr[i + 3];
- ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr[i + 2];
- ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr[i + 1];
- ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr[i];
+ ipv6->src_ipv6_mask[i] = mask->hdr.src_addr.a[i + 3];
+ ipv6->src_ipv6_mask[i + 1] = mask->hdr.src_addr.a[i + 2];
+ ipv6->src_ipv6_mask[i + 2] = mask->hdr.src_addr.a[i + 1];
+ ipv6->src_ipv6_mask[i + 3] = mask->hdr.src_addr.a[i];
- ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr[i + 3];
- ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr[i + 2];
- ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr[i + 1];
- ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr[i];
+ ipv6->dst_ipv6_mask[i] = mask->hdr.dst_addr.a[i + 3];
+ ipv6->dst_ipv6_mask[i + 1] = mask->hdr.dst_addr.a[i + 2];
+ ipv6->dst_ipv6_mask[i + 2] = mask->hdr.dst_addr.a[i + 1];
+ ipv6->dst_ipv6_mask[i + 3] = mask->hdr.dst_addr.a[i];
}
ipv6->l4_protocol = spec->hdr.proto;
for (i = 0; i < sizeof(ipv6->src_ipv6); i += 4) {
- ipv6->src_ipv6[i] = spec->hdr.src_addr[i + 3];
- ipv6->src_ipv6[i + 1] = spec->hdr.src_addr[i + 2];
- ipv6->src_ipv6[i + 2] = spec->hdr.src_addr[i + 1];
- ipv6->src_ipv6[i + 3] = spec->hdr.src_addr[i];
+ ipv6->src_ipv6[i] = spec->hdr.src_addr.a[i + 3];
+ ipv6->src_ipv6[i + 1] = spec->hdr.src_addr.a[i + 2];
+ ipv6->src_ipv6[i + 2] = spec->hdr.src_addr.a[i + 1];
+ ipv6->src_ipv6[i + 3] = spec->hdr.src_addr.a[i];
- ipv6->dst_ipv6[i] = spec->hdr.dst_addr[i + 3];
- ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr[i + 2];
- ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr[i + 1];
- ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr[i];
+ ipv6->dst_ipv6[i] = spec->hdr.dst_addr.a[i + 3];
+ ipv6->dst_ipv6[i + 1] = spec->hdr.dst_addr.a[i + 2];
+ ipv6->dst_ipv6[i + 2] = spec->hdr.dst_addr.a[i + 1];
+ ipv6->dst_ipv6[i + 3] = spec->hdr.dst_addr.a[i];
}
return 0;
@@ -403,10 +403,14 @@ static const struct nfp_net_flow_item_proc nfp_net_flow_item_proc_list[] = {
.mask_support = &(const struct rte_flow_item_ipv6){
.hdr = {
.proto = 0xff,
- .src_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .src_addr = { .a =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .dst_addr = { .a =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
},
},
.mask_default = &rte_flow_item_ipv6_mask,
@@ -794,9 +794,9 @@ qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
spec = pattern->spec;
memcpy(flow->entry.tuple.src_ipv6,
- spec->hdr.src_addr, IPV6_ADDR_LEN);
+ &spec->hdr.src_addr, IPV6_ADDR_LEN);
memcpy(flow->entry.tuple.dst_ipv6,
- spec->hdr.dst_addr, IPV6_ADDR_LEN);
+ &spec->hdr.dst_addr, IPV6_ADDR_LEN);
flow->entry.tuple.eth_proto =
RTE_ETHER_TYPE_IPV6;
}
@@ -575,14 +575,14 @@ sfc_flow_parse_ipv6(const struct rte_flow_item *item,
const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
const struct rte_flow_item_ipv6 supp_mask = {
.hdr = {
- .src_addr = { 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff },
- .dst_addr = { 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff },
+ .src_addr = { .a =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .dst_addr = { .a =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
.proto = 0xff,
}
};
@@ -618,28 +618,28 @@ sfc_flow_parse_ipv6(const struct rte_flow_item *item,
* IPv6 addresses are in big-endian byte order in item and in
* efx_spec
*/
- if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+ if (memcmp(&mask->hdr.src_addr, &supp_mask.hdr.src_addr,
sizeof(mask->hdr.src_addr)) == 0) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
sizeof(spec->hdr.src_addr));
- rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+ rte_memcpy(&efx_spec->efs_rem_host, &spec->hdr.src_addr,
sizeof(efx_spec->efs_rem_host));
- } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+ } else if (!sfc_flow_is_zero(mask->hdr.src_addr.a,
sizeof(mask->hdr.src_addr))) {
goto fail_bad_mask;
}
- if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+ if (memcmp(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr,
sizeof(mask->hdr.dst_addr)) == 0) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
sizeof(spec->hdr.dst_addr));
- rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+ rte_memcpy(&efx_spec->efs_loc_host, &spec->hdr.dst_addr,
sizeof(efx_spec->efs_loc_host));
- } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+ } else if (!sfc_flow_is_zero(mask->hdr.dst_addr.a,
sizeof(mask->hdr.dst_addr))) {
goto fail_bad_mask;
}
@@ -209,11 +209,11 @@ static const struct tap_flow_items tap_flow_items[] = {
RTE_FLOW_ITEM_TYPE_TCP),
.mask = &(const struct rte_flow_item_ipv6){
.hdr = {
- .src_addr = {
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
},
- .dst_addr = {
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
},
@@ -617,13 +617,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IPV6);
if (!spec)
return 0;
- if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
+ if (memcmp(&mask->hdr.dst_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
- if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
+ if (memcmp(&mask->hdr.src_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
@@ -1807,9 +1807,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
/* check src addr mask */
for (j = 0; j < 16; j++) {
- if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr.a[j] == UINT8_MAX) {
rule->mask.src_ipv6_mask |= 1 << j;
- } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ } else if (ipv6_mask->hdr.src_addr.a[j] != 0) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1820,9 +1820,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
/* check dst addr mask */
for (j = 0; j < 16; j++) {
- if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ if (ipv6_mask->hdr.dst_addr.a[j] == UINT8_MAX) {
rule->mask.dst_ipv6_mask |= 1 << j;
- } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ } else if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1835,9 +1835,9 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
rule->b_spec = TRUE;
ipv6_spec = item->spec;
rte_memcpy(rule->input.src_ip,
- ipv6_spec->hdr.src_addr, 16);
+ &ipv6_spec->hdr.src_addr, 16);
rte_memcpy(rule->input.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
+ &ipv6_spec->hdr.dst_addr, 16);
}
/**
@@ -659,9 +659,9 @@ txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
ic_session->src_ip.type = IPv6;
ic_session->dst_ip.type = IPv6;
rte_memcpy(ic_session->src_ip.ipv6,
- ipv6->hdr.src_addr, 16);
+ &ipv6->hdr.src_addr, 16);
rte_memcpy(ic_session->dst_ip.ipv6,
- ipv6->hdr.dst_addr, 16);
+ &ipv6->hdr.dst_addr, 16);
} else {
const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
ic_session->src_ip.type = IPv4;
@@ -311,7 +311,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
&next_hop) == 0 &&
(enabled_port_mask & 1 << next_hop) != 0) {
port_out = next_hop;
@@ -638,7 +638,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 1,
.input_index = 1,
- .offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr.a[0]),
},
[2] = {
@@ -646,7 +646,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 2,
.input_index = 2,
- .offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr.a[4]),
},
[3] = {
@@ -654,7 +654,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 3,
.input_index = 3,
- .offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr.a[8]),
},
[4] = {
@@ -662,7 +662,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 4,
.input_index = 4,
- .offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr.a[12]),
},
/* Destination IP address (IPv6) */
@@ -671,7 +671,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 5,
.input_index = 5,
- .offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[0]),
},
[6] = {
@@ -679,7 +679,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 6,
.input_index = 6,
- .offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[4]),
},
[7] = {
@@ -687,7 +687,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 7,
.input_index = 7,
- .offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[8]),
},
[8] = {
@@ -695,7 +695,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
.size = sizeof(uint32_t),
.field_index = 8,
.input_index = 8,
- .offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[12]),
},
/* Source Port */
@@ -400,7 +400,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
}
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr.a,
&next_hop) == 0 &&
(enabled_port_mask & 1 << next_hop) != 0) {
dst_port = next_hop;
@@ -85,7 +85,7 @@ ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
{
uint8_t *addr;
- addr = hdr->src_addr;
+ addr = hdr->src_addr.a;
printf("src: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx \t",
(uint16_t)((addr[0] << 8) | addr[1]),
(uint16_t)((addr[2] << 8) | addr[3]),
@@ -96,7 +96,7 @@ ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
(uint16_t)((addr[12] << 8) | addr[13]),
(uint16_t)((addr[14] << 8) | addr[15]));
- addr = hdr->dst_addr;
+ addr = hdr->dst_addr.a;
printf("dst: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx",
(uint16_t)((addr[0] << 8) | addr[1]),
(uint16_t)((addr[2] << 8) | addr[3]),
@@ -196,8 +196,8 @@ parse_flow_tokens(char **tokens, uint32_t n_tokens,
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
- if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr,
- rule->ipv6.mask.hdr.src_addr,
+ if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr.a,
+ rule->ipv6.mask.hdr.src_addr.a,
tokens[ti], status))
return;
}
@@ -205,8 +205,8 @@ parse_flow_tokens(char **tokens, uint32_t n_tokens,
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
- if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr,
- rule->ipv6.mask.hdr.dst_addr,
+ if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr.a,
+ rule->ipv6.mask.hdr.dst_addr.a,
tokens[ti], status))
return;
}
@@ -529,9 +529,9 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
sa->pattern[1].spec = &sa->ipv6_spec;
- memcpy(sa->ipv6_spec.hdr.dst_addr,
+ memcpy(&sa->ipv6_spec.hdr.dst_addr,
sa->dst.ip.ip6.ip6_b, 16);
- memcpy(sa->ipv6_spec.hdr.src_addr,
+ memcpy(&sa->ipv6_spec.hdr.src_addr,
sa->src.ip.ip6.ip6_b, 16);
} else if (IS_IP4(sa->flags)) {
sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
@@ -735,9 +735,9 @@ create_ipsec_esp_flow(struct ipsec_sa *sa)
sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
sa->pattern[1].spec = &sa->ipv6_spec;
- memcpy(sa->ipv6_spec.hdr.dst_addr,
+ memcpy(&sa->ipv6_spec.hdr.dst_addr,
sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
- memcpy(sa->ipv6_spec.hdr.src_addr,
+ memcpy(&sa->ipv6_spec.hdr.src_addr,
sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
sa->pattern[2].spec = &sa->esp_spec;
@@ -1571,8 +1571,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
};
if (IS_IP6_TUNNEL(lsa->flags)) {
- memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
- memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
+ memcpy(&v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
+ memcpy(&v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
}
rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
@@ -5,6 +5,8 @@
#ifndef __SAD_H__
#define __SAD_H__
+#include <rte_ip.h>
+#include <rte_ip6.h>
#include <rte_ipsec_sad.h>
#define SA_CACHE_SZ 128
@@ -37,8 +39,8 @@ cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
/* IPv6 check */
(!is_v4 && (sa_type == IP6_TUNNEL) &&
- (!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
- (!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
+ (!memcmp(sa->src.ip.ip6.ip6, &ipv6->src_addr, 16)) &&
+ (!memcmp(sa->dst.ip.ip6.ip6, &ipv6->dst_addr, 16))))
return 1;
return 0;
@@ -128,9 +130,9 @@ sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
}
}
v6[nb_v6].spi = esp->spi;
- memcpy(v6[nb_v6].dip, ipv6->dst_addr,
+ memcpy(v6[nb_v6].dip, &ipv6->dst_addr,
sizeof(ipv6->dst_addr));
- memcpy(v6[nb_v6].sip, ipv6->src_addr,
+ memcpy(v6[nb_v6].sip, &ipv6->src_addr,
sizeof(ipv6->src_addr));
keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
&v6[nb_v6];
@@ -65,7 +65,7 @@ fib_parse_packet(struct rte_mbuf *mbuf,
/* IPv6 */
else {
ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
- rte_mov16(ipv6, (const uint8_t *)ipv6_hdr->dst_addr);
+ rte_mov16(ipv6, ipv6_hdr->dst_addr.a);
*ip_type = 0;
(*ipv6_cnt)++;
}
@@ -62,7 +62,7 @@ lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
uint16_t portid,
struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
{
- const uint8_t *dst_ip = ipv6_hdr->dst_addr;
+ const uint8_t *dst_ip = ipv6_hdr->dst_addr.a;
uint32_t next_hop;
if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
@@ -122,7 +122,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop) == 0)
+ ipv6_hdr->dst_addr.a, &next_hop) == 0)
? next_hop : portid);
}
@@ -1006,12 +1006,14 @@ struct rte_flow_item_ipv6 {
#ifndef __cplusplus
static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
.hdr = {
- .src_addr =
+ .src_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
+ },
+ .dst_addr = { .a =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
},
};
#endif
@@ -140,24 +140,24 @@ rte_thash_load_v6_addrs(const struct rte_ipv6_hdr *orig,
union rte_thash_tuple *targ)
{
#ifdef RTE_ARCH_X86
- __m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
+ __m128i ipv6 = _mm_loadu_si128((const __m128i *)&orig->src_addr);
*(__m128i *)targ->v6.src_addr =
_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
- ipv6 = _mm_loadu_si128((const __m128i *)orig->dst_addr);
+ ipv6 = _mm_loadu_si128((const __m128i *)&orig->dst_addr);
*(__m128i *)targ->v6.dst_addr =
_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
#elif defined(__ARM_NEON)
- uint8x16_t ipv6 = vld1q_u8((uint8_t const *)orig->src_addr);
+ uint8x16_t ipv6 = vld1q_u8((uint8_t const *)&orig->src_addr);
vst1q_u8((uint8_t *)targ->v6.src_addr, vrev32q_u8(ipv6));
- ipv6 = vld1q_u8((uint8_t const *)orig->dst_addr);
+ ipv6 = vld1q_u8((uint8_t const *)&orig->dst_addr);
vst1q_u8((uint8_t *)targ->v6.dst_addr, vrev32q_u8(ipv6));
#else
int i;
for (i = 0; i < 4; i++) {
*((uint32_t *)targ->v6.src_addr + i) =
- rte_be_to_cpu_32(*((const uint32_t *)orig->src_addr + i));
+ rte_be_to_cpu_32(*((const uint32_t *)orig->src_addr.a + i));
*((uint32_t *)targ->v6.dst_addr + i) =
- rte_be_to_cpu_32(*((const uint32_t *)orig->dst_addr + i));
+ rte_be_to_cpu_32(*((const uint32_t *)orig->dst_addr.a + i));
}
#endif
}
@@ -143,8 +143,8 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
int32_t ip_len;
int32_t trim;
- rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16);
- rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16);
+ rte_memcpy(&key.src_dst[0], &ip_hdr->src_addr, 16);
+ rte_memcpy(&key.src_dst[2], &ip_hdr->dst_addr, 16);
key.id = frag_hdr->id;
key.key_len = IPV6_KEYLEN;
@@ -53,8 +53,8 @@ struct rte_ipv6_hdr {
rte_be16_t payload_len; /**< IP payload size, including ext. headers */
uint8_t proto; /**< Protocol, next header. */
uint8_t hop_limits; /**< Hop limits. */
- uint8_t src_addr[16]; /**< IP address of source host. */
- uint8_t dst_addr[16]; /**< IP address of destination host(s). */
+ struct rte_ipv6_addr src_addr; /**< IP address of source host. */
+ struct rte_ipv6_addr dst_addr; /**< IP address of destination host(s). */
} __rte_packed;
/* IPv6 routing extension type definition. */
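As a sanity-check sketch (an assumption, not something asserted by this patch): the new address
type is expected to keep the same 16-byte layout, so the fixed-size memcpy()/memcmp() calls in
the hunks above and the 40-byte packed header size should be unaffected. A compile-time check
along these lines could confirm that assumption::

    #include <assert.h>
    #include <rte_ip6.h>

    /* Both checks rely on the assumption that rte_ipv6_addr wraps uint8_t a[16]. */
    static_assert(sizeof(struct rte_ipv6_addr) == 16, "IPv6 address must stay 16 bytes");
    static_assert(sizeof(struct rte_ipv6_hdr) == 40, "IPv6 header must stay 40 bytes");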
@@ -123,7 +123,7 @@ rte_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
psd_hdr.len = ipv6_hdr->payload_len;
}
- sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+ sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr),
0);
sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
@@ -112,28 +112,28 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
sizeof(struct rte_ether_hdr));
/* Extract hop_limits as ipv6 hdr is in cache */
node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
- rte_memcpy(ip_batch[0], ipv6_hdr->dst_addr, 16);
+ rte_memcpy(ip_batch[0], &ipv6_hdr->dst_addr, 16);
/* Extract DIP of mbuf1 */
ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
/* Extract hop_limits as ipv6 hdr is in cache */
node_mbuf_priv1(mbuf1, dyn)->ttl = ipv6_hdr->hop_limits;
- rte_memcpy(ip_batch[1], ipv6_hdr->dst_addr, 16);
+ rte_memcpy(ip_batch[1], &ipv6_hdr->dst_addr, 16);
/* Extract DIP of mbuf2 */
ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
/* Extract hop_limits as ipv6 hdr is in cache */
node_mbuf_priv1(mbuf2, dyn)->ttl = ipv6_hdr->hop_limits;
- rte_memcpy(ip_batch[2], ipv6_hdr->dst_addr, 16);
+ rte_memcpy(ip_batch[2], &ipv6_hdr->dst_addr, 16);
/* Extract DIP of mbuf3 */
ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
/* Extract hop_limits as ipv6 hdr is in cache */
node_mbuf_priv1(mbuf3, dyn)->ttl = ipv6_hdr->hop_limits;
- rte_memcpy(ip_batch[3], ipv6_hdr->dst_addr, 16);
+ rte_memcpy(ip_batch[3], &ipv6_hdr->dst_addr, 16);
rte_lpm6_lookup_bulk_func(lpm6, ip_batch, next_hop, 4);
@@ -223,7 +223,7 @@ ip6_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
/* Extract TTL as IPv6 hdr is in cache */
node_mbuf_priv1(mbuf0, dyn)->ttl = ipv6_hdr->hop_limits;
- rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr, &next_hop);
+ rc = rte_lpm6_lookup(lpm6, ipv6_hdr->dst_addr.a, &next_hop);
next_hop = (rc == 0) ? next_hop : drop_nh;
node_mbuf_priv1(mbuf0, dyn)->nh = (uint16_t)next_hop;
@@ -1387,13 +1387,11 @@ tunnel_ipv6_header_set(struct rte_ipv6_hdr *h, struct rte_swx_ipsec_sa_params *p
.payload_len = 0, /* Cannot be pre-computed. */
.proto = IPPROTO_ESP,
.hop_limits = 64,
- .src_addr = {0},
- .dst_addr = {0},
};
memcpy(h, &ipv6_hdr, sizeof(ipv6_hdr));
- memcpy(h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
- memcpy(h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
+ memcpy(&h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
+ memcpy(&h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
}
/* IPsec library SA parameters. */
@@ -872,10 +872,10 @@ encap_vxlan_apply(void *data,
d->ipv6.payload_len = 0; /* not pre-computed */
d->ipv6.proto = IP_PROTO_UDP;
d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
- memcpy(d->ipv6.src_addr,
+ memcpy(&d->ipv6.src_addr,
p->vxlan.ipv6.sa,
sizeof(p->vxlan.ipv6.sa));
- memcpy(d->ipv6.dst_addr,
+ memcpy(&d->ipv6.dst_addr,
p->vxlan.ipv6.da,
sizeof(p->vxlan.ipv6.da));
@@ -907,10 +907,10 @@ encap_vxlan_apply(void *data,
d->ipv6.payload_len = 0; /* not pre-computed */
d->ipv6.proto = IP_PROTO_UDP;
d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
- memcpy(d->ipv6.src_addr,
+ memcpy(&d->ipv6.src_addr,
p->vxlan.ipv6.sa,
sizeof(p->vxlan.ipv6.sa));
- memcpy(d->ipv6.dst_addr,
+ memcpy(&d->ipv6.dst_addr,
p->vxlan.ipv6.da,
sizeof(p->vxlan.ipv6.da));
@@ -1437,12 +1437,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
uint16_t tcp_cksum;
tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
- (uint16_t *)ip->src_addr,
+ (uint16_t *)&ip->src_addr,
(uint16_t *)data->addr,
tcp->src_port,
data->port);
- rte_memcpy(ip->src_addr, data->addr, 16);
+ rte_memcpy(&ip->src_addr, data->addr, 16);
tcp->src_port = data->port;
tcp->cksum = tcp_cksum;
} else {
@@ -1450,12 +1450,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
uint16_t udp_cksum;
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
- (uint16_t *)ip->src_addr,
+ (uint16_t *)&ip->src_addr,
(uint16_t *)data->addr,
udp->src_port,
data->port);
- rte_memcpy(ip->src_addr, data->addr, 16);
+ rte_memcpy(&ip->src_addr, data->addr, 16);
udp->src_port = data->port;
udp->dgram_cksum = udp_cksum;
}
@@ -1465,12 +1465,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
uint16_t tcp_cksum;
tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
- (uint16_t *)ip->dst_addr,
+ (uint16_t *)&ip->dst_addr,
(uint16_t *)data->addr,
tcp->dst_port,
data->port);
- rte_memcpy(ip->dst_addr, data->addr, 16);
+ rte_memcpy(&ip->dst_addr, data->addr, 16);
tcp->dst_port = data->port;
tcp->cksum = tcp_cksum;
} else {
@@ -1478,12 +1478,12 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
uint16_t udp_cksum;
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
- (uint16_t *)ip->dst_addr,
+ (uint16_t *)&ip->dst_addr,
(uint16_t *)data->addr,
udp->dst_port,
data->port);
- rte_memcpy(ip->dst_addr, data->addr, 16);
+ rte_memcpy(&ip->dst_addr, data->addr, 16);
udp->dst_port = data->port;
udp->dgram_cksum = udp_cksum;
}