@@ -37,10 +37,10 @@ add_vlan(struct rte_flow_item *items,
__rte_unused struct additional_para para)
{
static struct rte_flow_item_vlan vlan_spec = {
- .tci = RTE_BE16(VLAN_VALUE),
+ .hdr.vlan_tci = RTE_BE16(VLAN_VALUE),
};
static struct rte_flow_item_vlan vlan_mask = {
- .tci = RTE_BE16(0xffff),
+ .hdr.vlan_tci = RTE_BE16(0xffff),
};
items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
@@ -3599,19 +3599,19 @@ static const struct token token_list[] = {
.name = "dst",
.help = "destination MAC",
.next = NEXT(item_eth, NEXT_ENTRY(COMMON_MAC_ADDR), item_param),
- .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, hdr.dst_addr)),
},
[ITEM_ETH_SRC] = {
.name = "src",
.help = "source MAC",
.next = NEXT(item_eth, NEXT_ENTRY(COMMON_MAC_ADDR), item_param),
- .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, hdr.src_addr)),
},
[ITEM_ETH_TYPE] = {
.name = "type",
.help = "EtherType",
.next = NEXT(item_eth, NEXT_ENTRY(COMMON_UNSIGNED), item_param),
- .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, hdr.ether_type)),
},
[ITEM_ETH_HAS_VLAN] = {
.name = "has_vlan",
@@ -3632,7 +3632,7 @@ static const struct token token_list[] = {
.help = "tag control information",
.next = NEXT(item_vlan, NEXT_ENTRY(COMMON_UNSIGNED),
item_param),
- .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, hdr.vlan_tci)),
},
[ITEM_VLAN_PCP] = {
.name = "pcp",
@@ -3640,7 +3640,7 @@ static const struct token token_list[] = {
.next = NEXT(item_vlan, NEXT_ENTRY(COMMON_UNSIGNED),
item_param),
.args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
- tci, "\xe0\x00")),
+ hdr.vlan_tci, "\xe0\x00")),
},
[ITEM_VLAN_DEI] = {
.name = "dei",
@@ -3648,7 +3648,7 @@ static const struct token token_list[] = {
.next = NEXT(item_vlan, NEXT_ENTRY(COMMON_UNSIGNED),
item_param),
.args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
- tci, "\x10\x00")),
+ hdr.vlan_tci, "\x10\x00")),
},
[ITEM_VLAN_VID] = {
.name = "vid",
@@ -3656,7 +3656,7 @@ static const struct token token_list[] = {
.next = NEXT(item_vlan, NEXT_ENTRY(COMMON_UNSIGNED),
item_param),
.args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
- tci, "\x0f\xff")),
+ hdr.vlan_tci, "\x0f\xff")),
},
[ITEM_VLAN_INNER_TYPE] = {
.name = "inner_type",
@@ -3664,7 +3664,7 @@ static const struct token token_list[] = {
.next = NEXT(item_vlan, NEXT_ENTRY(COMMON_UNSIGNED),
item_param),
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
- inner_type)),
+ hdr.eth_proto)),
},
[ITEM_VLAN_HAS_MORE_VLAN] = {
.name = "has_more_vlan",
@@ -7402,10 +7402,10 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
.type = RTE_FLOW_ITEM_TYPE_END,
},
},
- .item_eth.type = 0,
+ .item_eth.hdr.ether_type = 0,
.item_vlan = {
- .tci = vxlan_encap_conf.vlan_tci,
- .inner_type = 0,
+ .hdr.vlan_tci = vxlan_encap_conf.vlan_tci,
+ .hdr.eth_proto = 0,
},
.item_ipv4.hdr = {
.src_addr = vxlan_encap_conf.ipv4_src,
@@ -7417,9 +7417,9 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
},
.item_vxlan.flags = 0,
};
- memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
+ memcpy(action_vxlan_encap_data->item_eth.hdr.dst_addr.addr_bytes,
vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
+ memcpy(action_vxlan_encap_data->item_eth.hdr.src_addr.addr_bytes,
vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
if (!vxlan_encap_conf.select_ipv4) {
memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
@@ -7537,10 +7537,10 @@ parse_setup_nvgre_encap_data(struct action_nvgre_encap_data *action_nvgre_encap_
.type = RTE_FLOW_ITEM_TYPE_END,
},
},
- .item_eth.type = 0,
+ .item_eth.hdr.ether_type = 0,
.item_vlan = {
- .tci = nvgre_encap_conf.vlan_tci,
- .inner_type = 0,
+ .hdr.vlan_tci = nvgre_encap_conf.vlan_tci,
+ .hdr.eth_proto = 0,
},
.item_ipv4.hdr = {
.src_addr = nvgre_encap_conf.ipv4_src,
@@ -7550,9 +7550,9 @@ parse_setup_nvgre_encap_data(struct action_nvgre_encap_data *action_nvgre_encap_
.item_nvgre.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
.item_nvgre.flow_id = 0,
};
- memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
+ memcpy(action_nvgre_encap_data->item_eth.hdr.dst_addr.addr_bytes,
nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
+ memcpy(action_nvgre_encap_data->item_eth.hdr.src_addr.addr_bytes,
nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
if (!nvgre_encap_conf.select_ipv4) {
memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
@@ -7613,10 +7613,10 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
struct buffer *out = buf;
struct rte_flow_action *action;
struct action_raw_encap_data *action_encap_data;
- struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_eth eth = { .hdr.ether_type = 0, };
struct rte_flow_item_vlan vlan = {
- .tci = mplsoudp_encap_conf.vlan_tci,
- .inner_type = 0,
+ .hdr.vlan_tci = mplsoudp_encap_conf.vlan_tci,
+ .hdr.eth_proto = 0,
};
uint8_t *header;
int ret;
@@ -7643,22 +7643,22 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
};
header = action_encap_data->data;
if (l2_encap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (l2_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- memcpy(eth.dst.addr_bytes,
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(eth.hdr.dst_addr.addr_bytes,
l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(eth.src.addr_bytes,
+ memcpy(eth.hdr.src_addr.addr_bytes,
l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (l2_encap_conf.select_vlan) {
if (l2_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
@@ -7677,10 +7677,10 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
struct buffer *out = buf;
struct rte_flow_action *action;
struct action_raw_decap_data *action_decap_data;
- struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_eth eth = { .hdr.ether_type = 0, };
struct rte_flow_item_vlan vlan = {
- .tci = mplsoudp_encap_conf.vlan_tci,
- .inner_type = 0,
+ .hdr.vlan_tci = mplsoudp_encap_conf.vlan_tci,
+ .hdr.eth_proto = 0,
};
uint8_t *header;
int ret;
@@ -7707,7 +7707,7 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
};
header = action_decap_data->data;
if (l2_decap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (l2_decap_conf.select_vlan) {
@@ -7731,10 +7731,10 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
struct buffer *out = buf;
struct rte_flow_action *action;
struct action_raw_encap_data *action_encap_data;
- struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_eth eth = { .hdr.ether_type = 0, };
struct rte_flow_item_vlan vlan = {
- .tci = mplsogre_encap_conf.vlan_tci,
- .inner_type = 0,
+ .hdr.vlan_tci = mplsogre_encap_conf.vlan_tci,
+ .hdr.eth_proto = 0,
};
struct rte_flow_item_ipv4 ipv4 = {
.hdr = {
@@ -7783,22 +7783,22 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
};
header = action_encap_data->data;
if (mplsogre_encap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsogre_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- memcpy(eth.dst.addr_bytes,
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(eth.hdr.dst_addr.addr_bytes,
mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(eth.src.addr_bytes,
+ memcpy(eth.hdr.src_addr.addr_bytes,
mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsogre_encap_conf.select_vlan) {
if (mplsogre_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
@@ -7837,8 +7837,8 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
struct buffer *out = buf;
struct rte_flow_action *action;
struct action_raw_decap_data *action_decap_data;
- struct rte_flow_item_eth eth = { .type = 0, };
- struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_eth eth = { .hdr.ether_type = 0, };
+ struct rte_flow_item_vlan vlan = {.hdr.vlan_tci = 0};
struct rte_flow_item_ipv4 ipv4 = {
.hdr = {
.next_proto_id = IPPROTO_GRE,
@@ -7878,22 +7878,22 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
};
header = action_decap_data->data;
if (mplsogre_decap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsogre_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- memcpy(eth.dst.addr_bytes,
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(eth.hdr.dst_addr.addr_bytes,
mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(eth.src.addr_bytes,
+ memcpy(eth.hdr.src_addr.addr_bytes,
mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsogre_encap_conf.select_vlan) {
if (mplsogre_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
@@ -7924,10 +7924,10 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
struct buffer *out = buf;
struct rte_flow_action *action;
struct action_raw_encap_data *action_encap_data;
- struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_eth eth = { .hdr.ether_type = 0, };
struct rte_flow_item_vlan vlan = {
- .tci = mplsoudp_encap_conf.vlan_tci,
- .inner_type = 0,
+ .hdr.vlan_tci = mplsoudp_encap_conf.vlan_tci,
+ .hdr.eth_proto = 0,
};
struct rte_flow_item_ipv4 ipv4 = {
.hdr = {
@@ -7977,22 +7977,22 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
};
header = action_encap_data->data;
if (mplsoudp_encap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsoudp_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- memcpy(eth.dst.addr_bytes,
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(eth.hdr.dst_addr.addr_bytes,
mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(eth.src.addr_bytes,
+ memcpy(eth.hdr.src_addr.addr_bytes,
mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsoudp_encap_conf.select_vlan) {
if (mplsoudp_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
@@ -8031,8 +8031,8 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
struct buffer *out = buf;
struct rte_flow_action *action;
struct action_raw_decap_data *action_decap_data;
- struct rte_flow_item_eth eth = { .type = 0, };
- struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_eth eth = { .hdr.ether_type = 0, };
+ struct rte_flow_item_vlan vlan = {.hdr.vlan_tci = 0};
struct rte_flow_item_ipv4 ipv4 = {
.hdr = {
.next_proto_id = IPPROTO_UDP,
@@ -8074,22 +8074,22 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
};
header = action_decap_data->data;
if (mplsoudp_decap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsoudp_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- memcpy(eth.dst.addr_bytes,
+ eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ memcpy(eth.hdr.dst_addr.addr_bytes,
mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
- memcpy(eth.src.addr_bytes,
+ memcpy(eth.hdr.src_addr.addr_bytes,
mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsoudp_encap_conf.select_vlan) {
if (mplsoudp_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
else
- vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
@@ -840,9 +840,7 @@ instead of using the ``type`` field.
If the ``type`` and ``has_vlan`` fields are not specified, then both tagged
and untagged packets will match the pattern.
-- ``dst``: destination MAC.
-- ``src``: source MAC.
-- ``type``: EtherType or TPID.
+- ``hdr``: header definition (``rte_ether.h``).
- ``has_vlan``: packet header contains at least one VLAN.
- Default ``mask`` matches destination and source addresses only.
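
For migration reference, a minimal sketch of an ETH pattern spec/mask written against the embedded ``hdr`` (``struct rte_ether_hdr``), expressing what the removed ``dst``/``src``/``type`` bullets used to describe; the IPv4 EtherType match is an illustrative choice, not part of this patch:

    #include <rte_byteorder.h>
    #include <rte_ether.h>
    #include <rte_flow.h>

    /* Match EtherType IPv4 through the embedded protocol header;
     * the deprecated spelling was .type = RTE_BE16(RTE_ETHER_TYPE_IPV4). */
    static const struct rte_flow_item_eth eth_spec = {
            .hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
    };
    static const struct rte_flow_item_eth eth_mask = {
            .hdr.ether_type = RTE_BE16(0xffff),
    };
    static const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };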
@@ -861,8 +859,7 @@ instead of using the ``inner_type`` field.
If the ``inner_type`` and ``has_more_vlan`` fields are not specified,
then any tagged packets will match the pattern.
-- ``tci``: tag control information.
-- ``inner_type``: inner EtherType or TPID.
+- ``hdr``: header definition (``rte_ether.h``).
- ``has_more_vlan``: packet header contains at least one more VLAN, after this VLAN.
- Default ``mask`` matches the VID part of TCI only (lower 12 bits).
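
Likewise for the VLAN item, a sketch under the new layout (VID 100 is an arbitrary example value; the mask mirrors the default VID-only match described above):

    /* Deprecated spellings were .tci and .inner_type; the embedded
     * struct rte_vlan_hdr names the same fields vlan_tci and eth_proto. */
    static const struct rte_flow_item_vlan vlan_spec = {
            .hdr.vlan_tci = RTE_BE16(100),
    };
    static const struct rte_flow_item_vlan vlan_mask = {
            .hdr.vlan_tci = RTE_BE16(0x0fff), /* VID bits of the TCI only */
    };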
@@ -58,14 +58,36 @@ Deprecation Notices
to using the general ``rte_flow_modify_field`` action.
* ethdev: The flow API matching pattern structures, ``struct rte_flow_item_*``,
- should start with relevant protocol header.
- Some matching pattern structures implements this by duplicating protocol header
- fields in the struct. To clarify the intention and to be sure protocol header
- is intact, will replace those fields with relevant protocol header struct.
- In v21.02 both individual protocol header fields and the protocol header struct
- will be added as union, target is switch usage to the protocol header by time.
- In v21.11 LTS, protocol header fields will be cleaned and only protocol header
- struct will remain.
+ should start with the relevant protocol header structure from lib/net/.
+ The individual protocol header fields and the protocol header struct
+ may be kept together in a union as a first migration step.
+ In the future (target is DPDK 23.11), the duplicated protocol header fields
+ will be removed and only the protocol header struct will remain.
+
+ These items are not compliant (they do not include the protocol header struct from lib/net/):
+ - ``rte_flow_item_ah``
+ - ``rte_flow_item_arp_eth_ipv4``
+ - ``rte_flow_item_e_tag``
+ - ``rte_flow_item_geneve``
+ - ``rte_flow_item_geneve_opt``
+ - ``rte_flow_item_gre``
+ - ``rte_flow_item_gtp``
+ - ``rte_flow_item_icmp6``
+ - ``rte_flow_item_icmp6_nd_na``
+ - ``rte_flow_item_icmp6_nd_ns``
+ - ``rte_flow_item_icmp6_nd_opt``
+ - ``rte_flow_item_icmp6_nd_opt_sla_eth``
+ - ``rte_flow_item_icmp6_nd_opt_tla_eth``
+ - ``rte_flow_item_igmp``
+ - ``rte_flow_item_ipv6_ext``
+ - ``rte_flow_item_l2tpv3oip``
+ - ``rte_flow_item_mpls``
+ - ``rte_flow_item_nsh``
+ - ``rte_flow_item_nvgre``
+ - ``rte_flow_item_pfcp``
+ - ``rte_flow_item_pppoe``
+ - ``rte_flow_item_pppoe_proto_id``
+ - ``rte_flow_item_vxlan_gpe``
* ethdev: Queue specific stats fields will be removed from ``struct rte_eth_stats``.
Mentioned fields are: ``q_ipackets``, ``q_opackets``, ``q_ibytes``, ``q_obytes``,
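
To make the migration step in the notice above concrete, here is a simplified sketch of the transitional union layout; it is an approximation for illustration, not the verbatim DPDK definition (attributes and reserved bits differ):

    /* Sketch only: the legacy field names alias the lib/net/ protocol
     * header through an anonymous union, so both spellings address the
     * same bytes until the legacy names are removed. */
    struct flow_item_vlan_sketch {
            union {
                    struct {
                            rte_be16_t tci;        /* deprecated name */
                            rte_be16_t inner_type; /* deprecated name */
                    };
                    struct rte_vlan_hdr hdr;       /* .vlan_tci, .eth_proto */
            };
            uint32_t has_more_vlan:1;
            uint32_t reserved:31;
    };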
@@ -199,10 +199,10 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
* Destination MAC address mask must not be partially
* set. Should be all 1's or all 0's.
*/
- if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
- !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
- (!rte_is_zero_ether_addr(&eth_mask->dst) &&
- !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ if ((!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
rte_flow_error_set(error,
EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -212,8 +212,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
}
/* Mask is not allowed. Only exact matches are */
- if (eth_mask->type &&
- eth_mask->type != RTE_BE16(0xffff)) {
+ if (eth_mask->hdr.ether_type &&
+ eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -221,8 +221,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
return -rte_errno;
}
- if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
- dst = &eth_spec->dst;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
+ dst = &eth_spec->hdr.dst_addr;
if (!rte_is_valid_assigned_ether_addr(dst)) {
rte_flow_error_set(error,
EINVAL,
@@ -234,7 +234,7 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
return -rte_errno;
}
rte_memcpy(filter->dst_macaddr,
- &eth_spec->dst, RTE_ETHER_ADDR_LEN);
+ &eth_spec->hdr.dst_addr, RTE_ETHER_ADDR_LEN);
en |= use_ntuple ?
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
@@ -245,8 +245,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
PMD_DRV_LOG(DEBUG,
"Creating a priority flow\n");
}
- if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
- src = &eth_spec->src;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) {
+ src = &eth_spec->hdr.src_addr;
if (!rte_is_valid_assigned_ether_addr(src)) {
rte_flow_error_set(error,
EINVAL,
@@ -258,7 +258,7 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
return -rte_errno;
}
rte_memcpy(filter->src_macaddr,
- &eth_spec->src, RTE_ETHER_ADDR_LEN);
+ &eth_spec->hdr.src_addr, RTE_ETHER_ADDR_LEN);
en |= use_ntuple ?
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
@@ -270,9 +270,9 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
* PMD_DRV_LOG(ERR, "Handle this condition\n");
* }
*/
- if (eth_mask->type) {
+ if (eth_mask->hdr.ether_type) {
filter->ethertype =
- rte_be_to_cpu_16(eth_spec->type);
+ rte_be_to_cpu_16(eth_spec->hdr.ether_type);
en |= en_ethertype;
}
if (inner)
@@ -295,11 +295,11 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
" supported");
return -rte_errno;
}
- if (vlan_mask->tci &&
- vlan_mask->tci == RTE_BE16(0x0fff)) {
+ if (vlan_mask->hdr.vlan_tci &&
+ vlan_mask->hdr.vlan_tci == RTE_BE16(0x0fff)) {
/* Only the VLAN ID can be matched. */
filter->l2_ovlan =
- rte_be_to_cpu_16(vlan_spec->tci &
+ rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci &
RTE_BE16(0x0fff));
en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
} else {
@@ -310,8 +310,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
"VLAN mask is invalid");
return -rte_errno;
}
- if (vlan_mask->inner_type &&
- vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ if (vlan_mask->hdr.eth_proto &&
+ vlan_mask->hdr.eth_proto != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -319,9 +319,9 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
" valid");
return -rte_errno;
}
- if (vlan_mask->inner_type) {
+ if (vlan_mask->hdr.eth_proto) {
filter->ethertype =
- rte_be_to_cpu_16(vlan_spec->inner_type);
+ rte_be_to_cpu_16(vlan_spec->hdr.eth_proto);
en |= en_ethertype;
}
@@ -627,13 +627,13 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
/* Perform validations */
if (eth_spec) {
/* Todo: work around to avoid multicast and broadcast addr */
- if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
+ if (ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
return BNXT_TF_RC_PARSE_ERR;
- if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
+ if (ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
return BNXT_TF_RC_PARSE_ERR;
- eth_type = eth_spec->type;
+ eth_type = eth_spec->hdr.ether_type;
}
if (ulp_rte_prsr_fld_size_validate(params, &idx,
@@ -646,22 +646,22 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
* header fields
*/
dmac_idx = idx;
- size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
+ size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
ulp_rte_prsr_fld_mask(params, &idx, size,
- ulp_deference_struct(eth_spec, dst.addr_bytes),
- ulp_deference_struct(eth_mask, dst.addr_bytes),
+ ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
+ ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
ULP_PRSR_ACT_DEFAULT);
- size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
+ size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
ulp_rte_prsr_fld_mask(params, &idx, size,
- ulp_deference_struct(eth_spec, src.addr_bytes),
- ulp_deference_struct(eth_mask, src.addr_bytes),
+ ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
+ ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
ULP_PRSR_ACT_DEFAULT);
- size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
+ size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
ulp_rte_prsr_fld_mask(params, &idx, size,
- ulp_deference_struct(eth_spec, type),
- ulp_deference_struct(eth_mask, type),
+ ulp_deference_struct(eth_spec, hdr.ether_type),
+ ulp_deference_struct(eth_mask, hdr.ether_type),
ULP_PRSR_ACT_MATCH_IGNORE);
/* Update the protocol hdr bitmap */
@@ -706,15 +706,15 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
uint32_t size;
if (vlan_spec) {
- vlan_tag = ntohs(vlan_spec->tci);
+ vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
vlan_tag &= ULP_VLAN_TAG_MASK;
vlan_tag = htons(vlan_tag);
- eth_type = vlan_spec->inner_type;
+ eth_type = vlan_spec->hdr.eth_proto;
}
if (vlan_mask) {
- vlan_tag_mask = ntohs(vlan_mask->tci);
+ vlan_tag_mask = ntohs(vlan_mask->hdr.vlan_tci);
priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
vlan_tag_mask &= 0xfff;
@@ -741,7 +741,7 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
* Copy the rte_flow_item for vlan into hdr_field using Vlan
* header fields
*/
- size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
+ size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
/*
* The priority field is ignored since OVS is setting it as
* wild card match and it is not supported. This is a work
@@ -757,10 +757,10 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
(vlan_mask) ? &vlan_tag_mask : NULL,
ULP_PRSR_ACT_DEFAULT);
- size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
+ size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
ulp_rte_prsr_fld_mask(params, &idx, size,
- ulp_deference_struct(vlan_spec, inner_type),
- ulp_deference_struct(vlan_mask, inner_type),
+ ulp_deference_struct(vlan_spec, hdr.eth_proto),
+ ulp_deference_struct(vlan_mask, hdr.eth_proto),
ULP_PRSR_ACT_MATCH_IGNORE);
/* Get the outer tag and inner tag counts */
@@ -1673,14 +1673,14 @@ ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
uint32_t size;
field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
- size = sizeof(eth_spec->dst.addr_bytes);
- field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
+ size = sizeof(eth_spec->hdr.dst_addr.addr_bytes);
+ field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size);
- size = sizeof(eth_spec->src.addr_bytes);
- field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
+ size = sizeof(eth_spec->hdr.src_addr.addr_bytes);
+ field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size);
- size = sizeof(eth_spec->type);
- field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);
+ size = sizeof(eth_spec->hdr.ether_type);
+ field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size);
ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
}
@@ -1704,11 +1704,11 @@ ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
BNXT_ULP_HDR_BIT_OI_VLAN);
}
- size = sizeof(vlan_spec->tci);
- field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
+ size = sizeof(vlan_spec->hdr.vlan_tci);
+ field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size);
- size = sizeof(vlan_spec->inner_type);
- field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
+ size = sizeof(vlan_spec->hdr.eth_proto);
+ field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size);
}
/* Function to handle the parsing of RTE Flow item ipv4 Header. */
@@ -122,15 +122,15 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
*/
static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
- .dst.addr_bytes = { 0 },
- .src.addr_bytes = { 0 },
- .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
+ .hdr.dst_addr.addr_bytes = { 0 },
+ .hdr.src_addr.addr_bytes = { 0 },
+ .hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};
static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
- .dst.addr_bytes = { 0 },
- .src.addr_bytes = { 0 },
- .type = 0xFFFF,
+ .hdr.dst_addr.addr_bytes = { 0 },
+ .hdr.src_addr.addr_bytes = { 0 },
+ .hdr.ether_type = 0xFFFF,
};
static struct rte_flow_item flow_item_8023ad[] = {
@@ -188,22 +188,22 @@ ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
return 0;
/* we don't support SRC_MAC filtering*/
- if (!rte_is_zero_ether_addr(&spec->src) ||
- (umask && !rte_is_zero_ether_addr(&umask->src)))
+ if (!rte_is_zero_ether_addr(&spec->hdr.src_addr) ||
+ (umask && !rte_is_zero_ether_addr(&umask->hdr.src_addr)))
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
item,
"src mac filtering not supported");
- if (!rte_is_zero_ether_addr(&spec->dst) ||
- (umask && !rte_is_zero_ether_addr(&umask->dst))) {
+ if (!rte_is_zero_ether_addr(&spec->hdr.dst_addr) ||
+ (umask && !rte_is_zero_ether_addr(&umask->hdr.dst_addr))) {
CXGBE_FILL_FS(0, 0x1ff, macidx);
- CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
+ CXGBE_FILL_FS_MEMCPY(spec->hdr.dst_addr.addr_bytes, mask->hdr.dst_addr.addr_bytes,
dmac);
}
- if (spec->type || (umask && umask->type))
- CXGBE_FILL_FS(be16_to_cpu(spec->type),
- be16_to_cpu(mask->type), ethtype);
+ if (spec->hdr.ether_type || (umask && umask->hdr.ether_type))
+ CXGBE_FILL_FS(be16_to_cpu(spec->hdr.ether_type),
+ be16_to_cpu(mask->hdr.ether_type), ethtype);
return 0;
}
@@ -239,26 +239,26 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
CXGBE_FILL_FS(1, 1, ovlan_vld);
if (spec) {
- if (spec->tci || (umask && umask->tci))
- CXGBE_FILL_FS(be16_to_cpu(spec->tci),
- be16_to_cpu(mask->tci), ovlan);
+ if (spec->hdr.vlan_tci || (umask && umask->hdr.vlan_tci))
+ CXGBE_FILL_FS(be16_to_cpu(spec->hdr.vlan_tci),
+ be16_to_cpu(mask->hdr.vlan_tci), ovlan);
fs->mask.ethtype = 0;
fs->val.ethtype = 0;
}
} else {
CXGBE_FILL_FS(1, 1, ivlan_vld);
if (spec) {
- if (spec->tci || (umask && umask->tci))
- CXGBE_FILL_FS(be16_to_cpu(spec->tci),
- be16_to_cpu(mask->tci), ivlan);
+ if (spec->hdr.vlan_tci || (umask && umask->hdr.vlan_tci))
+ CXGBE_FILL_FS(be16_to_cpu(spec->hdr.vlan_tci),
+ be16_to_cpu(mask->hdr.vlan_tci), ivlan);
fs->mask.ethtype = 0;
fs->val.ethtype = 0;
}
}
- if (spec && (spec->inner_type || (umask && umask->inner_type)))
- CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
- be16_to_cpu(mask->inner_type), ethtype);
+ if (spec && (spec->hdr.eth_proto || (umask && umask->hdr.eth_proto)))
+ CXGBE_FILL_FS(be16_to_cpu(spec->hdr.eth_proto),
+ be16_to_cpu(mask->hdr.eth_proto), ethtype);
return 0;
}
@@ -889,17 +889,17 @@ static struct chrte_fparse parseitem[] = {
[RTE_FLOW_ITEM_TYPE_ETH] = {
.fptr = ch_rte_parsetype_eth,
.dmask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .type = 0xffff,
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .hdr.ether_type = 0xffff,
}
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.fptr = ch_rte_parsetype_vlan,
.dmask = &(const struct rte_flow_item_vlan){
- .tci = 0xffff,
- .inner_type = 0xffff,
+ .hdr.vlan_tci = 0xffff,
+ .hdr.eth_proto = 0xffff,
}
},
@@ -100,13 +100,13 @@ enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
#ifndef __cplusplus
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = RTE_BE16(0xffff),
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.ether_type = RTE_BE16(0xffff),
};
static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
- .tci = RTE_BE16(0xffff),
+ .hdr.vlan_tci = RTE_BE16(0xffff),
};
static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
@@ -966,7 +966,7 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
return -1;
}
- if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
+ if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
index = dpaa2_flow_extract_search(
&priv->extract.qos_key_extract.dpkg,
NET_PROT_ETH, NH_FLD_ETH_SA);
@@ -1009,8 +1009,8 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
&flow->qos_rule,
NET_PROT_ETH,
NH_FLD_ETH_SA,
- &spec->src.addr_bytes,
- &mask->src.addr_bytes,
+ &spec->hdr.src_addr.addr_bytes,
+ &mask->hdr.src_addr.addr_bytes,
sizeof(struct rte_ether_addr));
if (ret) {
DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
@@ -1022,8 +1022,8 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
&flow->fs_rule,
NET_PROT_ETH,
NH_FLD_ETH_SA,
- &spec->src.addr_bytes,
- &mask->src.addr_bytes,
+ &spec->hdr.src_addr.addr_bytes,
+ &mask->hdr.src_addr.addr_bytes,
sizeof(struct rte_ether_addr));
if (ret) {
DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
@@ -1031,7 +1031,7 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
}
}
- if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
+ if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
index = dpaa2_flow_extract_search(
&priv->extract.qos_key_extract.dpkg,
NET_PROT_ETH, NH_FLD_ETH_DA);
@@ -1076,8 +1076,8 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
&flow->qos_rule,
NET_PROT_ETH,
NH_FLD_ETH_DA,
- &spec->dst.addr_bytes,
- &mask->dst.addr_bytes,
+ &spec->hdr.dst_addr.addr_bytes,
+ &mask->hdr.dst_addr.addr_bytes,
sizeof(struct rte_ether_addr));
if (ret) {
DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
@@ -1089,8 +1089,8 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
&flow->fs_rule,
NET_PROT_ETH,
NH_FLD_ETH_DA,
- &spec->dst.addr_bytes,
- &mask->dst.addr_bytes,
+ &spec->hdr.dst_addr.addr_bytes,
+ &mask->hdr.dst_addr.addr_bytes,
sizeof(struct rte_ether_addr));
if (ret) {
DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
@@ -1098,7 +1098,7 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
}
}
- if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
+ if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) {
index = dpaa2_flow_extract_search(
&priv->extract.qos_key_extract.dpkg,
NET_PROT_ETH, NH_FLD_ETH_TYPE);
@@ -1142,8 +1142,8 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
&flow->qos_rule,
NET_PROT_ETH,
NH_FLD_ETH_TYPE,
- &spec->type,
- &mask->type,
+ &spec->hdr.ether_type,
+ &mask->hdr.ether_type,
sizeof(rte_be16_t));
if (ret) {
DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
@@ -1155,8 +1155,8 @@ dpaa2_configure_flow_eth(struct rte_flow *flow,
&flow->fs_rule,
NET_PROT_ETH,
NH_FLD_ETH_TYPE,
- &spec->type,
- &mask->type,
+ &spec->hdr.ether_type,
+ &mask->hdr.ether_type,
sizeof(rte_be16_t));
if (ret) {
DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
@@ -1266,7 +1266,7 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,
return -1;
}
- if (!mask->tci)
+ if (!mask->hdr.vlan_tci)
return 0;
index = dpaa2_flow_extract_search(
@@ -1314,8 +1314,8 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,
&flow->qos_rule,
NET_PROT_VLAN,
NH_FLD_VLAN_TCI,
- &spec->tci,
- &mask->tci,
+ &spec->hdr.vlan_tci,
+ &mask->hdr.vlan_tci,
sizeof(rte_be16_t));
if (ret) {
DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
@@ -1327,8 +1327,8 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,
&flow->fs_rule,
NET_PROT_VLAN,
NH_FLD_VLAN_TCI,
- &spec->tci,
- &mask->tci,
+ &spec->hdr.vlan_tci,
+ &mask->hdr.vlan_tci,
sizeof(rte_be16_t));
if (ret) {
DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
@@ -150,7 +150,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
kg_cfg.num_extracts = 1;
spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
- eth_type = rte_constant_bswap16(spec->type);
+ eth_type = rte_constant_bswap16(spec->hdr.ether_type);
memcpy((void *)key_iova, (const void *)&eth_type,
sizeof(rte_be16_t));
memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
@@ -555,16 +555,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- (!rte_is_zero_ether_addr(&eth_mask->dst) &&
- !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
return -rte_errno;
}
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ if ((eth_mask->hdr.ether_type & UINT16_MAX) != UINT16_MAX) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ethertype mask");
@@ -574,13 +574,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
- filter->mac_addr = eth_spec->dst;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
+ filter->mac_addr = eth_spec->hdr.dst_addr;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
}
- filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
/* Check if the next non-void item is END. */
index++;
@@ -656,17 +656,17 @@ enic_copy_item_eth_v2(struct copy_item_args *arg)
if (!mask)
mask = &rte_flow_item_eth_mask;
- memcpy(enic_spec.dst_addr.addr_bytes, spec->dst.addr_bytes,
+ memcpy(enic_spec.dst_addr.addr_bytes, spec->hdr.dst_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- memcpy(enic_spec.src_addr.addr_bytes, spec->src.addr_bytes,
+ memcpy(enic_spec.src_addr.addr_bytes, spec->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- memcpy(enic_mask.dst_addr.addr_bytes, mask->dst.addr_bytes,
+ memcpy(enic_mask.dst_addr.addr_bytes, mask->hdr.dst_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- memcpy(enic_mask.src_addr.addr_bytes, mask->src.addr_bytes,
+ memcpy(enic_mask.src_addr.addr_bytes, mask->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- enic_spec.ether_type = spec->type;
- enic_mask.ether_type = mask->type;
+ enic_spec.ether_type = spec->hdr.ether_type;
+ enic_mask.ether_type = mask->hdr.ether_type;
/* outer header */
memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
@@ -715,16 +715,16 @@ enic_copy_item_vlan_v2(struct copy_item_args *arg)
struct rte_vlan_hdr *vlan;
vlan = (struct rte_vlan_hdr *)(eth_mask + 1);
- vlan->eth_proto = mask->inner_type;
+ vlan->eth_proto = mask->hdr.eth_proto;
vlan = (struct rte_vlan_hdr *)(eth_val + 1);
- vlan->eth_proto = spec->inner_type;
+ vlan->eth_proto = spec->hdr.eth_proto;
} else {
- eth_mask->ether_type = mask->inner_type;
- eth_val->ether_type = spec->inner_type;
+ eth_mask->ether_type = mask->hdr.eth_proto;
+ eth_val->ether_type = spec->hdr.eth_proto;
}
/* For TCI, use the vlan mask/val fields (little endian). */
- gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
- gp->val_vlan = rte_be_to_cpu_16(spec->tci);
+ gp->mask_vlan = rte_be_to_cpu_16(mask->hdr.vlan_tci);
+ gp->val_vlan = rte_be_to_cpu_16(spec->hdr.vlan_tci);
return 0;
}
@@ -462,10 +462,10 @@ enic_fm_copy_item_vlan(struct copy_item_args *arg)
eth_val = (void *)&fm_data->l2.eth;
/*
- * Outer TPID cannot be matched. If inner_type is 0, use what is
+ * Outer TPID cannot be matched. If eth_proto is 0, use what is
* in the eth header.
*/
- if (eth_mask->ether_type && mask->inner_type)
+ if (eth_mask->ether_type && mask->hdr.eth_proto)
return -ENOTSUP;
/*
@@ -473,14 +473,14 @@ enic_fm_copy_item_vlan(struct copy_item_args *arg)
* L2, regardless of vlan stripping settings. So, the inner type
* from vlan becomes the ether type of the eth header.
*/
- if (mask->inner_type) {
- eth_mask->ether_type = mask->inner_type;
- eth_val->ether_type = spec->inner_type;
+ if (mask->hdr.eth_proto) {
+ eth_mask->ether_type = mask->hdr.eth_proto;
+ eth_val->ether_type = spec->hdr.eth_proto;
}
fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG;
fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG;
- fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci);
- fm_mask->fk_vlan = rte_be_to_cpu_16(mask->tci);
+ fm_data->fk_vlan = rte_be_to_cpu_16(spec->hdr.vlan_tci);
+ fm_mask->fk_vlan = rte_be_to_cpu_16(mask->hdr.vlan_tci);
return 0;
}
@@ -1385,7 +1385,7 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
ENICPMD_LOG(DEBUG, "vxlan-encap: vlan");
spec = item->spec;
- fm_op.encap.outer_vlan = rte_be_to_cpu_16(spec->tci);
+ fm_op.encap.outer_vlan = rte_be_to_cpu_16(spec->hdr.vlan_tci);
item++;
flow_item_skip_void(&item);
}
@@ -310,15 +310,15 @@ static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- (!rte_is_zero_ether_addr(&eth_mask->dst) &&
- !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
return -rte_errno;
}
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ if ((eth_mask->hdr.ether_type & UINT16_MAX) != UINT16_MAX) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ethertype mask");
return -rte_errno;
@@ -328,13 +328,13 @@ static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
- filter->mac_addr = eth_spec->dst;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
+ filter->mac_addr = eth_spec->hdr.dst_addr;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
}
- filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
/* Check if the next non-void item is END. */
item = next_no_void_pattern(pattern, item);
@@ -493,28 +493,28 @@ hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
eth_mask = item->mask;
- if (eth_mask->type) {
+ if (eth_mask->hdr.ether_type) {
hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
rule->key_conf.mask.ether_type =
- rte_be_to_cpu_16(eth_mask->type);
+ rte_be_to_cpu_16(eth_mask->hdr.ether_type);
}
- if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
memcpy(rule->key_conf.mask.src_mac,
- eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
+ eth_mask->hdr.src_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
}
- if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
memcpy(rule->key_conf.mask.dst_mac,
- eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
+ eth_mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
}
}
eth_spec = item->spec;
- rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
- memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
+ rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
+ memcpy(rule->key_conf.spec.src_mac, eth_spec->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
+ memcpy(rule->key_conf.spec.dst_mac, eth_spec->hdr.dst_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
return 0;
}
@@ -538,17 +538,17 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
vlan_mask = item->mask;
- if (vlan_mask->tci) {
+ if (vlan_mask->hdr.vlan_tci) {
if (rule->key_conf.vlan_num == 1) {
hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
1);
rule->key_conf.mask.vlan_tag1 =
- rte_be_to_cpu_16(vlan_mask->tci);
+ rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
} else {
hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
1);
rule->key_conf.mask.vlan_tag2 =
- rte_be_to_cpu_16(vlan_mask->tci);
+ rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci);
}
}
}
@@ -556,10 +556,10 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
vlan_spec = item->spec;
if (rule->key_conf.vlan_num == 1)
rule->key_conf.spec.vlan_tag1 =
- rte_be_to_cpu_16(vlan_spec->tci);
+ rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
else
rule->key_conf.spec.vlan_tag2 =
- rte_be_to_cpu_16(vlan_spec->tci);
+ rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
return 0;
}
@@ -1322,9 +1322,9 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- (!rte_is_zero_ether_addr(&eth_mask->dst) &&
- !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1332,7 +1332,7 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ if ((eth_mask->hdr.ether_type & UINT16_MAX) != UINT16_MAX) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1343,13 +1343,13 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
- filter->mac_addr = eth_spec->dst;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
+ filter->mac_addr = eth_spec->hdr.dst_addr;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
}
- filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
@@ -1662,25 +1662,25 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
if (eth_spec && eth_mask) {
- if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
- rte_is_zero_ether_addr(&eth_mask->src)) {
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr) &&
+ rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
filter->input.flow.l2_flow.dst =
- eth_spec->dst;
+ eth_spec->hdr.dst_addr;
input_set |= I40E_INSET_DMAC;
- } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
- rte_is_broadcast_ether_addr(&eth_mask->src)) {
+ } else if (rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) {
filter->input.flow.l2_flow.src =
- eth_spec->src;
+ eth_spec->hdr.src_addr;
input_set |= I40E_INSET_SMAC;
- } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
- rte_is_broadcast_ether_addr(&eth_mask->src)) {
+ } else if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr) &&
+ rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) {
filter->input.flow.l2_flow.dst =
- eth_spec->dst;
+ eth_spec->hdr.dst_addr;
filter->input.flow.l2_flow.src =
- eth_spec->src;
+ eth_spec->hdr.src_addr;
input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
- } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- !rte_is_zero_ether_addr(&eth_mask->dst)) {
+ } else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ !rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1690,7 +1690,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
if (eth_spec && eth_mask &&
next_type == RTE_FLOW_ITEM_TYPE_END) {
- if (eth_mask->type != RTE_BE16(0xffff)) {
+ if (eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1698,7 +1698,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- ether_type = rte_be_to_cpu_16(eth_spec->type);
+ ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
ether_type == RTE_ETHER_TYPE_IPV4 ||
@@ -1712,7 +1712,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
input_set |= I40E_INSET_LAST_ETHER_TYPE;
filter->input.flow.l2_flow.ether_type =
- eth_spec->type;
+ eth_spec->hdr.ether_type;
}
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -1725,13 +1725,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
if (vlan_spec && vlan_mask) {
- if (vlan_mask->tci !=
+ if (vlan_mask->hdr.vlan_tci !=
rte_cpu_to_be_16(I40E_VLAN_TCI_MASK) &&
- vlan_mask->tci !=
+ vlan_mask->hdr.vlan_tci !=
rte_cpu_to_be_16(I40E_VLAN_PRI_MASK) &&
- vlan_mask->tci !=
+ vlan_mask->hdr.vlan_tci !=
rte_cpu_to_be_16(I40E_VLAN_CFI_MASK) &&
- vlan_mask->tci !=
+ vlan_mask->hdr.vlan_tci !=
rte_cpu_to_be_16(I40E_VLAN_VID_MASK)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1740,10 +1740,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
input_set |= I40E_INSET_VLAN_INNER;
filter->input.flow_ext.vlan_tci =
- vlan_spec->tci;
+ vlan_spec->hdr.vlan_tci;
}
- if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
- if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ if (vlan_spec && vlan_mask && vlan_mask->hdr.eth_proto) {
+ if (vlan_mask->hdr.eth_proto != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1753,7 +1753,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
ether_type =
- rte_be_to_cpu_16(vlan_spec->inner_type);
+ rte_be_to_cpu_16(vlan_spec->hdr.eth_proto);
if (ether_type == RTE_ETHER_TYPE_IPV4 ||
ether_type == RTE_ETHER_TYPE_IPV6 ||
@@ -1766,7 +1766,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
input_set |= I40E_INSET_LAST_ETHER_TYPE;
filter->input.flow.l2_flow.ether_type =
- vlan_spec->inner_type;
+ vlan_spec->hdr.eth_proto;
}
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -2908,9 +2908,9 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
/* DST address of inner MAC shouldn't be masked.
* SRC address of Inner MAC should be masked.
*/
- if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
- !rte_is_zero_ether_addr(&eth_mask->src) ||
- eth_mask->type) {
+ if (!rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr) ||
+ !rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ eth_mask->hdr.ether_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -2920,12 +2920,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
- &eth_spec->dst,
+ &eth_spec->hdr.dst_addr,
RTE_ETHER_ADDR_LEN);
filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
- &eth_spec->dst,
+ &eth_spec->hdr.dst_addr,
RTE_ETHER_ADDR_LEN);
filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
}
@@ -2935,7 +2935,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_spec = item->spec;
vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask) ||
- vlan_mask->inner_type) {
+ vlan_mask->hdr.eth_proto) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -2944,10 +2944,10 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
if (vlan_spec && vlan_mask) {
- if (vlan_mask->tci ==
+ if (vlan_mask->hdr.vlan_tci ==
rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
filter->inner_vlan =
- rte_be_to_cpu_16(vlan_spec->tci) &
+ rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci) &
I40E_VLAN_TCI_MASK;
filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
}
@@ -3138,9 +3138,9 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
/* DST address of inner MAC shouldn't be masked.
* SRC address of Inner MAC should be masked.
*/
- if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
- !rte_is_zero_ether_addr(&eth_mask->src) ||
- eth_mask->type) {
+ if (!rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr) ||
+ !rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ eth_mask->hdr.ether_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -3150,12 +3150,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
if (!nvgre_flag) {
rte_memcpy(&filter->outer_mac,
- &eth_spec->dst,
+ &eth_spec->hdr.dst_addr,
RTE_ETHER_ADDR_LEN);
filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
- &eth_spec->dst,
+ &eth_spec->hdr.dst_addr,
RTE_ETHER_ADDR_LEN);
filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
}
@@ -3166,7 +3166,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_spec = item->spec;
vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask) ||
- vlan_mask->inner_type) {
+ vlan_mask->hdr.eth_proto) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -3175,10 +3175,10 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
}
if (vlan_spec && vlan_mask) {
- if (vlan_mask->tci ==
+ if (vlan_mask->hdr.vlan_tci ==
rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
filter->inner_vlan =
- rte_be_to_cpu_16(vlan_spec->tci) &
+ rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci) &
I40E_VLAN_TCI_MASK;
filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
}
@@ -3675,7 +3675,7 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask) ||
- vlan_mask->inner_type) {
+ vlan_mask->hdr.eth_proto) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -3701,8 +3701,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
/* Get filter specification */
if (o_vlan_mask != NULL && i_vlan_mask != NULL) {
- filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci);
- filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci);
+ filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->hdr.vlan_tci);
+ filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->hdr.vlan_tci);
} else {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -990,7 +990,7 @@ i40e_hash_parse_queue_region(const struct rte_eth_dev *dev,
vlan_spec = pattern->spec;
vlan_mask = pattern->mask;
if (!vlan_spec || !vlan_mask ||
- (rte_be_to_cpu_16(vlan_mask->tci) >> 13) != 7)
+ (rte_be_to_cpu_16(vlan_mask->hdr.vlan_tci) >> 13) != 7)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, pattern,
"Pattern error.");
@@ -1037,7 +1037,7 @@ i40e_hash_parse_queue_region(const struct rte_eth_dev *dev,
rss_conf->region_queue_num = (uint8_t)rss_act->queue_num;
rss_conf->region_queue_start = rss_act->queue[0];
- rss_conf->region_priority = rte_be_to_cpu_16(vlan_spec->tci) >> 13;
+ rss_conf->region_priority = rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci) >> 13;
return 0;
}
@@ -850,27 +850,27 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
}
if (eth_spec && eth_mask) {
- if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
input_set |= IAVF_INSET_DMAC;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
ETH,
DST);
- } else if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+ } else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
input_set |= IAVF_INSET_SMAC;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
ETH,
SRC);
}
- if (eth_mask->type) {
- if (eth_mask->type != RTE_BE16(0xffff)) {
+ if (eth_mask->hdr.ether_type) {
+ if (eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid type mask.");
return -rte_errno;
}
- ether_type = rte_be_to_cpu_16(eth_spec->type);
+ ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
if (ether_type == RTE_ETHER_TYPE_IPV4 ||
ether_type == RTE_ETHER_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
@@ -189,7 +189,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
if (eth_spec && eth_mask) {
input = &outer_input_set;
- if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
*input |= IAVF_INSET_DMAC;
input_set_byte += 6;
} else {
@@ -197,12 +197,12 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
input_set_byte += 6;
}
- if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
*input |= IAVF_INSET_SMAC;
input_set_byte += 6;
}
- if (eth_mask->type) {
+ if (eth_mask->hdr.ether_type) {
*input |= IAVF_INSET_ETHERTYPE;
input_set_byte += 2;
}
@@ -419,10 +419,10 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
*input |= IAVF_INSET_VLAN_OUTER;
- if (vlan_mask->tci)
+ if (vlan_mask->hdr.vlan_tci)
input_set_byte += 2;
- if (vlan_mask->inner_type) {
+ if (vlan_mask->hdr.eth_proto) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1690,9 +1690,9 @@ parse_eth_item(const struct rte_flow_item_eth *item,
struct rte_ether_hdr *eth)
{
memcpy(eth->src_addr.addr_bytes,
- item->src.addr_bytes, sizeof(eth->src_addr));
+ item->hdr.src_addr.addr_bytes, sizeof(eth->src_addr));
memcpy(eth->dst_addr.addr_bytes,
- item->dst.addr_bytes, sizeof(eth->dst_addr));
+ item->hdr.dst_addr.addr_bytes, sizeof(eth->dst_addr));
}
static void
@@ -675,36 +675,36 @@ ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- if (rte_is_broadcast_ether_addr(&eth_mask->src) ||
- rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr) ||
+ rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid mac addr mask");
return -rte_errno;
}
- if (!rte_is_zero_ether_addr(&eth_spec->src) &&
- !rte_is_zero_ether_addr(&eth_mask->src)) {
+ if (!rte_is_zero_ether_addr(&eth_spec->hdr.src_addr) &&
+ !rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
input_set |= ICE_INSET_SMAC;
ice_memcpy(&filter->input.ext_data.src_mac,
- &eth_spec->src,
+ &eth_spec->hdr.src_addr,
RTE_ETHER_ADDR_LEN,
ICE_NONDMA_TO_NONDMA);
ice_memcpy(&filter->input.ext_mask.src_mac,
- &eth_mask->src,
+ &eth_mask->hdr.src_addr,
RTE_ETHER_ADDR_LEN,
ICE_NONDMA_TO_NONDMA);
}
- if (!rte_is_zero_ether_addr(&eth_spec->dst) &&
- !rte_is_zero_ether_addr(&eth_mask->dst)) {
+ if (!rte_is_zero_ether_addr(&eth_spec->hdr.dst_addr) &&
+ !rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
input_set |= ICE_INSET_DMAC;
ice_memcpy(&filter->input.ext_data.dst_mac,
- &eth_spec->dst,
+ &eth_spec->hdr.dst_addr,
RTE_ETHER_ADDR_LEN,
ICE_NONDMA_TO_NONDMA);
ice_memcpy(&filter->input.ext_mask.dst_mac,
- &eth_mask->dst,
+ &eth_mask->hdr.dst_addr,
RTE_ETHER_ADDR_LEN,
ICE_NONDMA_TO_NONDMA);
}
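The pattern repeated throughout this parser is worth stating once: an all-zero mask wildcards a field, an all-ones (broadcast) mask demands an exact match, and anything in between matches per byte. A minimal standalone sketch, with hypothetical helper names:

#include <stdbool.h>
#include <rte_ether.h>
#include <rte_flow.h>

/* Hypothetical: true when the mask requests an exact source-MAC match. */
static bool
wants_exact_smac(const struct rte_flow_item_eth *mask)
{
	return mask != NULL &&
	       rte_is_broadcast_ether_addr(&mask->hdr.src_addr);
}

/* Hypothetical: a zero (or absent) mask wildcards the field entirely. */
static bool
ignores_smac(const struct rte_flow_item_eth *mask)
{
	return mask == NULL ||
	       rte_is_zero_ether_addr(&mask->hdr.src_addr);
}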
@@ -1971,17 +1971,17 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
if (!(eth_spec && eth_mask))
break;
- if (!rte_is_zero_ether_addr(&eth_mask->dst))
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr))
*input_set |= ICE_INSET_DMAC;
- if (!rte_is_zero_ether_addr(&eth_mask->src))
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr))
*input_set |= ICE_INSET_SMAC;
next_type = (item + 1)->type;
/* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
- if (eth_mask->type == RTE_BE16(0xffff) &&
+ if (eth_mask->hdr.ether_type == RTE_BE16(0xffff) &&
next_type == RTE_FLOW_ITEM_TYPE_END) {
*input_set |= ICE_INSET_ETHERTYPE;
- ether_type = rte_be_to_cpu_16(eth_spec->type);
+ ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
if (ether_type == RTE_ETHER_TYPE_IPV4 ||
ether_type == RTE_ETHER_TYPE_IPV6) {
@@ -1997,11 +1997,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
&filter->input.ext_data_outer :
&filter->input.ext_data;
rte_memcpy(&p_ext_data->src_mac,
- &eth_spec->src, RTE_ETHER_ADDR_LEN);
+ &eth_spec->hdr.src_addr, RTE_ETHER_ADDR_LEN);
rte_memcpy(&p_ext_data->dst_mac,
- &eth_spec->dst, RTE_ETHER_ADDR_LEN);
+ &eth_spec->hdr.dst_addr, RTE_ETHER_ADDR_LEN);
rte_memcpy(&p_ext_data->ether_type,
- &eth_spec->type, sizeof(eth_spec->type));
+ &eth_spec->hdr.ether_type, sizeof(eth_spec->hdr.ether_type));
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
@@ -592,8 +592,8 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
eth_spec = item->spec;
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- const uint8_t *a = eth_mask->src.addr_bytes;
- const uint8_t *b = eth_mask->dst.addr_bytes;
+ const uint8_t *a = eth_mask->hdr.src_addr.addr_bytes;
+ const uint8_t *b = eth_mask->hdr.dst_addr.addr_bytes;
if (tunnel_valid)
input = &inner_input_set;
else
@@ -610,7 +610,7 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
break;
}
}
- if (eth_mask->type)
+ if (eth_mask->hdr.ether_type)
*input |= ICE_INSET_ETHERTYPE;
list[t].type = (tunnel_valid == 0) ?
ICE_MAC_OFOS : ICE_MAC_IL;
@@ -620,31 +620,31 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
h = &list[t].h_u.eth_hdr;
m = &list[t].m_u.eth_hdr;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j]) {
+ if (eth_mask->hdr.src_addr.addr_bytes[j]) {
h->src_addr[j] =
- eth_spec->src.addr_bytes[j];
+ eth_spec->hdr.src_addr.addr_bytes[j];
m->src_addr[j] =
- eth_mask->src.addr_bytes[j];
+ eth_mask->hdr.src_addr.addr_bytes[j];
i = 1;
input_set_byte++;
}
- if (eth_mask->dst.addr_bytes[j]) {
+ if (eth_mask->hdr.dst_addr.addr_bytes[j]) {
h->dst_addr[j] =
- eth_spec->dst.addr_bytes[j];
+ eth_spec->hdr.dst_addr.addr_bytes[j];
m->dst_addr[j] =
- eth_mask->dst.addr_bytes[j];
+ eth_mask->hdr.dst_addr.addr_bytes[j];
i = 1;
input_set_byte++;
}
}
if (i)
t++;
- if (eth_mask->type) {
+ if (eth_mask->hdr.ether_type) {
list[t].type = ICE_ETYPE_OL;
list[t].h_u.ethertype.ethtype_id =
- eth_spec->type;
+ eth_spec->hdr.ether_type;
list[t].m_u.ethertype.ethtype_id =
- eth_mask->type;
+ eth_mask->hdr.ether_type;
input_set_byte += 2;
t++;
}
@@ -1087,14 +1087,14 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
*input |= ICE_INSET_VLAN_INNER;
}
- if (vlan_mask->tci) {
+ if (vlan_mask->hdr.vlan_tci) {
list[t].h_u.vlan_hdr.vlan =
- vlan_spec->tci;
+ vlan_spec->hdr.vlan_tci;
list[t].m_u.vlan_hdr.vlan =
- vlan_mask->tci;
+ vlan_mask->hdr.vlan_tci;
input_set_byte += 2;
}
- if (vlan_mask->inner_type) {
+ if (vlan_mask->hdr.eth_proto) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -1879,7 +1879,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
eth_mask = item->mask;
else
continue;
- if (eth_mask->type == UINT16_MAX)
+ if (eth_mask->hdr.ether_type == UINT16_MAX)
tun_type = ICE_SW_TUN_AND_NON_TUN;
}
@@ -327,14 +327,14 @@ igc_parse_pattern_ether(const struct rte_flow_item *item,
IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER);
/* destination and source MAC address are not supported */
- if (!rte_is_zero_ether_addr(&mask->src) ||
- !rte_is_zero_ether_addr(&mask->dst))
+ if (!rte_is_zero_ether_addr(&mask->hdr.src_addr) ||
+ !rte_is_zero_ether_addr(&mask->hdr.dst_addr))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"Only support ether-type");
/* ether-type mask bits must be all 1 */
- if (IGC_NOT_ALL_BITS_SET(mask->type))
+ if (IGC_NOT_ALL_BITS_SET(mask->hdr.ether_type))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"Ethernet type mask bits must be all 1");
@@ -342,7 +342,7 @@ igc_parse_pattern_ether(const struct rte_flow_item *item,
ether = &filter->ethertype;
/* get ether-type */
- ether->ether_type = rte_be_to_cpu_16(spec->type);
+ ether->ether_type = rte_be_to_cpu_16(spec->hdr.ether_type);
/* ether-type should not be IPv4 and IPv6 */
if (ether->ether_type == RTE_ETHER_TYPE_IPV4 ||
@@ -101,7 +101,7 @@ ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],
eth = item->spec;
rte_memcpy(&parser->key[0],
- eth->src.addr_bytes,
+ eth->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
break;
@@ -165,7 +165,7 @@ ipn3ke_pattern_mac(const struct rte_flow_item patterns[],
eth = item->spec;
rte_memcpy(parser->key,
- eth->src.addr_bytes,
+ eth->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
break;
@@ -227,13 +227,13 @@ ipn3ke_pattern_qinq(const struct rte_flow_item patterns[],
if (!outer_vlan) {
outer_vlan = item->spec;
- tci = rte_be_to_cpu_16(outer_vlan->tci);
+ tci = rte_be_to_cpu_16(outer_vlan->hdr.vlan_tci);
parser->key[0] = (tci & 0xff0) >> 4;
parser->key[1] |= (tci & 0x00f) << 4;
} else {
inner_vlan = item->spec;
- tci = rte_be_to_cpu_16(inner_vlan->tci);
+ tci = rte_be_to_cpu_16(inner_vlan->hdr.vlan_tci);
parser->key[1] |= (tci & 0xf00) >> 8;
parser->key[2] = (tci & 0x0ff);
}
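The QinQ parser above slices the 12-bit VID out of each TCI by hand. In general the TCI packs PCP (3 bits), DEI (1 bit) and VID (12 bits), which is also where testpmd's 0xe000, 0x1000 and 0x0fff masks come from; a sketch with a hypothetical helper:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical decomposition of a VLAN item's TCI (big-endian in the item). */
static void
split_tci(const struct rte_flow_item_vlan *spec,
	  uint8_t *pcp, uint8_t *dei, uint16_t *vid)
{
	uint16_t tci = rte_be_to_cpu_16(spec->hdr.vlan_tci);

	*pcp = (tci & 0xe000) >> 13; /* 3-bit priority code point */
	*dei = (tci & 0x1000) >> 12; /* drop eligible indicator */
	*vid = tci & 0x0fff;         /* 12-bit VLAN ID */
}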
@@ -744,16 +744,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- (!rte_is_zero_ether_addr(&eth_mask->dst) &&
- !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
return -rte_errno;
}
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ if ((eth_mask->hdr.ether_type & UINT16_MAX) != UINT16_MAX) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ethertype mask");
@@ -763,13 +763,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
- filter->mac_addr = eth_spec->dst;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
+ filter->mac_addr = eth_spec->hdr.dst_addr;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
}
- filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
/* Check if the next non-void item is END. */
item = next_no_void_pattern(pattern, item);
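A recurring detail in all of these parsers: hdr.ether_type is stored big-endian in the item, so constants are built with RTE_BE16() and reads go through rte_be_to_cpu_16() before any host-order comparison. Both directions in isolation, as a sketch with hypothetical function names:

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

/* Hypothetical read: convert once, then compare in host order. */
static int
spec_is_ipv4(const struct rte_flow_item_eth *spec)
{
	return rte_be_to_cpu_16(spec->hdr.ether_type) == RTE_ETHER_TYPE_IPV4;
}

/* Hypothetical write: convert the constant instead. */
static void
spec_set_ipv4(struct rte_flow_item_eth *spec)
{
	spec->hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
}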
@@ -1698,7 +1698,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
/* Get the dst MAC. */
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
- eth_spec->dst.addr_bytes[j];
+ eth_spec->hdr.dst_addr.addr_bytes[j];
}
}
@@ -1709,7 +1709,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
eth_mask = item->mask;
/* Ether type should be masked. */
- if (eth_mask->type ||
+ if (eth_mask->hdr.ether_type ||
rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1726,8 +1726,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
* and don't support dst MAC address mask.
*/
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ||
- eth_mask->dst.addr_bytes[j] != 0xFF) {
+ if (eth_mask->hdr.src_addr.addr_bytes[j] ||
+ eth_mask->hdr.dst_addr.addr_bytes[j] != 0xFF) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1790,9 +1790,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
vlan_spec = item->spec;
vlan_mask = item->mask;
- rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->hdr.vlan_tci;
- rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask = vlan_mask->hdr.vlan_tci;
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one tag is not supported. */
@@ -2642,7 +2642,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
eth_mask = item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->hdr.ether_type) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2652,7 +2652,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* src MAC address should be masked. */
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j]) {
+ if (eth_mask->hdr.src_addr.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2664,9 +2664,9 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->mask.mac_addr_byte_mask = 0;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
/* It's a per byte mask. */
- if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ if (eth_mask->hdr.dst_addr.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
- } else if (eth_mask->dst.addr_bytes[j]) {
+ } else if (eth_mask->hdr.dst_addr.addr_bytes[j]) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2685,7 +2685,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Get the dst MAC. */
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
- eth_spec->dst.addr_bytes[j];
+ eth_spec->hdr.dst_addr.addr_bytes[j];
}
}
@@ -2722,9 +2722,9 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
vlan_spec = item->spec;
vlan_mask = item->mask;
- rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->hdr.vlan_tci;
- rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask = vlan_mask->hdr.vlan_tci;
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one tag is not supported. */
@@ -207,17 +207,17 @@ mlx4_flow_merge_eth(struct rte_flow *flow,
uint32_t sum_dst = 0;
uint32_t sum_src = 0;
- for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
- sum_dst += mask->dst.addr_bytes[i];
- sum_src += mask->src.addr_bytes[i];
+ for (i = 0; i != sizeof(mask->hdr.dst_addr.addr_bytes); ++i) {
+ sum_dst += mask->hdr.dst_addr.addr_bytes[i];
+ sum_src += mask->hdr.src_addr.addr_bytes[i];
}
if (sum_src) {
msg = "mlx4 does not support source MAC matching";
goto error;
} else if (!sum_dst) {
flow->promisc = 1;
- } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
- if (!(spec->dst.addr_bytes[0] & 1)) {
+ } else if (sum_dst == 1 && mask->hdr.dst_addr.addr_bytes[0] == 1) {
+ if (!(spec->hdr.dst_addr.addr_bytes[0] & 1)) {
msg = "mlx4 does not support the explicit"
" exclusion of all multicast traffic";
goto error;
@@ -251,8 +251,8 @@ mlx4_flow_merge_eth(struct rte_flow *flow,
flow->promisc = 1;
return 0;
}
- memcpy(eth->val.dst_mac, spec->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
- memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
+ memcpy(eth->val.dst_mac, spec->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
+ memcpy(eth->mask.dst_mac, mask->hdr.dst_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
/* Remove unwanted bits from values. */
for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
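The test on addr_bytes[0] & 1 above is the Ethernet I/G bit: when set, the destination is a group (multicast or broadcast) address. rte_ether.h wraps the same test, as this sketch shows:

#include <rte_ether.h>
#include <rte_flow.h>

/* Sketch: higher-level equivalent of the bit test above. */
static int
dst_is_group_address(const struct rte_flow_item_eth *spec)
{
	return rte_is_multicast_ether_addr(&spec->hdr.dst_addr);
}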
@@ -297,12 +297,12 @@ mlx4_flow_merge_vlan(struct rte_flow *flow,
struct ibv_flow_spec_eth *eth;
const char *msg;
- if (!mask || !mask->tci) {
+ if (!mask || !mask->hdr.vlan_tci) {
msg = "mlx4 cannot match all VLAN traffic while excluding"
" non-VLAN traffic, TCI VID must be specified";
goto error;
}
- if (mask->tci != RTE_BE16(0x0fff)) {
+ if (mask->hdr.vlan_tci != RTE_BE16(0x0fff)) {
msg = "mlx4 does not support partial TCI VID matching";
goto error;
}
@@ -310,8 +310,8 @@ mlx4_flow_merge_vlan(struct rte_flow *flow,
return 0;
eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
sizeof(*eth));
- eth->val.vlan_tag = spec->tci;
- eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag = spec->hdr.vlan_tci;
+ eth->mask.vlan_tag = mask->hdr.vlan_tci;
eth->val.vlan_tag &= eth->mask.vlan_tag;
if (flow->ibv_attr->type == IBV_FLOW_ATTR_ALL_DEFAULT)
flow->ibv_attr->type = IBV_FLOW_ATTR_NORMAL;
@@ -582,7 +582,7 @@ static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
RTE_FLOW_ITEM_TYPE_IPV4),
.mask_support = &(const struct rte_flow_item_eth){
/* Only destination MAC can be matched. */
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
},
.mask_default = &rte_flow_item_eth_mask,
.mask_sz = sizeof(struct rte_flow_item_eth),
@@ -593,7 +593,7 @@ static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
.mask_support = &(const struct rte_flow_item_vlan){
/* Only TCI VID matching is supported. */
- .tci = RTE_BE16(0x0fff),
+ .hdr.vlan_tci = RTE_BE16(0x0fff),
},
.mask_default = &rte_flow_item_vlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vlan),
@@ -1304,14 +1304,14 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
};
struct rte_flow_item_eth eth_spec;
const struct rte_flow_item_eth eth_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
const struct rte_flow_item_eth eth_allmulti = {
- .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
};
struct rte_flow_item_vlan vlan_spec;
const struct rte_flow_item_vlan vlan_mask = {
- .tci = RTE_BE16(0x0fff),
+ .hdr.vlan_tci = RTE_BE16(0x0fff),
};
struct rte_flow_item pattern[] = {
{
@@ -1356,12 +1356,12 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct rte_ether_addr *rule_mac = &eth_spec.dst;
+ struct rte_ether_addr *rule_mac = &eth_spec.hdr.dst_addr;
rte_be16_t *rule_vlan =
(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
!ETH_DEV(priv)->data->promiscuous ?
- &vlan_spec.tci :
+ &vlan_spec.hdr.vlan_tci :
NULL;
uint16_t vlan = 0;
struct rte_flow *flow;
@@ -1399,7 +1399,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
if (i < RTE_DIM(priv->mac))
mac = &priv->mac[i];
else
- mac = &eth_mask.dst;
+ mac = &eth_mask.hdr.dst_addr;
if (rte_is_zero_ether_addr(mac))
continue;
/* Check if MAC flow rule is already present. */
@@ -276,13 +276,13 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
return RTE_FLOW_ITEM_TYPE_VOID;
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- MLX5_XSET_ITEM_MASK_SPEC(eth, type);
+ MLX5_XSET_ITEM_MASK_SPEC(eth, hdr.ether_type);
if (!mask)
return RTE_FLOW_ITEM_TYPE_VOID;
ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
+ MLX5_XSET_ITEM_MASK_SPEC(vlan, hdr.eth_proto);
if (!mask)
return RTE_FLOW_ITEM_TYPE_VOID;
ret = mlx5_ethertype_to_item_type(spec, mask, false);
@@ -2328,9 +2328,9 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
{
const struct rte_flow_item_eth *mask = item->mask;
const struct rte_flow_item_eth nic_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = RTE_BE16(0xffff),
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.ether_type = RTE_BE16(0xffff),
.has_vlan = ext_vlan_sup ? 1 : 0,
};
int ret;
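These nic_mask structures declare what the device can match on; validation then rejects any user mask that sets bits outside them. Stripped of driver detail, the acceptability test reduces to a byte-wise subset check, sketched here with a hypothetical helper:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical subset test: every bit set in 'user' must be set in 'supp'. */
static int
mask_is_supported(const uint8_t *user, const uint8_t *supp, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (user[i] & ~supp[i])
			return 0;
	return 1;
}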
@@ -2390,8 +2390,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(UINT16_MAX),
- .inner_type = RTE_BE16(UINT16_MAX),
+ .hdr.vlan_tci = RTE_BE16(UINT16_MAX),
+ .hdr.eth_proto = RTE_BE16(UINT16_MAX),
};
uint16_t vlan_tag = 0;
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -2419,7 +2419,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
- if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ if (!tunnel && mask->hdr.vlan_tci != RTE_BE16(0x0fff)) {
struct mlx5_priv *priv = dev->data->dev_private;
if (priv->vmwa_context) {
@@ -2439,8 +2439,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
}
}
if (spec) {
- vlan_tag = spec->tci;
- vlan_tag &= mask->tci;
+ vlan_tag = spec->hdr.vlan_tci;
+ vlan_tag &= mask->hdr.vlan_tci;
}
/*
* From verbs perspective an empty VLAN is equivalent
@@ -7669,10 +7669,10 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
* a multicast dst mac causes kernel to give low priority to this flow.
*/
static const struct rte_flow_item_eth lacp_spec = {
- .type = RTE_BE16(0x8809),
+ .hdr.ether_type = RTE_BE16(0x8809),
};
static const struct rte_flow_item_eth lacp_mask = {
- .type = 0xffff,
+ .hdr.ether_type = 0xffff,
};
const struct rte_flow_attr attr = {
.ingress = 1,
@@ -644,17 +644,17 @@ flow_dv_convert_action_modify_mac
memset(&eth, 0, sizeof(eth));
memset(&eth_mask, 0, sizeof(eth_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
- memcpy(&eth.src.addr_bytes, &conf->mac_addr,
- sizeof(eth.src.addr_bytes));
- memcpy(&eth_mask.src.addr_bytes,
- &rte_flow_item_eth_mask.src.addr_bytes,
- sizeof(eth_mask.src.addr_bytes));
+ memcpy(&eth.hdr.src_addr.addr_bytes, &conf->mac_addr,
+ sizeof(eth.hdr.src_addr.addr_bytes));
+ memcpy(&eth_mask.hdr.src_addr.addr_bytes,
+ &rte_flow_item_eth_mask.hdr.src_addr.addr_bytes,
+ sizeof(eth_mask.hdr.src_addr.addr_bytes));
} else {
- memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
- sizeof(eth.dst.addr_bytes));
- memcpy(&eth_mask.dst.addr_bytes,
- &rte_flow_item_eth_mask.dst.addr_bytes,
- sizeof(eth_mask.dst.addr_bytes));
+ memcpy(&eth.hdr.dst_addr.addr_bytes, &conf->mac_addr,
+ sizeof(eth.hdr.dst_addr.addr_bytes));
+ memcpy(&eth_mask.hdr.dst_addr.addr_bytes,
+ &rte_flow_item_eth_mask.hdr.dst_addr.addr_bytes,
+ sizeof(eth_mask.hdr.dst_addr.addr_bytes));
}
item.spec = &eth;
item.mask = &eth_mask;
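For contrast with this driver-side conversion, the application side of the same operation is a single SET_MAC action; a hedged sketch using a hypothetical locally administered address:

#include <rte_flow.h>

/* Hypothetical action list rewriting the source MAC of matched packets. */
static const struct rte_flow_action_set_mac new_smac = {
	.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};
static const struct rte_flow_action rewrite_smac[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC, .conf = &new_smac },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};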
@@ -2303,8 +2303,8 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
{
const struct rte_flow_item_vlan *mask = item->mask;
const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(UINT16_MAX),
- .inner_type = RTE_BE16(UINT16_MAX),
+ .hdr.vlan_tci = RTE_BE16(UINT16_MAX),
+ .hdr.eth_proto = RTE_BE16(UINT16_MAX),
.has_more_vlan = 1,
};
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -2332,7 +2332,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
- if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ if (!tunnel && mask->hdr.vlan_tci != RTE_BE16(0x0fff)) {
struct mlx5_priv *priv = dev->data->dev_private;
if (priv->vmwa_context) {
@@ -2871,9 +2871,9 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
struct rte_vlan_hdr *vlan)
{
const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
+ .hdr.vlan_tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
MLX5DV_FLOW_VLAN_VID_MASK),
- .inner_type = RTE_BE16(0xffff),
+ .hdr.eth_proto = RTE_BE16(0xffff),
};
if (items == NULL)
@@ -2895,23 +2895,23 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
if (!vlan_m)
vlan_m = &nic_mask;
/* Only full match values are accepted */
- if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
+ if ((vlan_m->hdr.vlan_tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
vlan->vlan_tci |=
- rte_be_to_cpu_16(vlan_v->tci &
+ rte_be_to_cpu_16(vlan_v->hdr.vlan_tci &
MLX5DV_FLOW_VLAN_PCP_MASK_BE);
}
- if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
+ if ((vlan_m->hdr.vlan_tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
MLX5DV_FLOW_VLAN_VID_MASK_BE) {
vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
vlan->vlan_tci |=
- rte_be_to_cpu_16(vlan_v->tci &
+ rte_be_to_cpu_16(vlan_v->hdr.vlan_tci &
MLX5DV_FLOW_VLAN_VID_MASK_BE);
}
- if (vlan_m->inner_type == nic_mask.inner_type)
- vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
- vlan_m->inner_type);
+ if (vlan_m->hdr.eth_proto == nic_mask.hdr.eth_proto)
+ vlan->eth_proto = rte_be_to_cpu_16(vlan_v->hdr.eth_proto &
+ vlan_m->hdr.eth_proto);
}
}
@@ -2961,8 +2961,8 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
"push vlan action for VF representor "
"not supported on NIC table");
if (vlan_m &&
- (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
- (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
+ (vlan_m->hdr.vlan_tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
+ (vlan_m->hdr.vlan_tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
!(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
!(mlx5_flow_find_action
@@ -2974,8 +2974,8 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
"push VLAN action cannot figure out "
"PCP value");
if (vlan_m &&
- (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
- (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
+ (vlan_m->hdr.vlan_tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
+ (vlan_m->hdr.vlan_tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
MLX5DV_FLOW_VLAN_VID_MASK_BE &&
!(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
!(mlx5_flow_find_action
@@ -7076,10 +7076,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
if (items->mask != NULL && items->spec != NULL) {
ether_type =
((const struct rte_flow_item_eth *)
- items->spec)->type;
+ items->spec)->hdr.ether_type;
ether_type &=
((const struct rte_flow_item_eth *)
- items->mask)->type;
+ items->mask)->hdr.ether_type;
ether_type = rte_be_to_cpu_16(ether_type);
} else {
ether_type = 0;
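ANDing the spec with the mask before interpreting it, as above, keeps wildcarded bits from influencing the decision. The idiom in isolation, as a hypothetical helper:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Effective EtherType of an ETH item: spec AND mask, then host order.
 * Returns 0 when either pointer is absent (nothing to interpret). */
static uint16_t
effective_ether_type(const struct rte_flow_item *item)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;

	if (spec == NULL || mask == NULL)
		return 0;
	return rte_be_to_cpu_16(spec->hdr.ether_type & mask->hdr.ether_type);
}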
@@ -7095,10 +7095,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
if (items->mask != NULL && items->spec != NULL) {
ether_type =
((const struct rte_flow_item_vlan *)
- items->spec)->inner_type;
+ items->spec)->hdr.eth_proto;
ether_type &=
((const struct rte_flow_item_vlan *)
- items->mask)->inner_type;
+ items->mask)->hdr.eth_proto;
ether_type = rte_be_to_cpu_16(ether_type);
} else {
ether_type = 0;
@@ -8356,9 +8356,9 @@ flow_dv_translate_item_eth(void *matcher, void *key,
const struct rte_flow_item_eth *eth_m = item->mask;
const struct rte_flow_item_eth *eth_v = item->spec;
const struct rte_flow_item_eth nic_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = RTE_BE16(0xffff),
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.ether_type = RTE_BE16(0xffff),
.has_vlan = 0,
};
void *hdrs_m;
@@ -8380,17 +8380,17 @@ flow_dv_translate_item_eth(void *matcher, void *key,
hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
- &eth_m->dst, sizeof(eth_m->dst));
+ &eth_m->hdr.dst_addr, sizeof(eth_m->hdr.dst_addr));
/* The value must be in the range of the mask. */
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
- for (i = 0; i < sizeof(eth_m->dst); ++i)
- l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
+ for (i = 0; i < sizeof(eth_m->hdr.dst_addr); ++i)
+ l24_v[i] = eth_m->hdr.dst_addr.addr_bytes[i] & eth_v->hdr.dst_addr.addr_bytes[i];
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
- &eth_m->src, sizeof(eth_m->src));
+ &eth_m->hdr.src_addr, sizeof(eth_m->hdr.src_addr));
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
/* The value must be in the range of the mask. */
- for (i = 0; i < sizeof(eth_m->dst); ++i)
- l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+ for (i = 0; i < sizeof(eth_m->hdr.dst_addr); ++i)
+ l24_v[i] = eth_m->hdr.src_addr.addr_bytes[i] & eth_v->hdr.src_addr.addr_bytes[i];
/*
* HW supports match on one Ethertype, the Ethertype following the last
* VLAN tag of the packet (see PRM).
@@ -8399,10 +8399,10 @@ flow_dv_translate_item_eth(void *matcher, void *key,
* ethertype, and use ip_version field instead.
* eCPRI over Ether layer will use type value 0xAEFE.
*/
- if (eth_m->type == 0xFFFF) {
+ if (eth_m->hdr.ether_type == 0xFFFF) {
/* Set cvlan_tag mask for any single/multi/un-tagged case. */
MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
- switch (eth_v->type) {
+ switch (eth_v->hdr.ether_type) {
case RTE_BE16(RTE_ETHER_TYPE_VLAN):
MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
return;
@@ -8432,9 +8432,9 @@ flow_dv_translate_item_eth(void *matcher, void *key,
}
}
MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
+ rte_be_to_cpu_16(eth_m->hdr.ether_type));
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ *(uint16_t *)(l24_v) = eth_m->hdr.ether_type & eth_v->hdr.ether_type;
}
/**
@@ -8478,7 +8478,7 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
*/
if (vlan_v)
dev_flow->handle->vf_vlan.tag =
- rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ rte_be_to_cpu_16(vlan_v->hdr.vlan_tci) & 0x0fff;
}
/*
* When VLAN item exists in flow, mark packet as tagged,
@@ -8492,8 +8492,8 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
return;
if (!vlan_m)
vlan_m = &rte_flow_item_vlan_mask;
- tci_m = rte_be_to_cpu_16(vlan_m->tci);
- tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+ tci_m = rte_be_to_cpu_16(vlan_m->hdr.vlan_tci);
+ tci_v = rte_be_to_cpu_16(vlan_m->hdr.vlan_tci & vlan_v->hdr.vlan_tci);
MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
@@ -8504,8 +8504,8 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
* HW is optimized for IPv4/IPv6. In such cases, avoid setting
* ethertype, and use ip_version field instead.
*/
- if (vlan_m->inner_type == 0xFFFF) {
- switch (vlan_v->inner_type) {
+ if (vlan_m->hdr.eth_proto == 0xFFFF) {
+ switch (vlan_v->hdr.eth_proto) {
case RTE_BE16(RTE_ETHER_TYPE_VLAN):
MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
@@ -8529,9 +8529,9 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
return;
}
MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type));
+ rte_be_to_cpu_16(vlan_m->hdr.eth_proto));
MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
+ rte_be_to_cpu_16(vlan_m->hdr.eth_proto & vlan_v->hdr.eth_proto));
}
/**
@@ -430,16 +430,16 @@ flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
if (spec) {
unsigned int i;
- memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
+ memcpy(&eth.val.dst_mac, spec->hdr.dst_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- memcpy(&eth.val.src_mac, spec->src.addr_bytes,
+ memcpy(&eth.val.src_mac, spec->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- eth.val.ether_type = spec->type;
- memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
+ eth.val.ether_type = spec->hdr.ether_type;
+ memcpy(&eth.mask.dst_mac, mask->hdr.dst_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
+ memcpy(&eth.mask.src_mac, mask->hdr.src_addr.addr_bytes,
RTE_ETHER_ADDR_LEN);
- eth.mask.ether_type = mask->type;
+ eth.mask.ether_type = mask->hdr.ether_type;
/* Remove unwanted bits from values. */
for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
@@ -515,11 +515,11 @@ flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
if (!mask)
mask = &rte_flow_item_vlan_mask;
if (spec) {
- eth.val.vlan_tag = spec->tci;
- eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag = spec->hdr.vlan_tci;
+ eth.mask.vlan_tag = mask->hdr.vlan_tci;
eth.val.vlan_tag &= eth.mask.vlan_tag;
- eth.val.ether_type = spec->inner_type;
- eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type = spec->hdr.eth_proto;
+ eth.mask.ether_type = mask->hdr.eth_proto;
eth.val.ether_type &= eth.mask.ether_type;
}
if (!(item_flags & l2m))
@@ -528,7 +528,7 @@ flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
if (!tunnel)
dev_flow->handle->vf_vlan.tag =
- rte_be_to_cpu_16(spec->tci) & 0x0fff;
+ rte_be_to_cpu_16(spec->hdr.vlan_tci) & 0x0fff;
}
/**
@@ -1268,10 +1268,10 @@ flow_verbs_validate(struct rte_eth_dev *dev,
if (items->mask != NULL && items->spec != NULL) {
ether_type =
((const struct rte_flow_item_eth *)
- items->spec)->type;
+ items->spec)->hdr.ether_type;
ether_type &=
((const struct rte_flow_item_eth *)
- items->mask)->type;
+ items->mask)->hdr.ether_type;
if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
is_empty_vlan = true;
ether_type = rte_be_to_cpu_16(ether_type);
@@ -1291,10 +1291,10 @@ flow_verbs_validate(struct rte_eth_dev *dev,
if (items->mask != NULL && items->spec != NULL) {
ether_type =
((const struct rte_flow_item_vlan *)
- items->spec)->inner_type;
+ items->spec)->hdr.eth_proto;
ether_type &=
((const struct rte_flow_item_vlan *)
- items->mask)->inner_type;
+ items->mask)->hdr.eth_proto;
ether_type = rte_be_to_cpu_16(ether_type);
} else {
ether_type = 0;
@@ -1294,19 +1294,19 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_item_eth bcast = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
struct rte_flow_item_eth ipv6_multi_spec = {
- .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
};
struct rte_flow_item_eth ipv6_multi_mask = {
- .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\x00\x00\x00\x00",
};
struct rte_flow_item_eth unicast = {
- .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
};
struct rte_flow_item_eth unicast_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
const unsigned int vlan_filter_n = priv->vlan_filter_n;
const struct rte_ether_addr cmp = {
@@ -1367,9 +1367,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
return 0;
if (dev->data->promiscuous) {
struct rte_flow_item_eth promisc = {
- .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .type = 0,
+ .hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .hdr.ether_type = 0,
};
ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
@@ -1378,9 +1378,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
}
if (dev->data->all_multicast) {
struct rte_flow_item_eth multicast = {
- .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
- .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .type = 0,
+ .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .hdr.ether_type = 0,
};
ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
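These control flows pack three idioms into one function: an all-zero spec and mask matches every packet (promiscuous), masking only the I/G bit of the destination matches all multicast, and the unicast rules below pin one complete MAC. The multicast variant as an application could write it, sketched:

#include <rte_flow.h>

/* Hypothetical standalone pattern matching every multicast destination. */
static const struct rte_flow_item_eth mcast_spec = {
	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
};
static const struct rte_flow_item_eth mcast_mask = {
	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
};
static const struct rte_flow_item mcast_pattern[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &mcast_spec,
		.mask = &mcast_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};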
@@ -1392,7 +1392,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
uint16_t vlan = priv->vlan_filter[i];
struct rte_flow_item_vlan vlan_spec = {
- .tci = rte_cpu_to_be_16(vlan),
+ .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
};
struct rte_flow_item_vlan vlan_mask =
rte_flow_item_vlan_mask;
@@ -1427,14 +1427,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
if (!memcmp(mac, &cmp, sizeof(*mac)))
continue;
- memcpy(&unicast.dst.addr_bytes,
+ memcpy(&unicast.hdr.dst_addr.addr_bytes,
mac->addr_bytes,
RTE_ETHER_ADDR_LEN);
for (j = 0; j != vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
struct rte_flow_item_vlan vlan_spec = {
- .tci = rte_cpu_to_be_16(vlan),
+ .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
};
struct rte_flow_item_vlan vlan_mask =
rte_flow_item_vlan_mask;
@@ -189,14 +189,14 @@ mrvl_parse_mac(const struct rte_flow_item_eth *spec,
const uint8_t *k, *m;
if (parse_dst) {
- k = spec->dst.addr_bytes;
- m = mask->dst.addr_bytes;
+ k = spec->hdr.dst_addr.addr_bytes;
+ m = mask->hdr.dst_addr.addr_bytes;
flow->table_key.proto_field[flow->rule.num_fields].field.eth =
MV_NET_ETH_F_DA;
} else {
- k = spec->src.addr_bytes;
- m = mask->src.addr_bytes;
+ k = spec->hdr.src_addr.addr_bytes;
+ m = mask->hdr.src_addr.addr_bytes;
flow->table_key.proto_field[flow->rule.num_fields].field.eth =
MV_NET_ETH_F_SA;
@@ -275,7 +275,7 @@ mrvl_parse_type(const struct rte_flow_item_eth *spec,
mrvl_alloc_key_mask(key_field);
key_field->size = 2;
- k = rte_be_to_cpu_16(spec->type);
+ k = rte_be_to_cpu_16(spec->hdr.ether_type);
snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
flow->table_key.proto_field[flow->rule.num_fields].proto =
@@ -311,7 +311,7 @@ mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
mrvl_alloc_key_mask(key_field);
key_field->size = 2;
- k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
+ k = rte_be_to_cpu_16(spec->hdr.vlan_tci) & MRVL_VLAN_ID_MASK;
snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
flow->table_key.proto_field[flow->rule.num_fields].proto =
@@ -347,7 +347,7 @@ mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
mrvl_alloc_key_mask(key_field);
key_field->size = 1;
- k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
+ k = (rte_be_to_cpu_16(spec->hdr.vlan_tci) & MRVL_VLAN_PRI_MASK) >> 13;
snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
flow->table_key.proto_field[flow->rule.num_fields].proto =
@@ -856,19 +856,19 @@ mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
memset(&zero, 0, sizeof(zero));
- if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
+ if (memcmp(&mask->hdr.dst_addr, &zero, sizeof(mask->hdr.dst_addr))) {
ret = mrvl_parse_dmac(spec, mask, flow);
if (ret)
goto out;
}
- if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
+ if (memcmp(&mask->hdr.src_addr, &zero, sizeof(mask->hdr.src_addr))) {
ret = mrvl_parse_smac(spec, mask, flow);
if (ret)
goto out;
}
- if (mask->type) {
+ if (mask->hdr.ether_type) {
MRVL_LOG(WARNING, "eth type mask is ignored");
ret = mrvl_parse_type(spec, mask, flow);
if (ret)
@@ -905,7 +905,7 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
if (ret)
return ret;
- m = rte_be_to_cpu_16(mask->tci);
+ m = rte_be_to_cpu_16(mask->hdr.vlan_tci);
if (m & MRVL_VLAN_ID_MASK) {
MRVL_LOG(WARNING, "vlan id mask is ignored");
ret = mrvl_parse_vlan_id(spec, mask, flow);
@@ -920,12 +920,12 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
goto out;
}
- if (mask->inner_type) {
+ if (mask->hdr.eth_proto) {
struct rte_flow_item_eth spec_eth = {
- .type = spec->inner_type,
+ .hdr.ether_type = spec->hdr.eth_proto,
};
struct rte_flow_item_eth mask_eth = {
- .type = mask->inner_type,
+ .hdr.ether_type = mask->hdr.eth_proto,
};
/* TPID is not supported so if ETH_TYPE was selected,
@@ -280,12 +280,12 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
const struct rte_flow_item_eth *spec = NULL;
const struct rte_flow_item_eth *mask = NULL;
const struct rte_flow_item_eth supp_mask = {
- .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- .type = 0xffff,
+ .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .hdr.ether_type = 0xffff,
};
const struct rte_flow_item_eth ifrm_supp_mask = {
- .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00
@@ -319,15 +319,15 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
if (spec == NULL)
return 0;
- if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
+ if (rte_is_same_ether_addr(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr)) {
efx_spec->efs_match_flags |= is_ifrm ?
EFX_FILTER_MATCH_IFRM_LOC_MAC :
EFX_FILTER_MATCH_LOC_MAC;
- rte_memcpy(loc_mac, spec->dst.addr_bytes,
+ rte_memcpy(loc_mac, spec->hdr.dst_addr.addr_bytes,
EFX_MAC_ADDR_LEN);
- } else if (memcmp(mask->dst.addr_bytes, ig_mask,
+ } else if (memcmp(mask->hdr.dst_addr.addr_bytes, ig_mask,
EFX_MAC_ADDR_LEN) == 0) {
- if (rte_is_unicast_ether_addr(&spec->dst))
+ if (rte_is_unicast_ether_addr(&spec->hdr.dst_addr))
efx_spec->efs_match_flags |= is_ifrm ?
EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
@@ -335,7 +335,7 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
efx_spec->efs_match_flags |= is_ifrm ?
EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
- } else if (!rte_is_zero_ether_addr(&mask->dst)) {
+ } else if (!rte_is_zero_ether_addr(&mask->hdr.dst_addr)) {
goto fail_bad_mask;
}
@@ -344,11 +344,11 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
* ethertype masks are equal to zero in inner frame,
* so these fields are filled in only for the outer frame
*/
- if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
+ if (rte_is_same_ether_addr(&mask->hdr.src_addr, &supp_mask.hdr.src_addr)) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
- rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
+ rte_memcpy(efx_spec->efs_rem_mac, spec->hdr.src_addr.addr_bytes,
EFX_MAC_ADDR_LEN);
- } else if (!rte_is_zero_ether_addr(&mask->src)) {
+ } else if (!rte_is_zero_ether_addr(&mask->hdr.src_addr)) {
goto fail_bad_mask;
}
@@ -356,10 +356,10 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
* Ether type is in big-endian byte order in item and
* in little-endian in efx_spec, so byte swap is used
*/
- if (mask->type == supp_mask.type) {
+ if (mask->hdr.ether_type == supp_mask.hdr.ether_type) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
- efx_spec->efs_ether_type = rte_bswap16(spec->type);
- } else if (mask->type != 0) {
+ efx_spec->efs_ether_type = rte_bswap16(spec->hdr.ether_type);
+ } else if (mask->hdr.ether_type != 0) {
goto fail_bad_mask;
}
@@ -394,8 +394,8 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
const struct rte_flow_item_vlan *spec = NULL;
const struct rte_flow_item_vlan *mask = NULL;
const struct rte_flow_item_vlan supp_mask = {
- .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
- .inner_type = RTE_BE16(0xffff),
+ .hdr.vlan_tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
+ .hdr.eth_proto = RTE_BE16(0xffff),
};
rc = sfc_flow_parse_init(item,
@@ -414,9 +414,9 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
* If two VLAN items are included, the first matches
* the outer tag and the next matches the inner tag.
*/
- if (mask->tci == supp_mask.tci) {
+ if (mask->hdr.vlan_tci == supp_mask.hdr.vlan_tci) {
/* Apply mask to keep VID only */
- vid = rte_bswap16(spec->tci & mask->tci);
+ vid = rte_bswap16(spec->hdr.vlan_tci & mask->hdr.vlan_tci);
if (!(efx_spec->efs_match_flags &
EFX_FILTER_MATCH_OUTER_VID)) {
@@ -445,13 +445,13 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
"VLAN TPID matching is not supported");
return -rte_errno;
}
- if (mask->inner_type == supp_mask.inner_type) {
+ if (mask->hdr.eth_proto == supp_mask.hdr.eth_proto) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
- efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
- } else if (mask->inner_type) {
+ efx_spec->efs_ether_type = rte_bswap16(spec->hdr.eth_proto);
+ } else if (mask->hdr.eth_proto) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Bad mask for VLAN inner_type");
+ "Bad mask for VLAN inner type");
return -rte_errno;
}
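As the comment in sfc_flow_parse_vlan notes, a second VLAN item matches the inner tag. A hedged sketch of such a QinQ pattern, with arbitrary example VIDs and the default masks applying where none is given:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical QinQ match: outer tag first, inner tag second. */
static const struct rte_flow_item_vlan outer_tag = {
	.hdr.vlan_tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan inner_tag = {
	.hdr.vlan_tci = RTE_BE16(200),
};
static const struct rte_flow_item qinq_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &outer_tag },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &inner_tag },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};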
@@ -1701,18 +1701,18 @@ static const struct sfc_mae_field_locator flocs_eth[] = {
* The field is handled by sfc_mae_rule_process_pattern_data().
*/
SFC_MAE_FIELD_HANDLING_DEFERRED,
- RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
- offsetof(struct rte_flow_item_eth, type),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_eth, hdr.ether_type),
+ offsetof(struct rte_flow_item_eth, hdr.ether_type),
},
{
EFX_MAE_FIELD_ETH_DADDR_BE,
- RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
- offsetof(struct rte_flow_item_eth, dst),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_eth, hdr.dst_addr),
+ offsetof(struct rte_flow_item_eth, hdr.dst_addr),
},
{
EFX_MAE_FIELD_ETH_SADDR_BE,
- RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
- offsetof(struct rte_flow_item_eth, src),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_eth, hdr.src_addr),
+ offsetof(struct rte_flow_item_eth, hdr.src_addr),
},
};
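These locator tables keep working after the rename because offsetof() accepts a full member designator, so nesting through hdr is plain ISO C. A standalone demonstration, using sizeof directly in place of RTE_SIZEOF_FIELD:

#include <stddef.h>
#include <stdio.h>
#include <rte_flow.h>

int
main(void)
{
	/* Both expressions are compile-time constants. */
	printf("hdr.ether_type: offset=%zu size=%zu\n",
	       offsetof(struct rte_flow_item_eth, hdr.ether_type),
	       sizeof(((struct rte_flow_item_eth *)0)->hdr.ether_type));
	return 0;
}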
@@ -1770,8 +1770,8 @@ sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
* sfc_mae_rule_process_pattern_data() will consider them
* altogether when the rest of the items have been parsed.
*/
- ethertypes[0].value = item_spec->type;
- ethertypes[0].mask = item_mask->type;
+ ethertypes[0].value = item_spec->hdr.ether_type;
+ ethertypes[0].mask = item_mask->hdr.ether_type;
if (item_mask->has_vlan) {
pdata->has_ovlan_mask = B_TRUE;
if (item_spec->has_vlan)
@@ -1794,8 +1794,8 @@ static const struct sfc_mae_field_locator flocs_vlan[] = {
/* Outermost tag */
{
EFX_MAE_FIELD_VLAN0_TCI_BE,
- RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
- offsetof(struct rte_flow_item_vlan, tci),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, hdr.vlan_tci),
+ offsetof(struct rte_flow_item_vlan, hdr.vlan_tci),
},
{
/*
@@ -1803,15 +1803,15 @@ static const struct sfc_mae_field_locator flocs_vlan[] = {
* The field is handled by sfc_mae_rule_process_pattern_data().
*/
SFC_MAE_FIELD_HANDLING_DEFERRED,
- RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
- offsetof(struct rte_flow_item_vlan, inner_type),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, hdr.eth_proto),
+ offsetof(struct rte_flow_item_vlan, hdr.eth_proto),
},
/* Innermost tag */
{
EFX_MAE_FIELD_VLAN1_TCI_BE,
- RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
- offsetof(struct rte_flow_item_vlan, tci),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, hdr.vlan_tci),
+ offsetof(struct rte_flow_item_vlan, hdr.vlan_tci),
},
{
/*
@@ -1819,8 +1819,8 @@ static const struct sfc_mae_field_locator flocs_vlan[] = {
* The field is handled by sfc_mae_rule_process_pattern_data().
*/
SFC_MAE_FIELD_HANDLING_DEFERRED,
- RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
- offsetof(struct rte_flow_item_vlan, inner_type),
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, hdr.eth_proto),
+ offsetof(struct rte_flow_item_vlan, hdr.eth_proto),
},
};
@@ -1899,9 +1899,9 @@ sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
* sfc_mae_rule_process_pattern_data() will consider them
* altogether when the rest of the items have been parsed.
*/
- et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
- et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
- pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
+ et[pdata->nb_vlan_tags + 1].value = item_spec->hdr.eth_proto;
+ et[pdata->nb_vlan_tags + 1].mask = item_mask->hdr.eth_proto;
+ pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->hdr.vlan_tci;
if (item_mask->has_more_vlan) {
if (pdata->nb_vlan_tags ==
SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
@@ -258,9 +258,9 @@ static const struct tap_flow_items tap_flow_items[] = {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_IPV6),
.mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = -1,
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.ether_type = -1,
},
.mask_sz = sizeof(struct rte_flow_item_eth),
.default_mask = &rte_flow_item_eth_mask,
@@ -272,11 +272,11 @@ static const struct tap_flow_items tap_flow_items[] = {
.mask = &(const struct rte_flow_item_vlan){
/* DEI matching is not supported */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- .tci = 0xffef,
+ .hdr.vlan_tci = 0xffef,
#else
- .tci = 0xefff,
+ .hdr.vlan_tci = 0xefff,
#endif
- .inner_type = -1,
+ .hdr.eth_proto = -1,
},
.mask_sz = sizeof(struct rte_flow_item_vlan),
.default_mask = &rte_flow_item_vlan_mask,
@@ -391,7 +391,7 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
.items[0] = {
.type = RTE_FLOW_ITEM_TYPE_ETH,
.mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
},
},
.items[1] = {
@@ -408,10 +408,10 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
.items[0] = {
.type = RTE_FLOW_ITEM_TYPE_ETH,
.mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
},
.spec = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
},
},
.items[1] = {
@@ -428,10 +428,10 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
.items[0] = {
.type = RTE_FLOW_ITEM_TYPE_ETH,
.mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
},
.spec = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
},
},
.items[1] = {
@@ -462,10 +462,10 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
.items[0] = {
.type = RTE_FLOW_ITEM_TYPE_ETH,
.mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
},
.spec = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
},
},
.items[1] = {
@@ -527,31 +527,31 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
if (!mask)
mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
/* TC does not support eth_type masking. Only accept if exact match. */
- if (mask->type && mask->type != 0xffff)
+ if (mask->hdr.ether_type && mask->hdr.ether_type != 0xffff)
return -1;
if (!spec)
return 0;
/* store eth_type for consistency if ipv4/6 pattern item comes next */
- if (spec->type & mask->type)
- info->eth_type = spec->type;
+ if (spec->hdr.ether_type & mask->hdr.ether_type)
+ info->eth_type = spec->hdr.ether_type;
if (!flow)
return 0;
msg = &flow->msg;
- if (!rte_is_zero_ether_addr(&mask->dst)) {
+ if (!rte_is_zero_ether_addr(&mask->hdr.dst_addr)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
RTE_ETHER_ADDR_LEN,
- &spec->dst.addr_bytes);
+ &spec->hdr.dst_addr.addr_bytes);
tap_nlattr_add(&msg->nh,
TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,
- &mask->dst.addr_bytes);
+ &mask->hdr.dst_addr.addr_bytes);
}
- if (!rte_is_zero_ether_addr(&mask->src)) {
+ if (!rte_is_zero_ether_addr(&mask->hdr.src_addr)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
RTE_ETHER_ADDR_LEN,
- &spec->src.addr_bytes);
+ &spec->hdr.src_addr.addr_bytes);
tap_nlattr_add(&msg->nh,
TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,
- &mask->src.addr_bytes);
+ &mask->hdr.src_addr.addr_bytes);
}
return 0;
}
@@ -587,11 +587,11 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
if (info->vlan)
return -1;
info->vlan = 1;
- if (mask->inner_type) {
+ if (mask->hdr.eth_proto) {
/* TC does not support partial eth_type masking */
- if (mask->inner_type != RTE_BE16(0xffff))
+ if (mask->hdr.eth_proto != RTE_BE16(0xffff))
return -1;
- info->eth_type = spec->inner_type;
+ info->eth_type = spec->hdr.eth_proto;
}
if (!flow)
return 0;
@@ -601,8 +601,8 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
#define VLAN_ID(tci) ((tci) & 0xfff)
if (!spec)
return 0;
- if (spec->tci) {
- uint16_t tci = ntohs(spec->tci) & mask->tci;
+ if (spec->hdr.vlan_tci) {
+ uint16_t tci = ntohs(spec->hdr.vlan_tci) & mask->hdr.vlan_tci;
uint16_t prio = VLAN_PRIO(tci);
uint8_t vid = VLAN_ID(tci);
@@ -1681,7 +1681,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
};
struct rte_flow_item *items = implicit_rte_flows[idx].items;
struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
- struct rte_flow_item_eth eth_local = { .type = 0 };
+ struct rte_flow_item_eth eth_local = { .hdr.ether_type = 0 };
unsigned int if_index = pmd->remote_if_index;
struct rte_flow *remote_flow = NULL;
struct nlmsg *msg = NULL;
@@ -1718,7 +1718,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
* eth addr couldn't be set in implicit_rte_flows[] as it is not
* known at compile time.
*/
- memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
+ memcpy(&eth_local.hdr.dst_addr, &pmd->eth_addr, sizeof(pmd->eth_addr));
items = items_local;
}
tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
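Filling the destination at run time, as eth_local does above, can also use the dedicated copy helper instead of memcpy; a sketch with a hypothetical wrapper:

#include <rte_ether.h>
#include <rte_flow.h>

/* Hypothetical: patch a pattern's destination MAC at run time. */
static void
set_rule_dmac(struct rte_flow_item_eth *eth_item,
	      const struct rte_ether_addr *mac)
{
	rte_ether_addr_copy(mac, &eth_item->hdr.dst_addr);
}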
@@ -706,16 +706,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!rte_is_zero_ether_addr(&eth_mask->src) ||
- (!rte_is_zero_ether_addr(&eth_mask->dst) &&
- !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) ||
+ (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
return -rte_errno;
}
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ if ((eth_mask->hdr.ether_type & UINT16_MAX) != UINT16_MAX) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ethertype mask");
@@ -725,13 +725,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
- filter->mac_addr = eth_spec->dst;
+ if (rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) {
+ filter->mac_addr = eth_spec->hdr.dst_addr;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
}
- filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
/* Check if the next non-void item is END. */
item = next_no_void_pattern(pattern, item);
@@ -1635,7 +1635,7 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
eth_mask = item->mask;
/* Ether type should be masked. */
- if (eth_mask->type ||
+ if (eth_mask->hdr.ether_type ||
rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1652,8 +1652,8 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
* and don't support dst MAC address mask.
*/
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ||
- eth_mask->dst.addr_bytes[j] != 0xFF) {
+ if (eth_mask->hdr.src_addr.addr_bytes[j] ||
+ eth_mask->hdr.dst_addr.addr_bytes[j] != 0xFF) {
memset(rule, 0,
sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2381,7 +2381,7 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
eth_mask = item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->hdr.ether_type) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2391,7 +2391,7 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* src MAC address should be masked. */
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j]) {
+ if (eth_mask->hdr.src_addr.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2403,9 +2403,9 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->mask.mac_addr_byte_mask = 0;
for (j = 0; j < ETH_ADDR_LEN; j++) {
/* It's a per byte mask. */
- if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ if (eth_mask->hdr.dst_addr.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
- } else if (eth_mask->dst.addr_bytes[j]) {
+ } else if (eth_mask->hdr.dst_addr.addr_bytes[j]) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,