[v3] net/iavf: support gtpu outer and inner co-exist
Checks
Commit Message
Although currently only the gtpu inner hash is enabled and not the
gtpu outer hash, the outer protocol still needs to co-exist with the
inner protocol when configuring a gtpu inner hash rule; this allows
the gtpu inner hash to be supported for different outer protocols.
Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
v2->v3:
delete unused param
---
drivers/net/iavf/iavf_hash.c | 52 +++++++++++++++++++++++++++++++-----
1 file changed, 45 insertions(+), 7 deletions(-)
@@ -29,11 +29,21 @@
#define IAVF_PHINT_GTPU_EH_DWN BIT_ULL(2)
#define IAVF_PHINT_GTPU_EH_UP BIT_ULL(3)
+#define IAVF_PHINT_OUTER_IPV4_INNER_IPV4 BIT_ULL(4)
+#define IAVF_PHINT_OUTER_IPV4_INNER_IPV6 BIT_ULL(5)
+#define IAVF_PHINT_OUTER_IPV6_INNER_IPV4 BIT_ULL(6)
+#define IAVF_PHINT_OUTER_IPV6_INNER_IPV6 BIT_ULL(7)
+
#define IAVF_PHINT_GTPU_MSK (IAVF_PHINT_GTPU | \
IAVF_PHINT_GTPU_EH | \
IAVF_PHINT_GTPU_EH_DWN | \
IAVF_PHINT_GTPU_EH_UP)
+#define IAVF_PHINT_LAYERS_MSK (IAVF_PHINT_OUTER_IPV4_INNER_IPV4 | \
+ IAVF_PHINT_OUTER_IPV4_INNER_IPV6 | \
+ IAVF_PHINT_OUTER_IPV6_INNER_IPV4 | \
+ IAVF_PHINT_OUTER_IPV6_INNER_IPV6)
+
#define IAVF_GTPU_EH_DWNLINK 0
#define IAVF_GTPU_EH_UPLINK 1
@@ -499,12 +509,13 @@ iavf_hash_init(struct iavf_adapter *ad)
}
static int
-iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
- const struct rte_flow_item pattern[], uint64_t *phint,
+iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
struct rte_flow_error *error)
{
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_gtp_psc *psc;
+ bool outer_ipv4 = false;
+ bool outer_ipv6 = false;
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@@ -515,6 +526,22 @@ iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
}
switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ if (outer_ipv4)
+ *phint |= IAVF_PHINT_OUTER_IPV4_INNER_IPV4;
+ else if (outer_ipv6)
+ *phint |= IAVF_PHINT_OUTER_IPV6_INNER_IPV4;
+ else
+ outer_ipv4 = true;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ if (outer_ipv4)
+ *phint |= IAVF_PHINT_OUTER_IPV4_INNER_IPV6;
+ else if (outer_ipv6)
+ *phint |= IAVF_PHINT_OUTER_IPV6_INNER_IPV6;
+ else
+ outer_ipv6 = true;
+ break;
case RTE_FLOW_ITEM_TYPE_GTPU:
*phint |= IAVF_PHINT_GTPU;
break;
@@ -533,9 +560,6 @@ iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
}
}
- /* update and restore pattern hint */
- *phint |= *(uint64_t *)(pattern_match_item->meta);
-
return 0;
}
@@ -712,6 +736,7 @@ static void
iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
uint64_t phint)
{
+ struct virtchnl_proto_hdr *hdr_outer;
struct virtchnl_proto_hdr *hdr1;
struct virtchnl_proto_hdr *hdr2;
int i;
@@ -720,6 +745,20 @@ iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
return;
if (proto_hdrs->tunnel_level == TUNNEL_LEVEL_INNER) {
+ if (phint & IAVF_PHINT_LAYERS_MSK) {
+ /* add the outer IP header for the GTPU tunnel */
+ hdr_outer = &proto_hdrs->proto_hdr[proto_hdrs->count];
+ hdr_outer->field_selector = 0;
+ proto_hdrs->count++;
+
+ if (phint & (IAVF_PHINT_OUTER_IPV4_INNER_IPV4 |
+ IAVF_PHINT_OUTER_IPV4_INNER_IPV6))
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_outer, IPV4);
+ else if (phint & (IAVF_PHINT_OUTER_IPV6_INNER_IPV4 |
+ IAVF_PHINT_OUTER_IPV6_INNER_IPV6))
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_outer, IPV6);
+ }
+
/* shift headers 1 layer */
for (i = proto_hdrs->count; i > 0; i--) {
hdr1 = &proto_hdrs->proto_hdr[i];
@@ -908,8 +947,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
goto error;
}
- ret = iavf_hash_parse_pattern(pattern_match_item, pattern, &phint,
- error);
+ ret = iavf_hash_parse_pattern(pattern, &phint, error);
if (ret)
goto error;