@@ -558,6 +558,630 @@ struct flow_tcf_ptoi {
}
/**
+ * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_ETH item for E-Switch.
+ *
+ * @param[in] item
+ * Pointer to the item structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_encap_eth(const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+
+ if (!spec)
+ /*
+ * Specification for L2 addresses can be empty
+ * because these addresses are optional and not
+ * required directly by the tc rule.
+ */
+ return 0;
+ if (!mask)
+ /* If mask is not specified use the default one. */
+ mask = &rte_flow_item_eth_mask;
+ if (memcmp(&mask->dst,
+ &flow_tcf_mask_empty.eth.dst,
+ sizeof(flow_tcf_mask_empty.eth.dst))) {
+ if (memcmp(&mask->dst,
+ &rte_flow_item_eth_mask.dst,
+ sizeof(rte_flow_item_eth_mask.dst)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"eth.dst\" field");
+ /*
+ * Ethernet addresses are not supported by the
+ * tunnel_key parameters of tc. The destination
+ * L2 address is needed to form the encap packet
+ * header and is retrieved by the kernel from
+ * implicit sources (ARP table, etc.); address
+ * masks are not supported at all.
+ */
+ DRV_LOG(WARNING,
+ "outer ethernet destination address "
+ "cannot be forced for VXLAN "
+ "encapsulation, parameter ignored");
+ }
+ if (memcmp(&mask->src,
+ &flow_tcf_mask_empty.eth.src,
+ sizeof(flow_tcf_mask_empty.eth.src))) {
+ if (memcmp(&mask->src,
+ &rte_flow_item_eth_mask.src,
+ sizeof(rte_flow_item_eth_mask.src)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"eth.src\" field");
+ DRV_LOG(WARNING,
+ "outer ethernet source address "
+ "cannot be forced for VXLAN "
+ "encapsulation, parameter ignored");
+ }
+ if (mask->type != RTE_BE16(0x0000)) {
+ if (mask->type != RTE_BE16(0xffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"eth.type\" field");
+ DRV_LOG(WARNING,
+ "outer ethernet type field "
+ "cannot be forced for VXLAN "
+ "encapsulation, parameter ignored");
+ }
+ return 0;
+}
+
+/**
+ * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV4 item for E-Switch.
+ *
+ * @param[in] item
+ * Pointer to the item structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_encap_ipv4(const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+
+ if (!spec)
+ /*
+ * Specification for L3 addresses cannot be empty
+ * because it is required by the tunnel_key parameter.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "NULL outer L3 address specification "
+ " for VXLAN encapsulation");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ if (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {
+ if (mask->hdr.dst_addr != RTE_BE32(0xffffffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv4.hdr.dst_addr\" field");
+ /* More L3 address validations can be put here. */
+ } else {
+ /*
+ * Kernel uses the destination L3 address to determine
+ * the routing path and obtain the L2 destination
+ * address, so L3 destination address must be
+ * specified in the tc rule.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "outer L3 destination address must be "
+ "specified for VXLAN encapsulation");
+ }
+ if (mask->hdr.src_addr != RTE_BE32(0x00000000)) {
+ if (mask->hdr.src_addr != RTE_BE32(0xffffffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv4.hdr.src_addr\" field");
+ /* More L3 address validations can be put here. */
+ } else {
+ /*
+ * Kernel uses the source L3 address to select the
+ * interface for egress encapsulated traffic, so
+ * it must be specified in the tc rule.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "outer L3 source address must be "
+ "specified for VXLAN encapsulation");
+ }
+ return 0;
+}
+
+/**
+ * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV6 item for E-Switch.
+ *
+ * @param[in] item
+ * Pointer to the item structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_encap_ipv6(const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+
+ if (!spec)
+ /*
+ * Specification for L3 addresses cannot be empty
+ * because it is required by the tunnel_key parameter.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "NULL outer L3 address specification "
+ " for VXLAN encapsulation");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ if (memcmp(&mask->hdr.dst_addr,
+ &flow_tcf_mask_empty.ipv6.hdr.dst_addr,
+ sizeof(flow_tcf_mask_empty.ipv6.hdr.dst_addr))) {
+ if (memcmp(&mask->hdr.dst_addr,
+ &rte_flow_item_ipv6_mask.hdr.dst_addr,
+ sizeof(rte_flow_item_ipv6_mask.hdr.dst_addr)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv6.hdr.dst_addr\" field");
+ /* More L3 address validations can be put here. */
+ } else {
+ /*
+ * Kernel uses the destination L3 address to determine
+ * the routing path and obtain the L2 destination
+ * address (neighbor or gateway), so L3 destination
+ * address must be specified in the tc rule.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "outer L3 destination address must be "
+ "specified for VXLAN encapsulation");
+ }
+ if (memcmp(&mask->hdr.src_addr,
+ &flow_tcf_mask_empty.ipv6.hdr.src_addr,
+ sizeof(flow_tcf_mask_empty.ipv6.hdr.src_addr))) {
+ if (memcmp(&mask->hdr.src_addr,
+ &rte_flow_item_ipv6_mask.hdr.src_addr,
+ sizeof(rte_flow_item_ipv6_mask.hdr.src_addr)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv6.hdr.src_addr\" field");
+ /* More L3 address validations can be put here. */
+ } else {
+ /*
+ * Kernel uses the source L3 address to select the
+ * interface for egress encapsulated traffic, so
+ * it must be specified in the tc rule.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "outer L3 source address must be "
+ "specified for VXLAN encapsulation");
+ }
+ return 0;
+}
+
+/**
+ * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_UDP item for E-Switch.
+ *
+ * @param[in] item
+ * Pointer to the item structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_encap_udp(const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+
+ if (!spec)
+ /*
+ * Specification for UDP ports cannot be empty
+ * because it is required by the tunnel_key parameter.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "NULL UDP port specification "
+ " for VXLAN encapsulation");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
+ if (mask->hdr.dst_port != RTE_BE16(0xffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"udp.hdr.dst_port\" field");
+ if (!spec->hdr.dst_port)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "zero encap remote UDP port");
+ } else {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "outer UDP remote port must be "
+ "specified for VXLAN encapsulation");
+ }
+ if (mask->hdr.src_port != RTE_BE16(0x0000)) {
+ if (mask->hdr.src_port != RTE_BE16(0xffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"udp.hdr.src_port\" field");
+ DRV_LOG(WARNING,
+ "outer UDP source port cannot be "
+ "forced for VXLAN encapsulation, "
+ "parameter ignored");
+ }
+ return 0;
+}
+
+/**
+ * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_VXLAN item for E-Switch.
+ *
+ * @param[in] item
+ * Pointer to the item structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_encap_vni(const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+
+ if (!spec)
+ /* Outer VNI is required by the tunnel_key parameter. */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "NULL VNI specification "
+ " for VXLAN encapsulation");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ if (mask->vni[0] != 0 ||
+ mask->vni[1] != 0 ||
+ mask->vni[2] != 0) {
+ if (mask->vni[0] != 0xff ||
+ mask->vni[1] != 0xff ||
+ mask->vni[2] != 0xff)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"vxlan.vni\" field");
+ if (spec->vni[0] == 0 &&
+ spec->vni[1] == 0 &&
+ spec->vni[2] == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN vni cannot be 0");
+ } else {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "outer VNI must be specified "
+ "for VXLAN encapsulation");
+ }
+ return 0;
+}
+
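+/*
+ * A minimal sketch (illustrative only, all names below are hypothetical)
+ * of the item list accepted by flow_tcf_validate_vxlan_encap() as the
+ * VXLAN_ENCAP action configuration:
+ *
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth, .mask = &eth_m },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip4, .mask = &ip4_m },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp, .mask = &udp_m },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vni, .mask = &vni_m },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action_vxlan_encap conf = { .definition = pattern };
+ */
+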
+/**
+ * Validate VXLAN_ENCAP action item list for E-Switch.
+ *
+ * @param[in] action
+ * Pointer to the VXLAN_ENCAP action structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *items;
+ int ret;
+ uint32_t item_flags = 0;
+
+ assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
+ if (!action->conf)
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "Missing VXLAN tunnel "
+ "action configuration");
+ items = ((const struct rte_flow_action_vxlan_encap *)
+ action->conf)->definition;
+ if (!items)
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "Missing VXLAN tunnel "
+ "encapsulation parameters");
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ ret = flow_tcf_validate_vxlan_encap_eth(items, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ 0xFF, error);
+ if (ret < 0)
+ return ret;
+ ret = flow_tcf_validate_vxlan_encap_udp(items, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items,
+ item_flags, error);
+ if (ret < 0)
+ return ret;
+ ret = flow_tcf_validate_vxlan_encap_vni(items, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "VXLAN encap item not supported");
+ }
+ }
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no outer L3 layer found"
+ " for VXLAN encapsulation");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no outer L4 layer found"
+ " for VXLAN encapsulation");
+ if (!(item_flags & MLX5_FLOW_LAYER_VXLAN))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no VXLAN VNI found"
+ " for VXLAN encapsulation");
+ return 0;
+}
+
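+/*
+ * For reference, an illustrative kernel-side counterpart of an accepted
+ * VXLAN_ENCAP rule, assuming iproute2 tc syntax (device names are
+ * placeholders, not produced by this code):
+ *
+ *	tc filter add dev <ifouter> protocol ip parent ffff: flower \
+ *		action tunnel_key set src_ip 192.168.1.1 dst_ip 192.168.1.2 \
+ *			id 100 dst_port 4789 \
+ *		action mirred egress redirect dev <vxlan device>
+ */
+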
+/**
+ * Validate VXLAN_DECAP action outer tunnel items for E-Switch.
+ *
+ * @param[in] item_flags
+ * Mask of provided outer tunnel parameters.
+ * @param[in] action
+ * Pointer to the VXLAN_DECAP action structure.
+ * @param[in] ipv4
+ * Outer IPv4 address item (if any, NULL otherwise).
+ * @param[in] ipv6
+ * Outer IPv6 address item (if any, NULL otherwise).
+ * @param[in] udp
+ * Outer UDP layer item (if any, NULL otherwise).
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_vxlan_decap(uint32_t item_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_item *ipv4,
+ const struct rte_flow_item *ipv6,
+ const struct rte_flow_item *udp,
+ struct rte_flow_error *error)
+{
+ if (!ipv4 && !ipv6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no outer L3 layer found"
+ " for VXLAN decapsulation");
+ if (ipv4) {
+ const struct rte_flow_item_ipv4 *spec = ipv4->spec;
+ const struct rte_flow_item_ipv4 *mask = ipv4->mask;
+
+ if (!spec)
+ /*
+ * Specification for L3 addresses cannot be empty
+ * because it is required as a decap parameter.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, ipv4,
+ "NULL outer L3 address specification "
+ " for VXLAN decapsulation");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ if (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {
+ if (mask->hdr.dst_addr != RTE_BE32(0xffffffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv4.hdr.dst_addr\" field");
+ /* More L3 address validations can be put here. */
+ } else {
+ /*
+ * Kernel uses the destination L3 address
+ * to determine the ingress network interface
+ * for traffic being decapsulated.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, ipv4,
+ "outer L3 destination address must be "
+ "specified for VXLAN decapsulation");
+ }
+ /* Source L3 address is optional for decap. */
+ if (mask->hdr.src_addr != RTE_BE32(0x00000000))
+ if (mask->hdr.src_addr != RTE_BE32(0xffffffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv4.hdr.src_addr\" field");
+ } else {
+ const struct rte_flow_item_ipv6 *spec = ipv6->spec;
+ const struct rte_flow_item_ipv6 *mask = ipv6->mask;
+
+ if (!spec)
+ /*
+ * Specification for L3 addresses cannot be empty
+ * because it is required as a decap parameter.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, ipv6,
+ "NULL outer L3 address specification "
+ " for VXLAN decapsulation");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ if (memcmp(&mask->hdr.dst_addr,
+ &flow_tcf_mask_empty.ipv6.hdr.dst_addr,
+ sizeof(flow_tcf_mask_empty.ipv6.hdr.dst_addr))) {
+ if (memcmp(&mask->hdr.dst_addr,
+ &rte_flow_item_ipv6_mask.hdr.dst_addr,
+ sizeof(rte_flow_item_ipv6_mask.hdr.dst_addr)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv6.hdr.dst_addr\" field");
+ /* More L3 address validations can be put here. */
+ } else {
+ /*
+ * Kernel uses the destination L3 address
+ * to determine the ingress network interface
+ * for traffic being decapsulated.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, ipv6,
+ "outer L3 destination address must be "
+ "specified for VXLAN decapsulation");
+ }
+ /* Source L3 address is optional for decap. */
+ if (memcmp(&mask->hdr.src_addr,
+ &flow_tcf_mask_empty.ipv6.hdr.src_addr,
+ sizeof(flow_tcf_mask_empty.ipv6.hdr.src_addr))) {
+ if (memcmp(&mask->hdr.src_addr,
+ &rte_flow_item_ipv6_mask.hdr.src_addr,
+ sizeof(rte_flow_item_ipv6_mask.hdr.src_addr)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv6.hdr.src_addr\" field");
+ }
+ }
+ if (!udp) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no outer L4 layer found"
+ " for VXLAN decapsulation");
+ } else {
+ const struct rte_flow_item_udp *spec = udp->spec;
+ const struct rte_flow_item_udp *mask = udp->mask;
+
+ if (!spec)
+ /*
+ * Specification for UDP ports cannot be empty
+ * because it is required as a decap parameter.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, udp,
+ "NULL UDP port specification "
+ " for VXLAN decapsulation");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
+ if (mask->hdr.dst_port != RTE_BE16(0xffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"udp.hdr.dst_port\" field");
+ if (!spec->hdr.dst_port)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, udp,
+ "zero decap local UDP port");
+ } else {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, udp,
+ "outer UDP destination port must be "
+ "specified for VXLAN decapsulation");
+ }
+ if (mask->hdr.src_port != RTE_BE16(0x0000)) {
+ if (mask->hdr.src_port != RTE_BE16(0xffff))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"udp.hdr.src_port\" field");
+ DRV_LOG(WARNING,
+ "outer UDP local port cannot be "
+ "forced for VXLAN encapsulation, "
+ "parameter ignored");
+ }
+ }
+ if (!(item_flags & MLX5_FLOW_LAYER_VXLAN))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no VXLAN VNI found"
+ " for VXLAN decapsulation");
+ /* VNI is already validated, extra check can be put here. */
+ return 0;
+}
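+
+/*
+ * For reference, an illustrative kernel-side counterpart of an accepted
+ * VXLAN_DECAP rule, assuming iproute2 flower syntax (device names are
+ * placeholders, not produced by this code):
+ *
+ *	tc filter add dev <vxlan device> protocol ip parent ffff: flower \
+ *		enc_src_ip 192.168.1.2 enc_dst_ip 192.168.1.1 \
+ *		enc_key_id 100 enc_dst_port 4789 \
+ *		action tunnel_key unset \
+ *		action mirred egress redirect dev <ifouter>
+ */
+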
+/**
* Validate flow for E-Switch.
*
* @param[in] priv
@@ -589,6 +1213,7 @@ struct flow_tcf_ptoi {
const struct rte_flow_item_ipv6 *ipv6;
const struct rte_flow_item_tcp *tcp;
const struct rte_flow_item_udp *udp;
+ const struct rte_flow_item_vxlan *vxlan;
} spec, mask;
union {
const struct rte_flow_action_port_id *port_id;
@@ -597,7 +1222,11 @@ struct flow_tcf_ptoi {
of_set_vlan_vid;
const struct rte_flow_action_of_set_vlan_pcp *
of_set_vlan_pcp;
+ const struct rte_flow_action_vxlan_encap *vxlan_encap;
} conf;
+ const struct rte_flow_item *ipv4 = NULL; /* saved outer IPv4 item. */
+ const struct rte_flow_item *ipv6 = NULL; /* saved outer IPv6 item. */
+ const struct rte_flow_item *udp = NULL; /* saved outer UDP item. */
uint32_t item_flags = 0;
uint32_t action_flags = 0;
uint8_t next_protocol = -1;
@@ -724,7 +1353,6 @@ struct flow_tcf_ptoi {
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
mask.ipv4 = flow_tcf_item_mask
(items, &rte_flow_item_ipv4_mask,
&flow_tcf_mask_supported.ipv4,
@@ -745,13 +1373,22 @@ struct flow_tcf_ptoi {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
+ /*
+ * Multiple outer items are not allowed as
+ * tunnel parameters.
+ */
+ ipv4 = NULL;
+ } else {
+ ipv4 = items;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
mask.ipv6 = flow_tcf_item_mask
(items, &rte_flow_item_ipv6_mask,
&flow_tcf_mask_supported.ipv6,
@@ -772,13 +1409,22 @@ struct flow_tcf_ptoi {
next_protocol =
((const struct rte_flow_item_ipv6 *)
(items->spec))->hdr.proto;
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
+ /*
+ * Multiple outer items are not allowed as
+ * tunnel parameters.
+ */
+ ipv6 = NULL;
+ } else {
+ ipv6 = items;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ }
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
mask.udp = flow_tcf_item_mask
(items, &rte_flow_item_udp_mask,
&flow_tcf_mask_supported.udp,
@@ -787,13 +1433,18 @@ struct flow_tcf_ptoi {
error);
if (!mask.udp)
return -rte_errno;
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP) {
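+ /*
+ * Multiple outer items are not allowed as
+ * tunnel parameters.
+ */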
+ udp = NULL;
+ } else {
+ udp = items;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp(items, item_flags,
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
mask.tcp = flow_tcf_item_mask
(items, &rte_flow_item_tcp_mask,
&flow_tcf_mask_supported.tcp,
@@ -802,6 +1453,31 @@ struct flow_tcf_ptoi {
error);
if (!mask.tcp)
return -rte_errno;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items,
+ item_flags, error);
+ if (ret < 0)
+ return ret;
+ mask.vxlan = flow_tcf_item_mask
+ (items, &rte_flow_item_vxlan_mask,
+ &flow_tcf_mask_supported.vxlan,
+ &flow_tcf_mask_empty.vxlan,
+ sizeof(flow_tcf_mask_supported.vxlan),
+ error);
+ if (!mask.vxlan)
+ return -rte_errno;
+ if (mask.vxlan->vni[0] != 0xff ||
+ mask.vxlan->vni[1] != 0xff ||
+ mask.vxlan->vni[2] != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vxlan,
+ "no support for partial or "
+ "empty mask on \"vxlan.vni\" field");
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
@@ -857,6 +1533,33 @@ struct flow_tcf_ptoi {
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
action_flags |= MLX5_ACTION_OF_SET_VLAN_PCP;
break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ if (action_flags & (MLX5_ACTION_VXLAN_ENCAP |
+ MLX5_ACTION_VXLAN_DECAP))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "can't have multiple vxlan actions");
+ ret = flow_tcf_validate_vxlan_encap(actions, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_ACTION_VXLAN_ENCAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ if (action_flags & (MLX5_ACTION_VXLAN_ENCAP |
+ MLX5_ACTION_VXLAN_DECAP))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "can't have multiple vxlan actions");
+ ret = flow_tcf_validate_vxlan_decap(item_flags,
+ actions,
+ ipv4, ipv6, udp,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_ACTION_VXLAN_DECAP;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -864,6 +1567,12 @@ struct flow_tcf_ptoi {
"action not supported");
}
}
+ if ((item_flags & MLX5_FLOW_LAYER_VXLAN) &&
+ !(action_flags & MLX5_ACTION_VXLAN_DECAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "VNI pattern should be followed "
+ " by VXLAN_DECAP action");
return 0;
}