1. Add validation for the NSH item. Validation fails if the HCA
   capability for NSH is not set.
2. Add an item_flags bit for NSH.
3. For VXLAN-GPE, if the next header is NSH, set next_protocol to
   NSH (illustrated in the sketch below).
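
The following minimal sketch shows how an application could match NSH
over VXLAN-GPE; the function name (create_nsh_flow), the port/queue
values and the error handling are illustrative assumptions, not part
of this patch. The NSH item is matched by presence alone (spec and
mask left NULL), since validation rejects matching on NSH fields:

  #include <rte_flow.h>

  static struct rte_flow *
  create_nsh_flow(uint16_t port_id, uint16_t rx_queue)
  {
      struct rte_flow_attr attr = { .ingress = 1 };
      struct rte_flow_action_queue queue = { .index = rx_queue };
      struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
          { .type = RTE_FLOW_ITEM_TYPE_UDP },
          { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE },
          /* Presence-only match: leave spec/mask NULL. */
          { .type = RTE_FLOW_ITEM_TYPE_NSH },
          { .type = RTE_FLOW_ITEM_TYPE_END },
      };
      struct rte_flow_action actions[] = {
          { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
          { .type = RTE_FLOW_ACTION_TYPE_END },
      };
      struct rte_flow_error error;

      if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
          return NULL;
      return rte_flow_create(port_id, &attr, pattern, actions, &error);
  }

Since the VXLAN-GPE item above leaves next_protocol unset, the
mlx5_flow_dv.c change below forces it to NSH when an NSH item follows
in the pattern.
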
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 41 +++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h    |  6 ++++++
 drivers/net/mlx5/mlx5_flow_dv.c | 13 ++++++++++++-
 3 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3905,6 +3905,47 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
+/**
+ * Validate the NSH item.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device on which the flow rule is being created.
+ * @param[in] item
+ *   Item specification.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *item,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (item->mask) {
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "NSH fields matching is not supported");
+	}
+
+	if (!priv->sh->config.dv_flow_en) {
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "NSH support requires DV flow interface");
+	}
+
+	if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_vxlan_gpe_nsh) {
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Current FW does not support matching on NSH");
+	}
+
+	return 0;
+}
+
 static int
 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
 		   const struct rte_flow_attr *attr __rte_unused,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
 /* IB BTH ITEM. */
 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
 
+/* NSH ITEM. */
+#define MLX5_FLOW_ITEM_NSH (1ull << 53)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -2453,6 +2456,9 @@ int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
 				  uint16_t ether_type,
 				  const struct rte_flow_item_ecpri *acc_mask,
 				  struct rte_flow_error *error);
+int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
+				const struct rte_flow_item *item,
+				struct rte_flow_error *error);
 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
 			      struct mlx5_flow_meter_info *fm,
 			      uint32_t mtr_idx,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7815,6 +7815,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			last_item = MLX5_FLOW_ITEM_IB_BTH;
 			break;
+		case RTE_FLOW_ITEM_TYPE_NSH:
+			ret = mlx5_flow_validate_item_nsh(dev, items, error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_ITEM_NSH;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -9720,7 +9726,9 @@ flow_dv_translate_item_vxlan_gpe(void *key, const struct rte_flow_item *item,
 	v_protocol = vxlan_v->hdr.protocol;
 	if (!m_protocol) {
 		/* Force next protocol to ensure next headers parsing. */
-		if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+		if (pattern_flags & MLX5_FLOW_ITEM_NSH)
+			v_protocol = RTE_VXLAN_GPE_TYPE_NSH;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
 			v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
 		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 			v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
@@ -13910,6 +13918,9 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
 		flow_dv_translate_item_ib_bth(key, items, tunnel, key_type);
 		last_item = MLX5_FLOW_ITEM_IB_BTH;
 		break;
+	case RTE_FLOW_ITEM_TYPE_NSH:
+		last_item = MLX5_FLOW_ITEM_NSH;
+		break;
 	default:
 		break;
 	}