diff mbox series

[v2] net/mlx5: fix eCPRI previous layer checking

Message ID 1605086930-189770-1-git-send-email-bingz@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers show
Series [v2] net/mlx5: fix eCPRI previous layer checking | expand

Checks

Context Check Description
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed
ci/Intel-compilation success Compilation OK
ci/iol-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/checkpatch success coding style OK

Commit Message

Bing Zhao Nov. 11, 2020, 9:28 a.m. UTC
Based on the specification, eCPRI can only follow ETH (VLAN) layer
or UDP layer. When creating a flow with an eCPRI item, this should be
checked and an invalid layout of the layers should be rejected.

Fixes: c7eca23657b7 ("net/mlx5: add flow validation of eCPRI header")

Cc: stable@dpdk.org

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
v2: remove the line break for error log message.
---
 drivers/net/mlx5/mlx5_flow.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

Comments

Thomas Monjalon Nov. 13, 2020, 6:08 p.m. UTC | #1
11/11/2020 10:28, Bing Zhao:
> Based on the specification, eCPRI can only follow ETH (VLAN) layer
> or UDP layer. When creating a flow with an eCPRI item, this should be
> checked and an invalid layout of the layers should be rejected.
> 
> Fixes: c7eca23657b7 ("net/mlx5: add flow validation of eCPRI header")
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
> v2: remove the line break for error log message.

Applied in next-net-mlx instead of the first version, thanks.
diff mbox series

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a6e60af..859b7f6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2896,17 +2896,20 @@  struct mlx5_flow_tunnel_info {
 					MLX5_FLOW_LAYER_OUTER_VLAN);
 	struct rte_flow_item_ecpri mask_lo;
 
+	if (!(last_item & outer_l2_vlan) &&
+	    last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "eCPRI can only follow L2/VLAN layer or UDP layer");
 	if ((last_item & outer_l2_vlan) && ether_type &&
 	    ether_type != RTE_ETHER_TYPE_ECPRI)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "eCPRI cannot follow L2/VLAN layer "
-					  "which ether type is not 0xAEFE.");
+					  "eCPRI cannot follow L2/VLAN layer which ether type is not 0xAEFE");
 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "eCPRI with tunnel is not supported "
-					  "right now.");
+					  "eCPRI with tunnel is not supported right now");
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2914,13 +2917,12 @@  struct mlx5_flow_tunnel_info {
 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "eCPRI cannot follow a TCP layer.");
+					  "eCPRI cannot coexist with a TCP layer");
 	/* In specification, eCPRI could be over UDP layer. */
 	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "eCPRI over UDP layer is not yet "
-					  "supported right now.");
+					  "eCPRI over UDP layer is not yet supported right now");
 	/* Mask for type field in common header could be zero. */
 	if (!mask)
 		mask = &rte_flow_item_ecpri_mask;
@@ -2929,13 +2931,11 @@  struct mlx5_flow_tunnel_info {
 	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
-					  "partial mask is not supported "
-					  "for protocol");
+					  "partial mask is not supported for protocol");
 	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
-					  "message header mask must be after "
-					  "a type mask");
+					  "message header mask must be after a type mask");
 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
 					 acc_mask ? (const uint8_t *)acc_mask
 						  : (const uint8_t *)&nic_mask,