The NAT64 DR actions can be shared among the tables. All these
actions can be created while configuring the flow queues and saved
for future usage.
Even though the actions can be shared now, the actual hardware
resources are unique inside each flow rule.
Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/nics/mlx5.rst | 9 ++++-
drivers/net/mlx5/mlx5.h | 6 +++
drivers/net/mlx5/mlx5_flow.h | 11 ++++++
drivers/net/mlx5/mlx5_flow_dv.c | 4 +-
drivers/net/mlx5/mlx5_flow_hw.c | 65 +++++++++++++++++++++++++++++++
6 files changed, 94 insertions(+), 2 deletions(-)
@@ -115,6 +115,7 @@ mark = Y
meter = Y
meter_mark = Y
modify_field = Y
+nat64 = Y
nvgre_decap = Y
nvgre_encap = Y
of_pop_vlan = Y
@@ -167,7 +167,7 @@ Features
- Sub-Function.
- Matching on represented port.
- Matching on aggregated affinity.
-
+- NAT64.
Limitations
-----------
@@ -779,6 +779,13 @@ Limitations
if preceding active application rules are still present and vice versa.
+- NAT64 action:
+ - Supported only with HW Steering enabled (``dv_flow_en`` = 2).
+ - Supported only on non-root table.
+ - Action ordering limitation: the NAT64 action should follow the modify fields action.
+ - The last 2 TAG registers will be used implicitly in address backup mode.
+ - Even if the action can be shared, new steering entries will be created per flow rule. It is recommended to share a single rule with NAT64 to reduce the duplication of entries. The default address and other fields conversion will be handled with the NAT64 action. To support other addresses, new rule(s) with modify fields on the IP addresses should be created.
+
Statistics
----------
@@ -1967,6 +1967,12 @@ struct mlx5_priv {
struct mlx5_aso_mtr_pool *hws_mpool; /* HW steering's Meter pool. */
struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
/**< HW steering templates used to create control flow rules. */
+ /*
+ * The NAT64 action can be shared among matchers per domain.
+ * [0]: RTE_FLOW_NAT64_6TO4, [1]: RTE_FLOW_NAT64_4TO6
+	 * TODO: consider adding a *_MAX macro.
+ */
+ struct mlx5dr_action *action_nat64[MLX5DR_TABLE_TYPE_MAX][2];
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
uint16_t shared_refcnt; /* HW steering host reference counter. */
@@ -159,6 +159,17 @@ struct mlx5_rte_flow_item_sq {
uint32_t queue; /* DevX SQ number */
};
+/* Map from registers to modify fields. */
+extern enum mlx5_modification_field reg_to_field[];
+extern const size_t mlx5_mod_reg_size;
+
+/*
+ * Convert a metadata register id (enum modify_reg) into the corresponding
+ * modify-field id (enum mlx5_modification_field) via the reg_to_field map.
+ * NOTE(review): "covert" in the name looks like a typo for "convert".
+ */
+static __rte_always_inline enum mlx5_modification_field
+mlx5_covert_reg_to_field(enum modify_reg reg)
+{
+	/* Out-of-range registers are a programming error; debug-only check. */
+	MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size);
+	return reg_to_field[reg];
+}
+
/* Feature name to allocate metadata register. */
enum mlx5_feature_name {
MLX5_HAIRPIN_RX,
@@ -958,7 +958,7 @@ flow_dv_convert_action_modify_tcp_ack
MLX5_MODIFICATION_TYPE_ADD, error);
}
-static enum mlx5_modification_field reg_to_field[] = {
+enum mlx5_modification_field reg_to_field[] = {
[REG_NON] = MLX5_MODI_OUT_NONE,
[REG_A] = MLX5_MODI_META_DATA_REG_A,
[REG_B] = MLX5_MODI_META_DATA_REG_B,
@@ -976,6 +976,8 @@ static enum mlx5_modification_field reg_to_field[] = {
[REG_C_11] = MLX5_MODI_META_REG_C_11,
};
+const size_t mlx5_mod_reg_size = RTE_DIM(reg_to_field);
+
/**
* Convert register set to DV specification.
*
@@ -7413,6 +7413,66 @@ flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
}
}
+/*
+ * Release the shared NAT64 DR actions of all table domains.
+ * Both translation directions ([0]: 6-to-4, [1]: 4-to-6) are destroyed
+ * and the cached pointers are cleared.
+ */
+static void
+flow_hw_destroy_nat64_actions(struct mlx5_priv *priv)
+{
+	uint32_t tbl;
+	uint32_t dir;
+
+	for (tbl = MLX5DR_TABLE_TYPE_NIC_RX; tbl < MLX5DR_TABLE_TYPE_MAX; tbl++) {
+		for (dir = 0; dir < 2; dir++) {
+			struct mlx5dr_action *act = priv->action_nat64[tbl][dir];
+
+			if (act == NULL)
+				continue;
+			(void)mlx5dr_action_destroy(act);
+			priv->action_nat64[tbl][dir] = NULL;
+		}
+	}
+}
+
+/**
+ * Create the shared NAT64 DR actions for all supported table domains.
+ *
+ * One action pair is created per domain (NIC Rx, NIC Tx, FDB):
+ * index [0] holds the 6-to-4 action and [1] the 4-to-6 action.
+ * The FDB domain is skipped when E-Switch is not enabled.
+ * On failure, already created actions are left in priv->action_nat64;
+ * the caller's error path is expected to release them
+ * (via flow_hw_destroy_nat64_actions).
+ *
+ * @param[in] priv
+ *   Pointer to the port private data structure.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_create_nat64_actions(struct mlx5_priv *priv, struct rte_flow_error *error)
+{
+	struct mlx5dr_action_nat64_attr attr;
+	uint8_t regs[MLX5_FLOW_NAT64_REGS_MAX];
+	uint32_t i;
+	/* Per-domain action flags, indexed by mlx5dr table type. */
+	const uint32_t flags[MLX5DR_TABLE_TYPE_MAX] = {
+		MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED,
+		MLX5DR_ACTION_FLAG_HWS_TX | MLX5DR_ACTION_FLAG_SHARED,
+		MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED,
+	};
+	struct mlx5dr_action *act;
+
+	attr.registers = regs;
+	/* Try to use 3 registers by default. */
+	attr.num_of_registers = MLX5_FLOW_NAT64_REGS_MAX;
+	/* Translate the pre-allocated NAT64 registers into modify-field ids. */
+	for (i = 0; i < MLX5_FLOW_NAT64_REGS_MAX; i++) {
+		MLX5_ASSERT(priv->sh->registers.nat64_regs[i] != REG_NON);
+		regs[i] = mlx5_covert_reg_to_field(priv->sh->registers.nat64_regs[i]);
+	}
+	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+		/* FDB actions only make sense with E-Switch enabled. */
+		if (i == MLX5DR_TABLE_TYPE_FDB && !priv->sh->config.dv_esw_en)
+			continue;
+		/* 6-to-4 direction, keeping original addresses in backup mode. */
+		attr.flags = (enum mlx5dr_action_nat64_flags)
+			     (MLX5DR_ACTION_NAT64_V6_TO_V4 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
+		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
+		if (!act)
+			return rte_flow_error_set(error, rte_errno,
+						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+						  "Failed to create v6 to v4 action.");
+		priv->action_nat64[i][0] = act;
+		/* 4-to-6 direction, same backup-address mode. */
+		attr.flags = (enum mlx5dr_action_nat64_flags)
+			     (MLX5DR_ACTION_NAT64_V4_TO_V6 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
+		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
+		if (!act)
+			return rte_flow_error_set(error, rte_errno,
+						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+						  "Failed to create v4 to v6 action.");
+		priv->action_nat64[i][1] = act;
+	}
+	return 0;
+}
+
/**
* Create an egress pattern template matching on source SQ.
*
@@ -9539,6 +9599,9 @@ flow_hw_configure(struct rte_eth_dev *dev,
NULL, "Failed to VLAN actions.");
goto err;
}
+ ret = flow_hw_create_nat64_actions(priv, error);
+ if (ret)
+ goto err;
if (_queue_attr)
mlx5_free(_queue_attr);
if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
@@ -9570,6 +9633,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
if (priv->hw_def_miss)
mlx5dr_action_destroy(priv->hw_def_miss);
+ flow_hw_destroy_nat64_actions(priv);
flow_hw_destroy_vlan(dev);
if (dr_ctx)
claim_zero(mlx5dr_context_close(dr_ctx));
@@ -9649,6 +9713,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
}
if (priv->hw_def_miss)
mlx5dr_action_destroy(priv->hw_def_miss);
+ flow_hw_destroy_nat64_actions(priv);
flow_hw_destroy_vlan(dev);
flow_hw_destroy_send_to_kernel_action(priv);
flow_hw_free_vport_actions(priv);