@@ -1147,6 +1147,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
* then this will return directly without any action.
*/
mlx5_flow_list_flush(dev, &priv->flows, true);
+ mlx5_shared_action_flush(dev);
mlx5_flow_meter_flush(dev, NULL);
/* Free the intermediate buffers for flow creation. */
mlx5_flow_free_intermediate(dev);
@@ -680,6 +680,8 @@ struct mlx5_priv {
uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
struct mlx5_mp_id mp_id; /* ID of a multi-process process */
LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
+ LIST_HEAD(shared_action, rte_flow_shared_action) shared_actions;
+ /* shared actions */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -180,6 +180,9 @@
#define MLX5_HAIRPIN_QUEUE_STRIDE 6
#define MLX5_HAIRPIN_JUMBO_LOG_SIZE (14 + 2)
+/* Maximum number of shared actions supported by rte_flow */
+#define MLX5_MAX_SHARED_ACTIONS 1
+
/* Definition of static_assert found in /usr/include/assert.h */
#ifndef HAVE_STATIC_ASSERT
#define static_assert _Static_assert
@@ -231,6 +231,25 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
},
};
+static struct rte_flow_shared_action *
+mlx5_shared_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+static int mlx5_shared_action_destroy
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *shared_action,
+ struct rte_flow_error *error);
+static int mlx5_shared_action_update
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *shared_action,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+static int mlx5_shared_action_query
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action,
+ void *data,
+ struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -240,6 +259,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.query = mlx5_flow_query,
.dev_dump = mlx5_flow_dev_dump,
.get_aged_flows = mlx5_flow_get_aged_flows,
+ .shared_action_create = mlx5_shared_action_create,
+ .shared_action_destroy = mlx5_shared_action_destroy,
+ .shared_action_update = mlx5_shared_action_update,
+ .shared_action_query = mlx5_shared_action_query,
};
/* Convert FDIR request to Generic flow. */
@@ -1117,16 +1140,10 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
/*
* Validate the rss action.
*
- * @param[in] action
- * Pointer to the queue action.
- * @param[in] action_flags
- * Bit-fields that holds the actions detected until now.
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] attr
- * Attributes of flow that includes this action.
- * @param[in] item_flags
- * Items that were detected.
+ * @param[in] action
+ * Pointer to the RSS action.
* @param[out] error
* Pointer to error structure.
*
@@ -1134,23 +1151,14 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
- uint64_t action_flags,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- uint64_t item_flags,
- struct rte_flow_error *error)
+mlx5_validate_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
- int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- if (action_flags & MLX5_FLOW_FATE_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have 2 fate actions"
- " in same flow");
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
return rte_flow_error_set(error, ENOTSUP,
@@ -1196,15 +1204,17 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
!(rss->types & ETH_RSS_IP))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
- "L3 partial RSS requested but L3 RSS"
- " type not specified");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "L3 partial RSS requested but L3 "
+ "RSS type not specified");
if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
!(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
- "L4 partial RSS requested but L4 RSS"
- " type not specified");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "L4 partial RSS requested but L4 "
+ "RSS type not specified");
if (!priv->rxqs_n)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -1221,17 +1231,62 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
&rss->queue[i], "queue index out of range");
if (!(*priv->rxqs)[rss->queue[i]])
return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue[i], "queue is not configured");
}
+ return 0;
+}
+
+/*
+ * Validate the rss action.
+ *
+ * @param[in] action
+ * Pointer to the RSS action.
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Items that were detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_rss *rss = action->conf;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int ret;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions"
+ " in same flow");
+ ret = mlx5_validate_action_rss(dev, action, error);
+ if (ret)
+ return ret;
if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
"rss action not supported for "
"egress");
if (rss->level > 1 && !tunnel)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
"inner RSS is not supported for "
"non-tunnel flows");
return 0;
@@ -2739,6 +2794,131 @@ flow_get_rss_action(const struct rte_flow_action actions[])
return NULL;
}
+/* maps shared action to translated non shared in some actions array */
+struct mlx5_translated_shared_action {
+ struct rte_flow_shared_action *action; /**< Shared action */
+ int index; /**< Index in related array of rte_flow_action */
+};
+
+/**
+ * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to related
+ * non shared action if translation possible.
+ * This functionality used to run same execution path for both shared & non
+ * shared actions on flow create. All necessary preparations for shared
+ * action handling should be performed on the *shared* actions list returned
+ * from this call.
+ *
+ * @param[in] actions
+ * List of actions to translate.
+ * @param[out] shared
+ * List to store translated shared actions.
+ * @param[in, out] shared_n
+ * Size of *shared* array. On return should be updated with number of shared
+ * actions retrieved from the *actions* list.
+ * @param[out] translated_actions
+ * List of actions where all shared actions were translated to non shared
+ * if possible. NULL if no translation took place.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_shared_actions_translate(const struct rte_flow_action actions[],
+			      struct mlx5_translated_shared_action *shared,
+			      int *shared_n,
+			      struct rte_flow_action **translated_actions,
+			      struct rte_flow_error *error)
+{
+	struct rte_flow_action *translated = NULL;
+	int n;
+	int copied_n = 0;
+	struct mlx5_translated_shared_action *shared_end = NULL;
+
+	/* First pass: record each shared action handle and its position. */
+	for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
+		if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
+			continue;
+		if (copied_n == *shared_n) {
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				 NULL, "too many shared actions");
+		}
+		/* For a SHARED action, conf carries the action handle. */
+		rte_memcpy(&shared[copied_n].action, &actions[n].conf,
+			   sizeof(actions[n].conf));
+		shared[copied_n].index = n;
+		copied_n++;
+	}
+	n++; /* Include the terminating END action in the copy. */
+	*shared_n = copied_n;
+	if (!copied_n)
+		return 0;
+	translated = rte_calloc(__func__, n, sizeof(struct rte_flow_action), 0);
+	if (!translated)
+		/* Fix: previously the allocation result was dereferenced
+		 * unchecked, crashing on out-of-memory.
+		 */
+		return rte_flow_error_set
+			(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			 NULL, "cannot allocate shared actions translation");
+	rte_memcpy(translated, actions, n * sizeof(struct rte_flow_action));
+	/* Second pass: replace each SHARED entry with its base action. */
+	for (shared_end = shared + copied_n; shared < shared_end; shared++) {
+		const struct rte_flow_shared_action *shared_action;
+
+		shared_action = shared->action;
+		switch (shared_action->type) {
+		case MLX5_FLOW_ACTION_SHARED_RSS:
+			translated[shared->index].type =
+				RTE_FLOW_ACTION_TYPE_RSS;
+			translated[shared->index].conf =
+				&shared_action->rss.origin;
+			break;
+		default:
+			rte_free(translated);
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				 NULL, "invalid shared action type");
+		}
+	}
+	*translated_actions = translated;
+	return 0;
+}
+
+/**
+ * Get Shared RSS action from the action list.
+ *
+ * @param[in] shared
+ * Pointer to the list of actions.
+ * @param[in] shared_n
+ * Actions list length.
+ *
+ * @return
+ * Pointer to the MLX5 RSS action if it exists (with its reference count
+ * incremented), otherwise NULL.
+ */
+static struct mlx5_shared_action_rss *
+flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared,
+			   int shared_n)
+{
+	struct mlx5_translated_shared_action *shared_end;
+
+	for (shared_end = shared + shared_n; shared < shared_end; shared++) {
+		struct rte_flow_shared_action *shared_action;
+
+		shared_action = shared->action;
+		switch (shared_action->type) {
+		case MLX5_FLOW_ACTION_SHARED_RSS:
+			/*
+			 * Take a reference on behalf of the flow using this
+			 * action; presumably released on flow destruction —
+			 * TODO(review): confirm the matching decrement.
+			 */
+			rte_atomic32_inc(&shared_action->refcnt);
+			return &shared_action->rss;
+		default:
+			break;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Return the shared action handle embedding the flow's shared RSS context,
+ * or NULL when the flow does not use a shared RSS action.
+ */
+struct rte_flow_shared_action *
+mlx5_flow_get_shared_rss(struct rte_flow *flow)
+{
+	if (!flow->shared_rss)
+		return NULL;
+	return container_of(flow->shared_rss,
+			    struct rte_flow_shared_action, rss);
+}
+
static unsigned int
find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
@@ -4328,13 +4508,16 @@ static uint32_t
flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
+ const struct rte_flow_action original_actions[],
bool external, struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
struct mlx5_flow *dev_flow;
const struct rte_flow_action_rss *rss;
+ struct mlx5_translated_shared_action
+ shared_actions[MLX5_MAX_SHARED_ACTIONS];
+ int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
union {
struct rte_flow_expand_rss buf;
uint8_t buffer[2048];
@@ -4354,14 +4537,23 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
struct rte_flow_expand_rss *buf = &expand_buffer.buf;
struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
priv->rss_desc)[!!priv->flow_idx];
- const struct rte_flow_action *p_actions_rx = actions;
+ const struct rte_flow_action *p_actions_rx;
uint32_t i;
uint32_t idx = 0;
int hairpin_flow;
uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
- int ret;
+ const struct rte_flow_action *actions;
+ struct rte_flow_action *translated_actions = NULL;
+ int ret = flow_shared_actions_translate(original_actions,
+ shared_actions,
+ &shared_actions_n,
+ &translated_actions, error);
+ if (ret < 0)
+ return 0;
+ actions = (translated_actions) ? translated_actions : original_actions;
+ p_actions_rx = actions;
hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
ret = flow_drv_validate(dev, attr, items, p_actions_rx,
external, hairpin_flow, error);
@@ -4413,6 +4605,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
+ flow->shared_rss = flow_get_shared_rss_action(shared_actions,
+ shared_actions_n);
/*
* Record the start index when there is a nested call. All sub-flows
* need to be translated before another calling.
@@ -4484,6 +4678,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
flow, next);
flow_rxq_flags_set(dev, flow);
+ rte_free(translated_actions);
/* Nested flow creation index recovery. */
priv->flow_idx = priv->flow_nested_idx;
if (priv->flow_nested_idx)
@@ -4498,6 +4693,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
rte_errno = ret; /* Restore rte_errno. */
error_before_flow:
ret = rte_errno;
+ rte_free(translated_actions);
if (hairpin_id)
mlx5_flow_id_release(priv->sh->flow_id_pool,
hairpin_id);
@@ -6296,3 +6492,235 @@ mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
dev->data->port_id);
return -ENOTSUP;
}
+
+/**
+ * Retrieve driver ops struct.
+ *
+ * @param[in] dev
+ * Pointer to the dev structure.
+ * @param[in] error_message
+ * Error message to set if driver ops struct not found.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * Pointer to driver ops on success, otherwise NULL and rte_errno is set.
+ */
+static const struct mlx5_flow_driver_ops *
+flow_drv_dv_ops_get(struct rte_eth_dev *dev,
+		    const char *error_message,
+		    struct rte_flow_error *error)
+{
+	struct rte_flow_attr attr = { .transfer = 0 };
+
+	/* Shared actions are served by the DV flow engine only. */
+	if (flow_get_drv_type(dev, &attr) != MLX5_FLOW_TYPE_DV) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, error_message);
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, error_message);
+		return NULL;
+	}
+
+	return flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+}
+
+/* Wrapper for driver action_validate op callback */
+static int
+flow_drv_action_validate(struct rte_eth_dev *dev,
+			 const struct rte_flow_action *action,
+			 struct rte_flow_error *error)
+{
+	/* fops is NULL for a non-DV engine; rte_errno was set by the getter. */
+	const struct mlx5_flow_driver_ops *fops = flow_drv_dv_ops_get(dev,
+			"action registration unsupported", error);
+	return (fops) ? fops->action_validate(dev, action, error) : -rte_errno;
+}
+
+/* Wrapper for driver action_create op callback */
+static struct rte_flow_shared_action *
+flow_drv_action_create(struct rte_eth_dev *dev,
+		       const struct rte_flow_action *action,
+		       struct rte_flow_error *error)
+{
+	/* fops is NULL for a non-DV engine; rte_errno was set by the getter. */
+	const struct mlx5_flow_driver_ops *fops = flow_drv_dv_ops_get(dev,
+			"action registration unsupported", error);
+	return (fops) ? fops->action_create(dev, action, error) : NULL;
+}
+
+/**
+ * Destroys the shared action by handle.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Handle for the shared action to be destroyed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ *
+ * @note: wrapper for driver action_destroy op callback.
+ */
+static int
+mlx5_shared_action_destroy(struct rte_eth_dev *dev,
+			   struct rte_flow_shared_action *action,
+			   struct rte_flow_error *error)
+{
+	/* fops is NULL for a non-DV engine; rte_errno was set by the getter. */
+	const struct mlx5_flow_driver_ops *fops = flow_drv_dv_ops_get(dev,
+			"action registration unsupported", error);
+	return (fops) ? fops->action_destroy(dev, action, error) : -rte_errno;
+}
+
+/* Wrapper for driver action_update op callback */
+static int
+flow_drv_action_update(struct rte_eth_dev *dev,
+		       struct rte_flow_shared_action *action,
+		       const void *action_conf,
+		       struct rte_flow_error *error)
+{
+	/* fops is NULL for a non-DV engine; rte_errno was set by the getter. */
+	const struct mlx5_flow_driver_ops *fops = flow_drv_dv_ops_get(dev,
+			"action registration unsupported", error);
+	return (fops) ? fops->action_update(dev, action,
+					    action_conf, error)
+		      : -rte_errno;
+}
+
+/**
+ * Create shared action for reuse in multiple flow rules.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration for shared action creation.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ * @return
+ * A valid handle in case of success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_shared_action *
+mlx5_shared_action_create(struct rte_eth_dev *dev,
+			  const struct rte_flow_action *action,
+			  struct rte_flow_error *error)
+{
+	/* Reject invalid configurations before instantiating the action. */
+	int ret = flow_drv_action_validate(dev, action, error);
+
+	if (ret)
+		return NULL;
+	return flow_drv_action_create(dev, action, error);
+}
+
+/**
+ * Updates inplace the shared action configuration pointed by *action* handle
+ * with the configuration provided as *update* argument.
+ * The update of the shared action configuration effects all flow rules reusing
+ * the action via handle.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Handle for the shared action to be updated.
+ * @param[in] update
+ * Action specification used to modify the action pointed by handle.
+ * *update* should be of same type with the action pointed by the *action*
+ * handle argument, otherwise considered as invalid.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_shared_action_update(struct rte_eth_dev *dev,
+		struct rte_flow_shared_action *shared_action,
+		const struct rte_flow_action *action,
+		struct rte_flow_error *error)
+{
+	int ret;
+
+	switch (shared_action->type) {
+	case MLX5_FLOW_ACTION_SHARED_RSS:
+		/* A shared RSS action may only be updated with RSS conf. */
+		if (action->type != RTE_FLOW_ACTION_TYPE_RSS) {
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  NULL,
+						  "update action type invalid");
+		}
+		/* Validate the replacement conf before applying it. */
+		ret = flow_drv_action_validate(dev, action, error);
+		if (ret)
+			return ret;
+		return flow_drv_action_update(dev, shared_action, action->conf,
+					      error);
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  NULL,
+					  "action type not supported");
+	}
+}
+
+/**
+ * Query the shared action by handle.
+ *
+ * This function allows retrieving action-specific data such as counters.
+ * Data is gathered by special action which may be present/referenced in
+ * more than one flow rule definition.
+ *
+ * \see RTE_FLOW_ACTION_TYPE_COUNT
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Handle for the shared action to query.
+ * @param[in, out] data
+ * Pointer to storage for the associated query data type.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_shared_action_query(struct rte_eth_dev *dev,
+			 const struct rte_flow_shared_action *action,
+			 void *data,
+			 struct rte_flow_error *error)
+{
+	(void)dev;
+	switch (action->type) {
+	case MLX5_FLOW_ACTION_SHARED_RSS:
+		/*
+		 * The only queryable datum for shared RSS is the reference
+		 * count; assumes *data* points at an int32_t —
+		 * TODO(review): confirm against the rte_flow query contract.
+		 */
+		*((int32_t *)data) = rte_atomic32_read(&action->refcnt);
+		return 0;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  NULL,
+					  "action type not supported");
+	}
+}
+
+/**
+ * Destroy all shared actions.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_shared_action_flush(struct rte_eth_dev *dev)
+{
+	struct rte_flow_error error;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_shared_action *action;
+	int ret = 0;
+
+	while (!LIST_EMPTY(&priv->shared_actions)) {
+		action = LIST_FIRST(&priv->shared_actions);
+		ret = mlx5_shared_action_destroy(dev, action, &error);
+		if (ret)
+			/*
+			 * Fix: a failed destroy leaves the action at the list
+			 * head; retrying it forever would spin. Stop and
+			 * report the error instead.
+			 */
+			break;
+	}
+	return ret;
+}
@@ -202,6 +202,7 @@ enum mlx5_feature_name {
#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
#define MLX5_FLOW_ACTION_AGE (1ull << 34)
#define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
+#define MLX5_FLOW_ACTION_SHARED_RSS (1ull << 36)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
@@ -821,6 +822,7 @@ struct mlx5_fdir_flow {
/* Flow structure. */
struct rte_flow {
ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
+	struct mlx5_shared_action_rss *shared_rss; /**< Shared RSS action. */
uint32_t dev_handles;
/**< Device flow handles that are part of the flow. */
uint32_t drv_type:2; /**< Driver type. */
@@ -834,6 +836,62 @@ struct rte_flow {
uint16_t meter; /**< Holds flow meter id. */
} __rte_packed;
+/*
+ * Define list of valid combinations of RX Hash fields
+ * (see enum ibv_rx_hash_fields).
+ * Note: both source and destination ports must contribute to the L4
+ * hashes; the original defines OR'ed the SRC_PORT bit twice and dropped
+ * the DST_PORT bit entirely.
+ */
+#define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
+#define MLX5_RSS_HASH_IPV4_TCP \
+	(MLX5_RSS_HASH_IPV4 | \
+	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
+#define MLX5_RSS_HASH_IPV4_UDP \
+	(MLX5_RSS_HASH_IPV4 | \
+	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
+#define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
+#define MLX5_RSS_HASH_IPV6_TCP \
+	(MLX5_RSS_HASH_IPV6 | \
+	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
+#define MLX5_RSS_HASH_IPV6_UDP \
+	(MLX5_RSS_HASH_IPV6 | \
+	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
+#define MLX5_RSS_HASH_NONE 0ULL
+
+/* array of valid combinations of RX Hash fields for RSS */
+static const uint64_t mlx5_rss_hash_fields[] = {
+ MLX5_RSS_HASH_IPV4,
+ MLX5_RSS_HASH_IPV4_TCP,
+ MLX5_RSS_HASH_IPV4_UDP,
+ MLX5_RSS_HASH_IPV6,
+ MLX5_RSS_HASH_IPV6_TCP,
+ MLX5_RSS_HASH_IPV6_UDP,
+ MLX5_RSS_HASH_NONE,
+};
+
+#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
+
+/* Shared RSS action structure */
+struct mlx5_shared_action_rss {
+ struct rte_flow_action_rss origin; /**< Original rte RSS action. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+ uint16_t *queue; /**< Queue indices to use. */
+ uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
+ /**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
+ uint32_t hrxq_tunnel[MLX5_RSS_HASH_FIELDS_LEN];
+ /**< Hash RX queue indexes for tunneled RSS */
+};
+
+struct rte_flow_shared_action {
+ LIST_ENTRY(rte_flow_shared_action) next;
+ /**< Pointer to the next element. */
+ rte_atomic32_t refcnt;
+ uint64_t type;
+ /**< Shared action type (see MLX5_FLOW_ACTION_SHARED_*). */
+ union {
+ struct mlx5_shared_action_rss rss;
+ /**< Shared RSS action. */
+ };
+};
+
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
@@ -888,6 +946,22 @@ typedef int (*mlx5_flow_get_aged_flows_t)
void **context,
uint32_t nb_contexts,
struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_validate_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+typedef struct rte_flow_shared_action *(*mlx5_flow_action_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_update_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
mlx5_flow_prepare_t prepare;
@@ -904,6 +978,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
mlx5_flow_get_aged_flows_t get_aged_flows;
+ mlx5_flow_action_validate_t action_validate;
+ mlx5_flow_action_create_t action_create;
+ mlx5_flow_action_destroy_t action_destroy;
+ mlx5_flow_action_update_t action_update;
};
/* mlx5_flow.c */
@@ -928,6 +1006,9 @@ int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
const struct rte_flow_action *mlx5_flow_find_action
(const struct rte_flow_action *actions,
enum rte_flow_action_type action);
+int mlx5_validate_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -1040,4 +1121,6 @@ int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
struct rte_mtr_error *error);
+struct rte_flow_shared_action *mlx5_flow_get_shared_rss(struct rte_flow *flow);
+int mlx5_shared_action_flush(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_FLOW_H_ */