@@ -156,6 +156,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
func mlx5dv_create_flow_action_packet_reformat \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_DR_NS_TYPE_TERMINATING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_IBV_DEVX_OBJ \
infiniband/mlx5dv.h \
func mlx5dv_devx_obj_create \
@@ -762,6 +762,9 @@
int own_domain_id = 0;
uint16_t port_id;
unsigned int i;
+#ifdef HAVE_MLX5DV_DR
+ void *ns;
+#endif
/* Determine if this port representor is supposed to be spawned. */
if (switch_info->representor && dpdk_dev->devargs) {
@@ -1219,6 +1222,24 @@
priv->tcf_context = NULL;
}
}
+#ifdef HAVE_MLX5DV_DR
+ ns = mlx5dv_dr_create_ns(ctx,
+ MLX5DV_DR_NS_DOMAIN_INGRESS_BYPASS);
+ if (ns == NULL) {
+ DRV_LOG(ERR, "mlx5dv_dr_create_ns failed");
+ err = errno;
+ goto error;
+ }
+ priv->rx_ns = ns;
+ ns = mlx5dv_dr_create_ns(ctx,
+ MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS);
+ if (ns == NULL) {
+ DRV_LOG(ERR, "mlx5dv_dr_create_ns failed");
+ err = errno;
+ goto error;
+ }
+ priv->tx_ns = ns;
+#endif
TAILQ_INIT(&priv->flows);
TAILQ_INIT(&priv->ctrl_flows);
/* Hint libmlx5 to use PMD allocator for data plane resources */
@@ -187,6 +187,15 @@ struct mlx5_drop {
struct mlx5_flow_tcf_context;
+/* Table structure. */
+struct mlx5_flow_tbl_resource {
+ void *obj; /**< Pointer to DR table object. */
+ rte_atomic32_t refcnt; /**< Reference counter. */
+};
+
+#define MLX5_MAX_TABLES 1024
+#define MLX5_GROUP_FACTOR 1
+
struct mlx5_priv {
LIST_ENTRY(mlx5_priv) mem_event_cb;
/**< Called by memory event callback. */
@@ -259,6 +268,12 @@ struct mlx5_priv {
/* UAR same-page access control required in 32bit implementations. */
#endif
struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
+ void *rx_ns; /* RX Direct Rules name space handle. */
+ struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
+ /* RX Direct Rules tables. */
+ void *tx_ns; /* TX Direct Rules name space handle. */
+ struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
+	/* TX Direct Rules tables. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -2082,6 +2082,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
flow = rte_calloc(__func__, 1, flow_size, 0);
flow->drv_type = flow_get_drv_type(dev, attr);
+ flow->ingress = attr->ingress;
assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
flow->queue = (void *)(flow + 1);
@@ -204,6 +204,7 @@ struct mlx5_flow_dv_matcher {
uint16_t crc; /**< CRC of key. */
uint16_t priority; /**< Priority of matcher. */
uint8_t egress; /**< Egress matcher. */
+ uint32_t group; /**< The matcher group. */
struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};
@@ -220,6 +221,7 @@ struct mlx5_flow_dv_encap_decap_resource {
size_t size;
uint8_t reformat_type;
uint8_t ft_type;
+ uint64_t flags; /**< Flags for RDMA API. */
};
/* Tag resource structure. */
@@ -348,7 +350,7 @@ struct mlx5_flow_counter {
/* Flow structure. */
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- enum mlx5_flow_drv_type drv_type; /**< Drvier type. */
+ enum mlx5_flow_drv_type drv_type; /**< Driver type. */
struct mlx5_flow_counter *counter; /**< Holds flow counter. */
struct mlx5_flow_dv_tag_resource *tag_resource;
/**< pointer to the tag action. */
@@ -360,6 +362,8 @@ struct rte_flow {
uint64_t actions;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
+ uint8_t ingress; /**< 1 if the flow is ingress. */
+ uint32_t group; /**< The group index. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
@@ -805,11 +805,20 @@ struct field_modify_info modify_tcp[] = {
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5dv_dr_ns *ns;
+
+ resource->flags = flow->group ? 0 : 1;
+ if (flow->ingress)
+ ns = priv->rx_ns;
+ else
+ ns = priv->tx_ns;
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
if (resource->reformat_type == cache_resource->reformat_type &&
resource->ft_type == cache_resource->ft_type &&
+ resource->flags == cache_resource->flags &&
resource->size == cache_resource->size &&
!memcmp((const void *)resource->buf,
(const void *)cache_resource->buf,
@@ -831,10 +840,10 @@ struct field_modify_info modify_tcp[] = {
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_packet_reformat
- (priv->ctx, cache_resource->size,
- (cache_resource->size ? cache_resource->buf : NULL),
- cache_resource->reformat_type,
- cache_resource->ft_type);
+ (priv->ctx, cache_resource->reformat_type,
+ cache_resource->ft_type, ns, cache_resource->flags,
+ cache_resource->size,
+ (cache_resource->size ? cache_resource->buf : NULL));
if (!cache_resource->verbs_action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
@@ -1438,6 +1447,10 @@ struct field_modify_info modify_tcp[] = {
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5dv_dr_ns *ns =
+ resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
+ priv->tx_ns : priv->rx_ns;
+
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
if (resource->ft_type == cache_resource->ft_type &&
@@ -1463,11 +1476,11 @@ struct field_modify_info modify_tcp[] = {
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_modify_header
- (priv->ctx,
+ (priv->ctx, cache_resource->ft_type,
+ ns, 0,
cache_resource->actions_num *
sizeof(cache_resource->actions[0]),
- (uint64_t *)cache_resource->actions,
- cache_resource->ft_type);
+ (uint64_t *)cache_resource->actions);
if (!cache_resource->verbs_action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
@@ -1592,11 +1605,13 @@ struct field_modify_info modify_tcp[] = {
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t priority_max = priv->config.flow_prio - 1;
+#ifdef HAVE_MLX5DV_DR
if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL,
"groups is not supported");
+#endif
if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
attributes->priority >= priority_max)
return rte_flow_error_set(error, ENOTSUP,
@@ -2723,7 +2738,11 @@ struct field_modify_info modify_tcp[] = {
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-
+#ifdef HAVE_MLX5DV_DR
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+#endif
return match_criteria_enable;
}
@@ -2754,12 +2773,14 @@ struct field_modify_info modify_tcp[] = {
.type = IBV_FLOW_ATTR_NORMAL,
.match_mask = (void *)&matcher->mask,
};
+ struct mlx5_flow_tbl_resource *tbl;
/* Lookup from cache. */
LIST_FOREACH(cache_matcher, &priv->matchers, next) {
if (matcher->crc == cache_matcher->crc &&
matcher->priority == cache_matcher->priority &&
matcher->egress == cache_matcher->egress &&
+ matcher->group == cache_matcher->group &&
!memcmp((const void *)matcher->mask.buf,
(const void *)cache_matcher->mask.buf,
cache_matcher->mask.size)) {
@@ -2774,6 +2795,27 @@ struct field_modify_info modify_tcp[] = {
return 0;
}
}
+#ifdef HAVE_MLX5DV_DR
+ if (matcher->egress) {
+ tbl = &priv->tx_tbl[matcher->group];
+ if (!tbl->obj)
+ tbl->obj = mlx5_glue->dr_create_flow_tbl
+ (priv->tx_ns,
+ matcher->group * MLX5_GROUP_FACTOR);
+ } else {
+ tbl = &priv->rx_tbl[matcher->group];
+ if (!tbl->obj)
+ tbl->obj = mlx5_glue->dr_create_flow_tbl
+ (priv->rx_ns,
+ matcher->group * MLX5_GROUP_FACTOR);
+ }
+ if (!tbl->obj)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create table");
+
+ rte_atomic32_inc(&tbl->refcnt);
+#endif
/* Register new matcher. */
cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
if (!cache_matcher)
@@ -2787,9 +2829,16 @@ struct field_modify_info modify_tcp[] = {
if (matcher->egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
+ mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr,
+ tbl->obj);
if (!cache_matcher->matcher_object) {
rte_free(cache_matcher);
+#ifdef HAVE_MLX5DV_DR
+ if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ tbl->obj = NULL;
+ }
+#endif
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create matcher");
@@ -2801,6 +2850,7 @@ struct field_modify_info modify_tcp[] = {
cache_matcher->priority,
cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
+ rte_atomic32_inc(&tbl->refcnt);
return 0;
}
@@ -3276,6 +3326,7 @@ struct field_modify_info modify_tcp[] = {
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
matcher.egress = attr->egress;
+ matcher.group = attr->group;
if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
return -rte_errno;
return 0;
@@ -3391,6 +3442,8 @@ struct field_modify_info modify_tcp[] = {
struct mlx5_flow *flow)
{
struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tbl_resource *tbl;
assert(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -3400,6 +3453,14 @@ struct field_modify_info modify_tcp[] = {
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
+ if (matcher->egress)
+ tbl = &priv->tx_tbl[matcher->group];
+ else
+ tbl = &priv->rx_tbl[matcher->group];
+ if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ tbl->obj = NULL;
+ }
rte_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
@@ -3489,7 +3550,7 @@ struct field_modify_info modify_tcp[] = {
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
dv = &dev_flow->dv;
if (dv->flow) {
- claim_zero(mlx5_glue->destroy_flow(dv->flow));
+ claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
dv->flow = NULL;
}
if (dv->hrxq) {
@@ -178,6 +178,9 @@
mlx5_glue_destroy_flow_action(void *action)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_action(action);
+#else
struct mlx5dv_flow_action_attr *attr = action;
int res = 0;
switch (attr->type) {
@@ -189,6 +192,7 @@
}
free(action);
return res;
+#endif
#else
(void)action;
return ENOTSUP;
@@ -365,6 +369,53 @@
return ibv_cq_ex_to_cq(cq);
}
+static void *
+mlx5_glue_dr_create_flow_tbl(void *ns, uint32_t level)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_ft(ns, level);
+#else
+ (void)ns;
+ (void)level;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_ft(tbl);
+#else
+ (void)tbl;
+ return 0;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_ns(struct ibv_context *ctx,
+ enum mlx5dv_dr_ns_domain domain)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_ns(ctx, domain);
+#else
+ (void)ctx;
+ (void)domain;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_ns(void *ns)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_ns(ns);
+#else
+ (void)ns;
+ return 0;
+#endif
+}
+
static struct ibv_cq_ex *
mlx5_glue_dv_create_cq(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
@@ -423,26 +474,40 @@
#endif
}
-static struct mlx5dv_flow_matcher *
+static void *
mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
- struct mlx5dv_flow_matcher_attr *matcher_attr)
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)context;
+ return mlx5dv_dr_create_matcher(tbl, matcher_attr->priority,
+ matcher_attr->match_criteria_enable,
+ matcher_attr->match_mask);
+#else
+ (void)tbl;
return mlx5dv_create_flow_matcher(context, matcher_attr);
+#endif
#else
(void)context;
(void)matcher_attr;
+ (void)tbl;
return NULL;
#endif
}
-static struct ibv_flow *
-mlx5_glue_dv_create_flow(struct mlx5dv_flow_matcher *matcher,
- struct mlx5dv_flow_match_parameters *match_value,
+static void *
+mlx5_glue_dv_create_flow(void *matcher,
+ void *match_value,
size_t num_actions,
void *actions[])
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_rule(matcher, match_value, num_actions,
+ (struct mlx5dv_dr_action **)actions);
+#else
struct mlx5dv_flow_action_attr actions_attr[8];
if (num_actions > 8)
@@ -452,6 +517,7 @@
*((struct mlx5dv_flow_action_attr *)(actions[i]));
return mlx5dv_create_flow(matcher, match_value,
num_actions, actions_attr);
+#endif
#else
(void)matcher;
(void)match_value;
@@ -461,21 +527,13 @@
#endif
}
-static int
-mlx5_glue_dv_destroy_flow_matcher(struct mlx5dv_flow_matcher *matcher)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- return mlx5dv_destroy_flow_matcher(matcher);
-#else
- (void)matcher;
- return 0;
-#endif
-}
-
static void *
mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_action_devx_counter(counter_obj, offset);
+#else
struct mlx5dv_flow_action_attr *action;
(void)offset;
@@ -485,6 +543,7 @@
action->type = MLX5DV_FLOW_ACTION_COUNTER_DEVX;
action->obj = counter_obj;
return action;
+#endif
#else
(void)counter_obj;
(void)offset;
@@ -496,6 +555,9 @@
mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_action_dest_ibv_qp(qp);
+#else
struct mlx5dv_flow_action_attr *action;
action = malloc(sizeof(*action));
@@ -504,6 +566,7 @@
action->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
action->obj = qp;
return action;
+#endif
#else
(void)qp;
return NULL;
@@ -513,13 +576,22 @@
static void *
mlx5_glue_dv_create_flow_action_modify_header
(struct ibv_context *ctx,
+ enum mlx5dv_flow_table_type ft_type,
+ void *ns, uint64_t flags,
size_t actions_sz,
- uint64_t actions[],
- enum mlx5dv_flow_table_type ft_type)
+ uint64_t actions[])
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_create_action_modify_header(ns, flags, actions_sz,
+ actions);
+#else
struct mlx5dv_flow_action_attr *action;
+ (void)ns;
+ (void)flags;
action = malloc(sizeof(*action));
if (!action)
return NULL;
@@ -527,11 +599,14 @@
action->action = mlx5dv_create_flow_action_modify_header
(ctx, actions_sz, actions, ft_type);
return action;
+#endif
#else
(void)ctx;
+ (void)ft_type;
+ (void)ns;
+ (void)flags;
(void)actions_sz;
(void)actions;
- (void)ft_type;
return NULL;
#endif
}
@@ -539,12 +614,20 @@
static void *
mlx5_glue_dv_create_flow_action_packet_reformat
(struct ibv_context *ctx,
- size_t data_sz,
- void *data,
enum mlx5dv_flow_action_packet_reformat_type reformat_type,
- enum mlx5dv_flow_table_type ft_type)
+ enum mlx5dv_flow_table_type ft_type, struct mlx5dv_dr_ns *ns,
+ uint32_t flags, size_t data_sz, void *data)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_create_action_packet_reformat(ns, flags,
+ reformat_type, data_sz,
+ data);
+#else
+ (void)ns;
+ (void)flags;
struct mlx5dv_flow_action_attr *action;
action = malloc(sizeof(*action));
@@ -554,12 +637,15 @@
action->action = mlx5dv_create_flow_action_packet_reformat
(ctx, data_sz, data, reformat_type, ft_type);
return action;
+#endif
#else
(void)ctx;
- (void)data_sz;
- (void)data;
(void)reformat_type;
(void)ft_type;
+ (void)ns;
+ (void)flags;
+ (void)data_sz;
+ (void)data;
return NULL;
#endif
}
@@ -568,6 +654,9 @@
mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_action_tag(tag);
+#else
struct mlx5dv_flow_action_attr *action;
action = malloc(sizeof(*action));
if (!action)
@@ -576,10 +665,36 @@
action->tag_value = tag;
return action;
#endif
+#endif
(void)tag;
return NULL;
}
+static int
+mlx5_glue_dv_destroy_flow(void *flow_id)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_rule(flow_id);
+#else
+ return ibv_destroy_flow(flow_id);
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow_matcher(void *matcher)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_matcher(matcher);
+#else
+ return mlx5dv_destroy_flow_matcher(matcher);
+#endif
+#else
+ (void)matcher;
+ return 0;
+#endif
+}
+
static struct ibv_context *
mlx5_glue_dv_open_device(struct ibv_device *device)
{
@@ -718,6 +833,10 @@
.get_async_event = mlx5_glue_get_async_event,
.port_state_str = mlx5_glue_port_state_str,
.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,
+ .dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
+ .dr_create_ns = mlx5_glue_dr_create_ns,
+ .dr_destroy_ns = mlx5_glue_dr_destroy_ns,
.dv_create_cq = mlx5_glue_dv_create_cq,
.dv_create_wq = mlx5_glue_dv_create_wq,
.dv_query_device = mlx5_glue_dv_query_device,
@@ -725,7 +844,6 @@
.dv_init_obj = mlx5_glue_dv_init_obj,
.dv_create_qp = mlx5_glue_dv_create_qp,
.dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
- .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
.dv_create_flow = mlx5_glue_dv_create_flow,
.dv_create_flow_action_counter =
mlx5_glue_dv_create_flow_action_counter,
@@ -736,6 +854,8 @@
.dv_create_flow_action_packet_reformat =
mlx5_glue_dv_create_flow_action_packet_reformat,
.dv_create_flow_action_tag = mlx5_glue_dv_create_flow_action_tag,
+ .dv_destroy_flow = mlx5_glue_dv_destroy_flow,
+ .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
.dv_open_device = mlx5_glue_dv_open_device,
.devx_obj_create = mlx5_glue_devx_obj_create,
.devx_obj_destroy = mlx5_glue_devx_obj_destroy,
@@ -63,6 +63,11 @@
struct mlx5dv_devx_obj;
#endif
+#ifndef HAVE_MLX5DV_DR
+struct mlx5dv_dr_ns;
+enum mlx5dv_dr_ns_domain { unused, };
+#endif
+
/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
struct mlx5_glue {
const char *version;
@@ -140,6 +145,11 @@ struct mlx5_glue {
struct ibv_async_event *event);
const char *(*port_state_str)(enum ibv_port_state port_state);
struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ void *(*dr_create_flow_tbl)(void *ns, uint32_t level);
+ int (*dr_destroy_flow_tbl)(void *tbl);
+ void *(*dr_create_ns)(struct ibv_context *ctx,
+ enum mlx5dv_dr_ns_domain domain);
+ int (*dr_destroy_ns)(void *ns);
struct ibv_cq_ex *(*dv_create_cq)
(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
@@ -158,23 +168,26 @@ struct mlx5_glue {
(struct ibv_context *context,
struct ibv_qp_init_attr_ex *qp_init_attr_ex,
struct mlx5dv_qp_init_attr *dv_qp_init_attr);
- struct mlx5dv_flow_matcher *(*dv_create_flow_matcher)
+ void *(*dv_create_flow_matcher)
(struct ibv_context *context,
- struct mlx5dv_flow_matcher_attr *matcher_attr);
- int (*dv_destroy_flow_matcher)(struct mlx5dv_flow_matcher *matcher);
- struct ibv_flow *(*dv_create_flow)(struct mlx5dv_flow_matcher *matcher,
- struct mlx5dv_flow_match_parameters *match_value,
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl);
+ void *(*dv_create_flow)(void *matcher, void *match_value,
size_t num_actions, void *actions[]);
void *(*dv_create_flow_action_counter)(void *obj, uint32_t offset);
void *(*dv_create_flow_action_dest_ibv_qp)(void *qp);
void *(*dv_create_flow_action_modify_header)
- (struct ibv_context *ctx, size_t actions_sz, uint64_t actions[],
- enum mlx5dv_flow_table_type ft_type);
+ (struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,
+ void *ns, uint64_t flags, size_t actions_sz,
+ uint64_t actions[]);
void *(*dv_create_flow_action_packet_reformat)
- (struct ibv_context *ctx, size_t data_sz, void *data,
+ (struct ibv_context *ctx,
enum mlx5dv_flow_action_packet_reformat_type reformat_type,
- enum mlx5dv_flow_table_type ft_type);
+ enum mlx5dv_flow_table_type ft_type, struct mlx5dv_dr_ns *ns,
+ uint32_t flags, size_t data_sz, void *data);
void *(*dv_create_flow_action_tag)(uint32_t tag);
+ int (*dv_destroy_flow)(void *flow);
+ int (*dv_destroy_flow_matcher)(void *matcher);
struct ibv_context *(*dv_open_device)(struct ibv_device *device);
struct mlx5dv_devx_obj *(*devx_obj_create)
(struct ibv_context *ctx,
@@ -492,20 +492,40 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+ u8 outer_tcp_seq_num[0x20];
+ u8 inner_tcp_ack_num[0x20];
+ u8 outer_tcp_ack_num[0x20];
+ u8 reserved_at_auto1[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_a8[0x10];
+ u8 icmp_header_data[0x20];
+ u8 icmpv6_header_data[0x20];
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+ u8 reserved_at_1a0[0xe0];
+};
+
/* Flow matcher. */
struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
- u8 reserved_at_800[0x800];
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
};
enum {
MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
- MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT
};
enum {