@@ -80,6 +80,19 @@ rte_flow_conv_copy(void *buf, const void *data, const size_t size,
.desc_fn = fn, \
}
+/*
+ * Size/copy callback for RTE_FLOW_ITEM_TYPE_FLEX, registered via
+ * MK_FLOW_ITEM_FN(FLEX, ...) below for use by the generic item-copy
+ * machinery (rte_flow_conv_copy).  The flex item carries a variable-length
+ * pattern: this helper copies those src->length bytes into the storage
+ * placed immediately after the fixed-size struct (dst + 1) and points
+ * dst->pattern at it.  When buf is NULL, no copy is done and only the
+ * extra storage requirement (src->length) is reported.
+ * NOTE(review): assumes src->pattern is non-NULL whenever src->length > 0
+ * — presumably validated by the caller; confirm against rte_flow_conv().
+ */
+static size_t
+rte_flow_item_flex_conv(void *buf, const void *data)
+{
+ struct rte_flow_item_flex *dst = buf;
+ const struct rte_flow_item_flex *src = data;
+ if (buf) {
+ dst->pattern = rte_memcpy
+ ((void *)((uintptr_t)(dst + 1)), src->pattern,
+ src->length);
+ }
+ return src->length;
+}
+
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
MK_FLOW_ITEM(END, 0),
@@ -141,6 +154,8 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = {
MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
+ MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
+ rte_flow_item_flex_conv),
};
/** Generate flow_action[] entry. */
@@ -1308,3 +1323,61 @@ rte_flow_tunnel_item_release(uint16_t port_id,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOTSUP));
}
+
+/*
+ * Create a flex item on the given port by dispatching to the PMD's
+ * flex_item_create op.  Returns the PMD-provided handle, or NULL with
+ * @error set: ENOTSUP when the op is not implemented (the !ops case is
+ * reported by rte_flow_ops_get()), otherwise the PMD failure is
+ * normalized through flow_err() using the current rte_errno.
+ */
+struct rte_flow_item_flex_handle *
+rte_flow_flex_item_create(uint16_t port_id,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow_item_flex_handle *handle;
+
+ if (unlikely(!ops))
+ return NULL;
+ if (unlikely(!ops->flex_item_create)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ return NULL;
+ }
+ handle = ops->flex_item_create(dev, conf, error);
+ if (handle == NULL)
+ flow_err(port_id, -rte_errno, error);
+ return handle;
+}
+
+/*
+ * Release a flex item handle previously obtained from
+ * rte_flow_flex_item_create() by dispatching to the PMD's
+ * flex_item_release op.  Returns 0 on success or a negative errno-style
+ * value via flow_err(); -ENOTSUP when the PMD does not implement the op.
+ */
+int
+rte_flow_flex_item_release(uint16_t port_id,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops || !ops->flex_item_release))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ ret = ops->flex_item_release(dev, handle, error);
+ return flow_err(port_id, ret, error);
+}
+
+/*
+ * Update the configuration of an existing flex item handle by dispatching
+ * to the PMD's flex_item_update op.  Returns 0 on success or a negative
+ * errno-style value via flow_err(); -ENOTSUP when the PMD does not
+ * implement the op.
+ */
+int
+rte_flow_flex_item_update(uint16_t port_id,
+ const struct rte_flow_item_flex_handle *handle,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops || !ops->flex_item_update))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ ret = ops->flex_item_update(dev, handle, conf, error);
+ return flow_err(port_id, ret, error);
+}
@@ -139,6 +139,19 @@ struct rte_flow_ops {
struct rte_flow_item *pmd_items,
uint32_t num_of_items,
struct rte_flow_error *err);
+ /** See rte_flow_flex_item_create(). */
+ struct rte_flow_item_flex_handle *(*flex_item_create)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
+ /** See rte_flow_flex_item_release(). */
+ int (*flex_item_release)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error);
+ /** See rte_flow_flex_item_update(). */
+ int (*flex_item_update)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_item_flex_handle *handle,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error);
};
/**
@@ -247,6 +247,11 @@ EXPERIMENTAL {
rte_mtr_meter_policy_delete;
rte_mtr_meter_policy_update;
rte_mtr_meter_policy_validate;
+
+ # added in 21.11
+ rte_flow_flex_item_create;
+ rte_flow_flex_item_release;
+ rte_flow_flex_item_update;
};
INTERNAL {