[dpdk-dev,v8,1/4] librte_flow_classify: add flow classify library

Message ID 1508271970-2229-2-git-send-email-bernard.iremonger@intel.com (mailing list archive)
State Superseded, archived
Headers

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation fail Compilation issues

Commit Message

Iremonger, Bernard Oct. 17, 2017, 8:26 p.m. UTC
  From: Ferruh Yigit <ferruh.yigit@intel.com>

The following APIs's are implemented in the
librte_flow_classify library:

rte_flow_classifier_create
rte_flow_classifier_free
rte_flow_classifier_query
rte_flow_classifier_run
rte_flow_classify_table_create
rte_flow_classify_table_entry_add
rte_flow_classify_table_entry_delete
rte_flow_classify_validate

The following librte_table API's are used:
f_create to create a table.
f_add to add a rule to the table.
f_del to delete a rule from the table.
f_lookup to match packets with the rules.

The library supports counting of IPv4 five-tuple packets only,
i.e. IPv4 UDP, TCP and SCTP packets.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 MAINTAINERS                                        |   7 +
 config/common_base                                 |   6 +
 doc/api/doxy-api-index.md                          |   1 +
 doc/api/doxy-api.conf                              |   1 +
 lib/Makefile                                       |   3 +
 lib/librte_eal/common/include/rte_log.h            |   1 +
 lib/librte_flow_classify/Makefile                  |  51 ++
 lib/librte_flow_classify/rte_flow_classify.c       | 735 +++++++++++++++++++++
 lib/librte_flow_classify/rte_flow_classify.h       | 321 +++++++++
 lib/librte_flow_classify/rte_flow_classify_parse.c | 546 +++++++++++++++
 lib/librte_flow_classify/rte_flow_classify_parse.h |  74 +++
 .../rte_flow_classify_version.map                  |  14 +
 mk/rte.app.mk                                      |   1 +
 13 files changed, 1761 insertions(+)
 create mode 100644 lib/librte_flow_classify/Makefile
 create mode 100644 lib/librte_flow_classify/rte_flow_classify.c
 create mode 100644 lib/librte_flow_classify/rte_flow_classify.h
 create mode 100644 lib/librte_flow_classify/rte_flow_classify_parse.c
 create mode 100644 lib/librte_flow_classify/rte_flow_classify_parse.h
 create mode 100644 lib/librte_flow_classify/rte_flow_classify_version.map
  

Comments

Jasvinder Singh Oct. 19, 2017, 2:22 p.m. UTC | #1
<snip>

> +
> +struct acl_keys {
> +	struct rte_table_acl_rule_add_params key_add; /**< add key */
> +	struct rte_table_acl_rule_delete_params	key_del; /**< delete
> key */
> +};
> +
> +struct rte_flow_classify_rule {
> +	uint32_t id; /**< unique ID of classify rule */
> +	enum rte_flow_classify_rule_type rule_type; /**< classify rule type
> */
> +	struct rte_flow_action action; /**< action when match found */
> +	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple; /**< ipv4 5tuple */
> +	union {
> +		struct acl_keys key;
> +	} u;
> +	int key_found;   /**< rule key found in table */
> +	void *entry;     /**< pointer to buffer to hold rule meta data*/
> +	void *entry_ptr; /**< handle to the table entry for rule meta data*/
> +};

In my opinion, the above struct should have the provision to accommodate other types of rules, not only ipv4_5tuples.
Making this structure modular will help in extending it for other rule types in the future.

> +int
> +rte_flow_classify_validate(
> +		   const struct rte_flow_attr *attr,
> +		   const struct rte_flow_item pattern[],
> +		   const struct rte_flow_action actions[],
> +		   struct rte_flow_error *error)
> +{
> +	struct rte_flow_item *items;
> +	parse_filter_t parse_filter;
> +	uint32_t item_num = 0;
> +	uint32_t i = 0;
> +	int ret;
> +
> +	if (!error)
> +		return -EINVAL;
> +
> +	if (!pattern) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "NULL pattern.");
> +		return -EINVAL;
> +	}
> +
> +	if (!actions) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +				   NULL, "NULL action.");
> +		return -EINVAL;
> +	}
> +
> +	if (!attr) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR,
> +				   NULL, "NULL attribute.");
> +		return -EINVAL;
> +	}
> +
> +	memset(&ntuple_filter, 0, sizeof(ntuple_filter));
> +
> +	/* Get the non-void item number of pattern */
> +	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> +		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
> +			item_num++;
> +		i++;
> +	}
> +	item_num++;
> +
> +	items = malloc(item_num * sizeof(struct rte_flow_item));
> +	if (!items) {
> +		rte_flow_error_set(error, ENOMEM,
> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "No memory for pattern items.");
> +		return -ENOMEM;
> +	}
> +
> +	memset(items, 0, item_num * sizeof(struct rte_flow_item));
> +	classify_pattern_skip_void_item(items, pattern);
> +
> +	parse_filter = classify_find_parse_filter_func(items);
> +	if (!parse_filter) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM,
> +				   pattern, "Unsupported pattern");
> +		free(items);
> +		return -EINVAL;
> +	}
> +
> +	ret = parse_filter(attr, items, actions, &ntuple_filter, error);
> +	free(items);
> +	return ret;
> +}

This function mainly parses the flow pattern, actions, etc. and fills the entries in the
internal ntuple_filter. It is invoked only in flow_entry_add(). Is there any reason to
make this function available as a public API?

> +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> +#define uint32_t_to_char(ip, a, b, c, d) do {\
> +		*a = (unsigned char)(ip >> 24 & 0xff);\
> +		*b = (unsigned char)(ip >> 16 & 0xff);\
> +		*c = (unsigned char)(ip >> 8 & 0xff);\
> +		*d = (unsigned char)(ip & 0xff);\
> +	} while (0)
> +
> +static inline void
> +print_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
> +{
> +	unsigned char a, b, c, d;
> +
> +	printf("ipv4_key_add: 0x%02hhx/0x%hhx ",
> +		key->field_value[PROTO_FIELD_IPV4].value.u8,
> +		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
> +
> +	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
> +			&a, &b, &c, &d);
> +	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> +			key-
> >field_value[SRC_FIELD_IPV4].mask_range.u32);
> +
> +	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
> +			&a, &b, &c, &d);
> +	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> +			key-
> >field_value[DST_FIELD_IPV4].mask_range.u32);
> +
> +	printf("%hu : 0x%x %hu : 0x%x",
> +		key->field_value[SRCP_FIELD_IPV4].value.u16,
> +		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
> +		key->field_value[DSTP_FIELD_IPV4].value.u16,
> +		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
> +
> +	printf(" priority: 0x%x\n", key->priority);
> +}

The above function is specific to printing ACL table keys. How about making this function a little more generic by
passing parameters to distinguish the rule, table type, etc., and doing the printing?

Same comments for the print_ipv4_key_delete().


> +static inline void
> +print_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
> +{
> +	unsigned char a, b, c, d;
> +
> +	printf("ipv4_key_del: 0x%02hhx/0x%hhx ",
> +		key->field_value[PROTO_FIELD_IPV4].value.u8,
> +		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
> +
> +	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
> +			&a, &b, &c, &d);
> +	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> +			key-
> >field_value[SRC_FIELD_IPV4].mask_range.u32);
> +
> +	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
> +			&a, &b, &c, &d);
> +	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> +			key-
> >field_value[DST_FIELD_IPV4].mask_range.u32);
> +
> +	printf("%hu : 0x%x %hu : 0x%x\n",
> +		key->field_value[SRCP_FIELD_IPV4].value.u16,
> +		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
> +		key->field_value[DSTP_FIELD_IPV4].value.u16,
> +		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
> +}
> +#endif
> +
> +static int
> +rte_flow_classifier_check_params(struct rte_flow_classifier_params
> *params)
> +{
> +	if (params == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: Incorrect value for parameter params\n",
> __func__);
> +		return -EINVAL;
> +	}
> +
> +	/* name */
> +	if (params->name == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: Incorrect value for parameter name\n",
> __func__);
> +		return -EINVAL;
> +	}
> +
> +	/* socket */
> +	if ((params->socket_id < 0) ||
> +	    (params->socket_id >= RTE_MAX_NUMA_NODES)) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: Incorrect value for parameter socket_id\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +struct rte_flow_classifier *
> +rte_flow_classifier_create(struct rte_flow_classifier_params *params)
> +{
> +	struct rte_flow_classifier *cls;
> +	int ret;
> +
> +	/* Check input parameters */
> +	ret = rte_flow_classifier_check_params(params);
> +	if (ret != 0) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: flow classifier params check failed (%d)\n",
> +			__func__, ret);
> +		return NULL;
> +	}
> +
> +	/* Allocate memory for the flow classifier */
> +	cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
> +			sizeof(struct rte_flow_classifier),
> +			RTE_CACHE_LINE_SIZE, params->socket_id);
> +
> +	if (cls == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: flow classifier memory allocation failed\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	/* Save input parameters */
> +	snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
> +			params->name);
> +	cls->socket_id = params->socket_id;
> +	cls->type = params->type;
> +
> +	/* Initialize flow classifier internal data structure */
> +	cls->num_tables = 0;
> +
> +	return cls;
> +}
> +
> +static void
> +rte_flow_classify_table_free(struct rte_table *table)
> +{
> +	if (table->ops.f_free != NULL)
> +		table->ops.f_free(table->h_table);
> +
> +	rte_free(table->default_entry);
> +}

This is an internal function. There is an API for creating a table for a classifier instance, but not for destroying a table. What
if the application requires destroying a specific table of the classifier but wants to keep the classifier instance?

> +int
> +rte_flow_classifier_free(struct rte_flow_classifier *cls)
> +{
> +	uint32_t i;
> +
> +	/* Check input parameters */
> +	if (cls == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: rte_flow_classifier parameter is NULL\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	/* Free tables */
> +	for (i = 0; i < cls->num_tables; i++) {
> +		struct rte_table *table = &cls->tables[i];
> +
> +		rte_flow_classify_table_free(table);
> +	}
> +
> +	/* Free flow classifier memory */
> +	rte_free(cls);
> +
> +	return 0;
> +}
> +
> +static int
> +rte_table_check_params(struct rte_flow_classifier *cls,
> +		struct rte_flow_classify_table_params *params,
> +		uint32_t *table_id)
> +{
> +	if (cls == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: flow classifier parameter is NULL\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +	if (params == NULL) {
> +		RTE_LOG(ERR, CLASSIFY, "%s: params parameter is NULL\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +	if (table_id == NULL) {
> +		RTE_LOG(ERR, CLASSIFY, "%s: table_id parameter is NULL\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	/* ops */
> +	if (params->ops == NULL) {
> +		RTE_LOG(ERR, CLASSIFY, "%s: params->ops is NULL\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	if (params->ops->f_create == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: f_create function pointer is NULL\n", __func__);
> +		return -EINVAL;
> +	}
> +
> +	if (params->ops->f_lookup == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: f_lookup function pointer is NULL\n",
> __func__);
> +		return -EINVAL;
> +	}
> +
> +	/* De we have room for one more table? */
> +	if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: Incorrect value for num_tables parameter\n",
> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +rte_flow_classify_table_create(struct rte_flow_classifier *cls,
> +	struct rte_flow_classify_table_params *params,
> +	uint32_t *table_id)
> +{
> +	struct rte_table *table;
> +	struct rte_flow_classify_table_entry *default_entry;
> +	void *h_table;
> +	uint32_t entry_size, id;
> +	int ret;
> +
> +	/* Check input arguments */
> +	ret = rte_table_check_params(cls, params, table_id);
> +	if (ret != 0)
> +		return ret;
> +
> +	id = cls->num_tables;
> +	table = &cls->tables[id];
> +
> +	/* Allocate space for the default table entry */
> +	entry_size = sizeof(struct rte_flow_classify_table_entry) +
> +			params->table_metadata_size;
> +	default_entry =
> +		(struct rte_flow_classify_table_entry *) rte_zmalloc_socket(
> +		"Flow Classify default entry", entry_size,
> +		RTE_CACHE_LINE_SIZE, cls->socket_id);
> +	if (default_entry == NULL) {
> +		RTE_LOG(ERR, CLASSIFY,
> +			"%s: Failed to allocate default entry\n", __func__);
> +		return -EINVAL;
> +	}

What is the purpose of default_entry? I don't see its usage anywhere in the library.

> +	/* Create the table */
> +	h_table = params->ops->f_create(params->arg_create, cls-
> >socket_id,
> +		entry_size);
> +	if (h_table == NULL) {
> +		rte_free(default_entry);
> +		RTE_LOG(ERR, CLASSIFY, "%s: Table creation failed\n",
> __func__);
> +		return -EINVAL;
> +	}
> +
> +	/* Commit current table to the classifier */
> +	cls->num_tables++;
> +	*table_id = id;
> +
> +	/* Save input parameters */
> +	memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
> +
> +	table->entry_size = entry_size;
> +	table->default_entry = default_entry;
> +
> +	/* Initialize table internal data structure */
> +	table->h_table = h_table;
> +
> +	return 0;
> +}
> +
> +static struct rte_flow_classify_rule *
> +allocate_ipv4_5tuple_rule(void)
> +{
> +	struct rte_flow_classify_rule *rule;
> +
> +	rule = malloc(sizeof(struct rte_flow_classify_rule));
> +	if (!rule)
> +		return rule;
> +
> +	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
> +	rule->id = unique_id++;
> +	rule->rule_type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
> +
> +	memcpy(&rule->action, classify_get_flow_action(),
> +	       sizeof(struct rte_flow_action));
> +
> +	/* key add values */
> +	rule->u.key.key_add.priority = ntuple_filter.priority;
> +	rule-
> >u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
> +			ntuple_filter.proto_mask;
> +	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
> +			ntuple_filter.proto;
> +	rule->ipv4_5tuple.proto = ntuple_filter.proto;
> +	rule->ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
> +
> +	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32
> =
> +			ntuple_filter.src_ip_mask;
> +	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
> +			ntuple_filter.src_ip;
> +	rule->ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
> +	rule->ipv4_5tuple.src_ip = ntuple_filter.src_ip;
> +
> +	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32
> =
> +			ntuple_filter.dst_ip_mask;
> +	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
> +			ntuple_filter.dst_ip;
> +	rule->ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
> +	rule->ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
> +
> +	rule-
> >u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
> +			ntuple_filter.src_port_mask;
> +	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
> +			ntuple_filter.src_port;
> +	rule->ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
> +	rule->ipv4_5tuple.src_port = ntuple_filter.src_port;
> +
> +	rule-
> >u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
> +			ntuple_filter.dst_port_mask;
> +	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
> +			ntuple_filter.dst_port;
> +	rule->ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
> +	rule->ipv4_5tuple.dst_port = ntuple_filter.dst_port;
> +
> +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> +	print_ipv4_key_add(&rule->u.key.key_add);
> +#endif
> +
> +	/* key delete values */
> +	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
> +	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
> +	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
> +
> +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> +	print_ipv4_key_delete(&rule->u.key.key_del);
> +#endif
> +	return rule;
> +}
> +
> +struct rte_flow_classify_rule *
> +rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
> +		uint32_t table_id,
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error)
> +{
> +	struct rte_flow_classify_rule *rule;
> +	struct rte_flow_classify_table_entry *table_entry;
> +	int ret;
> +
> +	if (!error)
> +		return NULL;
> +
> +	if (!cls) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "NULL classifier.");
> +		return NULL;
> +	}
> +
> +	if (table_id >= cls->num_tables) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "invalid table_id.");
> +		return NULL;
> +	}
> +
> +	if (!pattern) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				NULL, "NULL pattern.");
> +		return NULL;
> +	}
> +
> +	if (!actions) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +				NULL, "NULL action.");
> +		return NULL;
> +	}
> +
> +	if (!attr) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ATTR,
> +				NULL, "NULL attribute.");
> +		return NULL;
> +	}
> +
> +	/* parse attr, pattern and actions */
> +	ret = rte_flow_classify_validate(attr, pattern, actions, error);
> +	if (ret < 0)
> +		return NULL;
> +
> +	switch (cls->type) {
> +	case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
> +		rule = allocate_ipv4_5tuple_rule();
> +		if (!rule)
> +			return NULL;
> +		break;
> +	default:
> +		return NULL;
> +	}
> +
> +	rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
> +	if (!rule->entry) {
> +		free(rule);
> +		rule = NULL;
> +		return NULL;
> +	}
> +
> +	table_entry = rule->entry;
> +	table_entry->rule_id = rule->id;
> +
> +	ret = cls->tables[table_id].ops.f_add(
> +			cls->tables[table_id].h_table,
> +			&rule->u.key.key_add,
> +			rule->entry,
> +			&rule->key_found,
> +			&rule->entry_ptr);
> +	if (ret) {
> +		free(rule->entry);
> +		free(rule);
> +		rule = NULL;
> +		return NULL;
> +	}
> +	return rule;
> +}

It is not clear what happens if the pattern to be added already exists in the table. How will this information be propagated
to the application?

> +int
> +rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
> +		uint32_t table_id,
> +		struct rte_flow_classify_rule *rule,
> +		struct rte_flow_error *error)
> +{
> +	int ret = -EINVAL;
> +
> +	if (!error)
> +		return ret;
> +
> +	if (!cls) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "NULL classifier.");
> +		return ret;
> +	}
> +
> +	if (table_id >= cls->num_tables) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "invalid table_id.");
> +		return ret;
> +	}
> +
> +	if (!rule) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "NULL rule.");
> +		return ret;
> +	}
> +
> +	ret = cls->tables[table_id].ops.f_delete(
> +			cls->tables[table_id].h_table,
> +			&rule->u.key.key_del,
> +			&rule->key_found,
> +			&rule->entry);

Please introduce a check for f_delete; it shouldn't be NULL.

> +
> +int
> +rte_flow_classifier_run(struct rte_flow_classifier *cls,
> +		uint32_t table_id,
> +		struct rte_mbuf **pkts,
> +		const uint16_t nb_pkts,
> +		struct rte_flow_error *error)
> +{
> +	int ret = -EINVAL;
> +	uint64_t pkts_mask;
> +	uint64_t lookup_hit_mask;
> +
> +	if (!error)
> +		return ret;
> +
> +	if (!cls || !pkts || nb_pkts == 0) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "invalid input");
> +		return ret;
> +	}
> +
> +	if (table_id >= cls->num_tables) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "invalid table_id.");
> +		return ret;
> +	}
> +
> +	pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
> +	ret = cls->tables[table_id].ops.f_lookup(
> +			cls->tables[table_id].h_table,
> +			pkts, pkts_mask, &lookup_hit_mask,
> +			(void **)cls->entries);
> +
> +	if (!ret && lookup_hit_mask)
> +		cls->nb_pkts = nb_pkts;
> +	else
> +		cls->nb_pkts = 0;
> +
> +	return ret;
> +}
> +
> +static int
> +action_apply(struct rte_flow_classifier *cls,
> +		struct rte_flow_classify_rule *rule,
> +		struct rte_flow_classify_stats *stats)
> +{
> +	struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
> +	uint64_t count = 0;
> +	int i;
> +	int ret = -ENODATA;
> +
> +	switch (rule->action.type) {
> +	case RTE_FLOW_ACTION_TYPE_COUNT:
> +		for (i = 0; i < cls->nb_pkts; i++) {
> +			if (rule->id == cls->entries[i]->rule_id)
> +				count++;
> +		}
> +		if (count) {
> +			ret = 0;
> +			ntuple_stats =
> +				(struct rte_flow_classify_ipv4_5tuple_stats
> *)
> +				stats->stats;
> +			ntuple_stats->counter1 = count;
> +			ntuple_stats->ipv4_5tuple = rule->ipv4_5tuple;
> +		}
> +		break;
> +	default:
> +		ret = -ENOTSUP;
> +		break;
> +	}
> +
> +	return ret;
> +}
> +
> +int
> +rte_flow_classifier_query(struct rte_flow_classifier *cls,
> +		struct rte_flow_classify_rule *rule,
> +		struct rte_flow_classify_stats *stats,
> +		struct rte_flow_error *error)
> +{
> +	int ret = -EINVAL;
> +
> +	if (!error)
> +		return ret;
> +
> +	if (!cls || !rule || !stats) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				NULL, "invalid input");
> +		return ret;
> +	}
> +
> +	ret = action_apply(cls, rule, stats);
> +	return ret;
> +}

The rte_flow_classifier_run and rte_flow_classifier_query APIs should be invoked consecutively in the application, true?

> diff --git a/lib/librte_flow_classify/rte_flow_classify.h
> b/lib/librte_flow_classify/rte_flow_classify.h
> new file mode 100644
> index 0000000..9bd6cf4
> --- /dev/null
> +++ b/lib/librte_flow_classify/rte_flow_classify.h
> @@ -0,0 +1,321 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2017 Intel Corporation. All rights reserved.
> + *   All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> + */
> +
> +#ifndef _RTE_FLOW_CLASSIFY_H_
> +#define _RTE_FLOW_CLASSIFY_H_
> +
> +/**
> + * @file
> + *
> + * RTE Flow Classify Library
> + *
> + * This library provides flow record information with some measured
> properties.
> + *
> + * Application should define the flow and measurement criteria (action) for
> it.
> + *
> + * Library doesn't maintain any flow records itself, instead flow information
> is
> + * returned to upper layer only for given packets.
> + *
> + * It is application's responsibility to call rte_flow_classify_query()
> + * for group of packets, just after receiving them or before transmitting
> them.
> + * Application should provide the flow type interested in, measurement to
> apply
> + * to that flow in rte_flow_classify_create() API, and should provide
> + * rte_flow_classify object and storage to put results in
> + * rte_flow_classify_query() API.
> + *
> + *  Usage:
> + *  - application calls rte_flow_classify_create() to create a rte_flow_classify
> + *    object.
> + *  - application calls rte_flow_classify_query() in a polling manner,
> + *    preferably after rte_eth_rx_burst(). This will cause the library to
> + *    convert packet information to flow information with some
> measurements.
> + *  - rte_flow_classify object can be destroyed when they are no more
> needed
> + *    via rte_flow_classify_destroy()
> + */
> +
> +#include <rte_ethdev.h>
> +#include <rte_ether.h>
> +#include <rte_flow.h>
> +#include <rte_acl.h>
> +#include <rte_table_acl.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +
> +#define RTE_FLOW_CLASSIFY_TABLE_MAX  1
> +
> +/** Opaque data type for flow classifier */
> +struct rte_flow_classifier;
> +
> +/** Opaque data type for flow classify rule */
> +struct rte_flow_classify_rule;
> +
> +enum rte_flow_classify_rule_type {
> +	RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,   /**< no type */
> +	RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE, /**< IPv4 5tuple
> type */
> +};
> +
> +enum rte_flow_classify_table_type {
> +	RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE, /**< no type */
> +	RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,  /**< ACL type */
> +};
> +
> +/** Parameters for flow classifier creation */
> +struct rte_flow_classifier_params {
> +	/**< flow classifier name */
> +	const char *name;
> +
> +	/**< CPU socket ID where memory for the flow classifier and its */
> +	/**< elements (tables) should be allocated */
> +	int socket_id;
> +
> +	/**< Table type */
> +	enum rte_flow_classify_table_type type;
> +
> +	/**< Table id */
> +	uint32_t table_id;
> +};
> +
> +struct rte_flow_classify_table_params {
> +	/**<Table operations (specific to each table type) */
> +	struct rte_table_ops *ops;
> +
> +	/**< Opaque param to be passed to the table create operation */
> +	void *arg_create;
> +
> +	/**< Memory size to be reserved per classifier object entry for */
> +	/**< storing meta data */
> +	uint32_t table_metadata_size;
> +};
> +
> +struct rte_flow_classify_ipv4_5tuple {
> +	uint32_t dst_ip;         /**< Destination IP address in big endian. */
> +	uint32_t dst_ip_mask;    /**< Mask of destination IP address. */
> +	uint32_t src_ip;         /**< Source IP address in big endian. */
> +	uint32_t src_ip_mask;    /**< Mask of destination IP address. */
> +	uint16_t dst_port;       /**< Destination port in big endian. */
> +	uint16_t dst_port_mask;  /**< Mask of destination port. */
> +	uint16_t src_port;       /**< Source Port in big endian. */
> +	uint16_t src_port_mask;  /**< Mask of source port. */
> +	uint8_t proto;           /**< L4 protocol. */
> +	uint8_t proto_mask;      /**< Mask of L4 protocol. */
> +};
> +
> +struct rte_flow_classify_table_entry {
> +	/**< meta-data for classify rule */
> +	uint32_t rule_id;
> +
> +	/**< Start of table entry area for user defined meta data */
> +	__extension__ uint8_t meta_data[0];
> +};

The above structure is not used by any of the public APIs?

> + * Flow stats
> + *
> + * For the count action, stats can be returned by the query API.
> + *
> + * Storage for stats is provided by application.
> + */
> +struct rte_flow_classify_stats {
> +	void *stats;
> +};
> +
> +struct rte_flow_classify_ipv4_5tuple_stats {
> +	/**< count of packets that match IPv4 5tuple pattern */
> +	uint64_t counter1;
> +	/**< IPv4 5tuple data */
> +	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
> +};
> +
> +/**
> + * Flow classifier create
> + *
> + * @param params
> + *   Parameters for flow classifier creation
> + * @return
> + *   Handle to flow classifier instance on success or NULL otherwise
> + */
> +struct rte_flow_classifier *rte_flow_classifier_create(
> +		struct rte_flow_classifier_params *params);
> +
> +/**
> + * Flow classifier free
> + *
> + * @param cls
> + *   Handle to flow classifier instance
> + * @return
> + *   0 on success, error code otherwise
> + */
> +int rte_flow_classifier_free(struct rte_flow_classifier *cls);
> +
> +/**
> + * Flow classify table create
> + *
> + * @param cls
> + *   Handle to flow classifier instance
> + * @param params
> + *   Parameters for flow_classify table creation
> + * @param table_id
> + *   Table ID. Valid only within the scope of table IDs of the current
> + *   classifier. Only returned after a successful invocation.
> + * @return
> + *   0 on success, error code otherwise
> + */
> +int rte_flow_classify_table_create(struct rte_flow_classifier *cls,
> +	struct rte_flow_classify_table_params *params,
> +	uint32_t *table_id);
> +
> +/**
> + * Validate a flow classify rule.
> + *
> + * @param[in] attr
> + *   Flow rule attributes
> + * @param[in] pattern
> + *   Pattern specification (list terminated by the END pattern item).
> + * @param[in] actions
> + *   Associated actions (list terminated by the END pattern item).
> + * @param[out] error
> + *   Perform verbose error reporting if not NULL. Structure
> + *   initialised in case of error only.
> + *
> + * @return
> + *   0 on success, error code otherwise.
> + */
> +int
> +rte_flow_classify_validate(
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error);
> +
> +/**
> + * Add a flow classify rule to the flow_classifer table.
> + *
> + * @param[in] cls
> + *   Flow classifier handle
> + * @param[in] table_id
> + *   id of table
> + * @param[in] attr
> + *   Flow rule attributes
> + * @param[in] pattern
> + *   Pattern specification (list terminated by the END pattern item).
> + * @param[in] actions
> + *   Associated actions (list terminated by the END pattern item).
> + * @param[out] error
> + *   Perform verbose error reporting if not NULL. Structure
> + *   initialised in case of error only.
> + * @return
> + *   A valid handle in case of success, NULL otherwise.
> + */
> +struct rte_flow_classify_rule *
> +rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
> +		uint32_t table_id,
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error);
> +
> +/**
> + * Delete a flow classify rule from the flow_classifer table.
> + *
> + * @param[in] cls
> + *   Flow classifier handle
> + * @param[in] table_id
> + *   id of table
> + * @param[in] rule
> + *   Flow classify rule
> + * @param[out] error
> + *   Perform verbose error reporting if not NULL. Structure
> + *   initialised in case of error only.
> + * @return
> + *   0 on success, error code otherwise.
> + */
> +int
> +rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
> +		uint32_t table_id,
> +		struct rte_flow_classify_rule *rule,
> +		struct rte_flow_error *error);
> +
> +/**
> + * Run flow classifier for given packets.
> + *
> + * @param[in] cls
> + *   Flow classifier handle
> + * @param[in] table_id
> + *   id of table
> + * @param[in] pkts
> + *   Pointer to packets to process
> + * @param[in] nb_pkts
> + *   Number of packets to process
> + * @param[out] error
> + *   Perform verbose error reporting if not NULL. Structure
> + *   initialised in case of error only.
> + *
> + * @return
> + *   0 on success, error code otherwise.
> + */
> +
> +int rte_flow_classifier_run(struct rte_flow_classifier *cls,
> +		uint32_t table_id,
> +		struct rte_mbuf **pkts,
> +		const uint16_t nb_pkts,
> +		struct rte_flow_error *error);
> +
> +/**
> + * Query flow classifier for given rule.
> + *
> + * @param[in] cls
> + *   Flow classifier handle
> + * @param[in] rule
> + *   Flow classify rule
> + * @param[in] stats
> + *   Flow classify stats
> + * @param[out] error
> + *   Perform verbose error reporting if not NULL. Structure
> + *   initialised in case of error only.
> + *
> + * @return
> + *   0 on success, error code otherwise.
> + */
> +int rte_flow_classifier_query(struct rte_flow_classifier *cls,
> +		struct rte_flow_classify_rule *rule,
> +		struct rte_flow_classify_stats *stats,
> +		struct rte_flow_error *error);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_FLOW_CLASSIFY_H_ */


There are Doxygen rendering issues in this document. Please cross-check the header file with
the "make doc-api-html" output.
  
Iremonger, Bernard Oct. 20, 2017, 4:59 p.m. UTC | #2
Hi Jasvinder,

Thanks for the review.

> -----Original Message-----
> From: Singh, Jasvinder
> Sent: Thursday, October 19, 2017 3:22 PM
> To: Iremonger, Bernard <bernard.iremonger@intel.com>; dev@dpdk.org;
> Yigit, Ferruh <ferruh.yigit@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Dumitrescu, Cristian
> <cristian.dumitrescu@intel.com>; adrien.mazarguil@6wind.com
> Subject: RE: [PATCH v8 1/4] librte_flow_classify: add flow classify library
> 
> 
> <snip>
> 
> > +
> > +struct acl_keys {
> > +	struct rte_table_acl_rule_add_params key_add; /**< add key */
> > +	struct rte_table_acl_rule_delete_params	key_del; /**< delete
> > key */
> > +};
> > +
> > +struct rte_flow_classify_rule {
> > +	uint32_t id; /**< unique ID of classify rule */
> > +	enum rte_flow_classify_rule_type rule_type; /**< classify rule type
> > */
> > +	struct rte_flow_action action; /**< action when match found */
> > +	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple; /**< ipv4 5tuple */
> > +	union {
> > +		struct acl_keys key;
> > +	} u;
> > +	int key_found;   /**< rule key found in table */
> > +	void *entry;     /**< pointer to buffer to hold rule meta data*/
> > +	void *entry_ptr; /**< handle to the table entry for rule meta data*/
> > +};
> 
> In my opinion, the above struct should have the provision to accommodate
> other types of rules, not only ipv4_5tuples.
> Making this structure modular will help in extending it for other rule types in
> the future.

I will refactor by adding struct classify_rules{}  to struct rte_flow_classif_rule{}:

struct classify_rules {
	enum rte_flow_classify_rule_type type;
	union {
		struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
	} u;
};

struct rte_flow_classify_rule {
	uint32_t id; /* unique ID of classify rule */
	struct rte_flow_action action; /* action when match found */
	struct classify_rules rules; /* union of rules */
	union {
		struct acl_keys key;
	} u;
	int key_found;   /* rule key found in table */
	void *entry;     /* pointer to buffer to hold rule meta data */
	void *entry_ptr; /* handle to the table entry for rule meta data */
}; 

 
> > +int
> > +rte_flow_classify_validate(
> > +		   const struct rte_flow_attr *attr,
> > +		   const struct rte_flow_item pattern[],
> > +		   const struct rte_flow_action actions[],
> > +		   struct rte_flow_error *error)
> > +{
> > +	struct rte_flow_item *items;
> > +	parse_filter_t parse_filter;
> > +	uint32_t item_num = 0;
> > +	uint32_t i = 0;
> > +	int ret;
> > +
> > +	if (!error)
> > +		return -EINVAL;
> > +
> > +	if (!pattern) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > +				   NULL, "NULL pattern.");
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (!actions) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > +				   NULL, "NULL action.");
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (!attr) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ATTR,
> > +				   NULL, "NULL attribute.");
> > +		return -EINVAL;
> > +	}
> > +
> > +	memset(&ntuple_filter, 0, sizeof(ntuple_filter));
> > +
> > +	/* Get the non-void item number of pattern */
> > +	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> > +		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
> > +			item_num++;
> > +		i++;
> > +	}
> > +	item_num++;
> > +
> > +	items = malloc(item_num * sizeof(struct rte_flow_item));
> > +	if (!items) {
> > +		rte_flow_error_set(error, ENOMEM,
> > RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > +				   NULL, "No memory for pattern items.");
> > +		return -ENOMEM;
> > +	}
> > +
> > +	memset(items, 0, item_num * sizeof(struct rte_flow_item));
> > +	classify_pattern_skip_void_item(items, pattern);
> > +
> > +	parse_filter = classify_find_parse_filter_func(items);
> > +	if (!parse_filter) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM,
> > +				   pattern, "Unsupported pattern");
> > +		free(items);
> > +		return -EINVAL;
> > +	}
> > +
> > +	ret = parse_filter(attr, items, actions, &ntuple_filter, error);
> > +	free(items);
> > +	return ret;
> > +}
> 
> This function mainly parses the flow pattern, actions etc and fill the entries in
> internal ntuple_filter.It is invoked only in flow_entry_add(). Is there any
> reason to make this function available as public API?

This function does not need to be a public API.
The flow_classify APIs started out mirroring the flow APIs, but this is no longer the case.
Probably better to make it internal; it could be made public later if needed.

 
> > +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> > +#define uint32_t_to_char(ip, a, b, c, d) do {\
> > +		*a = (unsigned char)(ip >> 24 & 0xff);\
> > +		*b = (unsigned char)(ip >> 16 & 0xff);\
> > +		*c = (unsigned char)(ip >> 8 & 0xff);\
> > +		*d = (unsigned char)(ip & 0xff);\
> > +	} while (0)
> > +
> > +static inline void
> > +print_ipv4_key_add(struct rte_table_acl_rule_add_params *key) {
> > +	unsigned char a, b, c, d;
> > +
> > +	printf("ipv4_key_add: 0x%02hhx/0x%hhx ",
> > +		key->field_value[PROTO_FIELD_IPV4].value.u8,
> > +		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
> > +
> > +	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
> > +			&a, &b, &c, &d);
> > +	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > +			key-
> > >field_value[SRC_FIELD_IPV4].mask_range.u32);
> > +
> > +	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
> > +			&a, &b, &c, &d);
> > +	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > +			key-
> > >field_value[DST_FIELD_IPV4].mask_range.u32);
> > +
> > +	printf("%hu : 0x%x %hu : 0x%x",
> > +		key->field_value[SRCP_FIELD_IPV4].value.u16,
> > +		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
> > +		key->field_value[DSTP_FIELD_IPV4].value.u16,
> > +		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
> > +
> > +	printf(" priority: 0x%x\n", key->priority); }
> 
> The above function is specific to printing acl table keys. How about making
> this function little generic by passing the parameters to distinguish the rule,
> table type, etc. and do the printing?
> 
> Same comments for the print_ipv4_key_delete().
> 

This is debug code; could it be left as is until another table type is added?
I will rename to include acl in the function names.

> 
> > +static inline void
> > +print_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key) {
> > +	unsigned char a, b, c, d;
> > +
> > +	printf("ipv4_key_del: 0x%02hhx/0x%hhx ",
> > +		key->field_value[PROTO_FIELD_IPV4].value.u8,
> > +		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
> > +
> > +	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
> > +			&a, &b, &c, &d);
> > +	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > +			key-
> > >field_value[SRC_FIELD_IPV4].mask_range.u32);
> > +
> > +	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
> > +			&a, &b, &c, &d);
> > +	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > +			key-
> > >field_value[DST_FIELD_IPV4].mask_range.u32);
> > +
> > +	printf("%hu : 0x%x %hu : 0x%x\n",
> > +		key->field_value[SRCP_FIELD_IPV4].value.u16,
> > +		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
> > +		key->field_value[DSTP_FIELD_IPV4].value.u16,
> > +		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
> > +}
> > +#endif
> > +
> > +static int
> > +rte_flow_classifier_check_params(struct rte_flow_classifier_params
> > *params)
> > +{
> > +	if (params == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: Incorrect value for parameter params\n",
> > __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* name */
> > +	if (params->name == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: Incorrect value for parameter name\n",
> > __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* socket */
> > +	if ((params->socket_id < 0) ||
> > +	    (params->socket_id >= RTE_MAX_NUMA_NODES)) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: Incorrect value for parameter socket_id\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +struct rte_flow_classifier *
> > +rte_flow_classifier_create(struct rte_flow_classifier_params *params)
> > +{
> > +	struct rte_flow_classifier *cls;
> > +	int ret;
> > +
> > +	/* Check input parameters */
> > +	ret = rte_flow_classifier_check_params(params);
> > +	if (ret != 0) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: flow classifier params check failed (%d)\n",
> > +			__func__, ret);
> > +		return NULL;
> > +	}
> > +
> > +	/* Allocate memory for the flow classifier */
> > +	cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
> > +			sizeof(struct rte_flow_classifier),
> > +			RTE_CACHE_LINE_SIZE, params->socket_id);
> > +
> > +	if (cls == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: flow classifier memory allocation failed\n",
> > +			__func__);
> > +		return NULL;
> > +	}
> > +
> > +	/* Save input parameters */
> > +	snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
> > +			params->name);
> > +	cls->socket_id = params->socket_id;
> > +	cls->type = params->type;
> > +
> > +	/* Initialize flow classifier internal data structure */
> > +	cls->num_tables = 0;
> > +
> > +	return cls;
> > +}
> > +
> > +static void
> > +rte_flow_classify_table_free(struct rte_table *table) {
> > +	if (table->ops.f_free != NULL)
> > +		table->ops.f_free(table->h_table);
> > +
> > +	rte_free(table->default_entry);
> > +}
> 
> This is internal function. There is an API for creating a table for classifier
> instance but not for destroying the table. What if application requires
> destroying the specific table of the classifier but want to keep the classifier
> instance?

Yes, there should probably be an API to delete a table.
I will add an rte_flow_classify_table_delete() API.

 
> > +int
> > +rte_flow_classifier_free(struct rte_flow_classifier *cls) {
> > +	uint32_t i;
> > +
> > +	/* Check input parameters */
> > +	if (cls == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: rte_flow_classifier parameter is NULL\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* Free tables */
> > +	for (i = 0; i < cls->num_tables; i++) {
> > +		struct rte_table *table = &cls->tables[i];
> > +
> > +		rte_flow_classify_table_free(table);
> > +	}
> > +
> > +	/* Free flow classifier memory */
> > +	rte_free(cls);
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +rte_table_check_params(struct rte_flow_classifier *cls,
> > +		struct rte_flow_classify_table_params *params,
> > +		uint32_t *table_id)
> > +{
> > +	if (cls == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: flow classifier parameter is NULL\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +	if (params == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY, "%s: params parameter is NULL\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +	if (table_id == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY, "%s: table_id parameter is NULL\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* ops */
> > +	if (params->ops == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY, "%s: params->ops is NULL\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (params->ops->f_create == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: f_create function pointer is NULL\n", __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (params->ops->f_lookup == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: f_lookup function pointer is NULL\n",
> > __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* De we have room for one more table? */
> > +	if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: Incorrect value for num_tables parameter\n",
> > +			__func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +int
> > +rte_flow_classify_table_create(struct rte_flow_classifier *cls,
> > +	struct rte_flow_classify_table_params *params,
> > +	uint32_t *table_id)
> > +{
> > +	struct rte_table *table;
> > +	struct rte_flow_classify_table_entry *default_entry;
> > +	void *h_table;
> > +	uint32_t entry_size, id;
> > +	int ret;
> > +
> > +	/* Check input arguments */
> > +	ret = rte_table_check_params(cls, params, table_id);
> > +	if (ret != 0)
> > +		return ret;
> > +
> > +	id = cls->num_tables;
> > +	table = &cls->tables[id];
> > +
> > +	/* Allocate space for the default table entry */
> > +	entry_size = sizeof(struct rte_flow_classify_table_entry) +
> > +			params->table_metadata_size;
> > +	default_entry =
> > +		(struct rte_flow_classify_table_entry *) rte_zmalloc_socket(
> > +		"Flow Classify default entry", entry_size,
> > +		RTE_CACHE_LINE_SIZE, cls->socket_id);
> > +	if (default_entry == NULL) {
> > +		RTE_LOG(ERR, CLASSIFY,
> > +			"%s: Failed to allocate default entry\n", __func__);
> > +		return -EINVAL;
> > +	}
> 
> what is the purpose of default_entry as I don't see its usage anywhere in the
> library?

This came from the ip_pipeline code in earlier discussions; it is not used at present.
I will remove it.

 
> > +	/* Create the table */
> > +	h_table = params->ops->f_create(params->arg_create, cls-
> > >socket_id,
> > +		entry_size);
> > +	if (h_table == NULL) {
> > +		rte_free(default_entry);
> > +		RTE_LOG(ERR, CLASSIFY, "%s: Table creation failed\n",
> > __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* Commit current table to the classifier */
> > +	cls->num_tables++;
> > +	*table_id = id;
> > +
> > +	/* Save input parameters */
> > +	memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
> > +
> > +	table->entry_size = entry_size;
> > +	table->default_entry = default_entry;
> > +
> > +	/* Initialize table internal data structure */
> > +	table->h_table = h_table;
> > +
> > +	return 0;
> > +}
> > +
> > +static struct rte_flow_classify_rule *
> > +allocate_ipv4_5tuple_rule(void)
> > +{
> > +	struct rte_flow_classify_rule *rule;
> > +
> > +	rule = malloc(sizeof(struct rte_flow_classify_rule));
> > +	if (!rule)
> > +		return rule;
> > +
> > +	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
> > +	rule->id = unique_id++;
> > +	rule->rule_type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
> > +
> > +	memcpy(&rule->action, classify_get_flow_action(),
> > +	       sizeof(struct rte_flow_action));
> > +
> > +	/* key add values */
> > +	rule->u.key.key_add.priority = ntuple_filter.priority;
> > +	rule-
> > >u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
> > +			ntuple_filter.proto_mask;
> > +	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
> > +			ntuple_filter.proto;
> > +	rule->ipv4_5tuple.proto = ntuple_filter.proto;
> > +	rule->ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
> > +
> > +	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32
> > =
> > +			ntuple_filter.src_ip_mask;
> > +	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
> > +			ntuple_filter.src_ip;
> > +	rule->ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
> > +	rule->ipv4_5tuple.src_ip = ntuple_filter.src_ip;
> > +
> > +	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32
> > =
> > +			ntuple_filter.dst_ip_mask;
> > +	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
> > +			ntuple_filter.dst_ip;
> > +	rule->ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
> > +	rule->ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
> > +
> > +	rule-
> > >u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
> > +			ntuple_filter.src_port_mask;
> > +	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
> > +			ntuple_filter.src_port;
> > +	rule->ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
> > +	rule->ipv4_5tuple.src_port = ntuple_filter.src_port;
> > +
> > +	rule-
> > >u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
> > +			ntuple_filter.dst_port_mask;
> > +	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
> > +			ntuple_filter.dst_port;
> > +	rule->ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
> > +	rule->ipv4_5tuple.dst_port = ntuple_filter.dst_port;
> > +
> > +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> > +	print_ipv4_key_add(&rule->u.key.key_add);
> > +#endif
> > +
> > +	/* key delete values */
> > +	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
> > +	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
> > +	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
> > +
> > +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> > +	print_ipv4_key_delete(&rule->u.key.key_del);
> > +#endif
> > +	return rule;
> > +}
> > +
> > +struct rte_flow_classify_rule *
> > +rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
> > +		uint32_t table_id,
> > +		const struct rte_flow_attr *attr,
> > +		const struct rte_flow_item pattern[],
> > +		const struct rte_flow_action actions[],
> > +		struct rte_flow_error *error)
> > +{
> > +	struct rte_flow_classify_rule *rule;
> > +	struct rte_flow_classify_table_entry *table_entry;
> > +	int ret;
> > +
> > +	if (!error)
> > +		return NULL;
> > +
> > +	if (!cls) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "NULL classifier.");
> > +		return NULL;
> > +	}
> > +
> > +	if (table_id >= cls->num_tables) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "invalid table_id.");
> > +		return NULL;
> > +	}
> > +
> > +	if (!pattern) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > +				NULL, "NULL pattern.");
> > +		return NULL;
> > +	}
> > +
> > +	if (!actions) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > +				NULL, "NULL action.");
> > +		return NULL;
> > +	}
> > +
> > +	if (!attr) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ATTR,
> > +				NULL, "NULL attribute.");
> > +		return NULL;
> > +	}
> > +
> > +	/* parse attr, pattern and actions */
> > +	ret = rte_flow_classify_validate(attr, pattern, actions, error);
> > +	if (ret < 0)
> > +		return NULL;
> > +
> > +	switch (cls->type) {
> > +	case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
> > +		rule = allocate_ipv4_5tuple_rule();
> > +		if (!rule)
> > +			return NULL;
> > +		break;
> > +	default:
> > +		return NULL;
> > +	}
> > +
> > +	rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
> > +	if (!rule->entry) {
> > +		free(rule);
> > +		rule = NULL;
> > +		return NULL;
> > +	}
> > +
> > +	table_entry = rule->entry;
> > +	table_entry->rule_id = rule->id;
> > +
> > +	ret = cls->tables[table_id].ops.f_add(
> > +			cls->tables[table_id].h_table,
> > +			&rule->u.key.key_add,
> > +			rule->entry,
> > +			&rule->key_found,
> > +			&rule->entry_ptr);
> > +	if (ret) {
> > +		free(rule->entry);
> > +		free(rule);
> > +		rule = NULL;
> > +		return NULL;
> > +	}
> > +	return rule;
> > +}
> 
> It is not clear if the pattern to be added already exists in the table? how this
> information will be propagated to the application?

The key_found flag will be set if the key is already present.
I will add a key_found parameter to the API to return the key found data.

> > +int
> > +rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
> > +		uint32_t table_id,
> > +		struct rte_flow_classify_rule *rule,
> > +		struct rte_flow_error *error)
> > +{
> > +	int ret = -EINVAL;
> > +
> > +	if (!error)
> > +		return ret;
> > +
> > +	if (!cls) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "NULL classifier.");
> > +		return ret;
> > +	}
> > +
> > +	if (table_id >= cls->num_tables) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "invalid table_id.");
> > +		return ret;
> > +	}
> > +
> > +	if (!rule) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "NULL rule.");
> > +		return ret;
> > +	}
> > +
> > +	ret = cls->tables[table_id].ops.f_delete(
> > +			cls->tables[table_id].h_table,
> > +			&rule->u.key.key_del,
> > +			&rule->key_found,
> > +			&rule->entry);
> 
> Please introduce check for f_delete, shouldn't be NULL.

I will add a check that f_delete is not NULL.

 
> > +
> > +int
> > +rte_flow_classifier_run(struct rte_flow_classifier *cls,
> > +		uint32_t table_id,
> > +		struct rte_mbuf **pkts,
> > +		const uint16_t nb_pkts,
> > +		struct rte_flow_error *error)
> > +{
> > +	int ret = -EINVAL;
> > +	uint64_t pkts_mask;
> > +	uint64_t lookup_hit_mask;
> > +
> > +	if (!error)
> > +		return ret;
> > +
> > +	if (!cls || !pkts || nb_pkts == 0) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "invalid input");
> > +		return ret;
> > +	}
> > +
> > +	if (table_id >= cls->num_tables) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "invalid table_id.");
> > +		return ret;
> > +	}
> > +
> > +	pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
> > +	ret = cls->tables[table_id].ops.f_lookup(
> > +			cls->tables[table_id].h_table,
> > +			pkts, pkts_mask, &lookup_hit_mask,
> > +			(void **)cls->entries);
> > +
> > +	if (!ret && lookup_hit_mask)
> > +		cls->nb_pkts = nb_pkts;
> > +	else
> > +		cls->nb_pkts = 0;
> > +
> > +	return ret;
> > +}
> > +
> > +static int
> > +action_apply(struct rte_flow_classifier *cls,
> > +		struct rte_flow_classify_rule *rule,
> > +		struct rte_flow_classify_stats *stats) {
> > +	struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
> > +	uint64_t count = 0;
> > +	int i;
> > +	int ret = -ENODATA;
> > +
> > +	switch (rule->action.type) {
> > +	case RTE_FLOW_ACTION_TYPE_COUNT:
> > +		for (i = 0; i < cls->nb_pkts; i++) {
> > +			if (rule->id == cls->entries[i]->rule_id)
> > +				count++;
> > +		}
> > +		if (count) {
> > +			ret = 0;
> > +			ntuple_stats =
> > +				(struct rte_flow_classify_ipv4_5tuple_stats
> > *)
> > +				stats->stats;
> > +			ntuple_stats->counter1 = count;
> > +			ntuple_stats->ipv4_5tuple = rule->ipv4_5tuple;
> > +		}
> > +		break;
> > +	default:
> > +		ret = -ENOTSUP;
> > +		break;
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> > +int
> > +rte_flow_classifier_query(struct rte_flow_classifier *cls,
> > +		struct rte_flow_classify_rule *rule,
> > +		struct rte_flow_classify_stats *stats,
> > +		struct rte_flow_error *error)
> > +{
> > +	int ret = -EINVAL;
> > +
> > +	if (!error)
> > +		return ret;
> > +
> > +	if (!cls || !rule || !stats) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				NULL, "invalid input");
> > +		return ret;
> > +	}
> > +
> > +	ret = action_apply(cls, rule, stats);
> > +	return ret;
> > +}
> 
> The rte_flow_classify_run and rte_flow_classify_query API  should be
> invoked consecutively in the application, true?

Yes, they should be invoked consecutively.
I will merge the rte_flow_classifier_run API with the rte_flow_classifier_query API and
drop the rte_flow_classifier_run API.

 
> > diff --git a/lib/librte_flow_classify/rte_flow_classify.h
> > b/lib/librte_flow_classify/rte_flow_classify.h
> > new file mode 100644
> > index 0000000..9bd6cf4
> > --- /dev/null
> > +++ b/lib/librte_flow_classify/rte_flow_classify.h
> > @@ -0,0 +1,321 @@
> > +/*-
> > + *   BSD LICENSE
> > + *
> > + *   Copyright(c) 2017 Intel Corporation. All rights reserved.
> > + *   All rights reserved.
> > + *
> > + *   Redistribution and use in source and binary forms, with or without
> > + *   modification, are permitted provided that the following conditions
> > + *   are met:
> > + *
> > + *     * Redistributions of source code must retain the above copyright
> > + *       notice, this list of conditions and the following disclaimer.
> > + *     * Redistributions in binary form must reproduce the above copyright
> > + *       notice, this list of conditions and the following disclaimer in
> > + *       the documentation and/or other materials provided with the
> > + *       distribution.
> > + *     * Neither the name of Intel Corporation nor the names of its
> > + *       contributors may be used to endorse or promote products derived
> > + *       from this software without specific prior written permission.
> > + *
> > + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> > CONTRIBUTORS
> > + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> > NOT
> > + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> > FITNESS FOR
> > + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> > COPYRIGHT
> > + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> > INCIDENTAL,
> > + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> BUT
> > NOT
> > + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> LOSS
> > OF USE,
> > + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> > AND ON ANY
> > + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> > TORT
> > + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
> OF
> > THE USE
> > + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> > DAMAGE.
> > + */
> > +
> > +#ifndef _RTE_FLOW_CLASSIFY_H_
> > +#define _RTE_FLOW_CLASSIFY_H_
> > +
> > +/**
> > + * @file
> > + *
> > + * RTE Flow Classify Library
> > + *
> > + * This library provides flow record information with some measured
> > properties.
> > + *
> > + * Application should define the flow and measurement criteria
> > + (action) for
> > it.
> > + *
> > + * Library doesn't maintain any flow records itself, instead flow
> > + information
> > is
> > + * returned to upper layer only for given packets.
> > + *
> > + * It is application's responsibility to call
> > + rte_flow_classify_query()
> > + * for group of packets, just after receiving them or before
> > + transmitting
> > them.
> > + * Application should provide the flow type interested in,
> > + measurement to
> > apply
> > + * to that flow in rte_flow_classify_create() API, and should provide
> > + * rte_flow_classify object and storage to put results in
> > + * rte_flow_classify_query() API.
> > + *
> > + *  Usage:
> > + *  - application calls rte_flow_classify_create() to create a
> rte_flow_classify
> > + *    object.
> > + *  - application calls rte_flow_classify_query() in a polling manner,
> > + *    preferably after rte_eth_rx_burst(). This will cause the library to
> > + *    convert packet information to flow information with some
> > measurements.
> > + *  - rte_flow_classify object can be destroyed when they are no more
> > needed
> > + *    via rte_flow_classify_destroy()
> > + */
> > +
> > +#include <rte_ethdev.h>
> > +#include <rte_ether.h>
> > +#include <rte_flow.h>
> > +#include <rte_acl.h>
> > +#include <rte_table_acl.h>
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +
> > +#define RTE_FLOW_CLASSIFY_TABLE_MAX  1
> > +
> > +/** Opaque data type for flow classifier */ struct
> > +rte_flow_classifier;
> > +
> > +/** Opaque data type for flow classify rule */ struct
> > +rte_flow_classify_rule;
> > +
> > +enum rte_flow_classify_rule_type {
> > +	RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,   /**< no type */
> > +	RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE, /**< IPv4 5tuple
> > type */
> > +};
> > +
> > +enum rte_flow_classify_table_type {
> > +	RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE, /**< no type */
> > +	RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,  /**< ACL type */ };
> > +
> > +/** Parameters for flow classifier creation */ struct
> > +rte_flow_classifier_params {
> > +	/**< flow classifier name */
> > +	const char *name;
> > +
> > +	/**< CPU socket ID where memory for the flow classifier and its */
> > +	/**< elements (tables) should be allocated */
> > +	int socket_id;
> > +
> > +	/**< Table type */
> > +	enum rte_flow_classify_table_type type;
> > +
> > +	/**< Table id */
> > +	uint32_t table_id;
> > +};
> > +
> > +struct rte_flow_classify_table_params {
> > +	/**<Table operations (specific to each table type) */
> > +	struct rte_table_ops *ops;
> > +
> > +	/**< Opaque param to be passed to the table create operation */
> > +	void *arg_create;
> > +
> > +	/**< Memory size to be reserved per classifier object entry for */
> > +	/**< storing meta data */
> > +	uint32_t table_metadata_size;
> > +};
> > +
> > +struct rte_flow_classify_ipv4_5tuple {
> > +	uint32_t dst_ip;         /**< Destination IP address in big endian. */
> > +	uint32_t dst_ip_mask;    /**< Mask of destination IP address. */
> > +	uint32_t src_ip;         /**< Source IP address in big endian. */
> > +	uint32_t src_ip_mask;    /**< Mask of destination IP address. */
> > +	uint16_t dst_port;       /**< Destination port in big endian. */
> > +	uint16_t dst_port_mask;  /**< Mask of destination port. */
> > +	uint16_t src_port;       /**< Source Port in big endian. */
> > +	uint16_t src_port_mask;  /**< Mask of source port. */
> > +	uint8_t proto;           /**< L4 protocol. */
> > +	uint8_t proto_mask;      /**< Mask of L4 protocol. */
> > +};
> > +
> > +struct rte_flow_classify_table_entry {
> > +	/**< meta-data for classify rule */
> > +	uint32_t rule_id;
> > +
> > +	/**< Start of table entry area for user defined meta data */
> > +	__extension__ uint8_t meta_data[0];
> > +};
> 
> The above structure is not used by any of the public API ?
> 
> > + * Flow stats
> > + *
> > + * For the count action, stats can be returned by the query API.
> > + *
> > + * Storage for stats is provided by application.
> > + */
> > +struct rte_flow_classify_stats {
> > +	void *stats;
> > +};
> > +
> > +struct rte_flow_classify_ipv4_5tuple_stats {
> > +	/**< count of packets that match IPv4 5tuple pattern */
> > +	uint64_t counter1;
> > +	/**< IPv4 5tuple data */
> > +	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple; };
> > +
> > +/**
> > + * Flow classifier create
> > + *
> > + * @param params
> > + *   Parameters for flow classifier creation
> > + * @return
> > + *   Handle to flow classifier instance on success or NULL otherwise
> > + */
> > +struct rte_flow_classifier *rte_flow_classifier_create(
> > +		struct rte_flow_classifier_params *params);
> > +
> > +/**
> > + * Flow classifier free
> > + *
> > + * @param cls
> > + *   Handle to flow classifier instance
> > + * @return
> > + *   0 on success, error code otherwise
> > + */
> > +int rte_flow_classifier_free(struct rte_flow_classifier *cls);
> > +
> > +/**
> > + * Flow classify table create
> > + *
> > + * @param cls
> > + *   Handle to flow classifier instance
> > + * @param params
> > + *   Parameters for flow_classify table creation
> > + * @param table_id
> > + *   Table ID. Valid only within the scope of table IDs of the current
> > + *   classifier. Only returned after a successful invocation.
> > + * @return
> > + *   0 on success, error code otherwise
> > + */
> > +int rte_flow_classify_table_create(struct rte_flow_classifier *cls,
> > +	struct rte_flow_classify_table_params *params,
> > +	uint32_t *table_id);
> > +
> > +/**
> > + * Validate a flow classify rule.
> > + *
> > + * @param[in] attr
> > + *   Flow rule attributes
> > + * @param[in] pattern
> > + *   Pattern specification (list terminated by the END pattern item).
> > + * @param[in] actions
> > + *   Associated actions (list terminated by the END pattern item).
> > + * @param[out] error
> > + *   Perform verbose error reporting if not NULL. Structure
> > + *   initialised in case of error only.
> > + *
> > + * @return
> > + *   0 on success, error code otherwise.
> > + */
> > +int
> > +rte_flow_classify_validate(
> > +		const struct rte_flow_attr *attr,
> > +		const struct rte_flow_item pattern[],
> > +		const struct rte_flow_action actions[],
> > +		struct rte_flow_error *error);
> > +
> > +/**
> > + * Add a flow classify rule to the flow_classifer table.
> > + *
> > + * @param[in] cls
> > + *   Flow classifier handle
> > + * @param[in] table_id
> > + *   id of table
> > + * @param[in] attr
> > + *   Flow rule attributes
> > + * @param[in] pattern
> > + *   Pattern specification (list terminated by the END pattern item).
> > + * @param[in] actions
> > + *   Associated actions (list terminated by the END pattern item).
> > + * @param[out] error
> > + *   Perform verbose error reporting if not NULL. Structure
> > + *   initialised in case of error only.
> > + * @return
> > + *   A valid handle in case of success, NULL otherwise.
> > + */
> > +struct rte_flow_classify_rule *
> > +rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
> > +		uint32_t table_id,
> > +		const struct rte_flow_attr *attr,
> > +		const struct rte_flow_item pattern[],
> > +		const struct rte_flow_action actions[],
> > +		struct rte_flow_error *error);
> > +
> > +/**
> > + * Delete a flow classify rule from the flow_classifer table.
> > + *
> > + * @param[in] cls
> > + *   Flow classifier handle
> > + * @param[in] table_id
> > + *   id of table
> > + * @param[in] rule
> > + *   Flow classify rule
> > + * @param[out] error
> > + *   Perform verbose error reporting if not NULL. Structure
> > + *   initialised in case of error only.
> > + * @return
> > + *   0 on success, error code otherwise.
> > + */
> > +int
> > +rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
> > +		uint32_t table_id,
> > +		struct rte_flow_classify_rule *rule,
> > +		struct rte_flow_error *error);
> > +
> > +/**
> > + * Run flow classifier for given packets.
> > + *
> > + * @param[in] cls
> > + *   Flow classifier handle
> > + * @param[in] table_id
> > + *   id of table
> > + * @param[in] pkts
> > + *   Pointer to packets to process
> > + * @param[in] nb_pkts
> > + *   Number of packets to process
> > + * @param[out] error
> > + *   Perform verbose error reporting if not NULL. Structure
> > + *   initialised in case of error only.
> > + *
> > + * @return
> > + *   0 on success, error code otherwise.
> > + */
> > +
> > +int rte_flow_classifier_run(struct rte_flow_classifier *cls,
> > +		uint32_t table_id,
> > +		struct rte_mbuf **pkts,
> > +		const uint16_t nb_pkts,
> > +		struct rte_flow_error *error);
> > +
> > +/**
> > + * Query flow classifier for given rule.
> > + *
> > + * @param[in] cls
> > + *   Flow classifier handle
> > + * @param[in] rule
> > + *   Flow classify rule
> > + * @param[in] stats
> > + *   Flow classify stats
> > + * @param[out] error
> > + *   Perform verbose error reporting if not NULL. Structure
> > + *   initialised in case of error only.
> > + *
> > + * @return
> > + *   0 on success, error code otherwise.
> > + */
> > +int rte_flow_classifier_query(struct rte_flow_classifier *cls,
> > +		struct rte_flow_classify_rule *rule,
> > +		struct rte_flow_classify_stats *stats,
> > +		struct rte_flow_error *error);
> > +
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +
> > +#endif /* _RTE_FLOW_CLASSIFY_H_ */
> 
> 
> There are doxygen rendering issues in this document. Please cross check the
> header file with "make doc-api-html" output.

I will check the doxygen output.

I will send a v9 patch set with the above changes.

Regards,

Bernard.
  
Iremonger, Bernard Oct. 21, 2017, 12:07 p.m. UTC | #3
Hi Jasvinder

<snip>

> >
> > > +
> > > +struct acl_keys {
> > > +	struct rte_table_acl_rule_add_params key_add; /**< add key */
> > > +	struct rte_table_acl_rule_delete_params	key_del; /**< delete
> > > key */
> > > +};
> > > +
> > > +struct rte_flow_classify_rule {
> > > +	uint32_t id; /**< unique ID of classify rule */
> > > +	enum rte_flow_classify_rule_type rule_type; /**< classify rule
> > > +type
> > > */
> > > +	struct rte_flow_action action; /**< action when match found */
> > > +	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple; /**< ipv4 5tuple */
> > > +	union {
> > > +		struct acl_keys key;
> > > +	} u;
> > > +	int key_found;   /**< rule key found in table */
> > > +	void *entry;     /**< pointer to buffer to hold rule meta data*/
> > > +	void *entry_ptr; /**< handle to the table entry for rule meta
> > > +data*/ };
> >
> > In my opnion, the  above struct should have the provision to
> > accommodate other types of rules, not only ipv4_5tuples.
> > Making this structure modular will help in extending it for other rule
> > types in the future.
> 
> I will refactor by adding struct classify_rules{}  to struct
> rte_flow_classif_rule{}:
> 
> struct classify_rules {
> 	enum rte_flow_classify_rule_type type;
> 	union {
> 		struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
> 	} u;
> };
> 
> struct rte_flow_classify_rule {
> 	uint32_t id; /* unique ID of classify rule */
> 	struct rte_flow_action action; /* action when match found */
> 	struct classify_rules rules; /* union of rules */
> 	union {
> 		struct acl_keys key;
> 	} u;
> 	int key_found;   /* rule key found in table */
> 	void *entry;     /* pointer to buffer to hold rule meta data */
> 	void *entry_ptr; /* handle to the table entry for rule meta data */ };
> 
> 
> > > +int
> > > +rte_flow_classify_validate(
> > > +		   const struct rte_flow_attr *attr,
> > > +		   const struct rte_flow_item pattern[],
> > > +		   const struct rte_flow_action actions[],
> > > +		   struct rte_flow_error *error)
> > > +{
> > > +	struct rte_flow_item *items;
> > > +	parse_filter_t parse_filter;
> > > +	uint32_t item_num = 0;
> > > +	uint32_t i = 0;
> > > +	int ret;
> > > +
> > > +	if (!error)
> > > +		return -EINVAL;
> > > +
> > > +	if (!pattern) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > > +				   NULL, "NULL pattern.");
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	if (!actions) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > > +				   NULL, "NULL action.");
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	if (!attr) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				   RTE_FLOW_ERROR_TYPE_ATTR,
> > > +				   NULL, "NULL attribute.");
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	memset(&ntuple_filter, 0, sizeof(ntuple_filter));
> > > +
> > > +	/* Get the non-void item number of pattern */
> > > +	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> > > +		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
> > > +			item_num++;
> > > +		i++;
> > > +	}
> > > +	item_num++;
> > > +
> > > +	items = malloc(item_num * sizeof(struct rte_flow_item));
> > > +	if (!items) {
> > > +		rte_flow_error_set(error, ENOMEM,
> > > RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > > +				   NULL, "No memory for pattern items.");
> > > +		return -ENOMEM;
> > > +	}
> > > +
> > > +	memset(items, 0, item_num * sizeof(struct rte_flow_item));
> > > +	classify_pattern_skip_void_item(items, pattern);
> > > +
> > > +	parse_filter = classify_find_parse_filter_func(items);
> > > +	if (!parse_filter) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				   RTE_FLOW_ERROR_TYPE_ITEM,
> > > +				   pattern, "Unsupported pattern");
> > > +		free(items);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	ret = parse_filter(attr, items, actions, &ntuple_filter, error);
> > > +	free(items);
> > > +	return ret;
> > > +}
> >
> > This function mainly parses the flow pattern, actions etc and fill the
> > entries in internal ntuple_filter.It is invoked only in
> > flow_entry_add(). Is there any reason to make this function available as
> public API?
> 
> This function does not need to be a public API.
> The flow_classify API's started out mirroring the flow API's but this is no
> longer the case.
> Probably better to make it internal and it could be made public later if
> needed.
> 
> 
> > > +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> > > +#define uint32_t_to_char(ip, a, b, c, d) do {\
> > > +		*a = (unsigned char)(ip >> 24 & 0xff);\
> > > +		*b = (unsigned char)(ip >> 16 & 0xff);\
> > > +		*c = (unsigned char)(ip >> 8 & 0xff);\
> > > +		*d = (unsigned char)(ip & 0xff);\
> > > +	} while (0)
> > > +
> > > +static inline void
> > > +print_ipv4_key_add(struct rte_table_acl_rule_add_params *key) {
> > > +	unsigned char a, b, c, d;
> > > +
> > > +	printf("ipv4_key_add: 0x%02hhx/0x%hhx ",
> > > +		key->field_value[PROTO_FIELD_IPV4].value.u8,
> > > +		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
> > > +
> > > +	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
> > > +			&a, &b, &c, &d);
> > > +	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > > +			key-
> > > >field_value[SRC_FIELD_IPV4].mask_range.u32);
> > > +
> > > +	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
> > > +			&a, &b, &c, &d);
> > > +	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > > +			key-
> > > >field_value[DST_FIELD_IPV4].mask_range.u32);
> > > +
> > > +	printf("%hu : 0x%x %hu : 0x%x",
> > > +		key->field_value[SRCP_FIELD_IPV4].value.u16,
> > > +		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
> > > +		key->field_value[DSTP_FIELD_IPV4].value.u16,
> > > +		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
> > > +
> > > +	printf(" priority: 0x%x\n", key->priority); }
> >
> > The above function is specific to printing acl table keys. How about
> > making this function little generic by passing the parameters to
> > distinguish the rule, table type, etc. and do the printing?
> >
> > Same comments for the print_ipv4_key_delete().
> >
> 
> This is debug code, could it be left as is until another table type is added?
> I will rename to include acl in the function names.
> 
> >
> > > +static inline void
> > > +print_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
> {
> > > +	unsigned char a, b, c, d;
> > > +
> > > +	printf("ipv4_key_del: 0x%02hhx/0x%hhx ",
> > > +		key->field_value[PROTO_FIELD_IPV4].value.u8,
> > > +		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
> > > +
> > > +	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
> > > +			&a, &b, &c, &d);
> > > +	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > > +			key-
> > > >field_value[SRC_FIELD_IPV4].mask_range.u32);
> > > +
> > > +	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
> > > +			&a, &b, &c, &d);
> > > +	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
> > > +			key-
> > > >field_value[DST_FIELD_IPV4].mask_range.u32);
> > > +
> > > +	printf("%hu : 0x%x %hu : 0x%x\n",
> > > +		key->field_value[SRCP_FIELD_IPV4].value.u16,
> > > +		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
> > > +		key->field_value[DSTP_FIELD_IPV4].value.u16,
> > > +		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
> > > +}
> > > +#endif
> > > +
> > > +static int
> > > +rte_flow_classifier_check_params(struct rte_flow_classifier_params
> > > *params)
> > > +{
> > > +	if (params == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: Incorrect value for parameter params\n",
> > > __func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* name */
> > > +	if (params->name == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: Incorrect value for parameter name\n",
> > > __func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* socket */
> > > +	if ((params->socket_id < 0) ||
> > > +	    (params->socket_id >= RTE_MAX_NUMA_NODES)) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: Incorrect value for parameter socket_id\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +struct rte_flow_classifier *
> > > +rte_flow_classifier_create(struct rte_flow_classifier_params
> > > +*params) {
> > > +	struct rte_flow_classifier *cls;
> > > +	int ret;
> > > +
> > > +	/* Check input parameters */
> > > +	ret = rte_flow_classifier_check_params(params);
> > > +	if (ret != 0) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: flow classifier params check failed (%d)\n",
> > > +			__func__, ret);
> > > +		return NULL;
> > > +	}
> > > +
> > > +	/* Allocate memory for the flow classifier */
> > > +	cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
> > > +			sizeof(struct rte_flow_classifier),
> > > +			RTE_CACHE_LINE_SIZE, params->socket_id);
> > > +
> > > +	if (cls == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: flow classifier memory allocation failed\n",
> > > +			__func__);
> > > +		return NULL;
> > > +	}
> > > +
> > > +	/* Save input parameters */
> > > +	snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
> > > +			params->name);
> > > +	cls->socket_id = params->socket_id;
> > > +	cls->type = params->type;
> > > +
> > > +	/* Initialize flow classifier internal data structure */
> > > +	cls->num_tables = 0;
> > > +
> > > +	return cls;
> > > +}
> > > +
> > > +static void
> > > +rte_flow_classify_table_free(struct rte_table *table) {
> > > +	if (table->ops.f_free != NULL)
> > > +		table->ops.f_free(table->h_table);
> > > +
> > > +	rte_free(table->default_entry);
> > > +}
> >
> > This is internal function. There is an API for creating a table for
> > classifier instance but not for destroying the table. What if
> > application requires destroying the specific table of the classifier
> > but want to keep the classifier instance?
> 
> Yes, there should probably be an API to delete a table.
> I will add an rte_flow_classify_table_delete() API.

After further investigation, I will not add an rte_flow_table_delete() API.

The tables are stored in an array. Deleting a table will leave an empty entry in the array.
The table_create API just adds tables to the array until the array is full; it does not handle empty entries in the array.  Note, the ip_pipeline code does not have a table_delete API either.
The rte_flow_classifier_free() API frees all the tables in the classifier.

> > > +int
> > > +rte_flow_classifier_free(struct rte_flow_classifier *cls) {
> > > +	uint32_t i;
> > > +
> > > +	/* Check input parameters */
> > > +	if (cls == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: rte_flow_classifier parameter is NULL\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* Free tables */
> > > +	for (i = 0; i < cls->num_tables; i++) {
> > > +		struct rte_table *table = &cls->tables[i];
> > > +
> > > +		rte_flow_classify_table_free(table);
> > > +	}
> > > +
> > > +	/* Free flow classifier memory */
> > > +	rte_free(cls);
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +static int
> > > +rte_table_check_params(struct rte_flow_classifier *cls,
> > > +		struct rte_flow_classify_table_params *params,
> > > +		uint32_t *table_id)
> > > +{
> > > +	if (cls == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: flow classifier parameter is NULL\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +	if (params == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY, "%s: params parameter is NULL\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +	if (table_id == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY, "%s: table_id parameter is NULL\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* ops */
> > > +	if (params->ops == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY, "%s: params->ops is NULL\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	if (params->ops->f_create == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: f_create function pointer is NULL\n", __func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	if (params->ops->f_lookup == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: f_lookup function pointer is NULL\n",
> > > __func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* Do we have room for one more table? */
> > > +	if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: Incorrect value for num_tables parameter\n",
> > > +			__func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +int
> > > +rte_flow_classify_table_create(struct rte_flow_classifier *cls,
> > > +	struct rte_flow_classify_table_params *params,
> > > +	uint32_t *table_id)
> > > +{
> > > +	struct rte_table *table;
> > > +	struct rte_flow_classify_table_entry *default_entry;
> > > +	void *h_table;
> > > +	uint32_t entry_size, id;
> > > +	int ret;
> > > +
> > > +	/* Check input arguments */
> > > +	ret = rte_table_check_params(cls, params, table_id);
> > > +	if (ret != 0)
> > > +		return ret;
> > > +
> > > +	id = cls->num_tables;
> > > +	table = &cls->tables[id];
> > > +
> > > +	/* Allocate space for the default table entry */
> > > +	entry_size = sizeof(struct rte_flow_classify_table_entry) +
> > > +			params->table_metadata_size;
> > > +	default_entry =
> > > +		(struct rte_flow_classify_table_entry *) rte_zmalloc_socket(
> > > +		"Flow Classify default entry", entry_size,
> > > +		RTE_CACHE_LINE_SIZE, cls->socket_id);
> > > +	if (default_entry == NULL) {
> > > +		RTE_LOG(ERR, CLASSIFY,
> > > +			"%s: Failed to allocate default entry\n", __func__);
> > > +		return -EINVAL;
> > > +	}
> >
> > what is the purpose of default_entry as I don't see its usage anywhere
> > in the library?
> 
> This came from the ip_pipeline code in earlier discussions, it is not used at
> present.
> I will remove it.
> 
> 
> > > +	/* Create the table */
> > > +	h_table = params->ops->f_create(params->arg_create, cls-
> > > >socket_id,
> > > +		entry_size);
> > > +	if (h_table == NULL) {
> > > +		rte_free(default_entry);
> > > +		RTE_LOG(ERR, CLASSIFY, "%s: Table creation failed\n",
> > > __func__);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	/* Commit current table to the classifier */
> > > +	cls->num_tables++;
> > > +	*table_id = id;
> > > +
> > > +	/* Save input parameters */
> > > +	memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
> > > +
> > > +	table->entry_size = entry_size;
> > > +	table->default_entry = default_entry;
> > > +
> > > +	/* Initialize table internal data structure */
> > > +	table->h_table = h_table;
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +static struct rte_flow_classify_rule *
> > > +allocate_ipv4_5tuple_rule(void)
> > > +{
> > > +	struct rte_flow_classify_rule *rule;
> > > +
> > > +	rule = malloc(sizeof(struct rte_flow_classify_rule));
> > > +	if (!rule)
> > > +		return rule;
> > > +
> > > +	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
> > > +	rule->id = unique_id++;
> > > +	rule->rule_type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
> > > +
> > > +	memcpy(&rule->action, classify_get_flow_action(),
> > > +	       sizeof(struct rte_flow_action));
> > > +
> > > +	/* key add values */
> > > +	rule->u.key.key_add.priority = ntuple_filter.priority;
> > > +	rule-
> > > >u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
> > > +			ntuple_filter.proto_mask;
> > > +	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
> > > +			ntuple_filter.proto;
> > > +	rule->ipv4_5tuple.proto = ntuple_filter.proto;
> > > +	rule->ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
> > > +
> > > +	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32
> > > =
> > > +			ntuple_filter.src_ip_mask;
> > > +	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
> > > +			ntuple_filter.src_ip;
> > > +	rule->ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
> > > +	rule->ipv4_5tuple.src_ip = ntuple_filter.src_ip;
> > > +
> > > +	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32
> > > =
> > > +			ntuple_filter.dst_ip_mask;
> > > +	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
> > > +			ntuple_filter.dst_ip;
> > > +	rule->ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
> > > +	rule->ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
> > > +
> > > +	rule-
> > > >u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
> > > +			ntuple_filter.src_port_mask;
> > > +	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
> > > +			ntuple_filter.src_port;
> > > +	rule->ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
> > > +	rule->ipv4_5tuple.src_port = ntuple_filter.src_port;
> > > +
> > > +	rule-
> > > >u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
> > > +			ntuple_filter.dst_port_mask;
> > > +	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
> > > +			ntuple_filter.dst_port;
> > > +	rule->ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
> > > +	rule->ipv4_5tuple.dst_port = ntuple_filter.dst_port;
> > > +
> > > +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> > > +	print_ipv4_key_add(&rule->u.key.key_add);
> > > +#endif
> > > +
> > > +	/* key delete values */
> > > +	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
> > > +	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
> > > +	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
> > > +
> > > +#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
> > > +	print_ipv4_key_delete(&rule->u.key.key_del);
> > > +#endif
> > > +	return rule;
> > > +}
> > > +
> > > +struct rte_flow_classify_rule *
> > > +rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
> > > +		uint32_t table_id,
> > > +		const struct rte_flow_attr *attr,
> > > +		const struct rte_flow_item pattern[],
> > > +		const struct rte_flow_action actions[],
> > > +		struct rte_flow_error *error)
> > > +{
> > > +	struct rte_flow_classify_rule *rule;
> > > +	struct rte_flow_classify_table_entry *table_entry;
> > > +	int ret;
> > > +
> > > +	if (!error)
> > > +		return NULL;
> > > +
> > > +	if (!cls) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "NULL classifier.");
> > > +		return NULL;
> > > +	}
> > > +
> > > +	if (table_id >= cls->num_tables) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "invalid table_id.");
> > > +		return NULL;
> > > +	}
> > > +
> > > +	if (!pattern) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > > +				NULL, "NULL pattern.");
> > > +		return NULL;
> > > +	}
> > > +
> > > +	if (!actions) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > > +				NULL, "NULL action.");
> > > +		return NULL;
> > > +	}
> > > +
> > > +	if (!attr) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_ATTR,
> > > +				NULL, "NULL attribute.");
> > > +		return NULL;
> > > +	}
> > > +
> > > +	/* parse attr, pattern and actions */
> > > +	ret = rte_flow_classify_validate(attr, pattern, actions, error);
> > > +	if (ret < 0)
> > > +		return NULL;
> > > +
> > > +	switch (cls->type) {
> > > +	case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
> > > +		rule = allocate_ipv4_5tuple_rule();
> > > +		if (!rule)
> > > +			return NULL;
> > > +		break;
> > > +	default:
> > > +		return NULL;
> > > +	}
> > > +
> > > +	rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
> > > +	if (!rule->entry) {
> > > +		free(rule);
> > > +		rule = NULL;
> > > +		return NULL;
> > > +	}
> > > +
> > > +	table_entry = rule->entry;
> > > +	table_entry->rule_id = rule->id;
> > > +
> > > +	ret = cls->tables[table_id].ops.f_add(
> > > +			cls->tables[table_id].h_table,
> > > +			&rule->u.key.key_add,
> > > +			rule->entry,
> > > +			&rule->key_found,
> > > +			&rule->entry_ptr);
> > > +	if (ret) {
> > > +		free(rule->entry);
> > > +		free(rule);
> > > +		rule = NULL;
> > > +		return NULL;
> > > +	}
> > > +	return rule;
> > > +}
> >
> > It is not clear if the pattern to be added already exists in the
> > table? how this information will be propagated to the application?
> 
> The key found flag will be set if the key is already present.
>  I will add a key_found parameter to the API to return the key found data.
> 
> > > +int
> > > +rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
> > > +		uint32_t table_id,
> > > +		struct rte_flow_classify_rule *rule,
> > > +		struct rte_flow_error *error)
> > > +{
> > > +	int ret = -EINVAL;
> > > +
> > > +	if (!error)
> > > +		return ret;
> > > +
> > > +	if (!cls) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "NULL classifier.");
> > > +		return ret;
> > > +	}
> > > +
> > > +	if (table_id >= cls->num_tables) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "invalid table_id.");
> > > +		return ret;
> > > +	}
> > > +
> > > +	if (!rule) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "NULL rule.");
> > > +		return ret;
> > > +	}
> > > +
> > > +	ret = cls->tables[table_id].ops.f_delete(
> > > +			cls->tables[table_id].h_table,
> > > +			&rule->u.key.key_del,
> > > +			&rule->key_found,
> > > +			&rule->entry);
> >
> > Please introduce check for f_delete, shouldn't be NULL.
> 
> I will add a check that f_delete is not NULL.
> 
> 
> > > +
> > > +int
> > > +rte_flow_classifier_run(struct rte_flow_classifier *cls,
> > > +		uint32_t table_id,
> > > +		struct rte_mbuf **pkts,
> > > +		const uint16_t nb_pkts,
> > > +		struct rte_flow_error *error)
> > > +{
> > > +	int ret = -EINVAL;
> > > +	uint64_t pkts_mask;
> > > +	uint64_t lookup_hit_mask;
> > > +
> > > +	if (!error)
> > > +		return ret;
> > > +
> > > +	if (!cls || !pkts || nb_pkts == 0) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "invalid input");
> > > +		return ret;
> > > +	}
> > > +
> > > +	if (table_id >= cls->num_tables) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "invalid table_id.");
> > > +		return ret;
> > > +	}
> > > +
> > > +	pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
> > > +	ret = cls->tables[table_id].ops.f_lookup(
> > > +			cls->tables[table_id].h_table,
> > > +			pkts, pkts_mask, &lookup_hit_mask,
> > > +			(void **)cls->entries);
> > > +
> > > +	if (!ret && lookup_hit_mask)
> > > +		cls->nb_pkts = nb_pkts;
> > > +	else
> > > +		cls->nb_pkts = 0;
> > > +
> > > +	return ret;
> > > +}
> > > +
> > > +static int
> > > +action_apply(struct rte_flow_classifier *cls,
> > > +		struct rte_flow_classify_rule *rule,
> > > +		struct rte_flow_classify_stats *stats) {
> > > +	struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
> > > +	uint64_t count = 0;
> > > +	int i;
> > > +	int ret = -ENODATA;
> > > +
> > > +	switch (rule->action.type) {
> > > +	case RTE_FLOW_ACTION_TYPE_COUNT:
> > > +		for (i = 0; i < cls->nb_pkts; i++) {
> > > +			if (rule->id == cls->entries[i]->rule_id)
> > > +				count++;
> > > +		}
> > > +		if (count) {
> > > +			ret = 0;
> > > +			ntuple_stats =
> > > +				(struct rte_flow_classify_ipv4_5tuple_stats
> > > *)
> > > +				stats->stats;
> > > +			ntuple_stats->counter1 = count;
> > > +			ntuple_stats->ipv4_5tuple = rule->ipv4_5tuple;
> > > +		}
> > > +		break;
> > > +	default:
> > > +		ret = -ENOTSUP;
> > > +		break;
> > > +	}
> > > +
> > > +	return ret;
> > > +}
> > > +
> > > +int
> > > +rte_flow_classifier_query(struct rte_flow_classifier *cls,
> > > +		struct rte_flow_classify_rule *rule,
> > > +		struct rte_flow_classify_stats *stats,
> > > +		struct rte_flow_error *error)
> > > +{
> > > +	int ret = -EINVAL;
> > > +
> > > +	if (!error)
> > > +		return ret;
> > > +
> > > +	if (!cls || !rule || !stats) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > > +				NULL, "invalid input");
> > > +		return ret;
> > > +	}
> > > +
> > > +	ret = action_apply(cls, rule, stats);
> > > +	return ret;
> > > +}
> >
> > The rte_flow_classify_run and rte_flow_classify_query API  should be
> > invoked consecutively in the application, true?
> 
> Yes, they should be invoked consecutively.
> I will  merge the  rte_flow_classify_run API with the  rte_flow_classify_query
> API and drop the rte_flow_classify_run API.
> 
> 
> > > diff --git a/lib/librte_flow_classify/rte_flow_classify.h
> > > b/lib/librte_flow_classify/rte_flow_classify.h
> > > new file mode 100644
> > > index 0000000..9bd6cf4
> > > --- /dev/null
> > > +++ b/lib/librte_flow_classify/rte_flow_classify.h
> > > @@ -0,0 +1,321 @@
> > > +/*-
> > > + *   BSD LICENSE
> > > + *
> > > + *   Copyright(c) 2017 Intel Corporation. All rights reserved.
> > > + *   All rights reserved.
> > > + *
> > > + *   Redistribution and use in source and binary forms, with or without
> > > + *   modification, are permitted provided that the following conditions
> > > + *   are met:
> > > + *
> > > + *     * Redistributions of source code must retain the above copyright
> > > + *       notice, this list of conditions and the following disclaimer.
> > > + *     * Redistributions in binary form must reproduce the above
> copyright
> > > + *       notice, this list of conditions and the following disclaimer in
> > > + *       the documentation and/or other materials provided with the
> > > + *       distribution.
> > > + *     * Neither the name of Intel Corporation nor the names of its
> > > + *       contributors may be used to endorse or promote products derived
> > > + *       from this software without specific prior written permission.
> > > + *
> > > + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> > > CONTRIBUTORS
> > > + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
> BUT
> > > NOT
> > > + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> > > FITNESS FOR
> > > + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> > > COPYRIGHT
> > > + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> > > INCIDENTAL,
> > > + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> > BUT
> > > NOT
> > > + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> > LOSS
> > > OF USE,
> > > + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
> CAUSED
> > > AND ON ANY
> > > + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> > > TORT
> > > + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> OUT
> > OF
> > > THE USE
> > > + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> > > DAMAGE.
> > > + */
> > > +
> > > +#ifndef _RTE_FLOW_CLASSIFY_H_
> > > +#define _RTE_FLOW_CLASSIFY_H_
> > > +
> > > +/**
> > > + * @file
> > > + *
> > > + * RTE Flow Classify Library
> > > + *
> > > + * This library provides flow record information with some measured
> > > properties.
> > > + *
> > > + * Application should define the flow and measurement criteria
> > > + (action) for
> > > it.
> > > + *
> > > + * Library doesn't maintain any flow records itself, instead flow
> > > + information
> > > is
> > > + * returned to upper layer only for given packets.
> > > + *
> > > + * It is application's responsibility to call
> > > + rte_flow_classify_query()
> > > + * for group of packets, just after receiving them or before
> > > + transmitting
> > > them.
> > > + * Application should provide the flow type interested in,
> > > + measurement to
> > > apply
> > > + * to that flow in rte_flow_classify_create() API, and should
> > > + provide
> > > + * rte_flow_classify object and storage to put results in
> > > + * rte_flow_classify_query() API.
> > > + *
> > > + *  Usage:
> > > + *  - application calls rte_flow_classify_create() to create a
> > rte_flow_classify
> > > + *    object.
> > > + *  - application calls rte_flow_classify_query() in a polling manner,
> > > + *    preferably after rte_eth_rx_burst(). This will cause the library to
> > > + *    convert packet information to flow information with some
> > > measurements.
> > > + *  - rte_flow_classify object can be destroyed when they are no
> > > + more
> > > needed
> > > + *    via rte_flow_classify_destroy()
> > > + */
> > > +
> > > +#include <rte_ethdev.h>
> > > +#include <rte_ether.h>
> > > +#include <rte_flow.h>
> > > +#include <rte_acl.h>
> > > +#include <rte_table_acl.h>
> > > +
> > > +#ifdef __cplusplus
> > > +extern "C" {
> > > +#endif
> > > +
> > > +
> > > +#define RTE_FLOW_CLASSIFY_TABLE_MAX  1
> > > +
> > > +/** Opaque data type for flow classifier */ struct
> > > +rte_flow_classifier;
> > > +
> > > +/** Opaque data type for flow classify rule */ struct
> > > +rte_flow_classify_rule;
> > > +
> > > +enum rte_flow_classify_rule_type {
> > > +	RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,   /**< no type */
> > > +	RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE, /**< IPv4 5tuple
> > > type */
> > > +};
> > > +
> > > +enum rte_flow_classify_table_type {
> > > +	RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE, /**< no type */
> > > +	RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,  /**< ACL type */ };
> > > +
> > > +/** Parameters for flow classifier creation */ struct
> > > +rte_flow_classifier_params {
> > > +	/**< flow classifier name */
> > > +	const char *name;
> > > +
> > > +	/**< CPU socket ID where memory for the flow classifier and its */
> > > +	/**< elements (tables) should be allocated */
> > > +	int socket_id;
> > > +
> > > +	/**< Table type */
> > > +	enum rte_flow_classify_table_type type;
> > > +
> > > +	/**< Table id */
> > > +	uint32_t table_id;
> > > +};
> > > +
> > > +struct rte_flow_classify_table_params {
> > > +	/**<Table operations (specific to each table type) */
> > > +	struct rte_table_ops *ops;
> > > +
> > > +	/**< Opaque param to be passed to the table create operation */
> > > +	void *arg_create;
> > > +
> > > +	/**< Memory size to be reserved per classifier object entry for */
> > > +	/**< storing meta data */
> > > +	uint32_t table_metadata_size;
> > > +};
> > > +
> > > +struct rte_flow_classify_ipv4_5tuple {
> > > +	uint32_t dst_ip;         /**< Destination IP address in big endian. */
> > > +	uint32_t dst_ip_mask;    /**< Mask of destination IP address. */
> > > +	uint32_t src_ip;         /**< Source IP address in big endian. */
> > > +	uint32_t src_ip_mask;    /**< Mask of destination IP address. */
> > > +	uint16_t dst_port;       /**< Destination port in big endian. */
> > > +	uint16_t dst_port_mask;  /**< Mask of destination port. */
> > > +	uint16_t src_port;       /**< Source Port in big endian. */
> > > +	uint16_t src_port_mask;  /**< Mask of source port. */
> > > +	uint8_t proto;           /**< L4 protocol. */
> > > +	uint8_t proto_mask;      /**< Mask of L4 protocol. */
> > > +};
> > > +
> > > +struct rte_flow_classify_table_entry {
> > > +	/**< meta-data for classify rule */
> > > +	uint32_t rule_id;
> > > +
> > > +	/**< Start of table entry area for user defined meta data */
> > > +	__extension__ uint8_t meta_data[0];
> > > +};
> >
> > The above structure is not used by any of the public API ?
> >
> > > +/**
> > > + * Flow stats
> > > + *
> > > + * For the count action, stats can be returned by the query API.
> > > + *
> > > + * Storage for stats is provided by application.
> > > + */
> > > +struct rte_flow_classify_stats {
> > > +	void *stats;
> > > +};
> > > +
> > > +struct rte_flow_classify_ipv4_5tuple_stats {
> > > +	/**< count of packets that match IPv4 5tuple pattern */
> > > +	uint64_t counter1;
> > > +	/**< IPv4 5tuple data */
> > > +	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
> > > +};
> > > +
> > > +/**
> > > + * Flow classifier create
> > > + *
> > > + * @param params
> > > + *   Parameters for flow classifier creation
> > > + * @return
> > > + *   Handle to flow classifier instance on success or NULL otherwise
> > > + */
> > > +struct rte_flow_classifier *rte_flow_classifier_create(
> > > +		struct rte_flow_classifier_params *params);
> > > +
> > > +/**
> > > + * Flow classifier free
> > > + *
> > > + * @param cls
> > > + *   Handle to flow classifier instance
> > > + * @return
> > > + *   0 on success, error code otherwise
> > > + */
> > > +int rte_flow_classifier_free(struct rte_flow_classifier *cls);
> > > +
> > > +/**
> > > + * Flow classify table create
> > > + *
> > > + * @param cls
> > > + *   Handle to flow classifier instance
> > > + * @param params
> > > + *   Parameters for flow_classify table creation
> > > + * @param table_id
> > > + *   Table ID. Valid only within the scope of table IDs of the current
> > > + *   classifier. Only returned after a successful invocation.
> > > + * @return
> > > + *   0 on success, error code otherwise
> > > + */
> > > +int rte_flow_classify_table_create(struct rte_flow_classifier *cls,
> > > +	struct rte_flow_classify_table_params *params,
> > > +	uint32_t *table_id);
> > > +
> > > +/**
> > > + * Validate a flow classify rule.
> > > + *
> > > + * @param[in] attr
> > > + *   Flow rule attributes
> > > + * @param[in] pattern
> > > + *   Pattern specification (list terminated by the END pattern item).
> > > + * @param[in] actions
> > > + *   Associated actions (list terminated by the END pattern item).
> > > + * @param[out] error
> > > + *   Perform verbose error reporting if not NULL. Structure
> > > + *   initialised in case of error only.
> > > + *
> > > + * @return
> > > + *   0 on success, error code otherwise.
> > > + */
> > > +int
> > > +rte_flow_classify_validate(
> > > +		const struct rte_flow_attr *attr,
> > > +		const struct rte_flow_item pattern[],
> > > +		const struct rte_flow_action actions[],
> > > +		struct rte_flow_error *error);
> > > +
> > > +/**
> > > + * Add a flow classify rule to the flow_classifier table.
> > > + *
> > > + * @param[in] cls
> > > + *   Flow classifier handle
> > > + * @param[in] table_id
> > > + *   id of table
> > > + * @param[in] attr
> > > + *   Flow rule attributes
> > > + * @param[in] pattern
> > > + *   Pattern specification (list terminated by the END pattern item).
> > > + * @param[in] actions
> > > + *   Associated actions (list terminated by the END pattern item).
> > > + * @param[out] error
> > > + *   Perform verbose error reporting if not NULL. Structure
> > > + *   initialised in case of error only.
> > > + * @return
> > > + *   A valid handle in case of success, NULL otherwise.
> > > + */
> > > +struct rte_flow_classify_rule *
> > > +rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
> > > +		uint32_t table_id,
> > > +		const struct rte_flow_attr *attr,
> > > +		const struct rte_flow_item pattern[],
> > > +		const struct rte_flow_action actions[],
> > > +		struct rte_flow_error *error);
> > > +
> > > +/**
> > > + * Delete a flow classify rule from the flow_classifier table.
> > > + *
> > > + * @param[in] cls
> > > + *   Flow classifier handle
> > > + * @param[in] table_id
> > > + *   id of table
> > > + * @param[in] rule
> > > + *   Flow classify rule
> > > + * @param[out] error
> > > + *   Perform verbose error reporting if not NULL. Structure
> > > + *   initialised in case of error only.
> > > + * @return
> > > + *   0 on success, error code otherwise.
> > > + */
> > > +int
> > > +rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
> > > +		uint32_t table_id,
> > > +		struct rte_flow_classify_rule *rule,
> > > +		struct rte_flow_error *error);
> > > +
> > > +/**
> > > + * Run flow classifier for given packets.
> > > + *
> > > + * @param[in] cls
> > > + *   Flow classifier handle
> > > + * @param[in] table_id
> > > + *   id of table
> > > + * @param[in] pkts
> > > + *   Pointer to packets to process
> > > + * @param[in] nb_pkts
> > > + *   Number of packets to process
> > > + * @param[out] error
> > > + *   Perform verbose error reporting if not NULL. Structure
> > > + *   initialised in case of error only.
> > > + *
> > > + * @return
> > > + *   0 on success, error code otherwise.
> > > + */
> > > +
> > > +int rte_flow_classifier_run(struct rte_flow_classifier *cls,
> > > +		uint32_t table_id,
> > > +		struct rte_mbuf **pkts,
> > > +		const uint16_t nb_pkts,
> > > +		struct rte_flow_error *error);
> > > +
> > > +/**
> > > + * Query flow classifier for given rule.
> > > + *
> > > + * @param[in] cls
> > > + *   Flow classifier handle
> > > + * @param[in] rule
> > > + *   Flow classify rule
> > > + * @param[in] stats
> > > + *   Flow classify stats
> > > + * @param[out] error
> > > + *   Perform verbose error reporting if not NULL. Structure
> > > + *   initialised in case of error only.
> > > + *
> > > + * @return
> > > + *   0 on success, error code otherwise.
> > > + */
> > > +int rte_flow_classifier_query(struct rte_flow_classifier *cls,
> > > +		struct rte_flow_classify_rule *rule,
> > > +		struct rte_flow_classify_stats *stats,
> > > +		struct rte_flow_error *error);
> > > +
> > > +#ifdef __cplusplus
> > > +}
> > > +#endif
> > > +
> > > +#endif /* _RTE_FLOW_CLASSIFY_H_ */
> >
> >
> > There are doxygen rendering issues in this document. Please cross
> > check the header file with "make doc-api-html" output.
> 
> I will check the doxygen output.
> 
> I will send a v9 patch set with the above changes.
> 
 Regards,
 
 Bernard.
  

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index 2a58378..0981793 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -735,6 +735,13 @@  F: doc/guides/prog_guide/pdump_lib.rst
 F: app/pdump/
 F: doc/guides/tools/pdump.rst
 
+Flow classify
+M: Bernard Iremonger <bernard.iremonger@intel.com>
+F: lib/librte_flow_classify/
+F: test/test/test_flow_classify*
+F: examples/flow_classify/
+F: doc/guides/sample_app_ug/flow_classify.rst
+F: doc/guides/prog_guide/flow_classify_lib.rst
 
 Packet Framework
 ----------------
diff --git a/config/common_base b/config/common_base
index d9471e8..e1079aa 100644
--- a/config/common_base
+++ b/config/common_base
@@ -707,6 +707,12 @@  CONFIG_RTE_LIBRTE_GSO=y
 CONFIG_RTE_LIBRTE_METER=y
 
 #
+# Compile librte_classify
+#
+CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y
+CONFIG_RTE_LIBRTE_CLASSIFY_DEBUG=n
+
+#
 # Compile librte_sched
 #
 CONFIG_RTE_LIBRTE_SCHED=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 990815f..e4468d0 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -111,6 +111,7 @@  The public API headers are grouped by topics:
   [ACL]                (@ref rte_acl.h),
   [EFD]                (@ref rte_efd.h),
   [member]             (@ref rte_member.h)
+  [flow_classify]      (@ref rte_flow_classify.h),
 
 - **QoS**:
   [metering]           (@ref rte_meter.h),
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index 9e9fa56..9edb6fd 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -48,6 +48,7 @@  INPUT                   = doc/api/doxy-api-index.md \
                           lib/librte_efd \
                           lib/librte_ether \
                           lib/librte_eventdev \
+                          lib/librte_flow_classify \
                           lib/librte_gro \
                           lib/librte_gso \
                           lib/librte_hash \
diff --git a/lib/Makefile b/lib/Makefile
index 86d475f..aba2593 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -83,6 +83,9 @@  DIRS-$(CONFIG_RTE_LIBRTE_POWER) += librte_power
 DEPDIRS-librte_power := librte_eal
 DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter
 DEPDIRS-librte_meter := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += librte_flow_classify
+DEPDIRS-librte_flow_classify := librte_eal librte_ether librte_net
+DEPDIRS-librte_flow_classify += librte_table librte_acl librte_port
 DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched
 DEPDIRS-librte_sched := librte_eal librte_mempool librte_mbuf librte_net
 DEPDIRS-librte_sched += librte_timer
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 2fa1199..67209ae 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -88,6 +88,7 @@  struct rte_logs {
 #define RTE_LOGTYPE_EFD       18 /**< Log related to EFD. */
 #define RTE_LOGTYPE_EVENTDEV  19 /**< Log related to eventdev. */
 #define RTE_LOGTYPE_GSO       20 /**< Log related to GSO. */
+#define RTE_LOGTYPE_CLASSIFY  21 /**< Log related to flow classify. */
 
 /* these log types can be used in an application */
 #define RTE_LOGTYPE_USER1     24 /**< User-defined log type 1. */
diff --git a/lib/librte_flow_classify/Makefile b/lib/librte_flow_classify/Makefile
new file mode 100644
index 0000000..7863a0c
--- /dev/null
+++ b/lib/librte_flow_classify/Makefile
@@ -0,0 +1,51 @@ 
+#   BSD LICENSE
+#
+#   Copyright(c) 2017 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_flow_classify.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_flow_classify_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify.c
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify_parse.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)-include := rte_flow_classify.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_flow_classify/rte_flow_classify.c b/lib/librte_flow_classify/rte_flow_classify.c
new file mode 100644
index 0000000..22082c4
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify.c
@@ -0,0 +1,735 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+#include <rte_table_acl.h>
+#include <stdbool.h>
+
+static struct rte_eth_ntuple_filter ntuple_filter;
+static uint32_t unique_id = 1;
+
+struct rte_table {
+	/* Input parameters */
+	struct rte_table_ops ops;
+	struct rte_flow_classify_table_entry *default_entry;
+	uint32_t entry_size;
+	enum rte_flow_classify_table_type type;
+
+	/* Handle to the low-level table object */
+	void *h_table;
+};
+
+#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
+
+struct rte_flow_classifier {
+	/* Input parameters */
+	char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
+	int socket_id;
+	enum rte_flow_classify_table_type type;
+
+	/* Internal tables */
+	struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
+	uint32_t num_tables;
+	uint16_t nb_pkts;
+	struct rte_flow_classify_table_entry
+		*entries[RTE_PORT_IN_BURST_SIZE_MAX];
+} __rte_cache_aligned;
+
+enum {
+	PROTO_FIELD_IPV4,
+	SRC_FIELD_IPV4,
+	DST_FIELD_IPV4,
+	SRCP_FIELD_IPV4,
+	DSTP_FIELD_IPV4,
+	NUM_FIELDS_IPV4
+};
+
+struct acl_keys {
+	struct rte_table_acl_rule_add_params key_add; /**< add key */
+	struct rte_table_acl_rule_delete_params	key_del; /**< delete key */
+};
+
+struct rte_flow_classify_rule {
+	uint32_t id; /**< unique ID of classify rule */
+	enum rte_flow_classify_rule_type rule_type; /**< classify rule type */
+	struct rte_flow_action action; /**< action when match found */
+	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple; /**< ipv4 5tuple */
+	union {
+		struct acl_keys key;
+	} u;
+	int key_found;   /**< rule key found in table */
+	void *entry;     /**< pointer to buffer to hold rule meta data */
+	void *entry_ptr; /**< handle to the table entry for rule meta data */
+};
+
+int
+rte_flow_classify_validate(
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct rte_flow_item *items;
+	parse_filter_t parse_filter;
+	uint32_t item_num = 0;
+	uint32_t i = 0;
+	int ret;
+
+	if (!error)
+		return -EINVAL;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -EINVAL;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -EINVAL;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -EINVAL;
+	}
+
+	memset(&ntuple_filter, 0, sizeof(ntuple_filter));
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = malloc(item_num * sizeof(struct rte_flow_item));
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for pattern items.");
+		return -ENOMEM;
+	}
+
+	memset(items, 0, item_num * sizeof(struct rte_flow_item));
+	classify_pattern_skip_void_item(items, pattern);
+
+	parse_filter = classify_find_parse_filter_func(items);
+	if (!parse_filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   pattern, "Unsupported pattern");
+		free(items);
+		return -EINVAL;
+	}
+
+	ret = parse_filter(attr, items, actions, &ntuple_filter, error);
+	free(items);
+	return ret;
+}
+
+#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
+#define uint32_t_to_char(ip, a, b, c, d) do {\
+		*a = (unsigned char)(ip >> 24 & 0xff);\
+		*b = (unsigned char)(ip >> 16 & 0xff);\
+		*c = (unsigned char)(ip >> 8 & 0xff);\
+		*d = (unsigned char)(ip & 0xff);\
+	} while (0)
+
+static inline void
+print_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
+{
+	unsigned char a, b, c, d;
+
+	printf("ipv4_key_add: 0x%02hhx/0x%hhx ",
+		key->field_value[PROTO_FIELD_IPV4].value.u8,
+		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+			&a, &b, &c, &d);
+	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+			key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+			&a, &b, &c, &d);
+	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+			key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+	printf("%hu : 0x%x %hu : 0x%x",
+		key->field_value[SRCP_FIELD_IPV4].value.u16,
+		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+		key->field_value[DSTP_FIELD_IPV4].value.u16,
+		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+
+	printf(" priority: 0x%x\n", key->priority);
+}
+
+static inline void
+print_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
+{
+	unsigned char a, b, c, d;
+
+	printf("ipv4_key_del: 0x%02hhx/0x%hhx ",
+		key->field_value[PROTO_FIELD_IPV4].value.u8,
+		key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+	uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+			&a, &b, &c, &d);
+	printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+			key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+	uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+			&a, &b, &c, &d);
+	printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+			key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+	printf("%hu : 0x%x %hu : 0x%x\n",
+		key->field_value[SRCP_FIELD_IPV4].value.u16,
+		key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+		key->field_value[DSTP_FIELD_IPV4].value.u16,
+		key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+}
+#endif
+
+static int
+rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
+{
+	if (params == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: Incorrect value for parameter params\n", __func__);
+		return -EINVAL;
+	}
+
+	/* name */
+	if (params->name == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: Incorrect value for parameter name\n", __func__);
+		return -EINVAL;
+	}
+
+	/* socket */
+	if ((params->socket_id < 0) ||
+	    (params->socket_id >= RTE_MAX_NUMA_NODES)) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: Incorrect value for parameter socket_id\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+struct rte_flow_classifier *
+rte_flow_classifier_create(struct rte_flow_classifier_params *params)
+{
+	struct rte_flow_classifier *cls;
+	int ret;
+
+	/* Check input parameters */
+	ret = rte_flow_classifier_check_params(params);
+	if (ret != 0) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: flow classifier params check failed (%d)\n",
+			__func__, ret);
+		return NULL;
+	}
+
+	/* Allocate memory for the flow classifier */
+	cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
+			sizeof(struct rte_flow_classifier),
+			RTE_CACHE_LINE_SIZE, params->socket_id);
+
+	if (cls == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: flow classifier memory allocation failed\n",
+			__func__);
+		return NULL;
+	}
+
+	/* Save input parameters */
+	snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
+			params->name);
+	cls->socket_id = params->socket_id;
+	cls->type = params->type;
+
+	/* Initialize flow classifier internal data structure */
+	cls->num_tables = 0;
+
+	return cls;
+}
+
+static void
+rte_flow_classify_table_free(struct rte_table *table)
+{
+	if (table->ops.f_free != NULL)
+		table->ops.f_free(table->h_table);
+
+	rte_free(table->default_entry);
+}
+
+int
+rte_flow_classifier_free(struct rte_flow_classifier *cls)
+{
+	uint32_t i;
+
+	/* Check input parameters */
+	if (cls == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: rte_flow_classifier parameter is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Free tables */
+	for (i = 0; i < cls->num_tables; i++) {
+		struct rte_table *table = &cls->tables[i];
+
+		rte_flow_classify_table_free(table);
+	}
+
+	/* Free flow classifier memory */
+	rte_free(cls);
+
+	return 0;
+}
+
+static int
+rte_table_check_params(struct rte_flow_classifier *cls,
+		struct rte_flow_classify_table_params *params,
+		uint32_t *table_id)
+{
+	if (cls == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: flow classifier parameter is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+	if (params == NULL) {
+		RTE_LOG(ERR, CLASSIFY, "%s: params parameter is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+	if (table_id == NULL) {
+		RTE_LOG(ERR, CLASSIFY, "%s: table_id parameter is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* ops */
+	if (params->ops == NULL) {
+		RTE_LOG(ERR, CLASSIFY, "%s: params->ops is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (params->ops->f_create == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: f_create function pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (params->ops->f_lookup == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: f_lookup function pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Do we have room for one more table? */
+	if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: Incorrect value for num_tables parameter\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+	struct rte_flow_classify_table_params *params,
+	uint32_t *table_id)
+{
+	struct rte_table *table;
+	struct rte_flow_classify_table_entry *default_entry;
+	void *h_table;
+	uint32_t entry_size, id;
+	int ret;
+
+	/* Check input arguments */
+	ret = rte_table_check_params(cls, params, table_id);
+	if (ret != 0)
+		return ret;
+
+	id = cls->num_tables;
+	table = &cls->tables[id];
+
+	/* Allocate space for the default table entry */
+	entry_size = sizeof(struct rte_flow_classify_table_entry) +
+			params->table_metadata_size;
+	default_entry =
+		(struct rte_flow_classify_table_entry *) rte_zmalloc_socket(
+		"Flow Classify default entry", entry_size,
+		RTE_CACHE_LINE_SIZE, cls->socket_id);
+	if (default_entry == NULL) {
+		RTE_LOG(ERR, CLASSIFY,
+			"%s: Failed to allocate default entry\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Create the table */
+	h_table = params->ops->f_create(params->arg_create, cls->socket_id,
+		entry_size);
+	if (h_table == NULL) {
+		rte_free(default_entry);
+		RTE_LOG(ERR, CLASSIFY, "%s: Table creation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Commit current table to the classifier */
+	cls->num_tables++;
+	*table_id = id;
+
+	/* Save input parameters */
+	memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
+
+	table->entry_size = entry_size;
+	table->default_entry = default_entry;
+
+	/* Initialize table internal data structure */
+	table->h_table = h_table;
+
+	return 0;
+}
+
+static struct rte_flow_classify_rule *
+allocate_ipv4_5tuple_rule(void)
+{
+	struct rte_flow_classify_rule *rule;
+
+	rule = malloc(sizeof(struct rte_flow_classify_rule));
+	if (!rule)
+		return rule;
+
+	memset(rule, 0, sizeof(struct rte_flow_classify_rule));
+	rule->id = unique_id++;
+	rule->rule_type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
+
+	memcpy(&rule->action, classify_get_flow_action(),
+	       sizeof(struct rte_flow_action));
+
+	/* key add values */
+	rule->u.key.key_add.priority = ntuple_filter.priority;
+	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
+			ntuple_filter.proto_mask;
+	rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
+			ntuple_filter.proto;
+	rule->ipv4_5tuple.proto = ntuple_filter.proto;
+	rule->ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
+
+	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
+			ntuple_filter.src_ip_mask;
+	rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
+			ntuple_filter.src_ip;
+	rule->ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
+	rule->ipv4_5tuple.src_ip = ntuple_filter.src_ip;
+
+	rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
+			ntuple_filter.dst_ip_mask;
+	rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
+			ntuple_filter.dst_ip;
+	rule->ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
+	rule->ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
+
+	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
+			ntuple_filter.src_port_mask;
+	rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
+			ntuple_filter.src_port;
+	rule->ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
+	rule->ipv4_5tuple.src_port = ntuple_filter.src_port;
+
+	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
+			ntuple_filter.dst_port_mask;
+	rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
+			ntuple_filter.dst_port;
+	rule->ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
+	rule->ipv4_5tuple.dst_port = ntuple_filter.dst_port;
+
+#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
+	print_ipv4_key_add(&rule->u.key.key_add);
+#endif
+
+	/* key delete values */
+	memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
+	       &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
+	       NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
+
+#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
+	print_ipv4_key_delete(&rule->u.key.key_del);
+#endif
+	return rule;
+}
+
+struct rte_flow_classify_rule *
+rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+		uint32_t table_id,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct rte_flow_classify_rule *rule;
+	struct rte_flow_classify_table_entry *table_entry;
+	int ret;
+
+	if (!error)
+		return NULL;
+
+	if (!cls) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "NULL classifier.");
+		return NULL;
+	}
+
+	if (table_id >= cls->num_tables) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "invalid table_id.");
+		return NULL;
+	}
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				NULL, "NULL pattern.");
+		return NULL;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				NULL, "NULL action.");
+		return NULL;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR,
+				NULL, "NULL attribute.");
+		return NULL;
+	}
+
+	/* parse attr, pattern and actions */
+	ret = rte_flow_classify_validate(attr, pattern, actions, error);
+	if (ret < 0)
+		return NULL;
+
+	switch (cls->type) {
+	case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
+		rule = allocate_ipv4_5tuple_rule();
+		if (!rule)
+			return NULL;
+		break;
+	default:
+		return NULL;
+	}
+
+	rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
+	if (!rule->entry) {
+		free(rule);
+		rule = NULL;
+		return NULL;
+	}
+
+	table_entry = rule->entry;
+	table_entry->rule_id = rule->id;
+
+	ret = cls->tables[table_id].ops.f_add(
+			cls->tables[table_id].h_table,
+			&rule->u.key.key_add,
+			rule->entry,
+			&rule->key_found,
+			&rule->entry_ptr);
+	if (ret) {
+		free(rule->entry);
+		free(rule);
+		rule = NULL;
+		return NULL;
+	}
+	return rule;
+}
+
+int
+rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+		uint32_t table_id,
+		struct rte_flow_classify_rule *rule,
+		struct rte_flow_error *error)
+{
+	int ret = -EINVAL;
+
+	if (!error)
+		return ret;
+
+	if (!cls) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "NULL classifier.");
+		return ret;
+	}
+
+	if (table_id >= cls->num_tables) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "invalid table_id.");
+		return ret;
+	}
+
+	if (!rule) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "NULL rule.");
+		return ret;
+	}
+
+	ret = cls->tables[table_id].ops.f_delete(
+			cls->tables[table_id].h_table,
+			&rule->u.key.key_del,
+			&rule->key_found,
+			&rule->entry);
+
+	return ret;
+}
+
+int
+rte_flow_classifier_run(struct rte_flow_classifier *cls,
+		uint32_t table_id,
+		struct rte_mbuf **pkts,
+		const uint16_t nb_pkts,
+		struct rte_flow_error *error)
+{
+	int ret = -EINVAL;
+	uint64_t pkts_mask;
+	uint64_t lookup_hit_mask;
+
+	if (!error)
+		return ret;
+
+	if (!cls || !pkts || nb_pkts == 0) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "invalid input");
+		return ret;
+	}
+
+	if (table_id >= cls->num_tables) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "invalid table_id.");
+		return ret;
+	}
+
+	pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+	ret = cls->tables[table_id].ops.f_lookup(
+			cls->tables[table_id].h_table,
+			pkts, pkts_mask, &lookup_hit_mask,
+			(void **)cls->entries);
+
+	if (!ret && lookup_hit_mask)
+		cls->nb_pkts = nb_pkts;
+	else
+		cls->nb_pkts = 0;
+
+	return ret;
+}
+
+static int
+action_apply(struct rte_flow_classifier *cls,
+		struct rte_flow_classify_rule *rule,
+		struct rte_flow_classify_stats *stats)
+{
+	struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
+	uint64_t count = 0;
+	int i;
+	int ret = -ENODATA;
+
+	switch (rule->action.type) {
+	case RTE_FLOW_ACTION_TYPE_COUNT:
+		for (i = 0; i < cls->nb_pkts; i++) {
+			if (rule->id == cls->entries[i]->rule_id)
+				count++;
+		}
+		if (count) {
+			ret = 0;
+			ntuple_stats =
+				(struct rte_flow_classify_ipv4_5tuple_stats *)
+				stats->stats;
+			ntuple_stats->counter1 = count;
+			ntuple_stats->ipv4_5tuple = rule->ipv4_5tuple;
+		}
+		break;
+	default:
+		ret = -ENOTSUP;
+		break;
+	}
+
+	return ret;
+}
+
+int
+rte_flow_classifier_query(struct rte_flow_classifier *cls,
+		struct rte_flow_classify_rule *rule,
+		struct rte_flow_classify_stats *stats,
+		struct rte_flow_error *error)
+{
+	int ret = -EINVAL;
+
+	if (!error)
+		return ret;
+
+	if (!cls || !rule || !stats) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL, "invalid input");
+		return ret;
+	}
+
+	ret = action_apply(cls, rule, stats);
+	return ret;
+}
diff --git a/lib/librte_flow_classify/rte_flow_classify.h b/lib/librte_flow_classify/rte_flow_classify.h
new file mode 100644
index 0000000..9bd6cf4
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify.h
@@ -0,0 +1,321 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_H_
+#define _RTE_FLOW_CLASSIFY_H_
+
+/**
+ * @file
+ *
+ * RTE Flow Classify Library
+ *
+ * This library provides flow record information with some measured properties.
+ *
+ * Application should define the flow and measurement criteria (action) for it.
+ *
+ * Library doesn't maintain any flow records itself, instead flow information is
+ * returned to upper layer only for given packets.
+ *
+ * It is the application's responsibility to call rte_flow_classifier_run()
+ * for a burst of packets, just after receiving them or before transmitting
+ * them. The application should provide the flow pattern of interest and the
+ * measurement (action) to apply to it via
+ * rte_flow_classify_table_entry_add(), and should provide storage for the
+ * results queried with rte_flow_classifier_query().
+ *
+ *  Usage:
+ *  - application calls rte_flow_classifier_create() to create a
+ *    rte_flow_classifier object and rte_flow_classify_table_create() to
+ *    add a table to it.
+ *  - application calls rte_flow_classify_table_entry_add() to add rules.
+ *  - application calls rte_flow_classifier_run() followed by
+ *    rte_flow_classifier_query() in a polling manner, preferably after
+ *    rte_eth_rx_burst(), to obtain measurements for matched packets.
+ *  - rules can be removed via rte_flow_classify_table_entry_delete() and
+ *    the classifier destroyed via rte_flow_classifier_free() when no
+ *    longer needed.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_acl.h>
+#include <rte_table_acl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define RTE_FLOW_CLASSIFY_TABLE_MAX  1
+
+/** Opaque data type for flow classifier */
+struct rte_flow_classifier;
+
+/** Opaque data type for flow classify rule */
+struct rte_flow_classify_rule;
+
+enum rte_flow_classify_rule_type {
+	RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,   /**< no type */
+	RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE, /**< IPv4 5tuple type */
+};
+
+enum rte_flow_classify_table_type {
+	RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE, /**< no type */
+	RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,  /**< ACL type */
+};
+
+/** Parameters for flow classifier creation */
+struct rte_flow_classifier_params {
+	/** flow classifier name */
+	const char *name;
+
+	/** CPU socket ID where memory for the flow classifier and its
+	 * elements (tables) should be allocated
+	 */
+	int socket_id;
+
+	/** Table type */
+	enum rte_flow_classify_table_type type;
+
+	/** Table id */
+	uint32_t table_id;
+};
+
+/** Parameters for creating a table within a flow classifier */
+struct rte_flow_classify_table_params {
+	/** Table operations (specific to each table type) */
+	struct rte_table_ops *ops;
+
+	/** Opaque param to be passed to the table create operation */
+	void *arg_create;
+
+	/** Memory size to be reserved per classifier object entry for
+	 * storing meta data
+	 */
+	uint32_t table_metadata_size;
+};
+
+/** IPv4 5-tuple used to describe a flow to be matched */
+struct rte_flow_classify_ipv4_5tuple {
+	uint32_t dst_ip;         /**< Destination IP address in big endian. */
+	uint32_t dst_ip_mask;    /**< Mask of destination IP address. */
+	uint32_t src_ip;         /**< Source IP address in big endian. */
+	uint32_t src_ip_mask;    /**< Mask of source IP address. */
+	uint16_t dst_port;       /**< Destination port in big endian. */
+	uint16_t dst_port_mask;  /**< Mask of destination port. */
+	uint16_t src_port;       /**< Source Port in big endian. */
+	uint16_t src_port_mask;  /**< Mask of source port. */
+	uint8_t proto;           /**< L4 protocol. */
+	uint8_t proto_mask;      /**< Mask of L4 protocol. */
+};
+
+/** Table entry stored per classify rule */
+struct rte_flow_classify_table_entry {
+	/** meta-data for classify rule */
+	uint32_t rule_id;
+
+	/** Start of table entry area for user defined meta data */
+	__extension__ uint8_t meta_data[0];
+};
+
+/**
+ * Flow stats
+ *
+ * For the count action, stats can be returned by the query API.
+ *
+ * Storage for stats is provided by application.
+ */
+struct rte_flow_classify_stats {
+	void *stats;
+};
+
+/** Stats storage filled by the COUNT action for IPv4 5-tuple rules */
+struct rte_flow_classify_ipv4_5tuple_stats {
+	/** count of packets that match IPv4 5tuple pattern */
+	uint64_t counter1;
+	/** IPv4 5tuple data */
+	struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+};
+
+/**
+ * Flow classifier create
+ *
+ * @param params
+ *   Parameters for flow classifier creation
+ * @return
+ *   Handle to flow classifier instance on success or NULL otherwise
+ */
+struct rte_flow_classifier *rte_flow_classifier_create(
+		struct rte_flow_classifier_params *params);
+
+/**
+ * Flow classifier free
+ *
+ * @param cls
+ *   Handle to flow classifier instance
+ * @return
+ *   0 on success, error code otherwise
+ */
+int rte_flow_classifier_free(struct rte_flow_classifier *cls);
+
+/**
+ * Flow classify table create
+ *
+ * @param cls
+ *   Handle to flow classifier instance
+ * @param params
+ *   Parameters for flow_classify table creation
+ * @param table_id
+ *   Table ID. Valid only within the scope of table IDs of the current
+ *   classifier. Only returned after a successful invocation.
+ * @return
+ *   0 on success, error code otherwise
+ */
+int rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+	struct rte_flow_classify_table_params *params,
+	uint32_t *table_id);
+
+/**
+ * Validate a flow classify rule.
+ *
+ * @param[in] attr
+ *   Flow rule attributes
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END pattern item).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ *
+ * @return
+ *   0 on success, error code otherwise.
+ */
+int
+rte_flow_classify_validate(
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+
+/**
+ * Add a flow classify rule to the flow_classifer table.
+ *
+ * @param[in] cls
+ *   Flow classifier handle
+ * @param[in] table_id
+ *   id of table
+ * @param[in] attr
+ *   Flow rule attributes
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END pattern item).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ * @return
+ *   A valid handle in case of success, NULL otherwise.
+ */
+struct rte_flow_classify_rule *
+rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+		uint32_t table_id,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+
+/**
+ * Delete a flow classify rule from the flow_classifer table.
+ *
+ * @param[in] cls
+ *   Flow classifier handle
+ * @param[in] table_id
+ *   id of table
+ * @param[in] rule
+ *   Flow classify rule
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ * @return
+ *   0 on success, error code otherwise.
+ */
+int
+rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+		uint32_t table_id,
+		struct rte_flow_classify_rule *rule,
+		struct rte_flow_error *error);
+
+/**
+ * Run flow classifier for given packets.
+ *
+ * @param[in] cls
+ *   Flow classifier handle
+ * @param[in] table_id
+ *   id of table
+ * @param[in] pkts
+ *   Pointer to packets to process
+ * @param[in] nb_pkts
+ *   Number of packets to process
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ *
+ * @return
+ *   0 on success, error code otherwise.
+ */
+
+int rte_flow_classifier_run(struct rte_flow_classifier *cls,
+		uint32_t table_id,
+		struct rte_mbuf **pkts,
+		const uint16_t nb_pkts,
+		struct rte_flow_error *error);
+
+/**
+ * Query flow classifier for given rule.
+ *
+ * @param[in] cls
+ *   Flow classifier handle
+ * @param[in] rule
+ *   Flow classify rule
+ * @param[in] stats
+ *   Flow classify stats
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ *
+ * @return
+ *   0 on success, error code otherwise.
+ */
+int rte_flow_classifier_query(struct rte_flow_classifier *cls,
+		struct rte_flow_classify_rule *rule,
+		struct rte_flow_classify_stats *stats,
+		struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_H_ */
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.c b/lib/librte_flow_classify/rte_flow_classify_parse.c
new file mode 100644
index 0000000..921a852
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.c
@@ -0,0 +1,546 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+
+struct classify_valid_pattern {
+	enum rte_flow_item_type *items;
+	parse_filter_t parse_filter;
+};
+
+static struct rte_flow_action action;
+
+/* Pattern matched ntuple filter */
+static enum rte_flow_item_type pattern_ntuple_1[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched ntuple filter */
+static enum rte_flow_item_type pattern_ntuple_2[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched ntuple filter */
+static enum rte_flow_item_type pattern_ntuple_3[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item pattern[],
+			 const struct rte_flow_action actions[],
+			 struct rte_eth_ntuple_filter *filter,
+			 struct rte_flow_error *error);
+
+static struct classify_valid_pattern classify_supported_patterns[] = {
+	/* ntuple */
+	{ pattern_ntuple_1, classify_parse_ntuple_filter },
+	{ pattern_ntuple_2, classify_parse_ntuple_filter },
+	{ pattern_ntuple_3, classify_parse_ntuple_filter },
+};
+
+/*
+ * Return a pointer to the file-scope action filled in by the most recent
+ * successful classify_parse_ntuple_filter() call (it is memset and set to
+ * RTE_FLOW_ACTION_TYPE_COUNT during parsing).
+ */
+struct rte_flow_action *
+classify_get_flow_action(void)
+{
+	return &action;
+}
+
+/*
+ * Find the first VOID or non-VOID item pointer.
+ *
+ * Scans forward from @item: when @is_void is true, returns the first VOID
+ * item; otherwise the first non-VOID item. Stops at -- and may return --
+ * the END item when no earlier item qualifies.
+ */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/*
+ * Skip all VOID items of the pattern.
+ *
+ * Compacts @pattern into @items by copying every run of consecutive
+ * non-VOID items and finally the terminating END item. The caller must
+ * provide @items storage at least as large as @pattern.
+ */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = classify_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = classify_find_first_item(pb + 1, true);
+
+		/* copy the whole run [pb, pe) of non-void items in one go */
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/*
+ * Check if the pattern matches a supported item type array.
+ *
+ * Walks both sequences in lockstep; they match only if they agree on every
+ * element and reach END at the same position.
+ */
+static bool
+classify_match_pattern(enum rte_flow_item_type *item_array,
+		   struct rte_flow_item *pattern)
+{
+	struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/*
+ * Find the parse filter function matching @pattern.
+ *
+ * Returns NULL when the pattern is not one of the supported
+ * ETH/IPV4/{UDP,TCP,SCTP}/END sequences in classify_supported_patterns.
+ */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+	parse_filter_t parse_filter = NULL;
+	uint8_t i = 0;
+
+	for (; i < RTE_DIM(classify_supported_patterns); i++) {
+		if (classify_match_pattern(classify_supported_patterns[i].items,
+					pattern)) {
+			parse_filter =
+				classify_supported_patterns[i].parse_filter;
+			break;
+		}
+	}
+
+	return parse_filter;
+}
+
+#define FLOW_RULE_MIN_PRIORITY 8
+#define FLOW_RULE_MAX_PRIORITY 0
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+	do {\
+		item = pattern + index;\
+		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+			index++;\
+			item = pattern + index;\
+		} \
+	} while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+	do {\
+		act = actions + index;\
+		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+			index++;\
+			act = actions + index;\
+		} \
+	} while (0)
+
+/**
+ * Please aware there's an assumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
+ *			dst_addr 192.167.3.50	0xFFFFFFFF
+ *			next_proto_id	17	0xFF
+ * UDP/TCP/	src_port	80	0xFFFF
+ * SCTP		dst_port	80	0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+/*
+ * See the block comment above for the accepted pattern/action layout.
+ * On any error *filter is zeroed (once it has been written), *error is
+ * set and -EINVAL is returned; on success the file-scope 'action' is set
+ * to COUNT and 0 is returned.
+ */
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item pattern[],
+			 const struct rte_flow_action actions[],
+			 struct rte_eth_ntuple_filter *filter,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec;
+	const struct rte_flow_item_sctp *sctp_mask;
+	uint32_t index;
+
+	if (!pattern) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+			NULL, "NULL pattern.");
+		return -EINVAL;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -EINVAL;
+	}
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -EINVAL;
+	}
+
+	/* parse pattern */
+	index = 0;
+
+	/* the first not void item can be MAC or IPv4 */
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -EINVAL;
+	}
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					item,
+					"Not supported last point for range");
+			return -EINVAL;
+
+		}
+		/* if the first item is MAC, the content should be NULL */
+		if (item->spec || item->mask) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Not supported by ntuple filter");
+			return -EINVAL;
+		}
+		/* check if the next not void item is IPv4 */
+		index++;
+		NEXT_ITEM_OF_PATTERN(item, pattern, index);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Not supported by ntuple filter");
+			return -EINVAL;
+		}
+	}
+
+	/* get the IPv4 info */
+	if (!item->spec || !item->mask) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid ntuple mask");
+		return -EINVAL;
+	}
+	/*Not supported last point for range*/
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		return -EINVAL;
+
+	}
+
+	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	/**
+	 * Only support src & dst addresses, protocol,
+	 * others should be masked.
+	 */
+	if (ipv4_mask->hdr.version_ihl ||
+		ipv4_mask->hdr.type_of_service ||
+		ipv4_mask->hdr.total_length ||
+		ipv4_mask->hdr.packet_id ||
+		ipv4_mask->hdr.fragment_offset ||
+		ipv4_mask->hdr.time_to_live ||
+		ipv4_mask->hdr.hdr_checksum) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -EINVAL;
+	}
+
+	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	filter->dst_ip = ipv4_spec->hdr.dst_addr;
+	filter->src_ip = ipv4_spec->hdr.src_addr;
+	filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+	/* check if the next not void item is TCP or UDP or SCTP */
+	index++;
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -EINVAL;
+	}
+
+	/* get the TCP/UDP info */
+	if (!item->spec || !item->mask) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid ntuple mask");
+		return -EINVAL;
+	}
+
+	/*Not supported last point for range*/
+	if (item->last) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		return -EINVAL;
+
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+		/**
+		 * Only support src & dst ports, tcp flags,
+		 * others should be masked.
+		 */
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -EINVAL;
+		}
+
+		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+		filter->src_port_mask  = tcp_mask->hdr.src_port;
+		/* tcp_flags must be fully masked or fully unmasked */
+		if (tcp_mask->hdr.tcp_flags == 0xFF) {
+			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else if (!tcp_mask->hdr.tcp_flags) {
+			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -EINVAL;
+		}
+
+		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		filter->dst_port  = tcp_spec->hdr.dst_port;
+		filter->src_port  = tcp_spec->hdr.src_port;
+		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+		/**
+		 * Only support src & dst ports,
+		 * others should be masked.
+		 */
+		if (udp_mask->hdr.dgram_len ||
+		    udp_mask->hdr.dgram_cksum) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -EINVAL;
+		}
+
+		filter->dst_port_mask = udp_mask->hdr.dst_port;
+		filter->src_port_mask = udp_mask->hdr.src_port;
+
+		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		filter->dst_port = udp_spec->hdr.dst_port;
+		filter->src_port = udp_spec->hdr.src_port;
+	} else {
+		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+		/**
+		 * Only support src & dst ports,
+		 * others should be masked.
+		 */
+		if (sctp_mask->hdr.tag ||
+		    sctp_mask->hdr.cksum) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -EINVAL;
+		}
+
+		filter->dst_port_mask = sctp_mask->hdr.dst_port;
+		filter->src_port_mask = sctp_mask->hdr.src_port;
+
+		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+		filter->dst_port = sctp_spec->hdr.dst_port;
+		filter->src_port = sctp_spec->hdr.src_port;
+	}
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -EINVAL;
+	}
+
+	/* parse action */
+	index = 0;
+
+	/**
+	 * n-tuple only supports count,
+	 * check if the first not void action is COUNT.
+	 */
+	memset(&action, 0, sizeof(action));
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		/* report the offending action, not the last pattern item */
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -EINVAL;
+	}
+	action.type = RTE_FLOW_ACTION_TYPE_COUNT;
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -EINVAL;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -EINVAL;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -EINVAL;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Error priority.");
+		return -EINVAL;
+	}
+	/* clamp out-of-range priorities to the highest supported priority */
+	filter->priority = (uint16_t)attr->priority;
+	if (attr->priority >  FLOW_RULE_MIN_PRIORITY)
+		filter->priority = FLOW_RULE_MAX_PRIORITY;
+
+	return 0;
+}
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.h b/lib/librte_flow_classify/rte_flow_classify_parse.h
new file mode 100644
index 0000000..1d4708a
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.h
@@ -0,0 +1,74 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_PARSE_H_
+#define _RTE_FLOW_CLASSIFY_PARSE_H_
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*parse_filter_t)(const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      struct rte_eth_ntuple_filter *filter,
+			      struct rte_flow_error *error);
+
+/* Skip all VOID items of the pattern */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern);
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void);
+
+
+/* Find if there's parse filter function matched */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern);
+
+/* get action data */
+struct rte_flow_action *
+classify_get_flow_action(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_PARSE_H_ */
diff --git a/lib/librte_flow_classify/rte_flow_classify_version.map b/lib/librte_flow_classify/rte_flow_classify_version.map
new file mode 100644
index 0000000..b51cb1a
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_version.map
@@ -0,0 +1,14 @@ 
+EXPERIMENTAL {
+	global:
+
+	rte_flow_classifier_create;
+	rte_flow_classifier_free;
+	rte_flow_classifier_query;
+	rte_flow_classifier_run;
+	rte_flow_classify_table_create;
+	rte_flow_classify_table_entry_add;
+	rte_flow_classify_table_entry_delete;
+	rte_flow_classify_validate;
+
+	local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 8192b98..482656c 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -58,6 +58,7 @@  _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
 #
 # Order is important: from higher level to lower level
 #
+_LDLIBS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)  += -lrte_flow_classify
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE)       += -lrte_pipeline
 _LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE)          += -lrte_table
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PORT)           += -lrte_port