Hi, adrien
> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Tuesday, January 3, 2017 10:08 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>
> Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2 14/18] net/ixgbe: parse L2 tunnel filter
>
> Hi Wei,
>
> On Fri, Dec 30, 2016 at 03:53:06PM +0800, Wei Zhao wrote:
> > check if the rule is a L2 tunnel rule, and get the L2 tunnel info.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> >
> > ---
> >
> > v2:
> > --add new error set function
> > --change return value type of parser function
> > ---
> > drivers/net/ixgbe/ixgbe_ethdev.c | 269
> +++++++++++++++++++++++++++++++++++----
> > lib/librte_ether/rte_flow.h | 32 +++++
> > 2 files changed, 273 insertions(+), 28 deletions(-)
> [...]
> > diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
> > index 98084ac..e9e6220 100644
> > --- a/lib/librte_ether/rte_flow.h
> > +++ b/lib/librte_ether/rte_flow.h
> > @@ -268,6 +268,13 @@ enum rte_flow_item_type {
> > * See struct rte_flow_item_vxlan.
> > */
> > RTE_FLOW_ITEM_TYPE_VXLAN,
> > +
> > + /**
> > + * Matches a E_TAG header.
> > + *
> > + * See struct rte_flow_item_e_tag.
> > + */
> > + RTE_FLOW_ITEM_TYPE_E_TAG,
> > };
> >
> > /**
> > @@ -454,6 +461,31 @@ struct rte_flow_item_vxlan { };
> >
> > /**
> > + * RTE_FLOW_ITEM_TYPE_E_TAG.
> > + *
> > + * Matches a E-tag header.
> > + */
> > +struct rte_flow_item_e_tag {
> > + struct ether_addr dst; /**< Destination MAC. */
> > + struct ether_addr src; /**< Source MAC. */
> > + uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
> > + uint16_t e_pcp:3; /**< E-PCP */
> > + uint16_t dei:1; /**< DEI */
> > + uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
> > + uint16_t rsv:2; /**< reserved */
> > + uint16_t grp:2; /**< GRP */
> > + uint16_t e_cid_base:12; /**< E-CID base */
> > + uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
> > + uint16_t e_cid_ext:8; /**< E-CID extend */
> > + uint16_t type; /**< MAC type. */
> > + unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
> > + struct {
> > + uint16_t tpid; /**< Tag protocol identifier. */
> > + uint16_t tci; /**< Tag control information. */
> > + } tag[]; /**< 802.1Q/ad tag definitions, outermost first. */ };
> [...]
>
> See my previous reply [1], this definition is not endian-safe and comprises
> protocols defined as independent items (namely ETH and VLAN). Here is an
> untested suggestion:
>
> struct rte_flow_item_e_tag {
> uint16_t tpid; /**< Tag protocol identifier (0x893F). */
> /** E-Tag control information (E-TCI). */
> uint16_t epcp_edei_in_ecid_b; /**< E-PCP (3b), E-DEI (1b), ingress E-CID
> base (12b). */
> uint16_t rsvd_grp_ecid_b; /**< Reserved (2b), GRP (2b), E-CID base (12b).
> */
> uint8_t in_ecid_e; /**< Ingress E-CID ext. */
> uint8_t ecid_e; /**< E-CID ext. */
> };
>
> Applications are responsible for breaking down and filling individual fields
> properly. Ethernet header would be provided as its own item as shown in
> this testpmd flow command example:
>
> flow create 0 ingress pattern eth / e_tag in_ecid_base is 42 / end actions
> drop / end
>
In this case, is eth optional or mandatory?
I think it is optional, because the user may not have any parameters in the ETH config.
> Note, all multibyte values are in network order like other protocol header
> definitions.
>
> [1] http://dpdk.org/ml/archives/dev/2016-December/053181.html
> Message ID: 20161223081310.GH10340@6wind.com
>
> --
> Adrien Mazarguil
> 6WIND
On Thu, Jan 05, 2017 at 03:12:01AM +0000, Zhao1, Wei wrote:
> Hi, adrien
>
> > -----Original Message-----
> > From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> > Sent: Tuesday, January 3, 2017 10:08 PM
> > To: Zhao1, Wei <wei.zhao1@intel.com>
> > Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> > Subject: Re: [dpdk-dev] [PATCH v2 14/18] net/ixgbe: parse L2 tunnel filter
> >
> > Hi Wei,
> >
> > On Fri, Dec 30, 2016 at 03:53:06PM +0800, Wei Zhao wrote:
> > > check if the rule is a L2 tunnel rule, and get the L2 tunnel info.
> > >
> > > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > >
> > > ---
> > >
> > > v2:
> > > --add new error set function
> > > --change return value type of parser function
> > > ---
> > > drivers/net/ixgbe/ixgbe_ethdev.c | 269
> > +++++++++++++++++++++++++++++++++++----
> > > lib/librte_ether/rte_flow.h | 32 +++++
> > > 2 files changed, 273 insertions(+), 28 deletions(-)
> > [...]
> > > diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
> > > index 98084ac..e9e6220 100644
> > > --- a/lib/librte_ether/rte_flow.h
> > > +++ b/lib/librte_ether/rte_flow.h
> > > @@ -268,6 +268,13 @@ enum rte_flow_item_type {
> > > * See struct rte_flow_item_vxlan.
> > > */
> > > RTE_FLOW_ITEM_TYPE_VXLAN,
> > > +
> > > + /**
> > > + * Matches a E_TAG header.
> > > + *
> > > + * See struct rte_flow_item_e_tag.
> > > + */
> > > + RTE_FLOW_ITEM_TYPE_E_TAG,
> > > };
> > >
> > > /**
> > > @@ -454,6 +461,31 @@ struct rte_flow_item_vxlan { };
> > >
> > > /**
> > > + * RTE_FLOW_ITEM_TYPE_E_TAG.
> > > + *
> > > + * Matches a E-tag header.
> > > + */
> > > +struct rte_flow_item_e_tag {
> > > + struct ether_addr dst; /**< Destination MAC. */
> > > + struct ether_addr src; /**< Source MAC. */
> > > + uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
> > > + uint16_t e_pcp:3; /**< E-PCP */
> > > + uint16_t dei:1; /**< DEI */
> > > + uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
> > > + uint16_t rsv:2; /**< reserved */
> > > + uint16_t grp:2; /**< GRP */
> > > + uint16_t e_cid_base:12; /**< E-CID base */
> > > + uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
> > > + uint16_t e_cid_ext:8; /**< E-CID extend */
> > > + uint16_t type; /**< MAC type. */
> > > + unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
> > > + struct {
> > > + uint16_t tpid; /**< Tag protocol identifier. */
> > > + uint16_t tci; /**< Tag control information. */
> > > + } tag[]; /**< 802.1Q/ad tag definitions, outermost first. */ };
> > [...]
> >
> > See my previous reply [1], this definition is not endian-safe and comprises
> > protocols defined as independent items (namely ETH and VLAN). Here is an
> > untested suggestion:
> >
> > struct rte_flow_item_e_tag {
> > uint16_t tpid; /**< Tag protocol identifier (0x893F). */
> > /** E-Tag control information (E-TCI). */
> > uint16_t epcp_edei_in_ecid_b; /**< E-PCP (3b), E-DEI (1b), ingress E-CID
> > base (12b). */
> > uint16_t rsvd_grp_ecid_b; /**< Reserved (2b), GRP (2b), E-CID base (12b).
> > */
> > uint8_t in_ecid_e; /**< Ingress E-CID ext. */
> > uint8_t ecid_e; /**< E-CID ext. */
> > };
> >
> > Applications are responsible for breaking down and filling individual fields
> > properly. Ethernet header would be provided as its own item as shown in
> > this testpmd flow command example:
> >
> > flow create 0 ingress pattern eth / e_tag in_ecid_base is 42 / end actions
> > drop / end
> >
>
> In this case, is eth optional or mandatory?
> I think it is optional, because the user may not have any parameters in the ETH config.
Normally, protocol items start from L2, so applications *should* provide
ETH; otherwise it is an error.
Now a PMD may also allow it to be implicit when it is unambiguous (e.g. an
imaginary ETH item provided without a mask) as described in the "UDPv6
anywhere" example [2]. It's up to you.
> > Note, all multibyte values are in network order like other protocol header
> > definitions.
> >
> > [1] http://dpdk.org/ml/archives/dev/2016-December/053181.html
> > Message ID: 20161223081310.GH10340@6wind.com
[2] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#matching-pattern
@@ -425,6 +425,19 @@ ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
struct rte_eth_syn_filter *filter,
struct rte_flow_error *error);
static int
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error);
+static int
+ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *rule,
+ struct rte_flow_error *error);
+static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -8939,41 +8952,175 @@ ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
}
/**
- * Check if the flow rule is supported by ixgbe.
- * It only checkes the format. Don't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * Parse the rule to see if it is a L2 tunnel rule.
+ * And get the L2 tunnel filter info BTW.
+ * Only support E-tag now.
*/
static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
{
- struct rte_eth_ntuple_filter ntuple_filter;
- struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_syn_filter syn_filter;
- int ret;
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index, j;
- memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
- ret = ixgbe_parse_ntuple_filter(attr, pattern,
- actions, &ntuple_filter, error);
- if (!ret)
- return 0;
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
- memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
- ret = ixgbe_parse_ethertype_filter(attr, pattern,
- actions, ðertype_filter, error);
- if (!ret)
- return 0;
+ /* parse pattern */
+ index = 0;
- memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
- ret = ixgbe_parse_syn_filter(attr, pattern,
- actions, &syn_filter, error);
- if (!ret)
- return 0;
+ /* The first not void item should be e-tag. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
- return ret;
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
+ e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+
+ /* Src & dst MAC address should be masked. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (e_tag_mask->src.addr_bytes[j] ||
+ e_tag_mask->dst.addr_bytes[j]) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->e_tag_ethertype ||
+ e_tag_mask->e_pcp ||
+ e_tag_mask->dei ||
+ e_tag_mask->in_e_cid_base ||
+ e_tag_mask->in_e_cid_ext ||
+ e_tag_mask->e_cid_ext ||
+ e_tag_mask->type ||
+ e_tag_mask->tags ||
+ e_tag_mask->grp != 0x3 ||
+ e_tag_mask->e_cid_base != 0xFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = e_tag_spec->grp << 12;
+ filter->tunnel_id |= rte_be_to_cpu_16(e_tag_spec->e_cid_base);
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->pool = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
}
/* Destroy all flow rules associated with a port on ixgbe. */
@@ -9006,6 +9153,72 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
return 0;
}
+static int
+ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ret = cons_parse_l2_tn_filter(attr, pattern,
+ actions, l2_tn_filter, error);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ return ret;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checkes the format. Don't guarantee the rule can be programmed into
+ * the HW. Because there can be no enough room for the rule.
+ */
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(attr, pattern,
+ actions, ðertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+
+ return ret;
+}
+
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
@@ -268,6 +268,13 @@ enum rte_flow_item_type {
* See struct rte_flow_item_vxlan.
*/
RTE_FLOW_ITEM_TYPE_VXLAN,
+
+ /**
+ * Matches a E_TAG header.
+ *
+ * See struct rte_flow_item_e_tag.
+ */
+ RTE_FLOW_ITEM_TYPE_E_TAG,
};
/**
@@ -454,6 +461,31 @@ struct rte_flow_item_vxlan {
};
/**
+ * RTE_FLOW_ITEM_TYPE_E_TAG.
+ *
+ * Matches a E-tag header.
+ */
+struct rte_flow_item_e_tag {
+ struct ether_addr dst; /**< Destination MAC. */
+ struct ether_addr src; /**< Source MAC. */
+ uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
+ uint16_t e_pcp:3; /**< E-PCP */
+ uint16_t dei:1; /**< DEI */
+ uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
+ uint16_t rsv:2; /**< reserved */
+ uint16_t grp:2; /**< GRP */
+ uint16_t e_cid_base:12; /**< E-CID base */
+ uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
+ uint16_t e_cid_ext:8; /**< E-CID extend */
+ uint16_t type; /**< MAC type. */
+ unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
+ struct {
+ uint16_t tpid; /**< Tag protocol identifier. */
+ uint16_t tci; /**< Tag control information. */
+ } tag[]; /**< 802.1Q/ad tag definitions, outermost first. */
+};
+
+/**
* Matching pattern item definition.
*
* A pattern is formed by stacking items starting from the lowest protocol