[dpdk-dev,v4,6/7] ixgbe: support l2 tunnel operation
Commit Message
Add support of l2 tunnel operation.
Support enabling/disabling l2 tunnel tag insertion/stripping.
Support enabling/disabling l2 tunnel packets forwarding.
Support adding/deleting forwarding rules for l2 tunnel packets.
Only support E-tag now.
Also update the release note.
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
doc/guides/rel_notes/release_16_04.rst | 21 ++
drivers/net/ixgbe/ixgbe_ethdev.c | 371 +++++++++++++++++++++++++++++++++
2 files changed, 392 insertions(+)
Comments
Hi Wenzhuo,
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wenzhuo Lu
> Sent: Thursday, February 18, 2016 10:46 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v4 6/7] ixgbe: support l2 tunnel operation
>
> Add support of l2 tunnel operation.
> Support enabling/disabling l2 tunnel tag insertion/stripping.
> Support enabling/disabling l2 tunnel packets forwarding.
> Support adding/deleting forwarding rules for l2 tunnel packets.
> Only support E-tag now.
>
> Also update the release note.
>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> ---
> doc/guides/rel_notes/release_16_04.rst | 21 ++
> drivers/net/ixgbe/ixgbe_ethdev.c | 371
> +++++++++++++++++++++++++++++++++
> 2 files changed, 392 insertions(+)
>
> diff --git a/doc/guides/rel_notes/release_16_04.rst
> b/doc/guides/rel_notes/release_16_04.rst
> index eb1b3b2..994da33 100644
> --- a/doc/guides/rel_notes/release_16_04.rst
> +++ b/doc/guides/rel_notes/release_16_04.rst
> @@ -44,6 +44,27 @@ This section should contain new features added in this
> release. Sample format:
> Add the offload and negotiation of checksum and TSO between vhost-user
> and
> vanilla Linux virtio guest.
>
> +* **Added support for E-tag on X550.**
> +
> + E-tag is defined in 802.1br. Please reference
> + http://www.ieee802.org/1/pages/802.1br.html.
> +
> + This feature is for VF, but please aware all the setting is on PF. It means
> + the CLIs should be used on PF, but some of their effect will be shown on
> VF.
> + The forwarding of E-tag packets based on GRP and E-CID_base will have
> effect
> + on PF. Theoretically the E-tag packets can be forwarded to any pool/queue.
> + But normally we'd like to forward the packets to the pools/queues
> belonging
> + to the VFs. And E-tag insertion and stripping will have effect on VFs. When
> + VF receives E-tag packets, it should strip the E-tag. When VF transmits
> + packets, it should insert the E-tag. Both can be offloaded.
> +
> + When we want to use this E-tag support feature, the forwarding should be
> + enabled to forward the packets received by PF to indicated VFs. And
> insertion
> + and stripping should be enabled for VFs to offload the effort to HW.
> +
> + * Support E-tag offloading of insertion and stripping.
> + * Support Forwarding E-tag packets to pools based on
> + GRP and E-CID_base.
>
> Resolved Issues
> ---------------
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> b/drivers/net/ixgbe/ixgbe_ethdev.c
> index b15a4b6..aa00842 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -139,10 +139,17 @@
> #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
>
> #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
> +#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
> #define DEFAULT_ETAG_ETYPE 0x893f
> #define IXGBE_ETAG_ETYPE 0x00005084
> #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
> #define IXGBE_ETAG_ETYPE_VALID 0x80000000
> +#define IXGBE_RAH_ADTYPE 0x40000000
> +#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff
> +#define IXGBE_VMVIR_TAGA_MASK 0x18000000
> +#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
> +#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
> +#define IXGBE_QDE_STRIP_TAG 0x00000004
>
> static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
> static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
> @@ -351,6 +358,33 @@ static int ixgbe_dev_l2_tunnel_enable
> static int ixgbe_dev_l2_tunnel_disable
> (struct rte_eth_dev *dev,
> enum rte_eth_l2_tunnel_type l2_tunnel_type);
> +static int ixgbe_dev_l2_tunnel_insertion_enable
> + (struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel,
> + uint16_t vf_id);
> +static int ixgbe_dev_l2_tunnel_insertion_disable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type,
> + uint16_t vf_id);
> +static int ixgbe_dev_l2_tunnel_stripping_enable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type);
> +static int ixgbe_dev_l2_tunnel_stripping_disable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type);
> +static int ixgbe_dev_l2_tunnel_forwarding_enable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type);
> +static int ixgbe_dev_l2_tunnel_forwarding_disable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type);
> +static int ixgbe_dev_l2_tunnel_filter_add
> + (struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel,
> + uint32_t pool);
> +static int ixgbe_dev_l2_tunnel_filter_del
> + (struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel);
>
> /*
> * Define VF Stats MACRO for Non "cleared on read" register
> @@ -512,6 +546,14 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops
> = {
> .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
> .l2_tunnel_enable = ixgbe_dev_l2_tunnel_enable,
> .l2_tunnel_disable = ixgbe_dev_l2_tunnel_disable,
> + .l2_tunnel_insertion_enable =
> ixgbe_dev_l2_tunnel_insertion_enable,
> + .l2_tunnel_insertion_disable =
> ixgbe_dev_l2_tunnel_insertion_disable,
> + .l2_tunnel_stripping_enable =
> ixgbe_dev_l2_tunnel_stripping_enable,
> + .l2_tunnel_stripping_disable =
> ixgbe_dev_l2_tunnel_stripping_disable,
> + .l2_tunnel_forwarding_enable =
> ixgbe_dev_l2_tunnel_forwarding_enable,
> + .l2_tunnel_forwarding_disable =
> ixgbe_dev_l2_tunnel_forwarding_disable,
> + .l2_tunnel_filter_add = ixgbe_dev_l2_tunnel_filter_add,
> + .l2_tunnel_filter_del = ixgbe_dev_l2_tunnel_filter_del,
> };
>
> /*
> @@ -6341,6 +6383,335 @@ ixgbe_dev_l2_tunnel_disable(struct
> rte_eth_dev *dev,
> return ret;
> }
>
> +static int
> +ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel)
> +{
> + int ret = 0;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> + u32 i, rar_entries;
> + u32 rar_low, rar_high;
> +
> + if (hw->mac.type != ixgbe_mac_X550 &&
> + hw->mac.type != ixgbe_mac_X550EM_x) {
> + return -ENOTSUP;
> + }
> +
> + rar_entries = ixgbe_get_num_rx_addrs(hw);
> +
> + for (i = 1; i < rar_entries; i++) {
> + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
> + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
> + if ((rar_high & IXGBE_RAH_AV) &&
> + (rar_high & IXGBE_RAH_ADTYPE) &&
> + ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
> + l2_tunnel->tunnel_id)) {
> + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
> + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
> +
> + ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
> +
> + return ret;
> + }
> + }
> +
> + return ret;
> +}
> +
> +static int
> +ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel,
> + uint32_t pool)
> +{
> + int ret = 0;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> + u32 i, rar_entries;
> + u32 rar_low, rar_high;
> +
> + if (hw->mac.type != ixgbe_mac_X550 &&
> + hw->mac.type != ixgbe_mac_X550EM_x) {
> + return -ENOTSUP;
> + }
> +
> + /* One entry for one tunnel. Try to remove potential existing entry.
> */
> + ixgbe_e_tag_filter_del(dev, l2_tunnel);
> +
> + rar_entries = ixgbe_get_num_rx_addrs(hw);
> +
> + for (i = 1; i < rar_entries; i++) {
> + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
> + if (rar_high & IXGBE_RAH_AV) {
> + continue;
> + } else {
> + ixgbe_set_vmdq(hw, i, pool);
Do we need to check the return result here?
> + rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
> + rar_low = l2_tunnel->tunnel_id;
> +
> + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
> + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
> +
> + return ret;
> + }
> + }
> +
> + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
> + " Please remove a rule before adding a new one.");
> + return -1;
> +}
> +
> +/* Add l2 tunnel filter */
> +static int
> +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel,
> + uint32_t pool)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel->l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_filter_add(dev, l2_tunnel, pool);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +/* Delete l2 tunnel filter */
> +static int
> +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel->l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
> +{
> + int ret = 0;
> + uint32_t ctrl;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> +
> + if (hw->mac.type != ixgbe_mac_X550 &&
> + hw->mac.type != ixgbe_mac_X550EM_x) {
> + return -ENOTSUP;
> + }
> +
> + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
> + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
> + if (en)
> + ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
> + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
> +
> + return ret;
> +}
> +
> +/* Enable l2 tunnel forwarding */
> +static int
> +ixgbe_dev_l2_tunnel_forwarding_enable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +/* Disable l2 tunnel forwarding */
> +static int
> +ixgbe_dev_l2_tunnel_forwarding_disable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel,
> + uint16_t vf_id,
> + bool en)
> +{
> + int ret = 0;
> + uint32_t vmtir, vmvir;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> +
> + if (vf_id >= dev->pci_dev->max_vfs) {
> + PMD_DRV_LOG(ERR,
> + "VF id %u should be less than %u",
> + vf_id,
> + dev->pci_dev->max_vfs);
> + return -EINVAL;
> + }
> +
> + if (hw->mac.type != ixgbe_mac_X550 &&
> + hw->mac.type != ixgbe_mac_X550EM_x) {
> + return -ENOTSUP;
> + }
> +
> + if (en)
> + vmtir = l2_tunnel->tunnel_id;
> + else
> + vmtir = 0;
> +
> + IXGBE_WRITE_REG(hw, IXGBE_VMTIR(vf_id), vmtir);
> +
> + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf_id));
> + vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
> + if (en)
> + vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
> + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf_id), vmvir);
> +
> + return ret;
> +}
> +
> +/* Enable l2 tunnel tag insertion */
> +static int
> +ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
> + struct rte_eth_l2_tunnel *l2_tunnel,
> + uint16_t vf_id)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel->l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, vf_id, 1);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +/* Disable l2 tunnel tag insertion */
> +static int
> +ixgbe_dev_l2_tunnel_insertion_disable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type,
> + uint16_t vf_id)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_insertion_en_dis(dev, NULL, vf_id, 0);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
> + bool en)
> +{
> + int ret = 0;
> + uint32_t qde;
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> +
> + if (hw->mac.type != ixgbe_mac_X550 &&
> + hw->mac.type != ixgbe_mac_X550EM_x) {
> + return -ENOTSUP;
> + }
> +
> + qde = IXGBE_READ_REG(hw, IXGBE_QDE);
> + if (en)
> + qde |= IXGBE_QDE_STRIP_TAG;
> + else
> + qde &= ~IXGBE_QDE_STRIP_TAG;
> + qde &= ~IXGBE_QDE_READ;
> + qde |= IXGBE_QDE_WRITE;
> + IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
> +
> + return ret;
> +}
> +
> +/* Enable l2 tunnel tag stripping */
> +static int
> +ixgbe_dev_l2_tunnel_stripping_enable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> +/* Disable l2 tunnel tag stripping */
> +static int
> +ixgbe_dev_l2_tunnel_stripping_disable
> + (struct rte_eth_dev *dev,
> + enum rte_eth_l2_tunnel_type l2_tunnel_type)
> +{
> + int ret = 0;
> +
> + switch (l2_tunnel_type) {
> + case RTE_L2_TUNNEL_TYPE_E_TAG:
> + ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> + ret = -1;
> + break;
> + }
> +
> + return ret;
> +}
> +
> static struct rte_driver rte_ixgbe_driver = {
> .type = PMD_PDEV,
> .init = rte_ixgbe_pmd_init,
> --
> 1.9.3
Hi Shaopeng,
> -----Original Message-----
> From: He, Shaopeng
> Sent: Friday, March 4, 2016 9:47 AM
> To: Lu, Wenzhuo; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v4 6/7] ixgbe: support l2 tunnel operation
>
> Hi Wenzhuo,
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wenzhuo Lu
> > Sent: Thursday, February 18, 2016 10:46 AM
> > To: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH v4 6/7] ixgbe: support l2 tunnel operation
> >
> > Add support of l2 tunnel operation.
> > Support enabling/disabling l2 tunnel tag insertion/stripping.
> > Support enabling/disabling l2 tunnel packets forwarding.
> > Support adding/deleting forwarding rules for l2 tunnel packets.
> > Only support E-tag now.
> >
> > Also update the release note.
> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > ---
> > doc/guides/rel_notes/release_16_04.rst | 21 ++
> > drivers/net/ixgbe/ixgbe_ethdev.c | 371
> > +++++++++++++++++++++++++++++++++
> > 2 files changed, 392 insertions(+)
> >
> > diff --git a/doc/guides/rel_notes/release_16_04.rst
> > b/doc/guides/rel_notes/release_16_04.rst
> > index eb1b3b2..994da33 100644
> > --- a/doc/guides/rel_notes/release_16_04.rst
> > +++ b/doc/guides/rel_notes/release_16_04.rst
> > @@ -44,6 +44,27 @@ This section should contain new features added in
> > this release. Sample format:
> > Add the offload and negotiation of checksum and TSO between
> > vhost-user and
> > vanilla Linux virtio guest.
> >
> > +* **Added support for E-tag on X550.**
> > +
> > + E-tag is defined in 802.1br. Please reference
> > + http://www.ieee802.org/1/pages/802.1br.html.
> > +
> > + This feature is for VF, but please aware all the setting is on PF.
> > + It means the CLIs should be used on PF, but some of their effect
> > + will be shown on
> > VF.
> > + The forwarding of E-tag packets based on GRP and E-CID_base will
> > + have
> > effect
> > + on PF. Theoretically the E-tag packets can be forwarded to any pool/queue.
> > + But normally we'd like to forward the packets to the pools/queues
> > belonging
> > + to the VFs. And E-tag insertion and stripping will have effect on
> > + VFs. When VF receives E-tag packets, it should strip the E-tag.
> > + When VF transmits packets, it should insert the E-tag. Both can be offloaded.
> > +
> > + When we want to use this E-tag support feature, the forwarding
> > + should be enabled to forward the packets received by PF to
> > + indicated VFs. And
> > insertion
> > + and stripping should be enabled for VFs to offload the effort to HW.
> > +
> > + * Support E-tag offloading of insertion and stripping.
> > + * Support Forwarding E-tag packets to pools based on
> > + GRP and E-CID_base.
> >
> > Resolved Issues
> > ---------------
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> > b/drivers/net/ixgbe/ixgbe_ethdev.c
> > index b15a4b6..aa00842 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> > @@ -139,10 +139,17 @@
> > #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
> >
> > #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
> > +#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
> > #define DEFAULT_ETAG_ETYPE 0x893f
> > #define IXGBE_ETAG_ETYPE 0x00005084
> > #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
> > #define IXGBE_ETAG_ETYPE_VALID 0x80000000
> > +#define IXGBE_RAH_ADTYPE 0x40000000
> > +#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff
> > +#define IXGBE_VMVIR_TAGA_MASK 0x18000000
> > +#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
> > +#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
> > +#define IXGBE_QDE_STRIP_TAG 0x00000004
> >
> > static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); static
> > int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); @@ -351,6
> > +358,33 @@ static int ixgbe_dev_l2_tunnel_enable static int
> > ixgbe_dev_l2_tunnel_disable
> > (struct rte_eth_dev *dev,
> > enum rte_eth_l2_tunnel_type l2_tunnel_type);
> > +static int ixgbe_dev_l2_tunnel_insertion_enable
> > + (struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel,
> > + uint16_t vf_id);
> > +static int ixgbe_dev_l2_tunnel_insertion_disable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type,
> > + uint16_t vf_id);
> > +static int ixgbe_dev_l2_tunnel_stripping_enable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type); static int
> > +ixgbe_dev_l2_tunnel_stripping_disable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type); static int
> > +ixgbe_dev_l2_tunnel_forwarding_enable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type); static int
> > +ixgbe_dev_l2_tunnel_forwarding_disable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type); static int
> > +ixgbe_dev_l2_tunnel_filter_add
> > + (struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel,
> > + uint32_t pool);
> > +static int ixgbe_dev_l2_tunnel_filter_del
> > + (struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel);
> >
> > /*
> > * Define VF Stats MACRO for Non "cleared on read" register @@ -512,6
> > +546,14 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
> > .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
> > .l2_tunnel_enable = ixgbe_dev_l2_tunnel_enable,
> > .l2_tunnel_disable = ixgbe_dev_l2_tunnel_disable,
> > + .l2_tunnel_insertion_enable =
> > ixgbe_dev_l2_tunnel_insertion_enable,
> > + .l2_tunnel_insertion_disable =
> > ixgbe_dev_l2_tunnel_insertion_disable,
> > + .l2_tunnel_stripping_enable =
> > ixgbe_dev_l2_tunnel_stripping_enable,
> > + .l2_tunnel_stripping_disable =
> > ixgbe_dev_l2_tunnel_stripping_disable,
> > + .l2_tunnel_forwarding_enable =
> > ixgbe_dev_l2_tunnel_forwarding_enable,
> > + .l2_tunnel_forwarding_disable =
> > ixgbe_dev_l2_tunnel_forwarding_disable,
> > + .l2_tunnel_filter_add = ixgbe_dev_l2_tunnel_filter_add,
> > + .l2_tunnel_filter_del = ixgbe_dev_l2_tunnel_filter_del,
> > };
> >
> > /*
> > @@ -6341,6 +6383,335 @@ ixgbe_dev_l2_tunnel_disable(struct
> > rte_eth_dev *dev,
> > return ret;
> > }
> >
> > +static int
> > +ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel) {
> > + int ret = 0;
> > + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > + u32 i, rar_entries;
> > + u32 rar_low, rar_high;
> > +
> > + if (hw->mac.type != ixgbe_mac_X550 &&
> > + hw->mac.type != ixgbe_mac_X550EM_x) {
> > + return -ENOTSUP;
> > + }
> > +
> > + rar_entries = ixgbe_get_num_rx_addrs(hw);
> > +
> > + for (i = 1; i < rar_entries; i++) {
> > + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
> > + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
> > + if ((rar_high & IXGBE_RAH_AV) &&
> > + (rar_high & IXGBE_RAH_ADTYPE) &&
> > + ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
> > + l2_tunnel->tunnel_id)) {
> > + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
> > + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
> > +
> > + ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
> > +
> > + return ret;
> > + }
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel,
> > + uint32_t pool)
> > +{
> > + int ret = 0;
> > + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > + u32 i, rar_entries;
> > + u32 rar_low, rar_high;
> > +
> > + if (hw->mac.type != ixgbe_mac_X550 &&
> > + hw->mac.type != ixgbe_mac_X550EM_x) {
> > + return -ENOTSUP;
> > + }
> > +
> > + /* One entry for one tunnel. Try to remove potential existing entry.
> > */
> > + ixgbe_e_tag_filter_del(dev, l2_tunnel);
> > +
> > + rar_entries = ixgbe_get_num_rx_addrs(hw);
> > +
> > + for (i = 1; i < rar_entries; i++) {
> > + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
> > + if (rar_high & IXGBE_RAH_AV) {
> > + continue;
> > + } else {
> > + ixgbe_set_vmdq(hw, i, pool);
>
> Do we need to check the return result here?
I think we need not do that, because ixgbe_set_vmdq only fails when the operation is not supported or the RAR index is out of range. I believe we have already guaranteed that will not happen :)
>
> > + rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
> > + rar_low = l2_tunnel->tunnel_id;
> > +
> > + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
> > + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
> > +
> > + return ret;
> > + }
> > + }
> > +
> > + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
> > + " Please remove a rule before adding a new one.");
> > + return -1;
> > +}
> > +
> > +/* Add l2 tunnel filter */
> > +static int
> > +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel,
> > + uint32_t pool)
> > +{
> > + int ret = 0;
> > +
> > + switch (l2_tunnel->l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_filter_add(dev, l2_tunnel, pool);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +/* Delete l2 tunnel filter */
> > +static int
> > +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel) {
> > + int ret = 0;
> > +
> > + switch (l2_tunnel->l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) {
> > + int ret = 0;
> > + uint32_t ctrl;
> > + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > +
> > + if (hw->mac.type != ixgbe_mac_X550 &&
> > + hw->mac.type != ixgbe_mac_X550EM_x) {
> > + return -ENOTSUP;
> > + }
> > +
> > + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
> > + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
> > + if (en)
> > + ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
> > + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
> > +
> > + return ret;
> > +}
> > +
> > +/* Enable l2 tunnel forwarding */
> > +static int
> > +ixgbe_dev_l2_tunnel_forwarding_enable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type) {
> > + int ret = 0;
> > +
> > + switch (l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +/* Disable l2 tunnel forwarding */
> > +static int
> > +ixgbe_dev_l2_tunnel_forwarding_disable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type) {
> > + int ret = 0;
> > +
> > + switch (l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel,
> > + uint16_t vf_id,
> > + bool en)
> > +{
> > + int ret = 0;
> > + uint32_t vmtir, vmvir;
> > + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > +
> > + if (vf_id >= dev->pci_dev->max_vfs) {
> > + PMD_DRV_LOG(ERR,
> > + "VF id %u should be less than %u",
> > + vf_id,
> > + dev->pci_dev->max_vfs);
> > + return -EINVAL;
> > + }
> > +
> > + if (hw->mac.type != ixgbe_mac_X550 &&
> > + hw->mac.type != ixgbe_mac_X550EM_x) {
> > + return -ENOTSUP;
> > + }
> > +
> > + if (en)
> > + vmtir = l2_tunnel->tunnel_id;
> > + else
> > + vmtir = 0;
> > +
> > + IXGBE_WRITE_REG(hw, IXGBE_VMTIR(vf_id), vmtir);
> > +
> > + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf_id));
> > + vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
> > + if (en)
> > + vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
> > + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf_id), vmvir);
> > +
> > + return ret;
> > +}
> > +
> > +/* Enable l2 tunnel tag insertion */
> > +static int
> > +ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
> > + struct rte_eth_l2_tunnel *l2_tunnel,
> > + uint16_t vf_id)
> > +{
> > + int ret = 0;
> > +
> > + switch (l2_tunnel->l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, vf_id, 1);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +/* Disable l2 tunnel tag insertion */ static int
> > +ixgbe_dev_l2_tunnel_insertion_disable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type,
> > + uint16_t vf_id)
> > +{
> > + int ret = 0;
> > +
> > + switch (l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_insertion_en_dis(dev, NULL, vf_id, 0);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
> > + bool en)
> > +{
> > + int ret = 0;
> > + uint32_t qde;
> > + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > +
> > + if (hw->mac.type != ixgbe_mac_X550 &&
> > + hw->mac.type != ixgbe_mac_X550EM_x) {
> > + return -ENOTSUP;
> > + }
> > +
> > + qde = IXGBE_READ_REG(hw, IXGBE_QDE);
> > + if (en)
> > + qde |= IXGBE_QDE_STRIP_TAG;
> > + else
> > + qde &= ~IXGBE_QDE_STRIP_TAG;
> > + qde &= ~IXGBE_QDE_READ;
> > + qde |= IXGBE_QDE_WRITE;
> > + IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
> > +
> > + return ret;
> > +}
> > +
> > +/* Enable l2 tunnel tag stripping */
> > +static int
> > +ixgbe_dev_l2_tunnel_stripping_enable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type) {
> > + int ret = 0;
> > +
> > + switch (l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +/* Disable l2 tunnel tag stripping */ static int
> > +ixgbe_dev_l2_tunnel_stripping_disable
> > + (struct rte_eth_dev *dev,
> > + enum rte_eth_l2_tunnel_type l2_tunnel_type) {
> > + int ret = 0;
> > +
> > + switch (l2_tunnel_type) {
> > + case RTE_L2_TUNNEL_TYPE_E_TAG:
> > + ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Invalid tunnel type");
> > + ret = -1;
> > + break;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > static struct rte_driver rte_ixgbe_driver = {
> > .type = PMD_PDEV,
> > .init = rte_ixgbe_pmd_init,
> > --
> > 1.9.3
@@ -44,6 +44,27 @@ This section should contain new features added in this release. Sample format:
Add the offload and negotiation of checksum and TSO between vhost-user and
vanilla Linux virtio guest.
+* **Added support for E-tag on X550.**
+
+ E-tag is defined in 802.1br. Please reference
+ http://www.ieee802.org/1/pages/802.1br.html.
+
+ This feature is for the VF, but please be aware that all the settings are on the PF. It means
+ the CLIs should be used on the PF, but some of their effects will be shown on the VF.
+ The forwarding of E-tag packets based on GRP and E-CID_base will have effect
+ on PF. Theoretically the E-tag packets can be forwarded to any pool/queue.
+ But normally we'd like to forward the packets to the pools/queues belonging
+ to the VFs. And E-tag insertion and stripping will have effect on VFs. When
+ VF receives E-tag packets, it should strip the E-tag. When VF transmits
+ packets, it should insert the E-tag. Both can be offloaded.
+
+ When we want to use this E-tag support feature, the forwarding should be
+ enabled to forward the packets received by PF to indicated VFs. And insertion
+ and stripping should be enabled for VFs to offload the effort to HW.
+
+ * Support E-tag offloading of insertion and stripping.
+ * Support Forwarding E-tag packets to pools based on
+ GRP and E-CID_base.
Resolved Issues
---------------
@@ -139,10 +139,17 @@
#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
+#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define DEFAULT_ETAG_ETYPE 0x893f
#define IXGBE_ETAG_ETYPE 0x00005084
#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID 0x80000000
+#define IXGBE_RAH_ADTYPE 0x40000000
+#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff
+#define IXGBE_VMVIR_TAGA_MASK 0x18000000
+#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
+#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_QDE_STRIP_TAG 0x00000004
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
@@ -351,6 +358,33 @@ static int ixgbe_dev_l2_tunnel_enable
static int ixgbe_dev_l2_tunnel_disable
(struct rte_eth_dev *dev,
enum rte_eth_l2_tunnel_type l2_tunnel_type);
+static int ixgbe_dev_l2_tunnel_insertion_enable
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel,
+ uint16_t vf_id);
+static int ixgbe_dev_l2_tunnel_insertion_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type,
+ uint16_t vf_id);
+static int ixgbe_dev_l2_tunnel_stripping_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type);
+static int ixgbe_dev_l2_tunnel_stripping_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type);
+static int ixgbe_dev_l2_tunnel_forwarding_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type);
+static int ixgbe_dev_l2_tunnel_forwarding_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type);
+static int ixgbe_dev_l2_tunnel_filter_add
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel,
+ uint32_t pool);
+static int ixgbe_dev_l2_tunnel_filter_del
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -512,6 +546,14 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
.l2_tunnel_enable = ixgbe_dev_l2_tunnel_enable,
.l2_tunnel_disable = ixgbe_dev_l2_tunnel_disable,
+ .l2_tunnel_insertion_enable = ixgbe_dev_l2_tunnel_insertion_enable,
+ .l2_tunnel_insertion_disable = ixgbe_dev_l2_tunnel_insertion_disable,
+ .l2_tunnel_stripping_enable = ixgbe_dev_l2_tunnel_stripping_enable,
+ .l2_tunnel_stripping_disable = ixgbe_dev_l2_tunnel_stripping_disable,
+ .l2_tunnel_forwarding_enable = ixgbe_dev_l2_tunnel_forwarding_enable,
+ .l2_tunnel_forwarding_disable = ixgbe_dev_l2_tunnel_forwarding_disable,
+ .l2_tunnel_filter_add = ixgbe_dev_l2_tunnel_filter_add,
+ .l2_tunnel_filter_del = ixgbe_dev_l2_tunnel_filter_del,
};
/*
@@ -6341,6 +6383,335 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
return ret;
}
+/*
+ * Delete the E-tag forwarding rule matching l2_tunnel->tunnel_id.
+ * Scans the RAR table for an entry that is valid (RAH_AV), typed as an
+ * E-tag filter (RAH_ADTYPE) and whose low bits (RAL_ETAG_FILTER_MASK)
+ * equal the tunnel ID; clears the entry and drops its VMDq binding.
+ * E-tag is only supported on X550/X550EM_x.
+ *
+ * Returns 0 on success and also when no matching entry exists (deletion
+ * is best-effort; callers such as filter_add rely on this), or -ENOTSUP
+ * on other MAC types.
+ *
+ * NOTE(review): the scan starts at index 1 — entry 0 is presumably
+ * reserved for the PF MAC address; confirm against the datasheet.
+ */
+static int
+ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 i, rar_entries;
+ u32 rar_low, rar_high;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x) {
+ return -ENOTSUP;
+ }
+
+ rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+ for (i = 1; i < rar_entries; i++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+ if ((rar_high & IXGBE_RAH_AV) &&
+ (rar_high & IXGBE_RAH_ADTYPE) &&
+ ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
+ l2_tunnel->tunnel_id)) {
+ /* Invalidate the entry and release its pool mapping. */
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+
+ ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Add an E-tag forwarding rule: program a free RAR entry with the E-tag
+ * ID and bind it to the given VMDq pool.  One RAR entry holds one tunnel
+ * ID, so a pre-existing entry for the same ID is removed first (the rule
+ * is replaced, never duplicated).  E-tag is only supported on
+ * X550/X550EM_x.
+ *
+ * Returns 0 on success, -ENOTSUP on other MAC types, or -ENOSPC when no
+ * free RAR entry remains.
+ *
+ * NOTE(review): the scan starts at index 1 — entry 0 is presumably
+ * reserved for the PF MAC address; confirm against the datasheet.
+ */
+static int
+ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+		       struct rte_eth_l2_tunnel *l2_tunnel,
+		       uint32_t pool)
+{
+	int ret = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	u32 i, rar_entries;
+	u32 rar_low, rar_high;
+
+	if (hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x) {
+		return -ENOTSUP;
+	}
+
+	/* One entry for one tunnel. Try to remove potential existing entry. */
+	ixgbe_e_tag_filter_del(dev, l2_tunnel);
+
+	rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+	for (i = 1; i < rar_entries; i++) {
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+		if (rar_high & IXGBE_RAH_AV)
+			continue; /* entry in use, keep looking */
+
+		ixgbe_set_vmdq(hw, i, pool);
+		/* Mark the entry valid and typed as an E-tag filter. */
+		rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
+		rar_low = l2_tunnel->tunnel_id;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
+
+		return ret;
+	}
+
+	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
+		     " Please remove a rule before adding a new one.");
+	/* Negative errno instead of a bare -1, per DPDK convention. */
+	return -ENOSPC;
+}
+
+/*
+ * Add an l2 tunnel forwarding filter (eth_dev_ops.l2_tunnel_filter_add).
+ * Dispatches on tunnel type; only E-tag is implemented.  Unsupported
+ * types log an error and return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel,
+ uint32_t pool)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_filter_add(dev, l2_tunnel, pool);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Delete an l2 tunnel forwarding filter
+ * (eth_dev_ops.l2_tunnel_filter_del).  Dispatches on tunnel type; only
+ * E-tag is implemented.  Unsupported types log an error and return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Enable or disable E-tag based packet forwarding by switching the
+ * pooling mode field of VT_CTL: the field is cleared, then set to the
+ * E-tag pooling mode when enabling.  E-tag is only supported on
+ * X550/X550EM_x; returns 0 on success, -ENOTSUP on other MAC types.
+ */
+static int
+ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+ int ret = 0;
+ uint32_t ctrl;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x) {
+ return -ENOTSUP;
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+ if (en)
+ ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+
+ return ret;
+}
+
+/*
+ * Enable l2 tunnel packet forwarding
+ * (eth_dev_ops.l2_tunnel_forwarding_enable).  Dispatches on tunnel
+ * type; only E-tag is implemented.  Unsupported types log an error and
+ * return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_forwarding_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Disable l2 tunnel packet forwarding
+ * (eth_dev_ops.l2_tunnel_forwarding_disable).  Dispatches on tunnel
+ * type; only E-tag is implemented.  Unsupported types log an error and
+ * return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_forwarding_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Enable or disable E-tag insertion for the given VF: program the tag
+ * value into VMTIR(vf_id) (zeroed on disable) and set/clear the E-tag
+ * insert mode in the TAGA field of VMVIR(vf_id).
+ *
+ * l2_tunnel may be NULL when en == 0 (the disable wrapper passes NULL);
+ * it is only dereferenced on the enable path — keep that invariant.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range vf_id, -ENOTSUP on
+ * MAC types other than X550/X550EM_x.
+ */
+static int
+ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel,
+ uint16_t vf_id,
+ bool en)
+{
+ int ret = 0;
+ uint32_t vmtir, vmvir;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (vf_id >= dev->pci_dev->max_vfs) {
+ PMD_DRV_LOG(ERR,
+ "VF id %u should be less than %u",
+ vf_id,
+ dev->pci_dev->max_vfs);
+ return -EINVAL;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x) {
+ return -ENOTSUP;
+ }
+
+ if (en)
+ vmtir = l2_tunnel->tunnel_id;
+ else
+ vmtir = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMTIR(vf_id), vmtir);
+
+ vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf_id));
+ vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
+ if (en)
+ vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf_id), vmvir);
+
+ return ret;
+}
+
+/*
+ * Enable l2 tunnel tag insertion for a VF
+ * (eth_dev_ops.l2_tunnel_insertion_enable).  Dispatches on tunnel type;
+ * only E-tag is implemented.  Unsupported types log an error and
+ * return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel *l2_tunnel,
+ uint16_t vf_id)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, vf_id, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Disable l2 tunnel tag insertion for a VF
+ * (eth_dev_ops.l2_tunnel_insertion_disable).  Dispatches on tunnel
+ * type; only E-tag is implemented.  Passes NULL for l2_tunnel — the
+ * helper does not read it on the disable path.  Unsupported types log
+ * an error and return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_insertion_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type,
+ uint16_t vf_id)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_insertion_en_dis(dev, NULL, vf_id, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Enable or disable E-tag stripping via the QDE register: read-modify-
+ * write that sets or clears the strip-tag bit, then issues the update
+ * with the WRITE bit set (READ cleared).  Only X550/X550EM_x support
+ * E-tag; returns 0 on success, -ENOTSUP otherwise.
+ *
+ * NOTE(review): QDE is an indexed register (queue selected via its
+ * index/READ/WRITE mechanism).  As written, no queue index is set, so
+ * this appears to affect only the entry currently selected — confirm
+ * against the datasheet whether every queue needs the write.
+ */
+static int
+ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
+ bool en)
+{
+ int ret = 0;
+ uint32_t qde;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x) {
+ return -ENOTSUP;
+ }
+
+ qde = IXGBE_READ_REG(hw, IXGBE_QDE);
+ if (en)
+ qde |= IXGBE_QDE_STRIP_TAG;
+ else
+ qde &= ~IXGBE_QDE_STRIP_TAG;
+ qde &= ~IXGBE_QDE_READ;
+ qde |= IXGBE_QDE_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
+
+ return ret;
+}
+
+/*
+ * Enable l2 tunnel tag stripping
+ * (eth_dev_ops.l2_tunnel_stripping_enable).  Dispatches on tunnel type;
+ * only E-tag is implemented.  Unsupported types log an error and
+ * return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_stripping_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Disable l2 tunnel tag stripping
+ * (eth_dev_ops.l2_tunnel_stripping_disable).  Dispatches on tunnel
+ * type; only E-tag is implemented.  Unsupported types log an error and
+ * return -1.
+ */
+static int
+ixgbe_dev_l2_tunnel_stripping_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_l2_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
.init = rte_ixgbe_pmd_init,