get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/98288/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 98288,
    "url": "http://patches.dpdk.org/api/patches/98288/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210908083758.312055-8-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210908083758.312055-8-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210908083758.312055-8-jiawenwu@trustnetic.com",
    "date": "2021-09-08T08:37:33",
    "name": "[07/32] net/ngbe: support VLAN and QinQ offload",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "0dac2a51d4d252848a338cb5268f0f39185c99a4",
    "submitter": {
        "id": 1932,
        "url": "http://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210908083758.312055-8-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 18760,
            "url": "http://patches.dpdk.org/api/series/18760/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=18760",
            "date": "2021-09-08T08:37:26",
            "name": "net/ngbe: add many features",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/18760/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/98288/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/98288/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 780EAA0C56;\n\tWed,  8 Sep 2021 10:37:13 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 80D7E411A5;\n\tWed,  8 Sep 2021 10:36:39 +0200 (CEST)",
            "from smtpbguseast2.qq.com (smtpbguseast2.qq.com [54.204.34.130])\n by mails.dpdk.org (Postfix) with ESMTP id 4AFAA41162\n for <dev@dpdk.org>; Wed,  8 Sep 2021 10:36:37 +0200 (CEST)",
            "from wxdbg.localdomain.com (unknown [183.129.236.74])\n by esmtp6.qq.com (ESMTP) with\n id ; Wed, 08 Sep 2021 16:36:31 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp47t1631090192thx343wj",
        "X-QQ-SSF": "01400000002000E0G000B00A0000000",
        "X-QQ-FEAT": "6PjtIMncaiyRnb1Dg142Oo4f0YtXKZA37B34vipdFJR3bw0QqZkIU65omX2Du\n vykj+iCYay4OQDeDdaHF60vSJnlqQqB5eGAlbN28SZQRlbwaCQ3tHg8/LfBqlBxJKy51ccA\n DDDPeAlF7OZ6l7BVx8evrZ3Rz5UEuKtLtgUHIwBO/AwIcFdK6ajkkLgMv3fO9619cgqGSxt\n PPD3dYgKncnOUnTo70UxUZHQaQazZBKdJBAgOp/vXg/TuuRDJMhGUJPw0fMa04c3qhBSViZ\n gtRMxpy/k90gIJ28MuraxhFtuBJ2AprnKvzpA43RXRrm8l9t5q1dmliBVcBhv8VS3FsAs4H\n 0GIr8dkISTBQHYCODF4MGrA2FkqJ4V99+Pju/II",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Wed,  8 Sep 2021 16:37:33 +0800",
        "Message-Id": "<20210908083758.312055-8-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210908083758.312055-1-jiawenwu@trustnetic.com>",
        "References": "<20210908083758.312055-1-jiawenwu@trustnetic.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign1",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH 07/32] net/ngbe: support VLAN and QinQ offload",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Support to set VLAN and QinQ offload.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n doc/guides/nics/features/ngbe.ini |   2 +\n doc/guides/nics/ngbe.rst          |   1 +\n drivers/net/ngbe/ngbe_ethdev.c    | 273 ++++++++++++++++++++++++++++++\n drivers/net/ngbe/ngbe_ethdev.h    |  42 +++++\n drivers/net/ngbe/ngbe_rxtx.c      | 119 ++++++++++++-\n drivers/net/ngbe/ngbe_rxtx.h      |   3 +\n 6 files changed, 434 insertions(+), 6 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini\nindex 30fdfe62c7..4ae2d66d15 100644\n--- a/doc/guides/nics/features/ngbe.ini\n+++ b/doc/guides/nics/features/ngbe.ini\n@@ -12,6 +12,8 @@ Jumbo frame          = Y\n Scattered Rx         = Y\n TSO                  = Y\n CRC offload          = P\n+VLAN offload         = P\n+QinQ offload         = P\n L3 checksum offload  = P\n L4 checksum offload  = P\n Inner L3 checksum    = P\ndiff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst\nindex 702a455041..9518a59443 100644\n--- a/doc/guides/nics/ngbe.rst\n+++ b/doc/guides/nics/ngbe.rst\n@@ -13,6 +13,7 @@ Features\n \n - Packet type information\n - Checksum offload\n+- VLAN/QinQ stripping and inserting\n - TSO offload\n - Jumbo frames\n - Link state information\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c\nindex e7d63f1b14..3903eb0a2c 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.c\n+++ b/drivers/net/ngbe/ngbe_ethdev.c\n@@ -17,6 +17,9 @@\n static int ngbe_dev_close(struct rte_eth_dev *dev);\n static int ngbe_dev_link_update(struct rte_eth_dev *dev,\n \t\t\t\tint wait_to_complete);\n+static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);\n+static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,\n+\t\t\t\t\tuint16_t queue);\n \n static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);\n static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);\n@@ -27,6 +30,24 @@ static void ngbe_dev_interrupt_handler(void *param);\n static void ngbe_dev_interrupt_delayed_handler(void *param);\n static void ngbe_configure_msix(struct rte_eth_dev *dev);\n \n+#define NGBE_SET_HWSTRIP(h, q) do {\\\n+\t\tuint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \\\n+\t\tuint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \\\n+\t\t(h)->bitmap[idx] |= 1 << bit;\\\n+\t} while (0)\n+\n+#define NGBE_CLEAR_HWSTRIP(h, q) do {\\\n+\t\tuint32_t idx = (q) / 
(sizeof((h)->bitmap[0]) * NBBY); \\\n+\t\tuint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \\\n+\t\t(h)->bitmap[idx] &= ~(1 << bit);\\\n+\t} while (0)\n+\n+#define NGBE_GET_HWSTRIP(h, q, r) do {\\\n+\t\tuint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \\\n+\t\tuint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \\\n+\t\t(r) = (h)->bitmap[idx] >> bit & 1;\\\n+\t} while (0)\n+\n /*\n  * The set of PCI devices this driver supports\n  */\n@@ -129,6 +150,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)\n {\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);\n \tstruct ngbe_hw *hw = ngbe_dev_hw(eth_dev);\n+\tstruct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);\n+\tstruct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n \tconst struct rte_memzone *mz;\n \tuint32_t ctrl_ext;\n@@ -242,6 +265,12 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)\n \t\treturn -ENOMEM;\n \t}\n \n+\t/* initialize the vfta */\n+\tmemset(shadow_vfta, 0, sizeof(*shadow_vfta));\n+\n+\t/* initialize the hw strip bitmap*/\n+\tmemset(hwstrip, 0, sizeof(*hwstrip));\n+\n \tctrl_ext = rd32(hw, NGBE_PORTCTL);\n \t/* let hardware know driver is loaded */\n \tctrl_ext |= NGBE_PORTCTL_DRVLOAD;\n@@ -311,6 +340,237 @@ static struct rte_pci_driver rte_ngbe_pmd = {\n \t.remove = eth_ngbe_pci_remove,\n };\n \n+void\n+ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t vlnctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* Filter Table Disable */\n+\tvlnctrl = rd32(hw, NGBE_VLANCTL);\n+\tvlnctrl &= ~NGBE_VLANCTL_VFE;\n+\twr32(hw, NGBE_VLANCTL, vlnctrl);\n+}\n+\n+void\n+ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tstruct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);\n+\tuint32_t vlnctrl;\n+\tuint16_t i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* 
Filter Table Enable */\n+\tvlnctrl = rd32(hw, NGBE_VLANCTL);\n+\tvlnctrl &= ~NGBE_VLANCTL_CFIENA;\n+\tvlnctrl |= NGBE_VLANCTL_VFE;\n+\twr32(hw, NGBE_VLANCTL, vlnctrl);\n+\n+\t/* write whatever is in local vfta copy */\n+\tfor (i = 0; i < NGBE_VFTA_SIZE; i++)\n+\t\twr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);\n+}\n+\n+void\n+ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)\n+{\n+\tstruct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);\n+\tstruct ngbe_rx_queue *rxq;\n+\n+\tif (queue >= NGBE_MAX_RX_QUEUE_NUM)\n+\t\treturn;\n+\n+\tif (on)\n+\t\tNGBE_SET_HWSTRIP(hwstrip, queue);\n+\telse\n+\t\tNGBE_CLEAR_HWSTRIP(hwstrip, queue);\n+\n+\tif (queue >= dev->data->nb_rx_queues)\n+\t\treturn;\n+\n+\trxq = dev->data->rx_queues[queue];\n+\n+\tif (on) {\n+\t\trxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n+\t\trxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;\n+\t} else {\n+\t\trxq->vlan_flags = PKT_RX_VLAN;\n+\t\trxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;\n+\t}\n+}\n+\n+static void\n+ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t ctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tctrl = rd32(hw, NGBE_RXCFG(queue));\n+\tctrl &= ~NGBE_RXCFG_VLAN;\n+\twr32(hw, NGBE_RXCFG(queue), ctrl);\n+\n+\t/* record those setting for HW strip per queue */\n+\tngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);\n+}\n+\n+static void\n+ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t ctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tctrl = rd32(hw, NGBE_RXCFG(queue));\n+\tctrl |= NGBE_RXCFG_VLAN;\n+\twr32(hw, NGBE_RXCFG(queue), ctrl);\n+\n+\t/* record those setting for HW strip per queue */\n+\tngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);\n+}\n+\n+static void\n+ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t ctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tctrl = 
rd32(hw, NGBE_PORTCTL);\n+\tctrl &= ~NGBE_PORTCTL_VLANEXT;\n+\tctrl &= ~NGBE_PORTCTL_QINQ;\n+\twr32(hw, NGBE_PORTCTL, ctrl);\n+}\n+\n+static void\n+ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t ctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tctrl  = rd32(hw, NGBE_PORTCTL);\n+\tctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;\n+\twr32(hw, NGBE_PORTCTL, ctrl);\n+}\n+\n+static void\n+ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t ctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tctrl = rd32(hw, NGBE_PORTCTL);\n+\tctrl &= ~NGBE_PORTCTL_QINQ;\n+\twr32(hw, NGBE_PORTCTL, ctrl);\n+}\n+\n+static void\n+ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tuint32_t ctrl;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tctrl  = rd32(hw, NGBE_PORTCTL);\n+\tctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;\n+\twr32(hw, NGBE_PORTCTL, ctrl);\n+}\n+\n+void\n+ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_rx_queue *rxq;\n+\tuint16_t i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\n+\t\tif (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)\n+\t\t\tngbe_vlan_hw_strip_enable(dev, i);\n+\t\telse\n+\t\t\tngbe_vlan_hw_strip_disable(dev, i);\n+\t}\n+}\n+\n+void\n+ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)\n+{\n+\tuint16_t i;\n+\tstruct rte_eth_rxmode *rxmode;\n+\tstruct ngbe_rx_queue *rxq;\n+\n+\tif (mask & ETH_VLAN_STRIP_MASK) {\n+\t\trxmode = &dev->data->dev_conf.rxmode;\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)\n+\t\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\t\t\trxq = dev->data->rx_queues[i];\n+\t\t\t\trxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;\n+\t\t\t}\n+\t\telse\n+\t\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\t\t\trxq = 
dev->data->rx_queues[i];\n+\t\t\t\trxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;\n+\t\t\t}\n+\t}\n+}\n+\n+static int\n+ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)\n+{\n+\tstruct rte_eth_rxmode *rxmode;\n+\trxmode = &dev->data->dev_conf.rxmode;\n+\n+\tif (mask & ETH_VLAN_STRIP_MASK)\n+\t\tngbe_vlan_hw_strip_config(dev);\n+\n+\tif (mask & ETH_VLAN_FILTER_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)\n+\t\t\tngbe_vlan_hw_filter_enable(dev);\n+\t\telse\n+\t\t\tngbe_vlan_hw_filter_disable(dev);\n+\t}\n+\n+\tif (mask & ETH_VLAN_EXTEND_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)\n+\t\t\tngbe_vlan_hw_extend_enable(dev);\n+\t\telse\n+\t\t\tngbe_vlan_hw_extend_disable(dev);\n+\t}\n+\n+\tif (mask & ETH_QINQ_STRIP_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)\n+\t\t\tngbe_qinq_hw_strip_enable(dev);\n+\t\telse\n+\t\t\tngbe_qinq_hw_strip_disable(dev);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)\n+{\n+\tngbe_config_vlan_strip_on_all_queues(dev, mask);\n+\n+\tngbe_vlan_offload_config(dev, mask);\n+\n+\treturn 0;\n+}\n+\n static int\n ngbe_dev_configure(struct rte_eth_dev *dev)\n {\n@@ -363,6 +623,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)\n \tbool link_up = false, negotiate = false;\n \tuint32_t speed = 0;\n \tuint32_t allowed_speeds = 0;\n+\tint mask = 0;\n \tint status;\n \tuint32_t *link_speeds;\n \n@@ -420,6 +681,16 @@ ngbe_dev_start(struct rte_eth_dev *dev)\n \t\tgoto error;\n \t}\n \n+\tmask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |\n+\t\tETH_VLAN_EXTEND_MASK;\n+\terr = ngbe_vlan_offload_config(dev, mask);\n+\tif (err != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to set VLAN offload\");\n+\t\tgoto error;\n+\t}\n+\n+\tngbe_configure_port(dev);\n+\n \terr = ngbe_dev_rxtx_start(dev);\n \tif (err < 0) {\n \t\tPMD_INIT_LOG(ERR, \"Unable to start rxtx queues\");\n@@ -654,6 +925,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info 
*dev_info)\n \tdev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;\n \tdev_info->min_rx_bufsize = 1024;\n \tdev_info->max_rx_pktlen = 15872;\n+\tdev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);\n \tdev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |\n \t\t\t\t     dev_info->rx_queue_offload_capa);\n \tdev_info->tx_queue_offload_capa = 0;\n@@ -1190,6 +1462,7 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {\n \t.dev_close                  = ngbe_dev_close,\n \t.dev_reset                  = ngbe_dev_reset,\n \t.link_update                = ngbe_dev_link_update,\n+\t.vlan_offload_set           = ngbe_vlan_offload_set,\n \t.rx_queue_start\t            = ngbe_dev_rx_queue_start,\n \t.rx_queue_stop              = ngbe_dev_rx_queue_stop,\n \t.tx_queue_start\t            = ngbe_dev_tx_queue_start,\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h\nindex cbf3ab558f..8b3a1cdc3d 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.h\n+++ b/drivers/net/ngbe/ngbe_ethdev.h\n@@ -15,6 +15,17 @@\n #define NGBE_FLAG_MACSEC            ((uint32_t)(1 << 3))\n #define NGBE_FLAG_NEED_LINK_CONFIG  ((uint32_t)(1 << 4))\n \n+#define NGBE_VFTA_SIZE 128\n+#define NGBE_VLAN_TAG_SIZE 4\n+/*Default value of Max Rx Queue*/\n+#define NGBE_MAX_RX_QUEUE_NUM\t8\n+\n+#ifndef NBBY\n+#define NBBY\t8\t/* number of bits in a byte */\n+#endif\n+#define NGBE_HWSTRIP_BITMAP_SIZE \\\n+\t(NGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))\n+\n #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT\t500 /* 500us */\n \n #define NGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET\n@@ -29,12 +40,22 @@ struct ngbe_interrupt {\n \tuint64_t mask_orig; /* save mask during delayed handler */\n };\n \n+struct ngbe_vfta {\n+\tuint32_t vfta[NGBE_VFTA_SIZE];\n+};\n+\n+struct ngbe_hwstrip {\n+\tuint32_t bitmap[NGBE_HWSTRIP_BITMAP_SIZE];\n+};\n+\n /*\n  * Structure to store private data for each driver instance (for each port).\n  */\n struct ngbe_adapter {\n \tstruct ngbe_hw  
           hw;\n \tstruct ngbe_interrupt      intr;\n+\tstruct ngbe_vfta           shadow_vfta;\n+\tstruct ngbe_hwstrip        hwstrip;\n \tbool                       rx_bulk_alloc_allowed;\n };\n \n@@ -64,6 +85,12 @@ ngbe_dev_intr(struct rte_eth_dev *dev)\n \treturn intr;\n }\n \n+#define NGBE_DEV_VFTA(dev) \\\n+\t(&((struct ngbe_adapter *)(dev)->data->dev_private)->shadow_vfta)\n+\n+#define NGBE_DEV_HWSTRIP(dev) \\\n+\t(&((struct ngbe_adapter *)(dev)->data->dev_private)->hwstrip)\n+\n /*\n  * Rx/Tx function prototypes\n  */\n@@ -126,10 +153,21 @@ uint16_t ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,\n \t\t\t       uint8_t queue, uint8_t msix_vector);\n \n+void ngbe_configure_port(struct rte_eth_dev *dev);\n+\n int\n ngbe_dev_link_update_share(struct rte_eth_dev *dev,\n \t\tint wait_to_complete);\n \n+/*\n+ * misc function prototypes\n+ */\n+void ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);\n+\n+void ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);\n+\n+void ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev);\n+\n #define NGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */\n #define NGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */\n #define NGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. 
*/\n@@ -148,5 +186,9 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,\n #define NGBE_DEFAULT_TX_WTHRESH      0\n \n const uint32_t *ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n+void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,\n+\t\tuint16_t queue, bool on);\n+void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,\n+\t\t\t\t\t\t  int mask);\n \n #endif /* _NGBE_ETHDEV_H_ */\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c\nindex 4238fbe3b8..1151173b02 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.c\n+++ b/drivers/net/ngbe/ngbe_rxtx.c\n@@ -21,6 +21,7 @@ static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |\n \t\tPKT_TX_OUTER_IPV4 |\n \t\tPKT_TX_IPV6 |\n \t\tPKT_TX_IPV4 |\n+\t\tPKT_TX_VLAN_PKT |\n \t\tPKT_TX_L4_MASK |\n \t\tPKT_TX_TCP_SEG |\n \t\tPKT_TX_TUNNEL_MASK |\n@@ -346,6 +347,11 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,\n \t\tvlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);\n \t}\n \n+\tif (ol_flags & PKT_TX_VLAN_PKT) {\n+\t\ttx_offload_mask.vlan_tci |= ~0;\n+\t\tvlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);\n+\t}\n+\n \ttxq->ctx_cache[ctx_idx].flags = ol_flags;\n \ttxq->ctx_cache[ctx_idx].tx_offload.data[0] =\n \t\ttx_offload_mask.data[0] & tx_offload.data[0];\n@@ -416,6 +422,8 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)\n \t\t\ttmp |= NGBE_TXD_IPCS;\n \t\ttmp |= NGBE_TXD_L4CS;\n \t}\n+\tif (ol_flags & PKT_TX_VLAN_PKT)\n+\t\ttmp |= NGBE_TXD_CC;\n \n \treturn tmp;\n }\n@@ -425,6 +433,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)\n {\n \tuint32_t cmdtype = 0;\n \n+\tif (ol_flags & PKT_TX_VLAN_PKT)\n+\t\tcmdtype |= NGBE_TXD_VLE;\n \tif (ol_flags & PKT_TX_TCP_SEG)\n \t\tcmdtype |= NGBE_TXD_TSE;\n \treturn cmdtype;\n@@ -443,6 +453,8 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)\n \n \t/* L2 level */\n \tptype = RTE_PTYPE_L2_ETHER;\n+\tif (oflags & PKT_TX_VLAN)\n+\t\tptype |= RTE_PTYPE_L2_ETHER_VLAN;\n \n \t/* L3 level */\n \tif (oflags & 
(PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))\n@@ -606,6 +618,7 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\ttx_offload.l2_len = tx_pkt->l2_len;\n \t\t\ttx_offload.l3_len = tx_pkt->l3_len;\n \t\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\t\ttx_offload.vlan_tci = tx_pkt->vlan_tci;\n \t\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n \t\t\ttx_offload.outer_l2_len = tx_pkt->outer_l2_len;\n \t\t\ttx_offload.outer_l3_len = tx_pkt->outer_l3_len;\n@@ -884,6 +897,23 @@ ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)\n \treturn ngbe_decode_ptype(ptid);\n }\n \n+static inline uint64_t\n+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)\n+{\n+\tuint64_t pkt_flags;\n+\n+\t/*\n+\t * Check if VLAN present only.\n+\t * Do not check whether L3/L4 rx checksum done by NIC or not,\n+\t * That can be found from rte_eth_rxmode.offloads flag\n+\t */\n+\tpkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&\n+\t\t     vlan_flags & PKT_RX_VLAN_STRIPPED)\n+\t\t    ? 
vlan_flags : 0;\n+\n+\treturn pkt_flags;\n+}\n+\n static inline uint64_t\n rx_desc_error_to_pkt_flags(uint32_t rx_status)\n {\n@@ -972,9 +1002,12 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)\n \t\t\t\t  rxq->crc_len;\n \t\t\tmb->data_len = pkt_len;\n \t\t\tmb->pkt_len = pkt_len;\n+\t\t\tmb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);\n \n \t\t\t/* convert descriptor fields to rte mbuf flags */\n-\t\t\tpkt_flags = rx_desc_error_to_pkt_flags(s[j]);\n+\t\t\tpkt_flags = rx_desc_status_to_pkt_flags(s[j],\n+\t\t\t\t\trxq->vlan_flags);\n+\t\t\tpkt_flags |= rx_desc_error_to_pkt_flags(s[j]);\n \t\t\tmb->ol_flags = pkt_flags;\n \t\t\tmb->packet_type =\n \t\t\t\tngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],\n@@ -1270,6 +1303,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t *    - Rx port identifier.\n \t\t * 2) integrate hardware offload data, if any:\n \t\t *    - IP checksum flag,\n+\t\t *    - VLAN TCI, if any,\n \t\t *    - error flags.\n \t\t */\n \t\tpkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -\n@@ -1283,7 +1317,12 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\trxm->port = rxq->port_id;\n \n \t\tpkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);\n-\t\tpkt_flags = rx_desc_error_to_pkt_flags(staterr);\n+\t\t/* Only valid if PKT_RX_VLAN set in pkt_flags */\n+\t\trxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);\n+\n+\t\tpkt_flags = rx_desc_status_to_pkt_flags(staterr,\n+\t\t\t\t\trxq->vlan_flags);\n+\t\tpkt_flags |= rx_desc_error_to_pkt_flags(staterr);\n \t\trxm->ol_flags = pkt_flags;\n \t\trxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,\n \t\t\t\t\t\t       rxq->pkt_type_mask);\n@@ -1328,6 +1367,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n  *    - RX port identifier\n  *    - hardware offload data, if any:\n  *      - IP checksum flag\n+ *      - VLAN TCI, if any\n  *      - error flags\n  * @head HEAD of the packet cluster\n  * @desc HW descriptor to get data from\n@@ -1342,8 +1382,13 @@ 
ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,\n \n \thead->port = rxq->port_id;\n \n+\t/* The vlan_tci field is only valid when PKT_RX_VLAN is\n+\t * set in the pkt_flags field.\n+\t */\n+\thead->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);\n \tpkt_info = rte_le_to_cpu_32(desc->qw0.dw0);\n-\tpkt_flags = rx_desc_error_to_pkt_flags(staterr);\n+\tpkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);\n+\tpkt_flags |= rx_desc_error_to_pkt_flags(staterr);\n \thead->ol_flags = pkt_flags;\n \thead->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,\n \t\t\t\t\t\trxq->pkt_type_mask);\n@@ -1714,10 +1759,10 @@ uint64_t\n ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)\n {\n \tuint64_t tx_offload_capa;\n-\n-\tRTE_SET_USED(dev);\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n \n \ttx_offload_capa =\n+\t\tDEV_TX_OFFLOAD_VLAN_INSERT |\n \t\tDEV_TX_OFFLOAD_IPV4_CKSUM  |\n \t\tDEV_TX_OFFLOAD_UDP_CKSUM   |\n \t\tDEV_TX_OFFLOAD_TCP_CKSUM   |\n@@ -1730,6 +1775,9 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)\n \t\tDEV_TX_OFFLOAD_IPIP_TNL_TSO\t|\n \t\tDEV_TX_OFFLOAD_MULTI_SEGS;\n \n+\tif (hw->is_pf)\n+\t\ttx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;\n+\n \treturn tx_offload_capa;\n }\n \n@@ -2000,17 +2048,29 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)\n }\n \n uint64_t\n-ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)\n+ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)\n+{\n+\treturn DEV_RX_OFFLOAD_VLAN_STRIP;\n+}\n+\n+uint64_t\n+ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)\n {\n \tuint64_t offloads;\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n \n \toffloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |\n \t\t   DEV_RX_OFFLOAD_UDP_CKSUM   |\n \t\t   DEV_RX_OFFLOAD_TCP_CKSUM   |\n \t\t   DEV_RX_OFFLOAD_KEEP_CRC    |\n \t\t   DEV_RX_OFFLOAD_JUMBO_FRAME |\n+\t\t   DEV_RX_OFFLOAD_VLAN_FILTER |\n \t\t   DEV_RX_OFFLOAD_SCATTER;\n \n+\tif (hw->is_pf)\n+\t\toffloads |= 
(DEV_RX_OFFLOAD_QINQ_STRIP |\n+\t\t\t     DEV_RX_OFFLOAD_VLAN_EXTEND);\n+\n \treturn offloads;\n }\n \n@@ -2189,6 +2249,40 @@ ngbe_dev_free_queues(struct rte_eth_dev *dev)\n \tdev->data->nb_tx_queues = 0;\n }\n \n+void ngbe_configure_port(struct rte_eth_dev *dev)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tint i = 0;\n+\tuint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,\n+\t\t\t\t0x9100, 0x9200,\n+\t\t\t\t0x0000, 0x0000,\n+\t\t\t\t0x0000, 0x0000};\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* default outer vlan tpid */\n+\twr32(hw, NGBE_EXTAG,\n+\t\tNGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |\n+\t\tNGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));\n+\n+\t/* default inner vlan tpid */\n+\twr32m(hw, NGBE_VLANCTL,\n+\t\tNGBE_VLANCTL_TPID_MASK,\n+\t\tNGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));\n+\twr32m(hw, NGBE_DMATXCTRL,\n+\t\tNGBE_DMATXCTRL_TPID_MASK,\n+\t\tNGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));\n+\n+\t/* default vlan tpid filters */\n+\tfor (i = 0; i < 8; i++) {\n+\t\twr32m(hw, NGBE_TAGTPID(i / 2),\n+\t\t\t(i % 2 ? NGBE_TAGTPID_MSB_MASK\n+\t\t\t       : NGBE_TAGTPID_LSB_MASK),\n+\t\t\t(i % 2 ? 
NGBE_TAGTPID_MSB(tpids[i])\n+\t\t\t       : NGBE_TAGTPID_LSB(tpids[i])));\n+\t}\n+}\n+\n static int\n ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)\n {\n@@ -2326,6 +2420,12 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\t\tNGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));\n \t}\n \n+\t/*\n+\t * Assume no header split and no VLAN strip support\n+\t * on any Rx queue first .\n+\t */\n+\trx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;\n+\n \t/* Setup Rx queues */\n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n \t\trxq = dev->data->rx_queues[i];\n@@ -2366,6 +2466,13 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\tsrrctl |= NGBE_RXCFG_PKTLEN(buf_size);\n \n \t\twr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);\n+\n+\t\t/* It adds dual VLAN length for supporting dual VLAN */\n+\t\tif (dev->data->dev_conf.rxmode.max_rx_pkt_len +\n+\t\t\t\t\t    2 * NGBE_VLAN_TAG_SIZE > buf_size)\n+\t\t\tdev->data->scattered_rx = 1;\n+\t\tif (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)\n+\t\t\trx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;\n \t}\n \n \tif (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h\nindex 07b6e2374e..812bc57c9e 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.h\n+++ b/drivers/net/ngbe/ngbe_rxtx.h\n@@ -271,6 +271,8 @@ struct ngbe_rx_queue {\n \tuint8_t         crc_len;  /**< 0 if CRC stripped, 4 otherwise. 
*/\n \tuint8_t         drop_en;  /**< If not 0, set SRRCTL.Drop_En */\n \tuint8_t         rx_deferred_start; /**< not in global dev start */\n+\t/** flags to set in mbuf when a vlan is detected */\n+\tuint64_t        vlan_flags;\n \tuint64_t\toffloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */\n \t/** need to alloc dummy mbuf, for wraparound when scanning hw ring */\n \tstruct rte_mbuf fake_mbuf;\n@@ -370,6 +372,7 @@ void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq);\n void ngbe_set_rx_function(struct rte_eth_dev *dev);\n \n uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);\n+uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);\n uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);\n \n #endif /* _NGBE_RXTX_H_ */\n",
    "prefixes": [
        "07/32"
    ]
}