get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
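
For illustration (this sketch is not part of the Patchwork page itself), the same record can be fetched as JSON from a script. The example below assumes the Python "requests" library and uses only the endpoint and field names visible in the response that follows.

    # Hedged sketch: fetch this patch record as JSON. Assumes the third-party
    # "requests" library; the endpoint and field names are taken from the
    # response shown below.
    import requests

    resp = requests.get(
        "https://patches.dpdk.org/api/patches/118771/",
        headers={"Accept": "application/json"},
    )
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[v6,1/8] net/gve/base: introduce GVE PMD base code"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # mbox URL for applying the patch locally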

GET /api/patches/118771/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118771,
    "url": "https://patches.dpdk.org/api/patches/118771/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20221020103656.1068036-2-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020103656.1068036-2-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020103656.1068036-2-junfeng.guo@intel.com",
    "date": "2022-10-20T10:36:49",
    "name": "[v6,1/8] net/gve/base: introduce GVE PMD base code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "211e56ae300b3b21f6ddce5cb1fc614e90a12512",
    "submitter": {
        "id": 1785,
        "url": "https://patches.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20221020103656.1068036-2-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25340,
            "url": "https://patches.dpdk.org/api/series/25340/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=25340",
            "date": "2022-10-20T10:36:48",
            "name": "introduce GVE PMD",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/25340/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/118771/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/118771/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3F7A4A0581;\n\tThu, 20 Oct 2022 12:38:37 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 27A3442D23;\n\tThu, 20 Oct 2022 12:38:37 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id 89A5D42D19\n for <dev@dpdk.org>; Thu, 20 Oct 2022 12:38:34 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 20 Oct 2022 03:38:33 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by fmsmga004.fm.intel.com with ESMTP; 20 Oct 2022 03:38:30 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666262314; x=1697798314;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=YcoAcW5KfAVehNpjGY6tGzIfDLfmFoXcVuRZHpqjIn0=;\n b=ODt59UhGSP7uzElfDhi0byU5B31kccq1dCZ9JomMYxl6q+aHnq8RG8xk\n 2RF/HUFvaPE4VPSJsXypAyLYs+YJMZHG5jpY1kfVNXScuoy+yciATOYl9\n g3DRHdTvBISu9YBO+IdiQ9LtPoUR5hPnOedgBlbtZlfyGcVlRcfZQP8p9\n 52lrTqF0PP/toylB5BW4PcFTByCHrtxLwzphm4ix1O2qijSAbq+2Magun\n qXQ4nYh8jU/UfSYCPIFbyrHvNOKq1GiOvix45PyuiS7Ohha5KiEzTzNZs\n PENKWlMhRJc5B9+mgfiGUV2AJbF4pirNFkFo0AILHbuziQWz+aDRF1Epq w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10505\"; a=\"308354874\"",
            "E=Sophos;i=\"5.95,198,1661842800\"; d=\"scan'208\";a=\"308354874\"",
            "E=McAfee;i=\"6500,9779,10505\"; a=\"698582838\"",
            "E=Sophos;i=\"5.95,198,1661842800\"; d=\"scan'208\";a=\"698582838\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@xilinx.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, xiaoyun.li@intel.com, awogbemila@google.com,\n bruce.richardson@intel.com, hemant.agrawal@nxp.com,\n stephen@networkplumber.org, chenbo.xia@intel.com, helin.zhang@intel.com,\n Junfeng Guo <junfeng.guo@intel.com>, Haiyue Wang <haiyue.wang@intel.com>",
        "Subject": "[PATCH v6 1/8] net/gve/base: introduce GVE PMD base code",
        "Date": "Thu, 20 Oct 2022 18:36:49 +0800",
        "Message-Id": "<20221020103656.1068036-2-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221020103656.1068036-1-junfeng.guo@intel.com>",
        "References": "<20221010101757.878317-2-junfeng.guo@intel.com>\n <20221020103656.1068036-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The following base code is based on Google Virtual Ethernet (gve)\ndriver v1.3.0 under MIT license.\n- gve_adminq.c\n- gve_adminq.h\n- gve_desc.h\n- gve_desc_dqo.h\n- gve_register.h\n- gve.h\n\nThe original code is in:\nhttps://github.com/GoogleCloudPlatform/compute-virtual-ethernet-linux/\\\ntree/v1.3.0/google/gve\n\nNote that these code are not Intel files and they come from the kernel\ncommunity. The base code there has the statement of\nSPDX-License-Identifier: (GPL-2.0 OR MIT). Here we just follow the\nrequired MIT license as an exception to DPDK.\n\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Haiyue Wang <haiyue.wang@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/gve/base/gve.h          |  58 ++\n drivers/net/gve/base/gve_adminq.c   | 924 ++++++++++++++++++++++++++++\n drivers/net/gve/base/gve_adminq.h   | 381 ++++++++++++\n drivers/net/gve/base/gve_desc.h     | 137 +++++\n drivers/net/gve/base/gve_desc_dqo.h | 254 ++++++++\n drivers/net/gve/base/gve_register.h |  28 +\n 6 files changed, 1782 insertions(+)\n create mode 100644 drivers/net/gve/base/gve.h\n create mode 100644 drivers/net/gve/base/gve_adminq.c\n create mode 100644 drivers/net/gve/base/gve_adminq.h\n create mode 100644 drivers/net/gve/base/gve_desc.h\n create mode 100644 drivers/net/gve/base/gve_desc_dqo.h\n create mode 100644 drivers/net/gve/base/gve_register.h",
    "diff": "diff --git a/drivers/net/gve/base/gve.h b/drivers/net/gve/base/gve.h\nnew file mode 100644\nindex 0000000000..1b0d59b639\n--- /dev/null\n+++ b/drivers/net/gve/base/gve.h\n@@ -0,0 +1,58 @@\n+/* SPDX-License-Identifier: MIT\n+ * Google Virtual Ethernet (gve) driver\n+ * Version: 1.3.0\n+ * Copyright (C) 2015-2022 Google, Inc.\n+ * Copyright(C) 2022 Intel Corporation\n+ */\n+\n+#ifndef _GVE_H_\n+#define _GVE_H_\n+\n+#include \"gve_desc.h\"\n+\n+#define GVE_VERSION\t\t\"1.3.0\"\n+#define GVE_VERSION_PREFIX\t\"GVE-\"\n+\n+#ifndef GOOGLE_VENDOR_ID\n+#define GOOGLE_VENDOR_ID\t0x1ae0\n+#endif\n+\n+#define GVE_DEV_ID\t\t0x0042\n+\n+#define GVE_REG_BAR\t\t0\n+#define GVE_DB_BAR\t\t2\n+\n+/* 1 for management, 1 for rx, 1 for tx */\n+#define GVE_MIN_MSIX\t\t3\n+\n+/* PTYPEs are always 10 bits. */\n+#define GVE_NUM_PTYPES\t\t1024\n+\n+struct gve_irq_db {\n+\trte_be32_t id;\n+} ____cacheline_aligned;\n+\n+struct gve_ptype {\n+\tuint8_t l3_type;  /* `gve_l3_type` in gve_adminq.h */\n+\tuint8_t l4_type;  /* `gve_l4_type` in gve_adminq.h */\n+};\n+\n+struct gve_ptype_lut {\n+\tstruct gve_ptype ptypes[GVE_NUM_PTYPES];\n+};\n+\n+enum gve_queue_format {\n+\tGVE_QUEUE_FORMAT_UNSPECIFIED = 0x0, /* default unspecified */\n+\tGVE_GQI_RDA_FORMAT\t     = 0x1, /* GQI Raw Addressing */\n+\tGVE_GQI_QPL_FORMAT\t     = 0x2, /* GQI Queue Page List */\n+\tGVE_DQO_RDA_FORMAT\t     = 0x3, /* DQO Raw Addressing */\n+};\n+\n+enum gve_state_flags_bit {\n+\tGVE_PRIV_FLAGS_ADMIN_QUEUE_OK\t\t= 1,\n+\tGVE_PRIV_FLAGS_DEVICE_RESOURCES_OK\t= 2,\n+\tGVE_PRIV_FLAGS_DEVICE_RINGS_OK\t\t= 3,\n+\tGVE_PRIV_FLAGS_NAPI_ENABLED\t\t= 4,\n+};\n+\n+#endif /* _GVE_H_ */\ndiff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c\nnew file mode 100644\nindex 0000000000..2344100f1a\n--- /dev/null\n+++ b/drivers/net/gve/base/gve_adminq.c\n@@ -0,0 +1,924 @@\n+/* SPDX-License-Identifier: MIT\n+ * Google Virtual Ethernet (gve) driver\n+ * Version: 1.3.0\n+ * Copyright (C) 2015-2022 Google, Inc.\n+ * Copyright(C) 2022 Intel Corporation\n+ */\n+\n+#include \"gve_adminq.h\"\n+#include \"gve_register.h\"\n+\n+#define GVE_MAX_ADMINQ_RELEASE_CHECK\t500\n+#define GVE_ADMINQ_SLEEP_LEN\t\t20\n+#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK\t100\n+\n+#define GVE_DEVICE_OPTION_ERROR_FMT \"%s option error:\\n Expected: length=%d, feature_mask=%x.\\n Actual: length=%d, feature_mask=%x.\"\n+\n+#define GVE_DEVICE_OPTION_TOO_BIG_FMT \"Length of %s option larger than expected. Possible older version of guest driver.\"\n+\n+static\n+struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,\n+\t\t\t\t\t      struct gve_device_option *option)\n+{\n+\tuintptr_t option_end, descriptor_end;\n+\n+\toption_end = (uintptr_t)option + sizeof(*option) + be16_to_cpu(option->option_length);\n+\tdescriptor_end = (uintptr_t)descriptor + be16_to_cpu(descriptor->total_length);\n+\n+\treturn option_end > descriptor_end ? 
NULL : (struct gve_device_option *)option_end;\n+}\n+\n+static\n+void gve_parse_device_option(struct gve_priv *priv,\n+\t\t\t     struct gve_device_option *option,\n+\t\t\t     struct gve_device_option_gqi_rda **dev_op_gqi_rda,\n+\t\t\t     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,\n+\t\t\t     struct gve_device_option_dqo_rda **dev_op_dqo_rda,\n+\t\t\t     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)\n+{\n+\tu32 req_feat_mask = be32_to_cpu(option->required_features_mask);\n+\tu16 option_length = be16_to_cpu(option->option_length);\n+\tu16 option_id = be16_to_cpu(option->option_id);\n+\n+\t/* If the length or feature mask doesn't match, continue without\n+\t * enabling the feature.\n+\t */\n+\tswitch (option_id) {\n+\tcase GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:\n+\t\tif (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||\n+\t\t    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {\n+\t\t\tPMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,\n+\t\t\t\t    \"Raw Addressing\",\n+\t\t\t\t    GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,\n+\t\t\t\t    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,\n+\t\t\t\t    option_length, req_feat_mask);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tPMD_DRV_LOG(INFO, \"Gqi raw addressing device option enabled.\");\n+\t\tpriv->queue_format = GVE_GQI_RDA_FORMAT;\n+\t\tbreak;\n+\tcase GVE_DEV_OPT_ID_GQI_RDA:\n+\t\tif (option_length < sizeof(**dev_op_gqi_rda) ||\n+\t\t    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {\n+\t\t\tPMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,\n+\t\t\t\t    \"GQI RDA\", (int)sizeof(**dev_op_gqi_rda),\n+\t\t\t\t    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,\n+\t\t\t\t    option_length, req_feat_mask);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (option_length > sizeof(**dev_op_gqi_rda)) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t    GVE_DEVICE_OPTION_TOO_BIG_FMT, \"GQI RDA\");\n+\t\t}\n+\t\t*dev_op_gqi_rda = RTE_PTR_ADD(option, sizeof(*option));\n+\t\tbreak;\n+\tcase GVE_DEV_OPT_ID_GQI_QPL:\n+\t\tif (option_length < sizeof(**dev_op_gqi_qpl) ||\n+\t\t    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {\n+\t\t\tPMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,\n+\t\t\t\t    \"GQI QPL\", (int)sizeof(**dev_op_gqi_qpl),\n+\t\t\t\t    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,\n+\t\t\t\t    option_length, req_feat_mask);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (option_length > sizeof(**dev_op_gqi_qpl)) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t    GVE_DEVICE_OPTION_TOO_BIG_FMT, \"GQI QPL\");\n+\t\t}\n+\t\t*dev_op_gqi_qpl = RTE_PTR_ADD(option, sizeof(*option));\n+\t\tbreak;\n+\tcase GVE_DEV_OPT_ID_DQO_RDA:\n+\t\tif (option_length < sizeof(**dev_op_dqo_rda) ||\n+\t\t    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {\n+\t\t\tPMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,\n+\t\t\t\t    \"DQO RDA\", (int)sizeof(**dev_op_dqo_rda),\n+\t\t\t\t    GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,\n+\t\t\t\t    option_length, req_feat_mask);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (option_length > sizeof(**dev_op_dqo_rda)) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t    GVE_DEVICE_OPTION_TOO_BIG_FMT, \"DQO RDA\");\n+\t\t}\n+\t\t*dev_op_dqo_rda = RTE_PTR_ADD(option, sizeof(*option));\n+\t\tbreak;\n+\tcase GVE_DEV_OPT_ID_JUMBO_FRAMES:\n+\t\tif (option_length < sizeof(**dev_op_jumbo_frames) ||\n+\t\t    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {\n+\t\t\tPMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,\n+\t\t\t\t    \"Jumbo Frames\",\n+\t\t\t\t    (int)sizeof(**dev_op_jumbo_frames),\n+\t\t\t\t    GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,\n+\t\t\t\t    
option_length, req_feat_mask);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (option_length > sizeof(**dev_op_jumbo_frames)) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t    GVE_DEVICE_OPTION_TOO_BIG_FMT,\n+\t\t\t\t    \"Jumbo Frames\");\n+\t\t}\n+\t\t*dev_op_jumbo_frames = RTE_PTR_ADD(option, sizeof(*option));\n+\t\tbreak;\n+\tdefault:\n+\t\t/* If we don't recognize the option just continue\n+\t\t * without doing anything.\n+\t\t */\n+\t\tPMD_DRV_LOG(DEBUG, \"Unrecognized device option 0x%hx not enabled.\",\n+\t\t\t    option_id);\n+\t}\n+}\n+\n+/* Process all device options for a given describe device call. */\n+static int\n+gve_process_device_options(struct gve_priv *priv,\n+\t\t\t   struct gve_device_descriptor *descriptor,\n+\t\t\t   struct gve_device_option_gqi_rda **dev_op_gqi_rda,\n+\t\t\t   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,\n+\t\t\t   struct gve_device_option_dqo_rda **dev_op_dqo_rda,\n+\t\t\t   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)\n+{\n+\tconst int num_options = be16_to_cpu(descriptor->num_device_options);\n+\tstruct gve_device_option *dev_opt;\n+\tint i;\n+\n+\t/* The options struct directly follows the device descriptor. */\n+\tdev_opt = RTE_PTR_ADD(descriptor, sizeof(*descriptor));\n+\tfor (i = 0; i < num_options; i++) {\n+\t\tstruct gve_device_option *next_opt;\n+\n+\t\tnext_opt = gve_get_next_option(descriptor, dev_opt);\n+\t\tif (!next_opt) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"options exceed device_descriptor's total length.\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tgve_parse_device_option(priv, dev_opt,\n+\t\t\t\t\tdev_op_gqi_rda, dev_op_gqi_qpl,\n+\t\t\t\t\tdev_op_dqo_rda, dev_op_jumbo_frames);\n+\t\tdev_opt = next_opt;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int gve_adminq_alloc(struct gve_priv *priv)\n+{\n+\tpriv->adminq = gve_alloc_dma_mem(&priv->adminq_dma_mem, PAGE_SIZE);\n+\tif (unlikely(!priv->adminq))\n+\t\treturn -ENOMEM;\n+\n+\tpriv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;\n+\tpriv->adminq_prod_cnt = 0;\n+\tpriv->adminq_cmd_fail = 0;\n+\tpriv->adminq_timeouts = 0;\n+\tpriv->adminq_describe_device_cnt = 0;\n+\tpriv->adminq_cfg_device_resources_cnt = 0;\n+\tpriv->adminq_register_page_list_cnt = 0;\n+\tpriv->adminq_unregister_page_list_cnt = 0;\n+\tpriv->adminq_create_tx_queue_cnt = 0;\n+\tpriv->adminq_create_rx_queue_cnt = 0;\n+\tpriv->adminq_destroy_tx_queue_cnt = 0;\n+\tpriv->adminq_destroy_rx_queue_cnt = 0;\n+\tpriv->adminq_dcfg_device_resources_cnt = 0;\n+\tpriv->adminq_set_driver_parameter_cnt = 0;\n+\tpriv->adminq_report_stats_cnt = 0;\n+\tpriv->adminq_report_link_speed_cnt = 0;\n+\tpriv->adminq_get_ptype_map_cnt = 0;\n+\n+\t/* Setup Admin queue with the device */\n+\tiowrite32be(priv->adminq_dma_mem.pa / PAGE_SIZE,\n+\t\t    &priv->reg_bar0->adminq_pfn);\n+\n+\tgve_set_admin_queue_ok(priv);\n+\treturn 0;\n+}\n+\n+void gve_adminq_release(struct gve_priv *priv)\n+{\n+\tint i = 0;\n+\n+\t/* Tell the device the adminq is leaving */\n+\tiowrite32be(0x0, &priv->reg_bar0->adminq_pfn);\n+\twhile (ioread32be(&priv->reg_bar0->adminq_pfn)) {\n+\t\t/* If this is reached the device is unrecoverable and still\n+\t\t * holding memory. 
Continue looping to avoid memory corruption,\n+\t\t * but WARN so it is visible what is going on.\n+\t\t */\n+\t\tif (i == GVE_MAX_ADMINQ_RELEASE_CHECK)\n+\t\t\tPMD_DRV_LOG(WARNING, \"Unrecoverable platform error!\");\n+\t\ti++;\n+\t\tmsleep(GVE_ADMINQ_SLEEP_LEN);\n+\t}\n+\tgve_clear_device_rings_ok(priv);\n+\tgve_clear_device_resources_ok(priv);\n+\tgve_clear_admin_queue_ok(priv);\n+}\n+\n+void gve_adminq_free(struct gve_priv *priv)\n+{\n+\tif (!gve_get_admin_queue_ok(priv))\n+\t\treturn;\n+\tgve_adminq_release(priv);\n+\tgve_free_dma_mem(&priv->adminq_dma_mem);\n+\tgve_clear_admin_queue_ok(priv);\n+}\n+\n+static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)\n+{\n+\tiowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);\n+}\n+\n+static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {\n+\t\tif (ioread32be(&priv->reg_bar0->adminq_event_counter)\n+\t\t    == prod_cnt)\n+\t\t\treturn true;\n+\t\tmsleep(GVE_ADMINQ_SLEEP_LEN);\n+\t}\n+\n+\treturn false;\n+}\n+\n+static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)\n+{\n+\tif (status != GVE_ADMINQ_COMMAND_PASSED &&\n+\t    status != GVE_ADMINQ_COMMAND_UNSET) {\n+\t\tPMD_DRV_LOG(ERR, \"AQ command failed with status %d\", status);\n+\t\tpriv->adminq_cmd_fail++;\n+\t}\n+\tswitch (status) {\n+\tcase GVE_ADMINQ_COMMAND_PASSED:\n+\t\treturn 0;\n+\tcase GVE_ADMINQ_COMMAND_UNSET:\n+\t\tPMD_DRV_LOG(ERR, \"parse_aq_err: err and status both unset, this should not be possible.\");\n+\t\treturn -EINVAL;\n+\tcase GVE_ADMINQ_COMMAND_ERROR_ABORTED:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_CANCELLED:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_DATALOSS:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:\n+\t\treturn -EAGAIN;\n+\tcase GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:\n+\t\treturn -EINVAL;\n+\tcase GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:\n+\t\treturn -ETIME;\n+\tcase GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:\n+\tcase GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:\n+\t\treturn -EACCES;\n+\tcase GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:\n+\t\treturn -ENOMEM;\n+\tcase GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:\n+\t\treturn -ENOTSUP;\n+\tdefault:\n+\t\tPMD_DRV_LOG(ERR, \"parse_aq_err: unknown status code %d\",\n+\t\t\t    status);\n+\t\treturn -EINVAL;\n+\t}\n+}\n+\n+/* Flushes all AQ commands currently queued and waits for them to complete.\n+ * If there are failures, it will return the first error.\n+ */\n+static int gve_adminq_kick_and_wait(struct gve_priv *priv)\n+{\n+\tu32 tail, head;\n+\tu32 i;\n+\n+\ttail = ioread32be(&priv->reg_bar0->adminq_event_counter);\n+\thead = priv->adminq_prod_cnt;\n+\n+\tgve_adminq_kick_cmd(priv, head);\n+\tif (!gve_adminq_wait_for_cmd(priv, head)) {\n+\t\tPMD_DRV_LOG(ERR, \"AQ commands timed out, need to reset AQ\");\n+\t\tpriv->adminq_timeouts++;\n+\t\treturn -ENOTRECOVERABLE;\n+\t}\n+\n+\tfor (i = tail; i < head; i++) {\n+\t\tunion gve_adminq_command *cmd;\n+\t\tu32 status, err;\n+\n+\t\tcmd = &priv->adminq[i & priv->adminq_mask];\n+\t\tstatus = be32_to_cpu(READ_ONCE32(cmd->status));\n+\t\terr = gve_adminq_parse_err(priv, status);\n+\t\tif (err)\n+\t\t\t/* Return the first error if we failed. 
*/\n+\t\t\treturn err;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* This function is not threadsafe - the caller is responsible for any\n+ * necessary locks.\n+ */\n+static int gve_adminq_issue_cmd(struct gve_priv *priv,\n+\t\t\t\tunion gve_adminq_command *cmd_orig)\n+{\n+\tunion gve_adminq_command *cmd;\n+\tu32 opcode;\n+\tu32 tail;\n+\n+\ttail = ioread32be(&priv->reg_bar0->adminq_event_counter);\n+\n+\t/* Check if next command will overflow the buffer. */\n+\tif (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==\n+\t    (tail & priv->adminq_mask)) {\n+\t\tint err;\n+\n+\t\t/* Flush existing commands to make room. */\n+\t\terr = gve_adminq_kick_and_wait(priv);\n+\t\tif (err)\n+\t\t\treturn err;\n+\n+\t\t/* Retry. */\n+\t\ttail = ioread32be(&priv->reg_bar0->adminq_event_counter);\n+\t\tif (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==\n+\t\t    (tail & priv->adminq_mask)) {\n+\t\t\t/* This should never happen. We just flushed the\n+\t\t\t * command queue so there should be enough space.\n+\t\t\t */\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tcmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];\n+\tpriv->adminq_prod_cnt++;\n+\n+\tmemcpy(cmd, cmd_orig, sizeof(*cmd_orig));\n+\topcode = be32_to_cpu(READ_ONCE32(cmd->opcode));\n+\n+\tswitch (opcode) {\n+\tcase GVE_ADMINQ_DESCRIBE_DEVICE:\n+\t\tpriv->adminq_describe_device_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:\n+\t\tpriv->adminq_cfg_device_resources_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_REGISTER_PAGE_LIST:\n+\t\tpriv->adminq_register_page_list_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_UNREGISTER_PAGE_LIST:\n+\t\tpriv->adminq_unregister_page_list_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_CREATE_TX_QUEUE:\n+\t\tpriv->adminq_create_tx_queue_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_CREATE_RX_QUEUE:\n+\t\tpriv->adminq_create_rx_queue_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_DESTROY_TX_QUEUE:\n+\t\tpriv->adminq_destroy_tx_queue_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_DESTROY_RX_QUEUE:\n+\t\tpriv->adminq_destroy_rx_queue_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:\n+\t\tpriv->adminq_dcfg_device_resources_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_SET_DRIVER_PARAMETER:\n+\t\tpriv->adminq_set_driver_parameter_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_REPORT_STATS:\n+\t\tpriv->adminq_report_stats_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_REPORT_LINK_SPEED:\n+\t\tpriv->adminq_report_link_speed_cnt++;\n+\t\tbreak;\n+\tcase GVE_ADMINQ_GET_PTYPE_MAP:\n+\t\tpriv->adminq_get_ptype_map_cnt++;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_DRV_LOG(ERR, \"unknown AQ command opcode %d\", opcode);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* This function is not threadsafe - the caller is responsible for any\n+ * necessary locks.\n+ * The caller is also responsible for making sure there are no commands\n+ * waiting to be executed.\n+ */\n+static int gve_adminq_execute_cmd(struct gve_priv *priv,\n+\t\t\t\t  union gve_adminq_command *cmd_orig)\n+{\n+\tu32 tail, head;\n+\tint err;\n+\n+\ttail = ioread32be(&priv->reg_bar0->adminq_event_counter);\n+\thead = priv->adminq_prod_cnt;\n+\tif (tail != head)\n+\t\t/* This is not a valid path */\n+\t\treturn -EINVAL;\n+\n+\terr = gve_adminq_issue_cmd(priv, cmd_orig);\n+\tif (err)\n+\t\treturn err;\n+\n+\treturn gve_adminq_kick_and_wait(priv);\n+}\n+\n+/* The device specifies that the management vector can either be the first irq\n+ * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to\n+ * the ntfy blks. 
It if is 0 then the management vector is last, if it is 1 then\n+ * the management vector is first.\n+ *\n+ * gve arranges the msix vectors so that the management vector is last.\n+ */\n+#define GVE_NTFY_BLK_BASE_MSIX_IDX\t0\n+int gve_adminq_configure_device_resources(struct gve_priv *priv,\n+\t\t\t\t\t  dma_addr_t counter_array_bus_addr,\n+\t\t\t\t\t  u32 num_counters,\n+\t\t\t\t\t  dma_addr_t db_array_bus_addr,\n+\t\t\t\t\t  u32 num_ntfy_blks)\n+{\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);\n+\tcmd.configure_device_resources =\n+\t\t(struct gve_adminq_configure_device_resources) {\n+\t\t.counter_array = cpu_to_be64(counter_array_bus_addr),\n+\t\t.num_counters = cpu_to_be32(num_counters),\n+\t\t.irq_db_addr = cpu_to_be64(db_array_bus_addr),\n+\t\t.num_irq_dbs = cpu_to_be32(num_ntfy_blks),\n+\t\t.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_dbs)),\n+\t\t.ntfy_blk_msix_base_idx =\n+\t\t\t\t\tcpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),\n+\t\t.queue_format = priv->queue_format,\n+\t};\n+\n+\treturn gve_adminq_execute_cmd(priv, &cmd);\n+}\n+\n+int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)\n+{\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);\n+\n+\treturn gve_adminq_execute_cmd(priv, &cmd);\n+}\n+\n+static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)\n+{\n+\tstruct gve_tx_queue *txq = priv->txqs[queue_index];\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);\n+\tcmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {\n+\t\t.queue_id = cpu_to_be32(queue_index),\n+\t\t.queue_resources_addr =\n+\t\t\tcpu_to_be64(txq->qres_mz->iova),\n+\t\t.tx_ring_addr = cpu_to_be64(txq->tx_ring_phys_addr),\n+\t\t.ntfy_id = cpu_to_be32(txq->ntfy_id),\n+\t};\n+\n+\tif (gve_is_gqi(priv)) {\n+\t\tu32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?\n+\t\t\tGVE_RAW_ADDRESSING_QPL_ID : txq->qpl->id;\n+\n+\t\tcmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);\n+\t} else {\n+\t\tcmd.create_tx_queue.tx_ring_size =\n+\t\t\tcpu_to_be16(txq->nb_tx_desc);\n+\t\tcmd.create_tx_queue.tx_comp_ring_addr =\n+\t\t\tcpu_to_be64(txq->complq->tx_ring_phys_addr);\n+\t\tcmd.create_tx_queue.tx_comp_ring_size =\n+\t\t\tcpu_to_be16(priv->tx_compq_size);\n+\t}\n+\n+\treturn gve_adminq_issue_cmd(priv, &cmd);\n+}\n+\n+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)\n+{\n+\tint err;\n+\tu32 i;\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\terr = gve_adminq_create_tx_queue(priv, i);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn gve_adminq_kick_and_wait(priv);\n+}\n+\n+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)\n+{\n+\tstruct gve_rx_queue *rxq = priv->rxqs[queue_index];\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);\n+\tcmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {\n+\t\t.queue_id = cpu_to_be32(queue_index),\n+\t\t.ntfy_id = cpu_to_be32(rxq->ntfy_id),\n+\t\t.queue_resources_addr = cpu_to_be64(rxq->qres_mz->iova),\n+\t};\n+\n+\tif (gve_is_gqi(priv)) {\n+\t\tu32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?\n+\t\t\tGVE_RAW_ADDRESSING_QPL_ID : rxq->qpl->id;\n+\n+\t\tcmd.create_rx_queue.rx_desc_ring_addr 
=\n+\t\t\tcpu_to_be64(rxq->mz->iova),\n+\t\tcmd.create_rx_queue.rx_data_ring_addr =\n+\t\t\tcpu_to_be64(rxq->data_mz->iova),\n+\t\tcmd.create_rx_queue.index = cpu_to_be32(queue_index);\n+\t\tcmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);\n+\t\tcmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rxq->rx_buf_len);\n+\t} else {\n+\t\tcmd.create_rx_queue.rx_ring_size =\n+\t\t\tcpu_to_be16(priv->rx_desc_cnt);\n+\t\tcmd.create_rx_queue.rx_desc_ring_addr =\n+\t\t\tcpu_to_be64(rxq->rx_ring_phys_addr);\n+\t\tcmd.create_rx_queue.rx_data_ring_addr =\n+\t\t\tcpu_to_be64(rxq->bufq->rx_ring_phys_addr);\n+\t\tcmd.create_rx_queue.packet_buffer_size =\n+\t\t\tcpu_to_be16(rxq->rx_buf_len);\n+\t\tcmd.create_rx_queue.rx_buff_ring_size =\n+\t\t\tcpu_to_be16(priv->rx_bufq_size);\n+\t\tcmd.create_rx_queue.enable_rsc = !!(priv->enable_rsc);\n+\t}\n+\n+\treturn gve_adminq_issue_cmd(priv, &cmd);\n+}\n+\n+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)\n+{\n+\tint err;\n+\tu32 i;\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\terr = gve_adminq_create_rx_queue(priv, i);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn gve_adminq_kick_and_wait(priv);\n+}\n+\n+static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)\n+{\n+\tunion gve_adminq_command cmd;\n+\tint err;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);\n+\tcmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {\n+\t\t.queue_id = cpu_to_be32(queue_index),\n+\t};\n+\n+\terr = gve_adminq_issue_cmd(priv, &cmd);\n+\tif (err)\n+\t\treturn err;\n+\n+\treturn 0;\n+}\n+\n+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)\n+{\n+\tint err;\n+\tu32 i;\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\terr = gve_adminq_destroy_tx_queue(priv, i);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn gve_adminq_kick_and_wait(priv);\n+}\n+\n+static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)\n+{\n+\tunion gve_adminq_command cmd;\n+\tint err;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);\n+\tcmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {\n+\t\t.queue_id = cpu_to_be32(queue_index),\n+\t};\n+\n+\terr = gve_adminq_issue_cmd(priv, &cmd);\n+\tif (err)\n+\t\treturn err;\n+\n+\treturn 0;\n+}\n+\n+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)\n+{\n+\tint err;\n+\tu32 i;\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\terr = gve_adminq_destroy_rx_queue(priv, i);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn gve_adminq_kick_and_wait(priv);\n+}\n+\n+static int gve_set_desc_cnt(struct gve_priv *priv,\n+\t\t\t    struct gve_device_descriptor *descriptor)\n+{\n+\tpriv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);\n+\tif (priv->tx_desc_cnt * sizeof(priv->txqs[0]->tx_desc_ring[0])\n+\t    < PAGE_SIZE) {\n+\t\tPMD_DRV_LOG(ERR, \"Tx desc count %d too low\", priv->tx_desc_cnt);\n+\t\treturn -EINVAL;\n+\t}\n+\tpriv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);\n+\tif (priv->rx_desc_cnt * sizeof(priv->rxqs[0]->rx_desc_ring[0])\n+\t    < PAGE_SIZE) {\n+\t\tPMD_DRV_LOG(ERR, \"Rx desc count %d too low\", priv->rx_desc_cnt);\n+\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+gve_set_desc_cnt_dqo(struct gve_priv *priv,\n+\t\t     const struct gve_device_descriptor *descriptor,\n+\t\t     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)\n+{\n+\tpriv->tx_desc_cnt = 
be16_to_cpu(descriptor->tx_queue_entries);\n+\tpriv->tx_compq_size = be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);\n+\tpriv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);\n+\tpriv->rx_bufq_size = be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);\n+\n+\treturn 0;\n+}\n+\n+static void gve_enable_supported_features(struct gve_priv *priv,\n+\t\t\t\t\t  u32 supported_features_mask,\n+\t\t\t\t\t  const struct gve_device_option_jumbo_frames\n+\t\t\t\t\t\t  *dev_op_jumbo_frames)\n+{\n+\t/* Before control reaches this point, the page-size-capped max MTU from\n+\t * the gve_device_descriptor field has already been stored in\n+\t * priv->dev->max_mtu. We overwrite it with the true max MTU below.\n+\t */\n+\tif (dev_op_jumbo_frames &&\n+\t    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {\n+\t\tPMD_DRV_LOG(INFO, \"JUMBO FRAMES device option enabled.\");\n+\t\tpriv->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);\n+\t}\n+}\n+\n+int gve_adminq_describe_device(struct gve_priv *priv)\n+{\n+\tstruct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;\n+\tstruct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;\n+\tstruct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;\n+\tstruct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;\n+\tstruct gve_device_descriptor *descriptor;\n+\tstruct gve_dma_mem descriptor_dma_mem;\n+\tu32 supported_features_mask = 0;\n+\tunion gve_adminq_command cmd;\n+\tint err = 0;\n+\tu8 *mac;\n+\tu16 mtu;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tdescriptor = gve_alloc_dma_mem(&descriptor_dma_mem, PAGE_SIZE);\n+\tif (!descriptor)\n+\t\treturn -ENOMEM;\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);\n+\tcmd.describe_device.device_descriptor_addr =\n+\t\t\t\t\tcpu_to_be64(descriptor_dma_mem.pa);\n+\tcmd.describe_device.device_descriptor_version =\n+\t\t\tcpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);\n+\tcmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);\n+\n+\terr = gve_adminq_execute_cmd(priv, &cmd);\n+\tif (err)\n+\t\tgoto free_device_descriptor;\n+\n+\terr = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,\n+\t\t\t\t\t &dev_op_gqi_qpl, &dev_op_dqo_rda,\n+\t\t\t\t\t &dev_op_jumbo_frames);\n+\tif (err)\n+\t\tgoto free_device_descriptor;\n+\n+\t/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format\n+\t * is not set to GqiRda, choose the queue format in a priority order:\n+\t * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.\n+\t */\n+\tif (dev_op_dqo_rda) {\n+\t\tpriv->queue_format = GVE_DQO_RDA_FORMAT;\n+\t\tPMD_DRV_LOG(INFO, \"Driver is running with DQO RDA queue format.\");\n+\t\tsupported_features_mask =\n+\t\t\tbe32_to_cpu(dev_op_dqo_rda->supported_features_mask);\n+\t} else if (dev_op_gqi_rda) {\n+\t\tpriv->queue_format = GVE_GQI_RDA_FORMAT;\n+\t\tPMD_DRV_LOG(INFO, \"Driver is running with GQI RDA queue format.\");\n+\t\tsupported_features_mask =\n+\t\t\tbe32_to_cpu(dev_op_gqi_rda->supported_features_mask);\n+\t} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {\n+\t\tPMD_DRV_LOG(INFO, \"Driver is running with GQI RDA queue format.\");\n+\t} else {\n+\t\tpriv->queue_format = GVE_GQI_QPL_FORMAT;\n+\t\tif (dev_op_gqi_qpl)\n+\t\t\tsupported_features_mask =\n+\t\t\t\tbe32_to_cpu(dev_op_gqi_qpl->supported_features_mask);\n+\t\tPMD_DRV_LOG(INFO, \"Driver is running with GQI QPL queue format.\");\n+\t}\n+\tif (gve_is_gqi(priv)) {\n+\t\terr = gve_set_desc_cnt(priv, descriptor);\n+\t} else {\n+\t\t/* DQO supports LRO. 
*/\n+\t\terr = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);\n+\t}\n+\tif (err)\n+\t\tgoto free_device_descriptor;\n+\n+\tpriv->max_registered_pages =\n+\t\t\t\tbe64_to_cpu(descriptor->max_registered_pages);\n+\tmtu = be16_to_cpu(descriptor->mtu);\n+\tif (mtu < ETH_MIN_MTU) {\n+\t\tPMD_DRV_LOG(ERR, \"MTU %d below minimum MTU\", mtu);\n+\t\terr = -EINVAL;\n+\t\tgoto free_device_descriptor;\n+\t}\n+\tpriv->max_mtu = mtu;\n+\tpriv->num_event_counters = be16_to_cpu(descriptor->counters);\n+\trte_memcpy(priv->dev_addr.addr_bytes, descriptor->mac, ETH_ALEN);\n+\tmac = descriptor->mac;\n+\tPMD_DRV_LOG(INFO, \"MAC addr: %02x:%02x:%02x:%02x:%02x:%02x\",\n+\t\t    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);\n+\tpriv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);\n+\tpriv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);\n+\n+\tif (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d\",\n+\t\t\t    priv->rx_data_slot_cnt);\n+\t\tpriv->rx_desc_cnt = priv->rx_data_slot_cnt;\n+\t}\n+\tpriv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);\n+\n+\tgve_enable_supported_features(priv, supported_features_mask,\n+\t\t\t\t      dev_op_jumbo_frames);\n+\n+free_device_descriptor:\n+\tgve_free_dma_mem(&descriptor_dma_mem);\n+\treturn err;\n+}\n+\n+int gve_adminq_register_page_list(struct gve_priv *priv,\n+\t\t\t\t  struct gve_queue_page_list *qpl)\n+{\n+\tstruct gve_dma_mem page_list_dma_mem;\n+\tu32 num_entries = qpl->num_entries;\n+\tu32 size = num_entries * sizeof(qpl->page_buses[0]);\n+\tunion gve_adminq_command cmd;\n+\t__be64 *page_list;\n+\tint err;\n+\tu32 i;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tpage_list = gve_alloc_dma_mem(&page_list_dma_mem, size);\n+\tif (!page_list)\n+\t\treturn -ENOMEM;\n+\n+\tfor (i = 0; i < num_entries; i++)\n+\t\tpage_list[i] = cpu_to_be64(qpl->page_buses[i]);\n+\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);\n+\tcmd.reg_page_list = (struct gve_adminq_register_page_list) {\n+\t\t.page_list_id = cpu_to_be32(qpl->id),\n+\t\t.num_pages = cpu_to_be32(num_entries),\n+\t\t.page_address_list_addr = cpu_to_be64(page_list_dma_mem.pa),\n+\t};\n+\n+\terr = gve_adminq_execute_cmd(priv, &cmd);\n+\tgve_free_dma_mem(&page_list_dma_mem);\n+\treturn err;\n+}\n+\n+int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)\n+{\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);\n+\tcmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {\n+\t\t.page_list_id = cpu_to_be32(page_list_id),\n+\t};\n+\n+\treturn gve_adminq_execute_cmd(priv, &cmd);\n+}\n+\n+int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)\n+{\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);\n+\tcmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {\n+\t\t.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),\n+\t\t.parameter_value = cpu_to_be64(mtu),\n+\t};\n+\n+\treturn gve_adminq_execute_cmd(priv, &cmd);\n+}\n+\n+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,\n+\t\t\t    dma_addr_t stats_report_addr, u64 interval)\n+{\n+\tunion gve_adminq_command cmd;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);\n+\tcmd.report_stats = (struct gve_adminq_report_stats) 
{\n+\t\t.stats_report_len = cpu_to_be64(stats_report_len),\n+\t\t.stats_report_addr = cpu_to_be64(stats_report_addr),\n+\t\t.interval = cpu_to_be64(interval),\n+\t};\n+\n+\treturn gve_adminq_execute_cmd(priv, &cmd);\n+}\n+\n+int gve_adminq_report_link_speed(struct gve_priv *priv)\n+{\n+\tstruct gve_dma_mem link_speed_region_dma_mem;\n+\tunion gve_adminq_command gvnic_cmd;\n+\tu64 *link_speed_region;\n+\tint err;\n+\n+\tlink_speed_region = gve_alloc_dma_mem(&link_speed_region_dma_mem,\n+\t\t\t\t\t      sizeof(*link_speed_region));\n+\n+\tif (!link_speed_region)\n+\t\treturn -ENOMEM;\n+\n+\tmemset(&gvnic_cmd, 0, sizeof(gvnic_cmd));\n+\tgvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);\n+\tgvnic_cmd.report_link_speed.link_speed_address =\n+\t\tcpu_to_be64(link_speed_region_dma_mem.pa);\n+\n+\terr = gve_adminq_execute_cmd(priv, &gvnic_cmd);\n+\n+\tpriv->link_speed = be64_to_cpu(*link_speed_region);\n+\tgve_free_dma_mem(&link_speed_region_dma_mem);\n+\treturn err;\n+}\n+\n+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,\n+\t\t\t\t struct gve_ptype_lut *ptype_lut)\n+{\n+\tstruct gve_dma_mem ptype_map_dma_mem;\n+\tstruct gve_ptype_map *ptype_map;\n+\tunion gve_adminq_command cmd;\n+\tint err = 0;\n+\tint i;\n+\n+\tmemset(&cmd, 0, sizeof(cmd));\n+\tptype_map = gve_alloc_dma_mem(&ptype_map_dma_mem, sizeof(*ptype_map));\n+\tif (!ptype_map)\n+\t\treturn -ENOMEM;\n+\n+\tcmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);\n+\tcmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {\n+\t\t.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),\n+\t\t.ptype_map_addr = cpu_to_be64(ptype_map_dma_mem.pa),\n+\t};\n+\n+\terr = gve_adminq_execute_cmd(priv, &cmd);\n+\tif (err)\n+\t\tgoto err;\n+\n+\t/* Populate ptype_lut. */\n+\tfor (i = 0; i < GVE_NUM_PTYPES; i++) {\n+\t\tptype_lut->ptypes[i].l3_type =\n+\t\t\tptype_map->ptypes[i].l3_type;\n+\t\tptype_lut->ptypes[i].l4_type =\n+\t\t\tptype_map->ptypes[i].l4_type;\n+\t}\n+err:\n+\tgve_free_dma_mem(&ptype_map_dma_mem);\n+\treturn err;\n+}\ndiff --git a/drivers/net/gve/base/gve_adminq.h b/drivers/net/gve/base/gve_adminq.h\nnew file mode 100644\nindex 0000000000..c7114cc883\n--- /dev/null\n+++ b/drivers/net/gve/base/gve_adminq.h\n@@ -0,0 +1,381 @@\n+/* SPDX-License-Identifier: MIT\n+ * Google Virtual Ethernet (gve) driver\n+ * Version: 1.3.0\n+ * Copyright (C) 2015-2022 Google, Inc.\n+ * Copyright(C) 2022 Intel Corporation\n+ */\n+\n+#ifndef _GVE_ADMINQ_H\n+#define _GVE_ADMINQ_H\n+\n+/* Admin queue opcodes */\n+enum gve_adminq_opcodes {\n+\tGVE_ADMINQ_DESCRIBE_DEVICE\t\t= 0x1,\n+\tGVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES\t= 0x2,\n+\tGVE_ADMINQ_REGISTER_PAGE_LIST\t\t= 0x3,\n+\tGVE_ADMINQ_UNREGISTER_PAGE_LIST\t\t= 0x4,\n+\tGVE_ADMINQ_CREATE_TX_QUEUE\t\t= 0x5,\n+\tGVE_ADMINQ_CREATE_RX_QUEUE\t\t= 0x6,\n+\tGVE_ADMINQ_DESTROY_TX_QUEUE\t\t= 0x7,\n+\tGVE_ADMINQ_DESTROY_RX_QUEUE\t\t= 0x8,\n+\tGVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES\t= 0x9,\n+\tGVE_ADMINQ_SET_DRIVER_PARAMETER\t\t= 0xB,\n+\tGVE_ADMINQ_REPORT_STATS\t\t\t= 0xC,\n+\tGVE_ADMINQ_REPORT_LINK_SPEED\t\t= 0xD,\n+\tGVE_ADMINQ_GET_PTYPE_MAP\t\t= 0xE,\n+};\n+\n+/* Admin queue status codes */\n+enum gve_adminq_statuses {\n+\tGVE_ADMINQ_COMMAND_UNSET\t\t\t= 0x0,\n+\tGVE_ADMINQ_COMMAND_PASSED\t\t\t= 0x1,\n+\tGVE_ADMINQ_COMMAND_ERROR_ABORTED\t\t= 0xFFFFFFF0,\n+\tGVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS\t\t= 0xFFFFFFF1,\n+\tGVE_ADMINQ_COMMAND_ERROR_CANCELLED\t\t= 0xFFFFFFF2,\n+\tGVE_ADMINQ_COMMAND_ERROR_DATALOSS\t\t= 0xFFFFFFF3,\n+\tGVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED\t= 
0xFFFFFFF4,\n+\tGVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION\t= 0xFFFFFFF5,\n+\tGVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR\t\t= 0xFFFFFFF6,\n+\tGVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT\t= 0xFFFFFFF7,\n+\tGVE_ADMINQ_COMMAND_ERROR_NOT_FOUND\t\t= 0xFFFFFFF8,\n+\tGVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE\t\t= 0xFFFFFFF9,\n+\tGVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED\t= 0xFFFFFFFA,\n+\tGVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED\t= 0xFFFFFFFB,\n+\tGVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED\t= 0xFFFFFFFC,\n+\tGVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE\t\t= 0xFFFFFFFD,\n+\tGVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED\t\t= 0xFFFFFFFE,\n+\tGVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR\t\t= 0xFFFFFFFF,\n+};\n+\n+#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1\n+\n+/* All AdminQ command structs should be naturally packed.\n+ * GVE_CHECK_STRUCT/UNION_LEN will check struct/union length and throw\n+ * error at compile time when the size is not correct.\n+ */\n+\n+struct gve_adminq_describe_device {\n+\t__be64 device_descriptor_addr;\n+\t__be32 device_descriptor_version;\n+\t__be32 available_length;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(16, gve_adminq_describe_device);\n+\n+struct gve_device_descriptor {\n+\t__be64 max_registered_pages;\n+\t__be16 reserved1;\n+\t__be16 tx_queue_entries;\n+\t__be16 rx_queue_entries;\n+\t__be16 default_num_queues;\n+\t__be16 mtu;\n+\t__be16 counters;\n+\t__be16 tx_pages_per_qpl;\n+\t__be16 rx_pages_per_qpl;\n+\tu8  mac[ETH_ALEN];\n+\t__be16 num_device_options;\n+\t__be16 total_length;\n+\tu8  reserved2[6];\n+};\n+\n+GVE_CHECK_STRUCT_LEN(40, gve_device_descriptor);\n+\n+struct gve_device_option {\n+\t__be16 option_id;\n+\t__be16 option_length;\n+\t__be32 required_features_mask;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(8, gve_device_option);\n+\n+struct gve_device_option_gqi_rda {\n+\t__be32 supported_features_mask;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(4, gve_device_option_gqi_rda);\n+\n+struct gve_device_option_gqi_qpl {\n+\t__be32 supported_features_mask;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(4, gve_device_option_gqi_qpl);\n+\n+struct gve_device_option_dqo_rda {\n+\t__be32 supported_features_mask;\n+\t__be16 tx_comp_ring_entries;\n+\t__be16 rx_buff_ring_entries;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(8, gve_device_option_dqo_rda);\n+\n+struct gve_device_option_jumbo_frames {\n+\t__be32 supported_features_mask;\n+\t__be16 max_mtu;\n+\tu8 padding[2];\n+};\n+\n+GVE_CHECK_STRUCT_LEN(8, gve_device_option_jumbo_frames);\n+\n+/* Terminology:\n+ *\n+ * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA\n+ *       mapped and read/updated by the device.\n+ *\n+ * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with\n+ *       the device for read/write and data is copied from/to SKBs.\n+ */\n+enum gve_dev_opt_id {\n+\tGVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,\n+\tGVE_DEV_OPT_ID_GQI_RDA = 0x2,\n+\tGVE_DEV_OPT_ID_GQI_QPL = 0x3,\n+\tGVE_DEV_OPT_ID_DQO_RDA = 0x4,\n+\tGVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,\n+};\n+\n+enum gve_dev_opt_req_feat_mask {\n+\tGVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,\n+\tGVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,\n+\tGVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,\n+\tGVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,\n+\tGVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,\n+};\n+\n+enum gve_sup_feature_mask {\n+\tGVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,\n+};\n+\n+#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0\n+\n+struct gve_adminq_configure_device_resources {\n+\t__be64 counter_array;\n+\t__be64 irq_db_addr;\n+\t__be32 num_counters;\n+\t__be32 num_irq_dbs;\n+\t__be32 irq_db_stride;\n+\t__be32 
ntfy_blk_msix_base_idx;\n+\tu8 queue_format;\n+\tu8 padding[7];\n+};\n+\n+GVE_CHECK_STRUCT_LEN(40, gve_adminq_configure_device_resources);\n+\n+struct gve_adminq_register_page_list {\n+\t__be32 page_list_id;\n+\t__be32 num_pages;\n+\t__be64 page_address_list_addr;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(16, gve_adminq_register_page_list);\n+\n+struct gve_adminq_unregister_page_list {\n+\t__be32 page_list_id;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(4, gve_adminq_unregister_page_list);\n+\n+#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF\n+\n+struct gve_adminq_create_tx_queue {\n+\t__be32 queue_id;\n+\t__be32 reserved;\n+\t__be64 queue_resources_addr;\n+\t__be64 tx_ring_addr;\n+\t__be32 queue_page_list_id;\n+\t__be32 ntfy_id;\n+\t__be64 tx_comp_ring_addr;\n+\t__be16 tx_ring_size;\n+\t__be16 tx_comp_ring_size;\n+\tu8 padding[4];\n+};\n+\n+GVE_CHECK_STRUCT_LEN(48, gve_adminq_create_tx_queue);\n+\n+struct gve_adminq_create_rx_queue {\n+\t__be32 queue_id;\n+\t__be32 index;\n+\t__be32 reserved;\n+\t__be32 ntfy_id;\n+\t__be64 queue_resources_addr;\n+\t__be64 rx_desc_ring_addr;\n+\t__be64 rx_data_ring_addr;\n+\t__be32 queue_page_list_id;\n+\t__be16 rx_ring_size;\n+\t__be16 packet_buffer_size;\n+\t__be16 rx_buff_ring_size;\n+\tu8 enable_rsc;\n+\tu8 padding[5];\n+};\n+\n+GVE_CHECK_STRUCT_LEN(56, gve_adminq_create_rx_queue);\n+\n+/* Queue resources that are shared with the device */\n+struct gve_queue_resources {\n+\tunion {\n+\t\tstruct {\n+\t\t\t__be32 db_index;\t/* Device -> Guest */\n+\t\t\t__be32 counter_index;\t/* Device -> Guest */\n+\t\t};\n+\t\tu8 reserved[64];\n+\t};\n+};\n+\n+GVE_CHECK_STRUCT_LEN(64, gve_queue_resources);\n+\n+struct gve_adminq_destroy_tx_queue {\n+\t__be32 queue_id;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(4, gve_adminq_destroy_tx_queue);\n+\n+struct gve_adminq_destroy_rx_queue {\n+\t__be32 queue_id;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(4, gve_adminq_destroy_rx_queue);\n+\n+/* GVE Set Driver Parameter Types */\n+enum gve_set_driver_param_types {\n+\tGVE_SET_PARAM_MTU\t= 0x1,\n+};\n+\n+struct gve_adminq_set_driver_parameter {\n+\t__be32 parameter_type;\n+\tu8 reserved[4];\n+\t__be64 parameter_value;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(16, gve_adminq_set_driver_parameter);\n+\n+struct gve_adminq_report_stats {\n+\t__be64 stats_report_len;\n+\t__be64 stats_report_addr;\n+\t__be64 interval;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(24, gve_adminq_report_stats);\n+\n+struct gve_adminq_report_link_speed {\n+\t__be64 link_speed_address;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(8, gve_adminq_report_link_speed);\n+\n+struct stats {\n+\t__be32 stat_name;\n+\t__be32 queue_id;\n+\t__be64 value;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(16, stats);\n+\n+struct gve_stats_report {\n+\t__be64 written_count;\n+\tstruct stats stats[];\n+};\n+\n+GVE_CHECK_STRUCT_LEN(8, gve_stats_report);\n+\n+enum gve_stat_names {\n+\t/* stats from gve */\n+\tTX_WAKE_CNT\t\t\t= 1,\n+\tTX_STOP_CNT\t\t\t= 2,\n+\tTX_FRAMES_SENT\t\t\t= 3,\n+\tTX_BYTES_SENT\t\t\t= 4,\n+\tTX_LAST_COMPLETION_PROCESSED\t= 5,\n+\tRX_NEXT_EXPECTED_SEQUENCE\t= 6,\n+\tRX_BUFFERS_POSTED\t\t= 7,\n+\tTX_TIMEOUT_CNT\t\t\t= 8,\n+\t/* stats from NIC */\n+\tRX_QUEUE_DROP_CNT\t\t= 65,\n+\tRX_NO_BUFFERS_POSTED\t\t= 66,\n+\tRX_DROPS_PACKET_OVER_MRU\t= 67,\n+\tRX_DROPS_INVALID_CHECKSUM\t= 68,\n+};\n+\n+enum gve_l3_type {\n+\t/* Must be zero so zero initialized LUT is unknown. */\n+\tGVE_L3_TYPE_UNKNOWN = 0,\n+\tGVE_L3_TYPE_OTHER,\n+\tGVE_L3_TYPE_IPV4,\n+\tGVE_L3_TYPE_IPV6,\n+};\n+\n+enum gve_l4_type {\n+\t/* Must be zero so zero initialized LUT is unknown. 
*/\n+\tGVE_L4_TYPE_UNKNOWN = 0,\n+\tGVE_L4_TYPE_OTHER,\n+\tGVE_L4_TYPE_TCP,\n+\tGVE_L4_TYPE_UDP,\n+\tGVE_L4_TYPE_ICMP,\n+\tGVE_L4_TYPE_SCTP,\n+};\n+\n+/* These are control path types for PTYPE which are the same as the data path\n+ * types.\n+ */\n+struct gve_ptype_entry {\n+\tu8 l3_type;\n+\tu8 l4_type;\n+};\n+\n+struct gve_ptype_map {\n+\tstruct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */\n+};\n+\n+struct gve_adminq_get_ptype_map {\n+\t__be64 ptype_map_len;\n+\t__be64 ptype_map_addr;\n+};\n+\n+union gve_adminq_command {\n+\tstruct {\n+\t\t__be32 opcode;\n+\t\t__be32 status;\n+\t\tunion {\n+\t\t\tstruct gve_adminq_configure_device_resources\n+\t\t\t\t\t\tconfigure_device_resources;\n+\t\t\tstruct gve_adminq_create_tx_queue create_tx_queue;\n+\t\t\tstruct gve_adminq_create_rx_queue create_rx_queue;\n+\t\t\tstruct gve_adminq_destroy_tx_queue destroy_tx_queue;\n+\t\t\tstruct gve_adminq_destroy_rx_queue destroy_rx_queue;\n+\t\t\tstruct gve_adminq_describe_device describe_device;\n+\t\t\tstruct gve_adminq_register_page_list reg_page_list;\n+\t\t\tstruct gve_adminq_unregister_page_list unreg_page_list;\n+\t\t\tstruct gve_adminq_set_driver_parameter set_driver_param;\n+\t\t\tstruct gve_adminq_report_stats report_stats;\n+\t\t\tstruct gve_adminq_report_link_speed report_link_speed;\n+\t\t\tstruct gve_adminq_get_ptype_map get_ptype_map;\n+\t\t};\n+\t};\n+\tu8 reserved[64];\n+};\n+\n+GVE_CHECK_UNION_LEN(64, gve_adminq_command);\n+\n+int gve_adminq_alloc(struct gve_priv *priv);\n+void gve_adminq_free(struct gve_priv *priv);\n+void gve_adminq_release(struct gve_priv *priv);\n+int gve_adminq_describe_device(struct gve_priv *priv);\n+int gve_adminq_configure_device_resources(struct gve_priv *priv,\n+\t\t\t\t\t  dma_addr_t counter_array_bus_addr,\n+\t\t\t\t\t  u32 num_counters,\n+\t\t\t\t\t  dma_addr_t db_array_bus_addr,\n+\t\t\t\t\t  u32 num_ntfy_blks);\n+int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);\n+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);\n+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);\n+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);\n+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);\n+int gve_adminq_register_page_list(struct gve_priv *priv,\n+\t\t\t\t  struct gve_queue_page_list *qpl);\n+int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);\n+int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);\n+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,\n+\t\t\t    dma_addr_t stats_report_addr, u64 interval);\n+int gve_adminq_report_link_speed(struct gve_priv *priv);\n+\n+struct gve_ptype_lut;\n+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,\n+\t\t\t\t struct gve_ptype_lut *ptype_lut);\n+\n+#endif /* _GVE_ADMINQ_H */\ndiff --git a/drivers/net/gve/base/gve_desc.h b/drivers/net/gve/base/gve_desc.h\nnew file mode 100644\nindex 0000000000..358755b7e0\n--- /dev/null\n+++ b/drivers/net/gve/base/gve_desc.h\n@@ -0,0 +1,137 @@\n+/* SPDX-License-Identifier: MIT\n+ * Google Virtual Ethernet (gve) driver\n+ * Version: 1.3.0\n+ * Copyright (C) 2015-2022 Google, Inc.\n+ */\n+\n+/* GVE Transmit Descriptor formats */\n+\n+#ifndef _GVE_DESC_H_\n+#define _GVE_DESC_H_\n+\n+/* A note on seg_addrs\n+ *\n+ * Base addresses encoded in seg_addr are not assumed to be physical\n+ * addresses. The ring format assumes these come from some linear address\n+ * space. 
This could be physical memory, kernel virtual memory, user virtual\n+ * memory.\n+ * If raw dma addressing is not supported then gVNIC uses lists of registered\n+ * pages. Each queue is assumed to be associated with a single such linear\n+ * address space to ensure a consistent meaning for seg_addrs posted to its\n+ * rings.\n+ */\n+\n+struct gve_tx_pkt_desc {\n+\tu8\ttype_flags;  /* desc type is lower 4 bits, flags upper */\n+\tu8\tl4_csum_offset;  /* relative offset of L4 csum word */\n+\tu8\tl4_hdr_offset;  /* Offset of start of L4 headers in packet */\n+\tu8\tdesc_cnt;  /* Total descriptors for this packet */\n+\t__be16\tlen;  /* Total length of this packet (in bytes) */\n+\t__be16\tseg_len;  /* Length of this descriptor's segment */\n+\t__be64\tseg_addr;  /* Base address (see note) of this segment */\n+} __packed;\n+\n+struct gve_tx_mtd_desc {\n+\tu8      type_flags;     /* type is lower 4 bits, subtype upper  */\n+\tu8      path_state;     /* state is lower 4 bits, hash type upper */\n+\t__be16  reserved0;\n+\t__be32  path_hash;\n+\t__be64  reserved1;\n+} __packed;\n+\n+struct gve_tx_seg_desc {\n+\tu8\ttype_flags;\t/* type is lower 4 bits, flags upper\t*/\n+\tu8\tl3_offset;\t/* TSO: 2 byte units to start of IPH\t*/\n+\t__be16\treserved;\n+\t__be16\tmss;\t\t/* TSO MSS\t\t\t\t*/\n+\t__be16\tseg_len;\n+\t__be64\tseg_addr;\n+} __packed;\n+\n+/* GVE Transmit Descriptor Types */\n+#define\tGVE_TXD_STD\t\t(0x0 << 4) /* Std with Host Address\t*/\n+#define\tGVE_TXD_TSO\t\t(0x1 << 4) /* TSO with Host Address\t*/\n+#define\tGVE_TXD_SEG\t\t(0x2 << 4) /* Seg with Host Address\t*/\n+#define\tGVE_TXD_MTD\t\t(0x3 << 4) /* Metadata\t\t\t*/\n+\n+/* GVE Transmit Descriptor Flags for Std Pkts */\n+#define\tGVE_TXF_L4CSUM\tBIT(0)\t/* Need csum offload */\n+#define\tGVE_TXF_TSTAMP\tBIT(2)\t/* Timestamp required */\n+\n+/* GVE Transmit Descriptor Flags for TSO Segs */\n+#define\tGVE_TXSF_IPV6\tBIT(1)\t/* IPv6 TSO */\n+\n+/* GVE Transmit Descriptor Options for MTD Segs */\n+#define GVE_MTD_SUBTYPE_PATH\t\t0\n+\n+#define GVE_MTD_PATH_STATE_DEFAULT\t0\n+#define GVE_MTD_PATH_STATE_TIMEOUT\t1\n+#define GVE_MTD_PATH_STATE_CONGESTION\t2\n+#define GVE_MTD_PATH_STATE_RETRANSMIT\t3\n+\n+#define GVE_MTD_PATH_HASH_NONE         (0x0 << 4)\n+#define GVE_MTD_PATH_HASH_L4           (0x1 << 4)\n+\n+/* GVE Receive Packet Descriptor */\n+/* The start of an ethernet packet comes 2 bytes into the rx buffer.\n+ * gVNIC adds this padding so that both the DMA and the L3/4 protocol header\n+ * access is aligned.\n+ */\n+#define GVE_RX_PAD 2\n+\n+struct gve_rx_desc {\n+\tu8\tpadding[48];\n+\t__be32\trss_hash;  /* Receive-side scaling hash (Toeplitz for gVNIC) */\n+\t__be16\tmss;\n+\t__be16\treserved;  /* Reserved to zero */\n+\tu8\thdr_len;  /* Header length (L2-L4) including padding */\n+\tu8\thdr_off;  /* 64-byte-scaled offset into RX_DATA entry */\n+\t__sum16\tcsum;  /* 1's-complement partial checksum of L3+ bytes */\n+\t__be16\tlen;  /* Length of the received packet */\n+\t__be16\tflags_seq;  /* Flags [15:3] and sequence number [2:0] (1-7) */\n+} __packed;\n+GVE_CHECK_STRUCT_LEN(64, gve_rx_desc);\n+\n+/* If the device supports raw dma addressing then the addr in data slot is\n+ * the dma address of the buffer.\n+ * If the device only supports registered segments then the addr is a byte\n+ * offset into the registered segment (an ordered list of pages) where the\n+ * buffer is.\n+ */\n+union gve_rx_data_slot {\n+\t__be64 qpl_offset;\n+\t__be64 addr;\n+};\n+\n+/* GVE Receive Packet Descriptor Seq No */\n+#define GVE_SEQNO(x) 
(be16_to_cpu(x) & 0x7)\n+\n+/* GVE Receive Packet Descriptor Flags */\n+#define GVE_RXFLG(x)\tcpu_to_be16(1 << (3 + (x)))\n+#define\tGVE_RXF_FRAG\t\tGVE_RXFLG(3)\t/* IP Fragment\t\t\t*/\n+#define\tGVE_RXF_IPV4\t\tGVE_RXFLG(4)\t/* IPv4\t\t\t\t*/\n+#define\tGVE_RXF_IPV6\t\tGVE_RXFLG(5)\t/* IPv6\t\t\t\t*/\n+#define\tGVE_RXF_TCP\t\tGVE_RXFLG(6)\t/* TCP Packet\t\t\t*/\n+#define\tGVE_RXF_UDP\t\tGVE_RXFLG(7)\t/* UDP Packet\t\t\t*/\n+#define\tGVE_RXF_ERR\t\tGVE_RXFLG(8)\t/* Packet Error Detected\t*/\n+#define\tGVE_RXF_PKT_CONT\tGVE_RXFLG(10)\t/* Multi Fragment RX packet\t*/\n+\n+/* GVE IRQ */\n+#define GVE_IRQ_ACK\tBIT(31)\n+#define GVE_IRQ_MASK\tBIT(30)\n+#define GVE_IRQ_EVENT\tBIT(29)\n+\n+static inline bool gve_needs_rss(__be16 flag)\n+{\n+\tif (flag & GVE_RXF_FRAG)\n+\t\treturn false;\n+\tif (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))\n+\t\treturn true;\n+\treturn false;\n+}\n+\n+static inline u8 gve_next_seqno(u8 seq)\n+{\n+\treturn (seq + 1) == 8 ? 1 : seq + 1;\n+}\n+#endif /* _GVE_DESC_H_ */\ndiff --git a/drivers/net/gve/base/gve_desc_dqo.h b/drivers/net/gve/base/gve_desc_dqo.h\nnew file mode 100644\nindex 0000000000..0d533abcd1\n--- /dev/null\n+++ b/drivers/net/gve/base/gve_desc_dqo.h\n@@ -0,0 +1,254 @@\n+/* SPDX-License-Identifier: MIT\n+ * Google Virtual Ethernet (gve) driver\n+ * Version: 1.3.0\n+ * Copyright (C) 2015-2022 Google, Inc.\n+ */\n+\n+/* GVE DQO Descriptor formats */\n+\n+#ifndef _GVE_DESC_DQO_H_\n+#define _GVE_DESC_DQO_H_\n+\n+#define GVE_TX_MAX_HDR_SIZE_DQO 255\n+#define GVE_TX_MIN_TSO_MSS_DQO 88\n+\n+#ifndef __LITTLE_ENDIAN_BITFIELD\n+#error \"Only little endian supported\"\n+#endif\n+\n+/* Basic TX descriptor (DTYPE 0x0C) */\n+struct gve_tx_pkt_desc_dqo {\n+\t__le64 buf_addr;\n+\n+\t/* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */\n+\tu8 dtype: 5;\n+\n+\t/* Denotes the last descriptor of a packet. */\n+\tu8 end_of_packet: 1;\n+\tu8 checksum_offload_enable: 1;\n+\n+\t/* If set, will generate a descriptor completion for this descriptor. */\n+\tu8 report_event: 1;\n+\tu8 reserved0;\n+\t__le16 reserved1;\n+\n+\t/* The TX completion associated with this packet will contain this tag.\n+\t */\n+\t__le16 compl_tag;\n+\tu16 buf_size: 14;\n+\tu16 reserved2: 2;\n+} __packed;\n+GVE_CHECK_STRUCT_LEN(16, gve_tx_pkt_desc_dqo);\n+\n+#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc\n+#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)\n+\n+/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */\n+#define GVE_TX_MAX_DATA_DESCS 10\n+\n+/* Min gap between tail and head to avoid cacheline overlap */\n+#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4\n+\n+/* \"report_event\" on TX packet descriptors may only be reported on the last\n+ * descriptor of a TX packet, and they must be spaced apart with at least this\n+ * value.\n+ */\n+#define GVE_TX_MIN_RE_INTERVAL 32\n+\n+struct gve_tx_context_cmd_dtype {\n+\tu8 dtype: 5;\n+\tu8 tso: 1;\n+\tu8 reserved1: 2;\n+\n+\tu8 reserved2;\n+};\n+\n+GVE_CHECK_STRUCT_LEN(2, gve_tx_context_cmd_dtype);\n+\n+/* TX Native TSO Context DTYPE (0x05)\n+ *\n+ * \"flex\" fields allow the driver to send additional packet context to HW.\n+ */\n+struct gve_tx_tso_context_desc_dqo {\n+\t/* The L4 payload bytes that should be segmented. */\n+\tu32 tso_total_len: 24;\n+\tu32 flex10: 8;\n+\n+\t/* Max segment size in TSO excluding headers. 
*/\n+\tu16 mss: 14;\n+\tu16 reserved: 2;\n+\n+\tu8 header_len; /* Header length to use for TSO offload */\n+\tu8 flex11;\n+\tstruct gve_tx_context_cmd_dtype cmd_dtype;\n+\tu8 flex0;\n+\tu8 flex5;\n+\tu8 flex6;\n+\tu8 flex7;\n+\tu8 flex8;\n+\tu8 flex9;\n+} __packed;\n+GVE_CHECK_STRUCT_LEN(16, gve_tx_tso_context_desc_dqo);\n+\n+#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5\n+\n+/* General context descriptor for sending metadata. */\n+struct gve_tx_general_context_desc_dqo {\n+\tu8 flex4;\n+\tu8 flex5;\n+\tu8 flex6;\n+\tu8 flex7;\n+\tu8 flex8;\n+\tu8 flex9;\n+\tu8 flex10;\n+\tu8 flex11;\n+\tstruct gve_tx_context_cmd_dtype cmd_dtype;\n+\tu16 reserved;\n+\tu8 flex0;\n+\tu8 flex1;\n+\tu8 flex2;\n+\tu8 flex3;\n+} __packed;\n+GVE_CHECK_STRUCT_LEN(16, gve_tx_general_context_desc_dqo);\n+\n+#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4\n+\n+/* Logical structure of metadata which is packed into context descriptor flex\n+ * fields.\n+ */\n+struct gve_tx_metadata_dqo {\n+\tunion {\n+\t\tstruct {\n+\t\t\tu8 version;\n+\n+\t\t\t/* If `skb->l4_hash` is set, this value should be\n+\t\t\t * derived from `skb->hash`.\n+\t\t\t *\n+\t\t\t * A zero value means no l4_hash was associated with the\n+\t\t\t * skb.\n+\t\t\t */\n+\t\t\tu16 path_hash: 15;\n+\n+\t\t\t/* Should be set to 1 if the flow associated with the\n+\t\t\t * skb had a rehash from the TCP stack.\n+\t\t\t */\n+\t\t\tu16 rehash_event: 1;\n+\t\t}  __packed;\n+\t\tu8 bytes[12];\n+\t};\n+}  __packed;\n+GVE_CHECK_STRUCT_LEN(12, gve_tx_metadata_dqo);\n+\n+#define GVE_TX_METADATA_VERSION_DQO 0\n+\n+/* TX completion descriptor */\n+struct gve_tx_compl_desc {\n+\t/* For types 0-4 this is the TX queue ID associated with this\n+\t * completion.\n+\t */\n+\tu16 id: 11;\n+\n+\t/* See: GVE_COMPL_TYPE_DQO* */\n+\tu16 type: 3;\n+\tu16 reserved0: 1;\n+\n+\t/* Flipped by HW to notify the descriptor is populated. */\n+\tu16 generation: 1;\n+\tunion {\n+\t\t/* For descriptor completions, this is the last index fetched\n+\t\t * by HW + 1.\n+\t\t */\n+\t\t__le16 tx_head;\n+\n+\t\t/* For packet completions, this is the completion tag set on the\n+\t\t * TX packet descriptors.\n+\t\t */\n+\t\t__le16 completion_tag;\n+\t};\n+\t__le32 reserved1;\n+} __packed;\n+GVE_CHECK_STRUCT_LEN(8, gve_tx_compl_desc);\n+\n+#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */\n+#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */\n+#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */\n+#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */\n+\n+/* Descriptor to post buffers to HW on buffer queue. */\n+struct gve_rx_desc_dqo {\n+\t__le16 buf_id; /* ID returned in Rx completion descriptor */\n+\t__le16 reserved0;\n+\t__le32 reserved1;\n+\t__le64 buf_addr; /* DMA address of the buffer */\n+\t__le64 header_buf_addr;\n+\t__le64 reserved2;\n+} __packed;\n+GVE_CHECK_STRUCT_LEN(32, gve_rx_desc_dqo);\n+\n+/* Descriptor for HW to notify SW of new packets received on RX queue. */\n+struct gve_rx_compl_desc_dqo {\n+\t/* Must be 1 */\n+\tu8 rxdid: 4;\n+\tu8 reserved0: 4;\n+\n+\t/* Packet originated from this system rather than the network. */\n+\tu8 loopback: 1;\n+\t/* Set when IPv6 packet contains a destination options header or routing\n+\t * header.\n+\t */\n+\tu8 ipv6_ex_add: 1;\n+\t/* Invalid packet was received. 
*/\n+\tu8 rx_error: 1;\n+\tu8 reserved1: 5;\n+\n+\tu16 packet_type: 10;\n+\tu16 ip_hdr_err: 1;\n+\tu16 udp_len_err: 1;\n+\tu16 raw_cs_invalid: 1;\n+\tu16 reserved2: 3;\n+\n+\tu16 packet_len: 14;\n+\t/* Flipped by HW to notify the descriptor is populated. */\n+\tu16 generation: 1;\n+\t/* Should be zero. */\n+\tu16 buffer_queue_id: 1;\n+\n+\tu16 header_len: 10;\n+\tu16 rsc: 1;\n+\tu16 split_header: 1;\n+\tu16 reserved3: 4;\n+\n+\tu8 descriptor_done: 1;\n+\tu8 end_of_packet: 1;\n+\tu8 header_buffer_overflow: 1;\n+\tu8 l3_l4_processed: 1;\n+\tu8 csum_ip_err: 1;\n+\tu8 csum_l4_err: 1;\n+\tu8 csum_external_ip_err: 1;\n+\tu8 csum_external_udp_err: 1;\n+\n+\tu8 status_error1;\n+\n+\t__le16 reserved5;\n+\t__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */\n+\n+\tunion {\n+\t\t/* Packet checksum. */\n+\t\t__le16 raw_cs;\n+\t\t/* Segment length for RSC packets. */\n+\t\t__le16 rsc_seg_len;\n+\t};\n+\t__le32 hash;\n+\t__le32 reserved6;\n+\t__le64 reserved7;\n+} __packed;\n+\n+GVE_CHECK_STRUCT_LEN(32, gve_rx_compl_desc_dqo);\n+\n+/* Ringing the doorbell too often can hurt performance.\n+ *\n+ * HW requires this value to be at least 8.\n+ */\n+#define GVE_RX_BUF_THRESH_DQO 32\n+\n+#endif /* _GVE_DESC_DQO_H_ */\ndiff --git a/drivers/net/gve/base/gve_register.h b/drivers/net/gve/base/gve_register.h\nnew file mode 100644\nindex 0000000000..b65f336be2\n--- /dev/null\n+++ b/drivers/net/gve/base/gve_register.h\n@@ -0,0 +1,28 @@\n+/* SPDX-License-Identifier: MIT\n+ * Google Virtual Ethernet (gve) driver\n+ * Version: 1.3.0\n+ * Copyright (C) 2015-2022 Google, Inc.\n+ */\n+\n+#ifndef _GVE_REGISTER_H_\n+#define _GVE_REGISTER_H_\n+\n+/* Fixed Configuration Registers */\n+struct gve_registers {\n+\t__be32\tdevice_status;\n+\t__be32\tdriver_status;\n+\t__be32\tmax_tx_queues;\n+\t__be32\tmax_rx_queues;\n+\t__be32\tadminq_pfn;\n+\t__be32\tadminq_doorbell;\n+\t__be32\tadminq_event_counter;\n+\tu8\treserved[3];\n+\tu8\tdriver_version;\n+};\n+\n+enum gve_device_status_flags {\n+\tGVE_DEVICE_STATUS_RESET_MASK\t\t= BIT(1),\n+\tGVE_DEVICE_STATUS_LINK_STATUS_MASK\t= BIT(2),\n+\tGVE_DEVICE_STATUS_REPORT_STATS_MASK\t= BIT(3),\n+};\n+#endif /* _GVE_REGISTER_H_ */\n",
    "prefixes": [
        "v6",
        "1/8"
    ]
}