get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

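The endpoints above are standard Django REST Framework views, so the same resource can be read from a script instead of the browsable page. Below is a minimal sketch (Python standard library only; the local output file name is arbitrary and chosen here for illustration) that requests the JSON shown in the response that follows and then downloads the patch mbox, which is suitable for git am:

import json
import urllib.request

API_URL = "http://patches.dpdk.org/api/patches/14474/"

# Ask DRF for JSON explicitly rather than the browsable HTML renderer.
req = urllib.request.Request(API_URL, headers={"Accept": "application/json"})
with urllib.request.urlopen(req) as resp:
    patch = json.load(resp)

print(patch["name"])                   # "[dpdk-dev,v3,1/6] ena: update of ENA communication layer"
print(patch["state"], patch["check"])  # "accepted" "pending"

# The "mbox" field points at the raw patch; save a local copy.
with urllib.request.urlopen(patch["mbox"]) as resp, \
        open("ena-v3-1-of-6.mbox", "wb") as fh:
    fh.write(resp.read())
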
GET /api/patches/14474/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 14474,
    "url": "http://patches.dpdk.org/api/patches/14474/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1467299099-32498-2-git-send-email-jan@semihalf.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1467299099-32498-2-git-send-email-jan@semihalf.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1467299099-32498-2-git-send-email-jan@semihalf.com",
    "date": "2016-06-30T15:04:54",
    "name": "[dpdk-dev,v3,1/6] ena: update of ENA communication layer",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "b63e575a8cfb4d76b7c70830742b17c2061f45e8",
    "submitter": {
        "id": 421,
        "url": "http://patches.dpdk.org/api/people/421/?format=api",
        "name": "Jan Medala",
        "email": "jan@semihalf.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1467299099-32498-2-git-send-email-jan@semihalf.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/14474/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/14474/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id B2DAF2C47;\n\tThu, 30 Jun 2016 17:05:15 +0200 (CEST)",
            "from mail-lf0-f52.google.com (mail-lf0-f52.google.com\n\t[209.85.215.52]) by dpdk.org (Postfix) with ESMTP id A93A12C2E\n\tfor <dev@dpdk.org>; Thu, 30 Jun 2016 17:05:11 +0200 (CEST)",
            "by mail-lf0-f52.google.com with SMTP id l188so57585654lfe.2\n\tfor <dev@dpdk.org>; Thu, 30 Jun 2016 08:05:11 -0700 (PDT)",
            "from anpa-dpdk-2.lab.semihalf.com\n\t(31-172-191-173.noc.fibertech.net.pl. [31.172.191.173])\n\tby smtp.gmail.com with ESMTPSA id\n\t206sm1635339ljj.0.2016.06.30.08.05.08\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tThu, 30 Jun 2016 08:05:09 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=semihalf-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=cXaQ2Y4fFngPA5L/XClNJ3cW1VC1pgnBfSbR0YkkiEI=;\n\tb=z5q19tXiHRrC2SZjhM1OEUU4slRT4KTn2VAXwChtHbVminTV6Vxl8P9kAndF+OiFX+\n\tn+vcKc9ScgVpt5mcGXrca6ZukPDs4JsOV21bqH358ePw0LwZyV9aOGjFTa4V9yDI6Mqq\n\trAIPzLMZ5RJV+y+qbMI7XC7OWuRMJYKsQ8lcqOVlHwDkcJWcx/8YPG9ReAnemuZKsJTf\n\tY/jianRq+EAJHnvJeurwiAUFknZcascYEKyPeH+c7awDBTl3c6hDLYZ1W3JDoh/WoFJw\n\tWimmiwokiTrepC5cAw3iXxzpwTw9q3gXzEECoqwaI2D64R0TFUyeCwOOfy6ZXwlcC9iu\n\twApw==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=cXaQ2Y4fFngPA5L/XClNJ3cW1VC1pgnBfSbR0YkkiEI=;\n\tb=T14C9aMAmxBBxV+g6VclNeY0RCuD5Q+DGZ9de6D1gdR9m+oZODnvXl0GrryO6kxdkm\n\teYYCwNbg+4h6isAMB+HYbgZQ4QmlvolYlh6CCvkL4ffCaExt/p++LbRk9IP/oUnoc/On\n\tztufRhvfqKyaV/a1Vgvklpi/cC3TBXVkYVKOb+oY0kzdv13+JF5kUxEZ3I+U2MLgPDAr\n\t06S6XhYa81Jj6XCG61tXdGDSqlEI9X4C/hrwpZP2pOwjNUOsarphsU3NngBR/dNICGdh\n\tCtkMlxz6DxOHk7UdKommysTdhmrvi8FOn0DQv0VArNl4S5OypielSpKDYAzsRcCwpDZ7\n\tkvAA==",
        "X-Gm-Message-State": "ALyK8tLpXx37m3ntN6L0JqoSoY5fBzP6ougS9Nx8AiV8nNxkc1QHCRVTjPlIlGYKCZVNMw==",
        "X-Received": "by 10.25.160.205 with SMTP id j196mr5361489lfe.47.1467299109964; \n\tThu, 30 Jun 2016 08:05:09 -0700 (PDT)",
        "From": "Jan Medala <jan@semihalf.com>",
        "To": "dev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com, bruce.richardson@intel.com,\n\tJan Medala <jan@semihalf.com>, Alexander Matushevsky <matua@amazon.com>, \n\tJakub Palider <jpa@semihalf.com>",
        "Date": "Thu, 30 Jun 2016 17:04:54 +0200",
        "Message-Id": "<1467299099-32498-2-git-send-email-jan@semihalf.com>",
        "X-Mailer": "git-send-email 2.8.2",
        "In-Reply-To": "<1467299099-32498-1-git-send-email-jan@semihalf.com>",
        "References": "<1466510763-19569-6-git-send-email-jan@semihalf.com>\n\t<1467299099-32498-1-git-send-email-jan@semihalf.com>",
        "Subject": "[dpdk-dev] [PATCH v3 1/6] ena: update of ENA communication layer",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Synchronize ENA communication layer with latest ENA FW version.\n\nSigned-off-by: Alexander Matushevsky <matua@amazon.com>\nSigned-off-by: Jakub Palider <jpa@semihalf.com>\nSigned-off-by: Jan Medala <jan@semihalf.com>\n---\n drivers/net/ena/base/ena_com.c                  | 203 ++++++-----\n drivers/net/ena/base/ena_com.h                  |  82 +++--\n drivers/net/ena/base/ena_defs/ena_admin_defs.h  | 107 +-----\n drivers/net/ena/base/ena_defs/ena_eth_io_defs.h | 436 ++++++------------------\n drivers/net/ena/base/ena_defs/ena_gen_info.h    |   4 +-\n drivers/net/ena/base/ena_eth_com.c              |  32 +-\n drivers/net/ena/base/ena_eth_com.h              |  14 +\n drivers/net/ena/base/ena_plat_dpdk.h            |  15 +-\n drivers/net/ena/ena_ethdev.c                    |  51 ++-\n 9 files changed, 349 insertions(+), 595 deletions(-)",
    "diff": "diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c\nindex a21a951..b5b8cd9 100644\n--- a/drivers/net/ena/base/ena_com.c\n+++ b/drivers/net/ena/base/ena_com.c\n@@ -42,9 +42,6 @@\n #define ENA_ASYNC_QUEUE_DEPTH 4\n #define ENA_ADMIN_QUEUE_DEPTH 32\n \n-#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)\n-#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)\n-\n #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \\\n \t\tENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \\\n \t\t| (ENA_COMMON_SPEC_VERSION_MINOR))\n@@ -201,12 +198,16 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,\n static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,\n \t\t\t\t\t  u16 command_id, bool capture)\n {\n-\tENA_ASSERT(command_id < queue->q_depth,\n-\t\t   \"command id is larger than the queue size. cmd_id: %u queue size %d\\n\",\n-\t\t   command_id, queue->q_depth);\n+\tif (unlikely(command_id >= queue->q_depth)) {\n+\t\tena_trc_err(\"command id is larger than the queue size. cmd_id: %u queue size %d\\n\",\n+\t\t\t    command_id, queue->q_depth);\n+\t\treturn NULL;\n+\t}\n \n-\tENA_ASSERT(!(queue->comp_ctx[command_id].occupied && capture),\n-\t\t   \"Completion context is occupied\");\n+\tif (unlikely(queue->comp_ctx[command_id].occupied && capture)) {\n+\t\tena_trc_err(\"Completion context is occupied\\n\");\n+\t\treturn NULL;\n+\t}\n \n \tif (capture) {\n \t\tATOMIC32_INC(&queue->outstanding_cmds);\n@@ -290,7 +291,8 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)\n \n \tfor (i = 0; i < queue->q_depth; i++) {\n \t\tcomp_ctx = get_comp_ctxt(queue, i, false);\n-\t\tENA_WAIT_EVENT_INIT(comp_ctx->wait_event);\n+\t\tif (comp_ctx)\n+\t\t\tENA_WAIT_EVENT_INIT(comp_ctx->wait_event);\n \t}\n \n \treturn 0;\n@@ -315,16 +317,21 @@ ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,\n \t\t\t\t\t      cmd_size_in_bytes,\n \t\t\t\t\t      comp,\n \t\t\t\t\t      comp_size_in_bytes);\n+\tif (unlikely(IS_ERR(comp_ctx)))\n+\t\tadmin_queue->running_state = false;\n \tENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);\n \n \treturn comp_ctx;\n }\n \n static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,\n+\t\t\t      struct ena_com_create_io_ctx *ctx,\n \t\t\t      struct ena_com_io_sq *io_sq)\n {\n \tsize_t size;\n \n+\tENA_TOUCH(ctx);\n+\n \tmemset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));\n \n \tio_sq->desc_entry_size =\n@@ -357,10 +364,12 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,\n }\n \n static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,\n+\t\t\t      struct ena_com_create_io_ctx *ctx,\n \t\t\t      struct ena_com_io_cq *io_cq)\n {\n \tsize_t size;\n \n+\tENA_TOUCH(ctx);\n \tmemset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));\n \n \t/* Use the basic completion descriptor for Rx */\n@@ -399,6 +408,11 @@ ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,\n \t\tENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;\n \n \tcomp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);\n+\tif (unlikely(!comp_ctx)) {\n+\t\tena_trc_err(\"comp_ctx is NULL. 
Changing the admin queue running state\\n\");\n+\t\tadmin_queue->running_state = false;\n+\t\treturn;\n+\t}\n \n \tcomp_ctx->status = ENA_CMD_COMPLETED;\n \tcomp_ctx->comp_status = cqe->acq_common_descriptor.status;\n@@ -615,10 +629,12 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)\n \t\tgoto err;\n \t}\n \n-\tENA_ASSERT(read_resp->reg_off == offset,\n-\t\t   \"Invalid MMIO read return value\");\n-\n-\tret = read_resp->reg_val;\n+\tif (read_resp->reg_off != offset) {\n+\t\tena_trc_err(\"reading failed for wrong offset value\");\n+\t\tret = ENA_MMIO_READ_TIMEOUT;\n+\t} else {\n+\t\tret = read_resp->reg_val;\n+\t}\n err:\n \tENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);\n \n@@ -838,7 +854,7 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)\n \treturn 0;\n }\n \n-static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)\n+static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_rss *rss = &ena_dev->rss;\n \n@@ -849,7 +865,6 @@ static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)\n \t\t\t\t      rss->hash_key_dma_addr,\n \t\t\t\t      rss->hash_key_mem_handle);\n \trss->hash_key = NULL;\n-\treturn 0;\n }\n \n static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)\n@@ -862,10 +877,13 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)\n \t\t\t       rss->hash_ctrl_dma_addr,\n \t\t\t       rss->hash_ctrl_mem_handle);\n \n+\tif (unlikely(!rss->hash_ctrl))\n+\t\treturn ENA_COM_NO_MEM;\n+\n \treturn 0;\n }\n \n-static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)\n+static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_rss *rss = &ena_dev->rss;\n \n@@ -876,8 +894,6 @@ static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)\n \t\t\t\t      rss->hash_ctrl_dma_addr,\n \t\t\t\t      rss->hash_ctrl_mem_handle);\n \trss->hash_ctrl = NULL;\n-\n-\treturn 0;\n }\n \n static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,\n@@ -902,7 +918,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,\n \t\treturn ENA_COM_INVAL;\n \t}\n \n-\ttbl_size = (1 << log_size) *\n+\ttbl_size = (1ULL << log_size) *\n \t\tsizeof(struct ena_admin_rss_ind_table_entry);\n \n \tENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,\n@@ -913,7 +929,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,\n \tif (unlikely(!rss->rss_ind_tbl))\n \t\tgoto mem_err1;\n \n-\ttbl_size = (1 << log_size) * sizeof(u16);\n+\ttbl_size = (1ULL << log_size) * sizeof(u16);\n \trss->host_rss_ind_tbl =\n \t\tENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);\n \tif (unlikely(!rss->host_rss_ind_tbl))\n@@ -924,7 +940,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,\n \treturn 0;\n \n mem_err2:\n-\ttbl_size = (1 << log_size) *\n+\ttbl_size = (1ULL << log_size) *\n \t\tsizeof(struct ena_admin_rss_ind_table_entry);\n \n \tENA_MEM_FREE_COHERENT(ena_dev->dmadev,\n@@ -938,10 +954,10 @@ mem_err1:\n \treturn ENA_COM_NO_MEM;\n }\n \n-static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)\n+static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_rss *rss = &ena_dev->rss;\n-\tsize_t tbl_size = (1 << rss->tbl_log_size) *\n+\tsize_t tbl_size = (1ULL << rss->tbl_log_size) *\n \t\tsizeof(struct ena_admin_rss_ind_table_entry);\n \n \tif (rss->rss_ind_tbl)\n@@ -955,8 +971,6 @@ static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)\n \tif (rss->host_rss_ind_tbl)\n 
\t\tENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);\n \trss->host_rss_ind_tbl = NULL;\n-\n-\treturn 0;\n }\n \n static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,\n@@ -1059,17 +1073,18 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)\n \n static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)\n {\n-\tu16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { -1 };\n+\tu16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };\n \tstruct ena_rss *rss = &ena_dev->rss;\n-\tu16 idx, i;\n+\tu8 idx;\n+\tu16 i;\n \n \tfor (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)\n \t\tdev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;\n \n \tfor (i = 0; i < 1 << rss->tbl_log_size; i++) {\n-\t\tidx = rss->rss_ind_tbl[i].cq_idx;\n-\t\tif (idx > ENA_TOTAL_NUM_QUEUES)\n+\t\tif (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)\n \t\t\treturn ENA_COM_INVAL;\n+\t\tidx = (u8)rss->rss_ind_tbl[i].cq_idx;\n \n \t\tif (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)\n \t\t\treturn ENA_COM_INVAL;\n@@ -1097,7 +1112,7 @@ static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)\n \n static void\n ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,\n-\t\t\t\t     unsigned int intr_delay_resolution)\n+\t\t\t\t     u16 intr_delay_resolution)\n {\n \tstruct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;\n \tunsigned int i;\n@@ -1189,23 +1204,19 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,\n \t}\n \n \tio_cq->idx = cmd_completion.cq_idx;\n-\tio_cq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +\n-\t\tcmd_completion.cq_doorbell_offset);\n-\n-\tif (io_cq->q_depth != cmd_completion.cq_actual_depth) {\n-\t\tena_trc_err(\"completion actual queue size (%d) is differ from requested size (%d)\\n\",\n-\t\t\t    cmd_completion.cq_actual_depth, io_cq->q_depth);\n-\t\tena_com_destroy_io_cq(ena_dev, io_cq);\n-\t\treturn ENA_COM_NO_SPACE;\n-\t}\n \n \tio_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +\n-\t\tcmd_completion.cq_interrupt_unmask_register);\n+\t\tcmd_completion.cq_interrupt_unmask_register_offset);\n \n-\tif (cmd_completion.cq_head_db_offset)\n+\tif (cmd_completion.cq_head_db_register_offset)\n \t\tio_cq->cq_head_db_reg =\n \t\t\t(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +\n-\t\t\tcmd_completion.cq_head_db_offset);\n+\t\t\tcmd_completion.cq_head_db_register_offset);\n+\n+\tif (cmd_completion.numa_node_register_offset)\n+\t\tio_cq->numa_node_cfg_reg =\n+\t\t\t(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +\n+\t\t\tcmd_completion.numa_node_register_offset);\n \n \tena_trc_dbg(\"created cq[%u], depth[%u]\\n\", io_cq->idx, io_cq->q_depth);\n \n@@ -1239,6 +1250,9 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)\n \n \tfor (i = 0; i < admin_queue->q_depth; i++) {\n \t\tcomp_ctx = get_comp_ctxt(admin_queue, i, false);\n+\t\tif (unlikely(!comp_ctx))\n+\t\t\tbreak;\n+\n \t\tcomp_ctx->status = ENA_CMD_ABORTED;\n \n \t\tENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);\n@@ -1304,7 +1318,7 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)\n {\n \tu16 depth = ena_dev->aenq.q_depth;\n \n-\tENA_ASSERT(ena_dev->aenq.head == depth, \"Invliad AENQ state\\n\");\n+\tENA_ASSERT(ena_dev->aenq.head == depth, \"Invalid AENQ state\\n\");\n \n \t/* Init head_db to mark that all entries in the queue\n \t * are initially available\n@@ -1556,7 +1570,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,\n \n \tif (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {\n \t\tena_trc_err(\"Device 
isn't ready, abort com init\\n\");\n-\t\treturn -1;\n+\t\treturn ENA_COM_NO_DEVICE;\n \t}\n \n \tadmin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;\n@@ -1631,50 +1645,46 @@ error:\n }\n \n int ena_com_create_io_queue(struct ena_com_dev *ena_dev,\n-\t\t\t    u16 qid,\n-\t\t\t    enum queue_direction direction,\n-\t\t\t    enum ena_admin_placement_policy_type mem_queue_type,\n-\t\t\t    u32 msix_vector,\n-\t\t\t    u16 queue_size)\n+\t\t\t    struct ena_com_create_io_ctx *ctx)\n {\n \tstruct ena_com_io_sq *io_sq;\n \tstruct ena_com_io_cq *io_cq;\n \tint ret = 0;\n \n-\tif (qid >= ENA_TOTAL_NUM_QUEUES) {\n+\tif (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {\n \t\tena_trc_err(\"Qid (%d) is bigger than max num of queues (%d)\\n\",\n-\t\t\t    qid, ENA_TOTAL_NUM_QUEUES);\n+\t\t\t    ctx->qid, ENA_TOTAL_NUM_QUEUES);\n \t\treturn ENA_COM_INVAL;\n \t}\n \n-\tio_sq = &ena_dev->io_sq_queues[qid];\n-\tio_cq = &ena_dev->io_cq_queues[qid];\n+\tio_sq = &ena_dev->io_sq_queues[ctx->qid];\n+\tio_cq = &ena_dev->io_cq_queues[ctx->qid];\n \n \tmemset(io_sq, 0x0, sizeof(struct ena_com_io_sq));\n \tmemset(io_cq, 0x0, sizeof(struct ena_com_io_cq));\n \n \t/* Init CQ */\n-\tio_cq->q_depth = queue_size;\n-\tio_cq->direction = direction;\n-\tio_cq->qid = qid;\n+\tio_cq->q_depth = ctx->queue_size;\n+\tio_cq->direction = ctx->direction;\n+\tio_cq->qid = ctx->qid;\n \n-\tio_cq->msix_vector = msix_vector;\n+\tio_cq->msix_vector = ctx->msix_vector;\n \n-\tio_sq->q_depth = queue_size;\n-\tio_sq->direction = direction;\n-\tio_sq->qid = qid;\n+\tio_sq->q_depth = ctx->queue_size;\n+\tio_sq->direction = ctx->direction;\n+\tio_sq->qid = ctx->qid;\n \n-\tio_sq->mem_queue_type = mem_queue_type;\n+\tio_sq->mem_queue_type = ctx->mem_queue_type;\n \n-\tif (direction == ENA_COM_IO_QUEUE_DIRECTION_TX)\n+\tif (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)\n \t\t/* header length is limited to 8 bits */\n \t\tio_sq->tx_max_header_size =\n-\t\t\tENA_MIN16(ena_dev->tx_max_header_size, SZ_256);\n+\t\t\tENA_MIN32(ena_dev->tx_max_header_size, SZ_256);\n \n-\tret = ena_com_init_io_sq(ena_dev, io_sq);\n+\tret = ena_com_init_io_sq(ena_dev, ctx, io_sq);\n \tif (ret)\n \t\tgoto error;\n-\tret = ena_com_init_io_cq(ena_dev, io_cq);\n+\tret = ena_com_init_io_cq(ena_dev, ctx, io_cq);\n \tif (ret)\n \t\tgoto error;\n \n@@ -1840,22 +1850,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)\n \t\t\t+ ENA_REGS_AENQ_HEAD_DB_OFF);\n }\n \n-/* Sets the function Idx and Queue Idx to be used for\n- * get full statistics feature\n- */\n-int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,\n-\t\t\t\t\t  u32 func_queue)\n-{\n-\t/* Function & Queue is acquired from user in the following format :\n-\t * Bottom Half word:\tfunct\n-\t * Top Half Word:\tqueue\n-\t */\n-\tena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);\n-\tena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);\n-\n-\treturn 0;\n-}\n-\n int ena_com_dev_reset(struct ena_com_dev *ena_dev)\n {\n \tu32 stat, timeout, cap, reset_val;\n@@ -2195,7 +2189,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,\n \t\t*func = rss->hash_func;\n \n \tif (key)\n-\t\tmemcpy(key, hash_key->key, hash_key->keys_num << 2);\n+\t\tmemcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);\n \n \treturn 0;\n }\n@@ -2337,7 +2331,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,\n \tu16 supported_fields;\n \tint rc;\n \n-\tif (proto > ENA_ADMIN_RSS_PROTO_NUM) {\n+\tif (proto >= ENA_ADMIN_RSS_PROTO_NUM) {\n \t\tena_trc_err(\"Invalid proto num (%u)\\n\", 
proto);\n \t\treturn ENA_COM_INVAL;\n \t}\n@@ -2420,7 +2414,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)\n \t\treturn ret;\n \t}\n \n-\tcmd.control_buffer.length = (1 << rss->tbl_log_size) *\n+\tcmd.control_buffer.length = (1ULL << rss->tbl_log_size) *\n \t\tsizeof(struct ena_admin_rss_ind_table_entry);\n \n \tret = ena_com_execute_admin_command(admin_queue,\n@@ -2444,7 +2438,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)\n \tu32 tbl_size;\n \tint i, rc;\n \n-\ttbl_size = (1 << rss->tbl_log_size) *\n+\ttbl_size = (1ULL << rss->tbl_log_size) *\n \t\tsizeof(struct ena_admin_rss_ind_table_entry);\n \n \trc = ena_com_get_feature_ex(ena_dev, &get_resp,\n@@ -2496,22 +2490,18 @@ err_indr_tbl:\n \treturn rc;\n }\n \n-int ena_com_rss_destroy(struct ena_com_dev *ena_dev)\n+void ena_com_rss_destroy(struct ena_com_dev *ena_dev)\n {\n \tena_com_indirect_table_destroy(ena_dev);\n \tena_com_hash_key_destroy(ena_dev);\n \tena_com_hash_ctrl_destroy(ena_dev);\n \n \tmemset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));\n-\n-\treturn 0;\n }\n \n-int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev,\n-\t\t\t\t    u32 debug_area_size)\n+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_host_attribute *host_attr = &ena_dev->host_attr;\n-\tint rc;\n \n \tENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,\n \t\t\t       SZ_4K,\n@@ -2521,33 +2511,29 @@ int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev,\n \tif (unlikely(!host_attr->host_info))\n \t\treturn ENA_COM_NO_MEM;\n \n-\tif (debug_area_size) {\n+\treturn 0;\n+}\n+\n+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,\n+\t\t\t\tu32 debug_area_size) {\n+\tstruct ena_host_attribute *host_attr = &ena_dev->host_attr;\n+\n \t\tENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,\n \t\t\t\t       debug_area_size,\n \t\t\t\t       host_attr->debug_area_virt_addr,\n \t\t\t\t       host_attr->debug_area_dma_addr,\n \t\t\t\t       host_attr->debug_area_dma_handle);\n \t\tif (unlikely(!host_attr->debug_area_virt_addr)) {\n-\t\t\trc = ENA_COM_NO_MEM;\n-\t\t\tgoto err;\n-\t\t}\n+\t\t\thost_attr->debug_area_size = 0;\n+\t\t\treturn ENA_COM_NO_MEM;\n \t}\n \n \thost_attr->debug_area_size = debug_area_size;\n \n \treturn 0;\n-err:\n-\n-\tENA_MEM_FREE_COHERENT(ena_dev->dmadev,\n-\t\t\t      SZ_4K,\n-\t\t\t      host_attr->host_info,\n-\t\t\t      host_attr->host_info_dma_addr,\n-\t\t\t      host_attr->host_info_dma_handle);\n-\thost_attr->host_info = NULL;\n-\treturn rc;\n }\n \n-void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev)\n+void ena_com_delete_host_info(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_host_attribute *host_attr = &ena_dev->host_attr;\n \n@@ -2559,6 +2545,11 @@ void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev)\n \t\t\t\t      host_attr->host_info_dma_handle);\n \t\thost_attr->host_info = NULL;\n \t}\n+}\n+\n+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)\n+{\n+\tstruct ena_host_attribute *host_attr = &ena_dev->host_attr;\n \n \tif (host_attr->debug_area_virt_addr) {\n \t\tENA_MEM_FREE_COHERENT(ena_dev->dmadev,\n@@ -2677,7 +2668,7 @@ void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)\n int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_admin_get_feat_resp get_resp;\n-\tu32 delay_resolution;\n+\tu16 delay_resolution;\n \tint rc;\n \n \trc = ena_com_get_feature(ena_dev, &get_resp,\ndiff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h\nindex 
19e53ff..e534592 100644\n--- a/drivers/net/ena/base/ena_com.h\n+++ b/drivers/net/ena/base/ena_com.h\n@@ -120,8 +120,8 @@ struct ena_com_rx_buf_info {\n };\n \n struct ena_com_io_desc_addr {\n-\tvoid  __iomem *pbuf_dev_addr; /* LLQ address */\n-\tvoid  *virt_addr;\n+\tu8  __iomem *pbuf_dev_addr; /* LLQ address */\n+\tu8  *virt_addr;\n \tdma_addr_t phys_addr;\n \tena_mem_handle_t mem_handle;\n };\n@@ -138,13 +138,14 @@ struct ena_com_tx_meta {\n struct ena_com_io_cq {\n \tstruct ena_com_io_desc_addr cdesc_addr;\n \n-\tu32 __iomem *db_addr;\n-\n \t/* Interrupt unmask register */\n \tu32 __iomem *unmask_reg;\n \n \t/* The completion queue head doorbell register */\n-\tuint32_t __iomem *cq_head_db_reg;\n+\tu32 __iomem *cq_head_db_reg;\n+\n+\t/* numa configuration register (for TPH) */\n+\tu32 __iomem *numa_node_cfg_reg;\n \n \t/* The value to write to the above register to unmask\n \t * the interrupt of this queue\n@@ -189,7 +190,7 @@ struct ena_com_io_sq {\n \tu16 idx;\n \tu16 tail;\n \tu16 next_to_comp;\n-\tu16 tx_max_header_size;\n+\tu32 tx_max_header_size;\n \tu8 phase;\n \tu8 desc_entry_size;\n \tu8 dma_addr_bits;\n@@ -312,17 +313,15 @@ struct ena_com_dev {\n \tstruct ena_com_aenq aenq;\n \tstruct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];\n \tstruct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];\n-\tvoid __iomem *reg_bar;\n+\tu8 __iomem *reg_bar;\n \tvoid __iomem *mem_bar;\n \tvoid *dmadev;\n \n \tenum ena_admin_placement_policy_type tx_mem_queue_type;\n-\n+\tu32 tx_max_header_size;\n \tu16 stats_func; /* Selected function for extended statistic dump */\n \tu16 stats_queue; /* Selected queue for extended statistic dump */\n \n-\tu16 tx_max_header_size;\n-\n \tstruct ena_com_mmio_read mmio_read;\n \n \tstruct ena_rss rss;\n@@ -343,6 +342,15 @@ struct ena_com_dev_get_features_ctx {\n \tstruct ena_admin_feature_offload_desc offload;\n };\n \n+struct ena_com_create_io_ctx {\n+\tenum ena_admin_placement_policy_type mem_queue_type;\n+\tenum queue_direction direction;\n+\tint numa_node;\n+\tu32 msix_vector;\n+\tu16 queue_size;\n+\tu16 qid;\n+};\n+\n typedef void (*ena_aenq_handler)(void *data,\n \tstruct ena_admin_aenq_entry *aenq_e);\n \n@@ -420,22 +428,14 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev);\n \n /* ena_com_create_io_queue - Create io queue.\n  * @ena_dev: ENA communication layer struct\n- * @qid - the caller virtual queue id.\n- * @direction - the queue direction (Rx/Tx)\n- * @mem_queue_type - Indicate if this queue is LLQ or regular queue\n- * (relevant only for Tx queue)\n- * @msix_vector - MSI-X vector\n- * @queue_size - queue size\n+ * ena_com_create_io_ctx - create context structure\n  *\n- * Create the submission and the completion queues for queue id - qid.\n+ * Create the submission and the completion queues.\n  *\n  * @return - 0 on success, negative value on failure.\n  */\n-int ena_com_create_io_queue(struct ena_com_dev *ena_dev, u16 qid,\n-\t\t\t    enum queue_direction direction,\n-\t\t\t    enum ena_admin_placement_policy_type mem_queue_type,\n-\t\t\t    u32 msix_vector,\n-\t\t\t    u16 queue_size);\n+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,\n+\t\t\t    struct ena_com_create_io_ctx *ctx);\n \n /* ena_com_admin_destroy - Destroy IO queue with the queue id - qid.\n  * @ena_dev: ENA communication layer struct\n@@ -519,7 +519,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);\n  * @ena_dev: ENA communication layer struct\n  *\n  * This method aborts all the outstanding admin commands.\n- * The called should then call 
ena_com_wait_for_abort_completion to make sure\n+ * The caller should then call ena_com_wait_for_abort_completion to make sure\n  * all the commands were completed.\n  */\n void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);\n@@ -628,10 +628,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);\n  * @ena_dev: ENA communication layer struct\n  *\n  * Free all the RSS/RFS resources.\n- *\n- * @return: 0 on Success and negative value otherwise.\n  */\n-int ena_com_rss_destroy(struct ena_com_dev *ena_dev);\n+void ena_com_rss_destroy(struct ena_com_dev *ena_dev);\n \n /* ena_com_fill_hash_function - Fill RSS hash function\n  * @ena_dev: ENA communication layer struct\n@@ -774,26 +772,38 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);\n  */\n int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);\n \n-/* ena_com_allocate_host_attribute - Allocate host attributes resources.\n+/* ena_com_allocate_host_info - Allocate host info resources.\n  * @ena_dev: ENA communication layer struct\n- * @debug_area_size: Debug aread size\n  *\n- * Allocate host info and debug area.\n+ * @return: 0 on Success and negative value otherwise.\n+ */\n+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);\n+\n+/* ena_com_allocate_debug_area - Allocate debug area.\n+ * @ena_dev: ENA communication layer struct\n+ * @debug_area_size - debug area size.\n  *\n  * @return: 0 on Success and negative value otherwise.\n  */\n-int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev,\n-\t\t\t\t    u32 debug_area_size);\n+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,\n+\t\t\t\tu32 debug_area_size);\n+\n+/* ena_com_delete_debug_area - Free the debug area resources.\n+ * @ena_dev: ENA communication layer struct\n+ *\n+ * Free the allocate debug area.\n+ */\n+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);\n \n-/* ena_com_allocate_host_attribute - Free the host attributes resources.\n+/* ena_com_delete_host_info - Free the host info resources.\n  * @ena_dev: ENA communication layer struct\n  *\n- * Free the allocate host info and debug area.\n+ * Free the allocate host info.\n  */\n-void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev);\n+void ena_com_delete_host_info(struct ena_com_dev *ena_dev);\n \n /* ena_com_set_host_attributes - Update the device with the host\n- * attributes base address.\n+ * attributes (debug area and host info) base address.\n  * @ena_dev: ENA communication layer struct\n  *\n  * @return: 0 on Success and negative value otherwise.\n@@ -979,7 +989,7 @@ ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,\n \t\t */\n \t\treturn;\n \n-\tcurr_moder_idx = (enum ena_intr_moder_level)*moder_tbl_idx;\n+\tcurr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);\n \tif (unlikely(curr_moder_idx >=  ENA_INTR_MAX_NUM_OF_LEVELS)) {\n \t\tena_trc_err(\"Wrong moderation index %u\\n\", curr_moder_idx);\n \t\treturn;\ndiff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h\nindex fe41246..7a031d9 100644\n--- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h\n+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h\n@@ -58,30 +58,6 @@ enum ena_admin_aq_opcode {\n \tENA_ADMIN_GET_STATS = 11,\n };\n \n-/* privileged amdin commands opcodes */\n-enum ena_admin_aq_opcode_privileged {\n-\t/* get device capabilities */\n-\tENA_ADMIN_IDENTIFY = 48,\n-\n-\t/* configure device */\n-\tENA_ADMIN_CONFIGURE_PF_DEVICE = 49,\n-\n-\t/* setup SRIOV PCIe Virtual 
Function capabilities */\n-\tENA_ADMIN_SETUP_VF = 50,\n-\n-\t/* load firmware to the controller */\n-\tENA_ADMIN_LOAD_FIRMWARE = 52,\n-\n-\t/* commit previously loaded firmare */\n-\tENA_ADMIN_COMMIT_FIRMWARE = 53,\n-\n-\t/* quiesce virtual function */\n-\tENA_ADMIN_QUIESCE_VF = 54,\n-\n-\t/* load virtual function from migrates context */\n-\tENA_ADMIN_MIGRATE_VF = 55,\n-};\n-\n /* admin command completion status codes */\n enum ena_admin_aq_completion_status {\n \t/* Request completed successfully */\n@@ -116,25 +92,6 @@ enum ena_admin_aq_feature_id {\n \t/* max number of supported queues per for every queues type */\n \tENA_ADMIN_MAX_QUEUES_NUM = 2,\n \n-\t/* low latency queues capabilities (max entry size, depth) */\n-\tENA_ADMIN_LLQ_CONFIG = 3,\n-\n-\t/* power management capabilities */\n-\tENA_ADMIN_POWER_MANAGEMENT_CONFIG = 4,\n-\n-\t/* MAC address filters support, multicast, broadcast, and\n-\t * promiscuous\n-\t */\n-\tENA_ADMIN_MAC_FILTERS_CONFIG = 5,\n-\n-\t/* VLAN membership, frame format, etc.  */\n-\tENA_ADMIN_VLAN_CONFIG = 6,\n-\n-\t/* Available size for various on-chip memory resources, accessible\n-\t * by the driver\n-\t */\n-\tENA_ADMIN_ON_DEVICE_MEMORY_CONFIG = 7,\n-\n \t/* Receive Side Scaling (RSS) function */\n \tENA_ADMIN_RSS_HASH_FUNCTION = 10,\n \n@@ -150,20 +107,9 @@ enum ena_admin_aq_feature_id {\n \t/* Receive Side Scaling (RSS) hash input */\n \tENA_ADMIN_RSS_HASH_INPUT = 18,\n \n-\t/* overlay tunnels configuration */\n-\tENA_ADMIN_TUNNEL_CONFIG = 19,\n-\n \t/* interrupt moderation parameters */\n \tENA_ADMIN_INTERRUPT_MODERATION = 20,\n \n-\t/* 1588v2 and Timing configuration */\n-\tENA_ADMIN_1588_CONFIG = 21,\n-\n-\t/* Packet Header format templates configuration for input and\n-\t * output parsers\n-\t */\n-\tENA_ADMIN_PKT_HEADER_TEMPLATES_CONFIG = 23,\n-\n \t/* AENQ configuration */\n \tENA_ADMIN_AENQ_CONFIG = 26,\n \n@@ -440,9 +386,7 @@ struct ena_admin_acq_create_sq_resp_desc {\n \n \tuint16_t reserved;\n \n-\t/* word 3 : queue doorbell address as and offset to PCIe MMIO REG\n-\t * BAR\n-\t */\n+\t/* word 3 : queue doorbell address as an offset to PCIe MMIO REG BAR */\n \tuint32_t sq_doorbell_offset;\n \n \t/* word 4 : low latency queue ring base address as an offset to\n@@ -520,18 +464,18 @@ struct ena_admin_acq_create_cq_resp_desc {\n \t/* actual cq depth in # of entries */\n \tuint16_t cq_actual_depth;\n \n-\t/* word 3 : doorbell address as an offset to PCIe MMIO REG BAR */\n-\tuint32_t cq_doorbell_offset;\n+\t/* word 3 : cpu numa node address as an offset to PCIe MMIO REG BAR */\n+\tuint32_t numa_node_register_offset;\n \n \t/* word 4 : completion head doorbell address as an offset to PCIe\n \t * MMIO REG BAR\n \t */\n-\tuint32_t cq_head_db_offset;\n+\tuint32_t cq_head_db_register_offset;\n \n \t/* word 5 : interrupt unmask register address as an offset into\n \t * PCIe MMIO REG BAR\n \t */\n-\tuint32_t cq_interrupt_unmask_register;\n+\tuint32_t cq_interrupt_unmask_register_offset;\n };\n \n /* ENA AQ Destroy Completion Queue command. Placed in control buffer\n@@ -724,7 +668,7 @@ struct ena_admin_queue_feature_desc {\n \n /* ENA MTU Set Feature descriptor. 
*/\n struct ena_admin_set_feature_mtu_desc {\n-\t/* word 0 : mtu size including L2 */\n+\t/* word 0 : mtu payload size (exclude L2) */\n \tuint32_t mtu;\n };\n \n@@ -913,10 +857,7 @@ struct ena_admin_proto_input {\n \t/* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */\n \tuint16_t fields;\n \n-\t/* 0 : inner - for tunneled packet, select the fields\n-\t *    from inner header\n-\t */\n-\tuint16_t flags;\n+\tuint16_t reserved2;\n };\n \n /* ENA RSS hash control buffer structure */\n@@ -927,11 +868,9 @@ struct ena_admin_feature_rss_hash_control {\n \t/* selected input fields */\n \tstruct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];\n \n-\t/* supported input fields for inner header */\n-\tstruct ena_admin_proto_input supported_inner_fields[ENA_ADMIN_RSS_PROTO_NUM];\n+\tstruct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];\n \n-\t/* selected input fields */\n-\tstruct ena_admin_proto_input selected_inner_fields[ENA_ADMIN_RSS_PROTO_NUM];\n+\tstruct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];\n };\n \n /* ENA RSS flow hash input */\n@@ -966,10 +905,10 @@ enum ena_admin_os_type {\n \tENA_ADMIN_OS_DPDK = 3,\n \n \t/* FreeBSD OS */\n-\tENA_ADMIN_OS_FREE_BSD = 4,\n+\tENA_ADMIN_OS_FREEBSD = 4,\n \n \t/* PXE OS */\n-\tENA_ADMIN_OS_PXE = 5,\n+\tENA_ADMIN_OS_IPXE = 5,\n };\n \n /* host info */\n@@ -1284,9 +1223,6 @@ struct ena_admin_ena_mmio_req_read_less_resp {\n #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \\\n \tGENMASK(7, 0)\n \n-/* proto_input */\n-#define ENA_ADMIN_PROTO_INPUT_INNER_MASK BIT(0)\n-\n /* feature_rss_flow_hash_input */\n #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1\n #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)\n@@ -1816,34 +1752,21 @@ set_ena_admin_feature_rss_flow_hash_function_selected_func(\n }\n \n static inline uint16_t\n-get_ena_admin_proto_input_inner(const struct ena_admin_proto_input *p)\n-{\n-\treturn p->flags & ENA_ADMIN_PROTO_INPUT_INNER_MASK;\n-}\n-\n-static inline void\n-set_ena_admin_proto_input_inner(struct ena_admin_proto_input *p, uint16_t val)\n-{\n-\tp->flags |= val & ENA_ADMIN_PROTO_INPUT_INNER_MASK;\n-}\n-\n-static inline uint16_t\n get_ena_admin_feature_rss_flow_hash_input_L3_sort(\n \t\tconst struct ena_admin_feature_rss_flow_hash_input *p)\n {\n \treturn (p->supported_input_sort &\n-\t\tENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)\n+\t\t\tENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)\n \t\t>> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;\n }\n \n static inline void\n set_ena_admin_feature_rss_flow_hash_input_L3_sort(\n-\t\tstruct ena_admin_feature_rss_flow_hash_input *p,\n-\t\tuint16_t val)\n+\t\tstruct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)\n {\n \tp->supported_input_sort |=\n \t\t(val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT)\n-\t\t & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;\n+\t\t& ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;\n }\n \n static inline uint16_t\n@@ -1862,7 +1785,7 @@ set_ena_admin_feature_rss_flow_hash_input_L4_sort(\n {\n \tp->supported_input_sort |=\n \t\t(val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT)\n-\t\t& ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;\n+\t\t & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;\n }\n \n static inline uint16_t\ndiff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h\nindex a547033..6bc3d6a 100644\n--- 
a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h\n+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h\n@@ -87,28 +87,17 @@ struct ena_eth_io_tx_desc {\n \n \t/* word 1 : */\n \t/* ethernet control\n-\t * 3:0 : l3_proto_idx - L3 protocol, if\n-\t *    tunnel_ctrl[0] is set, then this is the inner\n-\t *    packet L3. This field required when\n-\t *    l3_csum_en,l3_csum or tso_en are set.\n+\t * 3:0 : l3_proto_idx - L3 protocol. This field\n+\t *    required when l3_csum_en,l3_csum or tso_en are set.\n \t * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and\n \t *    DF flags of the IPv4 header is 0. Otherwise must\n \t *    be set to 1\n \t * 6:5 : reserved5\n-\t * 7 : tso_en - Enable TSO, For TCP only. For packets\n-\t *    with tunnel (tunnel_ctrl[0]=1), then the inner\n-\t *    packet will be segmented while the outer tunnel is\n-\t *    duplicated\n-\t * 12:8 : l4_proto_idx - L4 protocol, if\n-\t *    tunnel_ctrl[0] is set, then this is the inner\n-\t *    packet L4. This field need to be set when\n-\t *    l4_csum_en or tso_en are set.\n-\t * 13 : l3_csum_en - enable IPv4 header checksum. if\n-\t *    tunnel_ctrl[0] is set, then this will enable\n-\t *    checksum for the inner packet IPv4\n-\t * 14 : l4_csum_en - enable TCP/UDP checksum. if\n-\t *    tunnel_ctrl[0] is set, then this will enable\n-\t *    checksum on the inner packet TCP/UDP checksum\n+\t * 7 : tso_en - Enable TSO, For TCP only.\n+\t * 12:8 : l4_proto_idx - L4 protocol. This field need\n+\t *    to be set when l4_csum_en or tso_en are set.\n+\t * 13 : l3_csum_en - enable IPv4 header checksum.\n+\t * 14 : l4_csum_en - enable TCP/UDP checksum.\n \t * 15 : ethernet_fcs_dis - when set, the controller\n \t *    will not append the 802.3 Ethernet Frame Check\n \t *    Sequence to the packet\n@@ -124,11 +113,8 @@ struct ena_eth_io_tx_desc {\n \t *    must not include the tcp length field. L4 partial\n \t *    checksum should be used for IPv6 packet that\n \t *    contains Routing Headers.\n-\t * 20:18 : tunnel_ctrl - Bit 0: tunneling exists, Bit\n-\t *    1: tunnel packet actually uses UDP as L4, Bit 2:\n-\t *    tunnel packet L3 protocol: 0: IPv4 1: IPv6\n-\t * 21 : ts_req - Indicates that the packet is IEEE\n-\t *    1588v2 packet requiring the timestamp\n+\t * 20:18 : reserved18 - MBZ\n+\t * 21 : reserved21 - MBZ\n \t * 31:22 : req_id_lo - Request ID[9:0]\n \t */\n \tuint32_t meta_ctrl;\n@@ -160,9 +146,7 @@ struct ena_eth_io_tx_meta_desc {\n \t/* word 0 : */\n \t/* length, request id and control flags\n \t * 9:0 : req_id_lo - Request ID[9:0]\n-\t * 11:10 : outr_l3_off_hi - valid if\n-\t *    tunnel_ctrl[0]=1. bits[4:3] of outer packet L3\n-\t *    offset\n+\t * 11:10 : reserved10 - MBZ\n \t * 12 : reserved12 - MBZ\n \t * 13 : reserved13 - MBZ\n \t * 14 : ext_valid - if set, offset fields in Word2\n@@ -201,35 +185,19 @@ struct ena_eth_io_tx_meta_desc {\n \t/* word 2 : */\n \t/* word 2\n \t * 7:0 : l3_hdr_len - the header length L3 IP header.\n-\t *    if tunnel_ctrl[0]=1, this is the IP header length\n-\t *    of the inner packet.  FIXME - check if includes IP\n-\t *    options hdr_len\n \t * 15:8 : l3_hdr_off - the offset of the first byte\n \t *    in the L3 header from the beginning of the to-be\n-\t *    transmitted packet. if tunnel_ctrl[0]=1, this is\n-\t *    the offset the L3 header of the inner packet\n+\t *    transmitted packet.\n \t * 21:16 : l4_hdr_len_in_words - counts the L4 header\n \t *    length in words. 
there is an explicit assumption\n \t *    that L4 header appears right after L3 header and\n-\t *    L4 offset is based on l3_hdr_off+l3_hdr_len FIXME\n-\t *    - pls confirm\n+\t *    L4 offset is based on l3_hdr_off+l3_hdr_len\n \t * 31:22 : mss_lo\n \t */\n \tuint32_t word2;\n \n \t/* word 3 : */\n-\t/* word 3\n-\t * 23:0 : crypto_info\n-\t * 28:24 : outr_l3_hdr_len_words - valid if\n-\t *    tunnel_ctrl[0]=1.  Counts in words\n-\t * 31:29 : outr_l3_off_lo - valid if\n-\t *    tunnel_ctrl[0]=1. bits[2:0] of outer packet L3\n-\t *    offset. Counts the offset of the tunnel IP header\n-\t *    from beginning of the packet. NOTE: if the tunnel\n-\t *    header requires CRC or checksum, it is expected to\n-\t *    be done by the driver as it is not done by the HW\n-\t */\n-\tuint32_t word3;\n+\tuint32_t reserved;\n };\n \n /* ENA IO Queue Tx completions descriptor */\n@@ -298,36 +266,26 @@ struct ena_eth_io_rx_cdesc_base {\n \t/* word 0 : */\n \t/* 4:0 : l3_proto_idx - L3 protocol index\n \t * 6:5 : src_vlan_cnt - Source VLAN count\n-\t * 7 : tunnel - Tunnel exists\n+\t * 7 : reserved7 - MBZ\n \t * 12:8 : l4_proto_idx - L4 protocol index\n \t * 13 : l3_csum_err - when set, either the L3\n \t *    checksum error detected, or, the controller didn't\n-\t *    validate the checksum, If tunnel exists, this\n-\t *    result is for the inner packet. This bit is valid\n-\t *    only when l3_proto_idx indicates IPv4 packet\n+\t *    validate the checksum. This bit is valid only when\n+\t *    l3_proto_idx indicates IPv4 packet\n \t * 14 : l4_csum_err - when set, either the L4\n \t *    checksum error detected, or, the controller didn't\n-\t *    validate the checksum. If tunnel exists, this\n-\t *    result is for the inner packet. This bit is valid\n-\t *    only when l4_proto_idx indicates TCP/UDP packet,\n-\t *    and, ipv4_frag is not set\n+\t *    validate the checksum. This bit is valid only when\n+\t *    l4_proto_idx indicates TCP/UDP packet, and,\n+\t *    ipv4_frag is not set\n \t * 15 : ipv4_frag - Indicates IPv4 fragmented packet\n-\t * 17:16 : reserved16\n-\t * 19:18 : reserved18\n-\t * 20 : secured_pkt - Set if packet was handled by\n-\t *    inline crypto engine\n-\t * 22:21 : crypto_status -  bit 0 secured direction:\n-\t *    0: decryption, 1: encryption. bit 1 reserved\n-\t * 23 : reserved23\n+\t * 23:16 : reserved16\n \t * 24 : phase\n \t * 25 : l3_csum2 - second checksum engine result\n \t * 26 : first - Indicates first descriptor in\n \t *    transaction\n \t * 27 : last - Indicates last descriptor in\n \t *    transaction\n-\t * 28 : inr_l4_csum - TCP/UDP checksum results for\n-\t *    inner packet\n-\t * 29 : reserved29\n+\t * 29:28 : reserved28\n \t * 30 : buffer - 0: Metadata descriptor. 
1: Buffer\n \t *    Descriptor was used\n \t * 31 : reserved31\n@@ -381,6 +339,16 @@ struct ena_eth_io_intr_reg {\n \tuint32_t intr_control;\n };\n \n+/* ENA NUMA Node configuration register */\n+struct ena_eth_io_numa_node_cfg_reg {\n+\t/* word 0 : */\n+\t/* 7:0 : numa\n+\t * 30:8 : reserved\n+\t * 31 : enabled\n+\t */\n+\tuint32_t numa_cfg;\n+};\n+\n /* tx_desc */\n #define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)\n #define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16\n@@ -410,10 +378,6 @@ struct ena_eth_io_intr_reg {\n #define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)\n #define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17\n #define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)\n-#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT 18\n-#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK GENMASK(20, 18)\n-#define ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT 21\n-#define ENA_ETH_IO_TX_DESC_TS_REQ_MASK BIT(21)\n #define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22\n #define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)\n #define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)\n@@ -422,8 +386,6 @@ struct ena_eth_io_intr_reg {\n \n /* tx_meta_desc */\n #define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)\n-#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT 10\n-#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK GENMASK(11, 10)\n #define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14\n #define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)\n #define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15\n@@ -452,11 +414,6 @@ struct ena_eth_io_intr_reg {\n #define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)\n #define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22\n #define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)\n-#define ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK GENMASK(23, 0)\n-#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT 24\n-#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK GENMASK(28, 24)\n-#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT 29\n-#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK GENMASK(31, 29)\n \n /* tx_cdesc */\n #define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)\n@@ -474,8 +431,6 @@ struct ena_eth_io_intr_reg {\n #define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)\n #define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5\n #define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)\n-#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT 7\n-#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK BIT(7)\n #define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8\n #define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)\n #define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13\n@@ -484,10 +439,6 @@ struct ena_eth_io_intr_reg {\n #define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)\n #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15\n #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)\n-#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT 20\n-#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK BIT(20)\n-#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT 21\n-#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK GENMASK(22, 21)\n #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24\n #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)\n #define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25\n@@ -496,8 +447,6 @@ struct ena_eth_io_intr_reg {\n #define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)\n #define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27\n #define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)\n-#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT 
28\n-#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK BIT(28)\n #define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30\n #define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)\n \n@@ -508,6 +457,11 @@ struct ena_eth_io_intr_reg {\n #define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30\n #define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)\n \n+/* numa_node_cfg_reg */\n+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)\n+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31\n+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)\n+\n #if !defined(ENA_DEFS_LINUX_MAINLINE)\n static inline uint32_t get_ena_eth_io_tx_desc_length(\n \t\tconst struct ena_eth_io_tx_desc *p)\n@@ -743,38 +697,6 @@ static inline void set_ena_eth_io_tx_desc_l4_csum_partial(\n \t\t& ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;\n }\n \n-static inline uint32_t get_ena_eth_io_tx_desc_tunnel_ctrl(\n-\t\tconst struct ena_eth_io_tx_desc *p)\n-{\n-\treturn (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK)\n-\t\t>> ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_tx_desc_tunnel_ctrl(\n-\t\tstruct ena_eth_io_tx_desc *p,\n-\t\tuint32_t val)\n-{\n-\tp->meta_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT)\n-\t\t& ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK;\n-}\n-\n-static inline uint32_t get_ena_eth_io_tx_desc_ts_req(\n-\t\tconst struct ena_eth_io_tx_desc *p)\n-{\n-\treturn (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TS_REQ_MASK)\n-\t\t>> ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_tx_desc_ts_req(\n-\t\tstruct ena_eth_io_tx_desc *p,\n-\t\tuint32_t val)\n-{\n-\tp->meta_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT)\n-\t\t& ENA_ETH_IO_TX_DESC_TS_REQ_MASK;\n-}\n-\n static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(\n \t\tconst struct ena_eth_io_tx_desc *p)\n {\n@@ -783,11 +705,9 @@ static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(\n }\n \n static inline void set_ena_eth_io_tx_desc_req_id_lo(\n-\t\tstruct ena_eth_io_tx_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_desc *p, uint32_t val)\n {\n-\tp->meta_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)\n+\tp->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)\n \t\t& ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;\n }\n \n@@ -833,22 +753,6 @@ static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(\n \tp->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;\n }\n \n-static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_hi(\n-\t\tconst struct ena_eth_io_tx_meta_desc *p)\n-{\n-\treturn (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK)\n-\t\t>> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_hi(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n-{\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT)\n-\t\t& ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK;\n-}\n-\n static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(\n \t\tconst struct ena_eth_io_tx_meta_desc *p)\n {\n@@ -857,11 +761,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_ext_valid(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;\n }\n \n@@ -873,11 +775,9 @@ static inline 
uint32_t get_ena_eth_io_tx_meta_desc_word3_valid(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_word3_valid(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK;\n }\n \n@@ -889,11 +789,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;\n }\n \n@@ -905,11 +803,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;\n }\n \n@@ -921,11 +817,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_meta_store(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;\n }\n \n@@ -937,11 +831,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_meta_desc(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;\n }\n \n@@ -953,11 +845,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_phase(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_PHASE_MASK;\n }\n \n@@ -969,11 +859,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_first(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_first(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_FIRST_MASK;\n }\n \n@@ -985,11 +873,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_last(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_last(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << 
ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_LAST_MASK;\n }\n \n@@ -1001,11 +887,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(\n }\n \n static inline void set_ena_eth_io_tx_meta_desc_comp_req(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_tx_meta_desc *p, uint32_t val)\n {\n-\tp->len_ctrl |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)\n+\tp->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)\n \t\t& ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;\n }\n \n@@ -1083,51 +967,6 @@ static inline void set_ena_eth_io_tx_meta_desc_mss_lo(\n \t\t& ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;\n }\n \n-static inline uint32_t get_ena_eth_io_tx_meta_desc_crypto_info(\n-\t\tconst struct ena_eth_io_tx_meta_desc *p)\n-{\n-\treturn p->word3 & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK;\n-}\n-\n-static inline void set_ena_eth_io_tx_meta_desc_crypto_info(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n-{\n-\tp->word3 |= val & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK;\n-}\n-\n-static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words(\n-\t\tconst struct ena_eth_io_tx_meta_desc *p)\n-{\n-\treturn (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK)\n-\t\t>> ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n-{\n-\tp->word3 |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT)\n-\t\t& ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK;\n-}\n-\n-static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_lo(\n-\t\tconst struct ena_eth_io_tx_meta_desc *p)\n-{\n-\treturn (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK)\n-\t\t>> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_lo(\n-\t\tstruct ena_eth_io_tx_meta_desc *p,\n-\t\tuint32_t val)\n-{\n-\tp->word3 |=\n-\t\t(val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT)\n-\t\t& ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK;\n-}\n-\n static inline uint8_t get_ena_eth_io_tx_cdesc_phase(\n \t\tconst struct ena_eth_io_tx_cdesc *p)\n {\n@@ -1231,22 +1070,6 @@ static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;\n }\n \n-static inline uint32_t get_ena_eth_io_rx_cdesc_base_tunnel(\n-\t\tconst struct ena_eth_io_rx_cdesc_base *p)\n-{\n-\treturn (p->status & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK)\n-\t\t>> ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_rx_cdesc_base_tunnel(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n-{\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT)\n-\t\t& ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK;\n-}\n-\n static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(\n \t\tconst struct ena_eth_io_rx_cdesc_base *p)\n {\n@@ -1255,11 +1078,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;\n }\n \n@@ -1271,11 
+1092,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;\n }\n \n@@ -1287,11 +1106,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;\n }\n \n@@ -1303,46 +1120,12 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;\n }\n \n-static inline uint32_t get_ena_eth_io_rx_cdesc_base_secured_pkt(\n-\t\tconst struct ena_eth_io_rx_cdesc_base *p)\n-{\n-\treturn (p->status & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK)\n-\t\t>> ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_rx_cdesc_base_secured_pkt(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n-{\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT)\n-\t\t& ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK;\n-}\n-\n-static inline uint32_t get_ena_eth_io_rx_cdesc_base_crypto_status(\n-\t\tconst struct ena_eth_io_rx_cdesc_base *p)\n-{\n-\treturn (p->status & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK)\n-\t\t>> ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_rx_cdesc_base_crypto_status(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n-{\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT)\n-\t\t& ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK;\n-}\n-\n static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(\n \t\tconst struct ena_eth_io_rx_cdesc_base *p)\n {\n@@ -1351,11 +1134,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_phase(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;\n }\n \n@@ -1367,11 +1148,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;\n }\n \n@@ -1383,11 +1162,9 @@ static inline uint32_t 
get_ena_eth_io_rx_cdesc_base_first(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_first(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;\n }\n \n@@ -1399,30 +1176,12 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_last(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;\n }\n \n-static inline uint32_t get_ena_eth_io_rx_cdesc_base_inr_l4_csum(\n-\t\tconst struct ena_eth_io_rx_cdesc_base *p)\n-{\n-\treturn (p->status & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK)\n-\t\t>> ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT;\n-}\n-\n-static inline void set_ena_eth_io_rx_cdesc_base_inr_l4_csum(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n-{\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT)\n-\t\t& ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK;\n-}\n-\n static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(\n \t\tconst struct ena_eth_io_rx_cdesc_base *p)\n {\n@@ -1431,11 +1190,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(\n }\n \n static inline void set_ena_eth_io_rx_cdesc_base_buffer(\n-\t\tstruct ena_eth_io_rx_cdesc_base *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_rx_cdesc_base *p, uint32_t val)\n {\n-\tp->status |=\n-\t\t(val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)\n+\tp->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)\n \t\t& ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;\n }\n \n@@ -1446,8 +1203,7 @@ static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(\n }\n \n static inline void set_ena_eth_io_intr_reg_rx_intr_delay(\n-\t\tstruct ena_eth_io_intr_reg *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_intr_reg *p, uint32_t val)\n {\n \tp->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;\n }\n@@ -1460,11 +1216,9 @@ static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(\n }\n \n static inline void set_ena_eth_io_intr_reg_tx_intr_delay(\n-\t\tstruct ena_eth_io_intr_reg *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_intr_reg *p, uint32_t val)\n {\n-\tp->intr_control |=\n-\t\t(val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)\n+\tp->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)\n \t\t& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;\n }\n \n@@ -1476,13 +1230,37 @@ static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(\n }\n \n static inline void set_ena_eth_io_intr_reg_intr_unmask(\n-\t\tstruct ena_eth_io_intr_reg *p,\n-\t\tuint32_t val)\n+\t\tstruct ena_eth_io_intr_reg *p, uint32_t val)\n {\n-\tp->intr_control |=\n-\t\t(val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)\n+\tp->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)\n \t\t& ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;\n }\n \n+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(\n+\t\tconst struct ena_eth_io_numa_node_cfg_reg *p)\n+{\n+\treturn p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;\n+}\n+\n+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(\n+\t\tstruct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)\n+{\n+\tp->numa_cfg |= val & 
ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;\n+}\n+\n+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(\n+\t\tconst struct ena_eth_io_numa_node_cfg_reg *p)\n+{\n+\treturn (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK)\n+\t\t>> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;\n+}\n+\n+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(\n+\t\tstruct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)\n+{\n+\tp->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT)\n+\t\t& ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;\n+}\n+\n #endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */\n #endif /*_ENA_ETH_IO_H_ */\ndiff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h\nindex 4abdffe..3d25209 100644\n--- a/drivers/net/ena/base/ena_defs/ena_gen_info.h\n+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h\n@@ -31,5 +31,5 @@\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n \n-#define\tENA_GEN_DATE\t\"Mon Feb 15 14:33:08 IST 2016\"\n-#define\tENA_GEN_COMMIT\t\"c71ec25\"\n+#define\tENA_GEN_DATE\t\"Sun Jun  5 10:24:39 IDT 2016\"\n+#define\tENA_GEN_COMMIT\t\"17146ed\"\ndiff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c\nindex 459e0bb..290a566 100644\n--- a/drivers/net/ena/base/ena_eth_com.c\n+++ b/drivers/net/ena/base/ena_eth_com.c\n@@ -62,7 +62,7 @@ static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)\n \n \t/* Switch phase bit in case of wrap around */\n \tif (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))\n-\t\tio_cq->phase = 1 - io_cq->phase;\n+\t\tio_cq->phase ^= 1;\n }\n \n static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)\n@@ -97,7 +97,7 @@ static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)\n \n \t/* Switch phase bit in case of wrap around */\n \tif (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))\n-\t\tio_sq->phase = 1 - io_sq->phase;\n+\t\tio_sq->phase ^= 1;\n }\n \n static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,\n@@ -110,7 +110,10 @@ static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,\n \tif (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)\n \t\treturn 0;\n \n-\tENA_ASSERT(io_sq->header_addr, \"header address is NULL\\n\");\n+\tif (unlikely(!io_sq->header_addr)) {\n+\t\tena_trc_err(\"Push buffer header ptr is NULL\\n\");\n+\t\treturn ENA_COM_INVAL;\n+\t}\n \n \tmemcpy_toio(dev_head_addr, head_src, header_len);\n \n@@ -127,8 +130,7 @@ static inline struct ena_eth_io_rx_cdesc_base *\n }\n \n static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,\n-\t\t\t\t\t   u16 *first_cdesc_idx,\n-\t\t\t\t\t   u16 *nb_hw_desc)\n+\t\t\t\t\t   u16 *first_cdesc_idx)\n {\n \tstruct ena_eth_io_rx_cdesc_base *cdesc;\n \tu16 count = 0, head_masked;\n@@ -161,8 +163,7 @@ static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,\n \t\tcount = 0;\n \t}\n \n-\t*nb_hw_desc = count;\n-\treturn 0;\n+\treturn count;\n }\n \n static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,\n@@ -408,21 +409,20 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,\n \tu16 cdesc_idx = 0;\n \tu16 nb_hw_desc;\n \tu16 i;\n-\tint rc;\n \n \tENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,\n \t\t   \"wrong Q type\");\n \n-\trc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);\n-\tif (rc || (nb_hw_desc == 0)) {\n+\tnb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);\n+\tif (nb_hw_desc == 0) {\n \t\tena_rx_ctx->descs = 
nb_hw_desc;\n-\t\treturn rc;\n+\t\treturn 0;\n \t}\n \n \tena_trc_dbg(\"fetch rx packet: queue %d completed desc: %d\\n\",\n \t\t    io_cq->qid, nb_hw_desc);\n \n-\tif (unlikely(nb_hw_desc >= ena_rx_ctx->max_bufs)) {\n+\tif (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {\n \t\tena_trc_err(\"Too many RX cdescs (%d) > MAX(%d)\\n\",\n \t\t\t    nb_hw_desc, ena_rx_ctx->max_bufs);\n \t\treturn ENA_COM_NO_SPACE;\n@@ -459,7 +459,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,\n \t\t   \"wrong Q type\");\n \n \tif (unlikely(ena_com_sq_empty_space(io_sq) == 0))\n-\t\treturn -1;\n+\t\treturn ENA_COM_NO_SPACE;\n \n \tdesc = get_sq_desc(io_sq);\n \tmemset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));\n@@ -496,9 +496,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)\n \t\t((unsigned char *)io_cq->cdesc_addr.virt_addr\n \t\t+ (masked_head * io_cq->cdesc_entry_size_in_bytes));\n \n+\t/* When the current completion descriptor phase isn't the same as the\n+\t * expected, it mean that the device still didn't update\n+\t * this completion.\n+\t */\n \tcdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;\n \tif (cdesc_phase != expected_phase)\n-\t\treturn -1;\n+\t\treturn ENA_COM_TRY_AGAIN;\n \n \tena_com_cq_inc_head(io_cq);\n \ndiff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h\nindex 325d69c..71a880c 100644\n--- a/drivers/net/ena/base/ena_eth_com.h\n+++ b/drivers/net/ena/base/ena_eth_com.h\n@@ -142,6 +142,20 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)\n \treturn 0;\n }\n \n+static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,\n+\t\t\t\t\t    u8 numa_node)\n+{\n+\tstruct ena_eth_io_numa_node_cfg_reg numa_cfg;\n+\n+\tif (!io_cq->numa_node_cfg_reg)\n+\t\treturn;\n+\n+\tnuma_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)\n+\t\t| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;\n+\n+\tENA_REG_WRITE32(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);\n+}\n+\n static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)\n {\n \tio_sq->next_to_comp += elem;\ndiff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h\nindex 5f69330..3c0203f 100644\n--- a/drivers/net/ena/base/ena_plat_dpdk.h\n+++ b/drivers/net/ena/base/ena_plat_dpdk.h\n@@ -64,8 +64,8 @@ typedef uint64_t dma_addr_t;\n #define ena_atomic32_t rte_atomic32_t\n #define ena_mem_handle_t void *\n \n-#define SZ_256 (256)\n-#define SZ_4K (4096)\n+#define SZ_256 (256U)\n+#define SZ_4K (4096U)\n \n #define ENA_COM_OK\t0\n #define ENA_COM_NO_MEM\t-ENOMEM\n@@ -75,6 +75,7 @@ typedef uint64_t dma_addr_t;\n #define ENA_COM_PERMISSION\t-EPERM\n #define ENA_COM_TIMER_EXPIRED\t-ETIME\n #define ENA_COM_FAULT\t-EFAULT\n+#define ENA_COM_TRY_AGAIN\t-EAGAIN\n \n #define ____cacheline_aligned __rte_cache_aligned\n \n@@ -83,6 +84,7 @@ typedef uint64_t dma_addr_t;\n #define ENA_MSLEEP(x) rte_delay_ms(x)\n #define ENA_UDELAY(x) rte_delay_us(x)\n \n+#define ENA_TOUCH(x) ((void)(x))\n #define memcpy_toio memcpy\n #define wmb rte_wmb\n #define rmb rte_wmb\n@@ -182,7 +184,7 @@ typedef uint64_t dma_addr_t;\n \tdo {\t\t\t\t\t\t\t\t\\\n \t\tconst struct rte_memzone *mz;\t\t\t\t\\\n \t\tchar z_name[RTE_MEMZONE_NAMESIZE];\t\t\t\\\n-\t\t(void)dmadev; (void)handle;\t\t\t\t\\\n+\t\tENA_TOUCH(dmadev); ENA_TOUCH(handle);\t\t\t\\\n \t\tsnprintf(z_name, sizeof(z_name),\t\t\t\\\n \t\t\t\t\"ena_alloc_%d\", ena_alloc_cnt++);\t\\\n \t\tmz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \\\n@@ 
-190,9 +192,12 @@ typedef uint64_t dma_addr_t;\n \t\tphys = mz->phys_addr;\t\t\t\t\t\\\n \t} while (0)\n #define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \t\\\n-\t({(void)size; rte_free(virt); })\n+\t\t({ ENA_TOUCH(size); ENA_TOUCH(phys);\t\t\t\\\n+\t\t   ENA_TOUCH(dmadev);\t\t\t\t\t\\\n+\t\t   rte_free(virt); })\n+\n #define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)\n-#define ENA_MEM_FREE(dmadev, ptr) ({(void)dmadev; rte_free(ptr); })\n+#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })\n \n static inline void writel(u32 value, volatile void  *addr)\n {\ndiff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c\nindex e157587..7d91b3b 100644\n--- a/drivers/net/ena/ena_ethdev.c\n+++ b/drivers/net/ena/ena_ethdev.c\n@@ -742,6 +742,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t      __rte_unused unsigned int socket_id,\n \t\t\t      __rte_unused const struct rte_eth_txconf *tx_conf)\n {\n+\tstruct ena_com_create_io_ctx ctx = { 0 };\n \tstruct ena_ring *txq = NULL;\n \tstruct ena_adapter *adapter =\n \t\t(struct ena_adapter *)(dev->data->dev_private);\n@@ -767,11 +768,14 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \tena_qid = ENA_IO_TXQ_IDX(queue_idx);\n-\trc = ena_com_create_io_queue(ena_dev, ena_qid,\n-\t\t\t\t     ENA_COM_IO_QUEUE_DIRECTION_TX,\n-\t\t\t\t     ena_dev->tx_mem_queue_type,\n-\t\t\t\t     -1 /* admin interrupts is not used */,\n-\t\t\t\t     nb_desc);\n+\n+\tctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;\n+\tctx.qid = ena_qid;\n+\tctx.msix_vector = -1; /* admin interrupts not used */\n+\tctx.mem_queue_type = ena_dev->tx_mem_queue_type;\n+\tctx.queue_size = adapter->tx_ring_size;\n+\n+\trc = ena_com_create_io_queue(ena_dev, &ctx);\n \tif (rc) {\n \t\tRTE_LOG(ERR, PMD,\n \t\t\t\"failed to create io TX queue #%d (qid:%d) rc: %d\\n\",\n@@ -780,6 +784,17 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];\n \ttxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];\n \n+\trc = ena_com_get_io_handlers(ena_dev, ena_qid,\n+\t\t\t\t     &txq->ena_com_io_sq,\n+\t\t\t\t     &txq->ena_com_io_cq);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"Failed to get TX queue handlers. 
TX queue num %d rc: %d\\n\",\n+\t\t\tqueue_idx, rc);\n+\t\tena_com_destroy_io_queue(ena_dev, ena_qid);\n+\t\tgoto err;\n+\t}\n+\n \ttxq->port_id = dev->data->port_id;\n \ttxq->next_to_clean = 0;\n \ttxq->next_to_use = 0;\n@@ -808,7 +823,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,\n \t/* Store pointer to this queue in upper layer */\n \ttxq->configured = 1;\n \tdev->data->tx_queues[queue_idx] = txq;\n-\n+err:\n \treturn rc;\n }\n \n@@ -819,6 +834,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t      __rte_unused const struct rte_eth_rxconf *rx_conf,\n \t\t\t      struct rte_mempool *mp)\n {\n+\tstruct ena_com_create_io_ctx ctx = { 0 };\n \tstruct ena_adapter *adapter =\n \t\t(struct ena_adapter *)(dev->data->dev_private);\n \tstruct ena_ring *rxq = NULL;\n@@ -842,11 +858,14 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \tena_qid = ENA_IO_RXQ_IDX(queue_idx);\n-\trc = ena_com_create_io_queue(ena_dev, ena_qid,\n-\t\t\t\t     ENA_COM_IO_QUEUE_DIRECTION_RX,\n-\t\t\t\t     ENA_ADMIN_PLACEMENT_POLICY_HOST,\n-\t\t\t\t     -1 /* admin interrupts not used */,\n-\t\t\t\t     nb_desc);\n+\n+\tctx.qid = ena_qid;\n+\tctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;\n+\tctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\tctx.msix_vector = -1; /* admin interrupts not used */\n+\tctx.queue_size = adapter->rx_ring_size;\n+\n+\trc = ena_com_create_io_queue(ena_dev, &ctx);\n \tif (rc)\n \t\tRTE_LOG(ERR, PMD, \"failed to create io RX queue #%d rc: %d\\n\",\n \t\t\tqueue_idx, rc);\n@@ -854,6 +873,16 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];\n \trxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];\n \n+\trc = ena_com_get_io_handlers(ena_dev, ena_qid,\n+\t\t\t\t     &rxq->ena_com_io_sq,\n+\t\t\t\t     &rxq->ena_com_io_cq);\n+\tif (rc) {\n+\t\tRTE_LOG(ERR, PMD,\n+\t\t\t\"Failed to get RX queue handlers. RX queue num %d rc: %d\\n\",\n+\t\t\tqueue_idx, rc);\n+\t\tena_com_destroy_io_queue(ena_dev, ena_qid);\n+\t}\n+\n \trxq->port_id = dev->data->port_id;\n \trxq->next_to_clean = 0;\n \trxq->next_to_use = 0;\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "1/6"
    ]
}