get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
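
The same information can be retrieved programmatically. Below is a minimal sketch in Python, assuming the third-party `requests` package is installed; anonymous read access (no API token) is assumed for GET requests, matching the unauthenticated response shown after it:

    # Fetch the patch shown below from the Patchwork REST API.
    import requests

    resp = requests.get("http://patches.dpdk.org/api/patches/63837/")
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[1/2] net/ena: upgrade HAL for new HW features"
    print(patch["state"])  # "changes-requested"
    print(patch["mbox"])   # raw mbox URL, suitable for piping into `git am`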

GET /api/patches/63837/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 63837,
    "url": "http://patches.dpdk.org/api/patches/63837/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20191213133216.23572-2-mk@semihalf.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191213133216.23572-2-mk@semihalf.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191213133216.23572-2-mk@semihalf.com",
    "date": "2019-12-13T13:32:15",
    "name": "[1/2] net/ena: upgrade HAL for new HW features",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "39a29b5d882e0d5cba00613dca7b36542d346308",
    "submitter": {
        "id": 786,
        "url": "http://patches.dpdk.org/api/people/786/?format=api",
        "name": "Michal Krawczyk",
        "email": "mk@semihalf.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20191213133216.23572-2-mk@semihalf.com/mbox/",
    "series": [
        {
            "id": 7821,
            "url": "http://patches.dpdk.org/api/series/7821/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7821",
            "date": "2019-12-13T13:32:14",
            "name": "net/ena: add support for Rx offsets",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/7821/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/63837/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/63837/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 59327A04F1;\n\tFri, 13 Dec 2019 14:32:55 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id DC2521BF6E;\n\tFri, 13 Dec 2019 14:32:54 +0100 (CET)",
            "from mail-lf1-f65.google.com (mail-lf1-f65.google.com\n [209.85.167.65]) by dpdk.org (Postfix) with ESMTP id 6268D1BF6C\n for <dev@dpdk.org>; Fri, 13 Dec 2019 14:32:53 +0100 (CET)",
            "by mail-lf1-f65.google.com with SMTP id n12so1983270lfe.3\n for <dev@dpdk.org>; Fri, 13 Dec 2019 05:32:53 -0800 (PST)",
            "from mkPC.semihalf.local (31-172-191-173.noc.fibertech.net.pl.\n [31.172.191.173])\n by smtp.gmail.com with ESMTPSA id 204sm3154411lfj.47.2019.12.13.05.32.50\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Fri, 13 Dec 2019 05:32:51 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=semihalf-com.20150623.gappssmtp.com; s=20150623;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=FYMUGlsmlvGJ+yZPPLaJzdV3S1l6nOXLcIv6BaMVfGs=;\n b=0EY+SjXp+0H7Ogm2JieBV5O0/V8Ga1oTSHuyHCK6tnbRdAVaTlfXdnrbznW4jqE/As\n llMpxtBWD0IuWUsqhdR2rrpFYGR2mwxynvL51QEudc0lFJh1sNmRFZXv+LjO+1hFH4aO\n sznkq2Tv930uwarCCESVBoaMDl13aK6vRY1cobbetLW6c01KUmwSE0rFGZTnEN9QZw4E\n Ggfwb1bKpXHaxL4bN0ebYc5vD8EhBrN694ybCMLE/ekJsyses7ESNzvZ28PbIj2/kQAj\n 3uy3FG7txYYzr/kgu0gmQ8qUo0mSei6ane+e3mRdT1yHm6MPsLEHBzHS+NKD2LAOakUS\n lxDw==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=FYMUGlsmlvGJ+yZPPLaJzdV3S1l6nOXLcIv6BaMVfGs=;\n b=dBNQJtcl1rJhr+glbLCK0eQYt8MuPjqSxsULgiopQPbqoztZueZR/kOBWW9Z4ChJMe\n JknxGoinI+Q13gH8Us64uVlQ6d1L8D0znRkh+z0fwaaYpoaV5W7SrJoOu3kYNTPDaljv\n r5p7DwhTcm7LR5o0z9o6MoT7vMehJGzYZ6YmnPqhdrgI6auoa641slcY4gxmyAFQjDRW\n rVOR4z6bnMPf7xWiHLq1s7O42fozoWMM4jy74IqBFHUk9cEqzzkXbp1ug/aaJTWa/yVw\n 9cJwqRipVimGKZF0qx3lND/qjL9g74wr2+t2+lG1X1CHux1dWZxCfzmj8Ia99We1aQTn\n 1kFg==",
        "X-Gm-Message-State": "APjAAAUp3VQdeTaSzjg+kQTGChjdUJmY2XDTAdzPxdyXCZ8yuMrr3Oio\n PQEHSnP0KVx21dRmI2bqmfOmoM46vxc=",
        "X-Google-Smtp-Source": "\n APXvYqyYB6ODme2ia2yBUN/8qTw9rEaWoXd/3pSPz+RPSfc9cFhyNKZfcJnlYMKBmBXViPoId95X4g==",
        "X-Received": "by 2002:ac2:4436:: with SMTP id w22mr9027718lfl.185.1576243971914;\n Fri, 13 Dec 2019 05:32:51 -0800 (PST)",
        "From": "Michal Krawczyk <mk@semihalf.com>",
        "To": "dev@dpdk.org",
        "Cc": "gtzalik@amazon.com, mw@semihalf.com, matua@amazon.com, mba@semihalf.com,\n igorch@amazon.com, Michal Krawczyk <mk@semihalf.com>",
        "Date": "Fri, 13 Dec 2019 14:32:15 +0100",
        "Message-Id": "<20191213133216.23572-2-mk@semihalf.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20191213133216.23572-1-mk@semihalf.com>",
        "References": "<20191213133216.23572-1-mk@semihalf.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH 1/2] net/ena: upgrade HAL for new HW features",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This version of the HAL allows to use the latest HW features, like\nrx offsets.\n\nDriver was adjusted to the new version to fix the build.\n\nSigned-off-by: Michal Krawczyk <mk@semihalf.com>\nSigned-off-by: Maciej Bielski <mba@semihalf.com>\n---\n drivers/net/ena/base/ena_com.c                | 154 +++++++++++-------\n drivers/net/ena/base/ena_com.h                |  26 ++-\n .../net/ena/base/ena_defs/ena_admin_defs.h    |  69 +++++++-\n .../net/ena/base/ena_defs/ena_common_defs.h   |   8 +-\n .../net/ena/base/ena_defs/ena_eth_io_defs.h   |   8 +-\n drivers/net/ena/base/ena_defs/ena_gen_info.h  |   4 +-\n drivers/net/ena/base/ena_defs/ena_regs_defs.h |   3 +-\n drivers/net/ena/base/ena_eth_com.c            | 111 ++++---------\n drivers/net/ena/base/ena_eth_com.h            |  77 +++++++--\n drivers/net/ena/base/ena_plat_dpdk.h          |   7 +-\n drivers/net/ena/ena_ethdev.c                  |   6 +-\n 11 files changed, 286 insertions(+), 187 deletions(-)",
    "diff": "diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c\nindex 8b51660a4..eb41bcf19 100644\n--- a/drivers/net/ena/base/ena_com.c\n+++ b/drivers/net/ena/base/ena_com.c\n@@ -14,7 +14,6 @@\n #define ENA_ASYNC_QUEUE_DEPTH 16\n #define ENA_ADMIN_QUEUE_DEPTH 32\n \n-\n #define ENA_CTRL_MAJOR\t\t0\n #define ENA_CTRL_MINOR\t\t0\n #define ENA_CTRL_SUB_MINOR\t1\n@@ -64,7 +63,7 @@ struct ena_com_stats_ctx {\n \tstruct ena_admin_acq_get_stats_resp get_resp;\n };\n \n-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,\n+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,\n \t\t\t\t       struct ena_common_mem_addr *ena_addr,\n \t\t\t\t       dma_addr_t addr)\n {\n@@ -74,7 +73,7 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,\n \t}\n \n \tena_addr->mem_addr_low = lower_32_bits(addr);\n-\tena_addr->mem_addr_high = (u16)upper_32_bits(addr);\n+\tena_addr->mem_addr_high = upper_32_bits(addr);\n \n \treturn 0;\n }\n@@ -88,7 +87,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)\n \t\t\t       sq->mem_handle);\n \n \tif (!sq->entries) {\n-\t\tena_trc_err(\"memory allocation failed\");\n+\t\tena_trc_err(\"memory allocation failed\\n\");\n \t\treturn ENA_COM_NO_MEM;\n \t}\n \n@@ -110,7 +109,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)\n \t\t\t       cq->mem_handle);\n \n \tif (!cq->entries)  {\n-\t\tena_trc_err(\"memory allocation failed\");\n+\t\tena_trc_err(\"memory allocation failed\\n\");\n \t\treturn ENA_COM_NO_MEM;\n \t}\n \n@@ -135,7 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,\n \t\t\taenq->mem_handle);\n \n \tif (!aenq->entries) {\n-\t\tena_trc_err(\"memory allocation failed\");\n+\t\tena_trc_err(\"memory allocation failed\\n\");\n \t\treturn ENA_COM_NO_MEM;\n \t}\n \n@@ -165,7 +164,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,\n \treturn 0;\n }\n \n-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,\n+static void comp_ctxt_release(struct ena_com_admin_queue *queue,\n \t\t\t\t     struct ena_comp_ctx *comp_ctx)\n {\n \tcomp_ctx->occupied = false;\n@@ -181,6 +180,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,\n \t\treturn NULL;\n \t}\n \n+\tif (unlikely(!queue->comp_ctx)) {\n+\t\tena_trc_err(\"Completion context is NULL\\n\");\n+\t\treturn NULL;\n+\t}\n+\n \tif (unlikely(queue->comp_ctx[command_id].occupied && capture)) {\n \t\tena_trc_err(\"Completion context is occupied\\n\");\n \t\treturn NULL;\n@@ -254,7 +258,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu\n \treturn comp_ctx;\n }\n \n-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)\n+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)\n {\n \tsize_t size = queue->q_depth * sizeof(struct ena_comp_ctx);\n \tstruct ena_comp_ctx *comp_ctx;\n@@ -262,7 +266,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)\n \n \tqueue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);\n \tif (unlikely(!queue->comp_ctx)) {\n-\t\tena_trc_err(\"memory allocation failed\");\n+\t\tena_trc_err(\"memory allocation failed\\n\");\n \t\treturn ENA_COM_NO_MEM;\n \t}\n \n@@ -335,18 +339,21 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,\n \t\t}\n \n \t\tif (!io_sq->desc_addr.virt_addr) {\n-\t\t\tena_trc_err(\"memory allocation failed\");\n+\t\t\tena_trc_err(\"memory allocation failed\\n\");\n \t\t\treturn ENA_COM_NO_MEM;\n 
\t\t}\n \t}\n \n \tif (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {\n \t\t/* Allocate bounce buffers */\n-\t\tio_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;\n-\t\tio_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;\n+\t\tio_sq->bounce_buf_ctrl.buffer_size =\n+\t\t\tena_dev->llq_info.desc_list_entry_size;\n+\t\tio_sq->bounce_buf_ctrl.buffers_num =\n+\t\t\tENA_COM_BOUNCE_BUFFER_CNTRL_CNT;\n \t\tio_sq->bounce_buf_ctrl.next_to_use = 0;\n \n-\t\tsize = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;\n+\t\tsize = io_sq->bounce_buf_ctrl.buffer_size *\n+\t\t\tio_sq->bounce_buf_ctrl.buffers_num;\n \n \t\tENA_MEM_ALLOC_NODE(ena_dev->dmadev,\n \t\t\t\t   size,\n@@ -357,11 +364,12 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,\n \t\t\tio_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);\n \n \t\tif (!io_sq->bounce_buf_ctrl.base_buffer) {\n-\t\t\tena_trc_err(\"bounce buffer memory allocation failed\");\n+\t\t\tena_trc_err(\"bounce buffer memory allocation failed\\n\");\n \t\t\treturn ENA_COM_NO_MEM;\n \t\t}\n \n-\t\tmemcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));\n+\t\tmemcpy(&io_sq->llq_info, &ena_dev->llq_info,\n+\t\t       sizeof(io_sq->llq_info));\n \n \t\t/* Initiate the first bounce buffer */\n \t\tio_sq->llq_buf_ctrl.curr_bounce_buf =\n@@ -417,7 +425,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,\n \t}\n \n \tif (!io_cq->cdesc_addr.virt_addr) {\n-\t\tena_trc_err(\"memory allocation failed\");\n+\t\tena_trc_err(\"memory allocation failed\\n\");\n \t\treturn ENA_COM_NO_MEM;\n \t}\n \n@@ -495,12 +503,9 @@ static int ena_com_comp_status_to_errno(u8 comp_status)\n \tif (unlikely(comp_status != 0))\n \t\tena_trc_err(\"admin command failed[%u]\\n\", comp_status);\n \n-\tif (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))\n-\t\treturn ENA_COM_INVAL;\n-\n \tswitch (comp_status) {\n \tcase ENA_ADMIN_SUCCESS:\n-\t\treturn 0;\n+\t\treturn ENA_COM_OK;\n \tcase ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:\n \t\treturn ENA_COM_NO_MEM;\n \tcase ENA_ADMIN_UNSUPPORTED_OPCODE:\n@@ -512,14 +517,14 @@ static int ena_com_comp_status_to_errno(u8 comp_status)\n \t\treturn ENA_COM_INVAL;\n \t}\n \n-\treturn 0;\n+\treturn ENA_COM_INVAL;\n }\n \n static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,\n \t\t\t\t\t\t     struct ena_com_admin_queue *admin_queue)\n {\n \tunsigned long flags = 0;\n-\tuint64_t timeout;\n+\tena_time_t timeout;\n \tint ret;\n \n \ttimeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);\n@@ -568,7 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c\n /**\n  * Set the LLQ configurations of the firmware\n  *\n- * The driver provides only the enabled feature values to the FW,\n+ * The driver provides only the enabled feature values to the device,\n  * which in turn, checks if they are supported.\n  */\n static int ena_com_set_llq(struct ena_com_dev *ena_dev)\n@@ -615,7 +620,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,\n \tsupported_feat = llq_features->header_location_ctrl_supported;\n \n \tif (likely(supported_feat & llq_default_cfg->llq_header_location)) {\n-\t\tllq_info->header_location_ctrl = llq_default_cfg->llq_header_location;\n+\t\tllq_info->header_location_ctrl =\n+\t\t\tllq_default_cfg->llq_header_location;\n \t} else {\n \t\tena_trc_err(\"Invalid header location control, supported: 0x%x\\n\",\n \t\t\t    supported_feat);\n@@ -623,8 
+629,6 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,\n \t}\n \n \tif (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {\n-\t\tllq_info->inline_header = true;\n-\n \t\tsupported_feat = llq_features->descriptors_stride_ctrl_supported;\n \t\tif (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {\n \t\t\tllq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;\n@@ -639,14 +643,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,\n \t\t\t\treturn -EINVAL;\n \t\t\t}\n \n-\t\t\tena_trc_err(\"Default llq stride ctrl is not supported, performing fallback,\"\n-\t\t\t\t    \"default: 0x%x, supported: 0x%x, used: 0x%x\\n\",\n+\t\t\tena_trc_err(\"Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\\n\",\n \t\t\t\t    llq_default_cfg->llq_stride_ctrl,\n \t\t\t\t    supported_feat,\n \t\t\t\t    llq_info->desc_stride_ctrl);\n \t\t}\n \t} else {\n-\t\tllq_info->inline_header = false;\n \t\tllq_info->desc_stride_ctrl = 0;\n \t}\n \n@@ -669,8 +671,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\tena_trc_err(\"Default llq ring entry size is not supported, performing fallback,\"\n-\t\t\t    \"default: 0x%x, supported: 0x%x, used: 0x%x\\n\",\n+\t\tena_trc_err(\"Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\\n\",\n \t\t\t    llq_default_cfg->llq_ring_entry_size,\n \t\t\t    supported_feat,\n \t\t\t    llq_info->desc_list_entry_size);\n@@ -708,8 +709,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\tena_trc_err(\"Default llq num descs before header is not supported, performing fallback,\"\n-\t\t\t    \"default: 0x%x, supported: 0x%x, used: 0x%x\\n\",\n+\t\tena_trc_err(\"Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\\n\",\n \t\t\t    llq_default_cfg->llq_num_decs_before_header,\n \t\t\t    supported_feat,\n \t\t\t    llq_info->descs_num_before_header);\n@@ -722,11 +722,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,\n \tif (rc)\n \t\tena_trc_err(\"Cannot set LLQ configuration: %d\\n\", rc);\n \n-\treturn 0;\n+\treturn rc;\n }\n \n-\n-\n static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,\n \t\t\t\t\t\t\tstruct ena_com_admin_queue *admin_queue)\n {\n@@ -747,16 +745,25 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com\n \t\tadmin_queue->stats.no_completion++;\n \t\tENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);\n \n-\t\tif (comp_ctx->status == ENA_CMD_COMPLETED)\n-\t\t\tena_trc_err(\"The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\\n\",\n-\t\t\t\t    comp_ctx->cmd_opcode);\n-\t\telse\n-\t\t\tena_trc_err(\"The ena device doesn't send any completion for the admin cmd %d status %d\\n\",\n+\t\tif (comp_ctx->status == ENA_CMD_COMPLETED) {\n+\t\t\tena_trc_err(\"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\\n\",\n+\t\t\t\t    comp_ctx->cmd_opcode, admin_queue->auto_polling ? 
\"ON\" : \"OFF\");\n+\t\t\t/* Check if fallback to polling is enabled */\n+\t\t\tif (admin_queue->auto_polling)\n+\t\t\t\tadmin_queue->polling = true;\n+\t\t} else {\n+\t\t\tena_trc_err(\"The ena device didn't send a completion for the admin cmd %d status %d\\n\",\n \t\t\t\t    comp_ctx->cmd_opcode, comp_ctx->status);\n-\n-\t\tadmin_queue->running_state = false;\n-\t\tret = ENA_COM_TIMER_EXPIRED;\n-\t\tgoto err;\n+\t\t}\n+\t\t/* Check if shifted to polling mode.\n+\t\t * This will happen if there is a completion without an interrupt\n+\t\t * and autopolling mode is enabled. Continuing normal execution in such case\n+\t\t */\n+\t\tif (!admin_queue->polling) {\n+\t\t\tadmin_queue->running_state = false;\n+\t\t\tret = ENA_COM_TIMER_EXPIRED;\n+\t\t\tgoto err;\n+\t\t}\n \t}\n \n \tret = ena_com_comp_status_to_errno(comp_ctx->comp_status);\n@@ -817,7 +824,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)\n \t}\n \n \tif (read_resp->reg_off != offset) {\n-\t\tena_trc_err(\"Read failure: wrong offset provided\");\n+\t\tena_trc_err(\"Read failure: wrong offset provided\\n\");\n \t\tret = ENA_MMIO_READ_TIMEOUT;\n \t} else {\n \t\tret = read_resp->reg_val;\n@@ -912,8 +919,9 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,\n \t}\n \n \tif (io_sq->bounce_buf_ctrl.base_buffer) {\n-\t\tsize = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;\n-\t\tENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);\n+\t\tENA_MEM_FREE(ena_dev->dmadev,\n+\t\t\t     io_sq->bounce_buf_ctrl.base_buffer,\n+\t\t\t     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));\n \t\tio_sq->bounce_buf_ctrl.base_buffer = NULL;\n \t}\n }\n@@ -1155,7 +1163,9 @@ static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)\n \trss->rss_ind_tbl = NULL;\n \n \tif (rss->host_rss_ind_tbl)\n-\t\tENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);\n+\t\tENA_MEM_FREE(ena_dev->dmadev,\n+\t\t\t     rss->host_rss_ind_tbl,\n+\t\t\t     ((1ULL << rss->tbl_log_size) * sizeof(u16)));\n \trss->host_rss_ind_tbl = NULL;\n }\n \n@@ -1636,7 +1646,9 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)\n \n \tENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);\n \tif (admin_queue->comp_ctx)\n-\t\tENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);\n+\t\tENA_MEM_FREE(ena_dev->dmadev,\n+\t\t\t     admin_queue->comp_ctx,\n+\t\t\t     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));\n \tadmin_queue->comp_ctx = NULL;\n \tsize = ADMIN_SQ_SIZE(admin_queue->q_depth);\n \tif (sq->entries)\n@@ -1670,6 +1682,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)\n \tena_dev->admin_queue.polling = polling;\n }\n \n+bool ena_com_get_admin_polling_mode(struct ena_com_dev * ena_dev)\n+{\n+\treturn ena_dev->admin_queue.polling;\n+}\n+\n+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,\n+\t\t\t\t\t bool polling)\n+{\n+\tena_dev->admin_queue.auto_polling = polling;\n+}\n+\n int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)\n {\n \tstruct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;\n@@ -2080,7 +2103,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)\n \tstruct ena_admin_aenq_entry *aenq_e;\n \tstruct ena_admin_aenq_common_desc *aenq_common;\n \tstruct ena_com_aenq *aenq  = &dev->aenq;\n-\tunsigned long long timestamp;\n+\tu64 timestamp;\n \tena_aenq_handler handler_cb;\n \tu16 masked_head, processed = 0;\n \tu8 phase;\n@@ -2098,8 +2121,8 @@ void 
ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)\n \t\t */\n \t\tdma_rmb();\n \n-\t\ttimestamp = (unsigned long long)aenq_common->timestamp_low |\n-\t\t\t((unsigned long long)aenq_common->timestamp_high << 32);\n+\t\ttimestamp = (u64)aenq_common->timestamp_low |\n+\t\t\t((u64)aenq_common->timestamp_high << 32);\n \t\tENA_TOUCH(timestamp); /* In case debug is disabled */\n \t\tena_trc_dbg(\"AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\\n\",\n \t\t\t    aenq_common->group,\n@@ -2134,7 +2157,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)\n \tmb();\n \tENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,\n \t\t\t\tdev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);\n+#ifndef MMIOWB_NOT_DEFINED\n \tmmiowb();\n+#endif\n }\n \n int ena_com_dev_reset(struct ena_com_dev *ena_dev,\n@@ -2313,7 +2338,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)\n \tif (unlikely(ret))\n \t\treturn ret;\n \n-\tif (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {\n+\tif (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {\n \t\tena_trc_err(\"Func hash %d isn't supported by device, abort\\n\",\n \t\t\t    rss->hash_func);\n \t\treturn ENA_COM_UNSUPPORTED;\n@@ -2398,6 +2423,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,\n \t\treturn ENA_COM_INVAL;\n \t}\n \n+\trss->hash_func = func;\n \trc = ena_com_set_hash_function(ena_dev);\n \n \t/* Restore the old function */\n@@ -2893,7 +2919,9 @@ int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_de\n void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)\n {\n \tif (ena_dev->intr_moder_tbl)\n-\t\tENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);\n+\t\tENA_MEM_FREE(ena_dev->dmadev,\n+\t\t\t     ena_dev->intr_moder_tbl,\n+\t\t\t     (sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS));\n \tena_dev->intr_moder_tbl = NULL;\n }\n \n@@ -2928,7 +2956,9 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)\n \t/* if moderation is supported by device we set adaptive moderation */\n \tdelay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;\n \tena_com_update_intr_delay_resolution(ena_dev, delay_resolution);\n-\tena_com_enable_adaptive_moderation(ena_dev);\n+\n+\t/* Disable adaptive moderation by default - can be enabled later */\n+\tena_com_disable_adaptive_moderation(ena_dev);\n \n \treturn 0;\n err:\n@@ -3036,7 +3066,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,\n \t\t\t    struct ena_llq_configurations *llq_default_cfg)\n {\n \tint rc;\n-\tint size;\n+\tstruct ena_com_llq_info *llq_info = &(ena_dev->llq_info);;\n \n \tif (!llq_features->max_llq_num) {\n \t\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n@@ -3047,14 +3077,12 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,\n \tif (rc)\n \t\treturn rc;\n \n-\t/* Validate the descriptor is not too big */\n-\tsize = ena_dev->tx_max_header_size;\n-\tsize += ena_dev->llq_info.descs_num_before_header *\n-\t\tsizeof(struct ena_eth_io_tx_desc);\n+\tena_dev->tx_max_header_size = llq_info->desc_list_entry_size -\n+\t\t(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));\n \n-\tif (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {\n+\tif (ena_dev->tx_max_header_size == 0) {\n \t\tena_trc_err(\"the size of the LLQ entry is smaller than needed\\n\");\n-\t\treturn ENA_COM_INVAL;\n+\t\treturn -EINVAL;\n \t}\n \n \tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;\ndiff --git 
a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h\nindex ef42bd4f5..f2ef26c91 100644\n--- a/drivers/net/ena/base/ena_com.h\n+++ b/drivers/net/ena/base/ena_com.h\n@@ -7,7 +7,6 @@\n #define ENA_COM\n \n #include \"ena_plat.h\"\n-#include \"ena_includes.h\"\n \n #define ENA_MAX_NUM_IO_QUEUES\t\t128U\n /* We need to queues for each IO (on for Tx and one for Rx) */\n@@ -112,7 +111,6 @@ struct ena_com_tx_meta {\n };\n \n struct ena_com_llq_info {\n-\tbool inline_header;\n \tu16 header_location_ctrl;\n \tu16 desc_stride_ctrl;\n \tu16 desc_list_entry_size_ctrl;\n@@ -248,6 +246,9 @@ struct ena_com_admin_queue {\n \t/* Indicate if the admin queue should poll for completion */\n \tbool polling;\n \n+\t/* Define if fallback to polling mode should occur */\n+\tbool auto_polling;\n+\n \tu16 curr_cmd_id;\n \n \t/* Indicate that the ena was initialized and can\n@@ -512,7 +513,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);\n  */\n void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);\n \n-/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode\n+/* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode\n  * @ena_dev: ENA communication layer struct\n  *\n  * Get the admin completion mode.\n@@ -522,7 +523,18 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);\n  *\n  * @return state\n  */\n-bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);\n+bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev);\n+\n+/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode\n+ * @ena_dev: ENA communication layer struct\n+ * @polling: Enable/Disable polling mode\n+ *\n+ * Set the autopolling mode.\n+ * If autopolling is on:\n+ * In case of missing interrupt when data is available switch to polling.\n+ */\n+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,\n+\t\t\t\t\t bool polling);\n \n /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler\n  * @ena_dev: ENA communication layer struct\n@@ -985,10 +997,10 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,\n \t\t\t\t       enum ena_intr_moder_level level,\n \t\t\t\t       struct ena_intr_moder_entry *entry);\n \n-\n /* ena_com_config_dev_mode - Configure the placement policy of the device.\n  * @ena_dev: ENA communication layer struct\n- * @llq_features: LLQ feature descriptor, retrieve via ena_com_get_dev_attr_feat.\n+ * @llq_features: LLQ feature descriptor, retrieve via\n+ *\t\t   ena_com_get_dev_attr_feat.\n  * @ena_llq_config: The default driver LLQ parameters configurations\n  */\n int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,\n@@ -1115,7 +1127,7 @@ static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer\n \tbuf = bounce_buf_ctrl->base_buffer +\n \t\t(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;\n \n-\tprefetch(bounce_buf_ctrl->base_buffer +\n+\tprefetchw(bounce_buf_ctrl->base_buffer +\n \t\t(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);\n \n \treturn buf;\ndiff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h\nindex cd81e891d..fb4d4d03f 100644\n--- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h\n+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h\n@@ -382,6 +382,10 @@ struct ena_admin_basic_stats {\n \tuint32_t rx_drops_low;\n \n \tuint32_t rx_drops_high;\n+\n+\tuint32_t tx_drops_low;\n+\n+\tuint32_t 
tx_drops_high;\n };\n \n struct ena_admin_acq_get_stats_resp {\n@@ -794,6 +798,14 @@ struct ena_admin_host_info {\n \tuint16_t num_cpus;\n \n \tuint16_t reserved;\n+\n+\t/* 0 : mutable_rss_table_size\n+\t * 1 : rx_offset\n+\t * 2 : interrupt_moderation\n+\t * 3 : map_rx_buf_bidirectional\n+\t * 31:4 : reserved\n+\t */\n+\tuint32_t driver_supported_features;\n };\n \n struct ena_admin_rss_ind_table_entry {\n@@ -812,8 +824,8 @@ struct ena_admin_feature_rss_ind_table {\n \t/* table size (2^size) */\n \tuint16_t size;\n \n-\t/* 0 : one_entry_update - The FW supports setting a\n-\t *    single RSS table entry\n+\t/* 0 : one_entry_update - The ENA device supports\n+\t *    setting a single RSS table entry\n \t */\n \tuint8_t flags;\n \n@@ -1006,6 +1018,10 @@ struct ena_admin_aenq_keep_alive_desc {\n \tuint32_t rx_drops_low;\n \n \tuint32_t rx_drops_high;\n+\n+\tuint32_t tx_drops_low;\n+\n+\tuint32_t tx_drops_high;\n };\n \n struct ena_admin_ena_mmio_req_read_less_resp {\n@@ -1105,6 +1121,13 @@ struct ena_admin_ena_mmio_req_read_less_resp {\n #define ENA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)\n #define ENA_ADMIN_HOST_INFO_BUS_SHIFT                       8\n #define ENA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)\n+#define ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK     BIT(0)\n+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT                 1\n+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK                  BIT(1)\n+#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT      2\n+#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK       BIT(2)\n+#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT  3\n+#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK   BIT(3)\n \n /* feature_rss_ind_table */\n #define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0)\n@@ -1526,6 +1549,46 @@ static inline void set_ena_admin_host_info_bus(struct ena_admin_host_info *p, ui\n \tp->bdf |= (val << ENA_ADMIN_HOST_INFO_BUS_SHIFT) & ENA_ADMIN_HOST_INFO_BUS_MASK;\n }\n \n+static inline uint32_t get_ena_admin_host_info_mutable_rss_table_size(const struct ena_admin_host_info *p)\n+{\n+\treturn p->driver_supported_features & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK;\n+}\n+\n+static inline void set_ena_admin_host_info_mutable_rss_table_size(struct ena_admin_host_info *p, uint32_t val)\n+{\n+\tp->driver_supported_features |= val & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK;\n+}\n+\n+static inline uint32_t get_ena_admin_host_info_rx_offset(const struct ena_admin_host_info *p)\n+{\n+\treturn (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK) >> ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT;\n+}\n+\n+static inline void set_ena_admin_host_info_rx_offset(struct ena_admin_host_info *p, uint32_t val)\n+{\n+\tp->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT) & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;\n+}\n+\n+static inline uint32_t get_ena_admin_host_info_interrupt_moderation(const struct ena_admin_host_info *p)\n+{\n+\treturn (p->driver_supported_features & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK) >> ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT;\n+}\n+\n+static inline void set_ena_admin_host_info_interrupt_moderation(struct ena_admin_host_info *p, uint32_t val)\n+{\n+\tp->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT) & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;\n+}\n+\n+static inline uint32_t get_ena_admin_host_info_map_rx_buf_bidirectional(const struct ena_admin_host_info 
*p)\n+{\n+\treturn (p->driver_supported_features & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK) >> ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT;\n+}\n+\n+static inline void set_ena_admin_host_info_map_rx_buf_bidirectional(struct ena_admin_host_info *p, uint32_t val)\n+{\n+\tp->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT) & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK;\n+}\n+\n static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p)\n {\n \treturn p->flags & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK;\n@@ -1557,4 +1620,4 @@ static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_ad\n }\n \n #endif /* !defined(DEFS_LINUX_MAINLINE) */\n-#endif /*_ENA_ADMIN_H_ */\n+#endif /* _ENA_ADMIN_H_ */\ndiff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h\nindex 759bd2397..1818c29a8 100644\n--- a/drivers/net/ena/base/ena_defs/ena_common_defs.h\n+++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h\n@@ -9,14 +9,10 @@\n #define ENA_COMMON_SPEC_VERSION_MAJOR        2\n #define ENA_COMMON_SPEC_VERSION_MINOR        0\n \n-/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */\n struct ena_common_mem_addr {\n \tuint32_t mem_addr_low;\n \n-\tuint16_t mem_addr_high;\n-\n-\t/* MBZ */\n-\tuint16_t reserved16;\n+\tuint32_t mem_addr_high;\n };\n \n-#endif /*_ENA_COMMON_H_ */\n+#endif /* _ENA_COMMON_H_ */\ndiff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h\nindex 82fe03a95..108bed852 100644\n--- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h\n+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h\n@@ -215,7 +215,7 @@ struct ena_eth_io_rx_cdesc_base {\n \t * 16 : l4_csum_checked - L4 checksum was verified\n \t *    (could be OK or error), when cleared the status of\n \t *    checksum is unknown\n-\t * 23:17 : reserved16\n+\t * 23:17 : reserved17 - MBZ\n \t * 24 : phase\n \t * 25 : l3_csum2 - second checksum engine result\n \t * 26 : first - Indicates first descriptor in\n@@ -238,7 +238,9 @@ struct ena_eth_io_rx_cdesc_base {\n \n \tuint16_t sub_qid;\n \n-\tuint16_t reserved;\n+\tuint8_t offset;\n+\n+\tuint8_t reserved;\n };\n \n /* 8-word format */\n@@ -938,4 +940,4 @@ static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_nu\n }\n \n #endif /* !defined(DEFS_LINUX_MAINLINE) */\n-#endif /*_ENA_ETH_IO_H_ */\n+#endif /* _ENA_ETH_IO_H_ */\ndiff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h\nindex fe4bf5140..019b1fdb7 100644\n--- a/drivers/net/ena/base/ena_defs/ena_gen_info.h\n+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h\n@@ -3,5 +3,5 @@\n  * All rights reserved.\n  */\n \n-#define\tENA_GEN_DATE\t\"Wed Sep 26 13:46:28 DST 2018\"\n-#define\tENA_GEN_COMMIT\t\"aac865f\"\n+#define\tENA_GEN_DATE\t\"Wed Mar 20 10:40:42 STD 2019\"\n+#define\tENA_GEN_COMMIT\t\"1476830\"\ndiff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h\nindex 2118ddf32..2d6bf5486 100644\n--- a/drivers/net/ena/base/ena_defs/ena_regs_defs.h\n+++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h\n@@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types {\n \tENA_REGS_RESET_USER_TRIGGER                 = 12,\n \tENA_REGS_RESET_GENERIC                      = 13,\n \tENA_REGS_RESET_MISS_INTERRUPT               = 14,\n+\tENA_REGS_RESET_LAST,\n };\n \n /* 
ena_registers offsets */\n@@ -128,4 +129,4 @@ enum ena_regs_reset_reason_types {\n #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT          16\n #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK           0xffff0000\n \n-#endif /*_ENA_REGS_H_ */\n+#endif /* _ENA_REGS_H_ */\ndiff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c\nindex 2aede7b5a..d4d44226d 100644\n--- a/drivers/net/ena/base/ena_eth_com.c\n+++ b/drivers/net/ena/base/ena_eth_com.c\n@@ -5,7 +5,7 @@\n \n #include \"ena_eth_com.h\"\n \n-static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(\n+static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(\n \tstruct ena_com_io_cq *io_cq)\n {\n \tstruct ena_eth_io_rx_cdesc_base *cdesc;\n@@ -32,7 +32,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(\n \treturn cdesc;\n }\n \n-static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)\n+static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)\n {\n \tu16 tail_masked;\n \tu32 offset;\n@@ -44,7 +44,7 @@ static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)\n \treturn (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);\n }\n \n-static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,\n+static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,\n \t\t\t\t\t\t     u8 *bounce_buffer)\n {\n \tstruct ena_com_llq_info *llq_info = &io_sq->llq_info;\n@@ -56,8 +56,8 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq\n \tdst_offset = dst_tail_mask * llq_info->desc_list_entry_size;\n \n \tif (is_llq_max_tx_burst_exists(io_sq)) {\n-\t\tif (!io_sq->entries_in_tx_burst_left) {\n-\t\t\tena_trc_err(\"Error: trying to write an llq entry to a full llq entries cache\\n\");\n+\t\tif (unlikely(!io_sq->entries_in_tx_burst_left)) {\n+\t\t\tena_trc_err(\"Error: trying to send more packets than tx burst allows\\n\");\n \t\t\treturn ENA_COM_NO_SPACE;\n \t\t}\n \n@@ -85,7 +85,7 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq\n \treturn ENA_COM_OK;\n }\n \n-static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,\n+static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,\n \t\t\t\t\t\t u8 *header_src,\n \t\t\t\t\t\t u16 header_len)\n {\n@@ -94,7 +94,7 @@ static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,\n \tu8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;\n \tu16 header_offset;\n \n-\tif (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)\n+\tif (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))\n \t\treturn 0;\n \n \theader_offset =\n@@ -115,7 +115,7 @@ static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,\n \treturn 0;\n }\n \n-static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)\n+static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)\n {\n \tstruct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;\n \tu8 *bounce_buffer;\n@@ -135,13 +135,13 @@ static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)\n \treturn sq_desc;\n }\n \n-static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)\n+static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)\n {\n \tstruct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;\n \tstruct ena_com_llq_info *llq_info = &io_sq->llq_info;\n \tint rc;\n \n-\tif (io_sq->mem_queue_type == 
ENA_ADMIN_PLACEMENT_POLICY_HOST)\n+\tif (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))\n \t\treturn ENA_COM_OK;\n \n \t/* bounce buffer was used, so write it and get a new one */\n@@ -153,8 +153,8 @@ static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)\n \n \t\tpkt_ctrl->curr_bounce_buf =\n \t\t\tena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);\n-\t\t\tmemset(io_sq->llq_buf_ctrl.curr_bounce_buf,\n-\t\t\t       0x0, llq_info->desc_list_entry_size);\n+\t\tmemset(io_sq->llq_buf_ctrl.curr_bounce_buf,\n+\t\t       0x0, llq_info->desc_list_entry_size);\n \t}\n \n \tpkt_ctrl->idx = 0;\n@@ -162,7 +162,7 @@ static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)\n \treturn ENA_COM_OK;\n }\n \n-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)\n+static void *get_sq_desc(struct ena_com_io_sq *io_sq)\n {\n \tif (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)\n \t\treturn get_sq_desc_llq(io_sq);\n@@ -170,7 +170,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)\n \treturn get_sq_desc_regular_queue(io_sq);\n }\n \n-static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)\n+static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)\n {\n \tstruct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;\n \tstruct ena_com_llq_info *llq_info = &io_sq->llq_info;\n@@ -188,7 +188,7 @@ static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)\n \t\t\t       0x0, llq_info->desc_list_entry_size);\n \n \t\tpkt_ctrl->idx = 0;\n-\t\tif (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)\n+\t\tif (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))\n \t\t\tpkt_ctrl->descs_left_in_line = 1;\n \t\telse\n \t\t\tpkt_ctrl->descs_left_in_line =\n@@ -198,7 +198,7 @@ static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)\n \treturn ENA_COM_OK;\n }\n \n-static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)\n+static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)\n {\n \tif (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)\n \t\treturn ena_com_sq_update_llq_tail(io_sq);\n@@ -212,7 +212,7 @@ static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)\n \treturn ENA_COM_OK;\n }\n \n-static inline struct ena_eth_io_rx_cdesc_base *\n+static struct ena_eth_io_rx_cdesc_base *\n \tena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)\n {\n \tidx &= (io_cq->q_depth - 1);\n@@ -221,7 +221,7 @@ static inline struct ena_eth_io_rx_cdesc_base *\n \t\tidx * io_cq->cdesc_entry_size_in_bytes);\n }\n \n-static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,\n+static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,\n \t\t\t\t\t   u16 *first_cdesc_idx)\n {\n \tstruct ena_eth_io_rx_cdesc_base *cdesc;\n@@ -258,24 +258,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,\n \treturn count;\n }\n \n-static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,\n-\t\t\t\t\t     struct ena_com_tx_ctx *ena_tx_ctx)\n-{\n-\tint rc;\n-\n-\tif (ena_tx_ctx->meta_valid) {\n-\t\trc = memcmp(&io_sq->cached_tx_meta,\n-\t\t\t    &ena_tx_ctx->ena_meta,\n-\t\t\t    sizeof(struct ena_com_tx_meta));\n-\n-\t\tif (unlikely(rc != 0))\n-\t\t\treturn true;\n-\t}\n-\n-\treturn false;\n-}\n-\n-static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,\n+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,\n 
\t\t\t\t\t\t\tstruct ena_com_tx_ctx *ena_tx_ctx)\n {\n \tstruct ena_eth_io_tx_meta_desc *meta_desc = NULL;\n@@ -324,7 +307,7 @@ static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io\n \treturn ena_com_sq_update_tail(io_sq);\n }\n \n-static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,\n+static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,\n \t\t\t\t\tstruct ena_eth_io_rx_cdesc_base *cdesc)\n {\n \tena_rx_ctx->l3_proto = cdesc->status &\n@@ -360,39 +343,6 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,\n /*****************************     API      **********************************/\n /*****************************************************************************/\n \n-bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,\n-\t\t\t\tstruct ena_com_tx_ctx *ena_tx_ctx)\n-{\n-\tu16 num_descs;\n-\tint num_entries_needed;\n-\tint descs_after_first_entry;\n-\tbool have_meta;\n-\tstruct ena_com_llq_info *llq_info;\n-\n-\tif (!is_llq_max_tx_burst_exists(io_sq))\n-\t\treturn false;\n-\n-\tnum_entries_needed = 1;\n-\tllq_info = &io_sq->llq_info;\n-\tnum_descs = ena_tx_ctx->num_bufs;\n-\thave_meta = ena_tx_ctx->meta_valid &&\n-\t\t    ena_com_meta_desc_changed(io_sq, ena_tx_ctx);\n-\n-\tif (have_meta)\n-\t\t++num_descs;\n-\n-\tif (num_descs > llq_info->descs_num_before_header) {\n-\t\tdescs_after_first_entry = num_descs - llq_info->descs_num_before_header;\n-\t\tnum_entries_needed += DIV_ROUND_UP(descs_after_first_entry,\n-\t\t\t\t\t\t   llq_info->descs_per_entry);\n-\t}\n-\n-\tena_trc_dbg(\"queue: %d num_descs: %d num_entries_needed: %d\\n\",\n-\t\t    io_sq->qid, num_descs, num_entries_needed);\n-\n-\treturn num_entries_needed > io_sq->entries_in_tx_burst_left;\n-}\n-\n int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,\n \t\t       struct ena_com_tx_ctx *ena_tx_ctx,\n \t\t       int *nb_hw_desc)\n@@ -411,7 +361,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,\n \t\t \"wrong Q type\");\n \n \t/* num_bufs +1 for potential meta desc */\n-\tif (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {\n+\tif (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {\n \t\tena_trc_dbg(\"Not enough space in the tx queue\\n\");\n \t\treturn ENA_COM_NO_MEM;\n \t}\n@@ -422,7 +372,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,\n \t\treturn ENA_COM_INVAL;\n \t}\n \n-\tif (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)\n+\tif (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV\n \t\t     && !buffer_to_push))\n \t\treturn ENA_COM_INVAL;\n \n@@ -547,7 +497,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,\n \tstruct ena_eth_io_rx_cdesc_base *cdesc = NULL;\n \tu16 cdesc_idx = 0;\n \tu16 nb_hw_desc;\n-\tu16 i;\n+\tu16 i = 0;\n \n \tENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,\n \t\t \"wrong Q type\");\n@@ -567,13 +517,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,\n \t\treturn ENA_COM_NO_SPACE;\n \t}\n \n-\tfor (i = 0; i < nb_hw_desc; i++) {\n-\t\tcdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);\n+\tcdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);\n+\tena_rx_ctx->pkt_offset = cdesc->offset;\n \n+\tdo {\n \t\tena_buf->len = cdesc->length;\n \t\tena_buf->req_id = cdesc->req_id;\n \t\tena_buf++;\n-\t}\n+\t} while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i)));\n \n \t/* Update SQ head ptr */\n \tio_sq->next_to_comp += nb_hw_desc;\n@@ -608,10 +559,10 @@ int ena_com_add_single_rx_desc(struct 
ena_com_io_sq *io_sq,\n \n \tdesc->length = ena_buf->len;\n \n-\tdesc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;\n-\tdesc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;\n-\tdesc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;\n-\tdesc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;\n+\tdesc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |\n+\t\tENA_ETH_IO_RX_DESC_LAST_MASK |\n+\t\t(io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |\n+\t\tENA_ETH_IO_RX_DESC_COMP_REQ_MASK;\n \n \tdesc->req_id = req_id;\n \ndiff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h\nindex 820057b8b..e37b642d4 100644\n--- a/drivers/net/ena/base/ena_eth_com.h\n+++ b/drivers/net/ena/base/ena_eth_com.h\n@@ -43,17 +43,15 @@ struct ena_com_rx_ctx {\n \tenum ena_eth_io_l4_proto_index l4_proto;\n \tbool l3_csum_err;\n \tbool l4_csum_err;\n-\tbool l4_csum_checked;\n+\tu8 l4_csum_checked;\n \t/* fragmented packet */\n \tbool frag;\n \tu32 hash;\n \tu16 descs;\n \tint max_bufs;\n+\tu8 pkt_offset;\n };\n \n-bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,\n-\t\t\t\tstruct ena_com_tx_ctx *ena_tx_ctx);\n-\n int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,\n \t\t       struct ena_com_tx_ctx *ena_tx_ctx,\n \t\t       int *nb_hw_desc);\n@@ -74,7 +72,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,\n \tENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);\n }\n \n-static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)\n+static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)\n {\n \tu16 tail, next_to_comp, cnt;\n \n@@ -92,7 +90,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,\n \tint temp;\n \n \tif (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)\n-\t\treturn ena_com_free_desc(io_sq) >= required_buffers;\n+\t\treturn ena_com_free_q_entries(io_sq) >= required_buffers;\n \n \t/* This calculation doesn't need to be 100% accurate. 
So to reduce\n \t * the calculation overhead just Subtract 2 lines from the free descs\n@@ -101,7 +99,18 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,\n \t */\n \ttemp = required_buffers / io_sq->llq_info.descs_per_entry + 2;\n \n-\treturn ena_com_free_desc(io_sq) > temp;\n+\treturn ena_com_free_q_entries(io_sq) > temp;\n+}\n+\n+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,\n+\t\t\t\t\t     struct ena_com_tx_ctx *ena_tx_ctx)\n+{\n+\tif (!ena_tx_ctx->meta_valid)\n+\t\treturn false;\n+\n+\treturn !!memcmp(&io_sq->cached_tx_meta,\n+\t\t\t&ena_tx_ctx->ena_meta,\n+\t\t\tsizeof(struct ena_com_tx_meta));\n }\n \n static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)\n@@ -110,10 +119,39 @@ static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)\n \t       io_sq->llq_info.max_entries_in_tx_burst > 0;\n }\n \n+static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,\n+\t\t\t\t\t      struct ena_com_tx_ctx *ena_tx_ctx)\n+{\n+\tstruct ena_com_llq_info *llq_info;\n+\tint descs_after_first_entry;\n+\tint num_entries_needed = 1;\n+\tu16 num_descs;\n+\n+\tif (!is_llq_max_tx_burst_exists(io_sq))\n+\t\treturn false;\n+\n+\tllq_info = &io_sq->llq_info;\n+\tnum_descs = ena_tx_ctx->num_bufs;\n+\n+\tif (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))\n+\t\t++num_descs;\n+\n+\tif (num_descs > llq_info->descs_num_before_header) {\n+\t\tdescs_after_first_entry = num_descs - llq_info->descs_num_before_header;\n+\t\tnum_entries_needed += DIV_ROUND_UP(descs_after_first_entry,\n+\t\t\t\t\t\t   llq_info->descs_per_entry);\n+\t}\n+\n+\tena_trc_dbg(\"queue: %d num_descs: %d num_entries_needed: %d\\n\",\n+\t\t    io_sq->qid, num_descs, num_entries_needed);\n+\n+\treturn num_entries_needed > io_sq->entries_in_tx_burst_left;\n+}\n+\n static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)\n {\n-\tu16 tail = io_sq->tail;\n \tu16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;\n+\tu16 tail = io_sq->tail;\n \n \tena_trc_dbg(\"write submission queue doorbell for queue: %d tail: %d\\n\",\n \t\t    io_sq->qid, tail);\n@@ -134,15 +172,17 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)\n \tu16 unreported_comp, head;\n \tbool need_update;\n \n-\thead = io_cq->head;\n-\tunreported_comp = head - io_cq->last_head_update;\n-\tneed_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);\n-\n-\tif (io_cq->cq_head_db_reg && need_update) {\n-\t\tena_trc_dbg(\"Write completion queue doorbell for queue %d: head: %d\\n\",\n-\t\t\t    io_cq->qid, head);\n-\t\tENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);\n-\t\tio_cq->last_head_update = head;\n+\tif (unlikely(io_cq->cq_head_db_reg)) {\n+\t\thead = io_cq->head;\n+\t\tunreported_comp = head - io_cq->last_head_update;\n+\t\tneed_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);\n+\n+\t\tif (unlikely(need_update)) {\n+\t\t\tena_trc_dbg(\"Write completion queue doorbell for queue %d: head: %d\\n\",\n+\t\t\t\t    io_cq->qid, head);\n+\t\t\tENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);\n+\t\t\tio_cq->last_head_update = head;\n+\t\t}\n \t}\n \n \treturn 0;\n@@ -176,7 +216,8 @@ static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)\n \t\tio_cq->phase ^= 1;\n }\n \n-static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)\n+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,\n+\t\t\t\t\t     u16 
*req_id)\n {\n \tu8 expected_phase, cdesc_phase;\n \tstruct ena_eth_io_tx_cdesc *cdesc;\ndiff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h\nindex 9e1492cac..5d03ceace 100644\n--- a/drivers/net/ena/base/ena_plat_dpdk.h\n+++ b/drivers/net/ena/base/ena_plat_dpdk.h\n@@ -170,6 +170,7 @@ do {                                                                   \\\n #define ena_wait_event_t ena_wait_queue_t\n #define ENA_MIGHT_SLEEP()\n \n+#define ena_time_t uint64_t\n #define ENA_TIME_EXPIRE(timeout)  (timeout < rte_get_timer_cycles())\n #define ENA_GET_SYSTEM_TIMEOUT(timeout_us)                             \\\n        (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())\n@@ -232,7 +233,8 @@ extern uint32_t ena_alloc_cnt;\n \t} while (0)\n \n #define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)\n-#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })\n+#define ENA_MEM_FREE(dmadev, ptr, size)\t\t\t\t\t\\\n+\t({ ENA_TOUCH(dmadev); ENA_TOUCH(size); rte_free(ptr); })\n \n #define ENA_DB_SYNC(mem_handle) ((void)mem_handle)\n \n@@ -260,6 +262,7 @@ extern uint32_t ena_alloc_cnt;\n #define might_sleep()\n \n #define prefetch(x) rte_prefetch0(x)\n+#define prefetchw(x) prefetch(x)\n \n #define lower_32_bits(x) ((uint32_t)(x))\n #define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))\n@@ -290,4 +293,6 @@ extern uint32_t ena_alloc_cnt;\n \n #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))\n \n+#include \"ena_includes.h\"\n+\n #endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */\ndiff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c\nindex 8bbd80dfb..f32963558 100644\n--- a/drivers/net/ena/ena_ethdev.c\n+++ b/drivers/net/ena/ena_ethdev.c\n@@ -1169,7 +1169,7 @@ static int ena_queue_start(struct ena_ring *ring)\n \n \tif (ring->type == ENA_RING_TYPE_TX) {\n \t\tring->tx_stats.available_desc =\n-\t\t\tena_com_free_desc(ring->ena_com_io_sq);\n+\t\t\tena_com_free_q_entries(ring->ena_com_io_sq);\n \t\treturn 0;\n \t}\n \n@@ -2357,7 +2357,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\ttx_ring->tx_stats.bytes += total_length;\n \t}\n \ttx_ring->tx_stats.available_desc =\n-\t\tena_com_free_desc(tx_ring->ena_com_io_sq);\n+\t\tena_com_free_q_entries(tx_ring->ena_com_io_sq);\n \n \t/* If there are ready packets to be xmitted... */\n \tif (sent_idx > 0) {\n@@ -2392,7 +2392,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\tbreak;\n \t}\n \ttx_ring->tx_stats.available_desc =\n-\t\tena_com_free_desc(tx_ring->ena_com_io_sq);\n+\t\tena_com_free_q_entries(tx_ring->ena_com_io_sq);\n \n \tif (total_tx_descs > 0) {\n \t\t/* acknowledge completion of sent packets */\n",
    "prefixes": [
        "1/2"
    ]
}
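
The PUT and PATCH methods advertised in the Allow header above modify the writable fields of this same resource (for example state, delegate or archived). A minimal sketch of a partial update, again with Python `requests`; the API token and the new state value are placeholders, and maintainer rights on the DPDK project are assumed:

    # Partially update the patch via HTTP PATCH; only the fields being
    # changed need to be sent. The token below is a placeholder.
    import requests

    url = "http://patches.dpdk.org/api/patches/63837/"
    headers = {"Authorization": "Token <your-api-token>"}

    resp = requests.patch(url, headers=headers, json={"state": "accepted"})
    resp.raise_for_status()
    print(resp.json()["state"])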