get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
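
For reference, a minimal sketch of exercising these methods from Python with the requests library. Only the endpoint URL and the "accepted" state value are taken from this page; the token value is a placeholder, and the assumption that write access needs a Patchwork API token with maintainer rights on the project comes from standard Patchwork deployments, not from this page.

import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # placeholder; generate a real token from your Patchwork profile

# get: show a patch (read access needs no authentication)
patch = requests.get(f"{BASE}/patches/48864/", timeout=30).json()
print(patch["name"], "->", patch["state"])

# patch: partially update a patch, e.g. move it to a new state
resp = requests.patch(
    f"{BASE}/patches/48864/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
    timeout=30,
)
resp.raise_for_status()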

GET /api/patches/48864/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48864,
    "url": "https://patches.dpdk.org/api/patches/48864/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20181214131846.22439-7-mk@semihalf.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20181214131846.22439-7-mk@semihalf.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20181214131846.22439-7-mk@semihalf.com",
    "date": "2018-12-14T13:18:32",
    "name": "[06/20] net/ena: add LLQv2 support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "274f8336c566ecaed12025cc6abc4be4cce4a02b",
    "submitter": {
        "id": 786,
        "url": "https://patches.dpdk.org/api/people/786/?format=api",
        "name": "Michal Krawczyk",
        "email": "mk@semihalf.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20181214131846.22439-7-mk@semihalf.com/mbox/",
    "series": [
        {
            "id": 2783,
            "url": "https://patches.dpdk.org/api/series/2783/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2783",
            "date": "2018-12-14T13:18:26",
            "name": "net/ena: ENAv2 release",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/2783/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48864/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/48864/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id F02FD1BB7D;\n\tFri, 14 Dec 2018 14:19:08 +0100 (CET)",
            "from mail-lj1-f196.google.com (mail-lj1-f196.google.com\n\t[209.85.208.196]) by dpdk.org (Postfix) with ESMTP id AB0C61BB3E\n\tfor <dev@dpdk.org>; Fri, 14 Dec 2018 14:19:04 +0100 (CET)",
            "by mail-lj1-f196.google.com with SMTP id v15-v6so4846490ljh.13\n\tfor <dev@dpdk.org>; Fri, 14 Dec 2018 05:19:04 -0800 (PST)",
            "from mkPC.semihalf.local (31-172-191-173.noc.fibertech.net.pl.\n\t[31.172.191.173]) by smtp.gmail.com with ESMTPSA id\n\to25sm873884lfd.29.2018.12.14.05.19.02\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tFri, 14 Dec 2018 05:19:03 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=semihalf-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=YISctMDiggCy11WwPwF3pNKU9vcXmo0uCF3bFp9Ur6A=;\n\tb=X6Vp73KHvz5F8Dj+HNOxqhfj5nhv+5rUhXg0riyfekHurS0ncGbsZwW1dnv9R94XCH\n\tSjIkn3wi9z1DiUbCbApi4tYlYjXUp4Iz/Sx0vSJbTc1hTgmM/PGL6D1y5ER0gjmBdcNj\n\tKELIt3ldMCbmbV3tPHfreRm4umOxIvYxUmWlR5PRzxxy41gqgSku9w5ysGP1485wX1mc\n\t6X/3TXuztndUpAV6g+16GwsXHrkYPueqBkoyV0hJjgdFE/NYeqv+CcNDOZdpGiXOoujI\n\tESSZU5CoSUr8GJaWzIF1lLf+e1+4DfYwLt3rZM48Ep+wW8nq8w2Q3CxSCPn4uLzdlt9K\n\tPLIA==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=YISctMDiggCy11WwPwF3pNKU9vcXmo0uCF3bFp9Ur6A=;\n\tb=AOi0JEuLdBE5S19qStxm7G/fobyC6xKJ1D/syLBj5J0enRGu/ytfPNl9YGHhEZm8eU\n\tK9164eaE3/M3E1JjLGPsbU2IIuViJajg+f0DEVzvApM/BUDqMvc3rz0ssX5DvQsE9oPy\n\tbVafuGM9dONWbZND82Hn8DPd5rjsEHikI7Ziv9hK787tr/zQZTpLAzKkMGA69Cz7gE1o\n\tWzHutHrZTEVySU5qpzeX4BhnaxnK3H9pXGPgvfvFNMktaDu78ZhJB9wXhf20zQND/i8V\n\tnm5cKRjQVUDAtim+xRS412fSJZTjw6BZfZv4AmDoPDScH4pASJaPdVyzJdzlpVZ1hYEn\n\t/0Ww==",
        "X-Gm-Message-State": "AA+aEWa16TX27EZIcVkQPXxuRlnslC/buFSqPPvy54DG4Gw5GKf9ZkBd\n\toBMmMCJYbBhPbtyxuDC0jGzyw+hyyqo=",
        "X-Google-Smtp-Source": "AFSGD/XglXtMz1sHBlMZjY2cI+ZE1+uuQVJlcRSeWTWRK0IZMNGFLFAASfOcm3iPWdpErxQO7TZ3yg==",
        "X-Received": "by 2002:a2e:880a:: with SMTP id\n\tx10-v6mr2053624ljh.174.1544793543753; \n\tFri, 14 Dec 2018 05:19:03 -0800 (PST)",
        "From": "Michal Krawczyk <mk@semihalf.com>",
        "To": "dev@dpdk.org",
        "Cc": "gtzalik@dpdk.org, mw@dpdk.org, matua@amazon.com, rk@semihalf.com,\n\tMichal Krawczyk <mk@semihalf.com>",
        "Date": "Fri, 14 Dec 2018 14:18:32 +0100",
        "Message-Id": "<20181214131846.22439-7-mk@semihalf.com>",
        "X-Mailer": "git-send-email 2.14.1",
        "In-Reply-To": "<20181214131846.22439-1-mk@semihalf.com>",
        "References": "<20181214131846.22439-1-mk@semihalf.com>",
        "Subject": "[dpdk-dev] [PATCH 06/20] net/ena: add LLQv2 support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "LLQ (Low Latency Queue) is the feature that allows pushing header\ndirectly to the device through PCI before even DMA is triggered.\nIt reduces latency, bacause device can start preparing packet before\npayload is sent through DMA.\n\nSigned-off-by: Michal Krawczyk <mk@semihalf.com>\n---\n drivers/net/ena/ena_ethdev.c | 220 ++++++++++++++++++++++++++++++++++++-------\n drivers/net/ena/ena_ethdev.h |   3 +\n 2 files changed, 190 insertions(+), 33 deletions(-)",
    "diff": "diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c\nindex 505d9bf30..73070ad96 100644\n--- a/drivers/net/ena/ena_ethdev.c\n+++ b/drivers/net/ena/ena_ethdev.c\n@@ -116,6 +116,9 @@ struct ena_stats {\n #define ENA_STAT_GLOBAL_ENTRY(stat) \\\n \tENA_STAT_ENTRY(stat, dev)\n \n+#define ENA_MAX_RING_SIZE_RX 1024\n+#define ENA_MAX_RING_SIZE_TX 1024\n+\n /*\n  * Each rte_memzone should have unique name.\n  * To satisfy it, count number of allocation and add it to name.\n@@ -806,6 +809,9 @@ static void ena_tx_queue_release(void *queue)\n \tena_tx_queue_release_bufs(ring);\n \n \t/* Free ring resources */\n+\tif (ring->push_buf_intermediate_buf)\n+\t\trte_free(ring->push_buf_intermediate_buf);\n+\n \tif (ring->tx_buffer_info)\n \t\trte_free(ring->tx_buffer_info);\n \n@@ -814,6 +820,7 @@ static void ena_tx_queue_release(void *queue)\n \n \tring->empty_tx_reqs = NULL;\n \tring->tx_buffer_info = NULL;\n+\tring->push_buf_intermediate_buf = NULL;\n \n \tring->configured = 0;\n \n@@ -937,15 +944,30 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)\n static int\n ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)\n {\n-\tuint32_t tx_queue_size, rx_queue_size;\n+\tstruct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;\n+\tstruct ena_com_dev *ena_dev = ctx->ena_dev;\n+\tuint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX;\n+\tuint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX;\n \n-\tif (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {\n+\tif (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {\n \t\tstruct ena_admin_queue_ext_feature_fields *max_queue_ext =\n \t\t\t&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;\n-\t\trx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,\n+\t\trx_queue_size = RTE_MIN(rx_queue_size,\n+\t\t\tmax_queue_ext->max_rx_cq_depth);\n+\t\trx_queue_size = RTE_MIN(rx_queue_size,\n \t\t\tmax_queue_ext->max_rx_sq_depth);\n-\t\ttx_queue_size = RTE_MIN(max_queue_ext->max_tx_cq_depth,\n-\t\t\tmax_queue_ext->max_tx_sq_depth);\n+\t\ttx_queue_size = RTE_MIN(tx_queue_size,\n+\t\t\tmax_queue_ext->max_tx_cq_depth);\n+\n+\t\tif (ena_dev->tx_mem_queue_type ==\n+\t\t    ENA_ADMIN_PLACEMENT_POLICY_DEV) {\n+\t\t\ttx_queue_size = RTE_MIN(tx_queue_size,\n+\t\t\t\tllq->max_llq_depth);\n+\t\t} else {\n+\t\t\ttx_queue_size = RTE_MIN(tx_queue_size,\n+\t\t\t\tmax_queue_ext->max_tx_sq_depth);\n+\t\t}\n+\n \t\tctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,\n \t\t\tmax_queue_ext->max_per_packet_rx_descs);\n \t\tctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,\n@@ -953,9 +975,22 @@ ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)\n \t} else {\n \t\tstruct ena_admin_queue_feature_desc *max_queues =\n \t\t\t&ctx->get_feat_ctx->max_queues;\n-\t\trx_queue_size = RTE_MIN(max_queues->max_cq_depth,\n+\t\trx_queue_size = RTE_MIN(rx_queue_size,\n+\t\t\tmax_queues->max_cq_depth);\n+\t\trx_queue_size = RTE_MIN(rx_queue_size,\n \t\t\tmax_queues->max_sq_depth);\n-\t\ttx_queue_size = rx_queue_size;\n+\t\ttx_queue_size = RTE_MIN(tx_queue_size,\n+\t\t\tmax_queues->max_cq_depth);\n+\n+\t\tif (ena_dev->tx_mem_queue_type ==\n+\t\t    ENA_ADMIN_PLACEMENT_POLICY_DEV) {\n+\t\t\ttx_queue_size = RTE_MIN(tx_queue_size,\n+\t\t\t\tllq->max_llq_depth);\n+\t\t} else {\n+\t\t\ttx_queue_size = RTE_MIN(tx_queue_size,\n+\t\t\t\tmax_queues->max_sq_depth);\n+\t\t}\n+\n \t\tctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,\n \t\t\tmax_queues->max_packet_tx_descs);\n \t\tctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,\n@@ -1277,6 +1312,17 @@ static int 
ena_tx_queue_setup(struct rte_eth_dev *dev,\n \t\treturn -ENOMEM;\n \t}\n \n+\ttxq->push_buf_intermediate_buf =\n+\t\trte_zmalloc(\"txq->push_buf_intermediate_buf\",\n+\t\t\t    txq->tx_max_header_size,\n+\t\t\t    RTE_CACHE_LINE_SIZE);\n+\tif (!txq->push_buf_intermediate_buf) {\n+\t\tRTE_LOG(ERR, PMD, \"failed to alloc push buff for LLQ\\n\");\n+\t\trte_free(txq->tx_buffer_info);\n+\t\trte_free(txq->empty_tx_reqs);\n+\t\treturn -ENOMEM;\n+\t}\n+\n \tfor (i = 0; i < txq->ring_size; i++)\n \t\ttxq->empty_tx_reqs[i] = i;\n \n@@ -1592,28 +1638,87 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,\n \t}\n }\n \n+static inline void\n+set_default_llq_configurations(struct ena_llq_configurations *llq_config)\n+{\n+\tllq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;\n+\tllq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;\n+\tllq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;\n+\tllq_config->llq_num_decs_before_header =\n+\t\tENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;\n+\tllq_config->llq_ring_entry_size_value = 128;\n+}\n+\n+static int\n+ena_set_queues_placement_policy(struct ena_adapter *adapter,\n+\t\t\t\tstruct ena_com_dev *ena_dev,\n+\t\t\t\tstruct ena_admin_feature_llq_desc *llq,\n+\t\t\t\tstruct ena_llq_configurations *llq_default_configurations)\n+{\n+\tint rc;\n+\tu32 llq_feature_mask;\n+\n+\tllq_feature_mask = 1 << ENA_ADMIN_LLQ;\n+\tif (!(ena_dev->supported_features & llq_feature_mask)) {\n+\t\tRTE_LOG(INFO, PMD,\n+\t\t\t\"LLQ is not supported. Fallback to host mode policy.\\n\");\n+\t\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\t\treturn 0;\n+\t}\n+\n+\trc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);\n+\tif (unlikely(rc)) {\n+\t\tPMD_INIT_LOG(WARNING, \"Failed to config dev mode. \"\n+\t\t\t\"Fallback to host mode policy.\\n\");\n+\t\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\t\treturn 0;\n+\t}\n+\n+\t/* Nothing to config, exit */\n+\tif (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)\n+\t\treturn 0;\n+\n+\tif (!adapter->dev_mem_base) {\n+\t\tRTE_LOG(ERR, PMD, \"Unable to access LLQ bar resource. 
\"\n+\t\t\t\"Fallback to host mode policy.\\n.\");\n+\t\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\t\treturn 0;\n+\t}\n+\n+\tena_dev->mem_bar = adapter->dev_mem_base;\n+\n+\treturn 0;\n+}\n+\n static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,\n \t\t\t\t struct ena_com_dev_get_features_ctx *get_feat_ctx)\n {\n-\tuint32_t io_sq_num, io_cq_num, io_queue_num;\n+\tuint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;\n \n \t/* Regular queues capabilities */\n \tif (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {\n \t\tstruct ena_admin_queue_ext_feature_fields *max_queue_ext =\n \t\t\t&get_feat_ctx->max_queue_ext.max_queue_ext;\n-\t\tio_sq_num = max_queue_ext->max_rx_sq_num;\n-\t\tio_sq_num = RTE_MIN(io_sq_num, max_queue_ext->max_tx_sq_num);\n-\n-\t\tio_cq_num = max_queue_ext->max_rx_cq_num;\n-\t\tio_cq_num = RTE_MIN(io_cq_num, max_queue_ext->max_tx_cq_num);\n+\t\tio_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,\n+\t\t\t\t    max_queue_ext->max_rx_cq_num);\n+\t\tio_tx_sq_num = max_queue_ext->max_tx_sq_num;\n+\t\tio_tx_cq_num = max_queue_ext->max_tx_cq_num;\n \t} else {\n \t\tstruct ena_admin_queue_feature_desc *max_queues =\n \t\t\t&get_feat_ctx->max_queues;\n-\t\tio_sq_num = max_queues->max_sq_num;\n-\t\tio_cq_num = max_queues->max_cq_num;\n+\t\tio_tx_sq_num = max_queues->max_sq_num;\n+\t\tio_tx_cq_num = max_queues->max_cq_num;\n+\t\tio_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);\n \t}\n \n-\tio_queue_num = RTE_MIN(io_sq_num, io_cq_num);\n+\t/* In case of LLQ use the llq number in the get feature cmd */\n+\tif (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)\n+\t\tio_tx_sq_num = get_feat_ctx->llq.max_llq_num;\n+\n+\tio_queue_num = RTE_MIN(rte_lcore_count(), ENA_MAX_NUM_IO_QUEUES);\n+\tio_queue_num = RTE_MIN(io_queue_num, io_rx_num);\n+\tio_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num);\n+\tio_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num);\n \n \tif (unlikely(io_queue_num == 0)) {\n \t\tRTE_LOG(ERR, PMD, \"Number of IO queues should not be 0\\n\");\n@@ -1632,6 +1737,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)\n \t\t(struct ena_adapter *)(eth_dev->data->dev_private);\n \tstruct ena_com_dev *ena_dev = &adapter->ena_dev;\n \tstruct ena_com_dev_get_features_ctx get_feat_ctx;\n+\tstruct ena_llq_configurations llq_config;\n+\tconst char *queue_type_str;\n \tint rc;\n \n \tstatic int adapters_found;\n@@ -1686,11 +1793,22 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)\n \t}\n \tadapter->wd_state = wd_state;\n \n-\tena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;\n+\tset_default_llq_configurations(&llq_config);\n+\trc = ena_set_queues_placement_policy(adapter, ena_dev,\n+\t\t\t\t\t     &get_feat_ctx.llq, &llq_config);\n+\tif (unlikely(rc)) {\n+\t\tPMD_INIT_LOG(CRIT, \"Failed to set placement policy\");\n+\t\treturn rc;\n+\t}\n+\n+\tif (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)\n+\t\tqueue_type_str = \"Regular\";\n+\telse\n+\t\tqueue_type_str = \"Low latency\";\n+\tRTE_LOG(INFO, PMD, \"Placement policy: %s\\n\", queue_type_str);\n \n \tcalc_queue_ctx.ena_dev = ena_dev;\n \tcalc_queue_ctx.get_feat_ctx = &get_feat_ctx;\n-\n \tadapter->num_queues = ena_calc_io_queue_num(ena_dev,\n \t\t\t\t\t\t    &get_feat_ctx);\n \n@@ -2106,13 +2224,21 @@ static void ena_update_hints(struct ena_adapter *adapter,\n static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,\n \t\t\t\t\tstruct rte_mbuf *mbuf)\n {\n-\tint num_segments, rc;\n+\tstruct ena_com_dev *ena_dev;\n+\tint 
num_segments, header_len, rc;\n \n+\tena_dev = &tx_ring->adapter->ena_dev;\n \tnum_segments = mbuf->nb_segs;\n+\theader_len = mbuf->data_len;\n \n \tif (likely(num_segments < tx_ring->sgl_size))\n \t\treturn 0;\n \n+\tif (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&\n+\t    (num_segments == tx_ring->sgl_size) &&\n+\t    (header_len < tx_ring->tx_max_header_size))\n+\t\treturn 0;\n+\n \trc = rte_pktmbuf_linearize(mbuf);\n \tif (unlikely(rc))\n \t\tRTE_LOG(WARNING, PMD, \"Mbuf linearize failed\\n\");\n@@ -2127,6 +2253,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tuint16_t next_to_use = tx_ring->next_to_use;\n \tuint16_t next_to_clean = tx_ring->next_to_clean;\n \tstruct rte_mbuf *mbuf;\n+\tuint16_t seg_len;\n \tunsigned int ring_size = tx_ring->ring_size;\n \tunsigned int ring_mask = ring_size - 1;\n \tstruct ena_com_tx_ctx ena_tx_ctx;\n@@ -2134,6 +2261,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tstruct ena_com_buf *ebuf;\n \tuint16_t rc, req_id, total_tx_descs = 0;\n \tuint16_t sent_idx = 0, empty_tx_reqs;\n+\tuint16_t push_len = 0;\n+\tuint16_t delta = 0;\n \tint nb_hw_desc;\n \n \t/* Check adapter state */\n@@ -2166,17 +2295,32 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t       sizeof(struct ena_com_tx_meta));\n \t\tena_tx_ctx.ena_bufs = ebuf;\n \t\tena_tx_ctx.req_id = req_id;\n+\n+\t\tdelta = 0;\n+\t\tseg_len = mbuf->data_len;\n+\n \t\tif (tx_ring->tx_mem_queue_type ==\n \t\t\t\tENA_ADMIN_PLACEMENT_POLICY_DEV) {\n-\t\t\t/* prepare the push buffer with\n-\t\t\t * virtual address of the data\n-\t\t\t */\n-\t\t\tena_tx_ctx.header_len =\n-\t\t\t\tRTE_MIN(mbuf->data_len,\n-\t\t\t\t\ttx_ring->tx_max_header_size);\n-\t\t\tena_tx_ctx.push_header =\n-\t\t\t\t(void *)((char *)mbuf->buf_addr +\n-\t\t\t\t\t mbuf->data_off);\n+\t\t\tpush_len = RTE_MIN(mbuf->pkt_len,\n+\t\t\t\t\t   tx_ring->tx_max_header_size);\n+\t\t\tena_tx_ctx.header_len = push_len;\n+\n+\t\t\tif (likely(push_len <= seg_len)) {\n+\t\t\t\t/* If the push header is in the single segment,\n+\t\t\t\t * then just point it to the 1st mbuf data.\n+\t\t\t\t */\n+\t\t\t\tena_tx_ctx.push_header =\n+\t\t\t\t\trte_pktmbuf_mtod(mbuf, uint8_t *);\n+\t\t\t} else {\n+\t\t\t\t/* If the push header lays in the several\n+\t\t\t\t * segments, copy it to the intermediate buffer.\n+\t\t\t\t */\n+\t\t\t\trte_pktmbuf_read(mbuf, 0, push_len,\n+\t\t\t\t\ttx_ring->push_buf_intermediate_buf);\n+\t\t\t\tena_tx_ctx.push_header =\n+\t\t\t\t\ttx_ring->push_buf_intermediate_buf;\n+\t\t\t\tdelta = push_len - seg_len;\n+\t\t\t}\n \t\t} /* there's no else as we take advantage of memset zeroing */\n \n \t\t/* Set TX offloads flags, if applicable */\n@@ -2191,20 +2335,30 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t/* Process first segment taking into\n \t\t * consideration pushed header\n \t\t */\n-\t\tif (mbuf->data_len > ena_tx_ctx.header_len) {\n+\t\tif (seg_len > push_len) {\n \t\t\tebuf->paddr = mbuf->buf_iova +\n \t\t\t\t      mbuf->data_off +\n-\t\t\t\t      ena_tx_ctx.header_len;\n-\t\t\tebuf->len = mbuf->data_len - ena_tx_ctx.header_len;\n+\t\t\t\t      push_len;\n+\t\t\tebuf->len = seg_len - push_len;\n \t\t\tebuf++;\n \t\t\ttx_info->num_of_bufs++;\n \t\t}\n \n \t\twhile ((mbuf = mbuf->next) != NULL) {\n-\t\t\tebuf->paddr = mbuf->buf_iova + mbuf->data_off;\n-\t\t\tebuf->len = mbuf->data_len;\n+\t\t\tseg_len = mbuf->data_len;\n+\n+\t\t\t/* Skip mbufs if whole data is pushed as a 
header */\n+\t\t\tif (unlikely(delta > seg_len)) {\n+\t\t\t\tdelta -= seg_len;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;\n+\t\t\tebuf->len = seg_len - delta;\n \t\t\tebuf++;\n \t\t\ttx_info->num_of_bufs++;\n+\n+\t\t\tdelta = 0;\n \t\t}\n \n \t\tena_tx_ctx.num_bufs = tx_info->num_of_bufs;\ndiff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h\nindex e6f7bd012..713cdea97 100644\n--- a/drivers/net/ena/ena_ethdev.h\n+++ b/drivers/net/ena/ena_ethdev.h\n@@ -110,6 +110,9 @@ struct ena_ring {\n \t/* Max length PMD can push to device for LLQ */\n \tuint8_t tx_max_header_size;\n \tint configured;\n+\n+\tuint8_t *push_buf_intermediate_buf;\n+\n \tstruct ena_adapter *adapter;\n \tuint64_t offloads;\n \tu16 sgl_size;\n",
    "prefixes": [
        "06/20"
    ]
}
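
The record above is hyperlinked: the "checks", "comments", and "mbox" fields are themselves URLs. A sketch of following those links, assuming the standard Patchwork schema for check objects (the "context" and "state" field names are not shown on this page):

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/48864/", timeout=30).json()

# The aggregate "check" above is "fail"; list the individual CI checks behind it.
for check in requests.get(patch["checks"], timeout=30).json():
    print(check.get("context"), check.get("state"))

# The mbox link serves the raw patch, suitable for feeding to `git am`.
with open("48864.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"], timeout=30).content)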