get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (full replacement of writable fields).

GET /api/patches/80544/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 80544,
    "url": "http://patches.dpdk.org/api/patches/80544/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1602596753-32282-24-git-send-email-arybchenko@solarflare.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1602596753-32282-24-git-send-email-arybchenko@solarflare.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1602596753-32282-24-git-send-email-arybchenko@solarflare.com",
    "date": "2020-10-13T13:45:40",
    "name": "[23/36] net/sfc: support TSO for EF100 native datapath",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "95d3a3e81d3cf618951ec539cd651adc886e182d",
    "submitter": {
        "id": 607,
        "url": "http://patches.dpdk.org/api/people/607/?format=api",
        "name": "Andrew Rybchenko",
        "email": "arybchenko@solarflare.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1602596753-32282-24-git-send-email-arybchenko@solarflare.com/mbox/",
    "series": [
        {
            "id": 12916,
            "url": "http://patches.dpdk.org/api/series/12916/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12916",
            "date": "2020-10-13T13:45:18",
            "name": "net/sfc: add EF100 support",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12916/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/80544/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/80544/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F136AA04B7;\n\tTue, 13 Oct 2020 15:58:58 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id D3DBB1C125;\n\tTue, 13 Oct 2020 15:46:59 +0200 (CEST)",
            "from dispatch1-us1.ppe-hosted.com (dispatch1-us1.ppe-hosted.com\n [67.231.154.164]) by dpdk.org (Postfix) with ESMTP id 85A811DB8B\n for <dev@dpdk.org>; Tue, 13 Oct 2020 15:46:06 +0200 (CEST)",
            "from mx1-us1.ppe-hosted.com (unknown [10.110.50.137])\n by dispatch1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTP id\n BADDA20084 for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:04 +0000 (UTC)",
            "from us4-mdac16-54.at1.mdlocal (unknown [10.110.50.14])\n by mx1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTP id B5E3E6009B\n for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:04 +0000 (UTC)",
            "from mx1-us1.ppe-hosted.com (unknown [10.110.49.106])\n by mx1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTPS id\n 2EC9F220054\n for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:04 +0000 (UTC)",
            "from webmail.solarflare.com (uk.solarflare.com [193.34.186.16])\n (using TLSv1.2 with cipher ECDHE-RSA-AES256-SHA384 (256/256 bits))\n (No client certificate requested)\n by mx1-us1.ppe-hosted.com (PPE Hosted ESMTP Server) with ESMTPS id\n EC7B5B4005B\n for <dev@dpdk.org>; Tue, 13 Oct 2020 13:46:03 +0000 (UTC)",
            "from ukex01.SolarFlarecom.com (10.17.10.4) by\n ukex01.SolarFlarecom.com (10.17.10.4) with Microsoft SMTP Server (TLS) id\n 15.0.1497.2; Tue, 13 Oct 2020 14:45:56 +0100",
            "from opal.uk.solarflarecom.com (10.17.10.1) by\n ukex01.SolarFlarecom.com (10.17.10.4) with Microsoft SMTP Server id\n 15.0.1497.2 via Frontend Transport; Tue, 13 Oct 2020 14:45:56 +0100",
            "from ukv-loginhost.uk.solarflarecom.com\n (ukv-loginhost.uk.solarflarecom.com [10.17.10.39])\n by opal.uk.solarflarecom.com (8.13.8/8.13.8) with ESMTP id 09DDjuUY006091;\n Tue, 13 Oct 2020 14:45:56 +0100",
            "from ukv-loginhost.uk.solarflarecom.com (localhost [127.0.0.1])\n by ukv-loginhost.uk.solarflarecom.com (Postfix) with ESMTP id 758631613A9;\n Tue, 13 Oct 2020 14:45:56 +0100 (BST)"
        ],
        "X-Virus-Scanned": "Proofpoint Essentials engine",
        "From": "Andrew Rybchenko <arybchenko@solarflare.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Ivan Malov <ivan.malov@oktetlabs.ru>",
        "Date": "Tue, 13 Oct 2020 14:45:40 +0100",
        "Message-ID": "<1602596753-32282-24-git-send-email-arybchenko@solarflare.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1602596753-32282-1-git-send-email-arybchenko@solarflare.com>",
        "References": "<1602596753-32282-1-git-send-email-arybchenko@solarflare.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-TM-AS-Product-Ver": "SMEX-12.5.0.1300-8.6.1012-25722.003",
        "X-TM-AS-Result": "No-20.735000-8.000000-10",
        "X-TMASE-MatchedRID": "fEvLPGzybqvys+NfDoX1KKiUivh0j2Pv6VTG9cZxEjIGmHr1eMxt2UAc\n 6DyoS2rI1rH4Rg0AOT29TeakPKRl/acF7sDdoK+lR0BY8wG7yRA1TzP60UkdHcuvWcrEpQVe7gW\n uEPosBb9GOVSl8PHlYcYGS5HBZwU49JzAi9pclXb9tX9U6GuaiqIik2/euMx1jjTDjakSEVPbWk\n HMATUzuqHwWUX26vTJI/XUCVWjDQSZka9ZOnvpYCMJO6daqdyWMHi1Ydy2WEjwJYZa/L83HSyKL\n dCNwOSsc+TTJ7STenXiTwfAabYVYWiqvF73selKydRP56yRRA9MyauPZ+/XGi4mS5ZcbgykRgfD\n 3I/9fV7Lh0Mby4vIdvygTDyMV9PjE1ZZbRf/HMwinwwiyYyq+ytW5wfuEjEyDpCUEeEFm7DxvWZ\n dOp62bDmCoROg0mm3fCJ0GowEUUVjOXNSZoyCqiTc3NdTt+Z64F58RPNYsrHYL2llLv/jjpOuv4\n LVY2bFlj6yUeIBPW6XV8uUymwo89oopQGeJcDue6Hz0ZpSb5dmlmlkW7eYoFc/CedjlcvkBQ1kH\n BLzvQuVfiyI1V/FhZxxfZ4J8C+WjQZ1YqAZsGF2GcWKGZufBQgqPpbA7sp1tk5HkvbY1uMclX4F\n VBZxp7VYd97rePAOajSAxd8Y6oaxYGKwcfpZW1b0VO9AmFFd+bXcCAqv9wV/GXao7FChFQVTB+/\n HcO3+YTJaG/aPBc4DsqXmi90s0xFlhiT7pasfZacDbE73ZSl9LQinZ4QefMdVaUMTROUqJdW0Gb\n I6Iq73FLeZXNZS4LU+KYi1qWO6ftwZ3X11IV0=",
        "X-TM-AS-User-Approved-Sender": "Yes",
        "X-TM-AS-User-Blocked-Sender": "No",
        "X-TMASE-Result": "10--20.735000-8.000000",
        "X-TMASE-Version": "SMEX-12.5.0.1300-8.6.1012-25722.003",
        "X-MDID": "1602596764-1wU07luAKHbE",
        "X-PPE-DISP": "1602596764;1wU07luAKHbE",
        "Subject": "[dpdk-dev] [PATCH 23/36] net/sfc: support TSO for EF100 native\n\tdatapath",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Ivan Malov <ivan.malov@oktetlabs.ru>\n\nRiverhead boards support TSO version 3.\n\nSigned-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>\nSigned-off-by: Andrew Rybchenko <arybchenko@solarflare.com>\n---\n doc/guides/nics/sfc_efx.rst    |   2 +-\n drivers/net/sfc/sfc.c          |   5 +-\n drivers/net/sfc/sfc_dp_tx.h    |  10 ++\n drivers/net/sfc/sfc_ef100_tx.c | 266 ++++++++++++++++++++++++++++-----\n drivers/net/sfc/sfc_tx.c       |  14 +-\n 5 files changed, 257 insertions(+), 40 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst\nindex f3135fdd70..104ab38aa9 100644\n--- a/doc/guides/nics/sfc_efx.rst\n+++ b/doc/guides/nics/sfc_efx.rst\n@@ -329,7 +329,7 @@ boolean parameters value.\n   is even more faster then **ef10** but does not support multi-segment\n   mbufs, disallows multiple mempools and neglects mbuf reference counters.\n   **ef100** chooses EF100 native datapath which supports multi-segment\n-  mbufs, inner/outer IPv4 and TCP/UDP checksum offloads.\n+  mbufs, inner/outer IPv4 and TCP/UDP checksum and TCP segmentation offloads.\n \n - ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)\n \ndiff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c\nindex cfba485ad2..b41db65003 100644\n--- a/drivers/net/sfc/sfc.c\n+++ b/drivers/net/sfc/sfc.c\n@@ -205,7 +205,7 @@ sfc_estimate_resource_limits(struct sfc_adapter *sa)\n \t\tMIN(encp->enc_txq_limit,\n \t\t    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);\n \n-\tif (sa->tso)\n+\tif (sa->tso && encp->enc_fw_assisted_tso_v2_enabled)\n \t\tlimits.edl_max_txq_count =\n \t\t\tMIN(limits.edl_max_txq_count,\n \t\t\t    encp->enc_fw_assisted_tso_v2_n_contexts /\n@@ -795,7 +795,8 @@ sfc_attach(struct sfc_adapter *sa)\n \t\tencp->enc_tunnel_encapsulations_supported;\n \n \tif (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {\n-\t\tsa->tso = encp->enc_fw_assisted_tso_v2_enabled;\n+\t\tsa->tso = encp->enc_fw_assisted_tso_v2_enabled ||\n+\t\t\t  encp->enc_tso_v3_enabled;\n \t\tif (!sa->tso)\n \t\t\tsfc_info(sa, \"TSO support isn't available on this adapter\");\n \t}\ndiff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h\nindex bed8ce84aa..3ecdfcdd28 100644\n--- a/drivers/net/sfc/sfc_dp_tx.h\n+++ b/drivers/net/sfc/sfc_dp_tx.h\n@@ -70,6 +70,16 @@ struct sfc_dp_tx_qcreate_info {\n \t * the hardware to apply TSO packet edits.\n \t */\n \tuint16_t\t\ttso_tcp_header_offset_limit;\n+\t/** Maximum number of 
header DMA descriptors per TSOv3 transaction */\n+\tuint16_t\t\ttso_max_nb_header_descs;\n+\t/** Maximum header length acceptable by TSOv3 transaction */\n+\tuint16_t\t\ttso_max_header_len;\n+\t/** Maximum number of payload DMA descriptors per TSOv3 transaction */\n+\tuint16_t\t\ttso_max_nb_payload_descs;\n+\t/** Maximum payload length per TSOv3 transaction */\n+\tuint32_t\t\ttso_max_payload_len;\n+\t/** Maximum number of frames to be generated per TSOv3 transaction */\n+\tuint32_t\t\ttso_max_nb_outgoing_frames;\n };\n \n /**\ndiff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c\nindex 20d4d1cf9c..5ad0813a9b 100644\n--- a/drivers/net/sfc/sfc_ef100_tx.c\n+++ b/drivers/net/sfc/sfc_ef100_tx.c\n@@ -77,6 +77,13 @@ struct sfc_ef100_txq {\n \tunsigned int\t\t\tevq_phase_bit_shift;\n \tvolatile efx_qword_t\t\t*evq_hw_ring;\n \n+\tuint16_t\t\t\ttso_tcp_header_offset_limit;\n+\tuint16_t\t\t\ttso_max_nb_header_descs;\n+\tuint16_t\t\t\ttso_max_header_len;\n+\tuint16_t\t\t\ttso_max_nb_payload_descs;\n+\tuint32_t\t\t\ttso_max_payload_len;\n+\tuint32_t\t\t\ttso_max_nb_outgoing_frames;\n+\n \t/* Datapath transmit queue anchor */\n \tstruct sfc_dp_txq\t\tdp;\n };\n@@ -87,6 +94,42 @@ sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)\n \treturn container_of(dp_txq, struct sfc_ef100_txq, dp);\n }\n \n+static int\n+sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,\n+\t\t\t     struct rte_mbuf *m)\n+{\n+\tsize_t header_len = m->l2_len + m->l3_len + m->l4_len;\n+\tsize_t payload_len = m->pkt_len - header_len;\n+\tunsigned long mss_conformant_max_payload_len;\n+\tunsigned int nb_payload_descs;\n+\n+\tmss_conformant_max_payload_len =\n+\t\tm->tso_segsz * txq->tso_max_nb_outgoing_frames;\n+\n+\t/*\n+\t * Don't really want to know exact number of payload segments.\n+\t * Just use total number of segments as upper limit. 
Practically\n+\t * maximum number of payload segments is significantly bigger\n+\t * than maximum number header segments, so we can neglect header\n+\t * segments excluded total number of segments to estimate number\n+\t * of payload segments required.\n+\t */\n+\tnb_payload_descs = m->nb_segs;\n+\n+\t/*\n+\t * Carry out multiple independent checks using bitwise OR\n+\t * to avoid unnecessary conditional branching.\n+\t */\n+\tif (unlikely((header_len > txq->tso_max_header_len) |\n+\t\t     (nb_payload_descs > txq->tso_max_nb_payload_descs) |\n+\t\t     (payload_len > txq->tso_max_payload_len) |\n+\t\t     (payload_len > mss_conformant_max_payload_len) |\n+\t\t     (m->pkt_len == header_len)))\n+\t\treturn EINVAL;\n+\n+\treturn 0;\n+}\n+\n static uint16_t\n sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t  uint16_t nb_pkts)\n@@ -110,16 +153,25 @@ sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t    (m->ol_flags & PKT_TX_L4_MASK)) {\n \t\t\tcalc_phdr_cksum = true;\n \t\t\tmax_nb_header_segs = 1;\n+\t\t} else if (m->ol_flags & PKT_TX_TCP_SEG) {\n+\t\t\tmax_nb_header_segs = txq->tso_max_nb_header_descs;\n \t\t}\n \n \t\tret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs, 0,\n-\t\t\t\t\t    0, txq->max_fill_level, 0, 0);\n+\t\t\t\t\t    txq->tso_tcp_header_offset_limit,\n+\t\t\t\t\t    txq->max_fill_level, 1, 0);\n \t\tif (unlikely(ret != 0)) {\n \t\t\trte_errno = ret;\n \t\t\tbreak;\n \t\t}\n \n-\t\tif (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {\n+\t\tif (m->ol_flags & PKT_TX_TCP_SEG) {\n+\t\t\tret = sfc_ef100_tx_prepare_pkt_tso(txq, m);\n+\t\t\tif (unlikely(ret != 0)) {\n+\t\t\t\trte_errno = ret;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t} else if (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {\n \t\t\trte_errno = EINVAL;\n \t\t\tbreak;\n \t\t}\n@@ -326,6 +378,48 @@ sfc_ef100_tx_qdesc_seg_create(rte_iova_t addr, uint16_t len,\n \t\t\tESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG);\n }\n \n+static 
void\n+sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,\n+\t\t\t      uint16_t nb_header_descs,\n+\t\t\t      uint16_t nb_payload_descs,\n+\t\t\t      size_t header_len, size_t payload_len,\n+\t\t\t      size_t iph_off, size_t tcph_off,\n+\t\t\t      efx_oword_t *tx_desc)\n+{\n+\tefx_oword_t tx_desc_extra_fields;\n+\t/*\n+\t * If no tunnel encapsulation is present, then the ED_INNER\n+\t * fields should be used.\n+\t */\n+\tint ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;\n+\n+\tEFX_POPULATE_OWORD_7(*tx_desc,\n+\t\t\tESF_GZ_TX_TSO_MSS, m->tso_segsz,\n+\t\t\tESF_GZ_TX_TSO_HDR_NUM_SEGS, nb_header_descs,\n+\t\t\tESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, nb_payload_descs,\n+\t\t\tESF_GZ_TX_TSO_ED_INNER_IP4_ID, ed_inner_ip_id,\n+\t\t\tESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,\n+\t\t\tESF_GZ_TX_TSO_HDR_LEN_W, header_len >> 1,\n+\t\t\tESF_GZ_TX_TSO_PAYLOAD_LEN, payload_len);\n+\n+\tEFX_POPULATE_OWORD_5(tx_desc_extra_fields,\n+\t\t\t/*\n+\t\t\t * Inner offsets are required for inner IPv4 ID\n+\t\t\t * and IP length edits.\n+\t\t\t */\n+\t\t\tESF_GZ_TX_TSO_INNER_L3_OFF_W, iph_off >> 1,\n+\t\t\tESF_GZ_TX_TSO_INNER_L4_OFF_W, tcph_off >> 1,\n+\t\t\t/*\n+\t\t\t * Use outer full checksum offloads which do\n+\t\t\t * not require any extra information.\n+\t\t\t */\n+\t\t\tESF_GZ_TX_TSO_CSO_OUTER_L3, 1,\n+\t\t\tESF_GZ_TX_TSO_CSO_OUTER_L4, 1,\n+\t\t\tESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO);\n+\n+\tEFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);\n+}\n+\n static inline void\n sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)\n {\n@@ -351,30 +445,115 @@ sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)\n static unsigned int\n sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)\n {\n+\tunsigned int extra_descs = 0;\n+\n /** Maximum length of an mbuf segment data */\n #define SFC_MBUF_SEG_LEN_MAX\t\tUINT16_MAX\n \tRTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);\n \n+\tif (m->ol_flags & PKT_TX_TCP_SEG) {\n+\t\t/* Tx TSO descriptor 
*/\n+\t\textra_descs++;\n+\t\t/*\n+\t\t * Extra Tx segment descriptor may be required if header\n+\t\t * ends in the middle of segment.\n+\t\t */\n+\t\textra_descs++;\n+\t} else {\n+\t\t/*\n+\t\t * mbuf segment cannot be bigger than maximum segnment length\n+\t\t * and maximum packet length since TSO is not supported yet.\n+\t\t * Make sure that the first segment does not need fragmentation\n+\t\t * (split into many Tx descriptors).\n+\t\t */\n+\t\tRTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <\n+\t\t\t\t RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,\n+\t\t\t\t SFC_MBUF_SEG_LEN_MAX));\n+\t}\n+\n \t/*\n-\t * mbuf segment cannot be bigger than maximum segnment length and\n-\t * maximum packet length since TSO is not supported yet.\n-\t * Make sure that the first segment does not need fragmentation\n-\t * (split into many Tx descriptors).\n+\t * Any segment of scattered packet cannot be bigger than maximum\n+\t * segment length. Make sure that subsequent segments do not need\n+\t * fragmentation (split into many Tx descriptors).\n \t */\n-\tRTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <\n-\t\tRTE_MIN((unsigned int)EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX));\n+\tRTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX < SFC_MBUF_SEG_LEN_MAX);\n+\n+\treturn m->nb_segs + extra_descs;\n+}\n+\n+static struct rte_mbuf *\n+sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,\n+\t\t       struct rte_mbuf *m, unsigned int *added)\n+{\n+\tstruct rte_mbuf *m_seg = m;\n+\tunsigned int nb_hdr_descs;\n+\tunsigned int nb_pld_descs;\n+\tunsigned int seg_split = 0;\n+\tunsigned int tso_desc_id;\n+\tunsigned int id;\n+\tsize_t iph_off;\n+\tsize_t tcph_off;\n+\tsize_t header_len;\n+\tsize_t remaining_hdr_len;\n+\n+\tiph_off = m->l2_len;\n+\ttcph_off = iph_off + m->l3_len;\n+\theader_len = tcph_off + m->l4_len;\n \n \t/*\n-\t * Any segment of scattered packet cannot be bigger than maximum\n-\t * segment length and maximum packet legnth since TSO is not\n-\t * supported yet.\n-\t * Make sure that 
subsequent segments do not need fragmentation (split\n-\t * into many Tx descriptors).\n+\t * Remember ID of the TX_TSO descriptor to be filled in.\n+\t * We can't fill it in right now since we need to calculate\n+\t * number of header and payload segments first and don't want\n+\t * to traverse it twice here.\n+\t */\n+\ttso_desc_id = (*added)++ & txq->ptr_mask;\n+\n+\tremaining_hdr_len = header_len;\n+\tdo {\n+\t\tid = (*added)++ & txq->ptr_mask;\n+\t\tif (rte_pktmbuf_data_len(m_seg) <= remaining_hdr_len) {\n+\t\t\t/* The segment is fully header segment */\n+\t\t\tsfc_ef100_tx_qdesc_seg_create(\n+\t\t\t\trte_mbuf_data_iova(m_seg),\n+\t\t\t\trte_pktmbuf_data_len(m_seg),\n+\t\t\t\t&txq->txq_hw_ring[id]);\n+\t\t\tremaining_hdr_len -= rte_pktmbuf_data_len(m_seg);\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * The segment must be split into header and\n+\t\t\t * payload segments\n+\t\t\t */\n+\t\t\tsfc_ef100_tx_qdesc_seg_create(\n+\t\t\t\trte_mbuf_data_iova(m_seg),\n+\t\t\t\tremaining_hdr_len,\n+\t\t\t\t&txq->txq_hw_ring[id]);\n+\t\t\tSFC_ASSERT(txq->sw_ring[id].mbuf == NULL);\n+\n+\t\t\tid = (*added)++ & txq->ptr_mask;\n+\t\t\tsfc_ef100_tx_qdesc_seg_create(\n+\t\t\t\trte_mbuf_data_iova(m_seg) + remaining_hdr_len,\n+\t\t\t\trte_pktmbuf_data_len(m_seg) - remaining_hdr_len,\n+\t\t\t\t&txq->txq_hw_ring[id]);\n+\t\t\tremaining_hdr_len = 0;\n+\t\t\tseg_split = 1;\n+\t\t}\n+\t\ttxq->sw_ring[id].mbuf = m_seg;\n+\t\tm_seg = m_seg->next;\n+\t} while (remaining_hdr_len > 0);\n+\n+\t/*\n+\t * If a segment is split into header and payload segments, added\n+\t * pointer counts it twice and we should correct it.\n \t */\n-\tRTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX <\n-\t\tRTE_MIN((unsigned int)EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX));\n+\tnb_hdr_descs = ((id - tso_desc_id) & txq->ptr_mask) - seg_split;\n+\tnb_pld_descs = m->nb_segs - nb_hdr_descs + seg_split;\n+\n+\tsfc_ef100_tx_qdesc_tso_create(m, nb_hdr_descs, nb_pld_descs, header_len,\n+\t\t\t\t      rte_pktmbuf_pkt_len(m) - 
header_len,\n+\t\t\t\t      iph_off, tcph_off,\n+\t\t\t\t      &txq->txq_hw_ring[tso_desc_id]);\n \n-\treturn m->nb_segs;\n+\treturn m_seg;\n }\n \n static uint16_t\n@@ -428,27 +607,33 @@ sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t\t\tbreak;\n \t\t}\n \n-\t\tid = added++ & txq->ptr_mask;\n-\t\tsfc_ef100_tx_qdesc_send_create(m_seg, &txq->txq_hw_ring[id]);\n+\t\tif (m_seg->ol_flags & PKT_TX_TCP_SEG) {\n+\t\t\tm_seg = sfc_ef100_xmit_tso_pkt(txq, m_seg, &added);\n+\t\t} else {\n+\t\t\tid = added++ & txq->ptr_mask;\n+\t\t\tsfc_ef100_tx_qdesc_send_create(m_seg,\n+\t\t\t\t\t\t       &txq->txq_hw_ring[id]);\n \n-\t\t/*\n-\t\t * rte_pktmbuf_free() is commonly used in DPDK for\n-\t\t * recycling packets - the function checks every\n-\t\t * segment's reference counter and returns the\n-\t\t * buffer to its pool whenever possible;\n-\t\t * nevertheless, freeing mbuf segments one by one\n-\t\t * may entail some performance decline;\n-\t\t * from this point, sfc_efx_tx_reap() does the same job\n-\t\t * on its own and frees buffers in bulks (all mbufs\n-\t\t * within a bulk belong to the same pool);\n-\t\t * from this perspective, individual segment pointers\n-\t\t * must be associated with the corresponding SW\n-\t\t * descriptors independently so that only one loop\n-\t\t * is sufficient on reap to inspect all the buffers\n-\t\t */\n-\t\ttxq->sw_ring[id].mbuf = m_seg;\n+\t\t\t/*\n+\t\t\t * rte_pktmbuf_free() is commonly used in DPDK for\n+\t\t\t * recycling packets - the function checks every\n+\t\t\t * segment's reference counter and returns the\n+\t\t\t * buffer to its pool whenever possible;\n+\t\t\t * nevertheless, freeing mbuf segments one by one\n+\t\t\t * may entail some performance decline;\n+\t\t\t * from this point, sfc_efx_tx_reap() does the same job\n+\t\t\t * on its own and frees buffers in bulks (all mbufs\n+\t\t\t * within a bulk belong to the same pool);\n+\t\t\t * from this perspective, individual segment 
pointers\n+\t\t\t * must be associated with the corresponding SW\n+\t\t\t * descriptors independently so that only one loop\n+\t\t\t * is sufficient on reap to inspect all the buffers\n+\t\t\t */\n+\t\t\ttxq->sw_ring[id].mbuf = m_seg;\n+\t\t\tm_seg = m_seg->next;\n+\t\t}\n \n-\t\twhile ((m_seg = m_seg->next) != NULL) {\n+\t\twhile (m_seg != NULL) {\n \t\t\tRTE_BUILD_BUG_ON(SFC_MBUF_SEG_LEN_MAX >\n \t\t\t\t\t SFC_EF100_TX_SEG_DESC_LEN_MAX);\n \n@@ -457,6 +642,7 @@ sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t\t\t\trte_pktmbuf_data_len(m_seg),\n \t\t\t\t\t&txq->txq_hw_ring[id]);\n \t\t\ttxq->sw_ring[id].mbuf = m_seg;\n+\t\t\tm_seg = m_seg->next;\n \t\t}\n \n \t\tdma_desc_space -= (added - pkt_start);\n@@ -552,6 +738,13 @@ sfc_ef100_tx_qcreate(uint16_t port_id, uint16_t queue_id,\n \t\t\t(info->hw_index << info->vi_window_shift);\n \ttxq->evq_hw_ring = info->evq_hw_ring;\n \n+\ttxq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;\n+\ttxq->tso_max_nb_header_descs = info->tso_max_nb_header_descs;\n+\ttxq->tso_max_header_len = info->tso_max_header_len;\n+\ttxq->tso_max_nb_payload_descs = info->tso_max_nb_payload_descs;\n+\ttxq->tso_max_payload_len = info->tso_max_payload_len;\n+\ttxq->tso_max_nb_outgoing_frames = info->tso_max_nb_outgoing_frames;\n+\n \tsfc_ef100_tx_debug(txq, \"TxQ doorbell is %p\", txq->doorbell);\n \n \t*dp_txqp = &txq->dp;\n@@ -690,7 +883,8 @@ struct sfc_dp_tx sfc_ef100_tx = {\n \t\t\t\t  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |\n \t\t\t\t  DEV_TX_OFFLOAD_UDP_CKSUM |\n \t\t\t\t  DEV_TX_OFFLOAD_TCP_CKSUM |\n-\t\t\t\t  DEV_TX_OFFLOAD_MULTI_SEGS,\n+\t\t\t\t  DEV_TX_OFFLOAD_MULTI_SEGS |\n+\t\t\t\t  DEV_TX_OFFLOAD_TCP_TSO,\n \t.get_dev_info\t\t= sfc_ef100_get_dev_info,\n \t.qsize_up_rings\t\t= sfc_ef100_tx_qsize_up_rings,\n \t.qcreate\t\t= sfc_ef100_tx_qcreate,\ndiff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c\nindex d50d49ca56..7a8495efc7 100644\n--- a/drivers/net/sfc/sfc_tx.c\n+++ 
b/drivers/net/sfc/sfc_tx.c\n@@ -188,6 +188,17 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,\n \tinfo.vi_window_shift = encp->enc_vi_window_shift;\n \tinfo.tso_tcp_header_offset_limit =\n \t\tencp->enc_tx_tso_tcp_header_offset_limit;\n+\tinfo.tso_max_nb_header_descs =\n+\t\tRTE_MIN(encp->enc_tx_tso_max_header_ndescs,\n+\t\t\t(uint32_t)UINT16_MAX);\n+\tinfo.tso_max_header_len =\n+\t\tRTE_MIN(encp->enc_tx_tso_max_header_length,\n+\t\t\t(uint32_t)UINT16_MAX);\n+\tinfo.tso_max_nb_payload_descs =\n+\t\tRTE_MIN(encp->enc_tx_tso_max_payload_ndescs,\n+\t\t\t(uint32_t)UINT16_MAX);\n+\tinfo.tso_max_payload_len = encp->enc_tx_tso_max_payload_length;\n+\tinfo.tso_max_nb_outgoing_frames = encp->enc_tx_tso_max_nframes;\n \n \trc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,\n \t\t\t\t     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,\n@@ -592,7 +603,8 @@ sfc_tx_start(struct sfc_adapter *sa)\n \tsfc_log_init(sa, \"txq_count = %u\", sas->txq_count);\n \n \tif (sa->tso) {\n-\t\tif (!encp->enc_fw_assisted_tso_v2_enabled) {\n+\t\tif (!encp->enc_fw_assisted_tso_v2_enabled &&\n+\t\t    !encp->enc_tso_v3_enabled) {\n \t\t\tsfc_warn(sa, \"TSO support was unable to be restored\");\n \t\t\tsa->tso = B_FALSE;\n \t\t\tsa->tso_encap = B_FALSE;\n",
    "prefixes": [
        "23/36"
    ]
}