get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
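
For illustration, a minimal sketch of a partial update against this endpoint, written in Python with the requests library. The token value is a placeholder (a real one comes from a Patchwork user profile), and changing fields such as "state", "archived" or "delegate" requires maintainer rights on the project — none of this is shown in the dump below, so treat it as an assumption:

import requests

# Placeholder token; generate a real one from your Patchwork profile page.
TOKEN = "0123456789abcdef"

resp = requests.patch(
    "http://patches.dpdk.org/api/patches/3966/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "superseded", "archived": True},  # only these fields change
)
resp.raise_for_status()
print(resp.json()["state"])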

GET /api/patches/3966/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 3966,
    "url": "http://patches.dpdk.org/api/patches/3966/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1426015891-20450-4-git-send-email-vladz@cloudius-systems.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1426015891-20450-4-git-send-email-vladz@cloudius-systems.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1426015891-20450-4-git-send-email-vladz@cloudius-systems.com",
    "date": "2015-03-10T19:31:31",
    "name": "[dpdk-dev,v7,3/3] ixgbe: Add LRO support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d44f29bf00fc1f1cfc120ea0d5d998897778617f",
    "submitter": {
        "id": 141,
        "url": "http://patches.dpdk.org/api/people/141/?format=api",
        "name": "Vladislav Zolotarov",
        "email": "vladz@cloudius-systems.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1426015891-20450-4-git-send-email-vladz@cloudius-systems.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/3966/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/3966/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 6B5809AAE;\n\tTue, 10 Mar 2015 20:31:42 +0100 (CET)",
            "from mail-wg0-f43.google.com (mail-wg0-f43.google.com\n\t[74.125.82.43]) by dpdk.org (Postfix) with ESMTP id 61D139AA2\n\tfor <dev@dpdk.org>; Tue, 10 Mar 2015 20:31:39 +0100 (CET)",
            "by wggx13 with SMTP id x13so4193627wgg.4\n\tfor <dev@dpdk.org>; Tue, 10 Mar 2015 12:31:39 -0700 (PDT)",
            "from vladz-laptop.cloudius-systems.com. ([212.143.139.214])\n\tby mx.google.com with ESMTPSA id\n\tgz3sm2957940wib.1.2015.03.10.12.31.37\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tTue, 10 Mar 2015 12:31:38 -0700 (PDT)"
        ],
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=3cw3OE2nR+O9MuNPv/4YopHwKP92Telk6yU8DpdnjAY=;\n\tb=UO4KCPQygzq0FRWHxyPx8msk02gU18/JNJBjdnJSAaAV2LLxkQ2fCOUaIbI+7E+lSE\n\tbIxbI3cmbQ+rRfZHLUS8yFZ107dl7h+w06x/4d5JAKwhpDrxsdLcavXWrIc/FnIVdv0b\n\tmIp4lFdM8vyemsT4Ldm7kLMK6GJvCSe6xPIwPCqiwYLCSI8+zR/dlFNd6pUhHRh70lTS\n\teD7IS23XQQsMXD+0t9ILSHrKTXib312O+FOeE9S3SkJRyTp62ngbDWl5jA0DOXOH36Y5\n\t7e9LIMI+MPKkQAw/X2HsIf2yATKRnwJxDbkpRuHrKNvKyHviaqBcFtNDY7SVNpLWEnr9\n\tJGXQ==",
        "X-Gm-Message-State": "ALoCoQnukxqi9P4p6VViyWmsQ5tPN4yWvpsbWqOladijNO35FuwuoljMTAqHF2thOnyEXzGJovov",
        "X-Received": "by 10.194.88.131 with SMTP id bg3mr72820154wjb.119.1426015899220;\n\tTue, 10 Mar 2015 12:31:39 -0700 (PDT)",
        "From": "Vlad Zolotarov <vladz@cloudius-systems.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue, 10 Mar 2015 21:31:31 +0200",
        "Message-Id": "<1426015891-20450-4-git-send-email-vladz@cloudius-systems.com>",
        "X-Mailer": "git-send-email 2.1.0",
        "In-Reply-To": "<1426015891-20450-1-git-send-email-vladz@cloudius-systems.com>",
        "References": "<1426015891-20450-1-git-send-email-vladz@cloudius-systems.com>",
        "Subject": "[dpdk-dev]  [PATCH v7 3/3] ixgbe: Add LRO support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "- Only x540 and 82599 devices support LRO.\n    - Add the appropriate HW configuration.\n    - Add RSC aware rx_pkt_burst() handlers:\n       - Implemented bulk allocation and non-bulk allocation versions.\n       - Add LRO-specific fields to rte_eth_rxmode, to rte_eth_dev_data\n         and to igb_rx_queue.\n       - Use the appropriate handler when LRO is requested.\n\nSigned-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>\n---\nNew in v7:\n   - Free not-yet-completed RSC aggregations in rte_eth_dev_stop() flow.\n   - Fixed rx_bulk_alloc_allowed and rx_vec_allowed initialization:\n      - Don't set them to FALSE in rte_eth_dev_stop() flow - the following\n        rte_eth_dev_start() will need them.\n      - Reset them to TRUE in rte_eth_dev_configure() and not in a probe() flow.\n        This will ensure the proper behaviour if port is re-configured.\n   - Reset the sw_ring[].mbuf entry in a bulk allocation case.\n     This is needed for ixgbe_rx_queue_release_mbufs().\n   - _recv_pkts_lro(): added the missing memory barrier before RDT update in a\n     non-bulk allocation case.\n   - Don't allow RSC when device is configured in an SR-IOV mode.\n\nNew in v5:\n   - Put the RTE_ETHDEV_HAS_LRO_SUPPORT definition at the beginning of rte_ethdev.h.\n   - Removed the \"TODO: Remove me\" comment near RTE_ETHDEV_HAS_LRO_SUPPORT.\n\nNew in v4:\n   - Define RTE_ETHDEV_HAS_LRO_SUPPORT in rte_ethdev.h instead of\n     RTE_ETHDEV_LRO_SUPPORT defined in config/common_linuxapp.\n\nNew in v2:\n   - Removed rte_eth_dev_data.lro_bulk_alloc.\n   - Fixed a few styling and spelling issues.\n\nixgbe: Fixed rx_bulk_alloc_allowed and rx_vec_allowed initialization\n\n   - Don't set them to FALSE in rte_eth_dev_stop() flow - the following\n     rte_eth_dev_start() will need them.\n   - Reset them to TRUE in rte_eth_dev_configure() and not in a probe() flow.\n     This will ensure the proper behaviour if port is re-configured.\n---\n lib/librte_ether/rte_ethdev.h       |   9 +-\n lib/librte_pmd_ixgbe/ixgbe_ethdev.c |  29 +-\n lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   5 +\n lib/librte_pmd_ixgbe/ixgbe_rxtx.c   | 595 +++++++++++++++++++++++++++++++++++-\n lib/librte_pmd_ixgbe/ixgbe_rxtx.h   |   6 +\n 5 files changed, 628 insertions(+), 16 deletions(-)",
    "diff": "diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h\nindex 8db3127..44f081f 100644\n--- a/lib/librte_ether/rte_ethdev.h\n+++ b/lib/librte_ether/rte_ethdev.h\n@@ -172,6 +172,9 @@ extern \"C\" {\n \n #include <stdint.h>\n \n+/* Use this macro to check if LRO API is supported */\n+#define RTE_ETHDEV_HAS_LRO_SUPPORT\n+\n #include <rte_log.h>\n #include <rte_interrupts.h>\n #include <rte_pci.h>\n@@ -320,14 +323,15 @@ struct rte_eth_rxmode {\n \tenum rte_eth_rx_mq_mode mq_mode;\n \tuint32_t max_rx_pkt_len;  /**< Only used if jumbo_frame enabled. */\n \tuint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/\n-\tuint8_t header_split : 1, /**< Header Split enable. */\n+\tuint16_t header_split : 1, /**< Header Split enable. */\n \t\thw_ip_checksum   : 1, /**< IP/UDP/TCP checksum offload enable. */\n \t\thw_vlan_filter   : 1, /**< VLAN filter enable. */\n \t\thw_vlan_strip    : 1, /**< VLAN strip enable. */\n \t\thw_vlan_extend   : 1, /**< Extended VLAN enable. */\n \t\tjumbo_frame      : 1, /**< Jumbo Frame Receipt enable. */\n \t\thw_strip_crc     : 1, /**< Enable CRC stripping by hardware. */\n-\t\tenable_scatter   : 1; /**< Enable scatter packets rx handler */\n+\t\tenable_scatter   : 1, /**< Enable scatter packets rx handler */\n+\t\tenable_lro       : 1; /**< Enable LRO */\n };\n \n /**\n@@ -1515,6 +1519,7 @@ struct rte_eth_dev_data {\n \tuint8_t port_id;           /**< Device [external] port identifier. */\n \tuint8_t promiscuous   : 1, /**< RX promiscuous mode ON(1) / OFF(0). */\n \t\tscattered_rx : 1,  /**< RX of scattered packets is ON(1) / OFF(0) */\n+\t\tlro          : 1,  /**< RX LRO is ON(1) / OFF(0) */\n \t\tall_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */\n \t\tdev_started : 1;   /**< Device state: STARTED(1) / STOPPED(0). */\n };\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\nindex 9d3de1a..f0a3100 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n@@ -772,13 +772,6 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,\n \thw->hw_addr = (void *)pci_dev->mem_resource[0].addr;\n \thw->allow_unsupported_sfp = 1;\n \n-\t/*\n-\t * Initialize to TRUE. If any of Rx queues doesn't meet the bulk\n-\t * allocation or vector Rx preconditions we will reset it.\n-\t */\n-\thw->rx_bulk_alloc_allowed = true;\n-\thw->rx_vec_allowed = true;\n-\n \t/* Initialize the shared code (base driver) */\n #ifdef RTE_NIC_BYPASS\n \tdiag = ixgbe_bypass_init_shared_code(hw);\n@@ -1441,12 +1434,21 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_interrupt *intr =\n \t\tIXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);\n+\tstruct ixgbe_hw *hw =\n+\t\tIXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tPMD_INIT_FUNC_TRACE();\n \n \t/* set flag to update link status after init */\n \tintr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;\n \n+\t/*\n+\t * Initialize to TRUE. 
If any of Rx queues doesn't meet the bulk\n+\t * allocation or vector Rx preconditions we will reset it.\n+\t */\n+\thw->rx_bulk_alloc_allowed = true;\n+\thw->rx_vec_allowed = true;\n+\n \treturn 0;\n }\n \n@@ -1648,8 +1650,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)\n \n \t/* Clear stored conf */\n \tdev->data->scattered_rx = 0;\n-\thw->rx_bulk_alloc_allowed = false;\n-\thw->rx_vec_allowed = false;\n+\tdev->data->lro = 0;\n \n \t/* Clear recorded link status */\n \tmemset(&link, 0, sizeof(link));\n@@ -2018,6 +2019,16 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\tDEV_RX_OFFLOAD_IPV4_CKSUM |\n \t\tDEV_RX_OFFLOAD_UDP_CKSUM  |\n \t\tDEV_RX_OFFLOAD_TCP_CKSUM;\n+\n+\t/*\n+\t * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV\n+\t * mode.\n+\t */\n+\tif ((hw->mac.type == ixgbe_mac_82599EB ||\n+\t     hw->mac.type == ixgbe_mac_X540) &&\n+\t    !RTE_ETH_DEV_SRIOV(dev).active)\n+\t\tdev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;\n+\n \tdev_info->tx_offload_capa =\n \t\tDEV_TX_OFFLOAD_VLAN_INSERT |\n \t\tDEV_TX_OFFLOAD_IPV4_CKSUM  |\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h\nindex a549f5c..e206584 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h\n+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h\n@@ -349,6 +349,11 @@ uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,\n uint16_t ixgbe_recv_scattered_pkts(void *rx_queue,\n \t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n \n+uint16_t ixgbe_recv_pkts_lro(void *rx_queue,\n+\t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n+uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,\n+\t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n+\n uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts);\n \ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\nindex 58e619b..1154fd5 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n@@ -1366,6 +1366,15 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n }\n \n /**\n+ * Detect an RSC descriptor.\n+ */\n+static inline uint32_t ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)\n+{\n+\treturn (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &\n+\t\tIXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;\n+}\n+\n+/**\n  * Initialize the first mbuf of the returned packet:\n  *    - RX port identifier,\n  *    - hardware offload data, if any:\n@@ -1410,6 +1419,294 @@ static inline void ixgbe_fill_cluster_head_buf(\n \t}\n }\n \n+/**\n+ * Bulk receive handler for and LRO case.\n+ *\n+ * @rx_queue Rx queue handle\n+ * @rx_pkts table of received packets\n+ * @nb_pkts size of rx_pkts table\n+ * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling\n+ *\n+ * Handles the Rx HW ring completions when RSC feature is configured. 
Uses an\n+ * additional ring of igb_rsc_entry's that will hold the relevant RSC info.\n+ *\n+ * We use the same logic as in Lunux and in FreeBSD ixgbe drivers:\n+ * 1) When non-EOP RSC completion arrives:\n+ *    a) Update the HEAD of the current RSC aggregation cluster with the new\n+ *       segment's data length.\n+ *    b) Set the \"next\" pointer of the current segment to point to the segment\n+ *       at the NEXTP index.\n+ *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry\n+ *       in the sw_rsc_ring.\n+ * 2) When EOP arrives we just update the cluster's total length and offload\n+ *    flags and deliver the cluster up to the upper layers. In our case - put it\n+ *    in the rx_pkts table.\n+ *\n+ * Returns the number of received packets/clusters (according to the \"bulk\n+ * receive\" interface).\n+ */\n+static inline uint16_t\n+_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,\n+\t       bool bulk_alloc)\n+{\n+\tstruct igb_rx_queue *rxq = rx_queue;\n+\tvolatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;\n+\tstruct igb_rx_entry *sw_ring = rxq->sw_ring;\n+\tstruct igb_rsc_entry *sw_rsc_ring = rxq->sw_rsc_ring;\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = rxq->nb_rx_hold;\n+\tuint16_t prev_id = rxq->rx_tail;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tbool eop;\n+\t\tstruct igb_rx_entry *rxe;\n+\t\tstruct igb_rsc_entry *rsc_entry;\n+\t\tstruct igb_rsc_entry *next_rsc_entry;\n+\t\tstruct igb_rx_entry *next_rxe;\n+\t\tstruct rte_mbuf *first_seg;\n+\t\tstruct rte_mbuf *rxm;\n+\t\tstruct rte_mbuf *nmb;\n+\t\tunion ixgbe_adv_rx_desc rxd;\n+\t\tuint16_t data_len;\n+\t\tuint16_t next_id;\n+\t\tvolatile union ixgbe_adv_rx_desc *rxdp;\n+\t\tuint32_t staterr;\n+\n+next_desc:\n+\t\t/*\n+\t\t * The code in this whole file uses the volatile pointer to\n+\t\t * ensure the read ordering of the status and the rest of the\n+\t\t * descriptor fields (on the compiler level only!!!). This is so\n+\t\t * UGLY - why not to just use the compiler barrier instead? DPDK\n+\t\t * even has the rte_compiler_barrier() for that.\n+\t\t *\n+\t\t * But most importantly this is just wrong because this doesn't\n+\t\t * ensure memory ordering in a general case at all. For\n+\t\t * instance, DPDK is supposed to work on Power CPUs where\n+\t\t * compiler barrier may just not be enough!\n+\t\t *\n+\t\t * I tried to write only this function properly to have a\n+\t\t * starting point (as a part of an LRO/RSC series) but the\n+\t\t * compiler cursed at me when I tried to cast away the\n+\t\t * \"volatile\" from rx_ring (yes, it's volatile too!!!). 
So, I'm\n+\t\t * keeping it the way it is for now.\n+\t\t *\n+\t\t * The code in this file is broken in so many other places and\n+\t\t * will just not work on a big endian CPU anyway therefore the\n+\t\t * lines below will have to be revisited together with the rest\n+\t\t * of the ixgbe PMD.\n+\t\t *\n+\t\t * TODO:\n+\t\t *    - Get rid of \"volatile\" crap and let the compiler do its\n+\t\t *      job.\n+\t\t *    - Use the proper memory barrier (rte_rmb()) to ensure the\n+\t\t *      memory ordering below.\n+\t\t */\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tstaterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);\n+\n+\t\tif (!(staterr & IXGBE_RXDADV_STAT_DD))\n+\t\t\tbreak;\n+\n+\t\trxd = *rxdp;\n+\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u \"\n+\t\t\t\t  \"staterr=0x%x data_len=%u\",\n+\t\t\t   rxq->port_id, rxq->queue_id, rx_id, staterr,\n+\t\t\t   rte_le_to_cpu_16(rxd.wb.upper.length));\n+\n+\t\tif (!bulk_alloc) {\n+\t\t\tnmb = rte_rxmbuf_alloc(rxq->mb_pool);\n+\t\t\tif (nmb == NULL) {\n+\t\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed \"\n+\t\t\t\t\t\t  \"port_id=%u queue_id=%u\",\n+\t\t\t\t\t   rxq->port_id, rxq->queue_id);\n+\n+\t\t\t\trte_eth_devices[rxq->port_id].data->\n+\t\t\t\t\t\t\trx_mbuf_alloc_failed++;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t} else if (nb_hold > rxq->rx_free_thresh) {\n+\t\t\tuint16_t next_rdt = rxq->rx_free_trigger;\n+\n+\t\t\tif (!ixgbe_rx_alloc_bufs(rxq, false)) {\n+\t\t\t\trte_wmb();\n+\t\t\t\tIXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,\n+\t\t\t\t\t\t    next_rdt);\n+\t\t\t\tnb_hold -= rxq->rx_free_thresh;\n+\t\t\t} else {\n+\t\t\t\tPMD_RX_LOG(DEBUG, \"RX bulk alloc failed \"\n+\t\t\t\t\t\t  \"port_id=%u queue_id=%u\",\n+\t\t\t\t\t   rxq->port_id, rxq->queue_id);\n+\n+\t\t\t\trte_eth_devices[rxq->port_id].data->\n+\t\t\t\t\t\t\trx_mbuf_alloc_failed++;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\teop = staterr & IXGBE_RXDADV_STAT_EOP;\n+\n+\t\tnext_id = rx_id + 1;\n+\t\tif (next_id == rxq->nb_rx_desc)\n+\t\t\tnext_id = 0;\n+\n+\t\t/* Prefetch next mbuf while processing current one. 
*/\n+\t\trte_ixgbe_prefetch(sw_ring[next_id].mbuf);\n+\n+\t\t/*\n+\t\t * When next RX descriptor is on a cache-line boundary,\n+\t\t * prefetch the next 4 RX descriptors and the next 4 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((next_id & 0x3) == 0) {\n+\t\t\trte_ixgbe_prefetch(&rx_ring[next_id]);\n+\t\t\trte_ixgbe_prefetch(&sw_ring[next_id]);\n+\t\t}\n+\n+\t\trxm = rxe->mbuf;\n+\n+\t\tif (!bulk_alloc) {\n+\t\t\t__le64 dma =\n+\t\t\t  rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));\n+\t\t\t/*\n+\t\t\t * Update RX descriptor with the physical address of the\n+\t\t\t * new data buffer of the new allocated mbuf.\n+\t\t\t */\n+\t\t\trxe->mbuf = nmb;\n+\n+\t\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\trxdp->read.hdr_addr = dma;\n+\t\t\trxdp->read.pkt_addr = dma;\n+\t\t} else\n+\t\t\trxe->mbuf = NULL;\n+\n+\t\t/*\n+\t\t * Set data length & data buffer address of mbuf.\n+\t\t */\n+\t\tdata_len = rte_le_to_cpu_16(rxd.wb.upper.length);\n+\t\trxm->data_len = data_len;\n+\n+\t\tif (!eop) {\n+\t\t\tuint16_t nextp_id;\n+\t\t\t/*\n+\t\t\t * Get next descriptor index:\n+\t\t\t *  - For RSC it's in the NEXTP field.\n+\t\t\t *  - For a scattered packet - it's just a following\n+\t\t\t *    descriptor.\n+\t\t\t */\n+\t\t\tif (ixgbe_rsc_count(&rxd))\n+\t\t\t\tnextp_id =\n+\t\t\t\t\t(staterr & IXGBE_RXDADV_NEXTP_MASK) >>\n+\t\t\t\t\t\t       IXGBE_RXDADV_NEXTP_SHIFT;\n+\t\t\telse\n+\t\t\t\tnextp_id = next_id;\n+\n+\t\t\tnext_rsc_entry = &sw_rsc_ring[nextp_id];\n+\t\t\tnext_rxe = &sw_ring[nextp_id];\n+\t\t\trte_ixgbe_prefetch(next_rxe);\n+\t\t}\n+\n+\t\trsc_entry = &sw_rsc_ring[rx_id];\n+\t\tfirst_seg = rsc_entry->fbuf;\n+\t\trsc_entry->fbuf = NULL;\n+\n+\t\t/*\n+\t\t * If this is the first buffer of the received packet,\n+\t\t * set the pointer to the first mbuf of the packet and\n+\t\t * initialize its context.\n+\t\t * Otherwise, update the total length and the number of segments\n+\t\t * of the current scattered packet, and update the pointer to\n+\t\t * the last mbuf of the current packet.\n+\t\t */\n+\t\tif (first_seg == NULL) {\n+\t\t\tfirst_seg = rxm;\n+\t\t\tfirst_seg->pkt_len = data_len;\n+\t\t\tfirst_seg->nb_segs = 1;\n+\t\t} else {\n+\t\t\tfirst_seg->pkt_len += data_len;\n+\t\t\tfirst_seg->nb_segs++;\n+\t\t}\n+\n+\t\tprev_id = rx_id;\n+\t\trx_id = next_id;\n+\n+\t\t/*\n+\t\t * If this is not the last buffer of the received packet, update\n+\t\t * the pointer to the first mbuf at the NEXTP entry in the\n+\t\t * sw_rsc_ring and continue to parse the RX ring.\n+\t\t */\n+\t\tif (!eop) {\n+\t\t\trxm->next = next_rxe->mbuf;\n+\t\t\tnext_rsc_entry->fbuf = first_seg;\n+\t\t\tgoto next_desc;\n+\t\t}\n+\n+\t\t/*\n+\t\t * This is the last buffer of the received packet - return\n+\t\t * the current cluster to the user.\n+\t\t */\n+\t\trxm->next = NULL;\n+\n+\t\t/* Initialize the first mbuf of the returned packet */\n+\t\tixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,\n+\t\t\t\t\t    staterr);\n+\n+\t\t/* Prefetch data of first segment, if configured to do so. 
*/\n+\t\trte_packet_prefetch((char *)first_seg->buf_addr +\n+\t\t\tfirst_seg->data_off);\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = first_seg;\n+\t}\n+\n+\t/*\n+\t * Record index of the next RX descriptor to probe.\n+\t */\n+\trxq->rx_tail = rx_id;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tif (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u \"\n+\t\t\t   \"nb_hold=%u nb_rx=%u\",\n+\t\t\t   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);\n+\n+\t\trte_wmb();\n+\t\tIXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);\n+\t\tnb_hold = 0;\n+\t}\n+\n+\trxq->nb_rx_hold = nb_hold;\n+\treturn nb_rx;\n+}\n+\n+uint16_t\n+ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\treturn _recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);\n+}\n+\n+uint16_t\n+ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t       uint16_t nb_pkts)\n+{\n+\treturn _recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);\n+}\n+\n uint16_t\n ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\t  uint16_t nb_pkts)\n@@ -1993,6 +2290,29 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \treturn (0);\n }\n \n+/**\n+ * Free the not-yet-completed RSC cluster from the sw_rsc_ring\n+ *\n+ * The \"next\" pointer of the last segment of (not-yet-completed) RSC clusters\n+ * in the sw_rsc_ring is not set to NULL but rather points to the next\n+ * mbuf of this RSC aggregation (that has not been completed yet and still\n+ * resides on the HW ring). 
So, instead of calling for rte_pktmbuf_free() we\n+ * will just free first \"nb_segs\" segments of the cluster explicitly by calling\n+ * an rte_pktmbuf_free_seg().\n+ *\n+ * @param m RSC cluster head\n+ */\n+static void _free_rsc_cluster(struct rte_mbuf *m)\n+{\n+\tuint8_t i, nb_segs = m->nb_segs;\n+\tstruct rte_mbuf *next_seg = m->next;\n+\n+\tfor (i = 0; i < nb_segs; i++, next_seg = next_seg->next) {\n+\t\trte_pktmbuf_free_seg(m);\n+\t\tm = next_seg;\n+\t}\n+}\n+\n static void\n ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)\n {\n@@ -2016,6 +2336,13 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)\n \t\t}\n #endif\n \t}\n+\n+\tif (rxq->sw_rsc_ring)\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\t\tif (rxq->sw_rsc_ring[i].fbuf) {\n+\t\t\t\t_free_rsc_cluster(rxq->sw_rsc_ring[i].fbuf);\n+\t\t\t\trxq->sw_rsc_ring[i].fbuf = NULL;\n+\t\t\t}\n }\n \n static void\n@@ -2024,6 +2351,7 @@ ixgbe_rx_queue_release(struct igb_rx_queue *rxq)\n \tif (rxq != NULL) {\n \t\tixgbe_rx_queue_release_mbufs(rxq);\n \t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq->sw_rsc_ring);\n \t\trte_free(rxq);\n \t}\n }\n@@ -2146,6 +2474,7 @@ ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct igb_rx_queue *rxq)\n \trxq->nb_rx_hold = 0;\n \trxq->pkt_first_seg = NULL;\n \trxq->pkt_last_seg = NULL;\n+\trxq->rsc_en = 0;\n }\n \n int\n@@ -2160,6 +2489,14 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \tstruct igb_rx_queue *rxq;\n \tstruct ixgbe_hw     *hw;\n \tuint16_t len;\n+\tstruct rte_eth_dev_info dev_info = { 0 };\n+\tstruct rte_eth_rxmode *dev_rx_mode = &dev->data->dev_conf.rxmode;\n+\tbool rsc_requested = false;\n+\n+\tdev->dev_ops->dev_infos_get(dev, &dev_info);\n+\tif ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) &&\n+\t    dev_rx_mode->enable_lro)\n+\t\trsc_requested = true;\n \n \tPMD_INIT_FUNC_TRACE();\n \thw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n@@ -2265,12 +2602,28 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->sw_ring = rte_zmalloc_socket(\"rxq->sw_ring\",\n \t\t\t\t\t  sizeof(struct igb_rx_entry) * len,\n \t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n-\tif (rxq->sw_ring == NULL) {\n+\tif (!rxq->sw_ring) {\n \t\tixgbe_rx_queue_release(rxq);\n \t\treturn (-ENOMEM);\n \t}\n-\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64,\n-\t\t     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);\n+\n+\tif (rsc_requested) {\n+\t\trxq->sw_rsc_ring =\n+\t\t\trte_zmalloc_socket(\"rxq->sw_rsc_ring\",\n+\t\t\t\t\t   sizeof(struct igb_rsc_entry) * len,\n+\t\t\t\t\t   RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (!rxq->sw_rsc_ring) {\n+\t\t\tixgbe_rx_queue_release(rxq);\n+\t\t\treturn (-ENOMEM);\n+\t\t}\n+\t} else {\n+\t\trxq->sw_rsc_ring = NULL;\n+\t}\n+\n+\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p sw_rsc_ring=%p hw_ring=%p \"\n+\t\t\t    \"dma_addr=0x%\"PRIx64,\n+\t\t     rxq->sw_ring, rxq->sw_rsc_ring, rxq->rx_ring,\n+\t\t     rxq->rx_ring_phys_addr);\n \n \tif (!rte_is_power_of_2(nb_desc)) {\n \t\tPMD_INIT_LOG(DEBUG, \"queue[%d] doesn't meet Vector Rx \"\n@@ -3515,6 +3868,84 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+/**\n+ * get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF\n+ *\n+ * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the\n+ * spec rev. 
3.0 chapter 8.2.3.8.13.\n+ *\n+ * @pool Memory pool of the Rx queue\n+ */\n+static inline uint32_t get_rscctl_maxdesc(struct rte_mempool *pool)\n+{\n+\tstruct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);\n+\n+\t/* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */\n+\tuint16_t maxdesc =\n+\t\t65535 / (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);\n+\n+\tif (maxdesc >= 16)\n+\t\treturn IXGBE_RSCCTL_MAXDESC_16;\n+\telse if (maxdesc >= 8)\n+\t\treturn IXGBE_RSCCTL_MAXDESC_8;\n+\telse if (maxdesc >= 4)\n+\t\treturn IXGBE_RSCCTL_MAXDESC_4;\n+\telse\n+\t\treturn IXGBE_RSCCTL_MAXDESC_1;\n+}\n+\n+/* (Taken from FreeBSD tree)\n+** Setup the correct IVAR register for a particular MSIX interrupt\n+**   (yes this is all very magic and confusing :)\n+**  - entry is the register array entry\n+**  - vector is the MSIX vector for this queue\n+**  - type is RX/TX/MISC\n+*/\n+static void\n+ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)\n+{\n+\tstruct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tu32 ivar, index;\n+\n+\tvector |= IXGBE_IVAR_ALLOC_VAL;\n+\n+\tswitch (hw->mac.type) {\n+\n+\tcase ixgbe_mac_82598EB:\n+\t\tif (type == -1)\n+\t\t\tentry = IXGBE_IVAR_OTHER_CAUSES_INDEX;\n+\t\telse\n+\t\t\tentry += (type * 64);\n+\t\tindex = (entry >> 2) & 0x1F;\n+\t\tivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));\n+\t\tivar &= ~(0xFF << (8 * (entry & 0x3)));\n+\t\tivar |= (vector << (8 * (entry & 0x3)));\n+\t\tIXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);\n+\t\tbreak;\n+\n+\tcase ixgbe_mac_82599EB:\n+\tcase ixgbe_mac_X540:\n+\t\tif (type == -1) { /* MISC IVAR */\n+\t\t\tindex = (entry & 1) * 8;\n+\t\t\tivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);\n+\t\t\tivar &= ~(0xFF << index);\n+\t\t\tivar |= (vector << index);\n+\t\t\tIXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);\n+\t\t} else {\t/* RX/TX IVARS */\n+\t\t\tindex = (16 * (entry & 1)) + (8 * type);\n+\t\t\tivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));\n+\t\t\tivar &= ~(0xFF << index);\n+\t\t\tivar |= (vector << index);\n+\t\t\tIXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);\n+\t\t}\n+\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n void set_rx_function(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n@@ -3565,6 +3996,25 @@ void set_rx_function(struct rte_eth_dev *dev)\n \t\t\tdev->rx_pkt_burst = ixgbe_recv_scattered_pkts;\n \t\t}\n \t}\n+\n+\t/*\n+\t * Initialize the appropriate LRO callback.\n+\t *\n+\t * If all queues satisfy the bulk allocation preconditions\n+\t * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.\n+\t * Otherwise use a single allocation version.\n+\t */\n+\tif (dev->data->lro) {\n+\t\tif (hw->rx_bulk_alloc_allowed) {\n+\t\t\tPMD_INIT_LOG(INFO, \"LRO is requested. Using a bulk \"\n+\t\t\t\t\t   \"allocation version\");\n+\t\t\tdev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(INFO, \"LRO is requested. 
Using a single \"\n+\t\t\t\t\t   \"allocation version\");\n+\t\t\tdev->rx_pkt_burst = ixgbe_recv_pkts_lro;\n+\t\t}\n+\t}\n }\n \n /*\n@@ -3583,10 +4033,26 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \tuint32_t maxfrs;\n \tuint32_t srrctl;\n \tuint32_t rdrxctl;\n+\tuint32_t rscctl;\n+\tuint32_t psrtype;\n+\tuint32_t rfctl;\n \tuint32_t rxcsum;\n \tuint16_t buf_size;\n \tuint16_t i;\n \tstruct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;\n+\tstruct rte_eth_dev_info dev_info = { 0 };\n+\tbool rsc_capable = false;\n+\n+\t/* Sanity check */\n+\tdev->dev_ops->dev_infos_get(dev, &dev_info);\n+\tif (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)\n+\t\trsc_capable = true;\n+\n+\tif (!rsc_capable && rx_conf->enable_lro) {\n+\t\tPMD_INIT_LOG(CRIT, \"LRO is requested on HW that doesn't \"\n+\t\t\t\t   \"support it\");\n+\t\treturn -EINVAL;\n+\t}\n \n \tPMD_INIT_FUNC_TRACE();\n \thw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n@@ -3606,13 +4072,44 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \tIXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);\n \n \t/*\n+\t * RFCTL configuration\n+\t *\n+\t * Since NFS packets coalescing is not supported - clear RFCTL.NFSW_DIS\n+\t * and RFCTL.NFSR_DIS when RSC is enabled.\n+\t */\n+\tif (rsc_capable) {\n+\t\trfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);\n+\t\tif (rx_conf->enable_lro) {\n+\t\t\trfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |\n+\t\t\t\t   IXGBE_RFCTL_NFSR_DIS);\n+\t\t} else {\n+\t\t\trfctl |= IXGBE_RFCTL_RSC_DIS;\n+\t\t}\n+\n+\t\tIXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);\n+\t}\n+\n+\n+\t/*\n \t * Configure CRC stripping, if any.\n \t */\n \thlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);\n \tif (rx_conf->hw_strip_crc)\n \t\thlreg0 |= IXGBE_HLREG0_RXCRCSTRP;\n-\telse\n+\telse {\n \t\thlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;\n+\t\tif (rx_conf->enable_lro) {\n+\t\t\t/*\n+\t\t\t * According to chapter of 4.6.7.2.1 of the Spec Rev.\n+\t\t\t * 3.0 RSC configuration requires HW CRC stripping being\n+\t\t\t * enabled. If user requested both HW CRC stripping off\n+\t\t\t * and RSC on - return an error.\n+\t\t\t */\n+\t\t\tPMD_INIT_LOG(CRIT, \"LRO can't be enabled when HW CRC \"\n+\t\t\t\t\t    \"is disabled\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n \n \t/*\n \t * Configure jumbo frame support, if any.\n@@ -3664,9 +4161,18 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\t * Configure Header Split\n \t\t */\n \t\tif (rx_conf->header_split) {\n+\t\t\t/*\n+\t\t\t * Print a warning if split_hdr_size is less\n+\t\t\t * than 128 bytes when RSC is requested.\n+\t\t\t */\n+\t\t\tif (rx_conf->enable_lro &&\n+\t\t\t    rx_conf->split_hdr_size < 128)\n+\t\t\t\tPMD_INIT_LOG(INFO, \"split_hdr_size less than \"\n+\t\t\t\t\t\t   \"128 bytes (%d)!\",\n+\t\t\t\t\t     rx_conf->split_hdr_size);\n+\n \t\t\tif (hw->mac.type == ixgbe_mac_82599EB) {\n \t\t\t\t/* Must setup the PSRTYPE register */\n-\t\t\t\tuint32_t psrtype;\n \t\t\t\tpsrtype = IXGBE_PSRTYPE_TCPHDR |\n \t\t\t\t\tIXGBE_PSRTYPE_UDPHDR   |\n \t\t\t\t\tIXGBE_PSRTYPE_IPV4HDR  |\n@@ -3679,7 +4185,20 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\t\tsrrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;\n \t\t} else\n #endif\n+\t\t{\n \t\t\tsrrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;\n+\t\t\t/*\n+\t\t\t * Following the 4.6.7.2.1 chapter of the 82599/x540\n+\t\t\t * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER\n+\t\t\t * should be configured even if header split is not\n+\t\t\t * enabled. 
In the later case we will configure it 128\n+\t\t\t * bytes following the recommendation in the spec.\n+\t\t\t */\n+\t\t\tif (rx_conf->enable_lro)\n+\t\t\t\tsrrctl |=\n+\t\t\t\t     ((128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &\n+\t\t\t\t\t\t    IXGBE_SRRCTL_BSIZEHDR_MASK);\n+\t\t}\n \n \t\t/* Set if packets are dropped when no descriptors available */\n \t\tif (rxq->drop_en)\n@@ -3696,6 +4215,13 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\t\t\t       RTE_PKTMBUF_HEADROOM);\n \t\tsrrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &\n \t\t\t   IXGBE_SRRCTL_BSIZEPKT_MASK);\n+\n+\t\t/*\n+\t\t * TODO: Consider setting the Receive Descriptor Minimum\n+\t\t * Threshold Size for and RSC case. This is not an obviously\n+\t\t * beneficiary option but the one worth considering...\n+\t\t */\n+\n \t\tIXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);\n \n \t\tbuf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<\n@@ -3705,11 +4231,57 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\tif (dev->data->dev_conf.rxmode.max_rx_pkt_len +\n \t\t\t\t\t    2 * IXGBE_VLAN_TAG_SIZE > buf_size)\n \t\t\tdev->data->scattered_rx = 1;\n+\n+\t\t/* RSC per-queue configuration */\n+\t\tif (rx_conf->enable_lro) {\n+\t\t\tuint32_t eitr;\n+\n+\t\t\trscctl =\n+\t\t\t\tIXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));\n+\t\t\tpsrtype =\n+\t\t\t\tIXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));\n+\t\t\teitr = IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));\n+\n+\t\t\trscctl |= IXGBE_RSCCTL_RSCEN;\n+\t\t\trscctl |= get_rscctl_maxdesc(rxq->mb_pool);\n+\t\t\tpsrtype |= IXGBE_PSRTYPE_TCPHDR;\n+\n+\t\t\t/*\n+\t\t\t * RSC: Set ITR interval corresponding to 2K ints/s.\n+\t\t\t *\n+\t\t\t * Full-sized RSC aggregations for a 10Gb/s link will\n+\t\t\t * arrive at about 20K aggregation/s rate.\n+\t\t\t *\n+\t\t\t * 2K inst/s rate will make only 10% of the\n+\t\t\t * aggregations to be closed due to the interrupt timer\n+\t\t\t * expiration for a streaming at wire-speed case.\n+\t\t\t *\n+\t\t\t * For a sparse streaming case this setting will yield\n+\t\t\t * at most 500us latency for a single RSC aggregation.\n+\t\t\t */\n+\t\t\teitr   |= (2000 | IXGBE_EITR_CNT_WDIS);\n+\n+\t\t\tIXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);\n+\t\t\tIXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx),\n+\t\t\t\t\t\t\t\t       psrtype);\n+\t\t\tIXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);\n+\n+\t\t\t/*\n+\t\t\t * RSC requires the mapping of the queue to the\n+\t\t\t * interrupt vector.\n+\t\t\t */\n+\t\t\tixgbe_set_ivar(dev, rxq->reg_idx, i, 0);\n+\n+\t\t\trxq->rsc_en = 1;\n+\t\t}\n \t}\n \n \tif (rx_conf->enable_scatter)\n \t\tdev->data->scattered_rx = 1;\n \n+\tif (rx_conf->enable_lro)\n+\t\tdev->data->lro = 1;\n+\n \tset_rx_function(dev);\n \n \t/*\n@@ -3742,6 +4314,19 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n \t\tIXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);\n \t}\n \n+\t/* Finalize RSC configuration  */\n+\tif (rx_conf->enable_lro) {\n+\t\t/*\n+\t\t * Follow the instructions in the 4.6.7.2.1 of the Spec Rev. 
3.0\n+\t\t */\n+\t\trdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);\n+\t\trdrxctl |= IXGBE_RDRXCTL_RSCACKC;\n+\t\tIXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);\n+\n+\t\tPMD_INIT_LOG(INFO, \"enabling LRO mode\");\n+\t}\n+\n+\n \treturn 0;\n }\n \ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h\nindex bbe5ff3..389173f 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h\n@@ -79,6 +79,10 @@ struct igb_rx_entry {\n \tstruct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */\n };\n \n+struct igb_rsc_entry {\n+\tstruct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */\n+};\n+\n /**\n  * Structure associated with each descriptor of the TX ring of a TX queue.\n  */\n@@ -105,6 +109,7 @@ struct igb_rx_queue {\n \tvolatile uint32_t   *rdt_reg_addr; /**< RDT register address. */\n \tvolatile uint32_t   *rdh_reg_addr; /**< RDH register address. */\n \tstruct igb_rx_entry *sw_ring; /**< address of RX software ring. */\n+\tstruct igb_rsc_entry *sw_rsc_ring; /**< address of RSC software ring. */\n \tstruct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */\n \tstruct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */\n \tuint64_t            mbuf_initializer; /**< value to init mbufs */\n@@ -126,6 +131,7 @@ struct igb_rx_queue {\n \tuint8_t             port_id;  /**< Device port identifier. */\n \tuint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */\n \tuint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */\n+\tuint8_t             rsc_en;   /**< If not 0, RSC is enabled. */\n \tuint8_t             rx_deferred_start; /**< not in global dev start. */\n #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC\n \t/** need to alloc dummy mbuf, for wraparound when scanning hw ring */\n",
    "prefixes": [
        "dpdk-dev",
        "v7",
        "3/3"
    ]
}
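
The fields in the JSON above ("mbox", "comments", "checks") are themselves URLs into the API, so a patch can be followed up on without scraping the web UI. A minimal read-only sketch in Python, assuming only the requests library; the "submitter" and "date" fields printed for each comment are assumed to mirror the structure shown for the patch itself:

import requests

BASE = "http://patches.dpdk.org/api"

patch = requests.get(BASE + "/patches/3966/").json()
print(patch["name"], "-", patch["state"])

# "mbox" points at the raw patch email, suitable for `git am`.
with open("ixgbe-lro.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# "comments" and "checks" are further JSON collections.
for comment in requests.get(patch["comments"]).json():
    print(comment["submitter"]["name"], comment["date"])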