get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
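
A minimal sketch of driving this endpoint programmatically, assuming the Python requests library; the Token authorization scheme is what Patchwork's REST API documents for write access, but the PATCHWORK_TOKEN variable name and the example state value are illustrative, not taken from this page:

    import os
    import requests

    BASE = "http://patches.dpdk.org/api"

    # get: read access needs no authentication. Note that ?format=api (as in
    # the captured request below) renders the browsable HTML page, while
    # ?format=json returns the raw JSON body shown here.
    resp = requests.get(BASE + "/patches/117612/", params={"format": "json"})
    resp.raise_for_status()
    patch = resp.json()
    print(patch["state"], patch["name"])

    # patch: partial update; requires an API token and maintainer rights on
    # the project. PATCHWORK_TOKEN is a hypothetical environment variable,
    # and "accepted" is one example of a Patchwork patch state.
    token = os.environ["PATCHWORK_TOKEN"]
    resp = requests.patch(
        BASE + "/patches/117612/",
        headers={"Authorization": "Token " + token},
        json={"state": "accepted"},
    )
    resp.raise_for_status()
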

GET /api/patches/117612/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117612,
    "url": "http://patches.dpdk.org/api/patches/117612/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221007174336.54354-33-andrew.boyer@amd.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221007174336.54354-33-andrew.boyer@amd.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221007174336.54354-33-andrew.boyer@amd.com",
    "date": "2022-10-07T17:43:33",
    "name": "[32/35] net/ionic: add optimized handlers for non-scattered Rx/Tx",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "77457e87463b7f66788383898e0b2f07b5037927",
    "submitter": {
        "id": 2861,
        "url": "http://patches.dpdk.org/api/people/2861/?format=api",
        "name": "Andrew Boyer",
        "email": "Andrew.Boyer@amd.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221007174336.54354-33-andrew.boyer@amd.com/mbox/",
    "series": [
        {
            "id": 25037,
            "url": "http://patches.dpdk.org/api/series/25037/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25037",
            "date": "2022-10-07T17:43:01",
            "name": "net/ionic: updates for 22.11 release",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/25037/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117612/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/117612/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C99CAA04FD;\n\tFri,  7 Oct 2022 19:48:00 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5067442C15;\n\tFri,  7 Oct 2022 19:45:15 +0200 (CEST)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2046.outbound.protection.outlook.com [40.107.244.46])\n by mails.dpdk.org (Postfix) with ESMTP id 79B2042B76\n for <dev@dpdk.org>; Fri,  7 Oct 2022 19:45:11 +0200 (CEST)",
            "from MW4PR03CA0359.namprd03.prod.outlook.com (2603:10b6:303:dc::34)\n by DM6PR12MB4402.namprd12.prod.outlook.com (2603:10b6:5:2a5::18) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.24; Fri, 7 Oct\n 2022 17:45:08 +0000",
            "from CO1NAM11FT068.eop-nam11.prod.protection.outlook.com\n (2603:10b6:303:dc:cafe::a3) by MW4PR03CA0359.outlook.office365.com\n (2603:10b6:303:dc::34) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.26 via Frontend\n Transport; Fri, 7 Oct 2022 17:45:08 +0000",
            "from SATLEXMB04.amd.com (165.204.84.17) by\n CO1NAM11FT068.mail.protection.outlook.com (10.13.175.142) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.20.5709.10 via Frontend Transport; Fri, 7 Oct 2022 17:45:07 +0000",
            "from driver-dev1.pensando.io (10.180.168.240) by SATLEXMB04.amd.com\n (10.181.40.145) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.28; Fri, 7 Oct\n 2022 12:45:04 -0500"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=mUFcWAZ2NOY3qjbkohpMAIseLM23i1QXv26QvdSRyrvj2q572vK1jv3L5W03YD6hj/kRoisxOwW/uY5koErx3HnB1U9bWoMXtZ3qWdbbtzg1AZm/bnlrWVR2s0/DumZGyi5UJFa6DfiO83NHsOw+qIoUjHSwcCFEprzRkDC3zAQYdLR8dCO8JReMmiQ9PgHJxj7XJU53putWfQtnjKle66Mn1ib1WM30d16LKDv59/cNVTxEMdytTVCdV34SFs8QMgcpBaMG6iJv0IuEhYPQgRdUC04bksVBsiccHI/ffeqI5FjupZPsca/hsNnou3YJbmh4lqxCkcRyh8cnAvvr/g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=iAZPsTezsyYdyAJfywl0uObL7SxhpIV2fvlXV6Hjn0I=;\n b=XYrMPmMiEyAtSj8nUpkV4bVAgZSlOr9QLZz4DP5Lm0dF6F26A9UCU3Vkc5+EBwD4DjrjXHZ/2EuVzF63GyPFWmJLpDhTGgVHLQF59TMP7Qf1EwN68/aZgBg8ZfZXR3lA8YNmp4ZCyBXP+mSUtqDWoF3FTR5fvxTmRlrXi5GKGoPiCBLLxOQqq+pl9NqsrZ5nZkX6Kqr2JiNq9DlNmGJ6pXcDXHuk0Q7K+Klt0FxTmZZg//P66HopgGuKOlqYX+rVvvRN3K4bp+wGi/GzsUW0ilrdphLBxXggZOk74mTy1CbvWEVQjSVLKRYptT37+vvl+tIQkypQmIU7hrRwp5Rlhw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 165.204.84.17) smtp.rcpttodomain=dpdk.org smtp.mailfrom=amd.com; dmarc=pass\n (p=quarantine sp=quarantine pct=100) action=none header.from=amd.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=amd.com; s=selector1;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=iAZPsTezsyYdyAJfywl0uObL7SxhpIV2fvlXV6Hjn0I=;\n b=gz0B0v3SjKeOb3cE4BGGkFSr0zcz4b4I0hRU1I5ET75PhZbf4Gp8CrU3mOVJNwrqYWEyihg0eYOkkE7P/YfTbsr1b0hL1eb3NnFkG4Rkf+V0g1dj4qNkjyAry0rG7c+L7UrBpcbbFrTx1ASXMCiq4NtSjbfxbP2vSda3CAtOSYc=",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 165.204.84.17)\n smtp.mailfrom=amd.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=amd.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of amd.com designates\n 165.204.84.17 as permitted sender) receiver=protection.outlook.com;\n client-ip=165.204.84.17; helo=SATLEXMB04.amd.com; pr=C",
        "From": "Andrew Boyer <andrew.boyer@amd.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Andrew Boyer <andrew.boyer@amd.com>,\n R Mohamed Shah <mohamedshah.r@amd.com>",
        "Subject": "[PATCH 32/35] net/ionic: add optimized handlers for non-scattered\n Rx/Tx",
        "Date": "Fri, 7 Oct 2022 10:43:33 -0700",
        "Message-ID": "<20221007174336.54354-33-andrew.boyer@amd.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20221007174336.54354-1-andrew.boyer@amd.com>",
        "References": "<20221007174336.54354-1-andrew.boyer@amd.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.180.168.240]",
        "X-ClientProxiedBy": "SATLEXMB04.amd.com (10.181.40.145) To SATLEXMB04.amd.com\n (10.181.40.145)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CO1NAM11FT068:EE_|DM6PR12MB4402:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "22c6e93b-5b2e-4a05-22c9-08daa88bacbd",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Zg/d03+eBk1nq4HBkRxiI1AC5s7rgEFXFwA4yt5Ld3TgUUCt3oA04X8xiE/Q/pPDdBDsE2PVJs6m1onSG4MbywYzqH7/ohAo24f+2Ha5n+ayJea04UFR3fiuuNqgMqdMmK0o9kqlxjROz43NZ7OFRvqT14tYQdRk3NQLxFczVMbWWL6QQf4fiD7szTgM4UgLetOO4vFThguRqk2ymdN7tVrAVWqpUcOeCdCHOIlD+4LwMvdV5YCYlwKeItRbDoynP0+nIgzc+/YY4gvxnxXQE9efkaioOoRpjzGTVdZ62Ash1TnVqAm606YjAuUFy3MCRMdswwMpt5/eWcCZQ5lw9flqZKELJfN9o4UOXF80Gtime1MWzU6mN1UDyohnSdkjEkMPqdbhVncnSl8O/9nowXRqJariBvMk12hPV3schVL0IjgUTzCxNqPxEMOod/eyWIfdUkZ7J2Wf9HQQKiXfAq43Rf6J37rCekq+CFvLnQRRnubV7RcTjj115gm/Y+d7o8ZGZiFkROE44yVovXcDFf/rV4PuNIjb19QUsXsk8cdMJyeNN32on0b6PbYz0YYEIFJP0waV2RwgSx40hkIJIj6Wv8kIc2rGkcQrxY8oDb2VFeZnau7ULiumrXvcnTqh2sNwNBtQPmA/NFwFic21IKpBU4TlM1HA+QupwSTnexItFEIECoP5Ufr47QllS5E5RoLUIEELAUrJfAaOMy8REQPRxlyZO8SiTaffsJKOdpptqC0i+r5sxDdTfMVMIz6FslTLGHQECM7dfzVT1IuAwUXLPYCwXmrrGJwaJ9rAs3pq5Az3ds1SMTYcng9CRN62",
        "X-Forefront-Antispam-Report": "CIP:165.204.84.17; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:SATLEXMB04.amd.com; PTR:InfoDomainNonexistent; CAT:NONE;\n SFS:(13230022)(4636009)(346002)(136003)(39860400002)(376002)(396003)(451199015)(46966006)(40470700004)(36840700001)(40480700001)(41300700001)(36860700001)(40460700003)(83380400001)(6666004)(426003)(2906002)(86362001)(44832011)(82310400005)(81166007)(356005)(54906003)(478600001)(30864003)(336012)(47076005)(8936002)(5660300002)(8676002)(316002)(36756003)(186003)(1076003)(16526019)(2616005)(6916009)(70586007)(4326008)(70206006)(82740400003)(26005)(36900700001)(579004)(559001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "amd.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "07 Oct 2022 17:45:07.6624 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 22c6e93b-5b2e-4a05-22c9-08daa88bacbd",
        "X-MS-Exchange-CrossTenant-Id": "3dd8961f-e488-4e60-8e11-a82d994e183d",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=3dd8961f-e488-4e60-8e11-a82d994e183d; Ip=[165.204.84.17];\n Helo=[SATLEXMB04.amd.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT068.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB4402",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The code is very similar, but the simple case can skip a few branches\nin the hot path. This improves PPS when 10KB mbufs are used.\n\nS/G is enabled on the Rx side by offload DEV_RX_OFFLOAD_SCATTER.\nS/G is enabled on the Tx side by offload DEV_TX_OFFLOAD_MULTI_SEGS.\n\nS/G is automatically enabled on the Rx side if the provided mbufs are\ntoo small to hold the maximum possible frame.\n\nTo enable S/G in testpmd, add these args:\n  --rx-offloads=0x2000 --tx-offloads=0x8000\n\nSigned-off-by: Andrew Boyer <andrew.boyer@amd.com>\nSigned-off-by: R Mohamed Shah <mohamedshah.r@amd.com>\n---\n doc/guides/rel_notes/release_22_11.rst |   1 +\n drivers/net/ionic/ionic_ethdev.c       |  25 +-\n drivers/net/ionic/ionic_lif.c          |  61 ++-\n drivers/net/ionic/ionic_lif.h          |   1 +\n drivers/net/ionic/ionic_rxtx.c         | 576 ++-----------------------\n drivers/net/ionic/ionic_rxtx.h         |  46 +-\n drivers/net/ionic/ionic_rxtx_sg.c      | 496 +++++++++++++++++++++\n drivers/net/ionic/ionic_rxtx_simple.c  | 417 ++++++++++++++++++\n drivers/net/ionic/meson.build          |   2 +\n 9 files changed, 1054 insertions(+), 571 deletions(-)\n create mode 100644 drivers/net/ionic/ionic_rxtx_sg.c\n create mode 100644 drivers/net/ionic/ionic_rxtx_simple.c",
    "diff": "diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst\nindex 974400d0a6..9992236217 100644\n--- a/doc/guides/rel_notes/release_22_11.rst\n+++ b/doc/guides/rel_notes/release_22_11.rst\n@@ -88,6 +88,7 @@ New Features\n   * Added support for advertising packet types.\n   * Added support for descriptor status functions.\n   * Added Q-in-CMB feature controlled by devarg ionic_cmb.\n+  * Added optimized handlers for non-scattered Rx and Tx.\n \n Removed Items\n -------------\ndiff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c\nindex 28297879cf..d29aa717e3 100644\n--- a/drivers/net/ionic/ionic_ethdev.c\n+++ b/drivers/net/ionic/ionic_ethdev.c\n@@ -828,8 +828,6 @@ ionic_dev_configure(struct rte_eth_dev *eth_dev)\n \n \tionic_lif_configure(lif);\n \n-\tionic_lif_set_features(lif);\n-\n \treturn 0;\n }\n \n@@ -883,6 +881,13 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)\n \tif (dev_conf->lpbk_mode)\n \t\tIONIC_PRINT(WARNING, \"Loopback mode not supported\");\n \n+\t/* Re-set features in case SG flag was added in rx_queue_setup() */\n+\terr = ionic_lif_set_features(lif);\n+\tif (err) {\n+\t\tIONIC_PRINT(ERR, \"Cannot set LIF features: %d\", err);\n+\t\treturn err;\n+\t}\n+\n \tlif->frame_size = eth_dev->data->mtu + IONIC_ETH_OVERHEAD;\n \n \terr = ionic_lif_change_mtu(lif, eth_dev->data->mtu);\n@@ -917,6 +922,18 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)\n \t\t\t\tspeed);\n \t}\n \n+\tif (lif->hw_features & IONIC_ETH_HW_RX_SG)\n+\t\teth_dev->rx_pkt_burst = &ionic_recv_pkts_sg;\n+\telse\n+\t\teth_dev->rx_pkt_burst = &ionic_recv_pkts;\n+\n+\tif (lif->hw_features & IONIC_ETH_HW_TX_SG)\n+\t\teth_dev->tx_pkt_burst = &ionic_xmit_pkts_sg;\n+\telse\n+\t\teth_dev->tx_pkt_burst = &ionic_xmit_pkts;\n+\n+\teth_dev->tx_pkt_prepare = &ionic_prep_pkts;\n+\n \tionic_dev_link_update(eth_dev, 0);\n \n \treturn 0;\n@@ -980,10 +997,6 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)\n \tIONIC_PRINT_CALL();\n \n \teth_dev->dev_ops = &ionic_eth_dev_ops;\n-\teth_dev->rx_pkt_burst = &ionic_recv_pkts;\n-\teth_dev->tx_pkt_burst = &ionic_xmit_pkts;\n-\teth_dev->tx_pkt_prepare = &ionic_prep_pkts;\n-\n \teth_dev->rx_descriptor_status = ionic_dev_rx_descriptor_status;\n \teth_dev->tx_descriptor_status = ionic_dev_tx_descriptor_status;\n \ndiff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c\nindex cf9605c791..affb6a44af 100644\n--- a/drivers/net/ionic/ionic_lif.c\n+++ b/drivers/net/ionic/ionic_lif.c\n@@ -755,11 +755,10 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,\n \t\tstruct ionic_rx_qcq **rxq_out)\n {\n \tstruct ionic_rx_qcq *rxq;\n-\tuint16_t flags, seg_size, hdr_seg_size, max_segs, max_segs_fw;\n+\tuint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;\n \tuint32_t max_mtu;\n \tint err;\n \n-\tflags = IONIC_QCQ_F_SG;\n \tif (lif->state & IONIC_LIF_F_Q_IN_CMB)\n \t\tflags |= IONIC_QCQ_F_CMB;\n \n@@ -770,7 +769,18 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,\n \n \tmax_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);\n \n-\tmax_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;\n+\t/* If mbufs are too small to hold received packets, enable SG */\n+\tif (max_mtu > hdr_seg_size) {\n+\t\tIONIC_PRINT(NOTICE, \"Enabling RX_OFFLOAD_SCATTER\");\n+\t\tlif->eth_dev->data->dev_conf.rxmode.offloads |=\n+\t\t\tRTE_ETH_RX_OFFLOAD_SCATTER;\n+\t\tionic_lif_configure_rx_sg_offload(lif);\n+\t}\n+\n+\tif (lif->features & 
IONIC_ETH_HW_RX_SG) {\n+\t\tflags |= IONIC_QCQ_F_SG;\n+\t\tmax_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;\n+\t}\n \n \t/*\n \t * Calculate how many fragment pointers might be stored in queue.\n@@ -820,14 +830,17 @@ ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,\n \t\tuint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)\n {\n \tstruct ionic_tx_qcq *txq;\n-\tuint16_t flags, num_segs_fw;\n+\tuint16_t flags = 0, num_segs_fw = 1;\n \tint err;\n \n-\tflags = IONIC_QCQ_F_SG;\n+\tif (lif->features & IONIC_ETH_HW_TX_SG) {\n+\t\tflags |= IONIC_QCQ_F_SG;\n+\t\tnum_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;\n+\t}\n \tif (lif->state & IONIC_LIF_F_Q_IN_CMB)\n \t\tflags |= IONIC_QCQ_F_CMB;\n \n-\tnum_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;\n+\tIONIC_PRINT(DEBUG, \"txq %u num_segs %u\", index, num_segs_fw);\n \n \terr = ionic_qcq_alloc(lif,\n \t\tIONIC_QTYPE_TXQ,\n@@ -1561,8 +1574,7 @@ ionic_lif_txq_init(struct ionic_tx_qcq *txq)\n \t\t\t.type = q->type,\n \t\t\t.ver = lif->qtype_info[q->type].version,\n \t\t\t.index = rte_cpu_to_le_32(q->index),\n-\t\t\t.flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |\n-\t\t\t\t\t\tIONIC_QINIT_F_ENA),\n+\t\t\t.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),\n \t\t\t.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),\n \t\t\t.ring_size = rte_log2_u32(q->num_descs),\n \t\t\t.ring_base = rte_cpu_to_le_64(q->base_pa),\n@@ -1572,6 +1584,8 @@ ionic_lif_txq_init(struct ionic_tx_qcq *txq)\n \t};\n \tint err;\n \n+\tif (txq->flags & IONIC_QCQ_F_SG)\n+\t\tctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);\n \tif (txq->flags & IONIC_QCQ_F_CMB)\n \t\tctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);\n \n@@ -1615,8 +1629,7 @@ ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)\n \t\t\t.type = q->type,\n \t\t\t.ver = lif->qtype_info[q->type].version,\n \t\t\t.index = rte_cpu_to_le_32(q->index),\n-\t\t\t.flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |\n-\t\t\t\t\t\tIONIC_QINIT_F_ENA),\n+\t\t\t.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),\n \t\t\t.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),\n \t\t\t.ring_size = rte_log2_u32(q->num_descs),\n \t\t\t.ring_base = rte_cpu_to_le_64(q->base_pa),\n@@ -1626,6 +1639,8 @@ ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)\n \t};\n \tint err;\n \n+\tif (rxq->flags & IONIC_QCQ_F_SG)\n+\t\tctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);\n \tif (rxq->flags & IONIC_QCQ_F_CMB)\n \t\tctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);\n \n@@ -1791,6 +1806,20 @@ ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)\n \t}\n }\n \n+void\n+ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)\n+{\n+\tstruct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;\n+\n+\tif (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {\n+\t\tlif->features |= IONIC_ETH_HW_RX_SG;\n+\t\tlif->eth_dev->data->scattered_rx = 1;\n+\t} else {\n+\t\tlif->features &= ~IONIC_ETH_HW_RX_SG;\n+\t\tlif->eth_dev->data->scattered_rx = 0;\n+\t}\n+}\n+\n void\n ionic_lif_configure(struct ionic_lif *lif)\n {\n@@ -1836,13 +1865,11 @@ ionic_lif_configure(struct ionic_lif *lif)\n \telse\n \t\tlif->features &= ~IONIC_ETH_HW_RX_CSUM;\n \n-\tif (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {\n-\t\tlif->features |= IONIC_ETH_HW_RX_SG;\n-\t\tlif->eth_dev->data->scattered_rx = 1;\n-\t} else {\n-\t\tlif->features &= ~IONIC_ETH_HW_RX_SG;\n-\t\tlif->eth_dev->data->scattered_rx = 0;\n-\t}\n+\t/*\n+\t * NB: RX_SG may be enabled later during rx_queue_setup() if\n+\t * required by the mbuf/mtu configuration\n+\t 
*/\n+\tionic_lif_configure_rx_sg_offload(lif);\n \n \t/* Covers VLAN_STRIP */\n \tionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);\ndiff --git a/drivers/net/ionic/ionic_lif.h b/drivers/net/ionic/ionic_lif.h\nindex e4af138a51..2aa9f774ff 100644\n--- a/drivers/net/ionic/ionic_lif.h\n+++ b/drivers/net/ionic/ionic_lif.h\n@@ -188,6 +188,7 @@ void ionic_lif_stop(struct ionic_lif *lif);\n \n void ionic_lif_configure(struct ionic_lif *lif);\n void ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask);\n+void ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif);\n void ionic_lif_reset(struct ionic_lif *lif);\n \n int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);\ndiff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c\nindex 9a346f4143..64733da535 100644\n--- a/drivers/net/ionic/ionic_rxtx.c\n+++ b/drivers/net/ionic/ionic_rxtx.c\n@@ -2,50 +2,28 @@\n  * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.\n  */\n \n-#include <sys/queue.h>\n #include <stdio.h>\n-#include <stdlib.h>\n #include <string.h>\n #include <errno.h>\n #include <stdint.h>\n-#include <stdarg.h>\n-#include <unistd.h>\n-#include <inttypes.h>\n \n-#include <rte_byteorder.h>\n #include <rte_common.h>\n-#include <rte_cycles.h>\n+#include <rte_byteorder.h>\n+#include <rte_errno.h>\n #include <rte_log.h>\n-#include <rte_debug.h>\n-#include <rte_interrupts.h>\n-#include <rte_pci.h>\n-#include <rte_memory.h>\n-#include <rte_memzone.h>\n-#include <rte_launch.h>\n-#include <rte_eal.h>\n-#include <rte_per_lcore.h>\n-#include <rte_lcore.h>\n-#include <rte_atomic.h>\n-#include <rte_branch_prediction.h>\n-#include <rte_mempool.h>\n-#include <rte_malloc.h>\n #include <rte_mbuf.h>\n #include <rte_ether.h>\n-#include <ethdev_driver.h>\n-#include <rte_prefetch.h>\n-#include <rte_udp.h>\n-#include <rte_tcp.h>\n-#include <rte_sctp.h>\n-#include <rte_string_fns.h>\n-#include <rte_errno.h>\n #include <rte_ip.h>\n-#include <rte_net.h>\n+#include <rte_tcp.h>\n+#include <rte_ethdev.h>\n+#include <ethdev_driver.h>\n \n-#include \"ionic_logs.h\"\n-#include \"ionic_mac_api.h\"\n-#include \"ionic_ethdev.h\"\n+#include \"ionic.h\"\n+#include \"ionic_dev.h\"\n #include \"ionic_lif.h\"\n+#include \"ionic_ethdev.h\"\n #include \"ionic_rxtx.h\"\n+#include \"ionic_logs.h\"\n \n static void\n ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)\n@@ -103,60 +81,6 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n \tqinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;\n }\n \n-static __rte_always_inline void\n-ionic_tx_flush(struct ionic_tx_qcq *txq)\n-{\n-\tstruct ionic_cq *cq = &txq->qcq.cq;\n-\tstruct ionic_queue *q = &txq->qcq.q;\n-\tstruct rte_mbuf *txm;\n-\tstruct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;\n-\tvoid **info;\n-\tuint32_t i;\n-\n-\tcq_desc = &cq_desc_base[cq->tail_idx];\n-\n-\twhile (color_match(cq_desc->color, cq->done_color)) {\n-\t\tcq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);\n-\t\tif (cq->tail_idx == 0)\n-\t\t\tcq->done_color = !cq->done_color;\n-\n-\t\t/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */\n-\t\tif ((cq->tail_idx & 0x3) == 0)\n-\t\t\trte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);\n-\n-\t\twhile (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {\n-\t\t\t/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */\n-\t\t\trte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));\n-\n-\t\t\t/* Prefetch next mbuf */\n-\t\t\tvoid **next_info =\n-\t\t\t\tIONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));\n-\t\t\tif 
(next_info[0])\n-\t\t\t\trte_mbuf_prefetch_part2(next_info[0]);\n-\t\t\tif (next_info[1])\n-\t\t\t\trte_mbuf_prefetch_part2(next_info[1]);\n-\n-\t\t\tinfo = IONIC_INFO_PTR(q, q->tail_idx);\n-\t\t\tfor (i = 0; i < q->num_segs; i++) {\n-\t\t\t\ttxm = info[i];\n-\t\t\t\tif (!txm)\n-\t\t\t\t\tbreak;\n-\n-\t\t\t\tif (txq->flags & IONIC_QCQ_F_FAST_FREE)\n-\t\t\t\t\trte_mempool_put(txm->pool, txm);\n-\t\t\t\telse\n-\t\t\t\t\trte_pktmbuf_free_seg(txm);\n-\n-\t\t\t\tinfo[i] = NULL;\n-\t\t\t}\n-\n-\t\t\tq->tail_idx = Q_NEXT_TO_SRVC(q, 1);\n-\t\t}\n-\n-\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n-\t}\n-}\n-\n void __rte_cold\n ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n@@ -394,7 +318,7 @@ ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)\n \treturn desc;\n }\n \n-static int\n+int\n ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n {\n \tstruct ionic_queue *q = &txq->qcq.q;\n@@ -405,7 +329,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n \trte_iova_t data_iova;\n \tuint64_t desc_addr = 0, next_addr;\n \tuint16_t desc_len = 0;\n-\tuint8_t desc_nsge;\n+\tuint8_t desc_nsge = 0;\n \tuint32_t hdrlen;\n \tuint32_t mss = txm->tso_segsz;\n \tuint32_t frag_left = 0;\n@@ -416,6 +340,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n \tbool start, done;\n \tbool encap;\n \tbool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);\n+\tbool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);\n \tuint16_t vlan_tci = txm->vlan_tci;\n \tuint64_t ol_flags = txm->ol_flags;\n \n@@ -438,48 +363,22 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n \t\thdrlen = txm->l2_len + txm->l3_len + txm->l4_len;\n \t}\n \n-\tseglen = hdrlen + mss;\n-\tleft = txm->data_len;\n-\tdata_iova = rte_mbuf_data_iova(txm);\n-\n \tdesc = ionic_tx_tso_next(txq, &elem);\n+\ttxm_seg = txm;\n \tstart = true;\n+\tseglen = hdrlen + mss;\n \n-\t/* Chop data up into desc segments */\n-\n-\twhile (left > 0) {\n-\t\tlen = RTE_MIN(seglen, left);\n-\t\tfrag_left = seglen - len;\n-\t\tdesc_addr = rte_cpu_to_le_64(data_iova + offset);\n-\t\tdesc_len = len;\n-\t\tdesc_nsge = 0;\n-\t\tleft -= len;\n-\t\toffset += len;\n-\t\tif (txm->nb_segs > 1 && frag_left > 0)\n-\t\t\tcontinue;\n-\t\tdone = (txm->nb_segs == 1 && left == 0);\n-\t\tionic_tx_tso_post(q, desc, txm,\n-\t\t\tdesc_addr, desc_nsge, desc_len,\n-\t\t\thdrlen, mss,\n-\t\t\tencap,\n-\t\t\tvlan_tci, has_vlan,\n-\t\t\tstart, done);\n-\t\tdesc = ionic_tx_tso_next(txq, &elem);\n-\t\tstart = false;\n-\t\tseglen = mss;\n-\t}\n-\n-\t/* Chop frags into desc segments */\n-\n-\ttxm_seg = txm->next;\n+\t/* Walk the chain of mbufs */\n \twhile (txm_seg != NULL) {\n \t\toffset = 0;\n \t\tdata_iova = rte_mbuf_data_iova(txm_seg);\n \t\tleft = txm_seg->data_len;\n \n+\t\t/* Split the mbuf data up into multiple descriptors */\n \t\twhile (left > 0) {\n \t\t\tnext_addr = rte_cpu_to_le_64(data_iova + offset);\n-\t\t\tif (frag_left > 0) {\n+\t\t\tif (frag_left > 0 && use_sgl) {\n+\t\t\t\t/* Fill previous descriptor's SGE */\n \t\t\t\tlen = RTE_MIN(frag_left, left);\n \t\t\t\tfrag_left -= len;\n \t\t\t\telem->addr = next_addr;\n@@ -487,16 +386,19 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n \t\t\t\telem++;\n \t\t\t\tdesc_nsge++;\n \t\t\t} else {\n-\t\t\t\tlen = RTE_MIN(mss, left);\n-\t\t\t\tfrag_left = mss - len;\n+\t\t\t\t/* Fill new descriptor's data field */\n+\t\t\t\tlen = RTE_MIN(seglen, left);\n+\t\t\t\tfrag_left = seglen - len;\n \t\t\t\tdesc_addr = next_addr;\n \t\t\t\tdesc_len = len;\n 
\t\t\t\tdesc_nsge = 0;\n \t\t\t}\n \t\t\tleft -= len;\n \t\t\toffset += len;\n-\t\t\tif (txm_seg->next != NULL && frag_left > 0)\n-\t\t\t\tcontinue;\n+\n+\t\t\t/* Pack the next mbuf's data into the descriptor */\n+\t\t\tif (txm_seg->next != NULL && frag_left > 0 && use_sgl)\n+\t\t\t\tbreak;\n \n \t\t\tdone = (txm_seg->next == NULL && left == 0);\n \t\t\tionic_tx_tso_post(q, desc, txm_seg,\n@@ -507,6 +409,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n \t\t\t\tstart, done);\n \t\t\tdesc = ionic_tx_tso_next(txq, &elem);\n \t\t\tstart = false;\n+\t\t\tseglen = mss;\n \t\t}\n \n \t\ttxm_seg = txm_seg->next;\n@@ -517,157 +420,6 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n \treturn 0;\n }\n \n-static __rte_always_inline int\n-ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n-{\n-\tstruct ionic_queue *q = &txq->qcq.q;\n-\tstruct ionic_txq_desc *desc, *desc_base = q->base;\n-\tstruct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;\n-\tstruct ionic_txq_sg_elem *elem;\n-\tstruct ionic_tx_stats *stats = &txq->stats;\n-\tstruct rte_mbuf *txm_seg;\n-\tvoid **info;\n-\trte_iova_t data_iova;\n-\tuint64_t ol_flags = txm->ol_flags;\n-\tuint64_t addr, cmd;\n-\tuint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;\n-\tuint8_t flags = 0;\n-\n-\tdesc = &desc_base[q->head_idx];\n-\tinfo = IONIC_INFO_PTR(q, q->head_idx);\n-\n-\tif ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&\n-\t    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {\n-\t\topcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;\n-\t\tflags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;\n-\t}\n-\n-\tif (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&\n-\t     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||\n-\t    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&\n-\t     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {\n-\t\topcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;\n-\t\tflags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;\n-\t}\n-\n-\tif (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)\n-\t\tstats->no_csum++;\n-\n-\tif (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||\n-\t     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&\n-\t    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||\n-\t     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {\n-\t\tflags |= IONIC_TXQ_DESC_FLAG_ENCAP;\n-\t}\n-\n-\tif (ol_flags & RTE_MBUF_F_TX_VLAN) {\n-\t\tflags |= IONIC_TXQ_DESC_FLAG_VLAN;\n-\t\tdesc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);\n-\t}\n-\n-\taddr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));\n-\n-\tcmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);\n-\tdesc->cmd = rte_cpu_to_le_64(cmd);\n-\tdesc->len = rte_cpu_to_le_16(txm->data_len);\n-\n-\tinfo[0] = txm;\n-\n-\tif (txm->nb_segs > 1) {\n-\t\ttxm_seg = txm->next;\n-\n-\t\telem = sg_desc_base[q->head_idx].elems;\n-\n-\t\twhile (txm_seg != NULL) {\n-\t\t\t/* Stash the mbuf ptr in the array */\n-\t\t\tinfo++;\n-\t\t\t*info = txm_seg;\n-\n-\t\t\t/* Configure the SGE */\n-\t\t\tdata_iova = rte_mbuf_data_iova(txm_seg);\n-\t\t\telem->len = rte_cpu_to_le_16(txm_seg->data_len);\n-\t\t\telem->addr = rte_cpu_to_le_64(data_iova);\n-\t\t\telem++;\n-\n-\t\t\ttxm_seg = txm_seg->next;\n-\t\t}\n-\t}\n-\n-\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n-\n-\treturn 0;\n-}\n-\n-uint16_t\n-ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\tuint16_t nb_pkts)\n-{\n-\tstruct ionic_tx_qcq *txq = tx_queue;\n-\tstruct ionic_queue *q = &txq->qcq.q;\n-\tstruct ionic_tx_stats *stats = &txq->stats;\n-\tstruct rte_mbuf *mbuf;\n-\tuint32_t bytes_tx = 0;\n-\tuint16_t nb_avail, nb_tx = 0;\n-\tint err;\n-\n-\tstruct ionic_txq_desc *desc_base = q->base;\n-\tif (!(txq->flags & 
IONIC_QCQ_F_CMB))\n-\t\trte_prefetch0(&desc_base[q->head_idx]);\n-\trte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));\n-\n-\tif (tx_pkts) {\n-\t\trte_mbuf_prefetch_part1(tx_pkts[0]);\n-\t\trte_mbuf_prefetch_part2(tx_pkts[0]);\n-\t}\n-\n-\tif (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {\n-\t\t/* Cleaning old buffers */\n-\t\tionic_tx_flush(txq);\n-\t}\n-\n-\tnb_avail = ionic_q_space_avail(q);\n-\tif (unlikely(nb_avail < nb_pkts)) {\n-\t\tstats->stop += nb_pkts - nb_avail;\n-\t\tnb_pkts = nb_avail;\n-\t}\n-\n-\twhile (nb_tx < nb_pkts) {\n-\t\tuint16_t next_idx = Q_NEXT_TO_POST(q, 1);\n-\t\tif (!(txq->flags & IONIC_QCQ_F_CMB))\n-\t\t\trte_prefetch0(&desc_base[next_idx]);\n-\t\trte_prefetch0(IONIC_INFO_PTR(q, next_idx));\n-\n-\t\tif (nb_tx + 1 < nb_pkts) {\n-\t\t\trte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);\n-\t\t\trte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);\n-\t\t}\n-\n-\t\tmbuf = tx_pkts[nb_tx];\n-\n-\t\tif (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)\n-\t\t\terr = ionic_tx_tso(txq, mbuf);\n-\t\telse\n-\t\t\terr = ionic_tx(txq, mbuf);\n-\t\tif (err) {\n-\t\t\tstats->drop += nb_pkts - nb_tx;\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tbytes_tx += mbuf->pkt_len;\n-\t\tnb_tx++;\n-\t}\n-\n-\tif (nb_tx > 0) {\n-\t\trte_wmb();\n-\t\tionic_q_flush(q);\n-\n-\t\tstats->packets += nb_tx;\n-\t\tstats->bytes += bytes_tx;\n-\t}\n-\n-\treturn nb_tx;\n-}\n-\n /*********************************************************************\n  *\n  *  TX prep functions\n@@ -820,7 +572,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n }\n \n #define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)\n-static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]\n+const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]\n \t\t__rte_cache_aligned = {\n \t/* IP_BAD set */\n \t[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,\n@@ -850,7 +602,7 @@ static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]\n };\n \n /* RTE_PTYPE_UNKNOWN is 0x0 */\n-static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]\n+const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]\n \t\t__rte_cache_aligned = {\n \t[IONIC_PKT_TYPE_NON_IP]   = RTE_PTYPE_UNKNOWN,\n \t[IONIC_PKT_TYPE_IPV4]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,\n@@ -884,203 +636,6 @@ ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)\n \treturn ptypes;\n }\n \n-/*\n- * Cleans one descriptor. 
Connects the filled mbufs into a chain.\n- * Does not advance the tail index.\n- */\n-static __rte_always_inline void\n-ionic_rx_clean_one(struct ionic_rx_qcq *rxq,\n-\t\tstruct ionic_rxq_comp *cq_desc,\n-\t\tstruct ionic_rx_service *rx_svc)\n-{\n-\tstruct ionic_queue *q = &rxq->qcq.q;\n-\tstruct rte_mbuf *rxm, *rxm_seg, *prev_rxm;\n-\tstruct ionic_rx_stats *stats = &rxq->stats;\n-\tuint64_t pkt_flags = 0;\n-\tuint32_t pkt_type;\n-\tuint32_t left, i;\n-\tuint16_t cq_desc_len;\n-\tuint8_t ptype, cflags;\n-\tvoid **info;\n-\n-\tcq_desc_len = rte_le_to_cpu_16(cq_desc->len);\n-\n-\tinfo = IONIC_INFO_PTR(q, q->tail_idx);\n-\n-\trxm = info[0];\n-\n-\tif (cq_desc->status) {\n-\t\tstats->bad_cq_status++;\n-\t\treturn;\n-\t}\n-\n-\tif (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {\n-\t\tstats->bad_len++;\n-\t\treturn;\n-\t}\n-\n-\tinfo[0] = NULL;\n-\n-\t/* Set the mbuf metadata based on the cq entry */\n-\trxm->rearm_data[0] = rxq->rearm_data;\n-\trxm->pkt_len = cq_desc_len;\n-\trxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);\n-\tleft = cq_desc_len - rxm->data_len;\n-\trxm->nb_segs = cq_desc->num_sg_elems + 1;\n-\tprev_rxm = rxm;\n-\n-\tfor (i = 1; i < rxm->nb_segs && left; i++) {\n-\t\trxm_seg = info[i];\n-\t\tinfo[i] = NULL;\n-\n-\t\t/* Set the chained mbuf metadata */\n-\t\trxm_seg->rearm_data[0] = rxq->rearm_seg_data;\n-\t\trxm_seg->data_len = RTE_MIN(rxq->seg_size, left);\n-\t\tleft -= rxm_seg->data_len;\n-\n-\t\t/* Link the mbuf */\n-\t\tprev_rxm->next = rxm_seg;\n-\t\tprev_rxm = rxm_seg;\n-\t}\n-\n-\t/* Terminate the mbuf chain */\n-\tprev_rxm->next = NULL;\n-\n-\t/* RSS */\n-\tpkt_flags |= RTE_MBUF_F_RX_RSS_HASH;\n-\trxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);\n-\n-\t/* Vlan Strip */\n-\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {\n-\t\tpkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;\n-\t\trxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);\n-\t}\n-\n-\t/* Checksum */\n-\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {\n-\t\tcflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;\n-\t\tpkt_flags |= ionic_csum_flags[cflags];\n-\t}\n-\n-\trxm->ol_flags = pkt_flags;\n-\n-\t/* Packet Type */\n-\tptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;\n-\tpkt_type = ionic_ptype_table[ptype];\n-\tif (pkt_type == RTE_PTYPE_UNKNOWN) {\n-\t\tstruct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,\n-\t\t\t\tstruct rte_ether_hdr *);\n-\t\tuint16_t ether_type = eth_h->ether_type;\n-\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))\n-\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_ARP;\n-\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))\n-\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_LLDP;\n-\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))\n-\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;\n-\t\tstats->mtods++;\n-\t}\n-\n-\trxm->packet_type = pkt_type;\n-\n-\trx_svc->rx_pkts[rx_svc->nb_rx] = rxm;\n-\trx_svc->nb_rx++;\n-\n-\tstats->packets++;\n-\tstats->bytes += rxm->pkt_len;\n-}\n-\n-/*\n- * Fills one descriptor with mbufs. 
Does not advance the head index.\n- */\n-static __rte_always_inline int\n-ionic_rx_fill_one(struct ionic_rx_qcq *rxq)\n-{\n-\tstruct ionic_queue *q = &rxq->qcq.q;\n-\tstruct rte_mbuf *rxm, *rxm_seg;\n-\tstruct ionic_rxq_desc *desc, *desc_base = q->base;\n-\tstruct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;\n-\trte_iova_t data_iova;\n-\tuint32_t i;\n-\tvoid **info;\n-\tint ret;\n-\n-\tinfo = IONIC_INFO_PTR(q, q->head_idx);\n-\tdesc = &desc_base[q->head_idx];\n-\tsg_desc = &sg_desc_base[q->head_idx];\n-\n-\t/* mbuf is unused => whole chain is unused */\n-\tif (unlikely(info[0]))\n-\t\treturn 0;\n-\n-\tif (rxq->mb_idx == 0) {\n-\t\tret = rte_mempool_get_bulk(rxq->mb_pool,\n-\t\t\t\t\t(void **)rxq->mbs,\n-\t\t\t\t\tIONIC_MBUF_BULK_ALLOC);\n-\t\tif (ret) {\n-\t\t\tassert(0);\n-\t\t\treturn -ENOMEM;\n-\t\t}\n-\n-\t\trxq->mb_idx = IONIC_MBUF_BULK_ALLOC;\n-\t}\n-\n-\trxm = rxq->mbs[--rxq->mb_idx];\n-\tinfo[0] = rxm;\n-\n-\tdata_iova = rte_mbuf_data_iova_default(rxm);\n-\tdesc->addr = rte_cpu_to_le_64(data_iova);\n-\n-\tfor (i = 1; i < q->num_segs; i++) {\n-\t\t/* mbuf is unused => rest of the chain is unused */\n-\t\tif (info[i])\n-\t\t\treturn 0;\n-\n-\t\tif (rxq->mb_idx == 0) {\n-\t\t\tret = rte_mempool_get_bulk(rxq->mb_pool,\n-\t\t\t\t\t(void **)rxq->mbs,\n-\t\t\t\t\tIONIC_MBUF_BULK_ALLOC);\n-\t\t\tif (ret) {\n-\t\t\t\tassert(0);\n-\t\t\t\treturn -ENOMEM;\n-\t\t\t}\n-\n-\t\t\trxq->mb_idx = IONIC_MBUF_BULK_ALLOC;\n-\t\t}\n-\n-\t\trxm_seg = rxq->mbs[--rxq->mb_idx];\n-\t\tinfo[i] = rxm_seg;\n-\n-\t\t/* The data_off does not get set to 0 until later */\n-\t\tdata_iova = rxm_seg->buf_iova;\n-\t\tsg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Fills all descriptors with mbufs.\n- */\n-static int __rte_cold\n-ionic_rx_fill(struct ionic_rx_qcq *rxq)\n-{\n-\tstruct ionic_queue *q = &rxq->qcq.q;\n-\tuint32_t i;\n-\tint err;\n-\n-\tfor (i = 1; i < q->num_descs; i++) {\n-\t\terr = ionic_rx_fill_one(rxq);\n-\t\tif (err)\n-\t\t\treturn err;\n-\n-\t\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n-\t}\n-\n-\tionic_q_flush(q);\n-\n-\treturn 0;\n-}\n-\n /*\n  * Perform one-time initialization of descriptor fields\n  * which will not change for the life of the queue.\n@@ -1148,10 +703,13 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n \tif (err)\n \t\treturn err;\n \n-\t/* Allocate buffers for descriptor rings */\n-\tif (ionic_rx_fill(rxq) != 0) {\n-\t\tIONIC_PRINT(ERR, \"Could not alloc mbuf for queue:%d\",\n-\t\t\trx_queue_id);\n+\t/* Allocate buffers for descriptor ring */\n+\tif (rxq->flags & IONIC_QCQ_F_SG)\n+\t\terr = ionic_rx_fill_sg(rxq);\n+\telse\n+\t\terr = ionic_rx_fill(rxq);\n+\tif (err != 0) {\n+\t\tIONIC_PRINT(ERR, \"Could not fill queue %d\", rx_queue_id);\n \t\treturn -1;\n \t}\n \n@@ -1160,55 +718,6 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n \treturn 0;\n }\n \n-/*\n- * Walk the CQ to find completed receive descriptors.\n- * Any completed descriptor found is refilled.\n- */\n-static __rte_always_inline void\n-ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,\n-\t\tstruct ionic_rx_service *rx_svc)\n-{\n-\tstruct ionic_cq *cq = &rxq->qcq.cq;\n-\tstruct ionic_queue *q = &rxq->qcq.q;\n-\tstruct ionic_rxq_desc *q_desc_base = q->base;\n-\tstruct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;\n-\tuint32_t work_done = 0;\n-\n-\tcq_desc = &cq_desc_base[cq->tail_idx];\n-\n-\twhile (color_match(cq_desc->pkt_type_color, cq->done_color)) {\n-\t\tcq->tail_idx = 
Q_NEXT_TO_SRVC(cq, 1);\n-\n-\t\tif (cq->tail_idx == 0)\n-\t\t\tcq->done_color = !cq->done_color;\n-\n-\t\t/* Prefetch 8 x 8B bufinfo */\n-\t\trte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));\n-\t\t/* Prefetch 4 x 16B comp */\n-\t\trte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);\n-\t\t/* Prefetch 4 x 16B descriptors */\n-\t\tif (!(rxq->flags & IONIC_QCQ_F_CMB))\n-\t\t\trte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);\n-\n-\t\tionic_rx_clean_one(rxq, cq_desc, rx_svc);\n-\n-\t\tq->tail_idx = Q_NEXT_TO_SRVC(q, 1);\n-\n-\t\t(void)ionic_rx_fill_one(rxq);\n-\n-\t\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n-\n-\t\tif (++work_done == work_to_do)\n-\t\t\tbreak;\n-\n-\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n-\t}\n-\n-\t/* Update the queue indices and ring the doorbell */\n-\tif (work_done)\n-\t\tionic_q_flush(q);\n-}\n-\n /*\n  * Stop Receive Units for specified queue.\n  */\n@@ -1237,21 +746,6 @@ ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n \treturn 0;\n }\n \n-uint16_t\n-ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\t\tuint16_t nb_pkts)\n-{\n-\tstruct ionic_rx_qcq *rxq = rx_queue;\n-\tstruct ionic_rx_service rx_svc;\n-\n-\trx_svc.rx_pkts = rx_pkts;\n-\trx_svc.nb_rx = 0;\n-\n-\tionic_rxq_service(rxq, nb_pkts, &rx_svc);\n-\n-\treturn rx_svc.nb_rx;\n-}\n-\n int\n ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)\n {\ndiff --git a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h\nindex f950d6472c..5939777963 100644\n--- a/drivers/net/ionic/ionic_rxtx.h\n+++ b/drivers/net/ionic/ionic_rxtx.h\n@@ -5,7 +5,19 @@\n #ifndef _IONIC_RXTX_H_\n #define _IONIC_RXTX_H_\n \n-#include <rte_mbuf.h>\n+#include <stdint.h>\n+\n+#include \"ionic_if.h\"\n+\n+struct ionic_rx_qcq;\n+struct ionic_tx_qcq;\n+struct rte_eth_dev;\n+struct rte_eth_rxconf;\n+struct rte_eth_rxq_info;\n+struct rte_eth_txconf;\n+struct rte_eth_txq_info;\n+struct rte_mbuf;\n+struct rte_mempool;\n \n struct ionic_rx_service {\n \t/* cb in */\n@@ -14,13 +26,12 @@ struct ionic_rx_service {\n \tuint16_t nb_rx;\n };\n \n-uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\tuint16_t nb_pkts);\n-uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\tuint16_t nb_pkts);\n-uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\tuint16_t nb_pkts);\n+#define IONIC_CSUM_FLAG_MASK\t(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)\n+\n+extern const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK];\n+extern const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK];\n \n+/* ionic_rxtx.c */\n int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \tuint16_t nb_desc, uint32_t socket_id,\n \tconst struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);\n@@ -45,4 +56,25 @@ int ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);\n \n const uint32_t *ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n \n+int ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm);\n+\n+uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\tuint16_t nb_pkts);\n+\n+/* ionic_rxtx_simple.c */\n+uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\tuint16_t nb_pkts);\n+uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\tuint16_t nb_pkts);\n+\n+int ionic_rx_fill(struct ionic_rx_qcq *rxq);\n+\n+/* ionic_rxtx_sg.c */\n+uint16_t ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\tuint16_t nb_pkts);\n+uint16_t ionic_xmit_pkts_sg(void *tx_queue, struct 
rte_mbuf **tx_pkts,\n+\tuint16_t nb_pkts);\n+\n+int ionic_rx_fill_sg(struct ionic_rx_qcq *rxq);\n+\n #endif /* _IONIC_RXTX_H_ */\ndiff --git a/drivers/net/ionic/ionic_rxtx_sg.c b/drivers/net/ionic/ionic_rxtx_sg.c\nnew file mode 100644\nindex 0000000000..bdca3fa4b4\n--- /dev/null\n+++ b/drivers/net/ionic/ionic_rxtx_sg.c\n@@ -0,0 +1,496 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.\n+ */\n+\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <assert.h>\n+\n+#include <rte_common.h>\n+#include <rte_byteorder.h>\n+#include <rte_atomic.h>\n+#include <rte_mempool.h>\n+#include <rte_mbuf.h>\n+#include <rte_ether.h>\n+#include <rte_prefetch.h>\n+\n+#include \"ionic.h\"\n+#include \"ionic_if.h\"\n+#include \"ionic_dev.h\"\n+#include \"ionic_lif.h\"\n+#include \"ionic_rxtx.h\"\n+\n+static __rte_always_inline void\n+ionic_tx_flush_sg(struct ionic_tx_qcq *txq)\n+{\n+\tstruct ionic_cq *cq = &txq->qcq.cq;\n+\tstruct ionic_queue *q = &txq->qcq.q;\n+\tstruct rte_mbuf *txm;\n+\tstruct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;\n+\tvoid **info;\n+\tuint32_t i;\n+\n+\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\n+\twhile (color_match(cq_desc->color, cq->done_color)) {\n+\t\tcq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);\n+\t\tif (cq->tail_idx == 0)\n+\t\t\tcq->done_color = !cq->done_color;\n+\n+\t\t/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */\n+\t\tif ((cq->tail_idx & 0x3) == 0)\n+\t\t\trte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);\n+\n+\t\twhile (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {\n+\t\t\t/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */\n+\t\t\trte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));\n+\n+\t\t\t/* Prefetch next mbuf */\n+\t\t\tvoid **next_info =\n+\t\t\t\tIONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));\n+\t\t\tif (next_info[0])\n+\t\t\t\trte_mbuf_prefetch_part2(next_info[0]);\n+\t\t\tif (next_info[1])\n+\t\t\t\trte_mbuf_prefetch_part2(next_info[1]);\n+\n+\t\t\tinfo = IONIC_INFO_PTR(q, q->tail_idx);\n+\t\t\tfor (i = 0; i < q->num_segs; i++) {\n+\t\t\t\ttxm = info[i];\n+\t\t\t\tif (!txm)\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tif (txq->flags & IONIC_QCQ_F_FAST_FREE)\n+\t\t\t\t\trte_mempool_put(txm->pool, txm);\n+\t\t\t\telse\n+\t\t\t\t\trte_pktmbuf_free_seg(txm);\n+\n+\t\t\t\tinfo[i] = NULL;\n+\t\t\t}\n+\n+\t\t\tq->tail_idx = Q_NEXT_TO_SRVC(q, 1);\n+\t\t}\n+\n+\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\t}\n+}\n+\n+static __rte_always_inline int\n+ionic_tx_sg(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n+{\n+\tstruct ionic_queue *q = &txq->qcq.q;\n+\tstruct ionic_txq_desc *desc, *desc_base = q->base;\n+\tstruct ionic_txq_sg_desc_v1 *sg_desc, *sg_desc_base = q->sg_base;\n+\tstruct ionic_txq_sg_elem *elem;\n+\tstruct ionic_tx_stats *stats = &txq->stats;\n+\tstruct rte_mbuf *txm_seg;\n+\trte_iova_t data_iova;\n+\tvoid **info;\n+\tuint64_t ol_flags = txm->ol_flags;\n+\tuint64_t addr, cmd;\n+\tuint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;\n+\tuint8_t flags = 0;\n+\n+\tdesc = &desc_base[q->head_idx];\n+\tsg_desc = &sg_desc_base[q->head_idx];\n+\tinfo = IONIC_INFO_PTR(q, q->head_idx);\n+\n+\tif ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&\n+\t    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {\n+\t\topcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;\n+\t}\n+\n+\tif (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&\n+\t     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||\n+\t    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&\n+\t     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {\n+\t\topcode = 
IONIC_TXQ_DESC_OPCODE_CSUM_HW;\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;\n+\t}\n+\n+\tif (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)\n+\t\tstats->no_csum++;\n+\n+\tif (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||\n+\t     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&\n+\t    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||\n+\t     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_ENCAP;\n+\t}\n+\n+\tif (ol_flags & RTE_MBUF_F_TX_VLAN) {\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_VLAN;\n+\t\tdesc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);\n+\t}\n+\n+\taddr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));\n+\n+\tcmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);\n+\tdesc->cmd = rte_cpu_to_le_64(cmd);\n+\tdesc->len = rte_cpu_to_le_16(txm->data_len);\n+\n+\tinfo[0] = txm;\n+\n+\tif (txm->nb_segs > 1) {\n+\t\ttxm_seg = txm->next;\n+\n+\t\telem = sg_desc->elems;\n+\n+\t\twhile (txm_seg != NULL) {\n+\t\t\t/* Stash the mbuf ptr in the array */\n+\t\t\tinfo++;\n+\t\t\t*info = txm_seg;\n+\n+\t\t\t/* Configure the SGE */\n+\t\t\tdata_iova = rte_mbuf_data_iova(txm_seg);\n+\t\t\telem->len = rte_cpu_to_le_16(txm_seg->data_len);\n+\t\t\telem->addr = rte_cpu_to_le_64(data_iova);\n+\t\t\telem++;\n+\n+\t\t\ttxm_seg = txm_seg->next;\n+\t\t}\n+\t}\n+\n+\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n+\n+\treturn 0;\n+}\n+\n+uint16_t\n+ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ionic_tx_qcq *txq = tx_queue;\n+\tstruct ionic_queue *q = &txq->qcq.q;\n+\tstruct ionic_tx_stats *stats = &txq->stats;\n+\tstruct rte_mbuf *mbuf;\n+\tuint32_t bytes_tx = 0;\n+\tuint16_t nb_avail, nb_tx = 0;\n+\tint err;\n+\n+\tstruct ionic_txq_desc *desc_base = q->base;\n+\tif (!(txq->flags & IONIC_QCQ_F_CMB))\n+\t\trte_prefetch0(&desc_base[q->head_idx]);\n+\trte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));\n+\n+\tif (tx_pkts) {\n+\t\trte_mbuf_prefetch_part1(tx_pkts[0]);\n+\t\trte_mbuf_prefetch_part2(tx_pkts[0]);\n+\t}\n+\n+\tif (ionic_q_space_avail(q) < txq->free_thresh) {\n+\t\t/* Cleaning old buffers */\n+\t\tionic_tx_flush_sg(txq);\n+\t}\n+\n+\tnb_avail = ionic_q_space_avail(q);\n+\tif (nb_avail < nb_pkts) {\n+\t\tstats->stop += nb_pkts - nb_avail;\n+\t\tnb_pkts = nb_avail;\n+\t}\n+\n+\twhile (nb_tx < nb_pkts) {\n+\t\tuint16_t next_idx = Q_NEXT_TO_POST(q, 1);\n+\t\tif (!(txq->flags & IONIC_QCQ_F_CMB))\n+\t\t\trte_prefetch0(&desc_base[next_idx]);\n+\t\trte_prefetch0(IONIC_INFO_PTR(q, next_idx));\n+\n+\t\tif (nb_tx + 1 < nb_pkts) {\n+\t\t\trte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);\n+\t\t\trte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);\n+\t\t}\n+\n+\t\tmbuf = tx_pkts[nb_tx];\n+\n+\t\tif (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)\n+\t\t\terr = ionic_tx_tso(txq, mbuf);\n+\t\telse\n+\t\t\terr = ionic_tx_sg(txq, mbuf);\n+\t\tif (err) {\n+\t\t\tstats->drop += nb_pkts - nb_tx;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tbytes_tx += mbuf->pkt_len;\n+\t\tnb_tx++;\n+\t}\n+\n+\tif (nb_tx > 0) {\n+\t\trte_wmb();\n+\t\tionic_q_flush(q);\n+\n+\t\tstats->packets += nb_tx;\n+\t\tstats->bytes += bytes_tx;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n+/*\n+ * Cleans one descriptor. 
Connects the filled mbufs into a chain.\n+ * Does not advance the tail index.\n+ */\n+static __rte_always_inline void\n+ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,\n+\t\tstruct ionic_rxq_comp *cq_desc,\n+\t\tstruct ionic_rx_service *rx_svc)\n+{\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tstruct rte_mbuf *rxm;\n+\tstruct rte_mbuf *rxm_seg, *prev_rxm;\n+\tstruct ionic_rx_stats *stats = &rxq->stats;\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t pkt_type;\n+\tuint32_t left, i;\n+\tuint16_t cq_desc_len;\n+\tuint8_t ptype, cflags;\n+\tvoid **info;\n+\n+\tcq_desc_len = rte_le_to_cpu_16(cq_desc->len);\n+\n+\tinfo = IONIC_INFO_PTR(q, q->tail_idx);\n+\n+\trxm = info[0];\n+\n+\tif (cq_desc->status) {\n+\t\tstats->bad_cq_status++;\n+\t\treturn;\n+\t}\n+\n+\tif (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {\n+\t\tstats->bad_len++;\n+\t\treturn;\n+\t}\n+\n+\tinfo[0] = NULL;\n+\n+\t/* Set the mbuf metadata based on the cq entry */\n+\trxm->rearm_data[0] = rxq->rearm_data;\n+\trxm->pkt_len = cq_desc_len;\n+\trxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);\n+\tleft = cq_desc_len - rxm->data_len;\n+\trxm->nb_segs = cq_desc->num_sg_elems + 1;\n+\n+\tprev_rxm = rxm;\n+\n+\tfor (i = 1; i < rxm->nb_segs && left; i++) {\n+\t\trxm_seg = info[i];\n+\t\tinfo[i] = NULL;\n+\n+\t\t/* Set the chained mbuf metadata */\n+\t\trxm_seg->rearm_data[0] = rxq->rearm_seg_data;\n+\t\trxm_seg->data_len = RTE_MIN(rxq->seg_size, left);\n+\t\tleft -= rxm_seg->data_len;\n+\n+\t\t/* Link the mbuf */\n+\t\tprev_rxm->next = rxm_seg;\n+\t\tprev_rxm = rxm_seg;\n+\t}\n+\n+\t/* Terminate the mbuf chain */\n+\tprev_rxm->next = NULL;\n+\n+\t/* RSS */\n+\tpkt_flags |= RTE_MBUF_F_RX_RSS_HASH;\n+\trxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);\n+\n+\t/* Vlan Strip */\n+\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {\n+\t\tpkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;\n+\t\trxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);\n+\t}\n+\n+\t/* Checksum */\n+\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {\n+\t\tcflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;\n+\t\tpkt_flags |= ionic_csum_flags[cflags];\n+\t}\n+\n+\trxm->ol_flags = pkt_flags;\n+\n+\t/* Packet Type */\n+\tptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;\n+\tpkt_type = ionic_ptype_table[ptype];\n+\tif (pkt_type == RTE_PTYPE_UNKNOWN) {\n+\t\tstruct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,\n+\t\t\t\tstruct rte_ether_hdr *);\n+\t\tuint16_t ether_type = eth_h->ether_type;\n+\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))\n+\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_ARP;\n+\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))\n+\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_LLDP;\n+\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))\n+\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;\n+\t\tstats->mtods++;\n+\t} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {\n+\t\tpkt_type |= RTE_PTYPE_L2_ETHER_VLAN;\n+\t} else {\n+\t\tpkt_type |= RTE_PTYPE_L2_ETHER;\n+\t}\n+\n+\trxm->packet_type = pkt_type;\n+\n+\trx_svc->rx_pkts[rx_svc->nb_rx] = rxm;\n+\trx_svc->nb_rx++;\n+\n+\tstats->packets++;\n+\tstats->bytes += rxm->pkt_len;\n+}\n+\n+/*\n+ * Fills one descriptor with mbufs. 
Does not advance the head index.\n+ */\n+static __rte_always_inline int\n+ionic_rx_fill_one_sg(struct ionic_rx_qcq *rxq)\n+{\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tstruct rte_mbuf *rxm;\n+\tstruct rte_mbuf *rxm_seg;\n+\tstruct ionic_rxq_desc *desc, *desc_base = q->base;\n+\tstruct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;\n+\trte_iova_t data_iova;\n+\tuint32_t i;\n+\tvoid **info;\n+\tint ret;\n+\n+\tinfo = IONIC_INFO_PTR(q, q->head_idx);\n+\tdesc = &desc_base[q->head_idx];\n+\tsg_desc = &sg_desc_base[q->head_idx];\n+\n+\t/* mbuf is unused => whole chain is unused */\n+\tif (info[0])\n+\t\treturn 0;\n+\n+\tif (rxq->mb_idx == 0) {\n+\t\tret = rte_mempool_get_bulk(rxq->mb_pool,\n+\t\t\t\t\t(void **)rxq->mbs,\n+\t\t\t\t\tIONIC_MBUF_BULK_ALLOC);\n+\t\tif (ret) {\n+\t\t\tassert(0);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trxq->mb_idx = IONIC_MBUF_BULK_ALLOC;\n+\t}\n+\n+\trxm = rxq->mbs[--rxq->mb_idx];\n+\tinfo[0] = rxm;\n+\n+\tdata_iova = rte_mbuf_data_iova_default(rxm);\n+\tdesc->addr = rte_cpu_to_le_64(data_iova);\n+\n+\tfor (i = 1; i < q->num_segs; i++) {\n+\t\t/* mbuf is unused => rest of the chain is unused */\n+\t\tif (info[i])\n+\t\t\treturn 0;\n+\n+\t\tif (rxq->mb_idx == 0) {\n+\t\t\tret = rte_mempool_get_bulk(rxq->mb_pool,\n+\t\t\t\t\t(void **)rxq->mbs,\n+\t\t\t\t\tIONIC_MBUF_BULK_ALLOC);\n+\t\t\tif (ret) {\n+\t\t\t\tassert(0);\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\n+\t\t\trxq->mb_idx = IONIC_MBUF_BULK_ALLOC;\n+\t\t}\n+\n+\t\trxm_seg = rxq->mbs[--rxq->mb_idx];\n+\t\tinfo[i] = rxm_seg;\n+\n+\t\t/* The data_off does not get set to 0 until later */\n+\t\tdata_iova = rxm_seg->buf_iova;\n+\t\tsg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Walk the CQ to find completed receive descriptors.\n+ * Any completed descriptor found is refilled.\n+ */\n+static __rte_always_inline void\n+ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,\n+\t\tstruct ionic_rx_service *rx_svc)\n+{\n+\tstruct ionic_cq *cq = &rxq->qcq.cq;\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tstruct ionic_rxq_desc *q_desc_base = q->base;\n+\tstruct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;\n+\tuint32_t work_done = 0;\n+\n+\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\n+\twhile (color_match(cq_desc->pkt_type_color, cq->done_color)) {\n+\t\tcq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);\n+\t\tif (cq->tail_idx == 0)\n+\t\t\tcq->done_color = !cq->done_color;\n+\n+\t\t/* Prefetch 8 x 8B bufinfo */\n+\t\trte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));\n+\t\t/* Prefetch 4 x 16B comp */\n+\t\trte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);\n+\t\t/* Prefetch 4 x 16B descriptors */\n+\t\tif (!(rxq->flags & IONIC_QCQ_F_CMB))\n+\t\t\trte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);\n+\n+\t\t/* Clean one descriptor */\n+\t\tionic_rx_clean_one_sg(rxq, cq_desc, rx_svc);\n+\t\tq->tail_idx = Q_NEXT_TO_SRVC(q, 1);\n+\n+\t\t/* Fill one descriptor */\n+\t\t(void)ionic_rx_fill_one_sg(rxq);\n+\n+\t\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n+\n+\t\tif (++work_done == work_to_do)\n+\t\t\tbreak;\n+\n+\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\t}\n+\n+\t/* Update the queue indices and ring the doorbell */\n+\tif (work_done)\n+\t\tionic_q_flush(q);\n+}\n+\n+uint16_t\n+ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ionic_rx_qcq *rxq = rx_queue;\n+\tstruct ionic_rx_service rx_svc;\n+\n+\trx_svc.rx_pkts = rx_pkts;\n+\trx_svc.nb_rx = 0;\n+\n+\tionic_rxq_service_sg(rxq, nb_pkts, &rx_svc);\n+\n+\treturn 
rx_svc.nb_rx;\n+}\n+\n+/*\n+ * Fills all descriptors with mbufs.\n+ */\n+int __rte_cold\n+ionic_rx_fill_sg(struct ionic_rx_qcq *rxq)\n+{\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tuint32_t i;\n+\tint err = 0;\n+\n+\tfor (i = 0; i < q->num_descs - 1u; i++) {\n+\t\terr = ionic_rx_fill_one_sg(rxq);\n+\t\tif (err)\n+\t\t\tbreak;\n+\n+\t\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n+\t}\n+\n+\tionic_q_flush(q);\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/ionic/ionic_rxtx_simple.c b/drivers/net/ionic/ionic_rxtx_simple.c\nnew file mode 100644\nindex 0000000000..fe10e2624e\n--- /dev/null\n+++ b/drivers/net/ionic/ionic_rxtx_simple.c\n@@ -0,0 +1,417 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.\n+ */\n+\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <assert.h>\n+\n+#include <rte_common.h>\n+#include <rte_byteorder.h>\n+#include <rte_atomic.h>\n+#include <rte_mempool.h>\n+#include <rte_mbuf.h>\n+#include <rte_ether.h>\n+#include <rte_prefetch.h>\n+\n+#include \"ionic.h\"\n+#include \"ionic_if.h\"\n+#include \"ionic_dev.h\"\n+#include \"ionic_lif.h\"\n+#include \"ionic_rxtx.h\"\n+\n+static __rte_always_inline void\n+ionic_tx_flush(struct ionic_tx_qcq *txq)\n+{\n+\tstruct ionic_cq *cq = &txq->qcq.cq;\n+\tstruct ionic_queue *q = &txq->qcq.q;\n+\tstruct rte_mbuf *txm;\n+\tstruct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;\n+\tvoid **info;\n+\n+\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\n+\twhile (color_match(cq_desc->color, cq->done_color)) {\n+\t\tcq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);\n+\t\tif (cq->tail_idx == 0)\n+\t\t\tcq->done_color = !cq->done_color;\n+\n+\t\t/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */\n+\t\tif ((cq->tail_idx & 0x3) == 0)\n+\t\t\trte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);\n+\n+\t\twhile (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {\n+\t\t\t/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */\n+\t\t\trte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 2)]);\n+\n+\t\t\t/* Prefetch next mbuf */\n+\t\t\tvoid **next_info =\n+\t\t\t\t&q->info[Q_NEXT_TO_SRVC(q, 1)];\n+\t\t\tif (next_info[0])\n+\t\t\t\trte_mbuf_prefetch_part2(next_info[0]);\n+\n+\t\t\tinfo = &q->info[q->tail_idx];\n+\t\t\t{\n+\t\t\t\ttxm = info[0];\n+\n+\t\t\t\tif (txq->flags & IONIC_QCQ_F_FAST_FREE)\n+\t\t\t\t\trte_mempool_put(txm->pool, txm);\n+\t\t\t\telse\n+\t\t\t\t\trte_pktmbuf_free_seg(txm);\n+\n+\t\t\t\tinfo[0] = NULL;\n+\t\t\t}\n+\n+\t\t\tq->tail_idx = Q_NEXT_TO_SRVC(q, 1);\n+\t\t}\n+\n+\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\t}\n+}\n+\n+static __rte_always_inline int\n+ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)\n+{\n+\tstruct ionic_queue *q = &txq->qcq.q;\n+\tstruct ionic_txq_desc *desc, *desc_base = q->base;\n+\tstruct ionic_tx_stats *stats = &txq->stats;\n+\tvoid **info;\n+\tuint64_t ol_flags = txm->ol_flags;\n+\tuint64_t addr, cmd;\n+\tuint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;\n+\tuint8_t flags = 0;\n+\n+\tif (txm->nb_segs > 1)\n+\t\treturn -EINVAL;\n+\n+\tdesc = &desc_base[q->head_idx];\n+\tinfo = &q->info[q->head_idx];\n+\n+\tif ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&\n+\t    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {\n+\t\topcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;\n+\t}\n+\n+\tif (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&\n+\t     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||\n+\t    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&\n+\t     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {\n+\t\topcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;\n+\t\tflags |= 
IONIC_TXQ_DESC_FLAG_CSUM_L4;\n+\t}\n+\n+\tif (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)\n+\t\tstats->no_csum++;\n+\n+\tif (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||\n+\t     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&\n+\t    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||\n+\t     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_ENCAP;\n+\t}\n+\n+\tif (ol_flags & RTE_MBUF_F_TX_VLAN) {\n+\t\tflags |= IONIC_TXQ_DESC_FLAG_VLAN;\n+\t\tdesc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);\n+\t}\n+\n+\taddr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));\n+\n+\tcmd = encode_txq_desc_cmd(opcode, flags, 0, addr);\n+\tdesc->cmd = rte_cpu_to_le_64(cmd);\n+\tdesc->len = rte_cpu_to_le_16(txm->data_len);\n+\n+\tinfo[0] = txm;\n+\n+\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n+\n+\treturn 0;\n+}\n+\n+uint16_t\n+ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ionic_tx_qcq *txq = tx_queue;\n+\tstruct ionic_queue *q = &txq->qcq.q;\n+\tstruct ionic_tx_stats *stats = &txq->stats;\n+\tstruct rte_mbuf *mbuf;\n+\tuint32_t bytes_tx = 0;\n+\tuint16_t nb_avail, nb_tx = 0;\n+\tint err;\n+\n+\tstruct ionic_txq_desc *desc_base = q->base;\n+\tif (!(txq->flags & IONIC_QCQ_F_CMB))\n+\t\trte_prefetch0(&desc_base[q->head_idx]);\n+\trte_prefetch0(&q->info[q->head_idx]);\n+\n+\tif (tx_pkts) {\n+\t\trte_mbuf_prefetch_part1(tx_pkts[0]);\n+\t\trte_mbuf_prefetch_part2(tx_pkts[0]);\n+\t}\n+\n+\tif (ionic_q_space_avail(q) < txq->free_thresh) {\n+\t\t/* Cleaning old buffers */\n+\t\tionic_tx_flush(txq);\n+\t}\n+\n+\tnb_avail = ionic_q_space_avail(q);\n+\tif (nb_avail < nb_pkts) {\n+\t\tstats->stop += nb_pkts - nb_avail;\n+\t\tnb_pkts = nb_avail;\n+\t}\n+\n+\twhile (nb_tx < nb_pkts) {\n+\t\tuint16_t next_idx = Q_NEXT_TO_POST(q, 1);\n+\t\tif (!(txq->flags & IONIC_QCQ_F_CMB))\n+\t\t\trte_prefetch0(&desc_base[next_idx]);\n+\t\trte_prefetch0(&q->info[next_idx]);\n+\n+\t\tif (nb_tx + 1 < nb_pkts) {\n+\t\t\trte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);\n+\t\t\trte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);\n+\t\t}\n+\n+\t\tmbuf = tx_pkts[nb_tx];\n+\n+\t\tif (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)\n+\t\t\terr = ionic_tx_tso(txq, mbuf);\n+\t\telse\n+\t\t\terr = ionic_tx(txq, mbuf);\n+\t\tif (err) {\n+\t\t\tstats->drop += nb_pkts - nb_tx;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tbytes_tx += mbuf->pkt_len;\n+\t\tnb_tx++;\n+\t}\n+\n+\tif (nb_tx > 0) {\n+\t\trte_wmb();\n+\t\tionic_q_flush(q);\n+\n+\t\tstats->packets += nb_tx;\n+\t\tstats->bytes += bytes_tx;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n+/*\n+ * Cleans one descriptor. 
Does not advance the tail index.\n+ */\n+static __rte_always_inline void\n+ionic_rx_clean_one(struct ionic_rx_qcq *rxq,\n+\t\tstruct ionic_rxq_comp *cq_desc,\n+\t\tstruct ionic_rx_service *rx_svc)\n+{\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tstruct rte_mbuf *rxm;\n+\tstruct ionic_rx_stats *stats = &rxq->stats;\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t pkt_type;\n+\tuint16_t cq_desc_len;\n+\tuint8_t ptype, cflags;\n+\tvoid **info;\n+\n+\tcq_desc_len = rte_le_to_cpu_16(cq_desc->len);\n+\n+\tinfo = &q->info[q->tail_idx];\n+\n+\trxm = info[0];\n+\n+\tif (cq_desc->status) {\n+\t\tstats->bad_cq_status++;\n+\t\treturn;\n+\t}\n+\n+\tif (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {\n+\t\tstats->bad_len++;\n+\t\treturn;\n+\t}\n+\n+\tinfo[0] = NULL;\n+\n+\t/* Set the mbuf metadata based on the cq entry */\n+\trxm->rearm_data[0] = rxq->rearm_data;\n+\trxm->pkt_len = cq_desc_len;\n+\trxm->data_len = cq_desc_len;\n+\n+\t/* RSS */\n+\tpkt_flags |= RTE_MBUF_F_RX_RSS_HASH;\n+\trxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);\n+\n+\t/* Vlan Strip */\n+\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {\n+\t\tpkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;\n+\t\trxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);\n+\t}\n+\n+\t/* Checksum */\n+\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {\n+\t\tcflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;\n+\t\tpkt_flags |= ionic_csum_flags[cflags];\n+\t}\n+\n+\trxm->ol_flags = pkt_flags;\n+\n+\t/* Packet Type */\n+\tptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;\n+\tpkt_type = ionic_ptype_table[ptype];\n+\tif (pkt_type == RTE_PTYPE_UNKNOWN) {\n+\t\tstruct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,\n+\t\t\t\tstruct rte_ether_hdr *);\n+\t\tuint16_t ether_type = eth_h->ether_type;\n+\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))\n+\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_ARP;\n+\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))\n+\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_LLDP;\n+\t\telse if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))\n+\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;\n+\t\tstats->mtods++;\n+\t} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {\n+\t\tpkt_type |= RTE_PTYPE_L2_ETHER_VLAN;\n+\t} else {\n+\t\tpkt_type |= RTE_PTYPE_L2_ETHER;\n+\t}\n+\n+\trxm->packet_type = pkt_type;\n+\n+\trx_svc->rx_pkts[rx_svc->nb_rx] = rxm;\n+\trx_svc->nb_rx++;\n+\n+\tstats->packets++;\n+\tstats->bytes += rxm->pkt_len;\n+}\n+\n+/*\n+ * Fills one descriptor with mbufs. 
Does not advance the head index.\n+ */\n+static __rte_always_inline int\n+ionic_rx_fill_one(struct ionic_rx_qcq *rxq)\n+{\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tstruct rte_mbuf *rxm;\n+\tstruct ionic_rxq_desc *desc, *desc_base = q->base;\n+\trte_iova_t data_iova;\n+\tvoid **info;\n+\tint ret;\n+\n+\tinfo = &q->info[q->head_idx];\n+\tdesc = &desc_base[q->head_idx];\n+\n+\t/* mbuf is unused */\n+\tif (info[0])\n+\t\treturn 0;\n+\n+\tif (rxq->mb_idx == 0) {\n+\t\tret = rte_mempool_get_bulk(rxq->mb_pool,\n+\t\t\t\t\t(void **)rxq->mbs,\n+\t\t\t\t\tIONIC_MBUF_BULK_ALLOC);\n+\t\tif (ret) {\n+\t\t\tassert(0);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trxq->mb_idx = IONIC_MBUF_BULK_ALLOC;\n+\t}\n+\n+\trxm = rxq->mbs[--rxq->mb_idx];\n+\tinfo[0] = rxm;\n+\n+\tdata_iova = rte_mbuf_data_iova_default(rxm);\n+\tdesc->addr = rte_cpu_to_le_64(data_iova);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Walk the CQ to find completed receive descriptors.\n+ * Any completed descriptor found is refilled.\n+ */\n+static __rte_always_inline void\n+ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,\n+\t\tstruct ionic_rx_service *rx_svc)\n+{\n+\tstruct ionic_cq *cq = &rxq->qcq.cq;\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tstruct ionic_rxq_desc *q_desc_base = q->base;\n+\tstruct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;\n+\tuint32_t work_done = 0;\n+\n+\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\n+\twhile (color_match(cq_desc->pkt_type_color, cq->done_color)) {\n+\t\tcq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);\n+\t\tif (cq->tail_idx == 0)\n+\t\t\tcq->done_color = !cq->done_color;\n+\n+\t\t/* Prefetch 8 x 8B bufinfo */\n+\t\trte_prefetch0(&q->info[Q_NEXT_TO_SRVC(q, 8)]);\n+\t\t/* Prefetch 4 x 16B comp */\n+\t\trte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);\n+\t\t/* Prefetch 4 x 16B descriptors */\n+\t\tif (!(rxq->flags & IONIC_QCQ_F_CMB))\n+\t\t\trte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);\n+\n+\t\t/* Clean one descriptor */\n+\t\tionic_rx_clean_one(rxq, cq_desc, rx_svc);\n+\t\tq->tail_idx = Q_NEXT_TO_SRVC(q, 1);\n+\n+\t\t/* Fill one descriptor */\n+\t\t(void)ionic_rx_fill_one(rxq);\n+\n+\t\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n+\n+\t\tif (++work_done == work_to_do)\n+\t\t\tbreak;\n+\n+\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\t}\n+\n+\t/* Update the queue indices and ring the doorbell */\n+\tif (work_done)\n+\t\tionic_q_flush(q);\n+}\n+\n+uint16_t\n+ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ionic_rx_qcq *rxq = rx_queue;\n+\tstruct ionic_rx_service rx_svc;\n+\n+\trx_svc.rx_pkts = rx_pkts;\n+\trx_svc.nb_rx = 0;\n+\n+\tionic_rxq_service(rxq, nb_pkts, &rx_svc);\n+\n+\treturn rx_svc.nb_rx;\n+}\n+\n+/*\n+ * Fills all descriptors with mbufs.\n+ */\n+int __rte_cold\n+ionic_rx_fill(struct ionic_rx_qcq *rxq)\n+{\n+\tstruct ionic_queue *q = &rxq->qcq.q;\n+\tuint32_t i;\n+\tint err = 0;\n+\n+\tfor (i = 0; i < q->num_descs - 1u; i++) {\n+\t\terr = ionic_rx_fill_one(rxq);\n+\t\tif (err)\n+\t\t\tbreak;\n+\n+\t\tq->head_idx = Q_NEXT_TO_POST(q, 1);\n+\t}\n+\n+\tionic_q_flush(q);\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/ionic/meson.build b/drivers/net/ionic/meson.build\nindex 2869e0027c..629e6a037d 100644\n--- a/drivers/net/ionic/meson.build\n+++ b/drivers/net/ionic/meson.build\n@@ -16,4 +16,6 @@ sources = files(\n         'ionic_main.c',\n         'ionic_rx_filter.c',\n         'ionic_rxtx.c',\n+        'ionic_rxtx_simple.c',\n+        'ionic_rxtx_sg.c',\n )\n",
    "prefixes": [
        "32/35"
    ]
}