get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are modified).

put:
Fully update a patch (the complete resource representation is replaced).

GET /api/patches/63690/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 63690,
    "url": "http://patches.dpdk.org/api/patches/63690/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20191209214656.27347-14-cardigliano@ntop.org/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191209214656.27347-14-cardigliano@ntop.org>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191209214656.27347-14-cardigliano@ntop.org",
    "date": "2019-12-09T21:46:52",
    "name": "[v3,13/17] net/ionic: add RX and TX handling",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "676875ab977fe0ef51fa0b13ae2918f5618f1255",
    "submitter": {
        "id": 1465,
        "url": "http://patches.dpdk.org/api/people/1465/?format=api",
        "name": "Alfredo Cardigliano",
        "email": "cardigliano@ntop.org"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20191209214656.27347-14-cardigliano@ntop.org/mbox/",
    "series": [
        {
            "id": 7760,
            "url": "http://patches.dpdk.org/api/series/7760/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7760",
            "date": "2019-12-09T21:46:39",
            "name": "Introduces net/ionic PMD",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/7760/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/63690/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/63690/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 00498A04B3;\n\tMon,  9 Dec 2019 22:51:01 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id DC7A81BFBD;\n\tMon,  9 Dec 2019 22:48:59 +0100 (CET)",
            "from mail.ntop.org (mail-digitalocean.ntop.org [167.99.215.164])\n by dpdk.org (Postfix) with ESMTP id A4F091BDFD\n for <dev@dpdk.org>; Mon,  9 Dec 2019 22:48:34 +0100 (CET)",
            "from devele.ntop.org (net-93-145-196-230.cust.vodafonedsl.it\n [93.145.196.230])\n by mail.ntop.org (Postfix) with ESMTPSA id 4F6E041BA8;\n Mon,  9 Dec 2019 22:48:34 +0100 (CET)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=ntop.org; s=mail;\n t=1575928114; bh=c+TQu0k0j5AAR/PHH0FfbL4vVNRUpzvfzw3MWgimGF0=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=pO/xxGngpEWnxEjeFfuvoLjxKNq0iA8S437yF5qPUK4RKMR/8HdrqAN29lgGpiW3H\n jxqVjdRkmx7vrcYBCawSzKIpjtm/rhJ1kRrh+oQv4haY2amEAJIfdCzxm1vNVSWICc\n lAf2jX74kdRYYelSKwvwKzXzTxzMB3bH2U0+Svxg=",
        "From": "Alfredo Cardigliano <cardigliano@ntop.org>",
        "To": "Alfredo Cardigliano <cardigliano@ntop.org>,\n John McNamara <john.mcnamara@intel.com>,\n Marko Kovacevic <marko.kovacevic@intel.com>",
        "Cc": "dev@dpdk.org",
        "Date": "Mon,  9 Dec 2019 22:46:52 +0100",
        "Message-Id": "<20191209214656.27347-14-cardigliano@ntop.org>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20191209214656.27347-1-cardigliano@ntop.org>",
        "References": "<20191209214656.27347-1-cardigliano@ntop.org>",
        "Subject": "[dpdk-dev] [PATCH v3 13/17] net/ionic: add RX and TX handling",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add RX and TX queues setup and handling.\n\nSigned-off-by: Alfredo Cardigliano <cardigliano@ntop.org>\nReviewed-by: Shannon Nelson <snelson@pensando.io>\n---\n doc/guides/nics/features/ionic.ini |  11 +\n drivers/net/ionic/Makefile         |   1 +\n drivers/net/ionic/ionic_dev.h      |   1 +\n drivers/net/ionic/ionic_ethdev.c   | 115 ++++\n drivers/net/ionic/ionic_lif.c      | 222 ++++++-\n drivers/net/ionic/ionic_lif.h      |  44 ++\n drivers/net/ionic/ionic_rxtx.c     | 999 +++++++++++++++++++++++++++++\n drivers/net/ionic/ionic_rxtx.h     |  44 ++\n drivers/net/ionic/meson.build      |   1 +\n 9 files changed, 1437 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/ionic/ionic_rxtx.c\n create mode 100644 drivers/net/ionic/ionic_rxtx.h",
    "diff": "diff --git a/doc/guides/nics/features/ionic.ini b/doc/guides/nics/features/ionic.ini\nindex 05bdb2d98..1308bf321 100644\n--- a/doc/guides/nics/features/ionic.ini\n+++ b/doc/guides/nics/features/ionic.ini\n@@ -7,12 +7,23 @@\n Speed capabilities   = Y\n Link status          = Y\n Link status event    = Y\n+Queue start/stop     = Y\n MTU update           = Y\n+Jumbo frame          = Y\n+Scattered Rx         = Y\n+LRO                  = Y\n+TSO                  = Y\n Promiscuous mode     = Y\n Allmulticast mode    = Y\n Unicast MAC filter   = Y\n VLAN filter          = Y\n+VLAN offload         = Y\n Flow control         = Y\n+CRC offload          = Y\n+VLAN offload         = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n+Packet type parsing  = Y\n Linux UIO            = Y\n Linux VFIO           = Y\n x86-64               = Y\ndiff --git a/drivers/net/ionic/Makefile b/drivers/net/ionic/Makefile\nindex 3291190cd..08b3abe32 100644\n--- a/drivers/net/ionic/Makefile\n+++ b/drivers/net/ionic/Makefile\n@@ -38,6 +38,7 @@ endif\n #\n SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_mac_api.c\n SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rx_filter.c\n+SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rxtx.c\n SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_dev.c\n SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_ethdev.c\n SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_lif.c\ndiff --git a/drivers/net/ionic/ionic_dev.h b/drivers/net/ionic/ionic_dev.h\nindex 61576621b..8c1ec13a6 100644\n--- a/drivers/net/ionic/ionic_dev.h\n+++ b/drivers/net/ionic/ionic_dev.h\n@@ -14,6 +14,7 @@\n \n #define IONIC_MAX_RING_DESC\t\t32768\n #define IONIC_MIN_RING_DESC\t\t16\n+#define IONIC_DEF_TXRX_DESC\t\t4096\n \n #define IONIC_LIFS_MAX\t\t\t1024\n \ndiff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c\nindex 9872740b7..00c33f35b 100644\n--- a/drivers/net/ionic/ionic_ethdev.c\n+++ b/drivers/net/ionic/ionic_ethdev.c\n@@ -15,6 +15,7 @@\n #include 
\"ionic_mac_api.h\"\n #include \"ionic_lif.h\"\n #include \"ionic_ethdev.h\"\n+#include \"ionic_rxtx.h\"\n \n static int  eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params);\n static int  eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev);\n@@ -33,6 +34,7 @@ static int  ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev,\n \tstruct rte_eth_fc_conf *fc_conf);\n static int  ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,\n \tstruct rte_eth_fc_conf *fc_conf);\n+static int  ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask);\n \n int ionic_logtype_driver;\n \n@@ -43,6 +45,20 @@ static const struct rte_pci_id pci_id_ionic_map[] = {\n \t{ .vendor_id = 0, /* sentinel */ },\n };\n \n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = IONIC_MAX_RING_DESC,\n+\t.nb_min = IONIC_MIN_RING_DESC,\n+\t.nb_align = 1,\n+};\n+\n+static const struct rte_eth_desc_lim tx_desc_lim = {\n+\t.nb_max = IONIC_MAX_RING_DESC,\n+\t.nb_min = IONIC_MIN_RING_DESC,\n+\t.nb_align = 1,\n+\t.nb_seg_max = IONIC_TX_MAX_SG_ELEMS,\n+\t.nb_mtu_seg_max = IONIC_TX_MAX_SG_ELEMS,\n+};\n+\n static const struct eth_dev_ops ionic_eth_dev_ops = {\n \t.dev_infos_get          = ionic_dev_info_get,\n \t.dev_configure          = ionic_dev_configure,\n@@ -63,6 +79,17 @@ static const struct eth_dev_ops ionic_eth_dev_ops = {\n \t.allmulticast_disable   = ionic_dev_allmulticast_disable,\n \t.flow_ctrl_get          = ionic_flow_ctrl_get,\n \t.flow_ctrl_set          = ionic_flow_ctrl_set,\n+\t.rxq_info_get           = ionic_rxq_info_get,\n+\t.txq_info_get           = ionic_txq_info_get,\n+\t.rx_queue_setup         = ionic_dev_rx_queue_setup,\n+\t.rx_queue_release       = ionic_dev_rx_queue_release,\n+\t.rx_queue_start\t        = ionic_dev_rx_queue_start,\n+\t.rx_queue_stop          = ionic_dev_rx_queue_stop,\n+\t.tx_queue_setup         = ionic_dev_tx_queue_setup,\n+\t.tx_queue_release       = ionic_dev_tx_queue_release,\n+\t.tx_queue_start\t        = 
ionic_dev_tx_queue_start,\n+\t.tx_queue_stop          = ionic_dev_tx_queue_stop,\n+\t.vlan_offload_set       = ionic_vlan_offload_set,\n };\n \n /*\n@@ -249,6 +276,50 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,\n \t\tETH_LINK_SPEED_50G |\n \t\tETH_LINK_SPEED_100G;\n \n+\t/*\n+\t * Per-queue capabilities. Actually most of the offloads are enabled\n+\t * by default on the port and can be used on selected queues (by adding\n+\t * packet flags at runtime when required)\n+\t */\n+\n+\tdev_info->rx_queue_offload_capa =\n+\t\tDEV_RX_OFFLOAD_IPV4_CKSUM |\n+\t\tDEV_RX_OFFLOAD_UDP_CKSUM |\n+\t\tDEV_RX_OFFLOAD_TCP_CKSUM |\n+\t\t0;\n+\n+\tdev_info->tx_queue_offload_capa =\n+\t\tDEV_TX_OFFLOAD_VLAN_INSERT |\n+\t\t0;\n+\n+\t/*\n+\t * Per-port capabilities\n+\t * See ionic_set_features to request and check supported features\n+\t */\n+\n+\tdev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |\n+\t\tDEV_RX_OFFLOAD_JUMBO_FRAME |\n+\t\tDEV_RX_OFFLOAD_VLAN_FILTER |\n+\t\tDEV_RX_OFFLOAD_VLAN_STRIP |\n+\t\tDEV_RX_OFFLOAD_SCATTER |\n+\t\t0;\n+\n+\tdev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |\n+\t\tDEV_TX_OFFLOAD_MULTI_SEGS |\n+\t\tDEV_TX_OFFLOAD_TCP_TSO |\n+\t\t0;\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n+\n+\t/* Driver-preferred Rx/Tx parameters */\n+\tdev_info->default_rxportconf.burst_size = 32;\n+\tdev_info->default_txportconf.burst_size = 32;\n+\tdev_info->default_rxportconf.nb_queues = 1;\n+\tdev_info->default_txportconf.nb_queues = 1;\n+\tdev_info->default_rxportconf.ring_size = IONIC_DEF_TXRX_DESC;\n+\tdev_info->default_txportconf.ring_size = IONIC_DEF_TXRX_DESC;\n+\n \treturn 0;\n }\n \n@@ -302,6 +373,44 @@ ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev,\n \treturn 0;\n }\n \n+static int\n+ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)\n+{\n+\tstruct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);\n+\tstruct rte_eth_rxmode *rxmode;\n+\trxmode = &eth_dev->data->dev_conf.rxmode;\n+\tint 
i;\n+\n+\tif (mask & ETH_VLAN_STRIP_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {\n+\t\t\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n+\t\t\t\tstruct ionic_qcq *rxq =\n+\t\t\t\t\teth_dev->data->rx_queues[i];\n+\t\t\t\trxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;\n+\t\t\t}\n+\t\t\tlif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;\n+\t\t} else {\n+\t\t\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n+\t\t\t\tstruct ionic_qcq *rxq =\n+\t\t\t\t\teth_dev->data->rx_queues[i];\n+\t\t\t\trxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;\n+\t\t\t}\n+\t\t\tlif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;\n+\t\t}\n+\t}\n+\n+\tif (mask & ETH_VLAN_FILTER_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)\n+\t\t\tlif->features |= IONIC_ETH_HW_VLAN_RX_FILTER;\n+\t\telse\n+\t\t\tlif->features &= ~IONIC_ETH_HW_VLAN_RX_FILTER;\n+\t}\n+\n+\tionic_lif_set_features(lif);\n+\n+\treturn 0;\n+}\n+\n static int\n ionic_dev_configure(struct rte_eth_dev *eth_dev)\n {\n@@ -439,6 +548,9 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)\n \tIONIC_PRINT_CALL();\n \n \teth_dev->dev_ops = &ionic_eth_dev_ops;\n+\teth_dev->rx_pkt_burst = &ionic_recv_pkts;\n+\teth_dev->tx_pkt_burst = &ionic_xmit_pkts;\n+\teth_dev->tx_pkt_prepare = &ionic_prep_pkts;\n \n \t/* Multi-process not supported, primary does initialization anyway */\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n@@ -510,6 +622,9 @@ eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev)\n \tionic_lif_free(lif);\n \n \teth_dev->dev_ops = NULL;\n+\teth_dev->rx_pkt_burst = NULL;\n+\teth_dev->tx_pkt_burst = NULL;\n+\teth_dev->tx_pkt_prepare = NULL;\n \n \treturn 0;\n }\ndiff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c\nindex 564c57744..88da803dc 100644\n--- a/drivers/net/ionic/ionic_lif.c\n+++ b/drivers/net/ionic/ionic_lif.c\n@@ -10,6 +10,7 @@\n #include \"ionic_lif.h\"\n #include \"ionic_ethdev.h\"\n #include \"ionic_rx_filter.h\"\n+#include \"ionic_rxtx.h\"\n \n 
static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);\n static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);\n@@ -583,6 +584,52 @@ ionic_qcq_free(struct ionic_qcq *qcq)\n \trte_free(qcq);\n }\n \n+int\n+ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t nrxq_descs,\n+\t\tstruct ionic_qcq **qcq)\n+{\n+\tuint32_t flags;\n+\tint err = -ENOMEM;\n+\n+\tflags = IONIC_QCQ_F_SG;\n+\terr = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, index, \"rx\", flags,\n+\t\tnrxq_descs,\n+\t\tsizeof(struct ionic_rxq_desc),\n+\t\tsizeof(struct ionic_rxq_comp),\n+\t\tsizeof(struct ionic_rxq_sg_desc),\n+\t\tlif->kern_pid, &lif->rxqcqs[index]);\n+\n+\tif (err)\n+\t\treturn err;\n+\n+\t*qcq = lif->rxqcqs[index];\n+\n+\treturn 0;\n+}\n+\n+int\n+ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t ntxq_descs,\n+\t\tstruct ionic_qcq **qcq)\n+{\n+\tuint32_t flags;\n+\tint err = -ENOMEM;\n+\n+\tflags = IONIC_QCQ_F_SG;\n+\terr = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, index, \"tx\", flags,\n+\t\tntxq_descs,\n+\t\tsizeof(struct ionic_txq_desc),\n+\t\tsizeof(struct ionic_txq_comp),\n+\t\tsizeof(struct ionic_txq_sg_desc),\n+\t\tlif->kern_pid, &lif->txqcqs[index]);\n+\n+\tif (err)\n+\t\treturn err;\n+\n+\t*qcq = lif->txqcqs[index];\n+\n+\treturn 0;\n+}\n+\n static int\n ionic_admin_qcq_alloc(struct ionic_lif *lif)\n {\n@@ -662,6 +709,22 @@ ionic_lif_alloc(struct ionic_lif *lif)\n \t\treturn -ENOMEM;\n \t}\n \n+\tlif->txqcqs = rte_zmalloc(\"ionic\", sizeof(*lif->txqcqs) *\n+\t\tadapter->max_ntxqs_per_lif, 0);\n+\n+\tif (!lif->txqcqs) {\n+\t\tIONIC_PRINT(ERR, \"Cannot allocate tx queues array\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tlif->rxqcqs = rte_zmalloc(\"ionic\", sizeof(*lif->rxqcqs) *\n+\t\tadapter->max_nrxqs_per_lif, 0);\n+\n+\tif (!lif->rxqcqs) {\n+\t\tIONIC_PRINT(ERR, \"Cannot allocate rx queues array\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n \tIONIC_PRINT(DEBUG, \"Allocating Notify Queue\");\n \n \terr = ionic_notify_qcq_alloc(lif);\n@@ 
-714,6 +777,16 @@ ionic_lif_free(struct ionic_lif *lif)\n \t\tlif->adminqcq = NULL;\n \t}\n \n+\tif (lif->txqcqs) {\n+\t\trte_free(lif->txqcqs);\n+\t\tlif->txqcqs = NULL;\n+\t}\n+\n+\tif (lif->rxqcqs) {\n+\t\trte_free(lif->rxqcqs);\n+\t\tlif->rxqcqs = NULL;\n+\t}\n+\n \tif (lif->info) {\n \t\trte_memzone_free(lif->info_z);\n \t\tlif->info = NULL;\n@@ -735,6 +808,18 @@ ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)\n \tqcq->flags &= ~IONIC_QCQ_F_INITED;\n }\n \n+void\n+ionic_lif_txq_deinit(struct ionic_qcq *qcq)\n+{\n+\tionic_lif_qcq_deinit(qcq->lif, qcq);\n+}\n+\n+void\n+ionic_lif_rxq_deinit(struct ionic_qcq *qcq)\n+{\n+\tionic_lif_qcq_deinit(qcq->lif, qcq);\n+}\n+\n bool\n ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index,\n \t\tvoid *cb_arg __rte_unused)\n@@ -1000,6 +1085,102 @@ ionic_lif_set_features(struct ionic_lif *lif)\n \treturn 0;\n }\n \n+int\n+ionic_lif_txq_init(struct ionic_qcq *qcq)\n+{\n+\tstruct ionic_queue *q = &qcq->q;\n+\tstruct ionic_lif *lif = qcq->lif;\n+\tstruct ionic_cq *cq = &qcq->cq;\n+\tstruct ionic_admin_ctx ctx = {\n+\t\t.pending_work = true,\n+\t\t.cmd.q_init = {\n+\t\t\t.opcode = IONIC_CMD_Q_INIT,\n+\t\t\t.lif_index = lif->index,\n+\t\t\t.type = q->type,\n+\t\t\t.index = q->index,\n+\t\t\t.flags = IONIC_QINIT_F_SG,\n+\t\t\t.intr_index = cq->bound_intr->index,\n+\t\t\t.pid = q->pid,\n+\t\t\t.ring_size = ilog2(q->num_descs),\n+\t\t\t.ring_base = q->base_pa,\n+\t\t\t.cq_ring_base = cq->base_pa,\n+\t\t\t.sg_ring_base = q->sg_base_pa,\n+\t\t},\n+\t};\n+\tint err;\n+\n+\tIONIC_PRINT(DEBUG, \"txq_init.pid %d\", ctx.cmd.q_init.pid);\n+\tIONIC_PRINT(DEBUG, \"txq_init.index %d\", ctx.cmd.q_init.index);\n+\tIONIC_PRINT(DEBUG, \"txq_init.ring_base 0x%lx\",\n+\t\tctx.cmd.q_init.ring_base);\n+\tIONIC_PRINT(DEBUG, \"txq_init.ring_size %d\",\n+\t\tctx.cmd.q_init.ring_size);\n+\n+\terr = ionic_adminq_post_wait(qcq->lif, &ctx);\n+\tif (err)\n+\t\treturn err;\n+\n+\tq->hw_type = ctx.comp.q_init.hw_type;\n+\tq->hw_index 
= ctx.comp.q_init.hw_index;\n+\tq->db = ionic_db_map(lif, q);\n+\n+\tIONIC_PRINT(DEBUG, \"txq->hw_type %d\", q->hw_type);\n+\tIONIC_PRINT(DEBUG, \"txq->hw_index %d\", q->hw_index);\n+\tIONIC_PRINT(DEBUG, \"txq->db %p\", q->db);\n+\n+\tqcq->flags |= IONIC_QCQ_F_INITED;\n+\n+\treturn 0;\n+}\n+\n+int\n+ionic_lif_rxq_init(struct ionic_qcq *qcq)\n+{\n+\tstruct ionic_queue *q = &qcq->q;\n+\tstruct ionic_lif *lif = qcq->lif;\n+\tstruct ionic_cq *cq = &qcq->cq;\n+\tstruct ionic_admin_ctx ctx = {\n+\t\t.pending_work = true,\n+\t\t.cmd.q_init = {\n+\t\t\t.opcode = IONIC_CMD_Q_INIT,\n+\t\t\t.lif_index = lif->index,\n+\t\t\t.type = q->type,\n+\t\t\t.index = q->index,\n+\t\t\t.flags = IONIC_QINIT_F_SG,\n+\t\t\t.intr_index = cq->bound_intr->index,\n+\t\t\t.pid = q->pid,\n+\t\t\t.ring_size = ilog2(q->num_descs),\n+\t\t\t.ring_base = q->base_pa,\n+\t\t\t.cq_ring_base = cq->base_pa,\n+\t\t\t.sg_ring_base = q->sg_base_pa,\n+\t\t},\n+\t};\n+\tint err;\n+\n+\tIONIC_PRINT(DEBUG, \"rxq_init.pid %d\", ctx.cmd.q_init.pid);\n+\tIONIC_PRINT(DEBUG, \"rxq_init.index %d\", ctx.cmd.q_init.index);\n+\tIONIC_PRINT(DEBUG, \"rxq_init.ring_base 0x%lx\",\n+\t\tctx.cmd.q_init.ring_base);\n+\tIONIC_PRINT(DEBUG, \"rxq_init.ring_size %d\",\n+\t\tctx.cmd.q_init.ring_size);\n+\n+\terr = ionic_adminq_post_wait(qcq->lif, &ctx);\n+\tif (err)\n+\t\treturn err;\n+\n+\tq->hw_type = ctx.comp.q_init.hw_type;\n+\tq->hw_index = ctx.comp.q_init.hw_index;\n+\tq->db = ionic_db_map(lif, q);\n+\n+\tqcq->flags |= IONIC_QCQ_F_INITED;\n+\n+\tIONIC_PRINT(DEBUG, \"rxq->hw_type %d\", q->hw_type);\n+\tIONIC_PRINT(DEBUG, \"rxq->hw_index %d\", q->hw_index);\n+\tIONIC_PRINT(DEBUG, \"rxq->db %p\", q->db);\n+\n+\treturn 0;\n+}\n+\n static int\n ionic_station_set(struct ionic_lif *lif)\n {\n@@ -1084,7 +1265,17 @@ ionic_lif_init(struct ionic_lif *lif)\n \tif (err)\n \t\tgoto err_out_adminq_deinit;\n \n-\tlif->features = 0;\n+\tlif->features =\n+\t\t  IONIC_ETH_HW_VLAN_TX_TAG\n+\t\t| IONIC_ETH_HW_VLAN_RX_STRIP\n+\t\t| 
IONIC_ETH_HW_VLAN_RX_FILTER\n+\t\t| IONIC_ETH_HW_RX_HASH\n+\t\t| IONIC_ETH_HW_TX_SG\n+\t\t| IONIC_ETH_HW_RX_SG\n+\t\t| IONIC_ETH_HW_RX_CSUM\n+\t\t| IONIC_ETH_HW_TSO\n+\t\t| IONIC_ETH_HW_TSO_IPV6\n+\t\t| IONIC_ETH_HW_TSO_ECN;\n \n \terr = ionic_lif_set_features(lif);\n \n@@ -1137,6 +1328,9 @@ ionic_lif_configure(struct ionic_lif *lif)\n {\n \tlif->port_id = lif->eth_dev->data->port_id;\n \n+\tlif->nrxqcqs = 1;\n+\tlif->ntxqcqs = 1;\n+\n \treturn 0;\n }\n \n@@ -1144,6 +1338,8 @@ int\n ionic_lif_start(struct ionic_lif *lif)\n {\n \tuint32_t rx_mode = 0;\n+\tuint32_t i;\n+\tint err;\n \n \tIONIC_PRINT(DEBUG, \"Setting RX mode on port %u\",\n \t\tlif->port_id);\n@@ -1156,6 +1352,30 @@ ionic_lif_start(struct ionic_lif *lif)\n \n \tionic_set_rx_mode(lif, rx_mode);\n \n+\tIONIC_PRINT(DEBUG, \"Starting %u RX queues and %u TX queues \"\n+\t\t\"on port %u\",\n+\t\tlif->nrxqcqs, lif->ntxqcqs, lif->port_id);\n+\n+\tfor (i = 0; i < lif->nrxqcqs; i++) {\n+\t\tstruct ionic_qcq *rxq = lif->rxqcqs[i];\n+\t\tif (!rxq->deferred_start) {\n+\t\t\terr = ionic_dev_rx_queue_start(lif->eth_dev, i);\n+\n+\t\t\tif (err)\n+\t\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < lif->ntxqcqs; i++) {\n+\t\tstruct ionic_qcq *txq = lif->txqcqs[i];\n+\t\tif (!txq->deferred_start) {\n+\t\t\terr = ionic_dev_tx_queue_start(lif->eth_dev, i);\n+\n+\t\t\tif (err)\n+\t\t\t\treturn err;\n+\t\t}\n+\t}\n+\n \tionic_link_status_check(lif);\n \n \t/* Carrier ON here */\ndiff --git a/drivers/net/ionic/ionic_lif.h b/drivers/net/ionic/ionic_lif.h\nindex e0863c5de..5e7d9ae0c 100644\n--- a/drivers/net/ionic/ionic_lif.h\n+++ b/drivers/net/ionic/ionic_lif.h\n@@ -17,6 +17,26 @@\n #define IONIC_ADMINQ_LENGTH\t16\t/* must be a power of two */\n #define IONIC_NOTIFYQ_LENGTH\t64\t/* must be a power of two */\n \n+#define IONIC_GET_SG_CNTR_IDX(num_sg_elems)\t(num_sg_elems)\n+\n+struct ionic_tx_stats {\n+\tuint64_t packets;\n+\tuint64_t bytes;\n+\tuint64_t drop;\n+\tuint64_t stop;\n+\tuint64_t tso;\n+\tuint64_t 
frags;\n+};\n+\n+struct ionic_rx_stats {\n+\tuint64_t packets;\n+\tuint64_t bytes;\n+\tuint64_t no_cb_arg;\n+\tuint64_t bad_cq_status;\n+\tuint64_t no_room;\n+\tuint64_t bad_len;\n+};\n+\n #define IONIC_QCQ_F_INITED\tBIT(0)\n #define IONIC_QCQ_F_SG\t\tBIT(1)\n #define IONIC_QCQ_F_INTR\tBIT(2)\n@@ -24,18 +44,28 @@\n \n /* Queue / Completion Queue */\n struct ionic_qcq {\n+\tuint64_t offloads;\n \tstruct ionic_queue q;        /**< Queue */\n \tstruct ionic_cq cq;          /**< Completion Queue */\n \tstruct ionic_lif *lif;       /**< LIF */\n \tstruct rte_mempool *mb_pool; /**< mbuf pool to populate the RX ring */\n+\tunion {\n+\t\tstruct ionic_tx_stats tx;\n+\t\tstruct ionic_rx_stats rx;\n+\t} stats;\n \tconst struct rte_memzone *base_z;\n \tvoid *base;\n \trte_iova_t base_pa;\n \tuint32_t total_size;\n \tuint32_t flags;\n \tstruct ionic_intr_info intr;\n+\tbool deferred_start;\n };\n \n+#define IONIC_Q_TO_QCQ(q)\tcontainer_of(q, struct ionic_qcq, q)\n+#define IONIC_Q_TO_TX_STATS(q)\t(&IONIC_Q_TO_QCQ(q)->stats.tx)\n+#define IONIC_Q_TO_RX_STATS(q)\t(&IONIC_Q_TO_QCQ(q)->stats.rx)\n+\n #define IONIC_LIF_F_INITED\t\tBIT(0)\n #define IONIC_LIF_F_LINK_CHECK_NEEDED\tBIT(1)\n \n@@ -49,11 +79,15 @@ struct ionic_lif {\n \tuint32_t index;\n \tuint32_t hw_index;\n \tuint32_t state;\n+\tuint32_t ntxqcqs;\n+\tuint32_t nrxqcqs;\n \tuint32_t kern_pid;\n \trte_spinlock_t adminq_lock;\n \trte_spinlock_t adminq_service_lock;\n \tstruct ionic_qcq *adminqcq;\n \tstruct ionic_qcq *notifyqcq;\n+\tstruct ionic_qcq **txqcqs;\n+\tstruct ionic_qcq **rxqcqs;\n \tstruct ionic_rx_filters rx_filters;\n \tstruct ionic_doorbell __iomem *kern_dbpage;\n \tuint64_t last_eid;\n@@ -107,11 +141,21 @@ int ionic_dev_promiscuous_disable(struct rte_eth_dev *dev);\n int ionic_dev_allmulticast_enable(struct rte_eth_dev *dev);\n int ionic_dev_allmulticast_disable(struct rte_eth_dev *dev);\n \n+int ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index,\n+\tuint16_t nrxq_descs, struct ionic_qcq **qcq);\n+int 
ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index,\n+\tuint16_t ntxq_descs, struct ionic_qcq **qcq);\n void ionic_qcq_free(struct ionic_qcq *qcq);\n \n int ionic_qcq_enable(struct ionic_qcq *qcq);\n int ionic_qcq_disable(struct ionic_qcq *qcq);\n \n+int ionic_lif_rxq_init(struct ionic_qcq *qcq);\n+void ionic_lif_rxq_deinit(struct ionic_qcq *qcq);\n+\n+int ionic_lif_txq_init(struct ionic_qcq *qcq);\n+void ionic_lif_txq_deinit(struct ionic_qcq *qcq);\n+\n int ionic_lif_set_features(struct ionic_lif *lif);\n \n int ionic_notifyq_handler(struct ionic_lif *lif, int budget);\ndiff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c\nnew file mode 100644\nindex 000000000..44cc6278c\n--- /dev/null\n+++ b/drivers/net/ionic/ionic_rxtx.c\n@@ -0,0 +1,999 @@\n+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)\n+ * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.\n+ */\n+\n+#include <sys/queue.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <stdarg.h>\n+#include <unistd.h>\n+#include <inttypes.h>\n+\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_log.h>\n+#include <rte_debug.h>\n+#include <rte_interrupts.h>\n+#include <rte_pci.h>\n+#include <rte_memory.h>\n+#include <rte_memzone.h>\n+#include <rte_launch.h>\n+#include <rte_eal.h>\n+#include <rte_per_lcore.h>\n+#include <rte_lcore.h>\n+#include <rte_atomic.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_mempool.h>\n+#include <rte_malloc.h>\n+#include <rte_mbuf.h>\n+#include <rte_ether.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_prefetch.h>\n+#include <rte_udp.h>\n+#include <rte_tcp.h>\n+#include <rte_sctp.h>\n+#include <rte_string_fns.h>\n+#include <rte_errno.h>\n+#include <rte_ip.h>\n+#include <rte_net.h>\n+\n+#include \"ionic_logs.h\"\n+#include \"ionic_mac_api.h\"\n+#include \"ionic_ethdev.h\"\n+#include \"ionic_lif.h\"\n+#include 
\"ionic_rxtx.h\"\n+\n+#define IONIC_RX_RING_DOORBELL_STRIDE\t\t(32 - 1)\n+\n+/*********************************************************************\n+ *\n+ *  TX functions\n+ *\n+ **********************************************************************/\n+\n+void\n+ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\t\tstruct rte_eth_txq_info *qinfo)\n+{\n+\tstruct ionic_qcq *txq = dev->data->tx_queues[queue_id];\n+\tstruct ionic_queue *q = &txq->q;\n+\n+\tqinfo->nb_desc = q->num_descs;\n+\tqinfo->conf.offloads = txq->offloads;\n+\tqinfo->conf.tx_deferred_start = txq->deferred_start;\n+}\n+\n+static inline void __attribute__((cold))\n+ionic_tx_flush(struct ionic_cq *cq)\n+{\n+\tstruct ionic_queue *q = cq->bound_q;\n+\tstruct ionic_desc_info *q_desc_info;\n+\tstruct rte_mbuf *txm, *next;\n+\tstruct ionic_txq_comp *cq_desc_base = cq->base;\n+\tstruct ionic_txq_comp *cq_desc;\n+\tu_int32_t comp_index = (u_int32_t)-1;\n+\n+\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\twhile (color_match(cq_desc->color, cq->done_color)) {\n+\t\tcq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);\n+\n+\t\t/* Prefetch the next 4 descriptors (not really useful here) */\n+\t\tif ((cq->tail_idx & 0x3) == 0)\n+\t\t\trte_prefetch0(&cq_desc_base[cq->tail_idx]);\n+\n+\t\tif (cq->tail_idx == 0)\n+\t\t\tcq->done_color = !cq->done_color;\n+\n+\t\tcomp_index = cq_desc->comp_index;\n+\n+\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\t}\n+\n+\tif (comp_index != (u_int32_t)-1) {\n+\t\twhile (q->tail_idx != comp_index) {\n+\t\t\tq_desc_info = &q->info[q->tail_idx];\n+\n+\t\t\tq->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);\n+\n+\t\t\t/* Prefetch the next 4 descriptors */\n+\t\t\tif ((q->tail_idx & 0x3) == 0)\n+\t\t\t\t/* q desc info */\n+\t\t\t\trte_prefetch0(&q->info[q->tail_idx]);\n+\n+\t\t\t/*\n+\t\t\t * Note: you can just use rte_pktmbuf_free,\n+\t\t\t * but this loop is faster\n+\t\t\t */\n+\t\t\ttxm = q_desc_info->cb_arg;\n+\t\t\twhile (txm != NULL) {\n+\t\t\t\tnext = 
txm->next;\n+\t\t\t\trte_pktmbuf_free_seg(txm);\n+\t\t\t\ttxm = next;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+void __attribute__((cold))\n+ionic_dev_tx_queue_release(void *tx_queue)\n+{\n+\tstruct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\tionic_qcq_free(txq);\n+}\n+\n+int __attribute__((cold))\n+ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)\n+{\n+\tstruct ionic_qcq *txq;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\n+\t/*\n+\t * Note: we should better post NOP Tx desc and wait for its completion\n+\t * before disabling Tx queue\n+\t */\n+\n+\tionic_qcq_disable(txq);\n+\n+\tionic_tx_flush(&txq->cq);\n+\n+\tionic_lif_txq_deinit(txq);\n+\n+\teth_dev->data->tx_queue_state[tx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+int __attribute__((cold))\n+ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,\n+\t\tuint16_t nb_desc, uint32_t socket_id __rte_unused,\n+\t\tconst struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);\n+\tstruct ionic_qcq *txq;\n+\tuint64_t offloads;\n+\tint err;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\tIONIC_PRINT(DEBUG, \"Configuring TX queue %u with %u buffers\",\n+\t\ttx_queue_id, nb_desc);\n+\n+\tif (tx_queue_id >= lif->ntxqcqs) {\n+\t\tIONIC_PRINT(DEBUG, \"Queue index %u not available \"\n+\t\t\t\"(max %u queues)\",\n+\t\t\ttx_queue_id, lif->ntxqcqs);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\toffloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;\n+\n+\t/* Validate number of receive descriptors */\n+\tif (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)\n+\t\treturn -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */\n+\n+\t/* Free memory prior to re-allocation if needed... 
*/\n+\tif (eth_dev->data->tx_queues[tx_queue_id] != NULL) {\n+\t\tvoid *tx_queue = eth_dev->data->tx_queues[tx_queue_id];\n+\t\tionic_dev_tx_queue_release(tx_queue);\n+\t\teth_dev->data->tx_queues[tx_queue_id] = NULL;\n+\t}\n+\n+\terr = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);\n+\n+\tif (err) {\n+\t\tIONIC_PRINT(DEBUG, \"Queue allocation failure\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Do not start queue with rte_eth_dev_start() */\n+\ttxq->deferred_start = tx_conf->tx_deferred_start;\n+\n+\ttxq->offloads = offloads;\n+\n+\teth_dev->data->tx_queues[tx_queue_id] = txq;\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Start Transmit Units for specified queue.\n+ */\n+int __attribute__((cold))\n+ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)\n+{\n+\tstruct ionic_qcq *txq;\n+\tint err;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\n+\terr = ionic_lif_txq_init(txq);\n+\n+\tif (err)\n+\t\treturn err;\n+\n+\tionic_qcq_enable(txq);\n+\n+\teth_dev->data->tx_queue_state[tx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\n+\treturn 0;\n+}\n+\n+static void\n+ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,\n+\t\tstruct rte_mbuf *txm,\n+\t\trte_iova_t addr, uint8_t nsge, uint16_t len,\n+\t\tuint32_t hdrlen, uint32_t mss,\n+\t\tuint16_t vlan_tci, bool has_vlan,\n+\t\tbool start, bool done)\n+{\n+\tuint8_t flags = 0;\n+\tflags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;\n+\tflags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;\n+\tflags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;\n+\n+\tdesc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,\n+\t\tflags, nsge, addr);\n+\tdesc->len = len;\n+\tdesc->vlan_tci = vlan_tci;\n+\tdesc->hdr_len = hdrlen;\n+\tdesc->mss = mss;\n+\n+\tionic_q_post(q, done, NULL, done ? 
txm : NULL);\n+}\n+\n+static struct ionic_txq_desc *\n+ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)\n+{\n+\tstruct ionic_txq_desc *desc_base = q->base;\n+\tstruct ionic_txq_sg_desc *sg_desc_base = q->sg_base;\n+\tstruct ionic_txq_desc *desc = &desc_base[q->head_idx];\n+\tstruct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];\n+\n+\t*elem = sg_desc->elems;\n+\treturn desc;\n+}\n+\n+static int\n+ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,\n+\t\tuint64_t offloads __rte_unused, bool not_xmit_more)\n+{\n+\tstruct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);\n+\tstruct ionic_txq_desc *desc;\n+\tstruct ionic_txq_sg_elem *elem;\n+\tstruct rte_mbuf *txm_seg;\n+\tuint64_t desc_addr = 0;\n+\tuint16_t desc_len = 0;\n+\tuint8_t desc_nsge;\n+\tuint32_t hdrlen;\n+\tuint32_t mss = txm->tso_segsz;\n+\tuint32_t frag_left = 0;\n+\tuint32_t left;\n+\tuint32_t seglen;\n+\tuint32_t len;\n+\tuint32_t offset = 0;\n+\tbool start, done;\n+\tbool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);\n+\tuint16_t vlan_tci = txm->vlan_tci;\n+\n+\thdrlen = txm->l2_len + txm->l3_len;\n+\n+\tseglen = hdrlen + mss;\n+\tleft = txm->data_len;\n+\n+\tdesc = ionic_tx_tso_next(q, &elem);\n+\tstart = true;\n+\n+\t/* Chop data up into desc segments */\n+\n+\twhile (left > 0) {\n+\t\tlen = RTE_MIN(seglen, left);\n+\t\tfrag_left = seglen - len;\n+\t\tdesc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));\n+\t\tdesc_len = len;\n+\t\tdesc_nsge = 0;\n+\t\tleft -= len;\n+\t\toffset += len;\n+\t\tif (txm->nb_segs > 1 && frag_left > 0)\n+\t\t\tcontinue;\n+\t\tdone = (txm->nb_segs == 1 && left == 0);\n+\t\tionic_tx_tso_post(q, desc, txm,\n+\t\t\tdesc_addr, desc_nsge, desc_len,\n+\t\t\thdrlen, mss,\n+\t\t\tvlan_tci, has_vlan,\n+\t\t\tstart, done && not_xmit_more);\n+\t\tdesc = ionic_tx_tso_next(q, &elem);\n+\t\tstart = false;\n+\t\tseglen = mss;\n+\t}\n+\n+\t/* Chop frags into desc segments */\n+\n+\ttxm_seg = txm->next;\n+\twhile (txm_seg != NULL) 
{\n+\t\toffset = 0;\n+\t\tleft = txm_seg->data_len;\n+\t\tstats->frags++;\n+\n+\t\twhile (left > 0) {\n+\t\t\trte_iova_t data_iova;\n+\t\t\tdata_iova = rte_mbuf_data_iova(txm_seg);\n+\t\t\telem->addr = rte_cpu_to_le_64(data_iova) + offset;\n+\t\t\tif (frag_left > 0) {\n+\t\t\t\tlen = RTE_MIN(frag_left, left);\n+\t\t\t\tfrag_left -= len;\n+\t\t\t\telem->len = len;\n+\t\t\t\telem++;\n+\t\t\t\tdesc_nsge++;\n+\t\t\t} else {\n+\t\t\t\tlen = RTE_MIN(mss, left);\n+\t\t\t\tfrag_left = mss - len;\n+\t\t\t\tdata_iova = rte_mbuf_data_iova(txm_seg);\n+\t\t\t\tdesc_addr = rte_cpu_to_le_64(data_iova);\n+\t\t\t\tdesc_len = len;\n+\t\t\t\tdesc_nsge = 0;\n+\t\t\t}\n+\t\t\tleft -= len;\n+\t\t\toffset += len;\n+\t\t\tif (txm_seg->next != NULL && frag_left > 0)\n+\t\t\t\tcontinue;\n+\t\t\tdone = (txm_seg->next == NULL && left == 0);\n+\t\t\tionic_tx_tso_post(q, desc, txm_seg,\n+\t\t\t\tdesc_addr, desc_nsge, desc_len,\n+\t\t\t\thdrlen, mss,\n+\t\t\t\tvlan_tci, has_vlan,\n+\t\t\t\tstart, done && not_xmit_more);\n+\t\t\tdesc = ionic_tx_tso_next(q, &elem);\n+\t\t\tstart = false;\n+\t\t}\n+\n+\t\ttxm_seg = txm_seg->next;\n+\t}\n+\n+\tstats->tso++;\n+\n+\treturn 0;\n+}\n+\n+static int\n+ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,\n+\t\tuint64_t offloads __rte_unused, bool not_xmit_more)\n+{\n+\tstruct ionic_txq_desc *desc_base = q->base;\n+\tstruct ionic_txq_sg_desc *sg_desc_base = q->sg_base;\n+\tstruct ionic_txq_desc *desc = &desc_base[q->head_idx];\n+\tstruct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];\n+\tstruct ionic_txq_sg_elem *elem = sg_desc->elems;\n+\tstruct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);\n+\tstruct rte_mbuf *txm_seg;\n+\tbool has_vlan;\n+\tuint64_t ol_flags = txm->ol_flags;\n+\tuint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));\n+\tuint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;\n+\tuint8_t flags = 0;\n+\n+\thas_vlan = (ol_flags & PKT_TX_VLAN_PKT);\n+\n+\tflags |= has_vlan ? 
IONIC_TXQ_DESC_FLAG_VLAN : 0;\n+\n+\tdesc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);\n+\tdesc->len = txm->data_len;\n+\tdesc->vlan_tci = txm->vlan_tci;\n+\n+\ttxm_seg = txm->next;\n+\twhile (txm_seg != NULL) {\n+\t\telem->len = txm_seg->data_len;\n+\t\telem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));\n+\t\tstats->frags++;\n+\t\telem++;\n+\t\ttxm_seg = txm_seg->next;\n+\t}\n+\n+\tionic_q_post(q, not_xmit_more, NULL, txm);\n+\n+\treturn 0;\n+}\n+\n+uint16_t\n+ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;\n+\tstruct ionic_queue *q = &txq->q;\n+\tstruct ionic_cq *cq = &txq->cq;\n+\tstruct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);\n+\tuint32_t next_q_head_idx;\n+\tuint32_t bytes_tx = 0;\n+\tuint16_t nb_tx = 0;\n+\tint err;\n+\tbool last;\n+\n+\t/* Cleaning old buffers */\n+\tionic_tx_flush(cq);\n+\n+\tif (unlikely(ionic_q_space_avail(q) < nb_pkts)) {\n+\t\tstats->stop += nb_pkts;\n+\t\treturn 0;\n+\t}\n+\n+\twhile (nb_tx < nb_pkts) {\n+\t\tlast = (nb_tx == (nb_pkts - 1));\n+\n+\t\tnext_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);\n+\t\tif ((next_q_head_idx & 0x3) == 0) {\n+\t\t\tstruct ionic_txq_desc *desc_base = q->base;\n+\t\t\trte_prefetch0(&desc_base[next_q_head_idx]);\n+\t\t\trte_prefetch0(&q->info[next_q_head_idx]);\n+\t\t}\n+\n+\t\tif (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)\n+\t\t\terr = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,\n+\t\t\t\tlast);\n+\t\telse\n+\t\t\terr = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);\n+\n+\t\tif (err) {\n+\t\t\tstats->drop += nb_pkts - nb_tx;\n+\t\t\tif (nb_tx > 0)\n+\t\t\t\tionic_q_flush(q);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tbytes_tx += tx_pkts[nb_tx]->pkt_len;\n+\t\tnb_tx++;\n+\t}\n+\n+\tstats->packets += nb_tx;\n+\tstats->bytes += bytes_tx;\n+\n+\treturn nb_tx;\n+}\n+\n+/*********************************************************************\n+ *\n+ *  TX prep functions\n+ 
*\n+ **********************************************************************/\n+\n+#define IONIC_TX_OFFLOAD_MASK (\t\\\n+\tPKT_TX_IPV4 |\t\t\\\n+\tPKT_TX_IPV6 |\t\t\\\n+\tPKT_TX_VLAN |\t\t\\\n+\tPKT_TX_TCP_SEG |\t\\\n+\tPKT_TX_L4_MASK)\n+\n+#define IONIC_TX_OFFLOAD_NOTSUP_MASK \\\n+\t(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)\n+\n+uint16_t\n+ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct rte_mbuf *txm;\n+\tuint64_t offloads;\n+\tint i = 0;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\ttxm = tx_pkts[i];\n+\n+\t\tif (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {\n+\t\t\trte_errno = -EINVAL;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\toffloads = txm->ol_flags;\n+\n+\t\tif (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {\n+\t\t\trte_errno = -ENOTSUP;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn i;\n+}\n+\n+/*********************************************************************\n+ *\n+ *  RX functions\n+ *\n+ **********************************************************************/\n+\n+static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,\n+\t\tstruct rte_mbuf *mbuf);\n+\n+void\n+ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\t\tstruct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct ionic_qcq *rxq = dev->data->rx_queues[queue_id];\n+\tstruct ionic_queue *q = &rxq->q;\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = dev->data->scattered_rx;\n+\tqinfo->nb_desc = q->num_descs;\n+\tqinfo->conf.rx_deferred_start = rxq->deferred_start;\n+\tqinfo->conf.offloads = rxq->offloads;\n+}\n+\n+static void __attribute__((cold))\n+ionic_rx_empty(struct ionic_queue *q)\n+{\n+\tstruct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);\n+\tstruct ionic_desc_info *cur;\n+\tstruct rte_mbuf *mbuf;\n+\n+\twhile (q->tail_idx != q->head_idx) {\n+\t\tcur = &q->info[q->tail_idx];\n+\t\tmbuf = cur->cb_arg;\n+\t\trte_mempool_put(rxq->mb_pool, mbuf);\n+\n+\t\tq->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);\n+\t}\n+}\n+\n+void 
__attribute__((cold))\n+ionic_dev_rx_queue_release(void *rx_queue)\n+{\n+\tstruct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\tionic_rx_empty(&rxq->q);\n+\n+\tionic_qcq_free(rxq);\n+}\n+\n+int __attribute__((cold))\n+ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n+\t\tuint16_t rx_queue_id,\n+\t\tuint16_t nb_desc,\n+\t\tuint32_t socket_id __rte_unused,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mp)\n+{\n+\tstruct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);\n+\tstruct ionic_qcq *rxq;\n+\tuint64_t offloads;\n+\tint err;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\tIONIC_PRINT(DEBUG, \"Configuring RX queue %u with %u buffers\",\n+\t\trx_queue_id, nb_desc);\n+\n+\tif (rx_queue_id >= lif->nrxqcqs) {\n+\t\tIONIC_PRINT(ERR,\n+\t\t\t\"Queue index %u not available (max %u queues)\",\n+\t\t\trx_queue_id, lif->nrxqcqs);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\toffloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;\n+\n+\t/* Validate number of receive descriptors */\n+\tif (!rte_is_power_of_2(nb_desc) ||\n+\t\t\tnb_desc < IONIC_MIN_RING_DESC ||\n+\t\t\tnb_desc > IONIC_MAX_RING_DESC) {\n+\t\tIONIC_PRINT(ERR,\n+\t\t\t\"Bad number of descriptors (%u) for queue %u (min: %u)\",\n+\t\t\tnb_desc, rx_queue_id, IONIC_MIN_RING_DESC);\n+\t\treturn -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */\n+\t}\n+\n+\tif (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)\n+\t\teth_dev->data->scattered_rx = 1;\n+\n+\t/* Free memory prior to re-allocation if needed... 
*/\n+\tif (eth_dev->data->rx_queues[rx_queue_id] != NULL) {\n+\t\tvoid *rx_queue = eth_dev->data->rx_queues[rx_queue_id];\n+\t\tionic_dev_rx_queue_release(rx_queue);\n+\t\teth_dev->data->rx_queues[rx_queue_id] = NULL;\n+\t}\n+\n+\terr = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);\n+\n+\tif (err) {\n+\t\tIONIC_PRINT(ERR, \"Queue allocation failure\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trxq->mb_pool = mp;\n+\n+\t/*\n+\t * Note: the interface does not currently support\n+\t * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN\n+\t * when the adapter will be able to keep the CRC and subtract\n+\t * it to the length for all received packets:\n+\t * if (eth_dev->data->dev_conf.rxmode.offloads &\n+\t *     DEV_RX_OFFLOAD_KEEP_CRC)\n+\t *   rxq->crc_len = ETHER_CRC_LEN;\n+\t */\n+\n+\t/* Do not start queue with rte_eth_dev_start() */\n+\trxq->deferred_start = rx_conf->rx_deferred_start;\n+\n+\trxq->offloads = offloads;\n+\n+\teth_dev->data->rx_queues[rx_queue_id] = rxq;\n+\n+\treturn 0;\n+}\n+\n+static void\n+ionic_rx_clean(struct ionic_queue *q,\n+\t\tuint32_t q_desc_index, uint32_t cq_desc_index,\n+\t\tvoid *cb_arg, void *service_cb_arg)\n+{\n+\tstruct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;\n+\tstruct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];\n+\tstruct rte_mbuf *rxm = cb_arg;\n+\tstruct rte_mbuf *rxm_seg;\n+\tstruct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);\n+\tuint32_t max_frame_size =\n+\t\trxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t pkt_type;\n+\tstruct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);\n+\tstruct ionic_rx_service *recv_args = (struct ionic_rx_service *)\n+\t\tservice_cb_arg;\n+\tuint32_t buf_size = (uint16_t)\n+\t\t(rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\tRTE_PKTMBUF_HEADROOM);\n+\tuint32_t left;\n+\n+\tif (!recv_args) {\n+\t\tstats->no_cb_arg++;\n+\t\t/* Flush */\n+\t\trte_pktmbuf_free(rxm);\n+\t\t/*\n+\t\t * Note: rte_mempool_put is faster with no 
segs\n+\t\t * rte_mempool_put(rxq->mb_pool, rxm);\n+\t\t */\n+\t\treturn;\n+\t}\n+\n+\tif (cq_desc->status) {\n+\t\tstats->bad_cq_status++;\n+\t\tionic_rx_recycle(q, q_desc_index, rxm);\n+\t\treturn;\n+\t}\n+\n+\tif (recv_args->nb_rx >= recv_args->nb_pkts) {\n+\t\tstats->no_room++;\n+\t\tionic_rx_recycle(q, q_desc_index, rxm);\n+\t\treturn;\n+\t}\n+\n+\tif (cq_desc->len > max_frame_size ||\n+\t\t\tcq_desc->len == 0) {\n+\t\tstats->bad_len++;\n+\t\tionic_rx_recycle(q, q_desc_index, rxm);\n+\t\treturn;\n+\t}\n+\n+\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\trte_prefetch1((char *)rxm->buf_addr + rxm->data_off);\n+\trxm->nb_segs = 1; /* cq_desc->num_sg_elems */\n+\trxm->pkt_len = cq_desc->len;\n+\trxm->port = rxq->lif->port_id;\n+\n+\tleft = cq_desc->len;\n+\n+\trxm->data_len = RTE_MIN(buf_size, left);\n+\tleft -= rxm->data_len;\n+\n+\trxm_seg = rxm->next;\n+\twhile (rxm_seg && left) {\n+\t\trxm_seg->data_len = RTE_MIN(buf_size, left);\n+\t\tleft -= rxm_seg->data_len;\n+\n+\t\trxm_seg = rxm_seg->next;\n+\t\trxm->nb_segs++;\n+\t}\n+\n+\t/* Vlan Strip */\n+\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {\n+\t\tpkt_flags |= PKT_RX_VLAN_STRIPPED;\n+\t\trxm->vlan_tci = cq_desc->vlan_tci;\n+\t}\n+\n+\t/* Checksum */\n+\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {\n+\t\tif (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)\n+\t\t\tpkt_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t\telse if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)\n+\t\t\tpkt_flags |= PKT_RX_IP_CKSUM_BAD;\n+\n+\t\tif ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||\n+\t\t\t(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))\n+\t\t\tpkt_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t\telse if ((cq_desc->csum_flags &\n+\t\t\t\tIONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||\n+\t\t\t\t(cq_desc->csum_flags &\n+\t\t\t\tIONIC_RXQ_COMP_CSUM_F_UDP_BAD))\n+\t\t\tpkt_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t}\n+\n+\trxm->ol_flags = pkt_flags;\n+\n+\t/* Packet Type */\n+\tswitch (cq_desc->pkt_type_color & 
IONIC_RXQ_COMP_PKT_TYPE_MASK) {\n+\tcase IONIC_PKT_TYPE_IPV4:\n+\t\tpkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;\n+\t\tbreak;\n+\tcase IONIC_PKT_TYPE_IPV6:\n+\t\tpkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;\n+\t\tbreak;\n+\tcase IONIC_PKT_TYPE_IPV4_TCP:\n+\t\tpkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |\n+\t\t\tRTE_PTYPE_L4_TCP;\n+\t\tbreak;\n+\tcase IONIC_PKT_TYPE_IPV6_TCP:\n+\t\tpkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |\n+\t\t\tRTE_PTYPE_L4_TCP;\n+\t\tbreak;\n+\tcase IONIC_PKT_TYPE_IPV4_UDP:\n+\t\tpkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |\n+\t\t\tRTE_PTYPE_L4_UDP;\n+\t\tbreak;\n+\tcase IONIC_PKT_TYPE_IPV6_UDP:\n+\t\tpkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |\n+\t\t\tRTE_PTYPE_L4_UDP;\n+\t\tbreak;\n+\tdefault:\n+\t\t{\n+\t\t\tstruct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,\n+\t\t\t\tstruct rte_ether_hdr *);\n+\t\t\tuint16_t ether_type = eth_h->ether_type;\n+\t\t\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))\n+\t\t\t\tpkt_type = RTE_PTYPE_L2_ETHER_ARP;\n+\t\t\telse\n+\t\t\t\tpkt_type = RTE_PTYPE_UNKNOWN;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\trxm->packet_type = pkt_type;\n+\n+\trecv_args->rx_pkts[recv_args->nb_rx] = rxm;\n+\trecv_args->nb_rx++;\n+\n+\tstats->packets++;\n+\tstats->bytes += rxm->pkt_len;\n+}\n+\n+static void\n+ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,\n+\t\t struct rte_mbuf *mbuf)\n+{\n+\tstruct ionic_rxq_desc *desc_base = q->base;\n+\tstruct ionic_rxq_desc *old = &desc_base[q_desc_index];\n+\tstruct ionic_rxq_desc *new = &desc_base[q->head_idx];\n+\n+\tnew->addr = old->addr;\n+\tnew->len = old->len;\n+\n+\tionic_q_post(q, true, ionic_rx_clean, mbuf);\n+}\n+\n+static int __attribute__((cold))\n+ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)\n+{\n+\tstruct ionic_queue *q = &rxq->q;\n+\tstruct ionic_rxq_desc *desc_base = q->base;\n+\tstruct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;\n+\tstruct ionic_rxq_desc *desc;\n+\tstruct ionic_rxq_sg_desc *sg_desc;\n+\tstruct 
ionic_rxq_sg_elem *elem;\n+\trte_iova_t dma_addr;\n+\tuint32_t i, j, nsegs, buf_size, size;\n+\tbool ring_doorbell;\n+\n+\tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\tRTE_PKTMBUF_HEADROOM);\n+\n+\t/* Initialize software ring entries */\n+\tfor (i = ionic_q_space_avail(q); i; i--) {\n+\t\tstruct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tstruct rte_mbuf *prev_rxm_seg;\n+\n+\t\tif (rxm == NULL) {\n+\t\t\tIONIC_PRINT(ERR, \"RX mbuf alloc failed\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tnsegs = (len + buf_size - 1) / buf_size;\n+\n+\t\tdesc = &desc_base[q->head_idx];\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));\n+\t\tdesc->addr = dma_addr;\n+\t\tdesc->len = buf_size;\n+\t\tsize = buf_size;\n+\t\tdesc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :\n+\t\t\tIONIC_RXQ_DESC_OPCODE_SIMPLE;\n+\t\trxm->next = NULL;\n+\n+\t\tprev_rxm_seg = rxm;\n+\t\tsg_desc = &sg_desc_base[q->head_idx];\n+\t\telem = sg_desc->elems;\n+\t\tfor (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {\n+\t\t\tstruct rte_mbuf *rxm_seg;\n+\t\t\trte_iova_t data_iova;\n+\n+\t\t\trxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\t\tif (rxm_seg == NULL) {\n+\t\t\t\tIONIC_PRINT(ERR, \"RX mbuf alloc failed\");\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\n+\t\t\tdata_iova = rte_mbuf_data_iova(rxm_seg);\n+\t\t\tdma_addr = rte_cpu_to_le_64(data_iova);\n+\t\t\telem->addr = dma_addr;\n+\t\t\telem->len = buf_size;\n+\t\t\tsize += buf_size;\n+\t\t\telem++;\n+\t\t\trxm_seg->next = NULL;\n+\t\t\tprev_rxm_seg->next = rxm_seg;\n+\t\t\tprev_rxm_seg = rxm_seg;\n+\t\t}\n+\n+\t\tif (size < len)\n+\t\t\tIONIC_PRINT(ERR, \"Rx SG size is not sufficient (%d < %d)\",\n+\t\t\t\tsize, len);\n+\n+\t\tring_doorbell = ((q->head_idx + 1) &\n+\t\t\tIONIC_RX_RING_DOORBELL_STRIDE) == 0;\n+\n+\t\tionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Start Receive Units for specified queue.\n+ */\n+int 
__attribute__((cold))\n+ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n+{\n+\tuint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\tstruct ionic_qcq *rxq;\n+\tint err;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\tIONIC_PRINT(DEBUG, \"Allocating RX queue buffers (size: %u)\",\n+\t\tframe_size);\n+\n+\trxq = eth_dev->data->rx_queues[rx_queue_id];\n+\n+\terr = ionic_lif_rxq_init(rxq);\n+\n+\tif (err)\n+\t\treturn err;\n+\n+\t/* Allocate buffers for descriptor rings */\n+\tif (ionic_rx_fill(rxq, frame_size) != 0) {\n+\t\tIONIC_PRINT(ERR, \"Could not alloc mbuf for queue:%d\",\n+\t\t\trx_queue_id);\n+\t\treturn -1;\n+\t}\n+\n+\tionic_qcq_enable(rxq);\n+\n+\teth_dev->data->rx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\n+\treturn 0;\n+}\n+\n+static inline void __attribute__((cold))\n+ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,\n+\t\tvoid *service_cb_arg)\n+{\n+\tstruct ionic_queue *q = cq->bound_q;\n+\tstruct ionic_desc_info *q_desc_info;\n+\tstruct ionic_rxq_comp *cq_desc_base = cq->base;\n+\tstruct ionic_rxq_comp *cq_desc;\n+\tbool more;\n+\tuint32_t curr_q_tail_idx, curr_cq_tail_idx;\n+\tuint32_t work_done = 0;\n+\n+\tif (work_to_do == 0)\n+\t\treturn;\n+\n+\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\twhile (color_match(cq_desc->pkt_type_color, cq->done_color)) {\n+\t\tcurr_cq_tail_idx = cq->tail_idx;\n+\t\tcq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);\n+\n+\t\tif (cq->tail_idx == 0)\n+\t\t\tcq->done_color = !cq->done_color;\n+\n+\t\t/* Prefetch the next 4 descriptors */\n+\t\tif ((cq->tail_idx & 0x3) == 0)\n+\t\t\trte_prefetch0(&cq_desc_base[cq->tail_idx]);\n+\n+\t\tdo {\n+\t\t\tmore = (q->tail_idx != cq_desc->comp_index);\n+\n+\t\t\tq_desc_info = &q->info[q->tail_idx];\n+\n+\t\t\tcurr_q_tail_idx = q->tail_idx;\n+\t\t\tq->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);\n+\n+\t\t\t/* Prefetch the next 4 descriptors */\n+\t\t\tif ((q->tail_idx & 0x3) == 0)\n+\t\t\t\t/* q desc info 
*/\n+\t\t\t\trte_prefetch0(&q->info[q->tail_idx]);\n+\n+\t\t\tionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,\n+\t\t\t\tq_desc_info->cb_arg, service_cb_arg);\n+\n+\t\t} while (more);\n+\n+\t\tif (++work_done == work_to_do)\n+\t\t\tbreak;\n+\n+\t\tcq_desc = &cq_desc_base[cq->tail_idx];\n+\t}\n+}\n+\n+/*\n+ * Stop Receive Units for specified queue.\n+ */\n+int __attribute__((cold))\n+ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n+{\n+\tstruct ionic_qcq *rxq;\n+\n+\tIONIC_PRINT_CALL();\n+\n+\trxq = eth_dev->data->rx_queues[rx_queue_id];\n+\n+\tionic_qcq_disable(rxq);\n+\n+\t/* Flush */\n+\tionic_rxq_service(&rxq->cq, -1, NULL);\n+\n+\tionic_lif_rxq_deinit(rxq);\n+\n+\teth_dev->data->rx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+uint16_t\n+ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;\n+\tuint32_t frame_size =\n+\t\trxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\tstruct ionic_cq *cq = &rxq->cq;\n+\tstruct ionic_rx_service service_cb_arg;\n+\n+\tservice_cb_arg.rx_pkts = rx_pkts;\n+\tservice_cb_arg.nb_pkts = nb_pkts;\n+\tservice_cb_arg.nb_rx = 0;\n+\n+\tionic_rxq_service(cq, nb_pkts, &service_cb_arg);\n+\n+\tionic_rx_fill(rxq, frame_size);\n+\n+\treturn service_cb_arg.nb_rx;\n+}\ndiff --git a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h\nnew file mode 100644\nindex 000000000..5c85b9c49\n--- /dev/null\n+++ b/drivers/net/ionic/ionic_rxtx.h\n@@ -0,0 +1,44 @@\n+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)\n+ * Copyright(c) 2018-2019 Pensando Systems, Inc. 
All rights reserved.\n+ */\n+\n+#ifndef _IONIC_RXTX_H_\n+#define _IONIC_RXTX_H_\n+\n+#include <rte_mbuf.h>\n+\n+struct ionic_rx_service {\n+\t/* cb in */\n+\tstruct rte_mbuf **rx_pkts;\n+\tuint16_t nb_pkts;\n+\t/* cb out */\n+\tuint16_t nb_rx;\n+};\n+\n+uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\tuint16_t nb_pkts);\n+uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\tuint16_t nb_pkts);\n+uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\tuint16_t nb_pkts);\n+\n+int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\tuint16_t nb_desc, uint32_t socket_id,\n+\tconst struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);\n+void ionic_dev_rx_queue_release(void *rxq);\n+int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);\n+\n+int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\tuint16_t nb_desc,  uint32_t socket_id,\n+\tconst struct rte_eth_txconf *tx_conf);\n+void ionic_dev_tx_queue_release(void *tx_queue);\n+int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);\n+int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+\n+void ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+void ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n+#endif /* _IONIC_RXTX_H_ */\ndiff --git a/drivers/net/ionic/meson.build b/drivers/net/ionic/meson.build\nindex ec7246753..dee8a3608 100644\n--- a/drivers/net/ionic/meson.build\n+++ b/drivers/net/ionic/meson.build\n@@ -4,6 +4,7 @@\n sources = files(\n \t'ionic_mac_api.c',\n \t'ionic_rx_filter.c',\n+\t'ionic_rxtx.c',\n \t'ionic_dev.c',\n \t'ionic_ethdev.c',\n \t'ionic_lif.c',\n",
    "prefixes": [
        "v3",
        "13/17"
    ]
}