get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request body are changed).

put:
Update a patch (full update).
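
As a usage illustration, here is a minimal sketch of driving this endpoint with Python's requests library. The token value is a placeholder (Patchwork issues per-user API tokens), write access requires maintainer rights on the project, and the writable fields typically include state, delegate, archived and commit_ref.

import requests

URL = "https://patches.dpdk.org/api/patches/73389/"
# Placeholder token; replace with a real per-user Patchwork API token.
HEADERS = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# GET: show the patch as JSON.
resp = requests.get(URL)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "state:", patch["state"])

# PATCH: partial update; only the fields in the request body change.
resp = requests.patch(URL, headers=HEADERS, json={"archived": True})
resp.raise_for_status()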

GET /api/patches/73389/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 73389,
    "url": "https://patches.dpdk.org/api/patches/73389/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200707092244.12791-10-hemant.agrawal@nxp.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200707092244.12791-10-hemant.agrawal@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200707092244.12791-10-hemant.agrawal@nxp.com",
    "date": "2020-07-07T09:22:24",
    "name": "[v2,09/29] net/dpaa: enable Tx queue taildrop",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ed417d3ab00fc544b7b303a4d59cbfdbf572829d",
    "submitter": {
        "id": 477,
        "url": "https://patches.dpdk.org/api/people/477/?format=api",
        "name": "Hemant Agrawal",
        "email": "hemant.agrawal@nxp.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200707092244.12791-10-hemant.agrawal@nxp.com/mbox/",
    "series": [
        {
            "id": 10842,
            "url": "https://patches.dpdk.org/api/series/10842/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10842",
            "date": "2020-07-07T09:22:15",
            "name": "NXP DPAAx enhancements",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/10842/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/73389/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/73389/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 05CE2A00BE;\n\tTue,  7 Jul 2020 11:28:40 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A75621DD39;\n\tTue,  7 Jul 2020 11:27:13 +0200 (CEST)",
            "from inva021.nxp.com (inva021.nxp.com [92.121.34.21])\n by dpdk.org (Postfix) with ESMTP id 5E0911DC91\n for <dev@dpdk.org>; Tue,  7 Jul 2020 11:27:04 +0200 (CEST)",
            "from inva021.nxp.com (localhost [127.0.0.1])\n by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id 357272008E1;\n Tue,  7 Jul 2020 11:27:04 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n [165.114.16.14])\n by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id 200E62008CE;\n Tue,  7 Jul 2020 11:27:02 +0200 (CEST)",
            "from bf-netperf1.ap.freescale.net (bf-netperf1.ap.freescale.net\n [10.232.133.63])\n by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id D7863402F7;\n Tue,  7 Jul 2020 17:26:59 +0800 (SGT)"
        ],
        "From": "Hemant Agrawal <hemant.agrawal@nxp.com>",
        "To": "dev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com,\n\tGagandeep Singh <g.singh@nxp.com>",
        "Date": "Tue,  7 Jul 2020 14:52:24 +0530",
        "Message-Id": "<20200707092244.12791-10-hemant.agrawal@nxp.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200707092244.12791-1-hemant.agrawal@nxp.com>",
        "References": "<20200527132326.1382-1-hemant.agrawal@nxp.com>\n <20200707092244.12791-1-hemant.agrawal@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH v2 09/29] net/dpaa: enable Tx queue taildrop",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Gagandeep Singh <g.singh@nxp.com>\n\nEnable congestion handling/tail drop for TX queues.\n\nSigned-off-by: Gagandeep Singh <g.singh@nxp.com>\n---\n drivers/bus/dpaa/base/qbman/qman.c        |  43 +++++++++\n drivers/bus/dpaa/include/fsl_qman.h       |  17 ++++\n drivers/bus/dpaa/rte_bus_dpaa_version.map |   2 +\n drivers/net/dpaa/dpaa_ethdev.c            | 111 ++++++++++++++++++++--\n drivers/net/dpaa/dpaa_ethdev.h            |   1 +\n drivers/net/dpaa/dpaa_rxtx.c              |  71 ++++++++++++++\n drivers/net/dpaa/dpaa_rxtx.h              |   3 +\n 7 files changed, 242 insertions(+), 6 deletions(-)",
    "diff": "diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c\nindex b596e79c2..447c09177 100644\n--- a/drivers/bus/dpaa/base/qbman/qman.c\n+++ b/drivers/bus/dpaa/base/qbman/qman.c\n@@ -40,6 +40,8 @@\n \t\t\tspin_unlock(&__fq478->fqlock); \\\n \t} while (0)\n \n+static qman_cb_free_mbuf qman_free_mbuf_cb;\n+\n static inline void fq_set(struct qman_fq *fq, u32 mask)\n {\n \tdpaa_set_bits(mask, &fq->flags);\n@@ -790,6 +792,47 @@ static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,\n \tFQUNLOCK(fq);\n }\n \n+void\n+qman_ern_register_cb(qman_cb_free_mbuf cb)\n+{\n+\tqman_free_mbuf_cb = cb;\n+}\n+\n+\n+void\n+qman_ern_poll_free(void)\n+{\n+\tstruct qman_portal *p = get_affine_portal();\n+\tu8 verb, num = 0;\n+\tconst struct qm_mr_entry *msg;\n+\tconst struct qm_fd *fd;\n+\tstruct qm_mr_entry swapped_msg;\n+\n+\tqm_mr_pvb_update(&p->p);\n+\tmsg = qm_mr_current(&p->p);\n+\n+\twhile (msg != NULL) {\n+\t\tswapped_msg = *msg;\n+\t\thw_fd_to_cpu(&swapped_msg.ern.fd);\n+\t\tverb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;\n+\t\tfd = &swapped_msg.ern.fd;\n+\n+\t\tif (unlikely(verb & 0x20)) {\n+\t\t\tprintf(\"HW ERN notification, Nothing to do\\n\");\n+\t\t} else {\n+\t\t\tif ((fd->bpid & 0xff) != 0xff)\n+\t\t\t\tqman_free_mbuf_cb(fd);\n+\t\t}\n+\n+\t\tnum++;\n+\t\tqm_mr_next(&p->p);\n+\t\tqm_mr_pvb_update(&p->p);\n+\t\tmsg = qm_mr_current(&p->p);\n+\t}\n+\n+\tqm_mr_cci_consume(&p->p, num);\n+}\n+\n static u32 __poll_portal_slow(struct qman_portal *p, u32 is)\n {\n \tconst struct qm_mr_entry *msg;\ndiff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h\nindex 78b698f39..0d9cfc339 100644\n--- a/drivers/bus/dpaa/include/fsl_qman.h\n+++ b/drivers/bus/dpaa/include/fsl_qman.h\n@@ -1158,6 +1158,10 @@ typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,\n /* This callback type is used when handling DCP ERNs */\n typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,\n \t\t\t\tconst struct qm_mr_entry *msg);\n+\n+/* This callback function will be used to free mbufs of ERN */\n+typedef uint16_t (*qman_cb_free_mbuf)(const struct qm_fd *fd);\n+\n /*\n  * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +\n  * held-active + held-suspended are just \"sched\". 
Things like \"retired\" will not\n@@ -1808,6 +1812,19 @@ __rte_internal\n int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,\n \t\t       int frames_to_send);\n \n+/**\n+ * qman_ern_poll_free - Polling on MR and calling a callback function to free\n+ * mbufs when SW ERNs received.\n+ */\n+__rte_internal\n+void qman_ern_poll_free(void);\n+\n+/**\n+ * qman_ern_register_cb - Register a callback function to free buffers.\n+ */\n+__rte_internal\n+void qman_ern_register_cb(qman_cb_free_mbuf cb);\n+\n /**\n  * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame\n  * queues.\ndiff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map\nindex 46d42f7d6..8069b05af 100644\n--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map\n+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map\n@@ -61,6 +61,8 @@ INTERNAL {\n \tqman_enqueue;\n \tqman_enqueue_multi;\n \tqman_enqueue_multi_fq;\n+\tqman_ern_poll_free;\n+\tqman_ern_register_cb;\n \tqman_fq_fqid;\n \tqman_fq_portal_irqsource_add;\n \tqman_fq_portal_irqsource_remove;\ndiff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c\nindex f1c9a7151..fd2c0c681 100644\n--- a/drivers/net/dpaa/dpaa_ethdev.c\n+++ b/drivers/net/dpaa/dpaa_ethdev.c\n@@ -1,7 +1,7 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n  *\n  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.\n- *   Copyright 2017-2019 NXP\n+ *   Copyright 2017-2020 NXP\n  *\n  */\n /* System headers */\n@@ -86,9 +86,12 @@ static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;\n static int dpaa_push_queue_idx; /* Queue index which are in push mode*/\n \n \n-/* Per FQ Taildrop in frame count */\n+/* Per RX FQ Taildrop in frame count */\n static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;\n \n+/* Per TX FQ Taildrop in frame count, disabled by default */\n+static unsigned int td_tx_threshold;\n+\n struct rte_dpaa_xstats_name_off {\n \tchar name[RTE_ETH_XSTATS_NAME_SIZE];\n \tuint32_t offset;\n@@ -275,7 +278,11 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \t/* Change tx callback to the real one */\n-\tdev->tx_pkt_burst = dpaa_eth_queue_tx;\n+\tif (dpaa_intf->cgr_tx)\n+\t\tdev->tx_pkt_burst = dpaa_eth_queue_tx_slow;\n+\telse\n+\t\tdev->tx_pkt_burst = dpaa_eth_queue_tx;\n+\n \tfman_if_enable_rx(dpaa_intf->fif);\n \n \treturn 0;\n@@ -867,6 +874,7 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \tDPAA_PMD_INFO(\"Tx queue setup for queue index: %d fq_id (0x%x)\",\n \t\t\tqueue_idx, dpaa_intf->tx_queues[queue_idx].fqid);\n \tdev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];\n+\n \treturn 0;\n }\n \n@@ -1236,9 +1244,19 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,\n \n /* Initialise a Tx FQ */\n static int dpaa_tx_queue_init(struct qman_fq *fq,\n-\t\t\t      struct fman_if *fman_intf)\n+\t\t\t      struct fman_if *fman_intf,\n+\t\t\t      struct qman_cgr *cgr_tx)\n {\n \tstruct qm_mcc_initfq opts = {0};\n+\tstruct qm_mcc_initcgr cgr_opts = {\n+\t\t.we_mask = QM_CGR_WE_CS_THRES |\n+\t\t\t\tQM_CGR_WE_CSTD_EN |\n+\t\t\t\tQM_CGR_WE_MODE,\n+\t\t.cgr = {\n+\t\t\t.cstd_en = QM_CGR_EN,\n+\t\t\t.mode = QMAN_CGR_MODE_FRAME\n+\t\t}\n+\t};\n \tint ret;\n \n \tret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |\n@@ -1257,6 +1275,27 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,\n \topts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;\n \topts.fqd.context_a.lo = 0 | 
fman_dealloc_bufs_mask_lo;\n \tDPAA_PMD_DEBUG(\"init tx fq %p, fqid 0x%x\", fq, fq->fqid);\n+\n+\tif (cgr_tx) {\n+\t\t/* Enable tail drop with cgr on this queue */\n+\t\tqm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,\n+\t\t\t\t      td_tx_threshold, 0);\n+\t\tcgr_tx->cb = NULL;\n+\t\tret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,\n+\t\t\t\t      &cgr_opts);\n+\t\tif (ret) {\n+\t\t\tDPAA_PMD_WARN(\n+\t\t\t\t\"rx taildrop init fail on rx fqid 0x%x(ret=%d)\",\n+\t\t\t\tfq->fqid, ret);\n+\t\t\tgoto without_cgr;\n+\t\t}\n+\t\topts.we_mask |= QM_INITFQ_WE_CGID;\n+\t\topts.fqd.cgid = cgr_tx->cgrid;\n+\t\topts.fqd.fq_ctrl |= QM_FQCTRL_CGE;\n+\t\tDPAA_PMD_DEBUG(\"Tx FQ tail drop enabled, threshold = %d\\n\",\n+\t\t\t\ttd_tx_threshold);\n+\t}\n+without_cgr:\n \tret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);\n \tif (ret)\n \t\tDPAA_PMD_ERR(\"init tx fqid 0x%x failed %d\", fq->fqid, ret);\n@@ -1309,6 +1348,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \tstruct fman_if *fman_intf;\n \tstruct fman_if_bpool *bp, *tmp_bp;\n \tuint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];\n+\tuint32_t cgrid_tx[MAX_DPAA_CORES];\n \tchar eth_buf[RTE_ETHER_ADDR_FMT_SIZE];\n \n \tPMD_INIT_FUNC_TRACE();\n@@ -1319,7 +1359,10 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \t\teth_dev->dev_ops = &dpaa_devops;\n \t\t/* Plugging of UCODE burst API not supported in Secondary */\n \t\teth_dev->rx_pkt_burst = dpaa_eth_queue_rx;\n-\t\teth_dev->tx_pkt_burst = dpaa_eth_queue_tx;\n+\t\tif (dpaa_intf->cgr_tx)\n+\t\t\teth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;\n+\t\telse\n+\t\t\teth_dev->tx_pkt_burst = dpaa_eth_queue_tx;\n #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP\n \t\tqman_set_fq_lookup_table(\n \t\t\t\tdpaa_intf->rx_queues->qman_fq_lookup_table);\n@@ -1366,6 +1409,21 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \t\treturn -ENOMEM;\n \t}\n \n+\tmemset(cgrid, 0, sizeof(cgrid));\n+\tmemset(cgrid_tx, 0, sizeof(cgrid_tx));\n+\n+\t/* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means\n+\t * Tx tail drop is disabled.\n+\t */\n+\tif (getenv(\"DPAA_TX_TAILDROP_THRESHOLD\")) {\n+\t\ttd_tx_threshold = atoi(getenv(\"DPAA_TX_TAILDROP_THRESHOLD\"));\n+\t\tDPAA_PMD_DEBUG(\"Tail drop threshold env configured: %u\",\n+\t\t\t       td_tx_threshold);\n+\t\t/* if a very large value is being configured */\n+\t\tif (td_tx_threshold > UINT16_MAX)\n+\t\t\ttd_tx_threshold = CGR_RX_PERFQ_THRESH;\n+\t}\n+\n \t/* If congestion control is enabled globally*/\n \tif (td_threshold) {\n \t\tdpaa_intf->cgr_rx = rte_zmalloc(NULL,\n@@ -1414,9 +1472,36 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \t\tgoto free_rx;\n \t}\n \n+\t/* If congestion control is enabled globally*/\n+\tif (td_tx_threshold) {\n+\t\tdpaa_intf->cgr_tx = rte_zmalloc(NULL,\n+\t\t\tsizeof(struct qman_cgr) * MAX_DPAA_CORES,\n+\t\t\tMAX_CACHELINE);\n+\t\tif (!dpaa_intf->cgr_tx) {\n+\t\t\tDPAA_PMD_ERR(\"Failed to alloc mem for cgr_tx\\n\");\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto free_rx;\n+\t\t}\n+\n+\t\tret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,\n+\t\t\t\t\t     1, 0);\n+\t\tif (ret != MAX_DPAA_CORES) {\n+\t\t\tDPAA_PMD_WARN(\"insufficient CGRIDs available\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto free_rx;\n+\t\t}\n+\t} else {\n+\t\tdpaa_intf->cgr_tx = NULL;\n+\t}\n+\n+\n \tfor (loop = 0; loop < MAX_DPAA_CORES; loop++) {\n+\t\tif (dpaa_intf->cgr_tx)\n+\t\t\tdpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];\n+\n \t\tret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],\n-\t\t\t\t\t fman_intf);\n+\t\t\tfman_intf,\n+\t\t\tdpaa_intf->cgr_tx ? 
&dpaa_intf->cgr_tx[loop] : NULL);\n \t\tif (ret)\n \t\t\tgoto free_tx;\n \t\tdpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;\n@@ -1487,6 +1572,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \n free_rx:\n \trte_free(dpaa_intf->cgr_rx);\n+\trte_free(dpaa_intf->cgr_tx);\n \trte_free(dpaa_intf->rx_queues);\n \tdpaa_intf->rx_queues = NULL;\n \tdpaa_intf->nb_rx_queues = 0;\n@@ -1527,6 +1613,17 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)\n \trte_free(dpaa_intf->cgr_rx);\n \tdpaa_intf->cgr_rx = NULL;\n \n+\t/* Release TX congestion Groups */\n+\tif (dpaa_intf->cgr_tx) {\n+\t\tfor (loop = 0; loop < MAX_DPAA_CORES; loop++)\n+\t\t\tqman_delete_cgr(&dpaa_intf->cgr_tx[loop]);\n+\n+\t\tqman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,\n+\t\t\t\t\t MAX_DPAA_CORES);\n+\t\trte_free(dpaa_intf->cgr_tx);\n+\t\tdpaa_intf->cgr_tx = NULL;\n+\t}\n+\n \trte_free(dpaa_intf->rx_queues);\n \tdpaa_intf->rx_queues = NULL;\n \n@@ -1631,6 +1728,8 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,\n \teth_dev->device = &dpaa_dev->device;\n \tdpaa_dev->eth_dev = eth_dev;\n \n+\tqman_ern_register_cb(dpaa_free_mbuf);\n+\n \t/* Invoke PMD device initialization function */\n \tdiag = dpaa_dev_init(eth_dev);\n \tif (diag == 0) {\ndiff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h\nindex 6a6477ac8..d4261f885 100644\n--- a/drivers/net/dpaa/dpaa_ethdev.h\n+++ b/drivers/net/dpaa/dpaa_ethdev.h\n@@ -111,6 +111,7 @@ struct dpaa_if {\n \tstruct qman_fq *rx_queues;\n \tstruct qman_cgr *cgr_rx;\n \tstruct qman_fq *tx_queues;\n+\tstruct qman_cgr *cgr_tx;\n \tstruct qman_fq debug_queues[2];\n \tuint16_t nb_rx_queues;\n \tuint16_t nb_tx_queues;\ndiff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c\nindex 3aeecb7d2..819cad7c6 100644\n--- a/drivers/net/dpaa/dpaa_rxtx.c\n+++ b/drivers/net/dpaa/dpaa_rxtx.c\n@@ -398,6 +398,69 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)\n \treturn mbuf;\n }\n \n+uint16_t\n+dpaa_free_mbuf(const struct qm_fd *fd)\n+{\n+\tstruct rte_mbuf *mbuf;\n+\tstruct dpaa_bp_info *bp_info;\n+\tuint8_t format;\n+\tvoid *ptr;\n+\n+\tbp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);\n+\tformat = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;\n+\tif (unlikely(format == qm_fd_sg)) {\n+\t\tstruct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;\n+\t\tstruct qm_sg_entry *sgt, *sg_temp;\n+\t\tvoid *vaddr, *sg_vaddr;\n+\t\tint i = 0;\n+\t\tuint16_t fd_offset = fd->offset;\n+\n+\t\tvaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));\n+\t\tif (!vaddr) {\n+\t\t\tDPAA_PMD_ERR(\"unable to convert physical address\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tsgt = vaddr + fd_offset;\n+\t\tsg_temp = &sgt[i++];\n+\t\thw_sg_to_cpu(sg_temp);\n+\t\ttemp = (struct rte_mbuf *)\n+\t\t\t((char *)vaddr - bp_info->meta_data_size);\n+\t\tsg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,\n+\t\t\t\t\t\tqm_sg_entry_get64(sg_temp));\n+\n+\t\tfirst_seg = (struct rte_mbuf *)((char *)sg_vaddr -\n+\t\t\t\t\t\tbp_info->meta_data_size);\n+\t\tfirst_seg->nb_segs = 1;\n+\t\tprev_seg = first_seg;\n+\t\twhile (i < DPAA_SGT_MAX_ENTRIES) {\n+\t\t\tsg_temp = &sgt[i++];\n+\t\t\thw_sg_to_cpu(sg_temp);\n+\t\t\tsg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,\n+\t\t\t\t\t\tqm_sg_entry_get64(sg_temp));\n+\t\t\tcur_seg = (struct rte_mbuf *)((char *)sg_vaddr -\n+\t\t\t\t\t\t      bp_info->meta_data_size);\n+\t\t\tfirst_seg->nb_segs += 1;\n+\t\t\tprev_seg->next = cur_seg;\n+\t\t\tif (sg_temp->final) {\n+\t\t\t\tcur_seg->next = NULL;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tprev_seg = 
cur_seg;\n+\t\t}\n+\n+\t\trte_pktmbuf_free_seg(temp);\n+\t\trte_pktmbuf_free_seg(first_seg);\n+\t\treturn 0;\n+\t}\n+\n+\tptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));\n+\tmbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);\n+\n+\trte_pktmbuf_free(mbuf);\n+\n+\treturn 0;\n+}\n+\n /* Specific for LS1043 */\n void\n dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,\n@@ -1011,6 +1074,14 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n \treturn sent;\n }\n \n+uint16_t\n+dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n+{\n+\tqman_ern_poll_free();\n+\n+\treturn dpaa_eth_queue_tx(q, bufs, nb_bufs);\n+}\n+\n uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,\n \t\t\t      struct rte_mbuf **bufs __rte_unused,\n \t\tuint16_t nb_bufs __rte_unused)\ndiff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h\nindex 4f896fba1..fe8eb6dc7 100644\n--- a/drivers/net/dpaa/dpaa_rxtx.h\n+++ b/drivers/net/dpaa/dpaa_rxtx.h\n@@ -254,6 +254,8 @@ struct annotations_t {\n \n uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);\n \n+uint16_t dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs,\n+\t\t\t\tuint16_t nb_bufs);\n uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);\n \n uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,\n@@ -266,6 +268,7 @@ int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,\n \t\t\t   struct qm_fd *fd,\n \t\t\t   uint32_t bpid);\n \n+uint16_t dpaa_free_mbuf(const struct qm_fd *fd);\n void dpaa_rx_cb(struct qman_fq **fq,\n \t\tstruct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);\n \n",
    "prefixes": [
        "v2",
        "09/29"
    ]
}
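
The mbox URL in the response returns the patch as a raw mail message that can be applied to a source tree with git am. A minimal sketch, again using the requests library; the local filename is illustrative.

import requests

# The "mbox" URL taken from the response above.
MBOX_URL = ("https://patches.dpdk.org/project/dpdk/patch/"
            "20200707092244.12791-10-hemant.agrawal@nxp.com/mbox/")

resp = requests.get(MBOX_URL)
resp.raise_for_status()
with open("dpaa-tx-taildrop.mbox", "wb") as f:
    f.write(resp.content)  # apply with: git am dpaa-tx-taildrop.mbox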