get:
Show a patch.

patch:
Update a patch (partial update of only the supplied fields).

put:
Update a patch (complete update).

GET /api/patches/24712/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 24712,
    "url": "https://patches.dpdk.org/api/patches/24712/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/fb31c96cb91161a3979f0a3b409a93e419453bfe.1495856647.git.rahul.lakkireddy@chelsio.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<fb31c96cb91161a3979f0a3b409a93e419453bfe.1495856647.git.rahul.lakkireddy@chelsio.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/fb31c96cb91161a3979f0a3b409a93e419453bfe.1495856647.git.rahul.lakkireddy@chelsio.com",
    "date": "2017-05-27T03:47:57",
    "name": "[dpdk-dev,1/4] cxgbe: improve latency for slow traffic",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8749f09909a71db76bf0568e9f628534887d8168",
    "submitter": {
        "id": 241,
        "url": "https://patches.dpdk.org/api/people/241/?format=api",
        "name": "Rahul Lakkireddy",
        "email": "rahul.lakkireddy@chelsio.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/fb31c96cb91161a3979f0a3b409a93e419453bfe.1495856647.git.rahul.lakkireddy@chelsio.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/24712/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/24712/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 1A4D4377A;\n\tSat, 27 May 2017 07:47:51 +0200 (CEST)",
            "from stargate.chelsio.com (stargate.chelsio.com [12.32.117.8])\n\tby dpdk.org (Postfix) with ESMTP id 464E6378E\n\tfor <dev@dpdk.org>; Sat, 27 May 2017 07:47:49 +0200 (CEST)",
            "from localhost (scalar.blr.asicdesigners.com [10.193.185.94])\n\tby stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id v4R5liqi016592; \n\tFri, 26 May 2017 22:47:45 -0700"
        ],
        "From": "Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>",
        "To": "dev@dpdk.org",
        "Cc": "Nirranjan Kirubaharan <nirranjan@chelsio.com>,\n\tIndranil Choudhury <indranil@chelsio.com>,\n\tKumar Sanghvi <kumaras@chelsio.com>",
        "Date": "Sat, 27 May 2017 09:17:57 +0530",
        "Message-Id": "<fb31c96cb91161a3979f0a3b409a93e419453bfe.1495856647.git.rahul.lakkireddy@chelsio.com>",
        "X-Mailer": "git-send-email 2.5.3",
        "In-Reply-To": [
            "<cover.1495856647.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1495856647.git.rahul.lakkireddy@chelsio.com>"
        ],
        "References": [
            "<cover.1495856647.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1495856647.git.rahul.lakkireddy@chelsio.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 1/4] cxgbe: improve latency for slow traffic",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "TX coalescing waits for ETH_COALESCE_PKT_NUM packets to be coalesced\nacross bursts before transmitting them.  For slow traffic, such as\n100 PPS, this approach increases latency since packets are received\none at a time and tx coalescing has to wait for ETH_COALESCE_PKT\nnumber of packets to arrive before transmitting.\n\nTo fix this:\n\n- Update rx path to use status page instead and only receive packets\n  when either the ingress interrupt timer threshold (5 us) or\n  the ingress interrupt packet count threshold (32 packets) fires.\n  (i.e. whichever happens first).\n\n- If number of packets coalesced is <= number of packets sent\n  by tx burst function, stop coalescing and transmit these packets\n  immediately.\n\nAlso added compile time option to favor throughput over latency by\ndefault.\n\nSigned-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>\nSigned-off-by: Kumar Sanghvi <kumaras@chelsio.com>\n---\n config/common_base                      |   3 +-\n doc/guides/nics/cxgbe.rst               |   4 ++\n doc/guides/rel_notes/release_17_08.rst  |   5 ++\n drivers/net/cxgbe/base/adapter.h        |   4 +-\n drivers/net/cxgbe/base/t4_regs_values.h |   2 +-\n drivers/net/cxgbe/base/t4fw_interface.h |   8 +++\n drivers/net/cxgbe/cxgbe_compat.h        |  11 +++-\n drivers/net/cxgbe/cxgbe_ethdev.c        |   3 +-\n drivers/net/cxgbe/cxgbe_main.c          |   5 +-\n drivers/net/cxgbe/sge.c                 | 109 ++++++++++++++++----------------\n 10 files changed, 92 insertions(+), 62 deletions(-)",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 67ef2ec..b2a6ff6 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -240,7 +240,7 @@ CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n\n CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n\n \n #\n-# Compile burst-oriented Chelsio Terminator 10GbE/40GbE (CXGBE) PMD\n+# Compile burst-oriented Chelsio Terminator (CXGBE) PMD\n #\n CONFIG_RTE_LIBRTE_CXGBE_PMD=y\n CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n\n@@ -248,6 +248,7 @@ CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n\n CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n\n CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n\n CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n\n+CONFIG_RTE_LIBRTE_CXGBE_TPUT=y\n \n #\n # Compile burst-oriented Cisco ENIC PMD driver\ndiff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst\nindex 176c189..8651a7b 100644\n--- a/doc/guides/nics/cxgbe.rst\n+++ b/doc/guides/nics/cxgbe.rst\n@@ -130,6 +130,10 @@ enabling debugging options may affect system performance.\n \n   Toggle display of receiving data path run-time check messages.\n \n+- ``CONFIG_RTE_LIBRTE_CXGBE_TPUT`` (default **y**)\n+\n+  Toggle behaviour to prefer Throughput or Latency.\n+\n .. _driver-compilation:\n \n Driver compilation and testing\ndiff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst\nindex 39a3398..bd4ea2c 100644\n--- a/doc/guides/rel_notes/release_17_08.rst\n+++ b/doc/guides/rel_notes/release_17_08.rst\n@@ -79,6 +79,11 @@ EAL\n Drivers\n ~~~~~~~\n \n+* **net/cxgbe: latency and performance improvements**\n+\n+  TX and RX path reworked to improve performance.  Also reduced latency\n+  for slow traffic.\n+\n \n Libraries\n ~~~~~~~~~\ndiff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h\nindex cc89e49..58c6903 100644\n--- a/drivers/net/cxgbe/base/adapter.h\n+++ b/drivers/net/cxgbe/base/adapter.h\n@@ -148,6 +148,7 @@ struct sge_rspq {                   /* state for an SGE response queue */\n \n \tvoid __iomem *bar2_addr;    /* address of BAR2 Queue registers */\n \tunsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */\n+\tstruct sge_qstat *stat;\n \n \tunsigned int cidx;          /* consumer index */\n \tunsigned int gts_idx;\t    /* last gts write sent */\n@@ -708,7 +709,8 @@ void reclaim_completed_tx(struct sge_txq *q);\n void t4_free_sge_resources(struct adapter *adap);\n void t4_sge_tx_monitor_start(struct adapter *adap);\n void t4_sge_tx_monitor_stop(struct adapter *adap);\n-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf);\n+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n+\t\tuint16_t nb_pkts);\n int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,\n \t\t     const struct pkt_gl *gl);\n int t4_sge_init(struct adapter *adap);\ndiff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h\nindex 1326594..9085ff6d 100644\n--- a/drivers/net/cxgbe/base/t4_regs_values.h\n+++ b/drivers/net/cxgbe/base/t4_regs_values.h\n@@ -82,7 +82,7 @@\n /*\n  * Ingress Context field values\n  */\n-#define X_UPDATEDELIVERY_INTERRUPT\t1\n+#define X_UPDATEDELIVERY_STATUS_PAGE\t2\n \n #define X_RSPD_TYPE_FLBUF\t\t0\n #define X_RSPD_TYPE_CPL\t\t\t1\ndiff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h\nindex fcc61bf..6283fe9 100644\n--- a/drivers/net/cxgbe/base/t4fw_interface.h\n+++ b/drivers/net/cxgbe/base/t4fw_interface.h\n@@ -84,6 +84,7 @@ enum fw_memtype {\n enum fw_wr_opcodes {\n \tFW_ETH_TX_PKT_WR\t= 0x08,\n \tFW_ETH_TX_PKTS_WR\t= 
0x09,\n+\tFW_ETH_TX_PKTS2_WR      = 0x78,\n };\n \n /*\n@@ -591,6 +592,13 @@ struct fw_iq_cmd {\n #define G_FW_IQ_CMD_IQESIZE(x)\t\\\n \t(((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)\n \n+#define S_FW_IQ_CMD_IQRO                30\n+#define M_FW_IQ_CMD_IQRO                0x1\n+#define V_FW_IQ_CMD_IQRO(x)             ((x) << S_FW_IQ_CMD_IQRO)\n+#define G_FW_IQ_CMD_IQRO(x)             \\\n+\t(((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)\n+#define F_FW_IQ_CMD_IQRO                V_FW_IQ_CMD_IQRO(1U)\n+\n #define S_FW_IQ_CMD_IQFLINTCONGEN\t27\n #define M_FW_IQ_CMD_IQFLINTCONGEN\t0x1\n #define V_FW_IQ_CMD_IQFLINTCONGEN(x)\t((x) << S_FW_IQ_CMD_IQFLINTCONGEN)\ndiff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h\nindex 1551cbf..03bba9f 100644\n--- a/drivers/net/cxgbe/cxgbe_compat.h\n+++ b/drivers/net/cxgbe/cxgbe_compat.h\n@@ -1,7 +1,7 @@\n /*-\n  *   BSD LICENSE\n  *\n- *   Copyright(c) 2014-2015 Chelsio Communications.\n+ *   Copyright(c) 2014-2017 Chelsio Communications.\n  *   All rights reserved.\n  *\n  *   Redistribution and use in source and binary forms, with or without\n@@ -226,6 +226,15 @@ static inline int cxgbe_fls(int x)\n \treturn x ? sizeof(x) * 8 - __builtin_clz(x) : 0;\n }\n \n+/**\n+ * cxgbe_ffs - find first bit set\n+ * @x: the word to search\n+ */\n+static inline int cxgbe_ffs(int x)\n+{\n+\treturn x ? __builtin_ffs(x) : 0;\n+}\n+\n static inline unsigned long ilog2(unsigned long n)\n {\n \tunsigned int e = 0;\ndiff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c\nindex ac70f22..7282575 100644\n--- a/drivers/net/cxgbe/cxgbe_ethdev.c\n+++ b/drivers/net/cxgbe/cxgbe_ethdev.c\n@@ -104,7 +104,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tpkts_remain = nb_pkts - total_sent;\n \n \t\tfor (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {\n-\t\t\tret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent]);\n+\t\t\tret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],\n+\t\t\t\t\t  nb_pkts);\n \t\t\tif (ret < 0)\n \t\t\t\tbreak;\n \t\t}\ndiff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c\nindex 42238ef..2522354 100644\n--- a/drivers/net/cxgbe/cxgbe_main.c\n+++ b/drivers/net/cxgbe/cxgbe_main.c\n@@ -301,7 +301,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)\n \t\tfor (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {\n \t\t\tstruct sge_eth_rxq *r = &s->ethrxq[i];\n \n-\t\t\tinit_rspq(adap, &r->rspq, 0, 0, 1024, 64);\n+\t\t\tinit_rspq(adap, &r->rspq, 5, 32, 1024, 64);\n \t\t\tr->usembufs = 1;\n \t\t\tr->fl.size = (r->usembufs ? 
1024 : 72);\n \t\t}\n@@ -445,6 +445,9 @@ static int adap_init0_tweaks(struct adapter *adapter)\n \t\t\t V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,\n \t\t\t V_CREDITCNT(3) | V_CREDITCNTPACKING(1));\n \n+\tt4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,\n+\t\t\t V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));\n+\n \tt4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),\n \t\t\t V_IDMAARBROUNDROBIN(1U));\n \ndiff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c\nindex 020879a..d98c3f6 100644\n--- a/drivers/net/cxgbe/sge.c\n+++ b/drivers/net/cxgbe/sge.c\n@@ -848,7 +848,7 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,\n \n \t/* fill the pkts WR header */\n \twr = (void *)&q->desc[q->pidx];\n-\twr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));\n+\twr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));\n \n \twr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));\n \tndesc = flits_to_desc(q->coalesce.flits);\n@@ -971,7 +971,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,\n \t\t\t\t\tstruct rte_mbuf *mbuf,\n \t\t\t\t\tint flits, struct adapter *adap,\n \t\t\t\t\tconst struct port_info *pi,\n-\t\t\t\t\tdma_addr_t *addr)\n+\t\t\t\t\tdma_addr_t *addr, uint16_t nb_pkts)\n {\n \tu64 cntrl, *end;\n \tstruct sge_txq *q = &txq->q;\n@@ -981,6 +981,10 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,\n \tstruct tx_sw_desc *sd;\n \tunsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;\n \n+#ifdef RTE_LIBRTE_CXGBE_TPUT\n+\tRTE_SET_USED(nb_pkts);\n+#endif\n+\n \tif (q->coalesce.type == 0) {\n \t\tmc = (struct ulp_txpkt *)q->coalesce.ptr;\n \t\tmc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |\n@@ -1050,7 +1054,11 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,\n \tsd->coalesce.idx = (idx & 1) + 1;\n \n \t/* send the coaelsced work request if max reached */\n-\tif (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)\n+\tif (++q->coalesce.idx == ETH_COALESCE_PKT_NUM\n+#ifndef RTE_LIBRTE_CXGBE_TPUT\n+\t    || q->coalesce.idx >= nb_pkts\n+#endif\n+\t    )\n \t\tship_tx_pkt_coalesce_wr(adap, txq);\n \treturn 0;\n }\n@@ -1062,7 +1070,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,\n  *\n  * Add a packet to an SGE Ethernet Tx queue.  
Runs with softirqs disabled.\n  */\n-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)\n+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n+\t\tuint16_t nb_pkts)\n {\n \tconst struct port_info *pi;\n \tstruct cpl_tx_pkt_lso_core *lso;\n@@ -1116,7 +1125,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)\n \t\t\t}\n \t\t\trte_prefetch0((volatile void *)addr);\n \t\t\treturn tx_do_packet_coalesce(txq, mbuf, cflits, adap,\n-\t\t\t\t\t\t     pi, addr);\n+\t\t\t\t\t\t     pi, addr, nb_pkts);\n \t\t} else {\n \t\t\treturn -EBUSY;\n \t\t}\n@@ -1398,20 +1407,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,\n \treturn 0;\n }\n \n-/**\n- * is_new_response - check if a response is newly written\n- * @r: the response descriptor\n- * @q: the response queue\n- *\n- * Returns true if a response descriptor contains a yet unprocessed\n- * response.\n- */\n-static inline bool is_new_response(const struct rsp_ctrl *r,\n-\t\t\t\t   const struct sge_rspq *q)\n-{\n-\treturn (r->u.type_gen >> S_RSPD_GEN) == q->gen;\n-}\n-\n #define CXGB4_MSG_AN ((void *)1)\n \n /**\n@@ -1453,12 +1448,12 @@ static int process_responses(struct sge_rspq *q, int budget,\n \tstruct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);\n \n \twhile (likely(budget_left)) {\n+\t\tif (q->cidx == ntohs(q->stat->pidx))\n+\t\t\tbreak;\n+\n \t\trc = (const struct rsp_ctrl *)\n \t\t     ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));\n \n-\t\tif (!is_new_response(rc, q))\n-\t\t\tbreak;\n-\n \t\t/*\n \t\t * Ensure response has been read\n \t\t */\n@@ -1548,35 +1543,6 @@ static int process_responses(struct sge_rspq *q, int budget,\n \n \t\trspq_next(q);\n \t\tbudget_left--;\n-\n-\t\tif (R_IDXDIFF(q, gts_idx) >= 64) {\n-\t\t\tunsigned int cidx_inc = R_IDXDIFF(q, gts_idx);\n-\t\t\tunsigned int params;\n-\t\t\tu32 val;\n-\n-\t\t\tif (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)\n-\t\t\t\t__refill_fl(q->adapter, &rxq->fl);\n-\t\t\tparams = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);\n-\t\t\tq->next_intr_params = params;\n-\t\t\tval = V_CIDXINC(cidx_inc) | V_SEINTARM(params);\n-\n-\t\t\tif (unlikely(!q->bar2_addr))\n-\t\t\t\tt4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),\n-\t\t\t\t\t     val |\n-\t\t\t\t\t     V_INGRESSQID((u32)q->cntxt_id));\n-\t\t\telse {\n-\t\t\t\twritel(val | V_INGRESSQID(q->bar2_qid),\n-\t\t\t\t       (void *)((uintptr_t)q->bar2_addr +\n-\t\t\t\t       SGE_UDB_GTS));\n-\t\t\t\t/*\n-\t\t\t\t * This Write memory Barrier will force the\n-\t\t\t\t * write to the User Doorbell area to be\n-\t\t\t\t * flushed.\n-\t\t\t\t */\n-\t\t\t\twmb();\n-\t\t\t}\n-\t\t\tq->gts_idx = q->cidx;\n-\t\t}\n \t}\n \n \t/*\n@@ -1594,10 +1560,38 @@ static int process_responses(struct sge_rspq *q, int budget,\n int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,\n \t       unsigned int budget, unsigned int *work_done)\n {\n-\tint err = 0;\n+\tstruct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);\n+\tunsigned int cidx_inc;\n+\tunsigned int params;\n+\tu32 val;\n \n \t*work_done = process_responses(q, budget, rx_pkts);\n-\treturn err;\n+\n+\tif (*work_done) {\n+\t\tcidx_inc = R_IDXDIFF(q, gts_idx);\n+\n+\t\tif (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)\n+\t\t\t__refill_fl(q->adapter, &rxq->fl);\n+\n+\t\tparams = q->intr_params;\n+\t\tq->next_intr_params = params;\n+\t\tval = V_CIDXINC(cidx_inc) | V_SEINTARM(params);\n+\n+\t\tif (unlikely(!q->bar2_addr)) {\n+\t\t\tt4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),\n+\t\t\t\t     val | 
V_INGRESSQID((u32)q->cntxt_id));\n+\t\t} else {\n+\t\t\twritel(val | V_INGRESSQID(q->bar2_qid),\n+\t\t\t       (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));\n+\t\t\t/* This Write memory Barrier will force the\n+\t\t\t * write to the User Doorbell area to be\n+\t\t\t * flushed.\n+\t\t\t */\n+\t\t\twmb();\n+\t\t}\n+\t\tq->gts_idx = q->cidx;\n+\t}\n+\treturn 0;\n }\n \n /**\n@@ -1687,18 +1681,20 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \t\t      V_FW_IQ_CMD_IQASYNCH(fwevtq) |\n \t\t      V_FW_IQ_CMD_VIID(pi->viid) |\n \t\t      V_FW_IQ_CMD_IQANDST(intr_idx < 0) |\n-\t\t      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |\n+\t\t      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |\n \t\t      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :\n \t\t\t\t\t\t\t       -intr_idx - 1));\n \tc.iqdroprss_to_iqesize =\n-\t\thtons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |\n+\t\thtons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :\n+\t\t\t\t\t\t      pi->tx_chan) |\n \t\t      F_FW_IQ_CMD_IQGTSMODE |\n \t\t      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |\n \t\t      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));\n \tc.iqsize = htons(iq->size);\n \tc.iqaddr = cpu_to_be64(iq->phys_addr);\n \tif (cong >= 0)\n-\t\tc.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);\n+\t\tc.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |\n+\t\t\t\t\t    F_FW_IQ_CMD_IQRO);\n \n \tif (fl) {\n \t\tstruct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,\n@@ -1773,6 +1769,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \tiq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,\n \t\t\t\t     &iq->bar2_qid);\n \tiq->size--;                           /* subtract status entry */\n+\tiq->stat = (void *)&iq->desc[iq->size * 8];\n \tiq->eth_dev = eth_dev;\n \tiq->handler = hnd;\n \tiq->port_id = pi->port_id;\n",
    "prefixes": [
        "dpdk-dev",
        "1/4"
    ]
}
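
The response embeds URLs for related resources (mbox, comments, checks). A minimal sketch of following those links, again with Python requests, is shown below; the per-check field names "context" and "state" are assumptions about the checks collection, which is not part of the response above.

    import requests

    patch = requests.get("https://patches.dpdk.org/api/patches/24712/").json()

    # "mbox" points at the raw patch in mbox form, suitable for `git am`.
    mbox = requests.get(patch["mbox"]).text
    with open("cxgbe-latency.patch", "w") as f:
        f.write(mbox)

    # "checks" is itself an API collection; the summary "check" field above
    # ("warning") reflects the combined state of the individual checks.
    for check in requests.get(patch["checks"]).json():
        print(check.get("context"), check.get("state"))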