get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).

GET /api/patches/35946/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 35946,
    "url": "https://patches.dpdk.org/api/patches/35946/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/26af679b31d10f695a23e0dc3ba6bc202c0d6bb1.1520720053.git.rahul.lakkireddy@chelsio.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<26af679b31d10f695a23e0dc3ba6bc202c0d6bb1.1520720053.git.rahul.lakkireddy@chelsio.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/26af679b31d10f695a23e0dc3ba6bc202c0d6bb1.1520720053.git.rahul.lakkireddy@chelsio.com",
    "date": "2018-03-10T22:48:23",
    "name": "[dpdk-dev,05/13] cxgbe: initialize SGE and queues for VF",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8d4c37d738b6335086f79bcb08a8e754191ec65e",
    "submitter": {
        "id": 241,
        "url": "https://patches.dpdk.org/api/people/241/?format=api",
        "name": "Rahul Lakkireddy",
        "email": "rahul.lakkireddy@chelsio.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/26af679b31d10f695a23e0dc3ba6bc202c0d6bb1.1520720053.git.rahul.lakkireddy@chelsio.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/35946/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/35946/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2946EAAA0;\n\tSat, 10 Mar 2018 23:49:42 +0100 (CET)",
            "from stargate.chelsio.com (stargate.chelsio.com [12.32.117.8])\n\tby dpdk.org (Postfix) with ESMTP id 3CEAAAAAC\n\tfor <dev@dpdk.org>; Sat, 10 Mar 2018 23:49:40 +0100 (CET)",
            "from localhost (scalar.blr.asicdesigners.com [10.193.185.94])\n\tby stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id w2AMnZgb014054; \n\tSat, 10 Mar 2018 14:49:36 -0800"
        ],
        "From": "Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>",
        "To": "dev@dpdk.org",
        "Cc": "kumaras@chelsio.com, nirranjan@chelsio.com, indranil@chelsio.com",
        "Date": "Sun, 11 Mar 2018 04:18:23 +0530",
        "Message-Id": "<26af679b31d10f695a23e0dc3ba6bc202c0d6bb1.1520720053.git.rahul.lakkireddy@chelsio.com>",
        "X-Mailer": "git-send-email 2.5.3",
        "In-Reply-To": [
            "<cover.1520720053.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1520720053.git.rahul.lakkireddy@chelsio.com>"
        ],
        "References": [
            "<cover.1520720053.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1520720053.git.rahul.lakkireddy@chelsio.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 05/13] cxgbe: initialize SGE and queues for VF",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Kumar Sanghvi <kumaras@chelsio.com>\n\nQuery firmware and initialize SGE parameters and enable queue\nallocation for VF.  Calculate pcie channel and queue congestion\nmanagement for VF.\n\nSigned-off-by: Kumar Sanghvi <kumaras@chelsio.com>\nSigned-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>\n---\n drivers/net/cxgbe/base/adapter.h        |   2 +\n drivers/net/cxgbe/base/common.h         |   1 +\n drivers/net/cxgbe/base/t4_regs.h        |  12 ++\n drivers/net/cxgbe/base/t4fw_interface.h |   6 +\n drivers/net/cxgbe/base/t4vf_hw.c        |  40 ++++++\n drivers/net/cxgbe/cxgbe_ethdev.c        |  13 +-\n drivers/net/cxgbe/cxgbe_main.c          |   3 +-\n drivers/net/cxgbe/cxgbevf_main.c        |   5 +\n drivers/net/cxgbe/sge.c                 | 239 +++++++++++++++++++++++++++++---\n 9 files changed, 296 insertions(+), 25 deletions(-)",
    "diff": "diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h\nindex 6b2fc8b34..95752d1b4 100644\n--- a/drivers/net/cxgbe/base/adapter.h\n+++ b/drivers/net/cxgbe/base/adapter.h\n@@ -249,6 +249,7 @@ struct sge_txq {\n \tunsigned int equeidx;\t   /* last sent credit request */\n \tunsigned int last_pidx;\t   /* last pidx recorded by tx monitor */\n \tunsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */\n+\tunsigned int abs_id;\n \n \tint db_disabled;            /* doorbell state */\n \tunsigned short db_pidx;     /* doorbell producer index */\n@@ -719,6 +720,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,\n \t\t     const struct pkt_gl *gl);\n int t4_sge_init(struct adapter *adap);\n+int t4vf_sge_init(struct adapter *adap);\n int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \t\t\t struct rte_eth_dev *eth_dev, uint16_t queue_id,\n \t\t\t unsigned int iqid, int socket_id);\ndiff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h\nindex 3307827c8..d74903dfd 100644\n--- a/drivers/net/cxgbe/base/common.h\n+++ b/drivers/net/cxgbe/base/common.h\n@@ -333,6 +333,7 @@ int t4vf_fw_reset(struct adapter *adap);\n int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);\n int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);\n int t4_fl_pkt_align(struct adapter *adap);\n+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);\n int t4vf_get_vfres(struct adapter *adap);\n int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,\n \t\t\t\tunsigned int cache_line_size,\ndiff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h\nindex f553d146d..43d6a0c73 100644\n--- a/drivers/net/cxgbe/base/t4_regs.h\n+++ b/drivers/net/cxgbe/base/t4_regs.h\n@@ -77,6 +77,7 @@\n #define SGE_BASE_ADDR 0x1000\n \n #define A_SGE_PF_KDOORBELL 0x0\n+#define A_SGE_VF_KDOORBELL 0x0\n \n #define S_QID    15\n #define M_QID    0x1ffffU\n@@ -103,6 +104,9 @@\n \n #define A_SGE_PF_GTS 0x4\n \n+#define T4VF_SGE_BASE_ADDR 0x0000\n+#define A_SGE_VF_GTS 0x4\n+\n #define S_INGRESSQID    16\n #define M_INGRESSQID    0xffffU\n #define V_INGRESSQID(x) ((x) << S_INGRESSQID)\n@@ -191,6 +195,8 @@\n #define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)\n #define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)\n \n+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014\n+\n #define S_ERR_CPL_EXCEED_IQE_SIZE    22\n #define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)\n #define F_ERR_CPL_EXCEED_IQE_SIZE    V_ERR_CPL_EXCEED_IQE_SIZE(1U)\n@@ -280,6 +286,11 @@\n \n #define A_SGE_CONM_CTRL 0x1094\n \n+#define S_T6_EGRTHRESHOLDPACKING    16\n+#define M_T6_EGRTHRESHOLDPACKING    0xffU\n+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \\\n+\t\t\t\t     M_T6_EGRTHRESHOLDPACKING)\n+\n #define S_EGRTHRESHOLD    8\n #define M_EGRTHRESHOLD    0x3fU\n #define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)\n@@ -370,6 +381,7 @@\n #define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)\n \n #define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4\n+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8\n \n #define A_SGE_CONTROL2 0x1124\n \ndiff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h\nindex b40bfb960..6c5c97270 100644\n--- a/drivers/net/cxgbe/base/t4fw_interface.h\n+++ 
b/drivers/net/cxgbe/base/t4fw_interface.h\n@@ -480,6 +480,7 @@ struct fw_caps_config_cmd {\n enum fw_params_mnem {\n \tFW_PARAMS_MNEM_DEV\t\t= 1,\t/* device params */\n \tFW_PARAMS_MNEM_PFVF\t\t= 2,\t/* function params */\n+\tFW_PARAMS_MNEM_REG\t\t= 3,\t/* limited register access */\n \tFW_PARAMS_MNEM_DMAQ\t\t= 4,\t/* dma queue params */\n };\n \n@@ -887,6 +888,11 @@ struct fw_eq_eth_cmd {\n #define G_FW_EQ_ETH_CMD_EQID(x)\t\\\n \t(((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)\n \n+#define S_FW_EQ_ETH_CMD_PHYSEQID        0\n+#define M_FW_EQ_ETH_CMD_PHYSEQID        0xfffff\n+#define G_FW_EQ_ETH_CMD_PHYSEQID(x)     \\\n+\t(((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)\n+\n #define S_FW_EQ_ETH_CMD_FETCHRO\t\t22\n #define M_FW_EQ_ETH_CMD_FETCHRO\t\t0x1\n #define V_FW_EQ_ETH_CMD_FETCHRO(x)\t((x) << S_FW_EQ_ETH_CMD_FETCHRO)\ndiff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c\nindex 6f222c4a1..8e48588b0 100644\n--- a/drivers/net/cxgbe/base/t4vf_hw.c\n+++ b/drivers/net/cxgbe/base/t4vf_hw.c\n@@ -460,6 +460,46 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,\n \treturn t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);\n }\n \n+/**\n+ * t4vf_fl_pkt_align - return the fl packet alignment\n+ * @adapter: the adapter\n+ *\n+ * T4 has a single field to specify the packing and padding boundary.\n+ * T5 onwards has separate fields for this and hence the alignment for\n+ * next packet offset is maximum of these two.\n+ */\n+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,\n+\t\t      u32 sge_control2)\n+{\n+\tunsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;\n+\n+\t/* T4 uses a single control field to specify both the PCIe Padding and\n+\t * Packing Boundary.  T5 introduced the ability to specify these\n+\t * separately.  
The actual Ingress Packet Data alignment boundary\n+\t * within Packed Buffer Mode is the maximum of these two\n+\t * specifications.\n+\t */\n+\tif (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)\n+\t\tingpad_shift = X_INGPADBOUNDARY_SHIFT;\n+\telse\n+\t\tingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;\n+\n+\tingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);\n+\n+\tfl_align = ingpadboundary;\n+\tif (!is_t4(adapter->params.chip)) {\n+\t\tingpackboundary = G_INGPACKBOUNDARY(sge_control2);\n+\t\tif (ingpackboundary == X_INGPACKBOUNDARY_16B)\n+\t\t\tingpackboundary = 16;\n+\t\telse\n+\t\t\tingpackboundary = 1 << (ingpackboundary +\n+\t\t\t\t\tX_INGPACKBOUNDARY_SHIFT);\n+\n+\t\tfl_align = max(ingpadboundary, ingpackboundary);\n+\t}\n+\treturn fl_align;\n+}\n+\n unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)\n {\n \tu32 whoami;\ndiff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c\nindex 7f523265d..16031f38d 100644\n--- a/drivers/net/cxgbe/cxgbe_ethdev.c\n+++ b/drivers/net/cxgbe/cxgbe_ethdev.c\n@@ -481,9 +481,8 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,\n \terr = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,\n \t\t\t\t   s->fw_evtq.cntxt_id, socket_id);\n \n-\tdev_debug(adapter, \"%s: txq->q.cntxt_id= %d err = %d\\n\",\n-\t\t  __func__, txq->q.cntxt_id, err);\n-\n+\tdev_debug(adapter, \"%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\\n\",\n+\t\t  __func__, txq->q.cntxt_id, txq->q.abs_id, err);\n \treturn err;\n }\n \n@@ -610,11 +609,13 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \n \terr = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,\n \t\t\t       &rxq->fl, t4_ethrx_handler,\n-\t\t\t       t4_get_tp_ch_map(adapter, pi->tx_chan), mp,\n+\t\t\t       is_pf4(adapter) ?\n+\t\t\t       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,\n \t\t\t       queue_idx, socket_id);\n \n-\tdev_debug(adapter, \"%s: err = %d; port_id = %d; cntxt_id = %u\\n\",\n-\t\t  __func__, err, pi->port_id, rxq->rspq.cntxt_id);\n+\tdev_debug(adapter, \"%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\\n\",\n+\t\t  __func__, err, pi->port_id, rxq->rspq.cntxt_id,\n+\t\t  rxq->rspq.abs_id);\n \treturn err;\n }\n \ndiff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c\nindex 8d78ad427..d9e772717 100644\n--- a/drivers/net/cxgbe/cxgbe_main.c\n+++ b/drivers/net/cxgbe/cxgbe_main.c\n@@ -1064,7 +1064,8 @@ int setup_rss(struct port_info *pi)\n static void enable_rx(struct adapter *adap, struct sge_rspq *q)\n {\n \t/* 0-increment GTS to start the timer and enable interrupts */\n-\tt4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),\n+\tt4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :\n+\t\t\t\t\t  T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,\n \t\t     V_SEINTARM(q->intr_params) |\n \t\t     V_INGRESSQID(q->cntxt_id));\n }\ndiff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c\nindex 0624267c7..f4d0f4de5 100644\n--- a/drivers/net/cxgbe/cxgbevf_main.c\n+++ b/drivers/net/cxgbe/cxgbevf_main.c\n@@ -108,6 +108,11 @@ static int adap_init0vf(struct adapter *adapter)\n \t}\n \n \tadapter->pf = t4vf_get_pf_from_vf(adapter);\n+\terr = t4vf_sge_init(adapter);\n+\tif (err) {\n+\t\tdev_err(adapter->pdev_dev, \"error in sge init\\n\");\n+\t\treturn err;\n+\t}\n \n \t/* If we're running on newer firmware, let it know that we're\n \t * prepared to deal with encapsulated CPL messages.  
Older\ndiff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c\nindex 6ff8bc46b..aba1a49f3 100644\n--- a/drivers/net/cxgbe/sge.c\n+++ b/drivers/net/cxgbe/sge.c\n@@ -1689,6 +1689,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \tchar z_name[RTE_MEMZONE_NAMESIZE];\n \tchar z_name_sw[RTE_MEMZONE_NAMESIZE];\n \tunsigned int nb_refill;\n+\tu8 pciechan;\n \n \t/* Size needs to be multiple of 16, including status entry. */\n \tiq->size = cxgbe_roundup(iq->size, 16);\n@@ -1706,8 +1707,19 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \n \tmemset(&c, 0, sizeof(c));\n \tc.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |\n-\t\t\t    F_FW_CMD_WRITE | F_FW_CMD_EXEC |\n-\t\t\t    V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));\n+\t\t\t    F_FW_CMD_WRITE | F_FW_CMD_EXEC);\n+\n+\tif (is_pf4(adap)) {\n+\t\tpciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;\n+\t\tc.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |\n+\t\t\t\t     V_FW_IQ_CMD_VFN(0));\n+\t\tif (cong >= 0)\n+\t\t\tc.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |\n+\t\t\t\t\t\t    F_FW_IQ_CMD_IQRO);\n+\t} else {\n+\t\tpciechan = pi->port_id;\n+\t}\n+\n \tc.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |\n \t\t\t\t (sizeof(c) / 16));\n \tc.type_to_iqandstindex =\n@@ -1719,16 +1731,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \t\t      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :\n \t\t\t\t\t\t\t       -intr_idx - 1));\n \tc.iqdroprss_to_iqesize =\n-\t\thtons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :\n-\t\t\t\t\t\t      pi->tx_chan) |\n+\t\thtons(V_FW_IQ_CMD_IQPCIECH(pciechan) |\n \t\t      F_FW_IQ_CMD_IQGTSMODE |\n \t\t      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |\n \t\t      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));\n \tc.iqsize = htons(iq->size);\n \tc.iqaddr = cpu_to_be64(iq->phys_addr);\n-\tif (cong >= 0)\n-\t\tc.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |\n-\t\t\t\t\t    F_FW_IQ_CMD_IQRO);\n \n \tif (fl) {\n \t\tstruct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,\n@@ -1768,7 +1776,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \t\t\t       0 : F_FW_IQ_CMD_FL0PACKEN) |\n \t\t\t      F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |\n \t\t\t      F_FW_IQ_CMD_FL0PADEN);\n-\t\tif (cong >= 0)\n+\t\tif (is_pf4(adap) && cong >= 0)\n \t\t\tc.iqns_to_fl0congen |=\n \t\t\t\thtonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |\n \t\t\t\t      F_FW_IQ_CMD_FL0CONGCIF |\n@@ -1789,7 +1797,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \t\tc.fl0addr = cpu_to_be64(fl->addr);\n \t}\n \n-\tret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);\n+\tif (is_pf4(adap))\n+\t\tret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);\n+\telse\n+\t\tret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);\n \tif (ret)\n \t\tgoto err;\n \n@@ -1806,7 +1817,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \tiq->stat = (void *)&iq->desc[iq->size * 8];\n \tiq->eth_dev = eth_dev;\n \tiq->handler = hnd;\n-\tiq->port_id = pi->port_id;\n+\tiq->port_id = pi->pidx;\n \tiq->mb_pool = mp;\n \n \t/* set offset to -1 to distinguish ingress queues without FL */\n@@ -1846,7 +1857,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \t * a lot easier to fix in one place ...  
For now we do something very\n \t * simple (and hopefully less wrong).\n \t */\n-\tif (!is_t4(adap->params.chip) && cong >= 0) {\n+\tif (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {\n \t\tu32 param, val;\n \t\tint i;\n \n@@ -1893,9 +1904,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,\n \treturn ret;\n }\n \n-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)\n+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,\n+\t\t     unsigned int abs_id)\n {\n \tq->cntxt_id = id;\n+\tq->abs_id = abs_id;\n \tq->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,\n \t\t\t\t    &q->bar2_qid);\n \tq->cidx = 0;\n@@ -1943,6 +1956,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \tstruct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);\n \tchar z_name[RTE_MEMZONE_NAMESIZE];\n \tchar z_name_sw[RTE_MEMZONE_NAMESIZE];\n+\tu8 pciechan;\n \n \t/* Add status entries */\n \tnentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);\n@@ -1961,16 +1975,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \n \tmemset(&c, 0, sizeof(c));\n \tc.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |\n-\t\t\t    F_FW_CMD_WRITE | F_FW_CMD_EXEC |\n-\t\t\t    V_FW_EQ_ETH_CMD_PFN(adap->pf) |\n-\t\t\t    V_FW_EQ_ETH_CMD_VFN(0));\n+\t\t\t    F_FW_CMD_WRITE | F_FW_CMD_EXEC);\n+\tif (is_pf4(adap)) {\n+\t\tpciechan = pi->tx_chan;\n+\t\tc.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |\n+\t\t\t\t     V_FW_EQ_ETH_CMD_VFN(0));\n+\t} else {\n+\t\tpciechan = pi->port_id;\n+\t}\n+\n \tc.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |\n \t\t\t\t F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));\n \tc.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |\n \t\t\t\t     V_FW_EQ_ETH_CMD_VIID(pi->viid));\n \tc.fetchszm_to_iqid =\n \t\thtonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |\n-\t\t      V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |\n+\t\t      V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |\n \t\t      F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));\n \tc.dcaen_to_eqsize =\n \t\thtonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |\n@@ -1978,7 +1998,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \t\t      V_FW_EQ_ETH_CMD_EQSIZE(nentries));\n \tc.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);\n \n-\tret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);\n+\tif (is_pf4(adap))\n+\t\tret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);\n+\telse\n+\t\tret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);\n \tif (ret) {\n \t\trte_free(txq->q.sdesc);\n \t\ttxq->q.sdesc = NULL;\n@@ -1986,7 +2009,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \t\treturn ret;\n \t}\n \n-\tinit_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));\n+\tinit_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),\n+\t\t G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));\n \ttxq->stats.tso = 0;\n \ttxq->stats.pkts = 0;\n \ttxq->stats.tx_cso = 0;\n@@ -2281,3 +2305,182 @@ int t4_sge_init(struct adapter *adap)\n \n \treturn 0;\n }\n+\n+int t4vf_sge_init(struct adapter *adap)\n+{\n+\tstruct sge_params *sge_params = &adap->params.sge;\n+\tu32 sge_ingress_queues_per_page;\n+\tu32 sge_egress_queues_per_page;\n+\tu32 sge_control, sge_control2;\n+\tu32 fl_small_pg, fl_large_pg;\n+\tu32 sge_ingress_rx_threshold;\n+\tu32 sge_timer_value_0_and_1;\n+\tu32 sge_timer_value_2_and_3;\n+\tu32 sge_timer_value_4_and_5;\n+\tu32 
sge_congestion_control;\n+\tstruct sge *s = &adap->sge;\n+\tunsigned int s_hps, s_qpp;\n+\tu32 sge_host_page_size;\n+\tu32 params[7], vals[7];\n+\tint v;\n+\n+\t/* query basic params from fw */\n+\tparams[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));\n+\tparams[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));\n+\tparams[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));\n+\tparams[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));\n+\tparams[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));\n+\tparams[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));\n+\tparams[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));\n+\tv = t4vf_query_params(adap, 7, params, vals);\n+\tif (v != FW_SUCCESS)\n+\t\treturn v;\n+\n+\tsge_control = vals[0];\n+\tsge_host_page_size = vals[1];\n+\tfl_small_pg = vals[2];\n+\tfl_large_pg = vals[3];\n+\tsge_timer_value_0_and_1 = vals[4];\n+\tsge_timer_value_2_and_3 = vals[5];\n+\tsge_timer_value_4_and_5 = vals[6];\n+\n+\t/*\n+\t * Start by vetting the basic SGE parameters which have been set up by\n+\t * the Physical Function Driver.\n+\t */\n+\n+\t/* We only bother using the Large Page logic if the Large Page Buffer\n+\t * is larger than our Page Size Buffer.\n+\t */\n+\tif (fl_large_pg <= fl_small_pg)\n+\t\tfl_large_pg = 0;\n+\n+\t/* The Page Size Buffer must be exactly equal to our Page Size and the\n+\t * Large Page Size Buffer should be 0 (per above) or a power of 2.\n+\t */\n+\tif (fl_small_pg != CXGBE_PAGE_SIZE ||\n+\t    (fl_large_pg & (fl_large_pg - 1)) != 0) {\n+\t\tdev_err(adapter->pdev_dev, \"bad SGE FL buffer sizes [%d, %d]\\n\",\n+\t\t\tfl_small_pg, fl_large_pg);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif ((sge_control & F_RXPKTCPLMODE) !=\n+\t    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {\n+\t\tdev_err(adapter->pdev_dev, \"bad SGE CPL MODE\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\n+\t/* Grab ingress packing boundary from SGE_CONTROL2 for */\n+\tparams[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));\n+\tv = t4vf_query_params(adap, 1, params, vals);\n+\tif (v != FW_SUCCESS) {\n+\t\tdev_err(adapter, \"Unable to get SGE Control2; \"\n+\t\t\t\"probably old firmware.\\n\");\n+\t\treturn v;\n+\t}\n+\tsge_control2 = vals[0];\n+\n+\tparams[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));\n+\tparams[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));\n+\tv = t4vf_query_params(adap, 2, params, vals);\n+\tif (v != FW_SUCCESS)\n+\t\treturn v;\n+\tsge_ingress_rx_threshold = vals[0];\n+\tsge_congestion_control = vals[1];\n+\tparams[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));\n+\tparams[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |\n+\t\t     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));\n+\tv = t4vf_query_params(adap, 2, params, vals);\n+\tif (v != FW_SUCCESS) {\n+\t\tdev_warn(adap, \"Unable to get VF SGE Queues/Page; \"\n+\t\t\t \"probably old firmware.\\n\");\n+\t\treturn v;\n+\t}\n+\tsge_egress_queues_per_page = vals[0];\n+\tsge_ingress_queues_per_page = vals[1];\n+\n+\t/*\n+\t * 
We need the Queues/Page for our VF.  This is based on the\n+\t * PF from which we're instantiated and is indexed in the\n+\t * register we just read.\n+\t */\n+\ts_hps = (S_HOSTPAGESIZEPF0 +\n+\t\t (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);\n+\tsge_params->hps =\n+\t\t((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);\n+\n+\ts_qpp = (S_QUEUESPERPAGEPF0 +\n+\t\t (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);\n+\tsge_params->eq_qpp =\n+\t\t((sge_egress_queues_per_page >> s_qpp)\n+\t\t & M_QUEUESPERPAGEPF0);\n+\tsge_params->iq_qpp =\n+\t\t((sge_ingress_queues_per_page >> s_qpp)\n+\t\t & M_QUEUESPERPAGEPF0);\n+\n+\t/*\n+\t * Now translate the queried parameters into our internal forms.\n+\t */\n+\tif (fl_large_pg)\n+\t\ts->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;\n+\ts->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)\n+\t\t\t? 128 : 64);\n+\ts->pktshift = G_PKTSHIFT(sge_control);\n+\ts->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);\n+\n+\t/*\n+\t * A FL with <= fl_starve_thres buffers is starving and a periodic\n+\t * timer will attempt to refill it.  This needs to be larger than the\n+\t * SGE's Egress Congestion Threshold.  If it isn't, then we can get\n+\t * stuck waiting for new packets while the SGE is waiting for us to\n+\t * give it more Free List entries.  (Note that the SGE's Egress\n+\t * Congestion Threshold is in units of 2 Free List pointers.)\n+\t */\n+\tswitch (CHELSIO_CHIP_VERSION(adap->params.chip)) {\n+\tcase CHELSIO_T5:\n+\t\ts->fl_starve_thres =\n+\t\t\tG_EGRTHRESHOLDPACKING(sge_congestion_control);\n+\t\tbreak;\n+\tcase CHELSIO_T6:\n+\tdefault:\n+\t\ts->fl_starve_thres =\n+\t\t\tG_T6_EGRTHRESHOLDPACKING(sge_congestion_control);\n+\t\tbreak;\n+\t}\n+\ts->fl_starve_thres = s->fl_starve_thres * 2 + 1;\n+\n+\t/*\n+\t * Save RX interrupt holdoff timer values and counter\n+\t * threshold values from the SGE parameters.\n+\t */\n+\ts->timer_val[0] = core_ticks_to_us(adap,\n+\t\t\tG_TIMERVALUE0(sge_timer_value_0_and_1));\n+\ts->timer_val[1] = core_ticks_to_us(adap,\n+\t\t\tG_TIMERVALUE1(sge_timer_value_0_and_1));\n+\ts->timer_val[2] = core_ticks_to_us(adap,\n+\t\t\tG_TIMERVALUE2(sge_timer_value_2_and_3));\n+\ts->timer_val[3] = core_ticks_to_us(adap,\n+\t\t\tG_TIMERVALUE3(sge_timer_value_2_and_3));\n+\ts->timer_val[4] = core_ticks_to_us(adap,\n+\t\t\tG_TIMERVALUE4(sge_timer_value_4_and_5));\n+\ts->timer_val[5] = core_ticks_to_us(adap,\n+\t\t\tG_TIMERVALUE5(sge_timer_value_4_and_5));\n+\ts->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);\n+\ts->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);\n+\ts->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);\n+\ts->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);\n+\treturn 0;\n+}\n",
    "prefixes": [
        "dpdk-dev",
        "05/13"
    ]
}
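
The patch resource above can be read or updated with any HTTP client. Below is a minimal sketch using Python with the third-party requests package; the token value is a placeholder and the choice of requests (rather than a dedicated client such as git-pw) is an assumption for illustration. GET needs no authentication, while PUT/PATCH require an API token with maintainer rights on the project; fields such as "state" and "archived", shown in the response above, are the kind of fields typically changed this way.

# Minimal sketch (assumption: Python 3 with the "requests" package installed).
# GET is open to anonymous users; PUT/PATCH need token authentication.
import requests

BASE_URL = "https://patches.dpdk.org/api"
API_TOKEN = "replace-with-your-token"  # placeholder, not a real token

# get: show a patch
resp = requests.get(f"{BASE_URL}/patches/35946/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["archived"])

# patch: update a patch (partial update of the supplied fields only)
resp = requests.patch(
    f"{BASE_URL}/patches/35946/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])

The same request shape works for put, except that a full representation of the writable fields should be supplied rather than a partial one.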