get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
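
Read access is anonymous: the transcript below shows a plain GET against this
endpoint. A minimal sketch of the same request in Python (the third-party
requests library is an illustrative assumption, not part of Patchwork):

    import requests

    # Fetch one patch from the DPDK Patchwork instance; read access is public.
    resp = requests.get("https://patches.dpdk.org/api/patches/40928/")
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[dpdk-dev,3/7] net/cxgbe: add control queue ..."
    print(patch["state"])  # "accepted"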

GET /api/patches/40928/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 40928,
    "url": "https://patches.dpdk.org/api/patches/40928/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1be701875ffb3a27247a076b771993545d0513b6.1528469677.git.rahul.lakkireddy@chelsio.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1be701875ffb3a27247a076b771993545d0513b6.1528469677.git.rahul.lakkireddy@chelsio.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1be701875ffb3a27247a076b771993545d0513b6.1528469677.git.rahul.lakkireddy@chelsio.com",
    "date": "2018-06-08T17:58:13",
    "name": "[dpdk-dev,3/7] net/cxgbe: add control queue to communicate filter requests",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "bfaaafa0ec2fce634a57e8147d6006d93844811c",
    "submitter": {
        "id": 241,
        "url": "https://patches.dpdk.org/api/people/241/?format=api",
        "name": "Rahul Lakkireddy",
        "email": "rahul.lakkireddy@chelsio.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1be701875ffb3a27247a076b771993545d0513b6.1528469677.git.rahul.lakkireddy@chelsio.com/mbox/",
    "series": [
        {
            "id": 63,
            "url": "https://patches.dpdk.org/api/series/63/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=63",
            "date": "2018-06-08T17:58:10",
            "name": "cxgbe: add support to offload flows via rte_flow",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/63/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/40928/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/40928/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id F0C781D00A;\n\tFri,  8 Jun 2018 19:59:19 +0200 (CEST)",
            "from stargate.chelsio.com (stargate.chelsio.com [12.32.117.8])\n\tby dpdk.org (Postfix) with ESMTP id 646AC1D003\n\tfor <dev@dpdk.org>; Fri,  8 Jun 2018 19:59:18 +0200 (CEST)",
            "from localhost (scalar.blr.asicdesigners.com [10.193.185.94])\n\tby stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id w58HxF8Z017294; \n\tFri, 8 Jun 2018 10:59:16 -0700"
        ],
        "From": "Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>",
        "To": "dev@dpdk.org",
        "Cc": "shaguna@chelsio.com, kumaras@chelsio.com, indranil@chelsio.com,\n\tnirranjan@chelsio.com",
        "Date": "Fri,  8 Jun 2018 23:28:13 +0530",
        "Message-Id": "<1be701875ffb3a27247a076b771993545d0513b6.1528469677.git.rahul.lakkireddy@chelsio.com>",
        "X-Mailer": "git-send-email 2.5.3",
        "In-Reply-To": [
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>"
        ],
        "References": [
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 3/7] net/cxgbe: add control queue to communicate\n\tfilter requests",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Shagun Agrawal <shaguna@chelsio.com>\n\nAdd control queue to communicate filter creation/deletion requests\nwith firmware. This API will be used by subsequent patches.\n\nSigned-off-by: Shagun Agrawal <shaguna@chelsio.com>\nSigned-off-by: Kumar Sanghvi <kumaras@chelsio.com>\nSigned-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>\n---\n drivers/net/cxgbe/base/adapter.h        |  15 +++\n drivers/net/cxgbe/base/common.h         |   2 +\n drivers/net/cxgbe/base/t4_hw.c          |  25 ++++\n drivers/net/cxgbe/base/t4fw_interface.h |  70 ++++++++++++\n drivers/net/cxgbe/cxgbe.h               |   1 +\n drivers/net/cxgbe/cxgbe_ethdev.c        |   3 +\n drivers/net/cxgbe/cxgbe_main.c          |  41 +++++++\n drivers/net/cxgbe/sge.c                 | 197 +++++++++++++++++++++++++++++++-\n 8 files changed, 353 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h\nindex f3434d28a..9a66a4a99 100644\n--- a/drivers/net/cxgbe/base/adapter.h\n+++ b/drivers/net/cxgbe/base/adapter.h\n@@ -19,6 +19,7 @@\n \n enum {\n \tMAX_ETH_QSETS = 64,           /* # of Ethernet Tx/Rx queue sets */\n+\tMAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */\n };\n \n struct adapter;\n@@ -256,10 +257,20 @@ struct sge_eth_txq {                   /* state for an SGE Ethernet Tx queue */\n \tunsigned int flags;            /* flags for state of the queue */\n } __rte_cache_aligned;\n \n+struct sge_ctrl_txq {                /* State for an SGE control Tx queue */\n+\tstruct sge_txq q;            /* txq */\n+\tstruct adapter *adapter;     /* adapter associated with this queue */\n+\trte_spinlock_t ctrlq_lock;   /* control queue lock */\n+\tu8 full;                     /* the Tx ring is full */\n+\tu64 txp;                     /* number of transmits */\n+\tstruct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */\n+} __rte_cache_aligned;\n+\n struct sge {\n \tstruct sge_eth_txq ethtxq[MAX_ETH_QSETS];\n \tstruct sge_eth_rxq ethrxq[MAX_ETH_QSETS];\n \tstruct sge_rspq fw_evtq __rte_cache_aligned;\n+\tstruct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];\n \n \tu16 max_ethqsets;           /* # of available Ethernet queue sets */\n \tu32 stat_len;               /* length of status page at ring end */\n@@ -720,6 +731,7 @@ void t4_sge_tx_monitor_start(struct adapter *adap);\n void t4_sge_tx_monitor_stop(struct adapter *adap);\n int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n \t\tuint16_t nb_pkts);\n+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);\n int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,\n \t\t     const struct pkt_gl *gl);\n int t4_sge_init(struct adapter *adap);\n@@ -727,6 +739,9 @@ int t4vf_sge_init(struct adapter *adap);\n int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \t\t\t struct rte_eth_dev *eth_dev, uint16_t queue_id,\n \t\t\t unsigned int iqid, int socket_id);\n+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,\n+\t\t\t  struct rte_eth_dev *eth_dev, uint16_t queue_id,\n+\t\t\t  unsigned int iqid, int socket_id);\n int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,\n \t\t     struct rte_eth_dev *eth_dev, int intr_idx,\n \t\t     struct sge_fl *fl, rspq_handler_t handler,\ndiff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h\nindex 155a30288..c80304b24 100644\n--- a/drivers/net/cxgbe/base/common.h\n+++ b/drivers/net/cxgbe/base/common.h\n@@ -378,6 +378,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,\n \t       unsigned int fl0id, unsigned int fl1id);\n int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,\n \t\t   unsigned int vf, unsigned int eqid);\n+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,\n+\t\t    unsigned int vf, unsigned int eqid);\n \n static inline unsigned int core_ticks_per_usec(const struct adapter *adap)\n {\ndiff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c\nindex e5ef73b67..c146c911e 100644\n--- a/drivers/net/cxgbe/base/t4_hw.c\n+++ b/drivers/net/cxgbe/base/t4_hw.c\n@@ -4490,6 +4490,31 @@ static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)\n \t}\n }\n \n+/**\n+ * t4_ctrl_eq_free - free a control egress queue\n+ * @adap: the adapter\n+ * @mbox: mailbox to use for the FW 
command\n+ * @pf: the PF owning the queue\n+ * @vf: the VF owning the queue\n+ * @eqid: egress queue id\n+ *\n+ * Frees a control egress queue.\n+ */\n+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,\n+\t\t    unsigned int vf, unsigned int eqid)\n+{\n+\tstruct fw_eq_ctrl_cmd c;\n+\n+\tmemset(&c, 0, sizeof(c));\n+\tc.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |\n+\t\t\t\t  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |\n+\t\t\t\t  V_FW_EQ_CTRL_CMD_PFN(pf) |\n+\t\t\t\t  V_FW_EQ_CTRL_CMD_VFN(vf));\n+\tc.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));\n+\tc.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));\n+\treturn t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);\n+}\n+\n /**\n  * t4_handle_fw_rpl - process a FW reply message\n  * @adap: the adapter\ndiff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h\nindex 95b2aec48..44b6f6dac 100644\n--- a/drivers/net/cxgbe/base/t4fw_interface.h\n+++ b/drivers/net/cxgbe/base/t4fw_interface.h\n@@ -178,6 +178,7 @@ enum fw_cmd_opcodes {\n \tFW_PFVF_CMD\t\t       = 0x09,\n \tFW_IQ_CMD                      = 0x10,\n \tFW_EQ_ETH_CMD                  = 0x12,\n+\tFW_EQ_CTRL_CMD                 = 0x13,\n \tFW_VI_CMD                      = 0x14,\n \tFW_VI_MAC_CMD                  = 0x15,\n \tFW_VI_RXMODE_CMD               = 0x16,\n@@ -960,6 +961,75 @@ struct fw_eq_eth_cmd {\n #define G_FW_EQ_ETH_CMD_VIID(x)\t\\\n \t(((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)\n \n+struct fw_eq_ctrl_cmd {\n+\t__be32 op_to_vfn;\n+\t__be32 alloc_to_len16;\n+\t__be32 cmpliqid_eqid;\n+\t__be32 physeqid_pkd;\n+\t__be32 fetchszm_to_iqid;\n+\t__be32 dcaen_to_eqsize;\n+\t__be64 eqaddr;\n+};\n+\n+#define S_FW_EQ_CTRL_CMD_PFN\t\t8\n+#define V_FW_EQ_CTRL_CMD_PFN(x)\t\t((x) << S_FW_EQ_CTRL_CMD_PFN)\n+\n+#define S_FW_EQ_CTRL_CMD_VFN\t\t0\n+#define V_FW_EQ_CTRL_CMD_VFN(x)\t\t((x) << S_FW_EQ_CTRL_CMD_VFN)\n+\n+#define S_FW_EQ_CTRL_CMD_ALLOC\t\t31\n+#define V_FW_EQ_CTRL_CMD_ALLOC(x)\t((x) << S_FW_EQ_CTRL_CMD_ALLOC)\n+#define F_FW_EQ_CTRL_CMD_ALLOC\t\tV_FW_EQ_CTRL_CMD_ALLOC(1U)\n+\n+#define S_FW_EQ_CTRL_CMD_FREE\t\t30\n+#define V_FW_EQ_CTRL_CMD_FREE(x)\t((x) << S_FW_EQ_CTRL_CMD_FREE)\n+#define F_FW_EQ_CTRL_CMD_FREE\t\tV_FW_EQ_CTRL_CMD_FREE(1U)\n+\n+#define S_FW_EQ_CTRL_CMD_EQSTART\t28\n+#define V_FW_EQ_CTRL_CMD_EQSTART(x)\t((x) << S_FW_EQ_CTRL_CMD_EQSTART)\n+#define F_FW_EQ_CTRL_CMD_EQSTART\tV_FW_EQ_CTRL_CMD_EQSTART(1U)\n+\n+#define S_FW_EQ_CTRL_CMD_CMPLIQID\t20\n+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x)\t((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)\n+\n+#define S_FW_EQ_CTRL_CMD_EQID\t\t0\n+#define M_FW_EQ_CTRL_CMD_EQID\t\t0xfffff\n+#define V_FW_EQ_CTRL_CMD_EQID(x)\t((x) << S_FW_EQ_CTRL_CMD_EQID)\n+#define G_FW_EQ_CTRL_CMD_EQID(x)\t\\\n+\t(((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)\n+\n+#define S_FW_EQ_CTRL_CMD_PHYSEQID       0\n+#define M_FW_EQ_CTRL_CMD_PHYSEQID       0xfffff\n+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x)    ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)\n+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x)    \\\n+\t(((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)\n+\n+#define S_FW_EQ_CTRL_CMD_FETCHRO\t22\n+#define V_FW_EQ_CTRL_CMD_FETCHRO(x)\t((x) << S_FW_EQ_CTRL_CMD_FETCHRO)\n+#define F_FW_EQ_CTRL_CMD_FETCHRO\tV_FW_EQ_CTRL_CMD_FETCHRO(1U)\n+\n+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE\t20\n+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE\t0x3\n+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x)\t((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)\n+\n+#define S_FW_EQ_CTRL_CMD_PCIECHN\t16\n+#define V_FW_EQ_CTRL_CMD_PCIECHN(x)\t((x) << 
S_FW_EQ_CTRL_CMD_PCIECHN)\n+\n+#define S_FW_EQ_CTRL_CMD_IQID\t\t0\n+#define V_FW_EQ_CTRL_CMD_IQID(x)\t((x) << S_FW_EQ_CTRL_CMD_IQID)\n+\n+#define S_FW_EQ_CTRL_CMD_FBMIN\t\t23\n+#define V_FW_EQ_CTRL_CMD_FBMIN(x)\t((x) << S_FW_EQ_CTRL_CMD_FBMIN)\n+\n+#define S_FW_EQ_CTRL_CMD_FBMAX\t\t20\n+#define V_FW_EQ_CTRL_CMD_FBMAX(x)\t((x) << S_FW_EQ_CTRL_CMD_FBMAX)\n+\n+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH\t16\n+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x)\t((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)\n+\n+#define S_FW_EQ_CTRL_CMD_EQSIZE\t\t0\n+#define V_FW_EQ_CTRL_CMD_EQSIZE(x)\t((x) << S_FW_EQ_CTRL_CMD_EQSIZE)\n+\n enum fw_vi_func {\n \tFW_VI_FUNC_ETH,\n };\ndiff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h\nindex e4a525607..44f5934d1 100644\n--- a/drivers/net/cxgbe/cxgbe.h\n+++ b/drivers/net/cxgbe/cxgbe.h\n@@ -42,6 +42,7 @@ int link_start(struct port_info *pi);\n void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,\n \t       unsigned int cnt, unsigned int size, unsigned int iqe_size);\n int setup_sge_fwevtq(struct adapter *adapter);\n+int setup_sge_ctrl_txq(struct adapter *adapter);\n void cfg_queues(struct rte_eth_dev *eth_dev);\n int cfg_queue_count(struct rte_eth_dev *eth_dev);\n int init_rss(struct adapter *adap);\ndiff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c\nindex 1adb8e41f..713dc8fae 100644\n--- a/drivers/net/cxgbe/cxgbe_ethdev.c\n+++ b/drivers/net/cxgbe/cxgbe_ethdev.c\n@@ -365,6 +365,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)\n \t\tif (err)\n \t\t\treturn err;\n \t\tadapter->flags |= FW_QUEUE_BOUND;\n+\t\terr = setup_sge_ctrl_txq(adapter);\n+\t\tif (err)\n+\t\t\treturn err;\n \t}\n \n \terr = cfg_queue_count(eth_dev);\ndiff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c\nindex 9880257d2..5416800de 100644\n--- a/drivers/net/cxgbe/cxgbe_main.c\n+++ b/drivers/net/cxgbe/cxgbe_main.c\n@@ -94,6 +94,47 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,\n \treturn 0;\n }\n \n+/**\n+ * Setup sge control queues to pass control information.\n+ */\n+int setup_sge_ctrl_txq(struct adapter *adapter)\n+{\n+\tstruct sge *s = &adapter->sge;\n+\tint err = 0, i = 0;\n+\n+\tfor_each_port(adapter, i) {\n+\t\tchar name[RTE_ETH_NAME_MAX_LEN];\n+\t\tstruct sge_ctrl_txq *q = &s->ctrlq[i];\n+\n+\t\tq->q.size = 1024;\n+\t\terr = t4_sge_alloc_ctrl_txq(adapter, q,\n+\t\t\t\t\t    adapter->eth_dev,  i,\n+\t\t\t\t\t    s->fw_evtq.cntxt_id,\n+\t\t\t\t\t    rte_socket_id());\n+\t\tif (err) {\n+\t\t\tdev_err(adapter, \"Failed to alloc ctrl txq. 
Err: %d\",\n+\t\t\t\terr);\n+\t\t\tgoto out;\n+\t\t}\n+\t\tsnprintf(name, sizeof(name), \"cxgbe_ctrl_pool_%d\", i);\n+\t\tq->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,\n+\t\t\t\t\t\t     RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\t     RTE_MBUF_PRIV_ALIGN,\n+\t\t\t\t\t\t     RTE_MBUF_DEFAULT_BUF_SIZE,\n+\t\t\t\t\t\t     SOCKET_ID_ANY);\n+\t\tif (!q->mb_pool) {\n+\t\t\tdev_err(adapter, \"Can't create ctrl pool for port: %d\",\n+\t\t\t\ti);\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto out;\n+\t\t}\n+\t}\n+\treturn 0;\n+out:\n+\tt4_free_sge_resources(adapter);\n+\treturn err;\n+}\n+\n int setup_sge_fwevtq(struct adapter *adapter)\n {\n \tstruct sge *s = &adapter->sge;\ndiff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c\nindex b5d3611da..357b4856d 100644\n--- a/drivers/net/cxgbe/sge.c\n+++ b/drivers/net/cxgbe/sge.c\n@@ -54,6 +54,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,\n  */\n #define MAX_IMM_TX_PKT_LEN 256\n \n+/*\n+ * Max size of a WR sent through a control Tx queue.\n+ */\n+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN\n+\n /*\n  * Rx buffer sizes for \"usembufs\" Free List buffers (one ingress packet\n  * per mbuf buffer).  We currently only support two sizes for 1500- and\n@@ -1299,6 +1304,126 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,\n \treturn 0;\n }\n \n+/**\n+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs\n+ * @q: the SGE control Tx queue\n+ *\n+ * This is a variant of reclaim_completed_tx() that is used for Tx queues\n+ * that send only immediate data (presently just the control queues) and\n+ * thus do not have any mbufs to release.\n+ */\n+static inline void reclaim_completed_tx_imm(struct sge_txq *q)\n+{\n+\tint hw_cidx = ntohs(q->stat->cidx);\n+\tint reclaim = hw_cidx - q->cidx;\n+\n+\tif (reclaim < 0)\n+\t\treclaim += q->size;\n+\n+\tq->in_use -= reclaim;\n+\tq->cidx = hw_cidx;\n+}\n+\n+/**\n+ * is_imm - check whether a packet can be sent as immediate data\n+ * @mbuf: the packet\n+ *\n+ * Returns true if a packet can be sent as a WR with immediate data.\n+ */\n+static inline int is_imm(const struct rte_mbuf *mbuf)\n+{\n+\treturn mbuf->pkt_len <= MAX_CTRL_WR_LEN;\n+}\n+\n+/**\n+ * inline_tx_mbuf: inline a packet's data into TX descriptors\n+ * @q: the TX queue where the packet will be inlined\n+ * @from: pointer to data portion of packet\n+ * @to: pointer after cpl where data has to be inlined\n+ * @len: length of data to inline\n+ *\n+ * Inline a packet's contents directly to TX descriptors, starting at\n+ * the given position within the TX DMA ring.\n+ * Most of the complexity of this operation is dealing with wrap arounds\n+ * in the middle of the packet we want to inline.\n+ */\n+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,\n+\t\t\t   int len)\n+{\n+\tint left = RTE_PTR_DIFF(q->stat, *to);\n+\n+\tif (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {\n+\t\trte_memcpy(*to, from, len);\n+\t\t*to = RTE_PTR_ADD(*to, len);\n+\t} else {\n+\t\trte_memcpy(*to, from, left);\n+\t\tfrom = RTE_PTR_ADD(from, left);\n+\t\tleft = len - left;\n+\t\trte_memcpy((void *)q->desc, from, left);\n+\t\t*to = RTE_PTR_ADD((void *)q->desc, left);\n+\t}\n+}\n+\n+/**\n+ * ctrl_xmit - send a packet through an SGE control Tx queue\n+ * @q: the control queue\n+ * @mbuf: the packet\n+ *\n+ * Send a packet through an SGE control Tx queue.  
Packets sent through\n+ * a control queue must fit entirely as immediate data.\n+ */\n+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)\n+{\n+\tunsigned int ndesc;\n+\tstruct fw_wr_hdr *wr;\n+\tcaddr_t dst;\n+\n+\tif (unlikely(!is_imm(mbuf))) {\n+\t\tWARN_ON(1);\n+\t\trte_pktmbuf_free(mbuf);\n+\t\treturn -1;\n+\t}\n+\n+\treclaim_completed_tx_imm(&q->q);\n+\tndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));\n+\tt4_os_lock(&q->ctrlq_lock);\n+\n+\tq->full = txq_avail(&q->q) < ndesc ? 1 : 0;\n+\tif (unlikely(q->full)) {\n+\t\tt4_os_unlock(&q->ctrlq_lock);\n+\t\treturn -1;\n+\t}\n+\n+\twr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];\n+\tdst = (void *)wr;\n+\tinline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),\n+\t\t       &dst, mbuf->data_len);\n+\n+\ttxq_advance(&q->q, ndesc);\n+\tif (unlikely(txq_avail(&q->q) < 64))\n+\t\twr->lo |= htonl(F_FW_WR_EQUEQ);\n+\n+\tq->txp++;\n+\n+\tring_tx_db(q->adapter, &q->q);\n+\tt4_os_unlock(&q->ctrlq_lock);\n+\n+\trte_pktmbuf_free(mbuf);\n+\treturn 0;\n+}\n+\n+/**\n+ * t4_mgmt_tx - send a management message\n+ * @q: the control queue\n+ * @mbuf: the packet containing the management message\n+ *\n+ * Send a management message through control queue.\n+ */\n+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)\n+{\n+\treturn ctrl_xmit(q, mbuf);\n+}\n+\n /**\n  * alloc_ring - allocate resources for an SGE descriptor ring\n  * @dev: the PCI device's core device\n@@ -2080,6 +2205,64 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,\n \treturn 0;\n }\n \n+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,\n+\t\t\t  struct rte_eth_dev *eth_dev, uint16_t queue_id,\n+\t\t\t  unsigned int iqid, int socket_id)\n+{\n+\tint ret, nentries;\n+\tstruct fw_eq_ctrl_cmd c;\n+\tstruct sge *s = &adap->sge;\n+\tstruct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);\n+\tchar z_name[RTE_MEMZONE_NAMESIZE];\n+\tchar z_name_sw[RTE_MEMZONE_NAMESIZE];\n+\n+\t/* Add status entries */\n+\tnentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);\n+\n+\tsnprintf(z_name, sizeof(z_name), \"%s_%s_%d_%d\",\n+\t\t eth_dev->device->driver->name, \"ctrl_tx_ring\",\n+\t\t eth_dev->data->port_id, queue_id);\n+\tsnprintf(z_name_sw, sizeof(z_name_sw), \"%s_sw_ring\", z_name);\n+\n+\ttxq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),\n+\t\t\t\t 0, &txq->q.phys_addr,\n+\t\t\t\t NULL, 0, queue_id,\n+\t\t\t\t socket_id, z_name, z_name_sw);\n+\tif (!txq->q.desc)\n+\t\treturn -ENOMEM;\n+\n+\tmemset(&c, 0, sizeof(c));\n+\tc.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |\n+\t\t\t    F_FW_CMD_WRITE | F_FW_CMD_EXEC |\n+\t\t\t    V_FW_EQ_CTRL_CMD_PFN(adap->pf) |\n+\t\t\t    V_FW_EQ_CTRL_CMD_VFN(0));\n+\tc.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |\n+\t\t\t\t F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));\n+\tc.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));\n+\tc.physeqid_pkd = htonl(0);\n+\tc.fetchszm_to_iqid =\n+\t\thtonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |\n+\t\t      V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |\n+\t\t      F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));\n+\tc.dcaen_to_eqsize =\n+\t\thtonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |\n+\t\t      V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |\n+\t\t      V_FW_EQ_CTRL_CMD_EQSIZE(nentries));\n+\tc.eqaddr = cpu_to_be64(txq->q.phys_addr);\n+\n+\tret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);\n+\tif (ret) {\n+\t\ttxq->q.desc = NULL;\n+\t\treturn 
ret;\n+\t}\n+\n+\tinit_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),\n+\t\t G_FW_EQ_CTRL_CMD_EQID(ntohl(c. physeqid_pkd)));\n+\ttxq->adapter = adap;\n+\ttxq->full = 0;\n+\treturn 0;\n+}\n+\n static void free_txq(struct sge_txq *q)\n {\n \tq->cntxt_id = 0;\n@@ -2174,7 +2357,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)\n  */\n void t4_free_sge_resources(struct adapter *adap)\n {\n-\tint i;\n+\tunsigned int i;\n \tstruct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];\n \tstruct sge_eth_txq *txq = &adap->sge.ethtxq[0];\n \n@@ -2191,6 +2374,18 @@ void t4_free_sge_resources(struct adapter *adap)\n \t\t}\n \t}\n \n+\t/* clean up control Tx queues */\n+\tfor (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {\n+\t\tstruct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];\n+\n+\t\tif (cq->q.desc) {\n+\t\t\treclaim_completed_tx_imm(&cq->q);\n+\t\t\tt4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,\n+\t\t\t\t\tcq->q.cntxt_id);\n+\t\t\tfree_txq(&cq->q);\n+\t\t}\n+\t}\n+\n \tif (adap->sge.fw_evtq.desc)\n \t\tfree_rspq_fl(adap, &adap->sge.fw_evtq, NULL);\n }\n",
    "prefixes": [
        "dpdk-dev",
        "3/7"
    ]
}
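
Updating a patch (PUT or PATCH, per the Allow header above) requires an
authenticated account with maintainer rights on the project. A hedged sketch
of a partial update using Patchwork's token authentication; the token value is
a placeholder, and the writable fields shown (state, archived) mirror fields
in the response above:

    import requests

    # Partially update the patch; needs a Patchwork API token belonging to a
    # maintainer of the project (the token below is a placeholder).
    headers = {"Authorization": "Token <your-api-token>"}
    update = {"state": "accepted", "archived": True}

    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/40928/",
        json=update,
        headers=headers,
    )
    resp.raise_for_status()
    print(resp.json()["state"])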