get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces the updatable fields).

GET /api/patches/57855/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 57855,
    "url": "http://patches.dpdk.org/api/patches/57855/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1566568031-45991-12-git-send-email-xavier.huwei@huawei.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1566568031-45991-12-git-send-email-xavier.huwei@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1566568031-45991-12-git-send-email-xavier.huwei@huawei.com",
    "date": "2019-08-23T13:47:00",
    "name": "[11/22] net/hns3: add support for flow control of hns3 PMD driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "ca65c446a5b7fb5cbcb9bcd9c56f7b0c7b37ee1f",
    "submitter": {
        "id": 1405,
        "url": "http://patches.dpdk.org/api/people/1405/?format=api",
        "name": "Wei Hu (Xavier)",
        "email": "xavier.huwei@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1566568031-45991-12-git-send-email-xavier.huwei@huawei.com/mbox/",
    "series": [
        {
            "id": 6114,
            "url": "http://patches.dpdk.org/api/series/6114/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6114",
            "date": "2019-08-23T13:46:49",
            "name": "add hns3 ethernet PMD driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/6114/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/57855/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/57855/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id E9E201C030;\n\tFri, 23 Aug 2019 15:50:06 +0200 (CEST)",
            "from huawei.com (szxga07-in.huawei.com [45.249.212.35])\n\tby dpdk.org (Postfix) with ESMTP id E1C6D1BFB4\n\tfor <dev@dpdk.org>; Fri, 23 Aug 2019 15:49:38 +0200 (CEST)",
            "from DGGEMS406-HUB.china.huawei.com (unknown [172.30.72.59])\n\tby Forcepoint Email with ESMTP id C4B3E8377CD3E7F89FB8;\n\tFri, 23 Aug 2019 21:49:36 +0800 (CST)",
            "from localhost.localdomain (10.67.212.132) by\n\tDGGEMS406-HUB.china.huawei.com (10.3.19.206) with Microsoft SMTP\n\tServer id 14.3.439.0; Fri, 23 Aug 2019 21:49:28 +0800"
        ],
        "From": "\"Wei Hu (Xavier)\" <xavier.huwei@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<linuxarm@huawei.com>, <xavier_huwei@163.com>, <liudongdong3@huawei.com>,\n\t<forest.zhouchang@huawei.com>",
        "Date": "Fri, 23 Aug 2019 21:47:00 +0800",
        "Message-ID": "<1566568031-45991-12-git-send-email-xavier.huwei@huawei.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1566568031-45991-1-git-send-email-xavier.huwei@huawei.com>",
        "References": "<1566568031-45991-1-git-send-email-xavier.huwei@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.67.212.132]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH 11/22] net/hns3: add support for flow control of\n\thns3 PMD driver",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds support for MAC PAUSE flow control and priority flow\ncontrol of hns3 PMD driver. All user priorities(up) must be mapped to\ntc0 when MAC PAUSE flow control is enabled. Ups can be mapped to other\ntcs driver permit when PFC is enabled. Flow control function by default\nis turned off to ensure that app startup state is the same each time.\n\nSigned-off-by: Huisong Li <lihuisong@huawei.com>\nSigned-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>\nSigned-off-by: Chunsong Feng <fengchunsong@huawei.com>\nSigned-off-by: Min Hu (Connor) <humin29@huawei.com>\nSigned-off-by: Hao Chen <chenhao164@huawei.com>\n---\n drivers/net/hns3/hns3_dcb.c    | 1647 ++++++++++++++++++++++++++++++++++++++++\n drivers/net/hns3/hns3_dcb.h    |  166 ++++\n drivers/net/hns3/hns3_ethdev.c |  202 +++++\n 3 files changed, 2015 insertions(+)\n create mode 100644 drivers/net/hns3/hns3_dcb.c\n create mode 100644 drivers/net/hns3/hns3_dcb.h",
    "diff": "diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c\nnew file mode 100644\nindex 0000000..0644299\n--- /dev/null\n+++ b/drivers/net/hns3/hns3_dcb.c\n@@ -0,0 +1,1647 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018-2019 Hisilicon Limited.\n+ */\n+\n+#include <errno.h>\n+#include <inttypes.h>\n+#include <stdbool.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <rte_io.h>\n+#include <rte_common.h>\n+#include <rte_ethdev.h>\n+#include <rte_memcpy.h>\n+#include <rte_spinlock.h>\n+\n+#include \"hns3_logs.h\"\n+#include \"hns3_cmd.h\"\n+#include \"hns3_rss.h\"\n+#include \"hns3_fdir.h\"\n+#include \"hns3_regs.h\"\n+#include \"hns3_ethdev.h\"\n+#include \"hns3_dcb.h\"\n+\n+#define HNS3_SHAPER_BS_U_DEF\t5\n+#define HNS3_SHAPER_BS_S_DEF\t20\n+#define BW_MAX_PERCENT\t\t100\n+#define HNS3_ETHER_MAX_RATE\t100000\n+\n+/*\n+ * hns3_shaper_para_calc: calculate ir parameter for the shaper\n+ * @ir: Rate to be config, its unit is Mbps\n+ * @shaper_level: the shaper level. 
eg: port, pg, priority, queueset\n+ * @shaper_para: shaper parameter of IR shaper\n+ *\n+ * the formula:\n+ *\n+ *\t\tIR_b * (2 ^ IR_u) * 8\n+ * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)\n+ *\t\tTick * (2 ^ IR_s)\n+ *\n+ * @return: 0: calculate sucessful, negative: fail\n+ */\n+static int\n+hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,\n+\t\t      struct hns3_shaper_parameter *shaper_para)\n+{\n+#define SHAPER_DEFAULT_IR_B\t126\n+#define DIVISOR_CLK\t\t(1000 * 8)\n+#define DIVISOR_IR_B_126\t(126 * DIVISOR_CLK)\n+\n+\tconst uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {\n+\t\t6 * 256,    /* Prioriy level */\n+\t\t6 * 32,     /* Prioriy group level */\n+\t\t6 * 8,      /* Port level */\n+\t\t6 * 256     /* Qset level */\n+\t};\n+\tuint8_t ir_u_calc = 0;\n+\tuint8_t ir_s_calc = 0;\n+\tuint32_t denominator;\n+\tuint32_t ir_calc;\n+\tuint32_t tick;\n+\n+\t/* Calc tick */\n+\tif (shaper_level >= HNS3_SHAPER_LVL_CNT) {\n+\t\thns3_err(hw,\n+\t\t\t \"shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)\",\n+\t\t\t shaper_level, HNS3_SHAPER_LVL_CNT);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (ir > HNS3_ETHER_MAX_RATE) {\n+\t\thns3_err(hw, \"rate(%d) exceeds the rate driver supported \"\n+\t\t\t \"HNS3_ETHER_MAX_RATE(%d)\", ir, HNS3_ETHER_MAX_RATE);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttick = tick_array[shaper_level];\n+\n+\t/*\n+\t * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0\n+\t * the formula is changed to:\n+\t *\t\t126 * 1 * 8\n+\t * ir_calc = ---------------- * 1000\n+\t *\t\ttick * 1\n+\t */\n+\tir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;\n+\n+\tif (ir_calc == ir) {\n+\t\tshaper_para->ir_b = SHAPER_DEFAULT_IR_B;\n+\t} else if (ir_calc > ir) {\n+\t\t/* Increasing the denominator to select ir_s value */\n+\t\tdo {\n+\t\t\tir_s_calc++;\n+\t\t\tir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));\n+\t\t} while (ir_calc > ir);\n+\n+\t\tif (ir_calc == ir)\n+\t\t\tshaper_para->ir_b = 
SHAPER_DEFAULT_IR_B;\n+\t\telse\n+\t\t\tshaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +\n+\t\t\t\t (DIVISOR_CLK >> 1)) / DIVISOR_CLK;\n+\t} else {\n+\t\t/*\n+\t\t * Increasing the numerator to select ir_u value. ir_u_calc will\n+\t\t * get maximum value when ir_calc is minimum and ir is maximum.\n+\t\t * ir_calc gets minimum value when tick is the maximum value.\n+\t\t * At the same time, value of ir_u_calc can only be increased up\n+\t\t * to eight after the while loop if the value of ir is equal\n+\t\t * to HNS3_ETHER_MAX_RATE.\n+\t\t */\n+\t\tuint32_t numerator;\n+\t\tdo {\n+\t\t\tir_u_calc++;\n+\t\t\tnumerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);\n+\t\t\tir_calc = (numerator + (tick >> 1)) / tick;\n+\t\t} while (ir_calc < ir);\n+\n+\t\tif (ir_calc == ir) {\n+\t\t\tshaper_para->ir_b = SHAPER_DEFAULT_IR_B;\n+\t\t} else {\n+\t\t\t--ir_u_calc;\n+\n+\t\t\t/*\n+\t\t\t * The maximum value of ir_u_calc in this branch is\n+\t\t\t * seven in all cases. Thus, value of denominator can\n+\t\t\t * not be zero here.\n+\t\t\t */\n+\t\t\tdenominator = DIVISOR_CLK * (1 << ir_u_calc);\n+\t\t\tshaper_para->ir_b =\n+\t\t\t\t(ir * tick + (denominator >> 1)) / denominator;\n+\t\t}\n+\t}\n+\n+\tshaper_para->ir_u = ir_u_calc;\n+\tshaper_para->ir_s = ir_s_calc;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)\n+{\n+#define HNS3_HALF_BYTE_BIT_OFFSET 4\n+\tuint8_t tc = hw->dcb_info.prio_tc[pri_id];\n+\n+\tif (tc >= hw->dcb_info.num_tc)\n+\t\treturn -EINVAL;\n+\n+\t/*\n+\t * The register for priority has four bytes, the first bytes includes\n+\t *  priority0 and priority1, the higher 4bit stands for priority1\n+\t *  while the lower 4bit stands for priority0, as below:\n+\t * first byte:\t| pri_1 | pri_0 |\n+\t * second byte:\t| pri_3 | pri_2 |\n+\t * third byte:\t| pri_5 | pri_4 |\n+\t * fourth byte:\t| pri_7 | pri_6 |\n+\t */\n+\tpri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);\n+\n+\treturn 
0;\n+}\n+\n+static int\n+hns3_up_to_tc_map(struct hns3_hw *hw)\n+{\n+\tstruct hns3_cmd_desc desc;\n+\tuint8_t *pri = (uint8_t *)desc.data;\n+\tuint8_t pri_id;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);\n+\n+\tfor (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {\n+\t\tret = hns3_fill_pri_array(hw, pri, pri_id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)\n+{\n+\tstruct hns3_pg_to_pri_link_cmd *map;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);\n+\n+\tmap = (struct hns3_pg_to_pri_link_cmd *)desc.data;\n+\n+\tmap->pg_id = pg_id;\n+\tmap->pri_bit_map = pri_bit_map;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_pg_to_pri_map(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_pg_info *pg_info;\n+\tint ret, i;\n+\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < hw->dcb_info.num_pg; i++) {\n+\t\t/* Cfg pg to priority mapping */\n+\t\tpg_info = &hw->dcb_info.pg_info[i];\n+\t\tret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)\n+{\n+\tstruct hns3_qs_to_pri_link_cmd *map;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);\n+\n+\tmap = (struct hns3_qs_to_pri_link_cmd *)desc.data;\n+\n+\tmap->qs_id = rte_cpu_to_le_16(qs_id);\n+\tmap->priority = pri;\n+\tmap->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)\n+{\n+\tstruct 
hns3_qs_weight_cmd *weight;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);\n+\n+\tweight = (struct hns3_qs_weight_cmd *)desc.data;\n+\n+\tweight->qs_id = rte_cpu_to_le_16(qs_id);\n+\tweight->dwrr = dwrr;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)\n+{\n+#define DEFAULT_TC_WEIGHT\t1\n+#define DEFAULT_TC_OFFSET\t14\n+\tstruct hns3_ets_tc_weight_cmd *ets_weight;\n+\tstruct hns3_cmd_desc desc;\n+\tuint8_t i;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);\n+\tets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tstruct hns3_pg_info *pg_info;\n+\n+\t\tets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;\n+\n+\t\tif (!(hw->hw_tc_map & BIT(i)))\n+\t\t\tcontinue;\n+\n+\t\tpg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];\n+\t\tets_weight->tc_weight[i] = pg_info->tc_dwrr[i];\n+\t}\n+\n+\tets_weight->weight_offset = DEFAULT_TC_OFFSET;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)\n+{\n+\tstruct hns3_priority_weight_cmd *weight;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);\n+\n+\tweight = (struct hns3_priority_weight_cmd *)desc.data;\n+\n+\tweight->pri_id = pri_id;\n+\tweight->dwrr = dwrr;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)\n+{\n+\tstruct hns3_pg_weight_cmd *weight;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);\n+\n+\tweight = (struct hns3_pg_weight_cmd *)desc.data;\n+\n+\tweight->pg_id = pg_id;\n+\tweight->dwrr = dwrr;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+static int\n+hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)\n+{\n+\tstruct hns3_cmd_desc 
desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);\n+\n+\tif (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)\n+\t\tdesc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);\n+\telse\n+\t\tdesc.data[1] = 0;\n+\n+\tdesc.data[0] = rte_cpu_to_le_32(pg_id);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static uint32_t\n+hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,\n+\t\t\t   uint8_t bs_b, uint8_t bs_s)\n+{\n+\tuint32_t shapping_para = 0;\n+\n+\thns3_dcb_set_field(shapping_para, IR_B, ir_b);\n+\thns3_dcb_set_field(shapping_para, IR_U, ir_u);\n+\thns3_dcb_set_field(shapping_para, IR_S, ir_s);\n+\thns3_dcb_set_field(shapping_para, BS_B, bs_b);\n+\thns3_dcb_set_field(shapping_para, BS_S, bs_s);\n+\n+\treturn shapping_para;\n+}\n+\n+static int\n+hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_port_shapping_cmd *shap_cfg_cmd;\n+\tstruct hns3_shaper_parameter shaper_parameter;\n+\tuint32_t shapping_para;\n+\tuint32_t ir_u, ir_b, ir_s;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\tret = hns3_shaper_para_calc(hw, hw->mac.link_speed,\n+\t\t\t\t    HNS3_SHAPER_LVL_PORT, &shaper_parameter);\n+\tif (ret) {\n+\t\thns3_err(hw, \"calculate shaper parameter failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);\n+\tshap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;\n+\n+\tir_b = shaper_parameter.ir_b;\n+\tir_u = shaper_parameter.ir_u;\n+\tir_s = shaper_parameter.ir_s;\n+\tshapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,\n+\t\t\t\t\t\t   HNS3_SHAPER_BS_U_DEF,\n+\t\t\t\t\t\t   HNS3_SHAPER_BS_S_DEF);\n+\n+\tshap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n+\t\t\t uint8_t pg_id, uint32_t shapping_para)\n+{\n+\tstruct hns3_pg_shapping_cmd 
*shap_cfg_cmd;\n+\tenum hns3_opcode_type opcode;\n+\tstruct hns3_cmd_desc desc;\n+\n+\topcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :\n+\t\t HNS3_OPC_TM_PG_C_SHAPPING;\n+\thns3_cmd_setup_basic_desc(&desc, opcode, false);\n+\n+\tshap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;\n+\n+\tshap_cfg_cmd->pg_id = pg_id;\n+\n+\tshap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_shaper_parameter shaper_parameter;\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint32_t ir_u, ir_b, ir_s;\n+\tuint32_t shaper_para;\n+\tuint8_t i;\n+\tint ret;\n+\n+\t/* Cfg pg schd */\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\t/* Pg to pri */\n+\tfor (i = 0; i < hw->dcb_info.num_pg; i++) {\n+\t\t/* Calc shaper para */\n+\t\tret = hns3_shaper_para_calc(hw,\n+\t\t\t\t\t    hw->dcb_info.pg_info[i].bw_limit,\n+\t\t\t\t\t    HNS3_SHAPER_LVL_PG,\n+\t\t\t\t\t    &shaper_parameter);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"calculate shaper parameter failed: %d\",\n+\t\t\t\t ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tshaper_para = hns3_dcb_get_shapping_para(0, 0, 0,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n+\n+\t\tret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,\n+\t\t\t\t\t       shaper_para);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw,\n+\t\t\t\t \"config PG CIR shaper parameter failed: %d\",\n+\t\t\t\t ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tir_b = shaper_parameter.ir_b;\n+\t\tir_u = shaper_parameter.ir_u;\n+\t\tir_s = shaper_parameter.ir_s;\n+\t\tshaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n+\n+\t\tret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,\n+\t\t\t\t\t       shaper_para);\n+\t\tif (ret) 
{\n+\t\t\thns3_err(hw,\n+\t\t\t\t \"config PG PIR shaper parameter failed: %d\",\n+\t\t\t\t ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)\n+{\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);\n+\n+\tif (mode == HNS3_SCH_MODE_DWRR)\n+\t\tdesc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);\n+\telse\n+\t\tdesc.data[1] = 0;\n+\n+\tdesc.data[0] = rte_cpu_to_le_32(qs_id);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)\n+{\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);\n+\n+\tif (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)\n+\t\tdesc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);\n+\telse\n+\t\tdesc.data[1] = 0;\n+\n+\tdesc.data[0] = rte_cpu_to_le_32(pri_id);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,\n+\t\t\t  uint8_t pri_id, uint32_t shapping_para)\n+{\n+\tstruct hns3_pri_shapping_cmd *shap_cfg_cmd;\n+\tenum hns3_opcode_type opcode;\n+\tstruct hns3_cmd_desc desc;\n+\n+\topcode = bucket ? 
HNS3_OPC_TM_PRI_P_SHAPPING :\n+\t\t HNS3_OPC_TM_PRI_C_SHAPPING;\n+\n+\thns3_cmd_setup_basic_desc(&desc, opcode, false);\n+\n+\tshap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;\n+\n+\tshap_cfg_cmd->pri_id = pri_id;\n+\n+\tshap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_shaper_parameter shaper_parameter;\n+\tuint32_t ir_u, ir_b, ir_s;\n+\tuint32_t shaper_para;\n+\tint ret, i;\n+\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\tret = hns3_shaper_para_calc(hw,\n+\t\t\t\t\t    hw->dcb_info.tc_info[i].bw_limit,\n+\t\t\t\t\t    HNS3_SHAPER_LVL_PRI,\n+\t\t\t\t\t    &shaper_parameter);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"calculate shaper parameter failed: %d\",\n+\t\t\t\t ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tshaper_para = hns3_dcb_get_shapping_para(0, 0, 0,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n+\n+\t\tret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,\n+\t\t\t\t\t\tshaper_para);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw,\n+\t\t\t\t \"config priority CIR shaper parameter failed: %d\",\n+\t\t\t\t ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tir_b = shaper_parameter.ir_b;\n+\t\tir_u = shaper_parameter.ir_u;\n+\t\tir_s = shaper_parameter.ir_s;\n+\t\tshaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_U_DEF,\n+\t\t\t\t\t\t\t HNS3_SHAPER_BS_S_DEF);\n+\n+\t\tret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,\n+\t\t\t\t\t\tshaper_para);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw,\n+\t\t\t\t \"config priority PIR shaper parameter failed: %d\",\n+\t\t\t\t ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+\n+static int\n+hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret;\n+\n+\tif (pf->tx_sch_mode != 
HNS3_FLAG_TC_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\tret = hns3_dcb_pri_tc_base_shaper_cfg(hw);\n+\tif (ret)\n+\t\thns3_err(hw, \"config port shaper failed: %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+void\n+hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_tc_queue_info *tc_queue;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\ttc_queue = &hw->tc_queue[i];\n+\t\tif (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {\n+\t\t\ttc_queue->enable = true;\n+\t\t\ttc_queue->tqp_offset = i * hw->alloc_rss_size;\n+\t\t\ttc_queue->tqp_count = hw->alloc_rss_size;\n+\t\t\ttc_queue->tc = i;\n+\t\t} else {\n+\t\t\t/* Set to default queue if TC is disable */\n+\t\t\ttc_queue->enable = false;\n+\t\t\ttc_queue->tqp_offset = 0;\n+\t\t\ttc_queue->tqp_count = 0;\n+\t\t\ttc_queue->tc = 0;\n+\t\t}\n+\t}\n+}\n+\n+static void\n+hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t queue_num)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint16_t tqpnum_per_tc;\n+\tuint16_t alloc_tqps;\n+\n+\talloc_tqps = RTE_MIN(hw->tqps_num, queue_num);\n+\thw->num_tc = RTE_MIN(alloc_tqps, hw->dcb_info.num_tc);\n+\ttqpnum_per_tc = RTE_MIN(hw->rss_size_max, alloc_tqps / hw->num_tc);\n+\n+\tif (hw->alloc_rss_size != tqpnum_per_tc) {\n+\t\tPMD_INIT_LOG(INFO, \"rss size changes from %d to %d\",\n+\t\t\t     hw->alloc_rss_size, tqpnum_per_tc);\n+\t\thw->alloc_rss_size = tqpnum_per_tc;\n+\t}\n+\thw->alloc_tqps = hw->num_tc * hw->alloc_rss_size;\n+\n+\thns3_tc_queue_mapping_cfg(hw);\n+\n+\tmemcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);\n+}\n+\n+int\n+hns3_dcb_info_init(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint i, k;\n+\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&\n+\t    hw->dcb_info.num_pg != 1)\n+\t\treturn -EINVAL;\n+\n+\t/* Initializing PG information */\n+\tmemset(hw->dcb_info.pg_info, 0,\n+\t       
sizeof(struct hns3_pg_info) * HNS3_PG_NUM);\n+\tfor (i = 0; i < hw->dcb_info.num_pg; i++) {\n+\t\thw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;\n+\t\thw->dcb_info.pg_info[i].pg_id = i;\n+\t\thw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;\n+\t\thw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE;\n+\n+\t\tif (i != 0)\n+\t\t\tcontinue;\n+\n+\t\thw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;\n+\t\tfor (k = 0; k < hw->dcb_info.num_tc; k++)\n+\t\t\thw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;\n+\t}\n+\n+\t/* All UPs mapping to TC0 */\n+\tfor (i = 0; i < HNS3_MAX_USER_PRIO; i++)\n+\t\thw->dcb_info.prio_tc[i] = 0;\n+\n+\t/* Initializing tc information */\n+\tmemset(hw->dcb_info.tc_info, 0,\n+\t       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\thw->dcb_info.tc_info[i].tc_id = i;\n+\t\thw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;\n+\t\thw->dcb_info.tc_info[i].pgid = 0;\n+\t\thw->dcb_info.tc_info[i].bw_limit =\n+\t\t\thw->dcb_info.pg_info[0].bw_limit;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret, i;\n+\n+\t/* Only being config on TC-Based scheduler mode */\n+\tif (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < hw->dcb_info.num_pg; i++) {\n+\t\tret = hns3_dcb_pg_schd_mode_cfg(hw, i);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint8_t i;\n+\tint ret;\n+\n+\tif (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {\n+\t\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\t\tret = hns3_dcb_pri_schd_mode_cfg(hw, i);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\n+\t\t\tret = hns3_dcb_qs_schd_mode_cfg(hw, 
i,\n+\t\t\t\t\t\t\tHNS3_SCH_MODE_DWRR);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\tret = hns3_dcb_lvl2_schd_mode_cfg(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config lvl2_schd_mode failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_dcb_lvl34_schd_mode_cfg(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config lvl34_schd_mode failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_pg_info *pg_info;\n+\tuint8_t dwrr;\n+\tint ret, i;\n+\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\tpg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];\n+\t\tdwrr = pg_info->tc_dwrr[i];\n+\n+\t\tret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"fail to send priority weight cmd: %d\", i);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"fail to send qs_weight cmd: %d\", i);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret;\n+\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\tret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (!hns3_dev_dcb_supported(hw))\n+\t\treturn 0;\n+\n+\tret = hns3_dcb_ets_tc_dwrr_cfg(hw);\n+\tif (ret == -EOPNOTSUPP) {\n+\t\thns3_warn(hw, \"fw %08x does't support ets tc weight cmd\",\n+\t\t\t  hw->fw_version);\n+\t\tret = 0;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret, i;\n+\n+\t/* Cfg pg schd */\n+\tif (pf->tx_sch_mode != 
HNS3_FLAG_TC_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\t/* Cfg pg to prio */\n+\tfor (i = 0; i < hw->dcb_info.num_pg; i++) {\n+\t\t/* Cfg dwrr */\n+\t\tret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_dwrr_cfg(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\tret = hns3_dcb_pg_dwrr_cfg(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config pg_dwrr failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_dcb_pri_dwrr_cfg(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config pri_dwrr failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_shaper_cfg(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\tret = hns3_dcb_port_shaper_cfg(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config port shaper failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_dcb_pg_shaper_cfg(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config pg shaper failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn hns3_dcb_pri_shaper_cfg(hw);\n+}\n+\n+static int\n+hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)\n+{\n+\tstruct hns3_nq_to_qs_link_cmd *map;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);\n+\n+\tmap = (struct hns3_nq_to_qs_link_cmd *)desc.data;\n+\n+\tmap->nq_id = rte_cpu_to_le_16(q_id);\n+\tmap->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_q_to_qs_map(struct hns3_hw *hw)\n+{\n+\tstruct hns3_tc_queue_info *tc_queue;\n+\tuint16_t q_id;\n+\tuint32_t i, j;\n+\tint ret;\n+\n+\tfor (i = 0; i < hw->num_tc; i++) {\n+\t\ttc_queue = &hw->tc_queue[i];\n+\t\tfor (j = 0; j < tc_queue->tqp_count; j++) {\n+\t\t\tq_id = tc_queue->tqp_offset + j;\n+\t\t\tret = hns3_q_to_qs_map_cfg(hw, q_id, i);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_pri_q_qs_cfg(struct hns3_hw 
*hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint32_t i;\n+\tint ret;\n+\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)\n+\t\treturn -EINVAL;\n+\n+\t/* Cfg qs -> pri mapping */\n+\tfor (i = 0; i < hw->num_tc; i++) {\n+\t\tret = hns3_qs_to_pri_map_cfg(hw, i, i);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"qs_to_pri mapping fail: %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\t/* Cfg q -> qs mapping */\n+\tret = hns3_q_to_qs_map(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"nq_to_qs mapping fail: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_map_cfg(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\tret = hns3_up_to_tc_map(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"up_to_tc mapping fail: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_pg_to_pri_map(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"pri_to_pg mapping fail: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn hns3_pri_q_qs_cfg(hw);\n+}\n+\n+static int\n+hns3_dcb_schd_setup_hw(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\t/* Cfg dcb mapping  */\n+\tret = hns3_dcb_map_cfg(hw);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Cfg dcb shaper */\n+\tret = hns3_dcb_shaper_cfg(hw);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Cfg dwrr */\n+\tret = hns3_dcb_dwrr_cfg(hw);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Cfg schd mode for each level schd */\n+\treturn hns3_dcb_schd_mode_cfg(hw);\n+}\n+\n+static int\n+hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,\n+\t\t     uint8_t pause_trans_gap, uint16_t pause_trans_time)\n+{\n+\tstruct hns3_cfg_pause_param_cmd *pause_param;\n+\tstruct hns3_cmd_desc desc;\n+\n+\tpause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);\n+\n+\tmemcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);\n+\tmemcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);\n+\tpause_param->pause_trans_gap = 
pause_trans_gap;\n+\tpause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+int\n+hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)\n+{\n+\tstruct hns3_cfg_pause_param_cmd *pause_param;\n+\tstruct hns3_cmd_desc desc;\n+\tuint16_t trans_time;\n+\tuint8_t trans_gap;\n+\tint ret;\n+\n+\tpause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\ttrans_gap = pause_param->pause_trans_gap;\n+\ttrans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);\n+\n+\treturn hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);\n+}\n+\n+static int\n+hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)\n+{\n+#define PAUSE_TIME_DIV_BY\t2\n+#define PAUSE_TIME_MIN_VALUE\t0x4\n+\n+\tstruct hns3_mac *mac = &hw->mac;\n+\tuint8_t pause_trans_gap;\n+\n+\t/*\n+\t * Pause transmit gap must be less than \"pause_time / 2\", otherwise\n+\t * the behavior of MAC is undefined.\n+\t */\n+\tif (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)\n+\t\tpause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;\n+\telse if (pause_time >= PAUSE_TIME_MIN_VALUE &&\n+\t\t pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)\n+\t\tpause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;\n+\telse {\n+\t\thns3_warn(hw, \"pause_time(%d) is adjusted to 4\", pause_time);\n+\t\tpause_time = PAUSE_TIME_MIN_VALUE;\n+\t\tpause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;\n+\t}\n+\n+\treturn hns3_pause_param_cfg(hw, mac->mac_addr,\n+\t\t\t\t    pause_trans_gap, pause_time);\n+}\n+\n+static int\n+hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)\n+{\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);\n+\n+\tdesc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |\n+\t\t(rx ? 
HNS3_RX_MAC_PAUSE_EN_MSK : 0));\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)\n+{\n+\tstruct hns3_cmd_desc desc;\n+\tstruct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);\n+\n+\tpfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |\n+\t\t\t\t\t(rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));\n+\n+\tpfc->pri_en_bitmap = pfc_bitmap;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)\n+{\n+\tstruct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);\n+\n+\tbp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;\n+\n+\tbp_to_qs_map_cmd->tc_id = tc;\n+\tbp_to_qs_map_cmd->qs_group_id = grp_id;\n+\tbp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static void\n+hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)\n+{\n+\tswitch (hw->current_mode) {\n+\tcase HNS3_FC_NONE:\n+\t\t*tx_en = false;\n+\t\t*rx_en = false;\n+\t\tbreak;\n+\tcase HNS3_FC_RX_PAUSE:\n+\t\t*tx_en = false;\n+\t\t*rx_en = true;\n+\t\tbreak;\n+\tcase HNS3_FC_TX_PAUSE:\n+\t\t*tx_en = true;\n+\t\t*rx_en = false;\n+\t\tbreak;\n+\tcase HNS3_FC_FULL:\n+\t\t*tx_en = true;\n+\t\t*rx_en = true;\n+\t\tbreak;\n+\tdefault:\n+\t\t*tx_en = false;\n+\t\t*rx_en = false;\n+\t\tbreak;\n+\t}\n+}\n+\n+static int\n+hns3_mac_pause_setup_hw(struct hns3_hw *hw)\n+{\n+\tbool tx_en, rx_en;\n+\n+\tif (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)\n+\t\thns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);\n+\telse {\n+\t\ttx_en = false;\n+\t\trx_en = false;\n+\t}\n+\n+\treturn hns3_mac_pause_en_cfg(hw, tx_en, rx_en);\n+}\n+\n+static int\n+hns3_pfc_setup_hw(struct hns3_hw 
*hw)\n+{\n+\tbool tx_en, rx_en;\n+\n+\tif (hw->current_fc_status == HNS3_FC_STATUS_PFC)\n+\t\thns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);\n+\telse {\n+\t\ttx_en = false;\n+\t\trx_en = false;\n+\t}\n+\n+\treturn hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);\n+}\n+\n+/*\n+ * Each Tc has a 1024 queue sets to backpress, it divides to\n+ * 32 group, each group contains 32 queue sets, which can be\n+ * represented by uint32_t bitmap.\n+ */\n+static int\n+hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)\n+{\n+\tuint32_t qs_bitmap;\n+\tint ret;\n+\tint i;\n+\n+\tfor (i = 0; i < HNS3_BP_GRP_NUM; i++) {\n+\t\tuint8_t grp, sub_grp;\n+\t\tqs_bitmap = 0;\n+\n+\t\tgrp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);\n+\t\tsub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,\n+\t\t\t\t\t HNS3_BP_SUB_GRP_ID_S);\n+\t\tif (i == grp)\n+\t\t\tqs_bitmap |= (1 << sub_grp);\n+\n+\t\tret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_bp_setup(struct hns3_hw *hw)\n+{\n+\tint ret, i;\n+\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\tret = hns3_bp_setup_hw(hw, i);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_dcb_pause_setup_hw(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret;\n+\n+\tret = hns3_pause_param_setup_hw(hw, pf->pause_time);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Fail to set pause parameter. ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_mac_pause_setup_hw(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Fail to setup MAC pause. ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\t/* Only DCB-supported dev supports qset back pressure and pfc cmd */\n+\tif (!hns3_dev_dcb_supported(hw))\n+\t\treturn 0;\n+\n+\tret = hns3_pfc_setup_hw(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"config pfc failed! 
ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn hns3_dcb_bp_setup(hw);\n+}\n+\n+static uint8_t\n+hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)\n+{\n+\tuint8_t pfc_map = 0;\n+\tuint8_t *prio_tc;\n+\tuint8_t i, j;\n+\n+\tprio_tc = hw->dcb_info.prio_tc;\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\tfor (j = 0; j < HNS3_MAX_USER_PRIO; j++) {\n+\t\t\tif (prio_tc[j] == i && pfc_en & BIT(j)) {\n+\t\t\t\tpfc_map |= BIT(i);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn pfc_map;\n+}\n+\n+static void\n+hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)\n+{\n+\tstruct rte_eth_dcb_rx_conf *dcb_rx_conf;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tuint8_t max_tc = 0;\n+\tuint8_t pfc_en;\n+\tint i;\n+\n+\tdcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;\n+\tfor (i = 0; i < HNS3_MAX_USER_PRIO; i++) {\n+\t\tif (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])\n+\t\t\t*changed = true;\n+\n+\t\tif (dcb_rx_conf->dcb_tc[i] > max_tc)\n+\t\t\tmax_tc = dcb_rx_conf->dcb_tc[i];\n+\t}\n+\t*tc = max_tc + 1;\n+\tif (*tc != hw->dcb_info.num_tc)\n+\t\t*changed = true;\n+\n+\t/*\n+\t * We ensure that dcb information can be reconfigured\n+\t * after the hns3_priority_flow_ctrl_set function called.\n+\t */\n+\tif (hw->current_mode != HNS3_FC_FULL)\n+\t\t*changed = true;\n+\tpfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);\n+\tif (hw->dcb_info.pfc_en != pfc_en)\n+\t\t*changed = true;\n+}\n+\n+static void\n+hns3_dcb_info_cfg(struct hns3_adapter *hns)\n+{\n+\tstruct rte_eth_dcb_rx_conf *dcb_rx_conf;\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tuint8_t tc_bw, bw_rest;\n+\tuint8_t i, j;\n+\n+\tdcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;\n+\tpf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;\n+\tpf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;\n+\n+\t/* Config pg0 */\n+\tmemset(hw->dcb_info.pg_info, 0,\n+\t       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);\n+\thw->dcb_info.pg_dwrr[0] = 
BW_MAX_PERCENT;\n+\thw->dcb_info.pg_info[0].pg_id = 0;\n+\thw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;\n+\thw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE;\n+\thw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;\n+\n+\t/* Each tc has same bw for valid tc by default */\n+\ttc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++)\n+\t\thw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;\n+\t/* To ensure the sum of tc_dwrr is equal to 100 */\n+\tbw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;\n+\tfor (j = 0; j < bw_rest; j++)\n+\t\thw->dcb_info.pg_info[0].tc_dwrr[j]++;\n+\tfor (; i < dcb_rx_conf->nb_tcs; i++)\n+\t\thw->dcb_info.pg_info[0].tc_dwrr[i] = 0;\n+\n+\t/* All tcs map to pg0 */\n+\tmemset(hw->dcb_info.tc_info, 0,\n+\t       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++) {\n+\t\thw->dcb_info.tc_info[i].tc_id = i;\n+\t\thw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;\n+\t\thw->dcb_info.tc_info[i].pgid = 0;\n+\t\thw->dcb_info.tc_info[i].bw_limit =\n+\t\t\t\t\thw->dcb_info.pg_info[0].bw_limit;\n+\t}\n+\n+\tfor (i = 0; i < HNS3_MAX_USER_PRIO; i++)\n+\t\thw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];\n+\n+\thns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues);\n+}\n+\n+static void\n+hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)\n+{\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tuint8_t bit_map = 0;\n+\tuint8_t i;\n+\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&\n+\t    hw->dcb_info.num_pg != 1)\n+\t\treturn;\n+\n+\t/* Currently not support uncontinuous tc */\n+\thw->dcb_info.num_tc = num_tc;\n+\tfor (i = 0; i < hw->dcb_info.num_tc; i++)\n+\t\tbit_map |= BIT(i);\n+\n+\tif (!bit_map) {\n+\t\tbit_map = 1;\n+\t\thw->dcb_info.num_tc = 1;\n+\t}\n+\n+\thw->hw_tc_map = bit_map;\n+\n+\thns3_dcb_info_cfg(hns);\n+}\n+\n+static int\n+hns3_dcb_hw_configure(struct hns3_adapter *hns)\n+{\n+\tstruct 
rte_eth_dcb_rx_conf *dcb_rx_conf;\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tenum hns3_fc_status fc_status = hw->current_fc_status;\n+\tenum hns3_fc_mode current_mode = hw->current_mode;\n+\tuint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;\n+\tint ret, status;\n+\n+\tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&\n+\t    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)\n+\t\treturn -ENOTSUP;\n+\n+\tret = hns3_dcb_schd_setup_hw(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"dcb schdule configure failed! ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tif (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {\n+\t\tdcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;\n+\t\tif (dcb_rx_conf->nb_tcs == 0)\n+\t\t\thw->dcb_info.pfc_en = 1; /* tc0 only */\n+\t\telse\n+\t\t\thw->dcb_info.pfc_en =\n+\t\t\tRTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);\n+\n+\t\thw->dcb_info.hw_pfc_map =\n+\t\t\t\thns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);\n+\n+\t\tret = hns3_buffer_alloc(hw);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\thw->current_fc_status = HNS3_FC_STATUS_PFC;\n+\t\thw->current_mode = HNS3_FC_FULL;\n+\t\tret = hns3_dcb_pause_setup_hw(hw);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"setup pfc failed! ret = %d\", ret);\n+\t\t\tgoto pfc_setup_fail;\n+\t\t}\n+\t} else {\n+\t\t/*\n+\t\t * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT\n+\t\t * flag, the DCB information is configured, such as tc numbers.\n+\t\t * Therefore, refreshing the allocation of packet buffer is\n+\t\t * necessary.\n+\t\t */\n+\t\tret = hns3_buffer_alloc(hw);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+\n+pfc_setup_fail:\n+\thw->current_mode = current_mode;\n+\thw->current_fc_status = fc_status;\n+\thw->dcb_info.hw_pfc_map = hw_pfc_map;\n+\tstatus = hns3_buffer_alloc(hw);\n+\tif (status)\n+\t\thns3_err(hw, \"recover packet buffer fail! 
status = %d\", status);\n+\n+\treturn ret;\n+}\n+\n+/*\n+ * hns3_dcb_configure - setup dcb related config\n+ * @hns: pointer to hns3 adapter\n+ * Returns 0 on success, negative value on failure.\n+ */\n+int\n+hns3_dcb_configure(struct hns3_adapter *hns)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tbool map_changed = false;\n+\tuint8_t num_tc = 0;\n+\tint ret;\n+\n+\thns3_dcb_cfg_validate(hns, &num_tc, &map_changed);\n+\tif (map_changed || rte_atomic16_read(&hw->reset.resetting)) {\n+\t\thns3_dcb_info_update(hns, num_tc);\n+\t\tret = hns3_dcb_hw_configure(hns);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"dcb sw configure fails: %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+hns3_dcb_init_hw(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\tret = hns3_dcb_schd_setup_hw(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"dcb schedule setup failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_dcb_pause_setup_hw(hw);\n+\tif (ret)\n+\t\thns3_err(hw, \"PAUSE setup failed: %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+int\n+hns3_dcb_init(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/*\n+\t * According to the 'adapter_state' identifier, the following branch\n+\t * is only executed to initialize default configurations of dcb during\n+\t * the initializing driver process. 
Due to driver saving dcb-related\n+\t * information before reset triggered, the reinit dev stage of the\n+\t * reset process can not access to the branch, or those information\n+\t * will be changed.\n+\t */\n+\tif (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {\n+\t\thw->requested_mode = HNS3_FC_NONE;\n+\t\thw->current_mode = hw->requested_mode;\n+\t\tpf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;\n+\t\thw->current_fc_status = HNS3_FC_STATUS_NONE;\n+\n+\t\tret = hns3_dcb_info_init(hw);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"dcb info init failed: %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t\thns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num);\n+\t}\n+\n+\t/*\n+\t * DCB hardware will be configured by following the function during\n+\t * the initializing driver process and the reset process. However,\n+\t * driver will restore directly configurations of dcb hardware based\n+\t * on dcb-related information soft maintained when driver\n+\t * initialization has finished and reset is coming.\n+\t */\n+\tret = hns3_dcb_init_hw(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"dcb init hardware failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_update_queue_map_configure(struct hns3_adapter *hns)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tuint16_t queue_num = hw->data->nb_rx_queues;\n+\tint ret;\n+\n+\thns3_dcb_update_tc_queue_mapping(hw, queue_num);\n+\tret = hns3_q_to_qs_map(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"failed to map nq to qs! 
ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+hns3_dcb_cfg_update(struct hns3_adapter *hns)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tenum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;\n+\tint ret;\n+\n+\tif ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {\n+\t\tret = hns3_dcb_configure(hns);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"Failed to config dcb: %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\t/*\n+\t\t * Update queue map without PFC configuration,\n+\t\t * due to queues reconfigured by user.\n+\t\t */\n+\t\tret = hns3_update_queue_map_configure(hns);\n+\t\tif (ret)\n+\t\t\thns3_err(hw,\n+\t\t\t\t \"Failed to update queue mapping configure: %d\",\n+\t\t\t\t ret);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/*\n+ * hns3_dcb_pfc_enable - Enable priority flow control\n+ * @dev: pointer to ethernet device\n+ *\n+ * Configures the pfc settings for one porority.\n+ */\n+int\n+hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tenum hns3_fc_status fc_status = hw->current_fc_status;\n+\tenum hns3_fc_mode current_mode = hw->current_mode;\n+\tuint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;\n+\tuint8_t pfc_en = hw->dcb_info.pfc_en;\n+\tuint8_t priority = pfc_conf->priority;\n+\tuint16_t pause_time = pf->pause_time;\n+\tint ret, status;\n+\n+\tpf->pause_time = pfc_conf->fc.pause_time;\n+\thw->current_mode = hw->requested_mode;\n+\thw->current_fc_status = HNS3_FC_STATUS_PFC;\n+\thw->dcb_info.pfc_en |= BIT(priority);\n+\thw->dcb_info.hw_pfc_map =\n+\t\t\thns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);\n+\tret = hns3_buffer_alloc(hw);\n+\tif (ret)\n+\t\tgoto pfc_setup_fail;\n+\n+\t/*\n+\t * The flow control mode of all UPs will be changed based on\n+\t * current_mode coming from user.\n+\t */\n+\tret = hns3_dcb_pause_setup_hw(hw);\n+\tif (ret) 
{\n+\t\thns3_err(hw, \"enable pfc failed! ret = %d\", ret);\n+\t\tgoto pfc_setup_fail;\n+\t}\n+\n+\treturn 0;\n+\n+pfc_setup_fail:\n+\thw->current_mode = current_mode;\n+\thw->current_fc_status = fc_status;\n+\tpf->pause_time = pause_time;\n+\thw->dcb_info.pfc_en = pfc_en;\n+\thw->dcb_info.hw_pfc_map = hw_pfc_map;\n+\tstatus = hns3_buffer_alloc(hw);\n+\tif (status)\n+\t\thns3_err(hw, \"recover packet buffer fail: %d\", status);\n+\n+\treturn ret;\n+}\n+\n+/*\n+ * hns3_fc_enable - Enable MAC pause\n+ * @dev: pointer to ethernet device\n+ *\n+ * Configures the MAC pause settings.\n+ */\n+int\n+hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tenum hns3_fc_status fc_status = hw->current_fc_status;\n+\tenum hns3_fc_mode current_mode = hw->current_mode;\n+\tuint16_t pause_time = pf->pause_time;\n+\tint ret;\n+\n+\tpf->pause_time = fc_conf->pause_time;\n+\thw->current_mode = hw->requested_mode;\n+\n+\t/*\n+\t * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode\n+\t * of flow control is configured to be HNS3_FC_NONE.\n+\t */\n+\tif (hw->current_mode == HNS3_FC_NONE)\n+\t\thw->current_fc_status = HNS3_FC_STATUS_NONE;\n+\telse\n+\t\thw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;\n+\n+\tret = hns3_dcb_pause_setup_hw(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"enable MAC Pause failed! 
ret = %d\", ret);\n+\t\tgoto setup_fc_fail;\n+\t}\n+\n+\treturn 0;\n+\n+setup_fc_fail:\n+\thw->current_mode = current_mode;\n+\thw->current_fc_status = fc_status;\n+\tpf->pause_time = pause_time;\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h\nnew file mode 100644\nindex 0000000..9ec4e70\n--- /dev/null\n+++ b/drivers/net/hns3/hns3_dcb.h\n@@ -0,0 +1,166 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018-2019 Hisilicon Limited.\n+ */\n+\n+#ifndef _HNS3_DCB_H_\n+#define _HNS3_DCB_H_\n+\n+/* MAC Pause */\n+#define HNS3_TX_MAC_PAUSE_EN_MSK\tBIT(0)\n+#define HNS3_RX_MAC_PAUSE_EN_MSK\tBIT(1)\n+\n+#define HNS3_DEFAULT_PAUSE_TRANS_GAP\t0x18\n+#define HNS3_DEFAULT_PAUSE_TRANS_TIME\t0xFFFF\n+\n+/* SP or DWRR */\n+#define HNS3_DCB_TX_SCHD_DWRR_MSK\tBIT(0)\n+#define HNS3_DCB_TX_SCHD_SP_MSK\t\t(0xFE)\n+\n+enum hns3_shap_bucket {\n+\tHNS3_DCB_SHAP_C_BUCKET = 0,\n+\tHNS3_DCB_SHAP_P_BUCKET,\n+};\n+\n+struct hns3_priority_weight_cmd {\n+\tuint8_t pri_id;\n+\tuint8_t dwrr;\n+};\n+\n+struct hns3_qs_weight_cmd {\n+\tuint16_t qs_id;\n+\tuint8_t dwrr;\n+};\n+\n+struct hns3_pg_weight_cmd {\n+\tuint8_t pg_id;\n+\tuint8_t dwrr;\n+};\n+\n+struct hns3_ets_tc_weight_cmd {\n+\tuint8_t tc_weight[HNS3_MAX_TC_NUM];\n+\tuint8_t weight_offset;\n+\tuint8_t rsvd[15];\n+};\n+\n+struct hns3_qs_to_pri_link_cmd {\n+\tuint16_t qs_id;\n+\tuint16_t rsvd;\n+\tuint8_t priority;\n+#define HNS3_DCB_QS_PRI_LINK_VLD_MSK\tBIT(0)\n+\tuint8_t link_vld;\n+};\n+\n+struct hns3_nq_to_qs_link_cmd {\n+\tuint16_t nq_id;\n+\tuint16_t rsvd;\n+#define HNS3_DCB_Q_QS_LINK_VLD_MSK\tBIT(10)\n+\tuint16_t qset_id;\n+};\n+\n+#define HNS3_DCB_SHAP_IR_B_MSK  GENMASK(7, 0)\n+#define HNS3_DCB_SHAP_IR_B_LSH\t0\n+#define HNS3_DCB_SHAP_IR_U_MSK  GENMASK(11, 8)\n+#define HNS3_DCB_SHAP_IR_U_LSH\t8\n+#define HNS3_DCB_SHAP_IR_S_MSK  GENMASK(15, 12)\n+#define HNS3_DCB_SHAP_IR_S_LSH\t12\n+#define HNS3_DCB_SHAP_BS_B_MSK  GENMASK(20, 16)\n+#define HNS3_DCB_SHAP_BS_B_LSH\t16\n+#define 
HNS3_DCB_SHAP_BS_S_MSK  GENMASK(25, 21)\n+#define HNS3_DCB_SHAP_BS_S_LSH\t21\n+\n+struct hns3_pri_shapping_cmd {\n+\tuint8_t pri_id;\n+\tuint8_t rsvd[3];\n+\tuint32_t pri_shapping_para;\n+};\n+\n+struct hns3_pg_shapping_cmd {\n+\tuint8_t pg_id;\n+\tuint8_t rsvd[3];\n+\tuint32_t pg_shapping_para;\n+};\n+\n+#define HNS3_BP_GRP_NUM\t\t32\n+#define HNS3_BP_SUB_GRP_ID_S\t\t0\n+#define HNS3_BP_SUB_GRP_ID_M\t\tGENMASK(4, 0)\n+#define HNS3_BP_GRP_ID_S\t\t5\n+#define HNS3_BP_GRP_ID_M\t\tGENMASK(9, 5)\n+struct hns3_bp_to_qs_map_cmd {\n+\tuint8_t tc_id;\n+\tuint8_t rsvd[2];\n+\tuint8_t qs_group_id;\n+\tuint32_t qs_bit_map;\n+\tuint32_t rsvd1;\n+};\n+\n+struct hns3_pfc_en_cmd {\n+\tuint8_t tx_rx_en_bitmap;\n+\tuint8_t pri_en_bitmap;\n+};\n+\n+struct hns3_port_shapping_cmd {\n+\tuint32_t port_shapping_para;\n+};\n+\n+struct hns3_cfg_pause_param_cmd {\n+\tuint8_t mac_addr[RTE_ETHER_ADDR_LEN];\n+\tuint8_t pause_trans_gap;\n+\tuint8_t rsvd;\n+\tuint16_t pause_trans_time;\n+\tuint8_t rsvd1[6];\n+\t/* extra mac address to do double check for pause frame */\n+\tuint8_t mac_addr_extra[RTE_ETHER_ADDR_LEN];\n+\tuint16_t rsvd2;\n+};\n+\n+struct hns3_pg_to_pri_link_cmd {\n+\tuint8_t pg_id;\n+\tuint8_t rsvd1[3];\n+\tuint8_t pri_bit_map;\n+};\n+\n+enum hns3_shaper_level {\n+\tHNS3_SHAPER_LVL_PRI\t= 0,\n+\tHNS3_SHAPER_LVL_PG\t= 1,\n+\tHNS3_SHAPER_LVL_PORT\t= 2,\n+\tHNS3_SHAPER_LVL_QSET\t= 3,\n+\tHNS3_SHAPER_LVL_CNT\t= 4,\n+\tHNS3_SHAPER_LVL_VF\t= 0,\n+\tHNS3_SHAPER_LVL_PF\t= 1,\n+};\n+\n+struct hns3_shaper_parameter {\n+\tuint32_t ir_b;  /* IR_B parameter of IR shaper */\n+\tuint32_t ir_u;  /* IR_U parameter of IR shaper */\n+\tuint32_t ir_s;  /* IR_S parameter of IR shaper */\n+};\n+\n+#define hns3_dcb_set_field(dest, string, val) \\\n+\t\t\t   hns3_set_field((dest), \\\n+\t\t\t   (HNS3_DCB_SHAP_##string##_MSK), \\\n+\t\t\t   (HNS3_DCB_SHAP_##string##_LSH), val)\n+#define hns3_dcb_get_field(src, string) \\\n+\t\t\thns3_get_field((src), (HNS3_DCB_SHAP_##string##_MSK), \\\n+\t\t\t\t       
(HNS3_DCB_SHAP_##string##_LSH))\n+\n+int hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr);\n+\n+int hns3_dcb_configure(struct hns3_adapter *hns);\n+\n+int hns3_dcb_init(struct hns3_hw *hw);\n+\n+int hns3_dcb_init_hw(struct hns3_hw *hw);\n+\n+int hns3_dcb_info_init(struct hns3_hw *hw);\n+\n+int\n+hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);\n+\n+int\n+hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf);\n+\n+void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw);\n+\n+int hns3_dcb_cfg_update(struct hns3_adapter *hns);\n+\n+#endif /* _HNS3_DCB_H_ */\ndiff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c\nindex 723ada1..a91c1cd 100644\n--- a/drivers/net/hns3/hns3_ethdev.c\n+++ b/drivers/net/hns3/hns3_ethdev.c\n@@ -35,6 +35,7 @@\n #include \"hns3_ethdev.h\"\n #include \"hns3_logs.h\"\n #include \"hns3_regs.h\"\n+#include \"hns3_dcb.h\"\n \n #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE\t32\n #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM\t1\n@@ -2616,6 +2617,12 @@ hns3_init_hardware(struct hns3_adapter *hns)\n \t\tgoto err_mac_init;\n \t}\n \n+\tret = hns3_dcb_init(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init dcb: %d\", ret);\n+\t\tgoto err_mac_init;\n+\t}\n+\n \tret = hns3_init_fd_config(hns);\n \tif (ret) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to init flow director: %d\", ret);\n@@ -2738,11 +2745,200 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)\n \thw->adapter_state = HNS3_NIC_CLOSED;\n }\n \n+static int\n+hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\n+\tfc_conf->pause_time = pf->pause_time;\n+\n+\t/* return fc current mode */\n+\tswitch (hw->current_mode) {\n+\tcase HNS3_FC_FULL:\n+\t\tfc_conf->mode = RTE_FC_FULL;\n+\t\tbreak;\n+\tcase HNS3_FC_TX_PAUSE:\n+\t\tfc_conf->mode = 
RTE_FC_TX_PAUSE;\n+\t\tbreak;\n+\tcase HNS3_FC_RX_PAUSE:\n+\t\tfc_conf->mode = RTE_FC_RX_PAUSE;\n+\t\tbreak;\n+\tcase HNS3_FC_NONE:\n+\tdefault:\n+\t\tfc_conf->mode = RTE_FC_NONE;\n+\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)\n+{\n+\tswitch (mode) {\n+\tcase RTE_FC_NONE:\n+\t\thw->requested_mode = HNS3_FC_NONE;\n+\t\tbreak;\n+\tcase RTE_FC_RX_PAUSE:\n+\t\thw->requested_mode = HNS3_FC_RX_PAUSE;\n+\t\tbreak;\n+\tcase RTE_FC_TX_PAUSE:\n+\t\thw->requested_mode = HNS3_FC_TX_PAUSE;\n+\t\tbreak;\n+\tcase RTE_FC_FULL:\n+\t\thw->requested_mode = HNS3_FC_FULL;\n+\t\tbreak;\n+\tdefault:\n+\t\thw->requested_mode = HNS3_FC_NONE;\n+\t\thns3_warn(hw, \"fc_mode(%u) exceeds member scope and is \"\n+\t\t\t  \"configured to RTE_FC_NONE\", mode);\n+\t\tbreak;\n+\t}\n+}\n+\n+static int\n+hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tint ret;\n+\n+\tif (fc_conf->high_water || fc_conf->low_water ||\n+\t    fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {\n+\t\thns3_err(hw, \"Unsupported flow control settings specified, \"\n+\t\t\t \"high_water(%u), low_water(%u), send_xon(%u) and \"\n+\t\t\t \"mac_ctrl_frame_fwd(%u) must be set to '0'\",\n+\t\t\t fc_conf->high_water, fc_conf->low_water,\n+\t\t\t fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (fc_conf->autoneg) {\n+\t\thns3_err(hw, \"Unsupported fc auto-negotiation setting.\");\n+\t\treturn -EINVAL;\n+\t}\n+\tif (!fc_conf->pause_time) {\n+\t\thns3_err(hw, \"Invalid pause time %d setting.\",\n+\t\t\t fc_conf->pause_time);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||\n+\t    hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {\n+\t\thns3_err(hw, \"PFC is enabled. Cannot set MAC pause. 
\"\n+\t\t\t \"current_fc_status = %d\", hw->current_fc_status);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\thns3_get_fc_mode(hw, fc_conf->mode);\n+\tif (hw->requested_mode == hw->current_mode &&\n+\t    pf->pause_time == fc_conf->pause_time)\n+\t\treturn 0;\n+\n+\trte_spinlock_lock(&hw->lock);\n+\tret = hns3_fc_enable(dev, fc_conf);\n+\trte_spinlock_unlock(&hw->lock);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,\n+\t\t\t    struct rte_eth_pfc_conf *pfc_conf)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tuint8_t priority;\n+\tint ret;\n+\n+\tif (!hns3_dev_dcb_supported(hw)) {\n+\t\thns3_err(hw, \"This port does not support dcb configurations.\");\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\tif (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||\n+\t    pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {\n+\t\thns3_err(hw, \"Unsupported flow control settings specified, \"\n+\t\t\t \"high_water(%u), low_water(%u), send_xon(%u) and \"\n+\t\t\t \"mac_ctrl_frame_fwd(%u) must be set to '0'\",\n+\t\t\t pfc_conf->fc.high_water, pfc_conf->fc.low_water,\n+\t\t\t pfc_conf->fc.send_xon,\n+\t\t\t pfc_conf->fc.mac_ctrl_frame_fwd);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (pfc_conf->fc.autoneg) {\n+\t\thns3_err(hw, \"Unsupported fc auto-negotiation setting.\");\n+\t\treturn -EINVAL;\n+\t}\n+\tif (pfc_conf->fc.pause_time == 0) {\n+\t\thns3_err(hw, \"Invalid pause time %d setting.\",\n+\t\t\t pfc_conf->fc.pause_time);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||\n+\t    hw->current_fc_status == HNS3_FC_STATUS_PFC)) {\n+\t\thns3_err(hw, \"MAC pause is enabled. 
Cannot set PFC.\"\n+\t\t\t     \"current_fc_status = %d\", hw->current_fc_status);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\tpriority = pfc_conf->priority;\n+\thns3_get_fc_mode(hw, pfc_conf->fc.mode);\n+\tif (hw->dcb_info.pfc_en & BIT(priority) &&\n+\t    hw->requested_mode == hw->current_mode &&\n+\t    pfc_conf->fc.pause_time == pf->pause_time)\n+\t\treturn 0;\n+\n+\trte_spinlock_lock(&hw->lock);\n+\tret = hns3_dcb_pfc_enable(dev, pfc_conf);\n+\trte_spinlock_unlock(&hw->lock);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tenum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;\n+\tint i;\n+\n+\trte_spinlock_lock(&hw->lock);\n+\tif ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)\n+\t\tdcb_info->nb_tcs = pf->local_max_tc;\n+\telse\n+\t\tdcb_info->nb_tcs = 1;\n+\n+\tfor (i = 0; i < HNS3_MAX_USER_PRIO; i++)\n+\t\tdcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];\n+\tfor (i = 0; i < dcb_info->nb_tcs; i++)\n+\t\tdcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tdcb_info->tc_queue.tc_rxq[0][i].base =\n+\t\t\t\t\thw->tc_queue[i].tqp_offset;\n+\t\tdcb_info->tc_queue.tc_txq[0][i].base =\n+\t\t\t\t\thw->tc_queue[i].tqp_offset;\n+\t\tdcb_info->tc_queue.tc_rxq[0][i].nb_queue =\n+\t\t\t\t\thw->tc_queue[i].tqp_count;\n+\t\tdcb_info->tc_queue.tc_txq[0][i].nb_queue =\n+\t\t\t\t\thw->tc_queue[i].tqp_count;\n+\t}\n+\trte_spinlock_unlock(&hw->lock);\n+\n+\treturn 0;\n+}\n+\n static const struct eth_dev_ops hns3_eth_dev_ops = {\n \t.dev_close          = hns3_dev_close,\n \t.mtu_set            = hns3_dev_mtu_set,\n \t.dev_infos_get          = hns3_dev_infos_get,\n \t.fw_version_get         = hns3_fw_version_get,\n+\t.flow_ctrl_get          = hns3_flow_ctrl_get,\n+\t.flow_ctrl_set          = 
hns3_flow_ctrl_set,\n+\t.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,\n \t.mac_addr_add           = hns3_add_mac_addr,\n \t.mac_addr_remove        = hns3_remove_mac_addr,\n \t.mac_addr_set           = hns3_set_default_mac_addr,\n@@ -2753,6 +2949,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {\n \t.reta_update            = hns3_dev_rss_reta_update,\n \t.reta_query             = hns3_dev_rss_reta_query,\n \t.filter_ctrl            = hns3_dev_filter_ctrl,\n+\t.get_dcb_info           = hns3_get_dcb_info,\n };\n \n static int\n@@ -2783,6 +2980,11 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)\n \teth_dev->dev_ops = &hns3_eth_dev_ops;\n \trte_eth_copy_pci_info(eth_dev, pci_dev);\n \n+\tif (device_id == HNS3_DEV_ID_25GE_RDMA ||\n+\t    device_id == HNS3_DEV_ID_50GE_RDMA ||\n+\t    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC)\n+\t\thns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1);\n+\n \thns->is_vf = false;\n \thw->data = eth_dev->data;\n \n",
    "prefixes": [
        "11/22"
    ]
}