From patchwork Mon Sep 30 14:00:39 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60188 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5717D37B4; Mon, 30 Sep 2019 15:46:25 +0200 (CEST) Received: from huawei.com (szxga05-in.huawei.com [45.249.212.191]) by dpdk.org (Postfix) with ESMTP id 984EC3423 for ; Mon, 30 Sep 2019 15:46:23 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.59]) by Forcepoint Email with ESMTP id 3A6CA94F9D60038EF787 for ; Mon, 30 Sep 2019 21:46:21 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:46:13 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:39 +0800 Message-ID: <389732a67827393a53af416ca94081e80b041e29.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 01/19] net/hinic/base: add mbox command channel for SRIOV X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add mbox command channel for SR-IOV, which is used to communicate between VF and VF, VF and PF. This patch introduces data structures, initialization, interfaces and commands of mbox channel. 
Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 1 + doc/guides/nics/hinic.rst | 1 + drivers/net/hinic/Makefile | 1 + drivers/net/hinic/base/hinic_compat.h | 34 +- drivers/net/hinic/base/hinic_pmd_hwdev.h | 5 +- drivers/net/hinic/base/hinic_pmd_mbox.c | 938 +++++++++++++++++++++++++++++++ drivers/net/hinic/base/hinic_pmd_mbox.h | 93 +++ drivers/net/hinic/base/meson.build | 1 + 8 files changed, 1071 insertions(+), 3 deletions(-) create mode 100644 drivers/net/hinic/base/hinic_pmd_mbox.c create mode 100644 drivers/net/hinic/base/hinic_pmd_mbox.h diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index fe063d6..c858411 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -19,6 +19,7 @@ RSS hash = Y RSS key update = Y RSS reta update = Y Inner RSS = Y +SR-IOV = Y CRC offload = Y L3 checksum offload = Y L4 checksum offload = Y diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index c9329bc..c3ce101 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -24,6 +24,7 @@ Features - Link state information - Link flow control - Scattered and gather for TX and RX +- SR-IOV - Partially supported at this point, VFIO only Prerequisites ------------- diff --git a/drivers/net/hinic/Makefile b/drivers/net/hinic/Makefile index 42b4a78..20a338e 100644 --- a/drivers/net/hinic/Makefile +++ b/drivers/net/hinic/Makefile @@ -59,6 +59,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mgmt.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_niccfg.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_nicio.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_wq.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mbox.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c diff --git a/drivers/net/hinic/base/hinic_compat.h b/drivers/net/hinic/base/hinic_compat.h index f599947..db60a51 100644 --- 
a/drivers/net/hinic/base/hinic_compat.h +++ b/drivers/net/hinic/base/hinic_compat.h @@ -227,7 +227,7 @@ static inline u16 ilog2(u32 n) * hinic_cpu_to_be32 - convert data to big endian 32 bit format * @data: the data to convert * @len: length of data to convert, must be Multiple of 4B - **/ + */ static inline void hinic_cpu_to_be32(void *data, u32 len) { u32 i; @@ -243,7 +243,7 @@ static inline void hinic_cpu_to_be32(void *data, u32 len) * hinic_be32_to_cpu - convert data from big endian 32 bit format * @data: the data to convert * @len: length of data to convert, must be Multiple of 4B - **/ + */ static inline void hinic_be32_to_cpu(void *data, u32 len) { u32 i; @@ -278,4 +278,34 @@ static inline int hinic_mutex_destroy(pthread_mutex_t *pthreadmutex) return err; } +static inline int hinic_mutex_lock(pthread_mutex_t *pthreadmutex) +{ + int err; + + err = pthread_mutex_lock(pthreadmutex); + if (!err) { + return err; + } else if (err == EOWNERDEAD) { + PMD_DRV_LOG(ERR, "Mutex lock failed. (ErrorNo=%d)", errno); +#if defined(__GLIBC__) +#if __GLIBC_PREREQ(2, 12) + (void)pthread_mutex_consistent(pthreadmutex); +#else + (void)pthread_mutex_consistent_np(pthreadmutex); +#endif +#else + (void)pthread_mutex_consistent(pthreadmutex); +#endif + } else { + PMD_DRV_LOG(ERR, "Mutex lock failed. 
(ErrorNo=%d)", errno); + } + + return err; +} + +static inline int hinic_mutex_unlock(pthread_mutex_t *pthreadmutex) +{ + return pthread_mutex_unlock(pthreadmutex); +} + #endif /* _HINIC_COMPAT_H_ */ diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.h b/drivers/net/hinic/base/hinic_pmd_hwdev.h index 6c21c47..fc5bdf9 100644 --- a/drivers/net/hinic/base/hinic_pmd_hwdev.h +++ b/drivers/net/hinic/base/hinic_pmd_hwdev.h @@ -7,13 +7,15 @@ #include "hinic_pmd_cmd.h" -#define HINIC_PAGE_SIZE_MAX 20 +#define HINIC_PAGE_SIZE_MAX 20 #define HINIC_MGMT_CMD_UNSUPPORTED 0xFF #define HINIC_PF_SET_VF_ALREADY 0x4 #define MAX_PCIE_DFX_BUF_SIZE 1024 +#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE + /* dma pool */ struct dma_pool { u32 inuse; @@ -436,6 +438,7 @@ struct hinic_hwdev { struct hinic_hwif *hwif; struct cfg_mgmt_info *cfg_mgmt; struct hinic_aeqs *aeqs; + struct hinic_mbox_func_to_func *func_to_func; struct hinic_msg_pf_to_mgmt *pf_to_mgmt; struct hinic_cmdqs *cmdqs; struct hinic_nic_io *nic_io; diff --git a/drivers/net/hinic/base/hinic_pmd_mbox.c b/drivers/net/hinic/base/hinic_pmd_mbox.c new file mode 100644 index 0000000..7ae8fd0 --- /dev/null +++ b/drivers/net/hinic/base/hinic_pmd_mbox.c @@ -0,0 +1,938 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_csr.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_eqs.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_mbox.h" + +#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0 +#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 +#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define HINIC_MBOX_INT_WB_EN_SHIFT 28 + + +#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF +#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3 
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F +#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define HINIC_MBOX_INT_WB_EN_MASK 0x1 + +#define HINIC_MBOX_INT_SET(val, field) \ + (((val) & HINIC_MBOX_INT_##field##_MASK) << \ + HINIC_MBOX_INT_##field##_SHIFT) + +enum hinic_mbox_tx_status { + TX_DONE = 0, + TX_IN_PROGRESS, +}; + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 +/* specifies the issue request for the message data. + * 0 - Tx request is done; + * 1 - Tx request is in process. + */ +#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1 + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1 + +#define HINIC_MBOX_CTRL_SET(val, field) \ + (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \ + HINIC_MBOX_CTRL_##field##_SHIFT) + +#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MBOX_HEADER_MODULE_SHIFT 11 +#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MBOX_HEADER_SEQID_SHIFT 24 +#define HINIC_MBOX_HEADER_LAST_SHIFT 30 + +#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MBOX_HEADER_CMD_SHIFT 32 +#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40 +#define HINIC_MBOX_HEADER_STATUS_SHIFT 48 +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54 + +#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F +#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F +#define HINIC_MBOX_HEADER_LAST_MASK 0x1 +#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MBOX_HEADER_CMD_MASK 0xFF +#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF +#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF + +#define HINIC_MBOX_HEADER_GET(val, field) \ + (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \ + HINIC_MBOX_HEADER_##field##_MASK) +#define HINIC_MBOX_HEADER_SET(val, 
field) \ + ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \ + HINIC_MBOX_HEADER_##field##_SHIFT) + +#define HINIC_MBOX_COMP_TIME_MS 8000U +#define MBOX_MSG_POLLING_TIMEOUT_MS 5000 + +/* The size unit is Bytes */ +#define HINIC_MBOX_DATA_SIZE 2040 +#define MBOX_MAX_BUF_SZ 2048UL +#define MBOX_HEADER_SZ 8 + +/* MBOX size is 64B, 8B for mbox_header, 4B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL +#define MBOX_SIZE 64 + +/* mbox write back status is 16B, only first 4B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define SEQ_ID_START_VAL 0 + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define FUNC_ID_OFF_SET_8B 8 +#define FUNC_ID_OFF_SET_10B 10 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xFF +#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) +#define MBOX_MSG_ID_INC(func_to_func) (MBOX_MSG_ID(func_to_func) = \ + (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) + +enum hinic_hwif_direction_type { + /* driver send msg to up or up send msg to drier*/ + HINIC_HWIF_DIRECT_SEND = 0, + /* after driver/up send msg to each other, then up/driver ack the msg */ + HINIC_HWIF_RESPONSE, +}; + +enum mbox_send_mod { + MBOX_SEND_MSG_POLL = 1 +}; + +enum 
mbox_seg_type { + NOT_LAST_SEG, + LAST_SEG, +}; + +enum mbox_ordering_type { + STRONG_ORDER, + RELAX_ORDER, +}; + +enum mbox_write_back_type { + NOT_WRITE_BACK = 0, + WRITE_BACK, +}; + +enum mbox_aeq_trig_type { + NOT_TRIGGER, + TRIGGER, +}; + +static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, void *msg, + u16 msg_len, u16 dst_func, + enum hinic_hwif_direction_type direction, + enum hinic_mbox_ack_type ack_type, + struct mbox_msg_info *msg_info); + +static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size, void *param) +{ + int rc = 0; + *out_size = 0; + + switch (recv_mbox->mod) { + case HINIC_MOD_COMM: + hinic_comm_async_event_handle(func_to_func->hwdev, + recv_mbox->cmd, recv_mbox->mbox, + recv_mbox->mbox_len, + buf_out, out_size); + break; + case HINIC_MOD_L2NIC: + hinic_l2nic_async_event_handle(func_to_func->hwdev, param, + recv_mbox->cmd, recv_mbox->mbox, + recv_mbox->mbox_len, + buf_out, out_size); + break; + default: + PMD_DRV_LOG(ERR, "No handler, mod = %d", + recv_mbox->mod); + rc = HINIC_MBOX_VF_CMD_ERROR; + break; + } + + return rc; +} + +static void set_mbx_msg_status(struct mbox_msg_info *msg_info, int status) +{ + if (status == HINIC_DEV_BUSY_ACTIVE_FW) + msg_info->status = HINIC_MBOX_PF_BUSY_ACTIVE_FW; + else if (status == HINIC_MBOX_VF_CMD_ERROR) + msg_info->status = HINIC_MBOX_VF_CMD_ERROR; + else if (status) + msg_info->status = HINIC_MBOX_PF_SEND_ERR; +} + +static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + u16 src_func_idx, void *param) +{ + struct hinic_hwdev *dev = func_to_func->hwdev; + struct mbox_msg_info msg_info = { 0 }; + u16 out_size = MBOX_MAX_BUF_SZ; + void *buf_out = recv_mbox->buf_out; + int err = 0; + + if (HINIC_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, + &out_size, param); + } 
else { + err = -EINVAL; + PMD_DRV_LOG(ERR, "PMD doesn't support non-VF handle mailbox message"); + } + + if (!out_size || err) + out_size = MBOX_MSG_NO_DATA_LEN; + + if (recv_mbox->ack_type == MBOX_ACK) { + msg_info.msg_id = recv_mbox->msg_info.msg_id; + set_mbx_msg_status(&msg_info, err); + send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd, + buf_out, out_size, src_func_idx, + HINIC_HWIF_RESPONSE, MBOX_ACK, &msg_info); + } +} + +static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox, + u8 seq_id, u8 seg_len) +{ + if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN) + return false; + + if (seq_id == 0) { + recv_mbox->sed_id = seq_id; + } else { + if (seq_id != recv_mbox->sed_id + 1) { + recv_mbox->sed_id = 0; + return false; + } + + recv_mbox->sed_id = seq_id; + } + + return true; +} + +static void clear_mbox_status(struct hinic_send_mbox *mbox) +{ + /* clear mailbox write back status */ + *mbox->wb_status = 0; + rte_wmb(); +} + +static void mbox_copy_header(struct hinic_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) + __raw_writel(*(data + i), mbox->data + i * sizeof(u32)); +} + +static void +mbox_copy_send_data(struct hinic_send_mbox *mbox, void *seg, u16 seg_len) +{ + u32 *data = (u32 *)seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; + + /* The mbox message should be aligned in 4 bytes. 
*/ + if (seg_len % chk_sz) { + memcpy(mbox_max_buf, seg, seg_len); + data = (u32 *)mbox_max_buf; + } + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) + __raw_writel(*(data + i), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); +} + +static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func, + u16 dst_func, u16 dst_aeqn, + __rte_unused u16 seg_len, int poll) +{ + u32 mbox_int, mbox_ctrl; + + mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) | + HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + HINIC_MBOX_INT_SET(HINIC_MBOX_RSP_AEQN, SRC_RESP_AEQN) | + HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + HINIC_MBOX_INT_SET(ALIGN(MBOX_SIZE, MBOX_SEG_LEN_ALIGN) >> 2, + TX_SIZE) | + HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); + + rte_wmb(); + mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_IN_PROGRESS, TX_STATUS); + + if (poll) + mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); + else + mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +static int init_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + int err; + + mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->mbox) { + PMD_DRV_LOG(ERR, "Alloc mbox buf_in mem failed\n"); + return -ENOMEM; + } + + mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->buf_out) { + PMD_DRV_LOG(ERR, "Alloc mbox buf_out mem failed\n"); + err = -ENOMEM; + goto alloc_buf_out_err; + } + + return 0; + +alloc_buf_out_err: + kfree(mbox_info->mbox); + + return err; +} + +static void clean_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + kfree(mbox_info->buf_out); + kfree(mbox_info->mbox); +} + +static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + u16 
func_idx, i; + int err; + + for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) { + err = init_mbox_info(&mbox_info[func_idx]); + if (err) { + PMD_DRV_LOG(ERR, "Initialize function[%d] mailbox information failed, err: %d", + func_idx, err); + goto init_mbox_info_err; + } + } + + return 0; + +init_mbox_info_err: + for (i = 0; i < func_idx; i++) + clean_mbox_info(&mbox_info[i]); + + return err; +} + +static void free_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + u16 func_idx; + + for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) + clean_mbox_info(&mbox_info[func_idx]); +} + +static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + + send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); +} + +static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev, + MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, + GFP_KERNEL); + if (!send_mbox->wb_vaddr) { + PMD_DRV_LOG(ERR, "Allocating memory for mailbox wb status failed"); + return -ENOMEM; + } + send_mbox->wb_status = (volatile u64 *)send_mbox->wb_vaddr; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h); + hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l); + + return 0; +} + +static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + + hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0); + hinic_hwif_write_reg(hwif, 
HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0); + + dma_free_coherent(hwdev, MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, send_mbox->wb_paddr); +} + +static int recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + void *header, struct hinic_recv_mbox *recv_mbox, void *param) +{ + u64 mbox_header = *((u64 *)header); + void *mbox_body = MBOX_BODY_FROM_HDR(header); + u16 src_func_idx; + enum hinic_hwif_direction_type direction; + u8 seq_id, seg_len; + + seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID); + seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN); + direction = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION); + src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) { + PMD_DRV_LOG(ERR, + "Mailbox sequence and segment check failed, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n", + src_func_idx, recv_mbox->sed_id, seq_id, seg_len); + return HINIC_ERROR; + } + + memcpy((u8 *)recv_mbox->mbox + seq_id * HINIC_MSG_SEG_LEN, + mbox_body, seg_len); + + if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST)) + return HINIC_ERROR; + + recv_mbox->sed_id = 0; + recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD); + recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE); + recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN); + recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK); + recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID); + recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS); + + if (direction == HINIC_HWIF_RESPONSE) { + if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) { + return HINIC_OK; + } + + PMD_DRV_LOG(ERR, "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)", + func_to_func->send_msg_id, recv_mbox->msg_info.msg_id, + recv_mbox->msg_info.status); + return HINIC_ERROR; + } + + 
recv_func_mbox_handler(func_to_func, recv_mbox, src_func_idx, param); + + return HINIC_ERROR; +} + +/** + * hinic_mbox_func_aeqe_handler - Process mbox info from func which is + * sent by aeqe. + * + * @param handle + * Pointer to hardware nic device. + * @param header + * Mbox header info. + * @param size + * The size of aeqe descriptor. + * @param param + * customized parameter. + * + * @return + * 0 on success, negative error value otherwise. + */ +int hinic_mbox_func_aeqe_handler(void *handle, u8 *header, + __rte_unused u8 size, void *param) +{ + struct hinic_mbox_func_to_func *func_to_func = + ((struct hinic_hwdev *)handle)->func_to_func; + struct hinic_recv_mbox *recv_mbox; + u64 mbox_header = *((u64 *)header); + u16 src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (src >= HINIC_MAX_FUNCTIONS) { + PMD_DRV_LOG(ERR, "Mailbox source function id: %d is invalid", + src); + return HINIC_ERROR; + } + + recv_mbox = (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) == + HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] : + &func_to_func->mbox_resp[src]; + + return recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox, param); +} + +static u16 get_mbox_status(struct hinic_send_mbox *mbox) +{ + /* write back is 16B, but only use first 4B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rte_rmb(); /* verify reading before check */ + + return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +static void dump_mox_reg(struct hinic_hwdev *hwdev) +{ + u32 val; + + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF); + PMD_DRV_LOG(WARNING, "Mailbox control reg: 0x%x", val); + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + PMD_DRV_LOG(WARNING, "Mailbox interrupt offset: 0x%x", val); +} + +static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, + u64 header, u16 dst_func, void *seg, u16 seg_len) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION); + u16 dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ? 
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN; + u16 err_code, wb_status = 0; + u32 cnt = 0; + + clear_mbox_status(send_mbox); + + mbox_copy_header(send_mbox, &header); + + mbox_copy_send_data(send_mbox, seg, seg_len); + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, + MBOX_SEND_MSG_POLL); + + rte_wmb(); + + while (cnt < MBOX_MSG_POLLING_TIMEOUT_MS) { + wb_status = get_mbox_status(send_mbox); + if (MBOX_STATUS_FINISHED(wb_status)) + break; + + rte_delay_ms(1); /* loop every ms */ + cnt++; + } + + if (cnt == MBOX_MSG_POLLING_TIMEOUT_MS) { + PMD_DRV_LOG(ERR, "Send mailbox segment timeout, wb status: 0x%x", + wb_status); + dump_mox_reg(hwdev); + return -ETIMEDOUT; + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + PMD_DRV_LOG(ERR, "Send mailbox segment to function %d error, wb status: 0x%x", + dst_func, wb_status); + /* + * err_code: 0 responses no errors, other values can + * refer to FS doc. + */ + err_code = MBOX_STATUS_ERRCODE(wb_status); + return err_code ? err_code : -EFAULT; + } + + return 0; +} + +static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func, + enum mbox_event_state event_flag) +{ + spin_lock(&func_to_func->mbox_lock); + func_to_func->event_flag = event_flag; + spin_unlock(&func_to_func->mbox_lock); +} + +static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, void *msg, + u16 msg_len, u16 dst_func, + enum hinic_hwif_direction_type direction, + enum hinic_mbox_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + struct hinic_hwdev *hwdev = func_to_func->hwdev; + int err = 0; + u32 seq_id = 0; + u16 seg_len = HINIC_MSG_SEG_LEN; + u16 left = msg_len; + u8 *msg_seg = (u8 *)msg; + u64 header = 0; + + err = hinic_mutex_lock(&func_to_func->msg_send_mutex); + if (err) + return err; + + header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MBOX_HEADER_SET(mod, MODULE) | + HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) | + HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) 
| + HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) | + HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) | + HINIC_MBOX_HEADER_SET(direction, DIRECTION) | + HINIC_MBOX_HEADER_SET(cmd, CMD) | + HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) | + HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) | + HINIC_MBOX_HEADER_SET(hinic_global_func_id(hwdev), + SRC_GLB_FUNC_IDX); + + while (!(HINIC_MBOX_HEADER_GET(header, LAST))) { + if (left <= HINIC_MSG_SEG_LEN) { + header &= + ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, + SEG_LEN)); + header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN); + header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST); + + seg_len = left; + } + + err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, + seg_len); + if (err) { + PMD_DRV_LOG(ERR, "Fail to send mbox seg, seq_id: 0x%lx, err: %d", + HINIC_MBOX_HEADER_GET(header, SEQID), err); + goto send_err; + } + + left -= HINIC_MSG_SEG_LEN; + msg_seg += HINIC_MSG_SEG_LEN; + + seq_id++; + header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK, + SEQID)); + header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID); + } + +send_err: + (void)hinic_mutex_unlock(&func_to_func->msg_send_mutex); + + return err; +} + +static int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, u16 dst_func, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + u32 timeout) +{ + struct hinic_recv_mbox *mbox_for_resp = + &func_to_func->mbox_resp[dst_func]; + struct mbox_msg_info msg_info = {0}; + u32 time; + int err; + + err = hinic_mutex_lock(&func_to_func->mbox_send_mutex); + if (err) + return err; + + msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); + + set_mbox_to_func_event(func_to_func, EVENT_START); + + err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size, + dst_func, HINIC_HWIF_DIRECT_SEND, + MBOX_ACK, &msg_info); + if (err) + goto send_err; + + time = msecs_to_jiffies(timeout ? 
timeout : HINIC_MBOX_COMP_TIME_MS); + err = hinic_aeq_poll_msg(func_to_func->rsp_aeq, time, NULL); + if (err) { + set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); + PMD_DRV_LOG(ERR, "Send mailbox message time out"); + err = -ETIMEDOUT; + goto send_err; + } + + set_mbox_to_func_event(func_to_func, EVENT_END); + + if (mbox_for_resp->msg_info.status) { + err = mbox_for_resp->msg_info.status; + if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + PMD_DRV_LOG(ERR, "Mailbox response error: 0x%x", + mbox_for_resp->msg_info.status); + else + PMD_DRV_LOG(ERR, "Chip is in active, PF can't process VF message"); + goto send_err; + } + + rte_rmb(); + + if (mbox_for_resp->mbox_len && buf_out && out_size) { + if (mbox_for_resp->mbox_len <= *out_size) { + memcpy(buf_out, mbox_for_resp->mbox, + mbox_for_resp->mbox_len); + *out_size = mbox_for_resp->mbox_len; + } else { + PMD_DRV_LOG(ERR, "Mailbox response message len[%u] overflow", + mbox_for_resp->mbox_len); + err = -ERANGE; + } + } + +send_err: + if (err && out_size) + *out_size = 0; + (void)hinic_mutex_unlock(&func_to_func->mbox_send_mutex); + + return err; +} + +static int +mbox_func_params_valid(__rte_unused struct hinic_mbox_func_to_func *mbox_obj, + void *buf_in, u16 in_size) +{ + if (!buf_in || !in_size) + return -EINVAL; + + if (in_size > HINIC_MBOX_DATA_SIZE) { + PMD_DRV_LOG(ERR, "Mailbox message len(%d) exceed limit(%d)", + in_size, HINIC_MBOX_DATA_SIZE); + return -EINVAL; + } + + return 0; +} + +static u8 hinic_pf_id_of_vf(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + return hwif->attr.port_to_port_idx; +} + +/** + * hinic_mbox_to_pf - Send mbox info to pf and need pf to response. + * + * @param hwdev + * Pointer to hardware nic device. + * @param mod + * Mode type of hardware. + * @param cmd + * The command sent to pf. + * @param buf_in + * Input parameter. + * @param in_size + * Input parameter size. + * @param buf_out + * Output parameter. + * @param out_size + * Output parameter size. 
+ * @param timeout + * Timeout. + * + * @return + * 0 on success, negative error value otherwise. + */ +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + int err; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size); + if (err) { + PMD_DRV_LOG(ERR, "Mailbox parameters check failed: %d", err); + return err; + } + + if (!HINIC_IS_VF(hwdev)) { + PMD_DRV_LOG(ERR, "Input function type error, func_type: %d", + hinic_func_type(hwdev)); + return -EINVAL; + } + + return hinic_mbox_to_func(func_to_func, mod, cmd, + hinic_pf_id_of_vf(hwdev), buf_in, in_size, + buf_out, out_size, timeout); +} + +/** + * hinic_mbox_to_pf_no_ack - Send mbox info to pf and do not need pf to response + * + * @param hwdev + * Pointer to hardware nic device. + * @param mod + * Mode type of hardware. + * @param cmd + * The command sent to pf. + * @param buf_in + * Input parameter. + * @param in_size + * Input parameter size. + * + * @return + * 0 on success, negative error value otherwise. 
+ */ +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size) +{ + int err; + struct mbox_msg_info msg_info = {0}; + + err = hinic_mutex_lock(&hwdev->func_to_func->mbox_send_mutex); + if (err) + return err; + + err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size, + hinic_pf_id_of_vf(hwdev), HINIC_HWIF_DIRECT_SEND, + MBOX_NO_ACK, &msg_info); + if (err) + PMD_DRV_LOG(ERR, "Send mailbox no ack failed, err: %d", err); + + (void)hinic_mutex_unlock(&hwdev->func_to_func->mbox_send_mutex); + + return err; +} + +static int hinic_func_to_func_init(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func; + int err; + + func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) { + PMD_DRV_LOG(ERR, "Allocating memory for func_to_func object failed"); + return -ENOMEM; + } + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + (void)hinic_mutex_init(&func_to_func->mbox_send_mutex, NULL); + (void)hinic_mutex_init(&func_to_func->msg_send_mutex, NULL); + + err = alloc_mbox_info(func_to_func->mbox_send); + if (err) { + PMD_DRV_LOG(ERR, "Allocating memory for mailbox sending failed"); + goto alloc_mbox_for_send_err; + } + + err = alloc_mbox_info(func_to_func->mbox_resp); + if (err) { + PMD_DRV_LOG(ERR, "Allocating memory for mailbox responsing failed"); + goto alloc_mbox_for_resp_err; + } + + err = alloc_mbox_wb_status(func_to_func); + if (err) + goto alloc_wb_status_err; + + prepare_send_mbox(func_to_func); + + return 0; + +alloc_wb_status_err: + free_mbox_info(func_to_func->mbox_resp); + +alloc_mbox_for_resp_err: + free_mbox_info(func_to_func->mbox_send); + +alloc_mbox_for_send_err: + kfree(func_to_func); + + return err; +} + +/** + * hinic_comm_func_to_func_free - Uninitialize func to func resource. + * + * @param hwdev + * Pointer to hardware nic device. 
+ */ +void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + free_mbox_wb_status(func_to_func); + free_mbox_info(func_to_func->mbox_resp); + free_mbox_info(func_to_func->mbox_send); + (void)hinic_mutex_destroy(&func_to_func->mbox_send_mutex); + (void)hinic_mutex_destroy(&func_to_func->msg_send_mutex); + kfree(func_to_func); +} + +/** + * hinic_comm_func_to_func_init - Initialize func to func resource. + * + * @param hwdev + * Pointer to hardware nic device. + */ +int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev) +{ + int rc; + + rc = hinic_func_to_func_init(hwdev); + if (rc) + return rc; + + hwdev->func_to_func->rsp_aeq = &hwdev->aeqs->aeq[HINIC_MBOX_RSP_AEQN]; + hwdev->func_to_func->recv_aeq = &hwdev->aeqs->aeq[HINIC_MBOX_RECV_AEQN]; + + return 0; +} + diff --git a/drivers/net/hinic/base/hinic_pmd_mbox.h b/drivers/net/hinic/base/hinic_pmd_mbox.h new file mode 100644 index 0000000..bf7b490 --- /dev/null +++ b/drivers/net/hinic/base/hinic_pmd_mbox.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_MBOX_H_ +#define _HINIC_PMD_MBOX_H_ + +#define HINIC_MBOX_RECV_AEQN 0 +#define HINIC_MBOX_RSP_AEQN 2 + +#define HINIC_MBOX_PF_SEND_ERR 0x1 +#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2 +#define HINIC_MBOX_VF_CMD_ERROR 0x3 + +/* PFs do not support enable SR-IOV cap when PFs use PMD, VFs just receive + * mailbox message from PFs. The max number of PFs is 16, so the max number + * of mailbox buffer for functions is also 16. 
+ */ +#define HINIC_MAX_FUNCTIONS 16 +#define HINIC_MAX_PF_FUNCS 16 + +#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF + +#define HINIC_SEQ_ID_MAX_VAL 42 +#define HINIC_MSG_SEG_LEN 48 + +enum hinic_mbox_ack_type { + MBOX_ACK, + MBOX_NO_ACK, +}; + +struct mbox_msg_info { + u8 msg_id; + u8 status; /*can only use 6 bit*/ +}; + +struct hinic_recv_mbox { + void *mbox; + u8 cmd; + enum hinic_mod_type mod; + u16 mbox_len; + void *buf_out; + enum hinic_mbox_ack_type ack_type; + struct mbox_msg_info msg_info; + u8 sed_id; +}; + +struct hinic_send_mbox { + u8 *data; + volatile u64 *wb_status; + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +enum mbox_event_state { + EVENT_START = 0, + EVENT_TIMEOUT, + EVENT_END, +}; + +struct hinic_mbox_func_to_func { + struct hinic_hwdev *hwdev; + + pthread_mutex_t mbox_send_mutex; + pthread_mutex_t msg_send_mutex; + + struct hinic_send_mbox send_mbox; + + struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS]; + struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS]; + + struct hinic_eq *rsp_aeq; + struct hinic_eq *recv_aeq; + + u8 send_msg_id; + enum mbox_event_state event_flag; + spinlock_t mbox_lock; /* lock for mbox event flag */ +}; + +/* + * mbox function prototypes + */ +int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev); +void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev); +int hinic_mbox_func_aeqe_handler(void *handle, u8 *header, + u8 size, void *param); +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size); + +#endif /* _HINIC_PMD_MBOX_H_ */ diff --git a/drivers/net/hinic/base/meson.build b/drivers/net/hinic/base/meson.build index cde394a..6cf947f 100644 --- a/drivers/net/hinic/base/meson.build +++ b/drivers/net/hinic/base/meson.build @@ -12,6 +12,7 @@ sources = [ 'hinic_pmd_niccfg.c', 
'hinic_pmd_nicio.c', 'hinic_pmd_wq.c', + 'hinic_pmd_mbox.c', ] extra_flags = [] From patchwork Mon Sep 30 14:00:40 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60189 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id CC72B4C77; Mon, 30 Sep 2019 15:46:56 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 889163423 for ; Mon, 30 Sep 2019 15:46:54 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id A08EE67FEC310F7CF18 for ; Mon, 30 Sep 2019 21:46:51 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:46:42 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:40 +0800 Message-ID: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 02/19] net/hinic/base: add HW interfaces for SR-IOV X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch introduces some HW interfaces required for SR-IOV function, including: init hwdev, set port state, get default cos, vf dma attribute table, vf txrx flush and so on. 
Signed-off-by: Xiaoyun wang --- drivers/net/hinic/base/hinic_compat.h | 32 ---- drivers/net/hinic/base/hinic_pmd_api_cmd.c | 60 +++---- drivers/net/hinic/base/hinic_pmd_cfg.c | 35 ++++ drivers/net/hinic/base/hinic_pmd_cmd.h | 14 +- drivers/net/hinic/base/hinic_pmd_hwdev.c | 198 ++++++++++++++++----- drivers/net/hinic/base/hinic_pmd_hwdev.h | 4 + drivers/net/hinic/base/hinic_pmd_hwif.c | 85 ++++++++- drivers/net/hinic/base/hinic_pmd_hwif.h | 15 +- drivers/net/hinic/base/hinic_pmd_mgmt.c | 83 +++++---- drivers/net/hinic/base/hinic_pmd_mgmt.h | 2 +- drivers/net/hinic/base/hinic_pmd_niccfg.c | 268 +++++++++++++++++++++++------ drivers/net/hinic/base/hinic_pmd_niccfg.h | 4 + drivers/net/hinic/base/hinic_pmd_nicio.c | 15 +- drivers/net/hinic/base/hinic_pmd_nicio.h | 3 +- 14 files changed, 606 insertions(+), 212 deletions(-) diff --git a/drivers/net/hinic/base/hinic_compat.h b/drivers/net/hinic/base/hinic_compat.h index db60a51..85cbb5b 100644 --- a/drivers/net/hinic/base/hinic_compat.h +++ b/drivers/net/hinic/base/hinic_compat.h @@ -223,38 +223,6 @@ static inline u16 ilog2(u32 n) return res; } -/** - * hinic_cpu_to_be32 - convert data to big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert, must be Multiple of 4B - */ -static inline void hinic_cpu_to_be32(void *data, u32 len) -{ - u32 i; - u32 *mem = (u32 *)data; - - for (i = 0; i < (len >> 2); i++) { - *mem = cpu_to_be32(*mem); - mem++; - } -} - -/** - * hinic_be32_to_cpu - convert data from big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert, must be Multiple of 4B - */ -static inline void hinic_be32_to_cpu(void *data, u32 len) -{ - u32 i; - u32 *mem = (u32 *)data; - - for (i = 0; i < (len >> 2); i++) { - *mem = be32_to_cpu(*mem); - mem++; - } -} - static inline int hinic_mutex_init(pthread_mutex_t *pthreadmutex, const pthread_mutexattr_t *mattr) { diff --git a/drivers/net/hinic/base/hinic_pmd_api_cmd.c 
b/drivers/net/hinic/base/hinic_pmd_api_cmd.c index 1ea86fa..dbffc2e 100644 --- a/drivers/net/hinic/base/hinic_pmd_api_cmd.c +++ b/drivers/net/hinic/base/hinic_pmd_api_cmd.c @@ -120,7 +120,7 @@ static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain) /** * chain_busy - check if the chain is still processing last requests * @chain: chain to check - **/ + */ static int chain_busy(struct hinic_api_cmd_chain *chain) { switch (chain->chain_type) { @@ -146,7 +146,7 @@ static int chain_busy(struct hinic_api_cmd_chain *chain) /** * get_cell_data_size - get the data size of specific cell type * @type: chain type - **/ + */ static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, __rte_unused u16 cmd_size) { @@ -170,7 +170,7 @@ static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, * prepare_cell_ctrl - prepare the ctrl of the cell for the command * @cell_ctrl: the control of the cell to set the control into it * @cell_len: the size of the cell - **/ + */ static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) { u64 ctrl; @@ -202,7 +202,7 @@ static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size - **/ + */ static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, struct hinic_api_cmd_cell *cell, enum hinic_node_id dest, @@ -254,7 +254,7 @@ static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size - **/ + */ static void prepare_cell(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, void *cmd, u16 cmd_size) @@ -283,7 +283,7 @@ static void issue_api_cmd(struct hinic_api_cmd_chain *chain) /** * api_cmd_status_update - update the status of the chain * @chain: chain to update - **/ + */ static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) { struct hinic_api_cmd_status 
*wb_status; @@ -314,7 +314,7 @@ static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) * wait_for_status_poll - wait for write to mgmt command to complete * @chain: the chain of the command * Return: 0 - success, negative - failure - **/ + */ static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) { unsigned long end; @@ -340,7 +340,7 @@ static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) * wait_for_api_cmd_completion - wait for command to complete * @chain: chain for the command * Return: 0 - success, negative - failure - **/ + */ static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain, __rte_unused struct hinic_api_cmd_cell_ctxt *ctxt, __rte_unused void *ack, __rte_unused u16 ack_size) @@ -384,7 +384,7 @@ static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain, * @ack: pointer to messages to response * @ack_size: the size of ack message * Return: 0 - success, negative - failure - **/ + */ static int api_cmd(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, void *cmd, u16 cmd_size, void *ack, u16 ack_size) @@ -423,7 +423,7 @@ static int api_cmd(struct hinic_api_cmd_chain *chain, * @cmd: command data * @size: the command size * Return: 0 - success, negative - failure - **/ + */ int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, void *cmd, u16 size) { @@ -434,7 +434,7 @@ int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, /** * api_cmd_hw_restart - restart the chain in the HW * @chain: the API CMD specific chain to restart - **/ + */ static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwdev->hwif; @@ -470,7 +470,7 @@ static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) /** * api_cmd_ctrl_init - set the control register of a chain * @chain: the API CMD specific chain to set control register for - **/ + */ static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) { struct 
hinic_hwif *hwif = chain->hwdev->hwif; @@ -497,7 +497,7 @@ static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) /** * api_cmd_set_status_addr - set the status address of a chain in the HW * @chain: the API CMD specific chain to set status address for - **/ + */ static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwdev->hwif; @@ -515,7 +515,7 @@ static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) /** * api_cmd_set_num_cells - set the number cells of a chain in the HW * @chain: the API CMD specific chain to set the number of cells for - **/ + */ static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwdev->hwif; @@ -529,7 +529,7 @@ static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) /** * api_cmd_head_init - set the head cell of a chain in the HW * @chain: the API CMD specific chain to set the head for - **/ + */ static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwdev->hwif; @@ -548,7 +548,7 @@ static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) * wait_for_ready_chain - wait for the chain to be ready * @chain: the API CMD specific chain to wait for * Return: 0 - success, negative - failure - **/ + */ static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwdev->hwif; @@ -580,7 +580,7 @@ static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain) /** * api_cmd_chain_hw_clean - clean the HW * @chain: the API CMD specific chain - **/ + */ static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwdev->hwif; @@ -603,7 +603,7 @@ static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) *(initialize API command csr) * @chain: the API CMD specific chain to initialize in HW * Return: 0 - success, negative - failure - **/ + */ static int 
api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) { api_cmd_chain_hw_clean(chain); @@ -626,7 +626,7 @@ static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) * free_cmd_buf - free the dma buffer of API CMD command * @chain: the API CMD specific chain of the cmd * @cell_idx: the cell index of the cmd - **/ + */ static void free_cmd_buf(struct hinic_api_cmd_chain *chain, u32 cell_idx) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; @@ -645,7 +645,7 @@ static void free_cmd_buf(struct hinic_api_cmd_chain *chain, u32 cell_idx) * @cell: the cell in the HW for the cmd * @cell_idx: the index of the cell * Return: 0 - success, negative - failure - **/ + */ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, struct hinic_api_cmd_cell *cell, u32 cell_idx) { @@ -697,7 +697,7 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, * @pre_node: previous cell * @node_vaddr: the virt addr of the cell * Return: 0 - success, negative - failure - **/ + */ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, u32 cell_idx, struct hinic_api_cmd_cell *pre_node, @@ -774,7 +774,7 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, * api_cmd_destroy_cell - destroy API CMD cell of specific chain * @chain: the API CMD specific chain to destroy its cell * @cell_idx: the cell to destroy - **/ + */ static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, u32 cell_idx) { @@ -808,7 +808,7 @@ static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, * api_cmd_destroy_cells - destroy API CMD cells of specific chain * @chain: the API CMD specific chain to destroy its cells * @num_cells: number of cells to destroy - **/ + */ static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, u32 num_cells) { @@ -822,7 +822,7 @@ static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, * api_cmd_create_cells - create API CMD cells for specific chain * @chain: the API CMD specific chain * Return: 0 - success, 
negative - failure - **/ + */ static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) { struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; @@ -861,7 +861,7 @@ static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) * @chain: the API CMD specific chain to initialize * @attr: attributes to set in the chain * Return: 0 - success, negative - failure - **/ + */ static int api_chain_init(struct hinic_api_cmd_chain *chain, struct hinic_api_cmd_chain_attr *attr) { @@ -910,7 +910,7 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, /** * api_chain_free - free API CMD specific chain * @chain: the API CMD specific chain to free - **/ + */ static void api_chain_free(struct hinic_api_cmd_chain *chain) { void *dev = chain->hwdev; @@ -925,7 +925,7 @@ static void api_chain_free(struct hinic_api_cmd_chain *chain) * @cmd_chain: the API CMD specific chain to create * @attr: attributes to set in the chain * Return: 0 - success, negative - failure - **/ + */ static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain, struct hinic_api_cmd_chain_attr *attr) { @@ -981,7 +981,7 @@ static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain, /** * api_cmd_destroy_chain - destroy API CMD specific chain * @chain: the API CMD specific chain to destroy - **/ + */ static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) { api_cmd_destroy_cells(chain, chain->num_cells); @@ -994,7 +994,7 @@ static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) * @hwdev: the hardware interface of a pci function device * @chain: the API CMD chains that will be initialized * Return: 0 - success, negative - failure - **/ + */ int hinic_api_cmd_init(struct hinic_hwdev *hwdev, struct hinic_api_cmd_chain **chain) { @@ -1031,7 +1031,7 @@ int hinic_api_cmd_init(struct hinic_hwdev *hwdev, /** * hinic_api_cmd_free - free the API CMD chains * @chain: the API CMD chains that will be freed - **/ + */ void hinic_api_cmd_free(struct 
hinic_api_cmd_chain **chain) { enum hinic_api_cmd_chain_type chain_type; diff --git a/drivers/net/hinic/base/hinic_pmd_cfg.c b/drivers/net/hinic/base/hinic_pmd_cfg.c index 61537b1..aa883e0 100644 --- a/drivers/net/hinic/base/hinic_pmd_cfg.c +++ b/drivers/net/hinic/base/hinic_pmd_cfg.c @@ -8,6 +8,7 @@ #include "hinic_pmd_mgmt.h" #include "hinic_pmd_eqs.h" #include "hinic_pmd_cfg.h" +#include "hinic_pmd_mbox.h" bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap) { @@ -122,6 +123,10 @@ static void hinic_parse_pub_res_cap(struct service_cap *cap, cap->vf_id_start = dev_cap->vf_id_start; cap->max_sqs = dev_cap->nic_max_sq + 1; cap->max_rqs = dev_cap->nic_max_rq + 1; + } else { + cap->max_vf = 0; + cap->max_sqs = dev_cap->nic_max_sq; + cap->max_rqs = dev_cap->nic_max_rq; } cap->chip_svc_type = CFG_SVC_NIC_BIT0; @@ -180,6 +185,28 @@ static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type) return 0; } +static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type) +{ + int err; + u16 in_len, out_len; + struct hinic_dev_cap dev_cap; + + memset(&dev_cap, 0, sizeof(dev_cap)); + in_len = sizeof(dev_cap); + out_len = in_len; + err = hinic_mbox_to_pf(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP, + &dev_cap, in_len, &dev_cap, &out_len, + CFG_MAX_CMD_TIMEOUT); + if (err || dev_cap.mgmt_msg_head.status || !out_len) { + PMD_DRV_LOG(ERR, "Get capability from PF failed, err: %d, status: %d, out_len: %d", + err, dev_cap.mgmt_msg_head.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + static int get_dev_cap(struct hinic_hwdev *dev) { int err; @@ -194,6 +221,14 @@ static int get_dev_cap(struct hinic_hwdev *dev) return err; } break; + case TYPE_VF: + err = get_cap_from_pf(dev, type); + if (err) { + PMD_DRV_LOG(ERR, "Get VF capability failed, err: %d", + err); + return err; + } + break; default: PMD_DRV_LOG(ERR, "Unsupported PCI function type"); return -EINVAL; diff --git 
a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h index 7a9e9f6..c8750b8 100644 --- a/drivers/net/hinic/base/hinic_pmd_cmd.h +++ b/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -25,6 +25,12 @@ enum hinic_mod_type { HINIC_MOD_MAX = 15 }; +/* only used by VFD communicating with PFD to register or unregister, + * command mode type is HINIC_MOD_L2NIC + */ +#define HINIC_PORT_CMD_VF_REGISTER 0x0 +#define HINIC_PORT_CMD_VF_UNREGISTER 0x1 + /* cmd of mgmt CPU message for NIC module */ enum hinic_port_cmd { HINIC_PORT_CMD_MGMT_RESET = 0x0, @@ -416,12 +422,12 @@ enum hinic_pf_status { }; /* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */ -#define HINIC_DB_DWQE_SIZE 0x00080000 +#define HINIC_DB_DWQE_SIZE 0x00080000 -/* db/dwqe page size: 4K */ -#define HINIC_DB_PAGE_SIZE 0x00001000ULL +/* db page size: 4K */ +#define HINIC_DB_PAGE_SIZE 0x00001000ULL -#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE) +#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE) #define HINIC_PCI_MSIX_ENTRY_SIZE 16 #define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c index 4f70baf..6480d56 100644 --- a/drivers/net/hinic/base/hinic_pmd_hwdev.c +++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c @@ -15,6 +15,7 @@ #include "hinic_pmd_cmdq.h" #include "hinic_pmd_mgmt.h" #include "hinic_pmd_niccfg.h" +#include "hinic_pmd_mbox.h" #define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0 #define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF @@ -65,6 +66,52 @@ "Unrecognized module", }; +struct hinic_vf_dma_attr_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 func_dma_entry_num; + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u8 resv1[3]; +}; + +/** + * hinic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + 
*/ +void hinic_cpu_to_be32(void *data, u32 len) +{ + u32 i; + u32 *mem = (u32 *)data; + + for (i = 0; i < (len >> 2); i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/** + * hinic_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +void hinic_be32_to_cpu(void *data, u32 len) +{ + u32 i; + u32 *mem = (u32 *)data; + + for (i = 0; i < (len >> 2); i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + static void * hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size, dma_addr_t *dma_handle, unsigned int flag, unsigned int align) @@ -332,18 +379,6 @@ void hinic_osdep_deinit(struct hinic_hwdev *hwdev) } } - - - - - - - - - - - - /** * hinic_set_ci_table - set ci attribute table * @hwdev: the hardware interface of a nic device @@ -352,7 +387,7 @@ void hinic_osdep_deinit(struct hinic_hwdev *hwdev) * @return * 0 on success and ci attribute table is filled, * negative error value otherwise. - **/ + */ int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr) { struct hinic_cons_idx_attr cons_idx_attr; @@ -385,7 +420,7 @@ int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr) * @return * 0 on success, * negative error value otherwise. 
- **/ + */ int hinic_set_pagesize(void *hwdev, u8 page_size) { struct hinic_page_size cmd; @@ -417,7 +452,6 @@ static int wait_for_flr_finish(struct hinic_hwif *hwif) do { status = hinic_get_pf_status(hwif); if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) { - hinic_set_pf_status(hwif, HINIC_PF_STATUS_ACTIVE_FLAG); return 0; } @@ -463,13 +497,51 @@ static int wait_cmdq_stop(struct hinic_hwdev *hwdev) return err; } +static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + struct hinic_clear_resource clr_res; + int err; + + err = wait_cmdq_stop(hwdev); + if (err) { + PMD_DRV_LOG(WARNING, "Cmdq is still working"); + return err; + } + + memset(&clr_res, 0, sizeof(clr_res)); + clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif); + clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_START_FLR, &clr_res, sizeof(clr_res)); + if (err) + PMD_DRV_LOG(WARNING, "Notice flush message failed"); + + /* + * PF firstly set VF doorbell flush csr to be disabled. After PF finish + * VF resources flush, PF will set VF doorbell flush csr to be enabled. + */ + err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL); + if (err) + PMD_DRV_LOG(WARNING, "Wait doorbell flush disable timeout"); + + err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL); + if (err) + PMD_DRV_LOG(WARNING, "Wait doorbell flush enable timeout"); + + err = hinic_reinit_cmdq_ctxts(hwdev); + if (err) + PMD_DRV_LOG(WARNING, "Reinit cmdq failed"); + + return err; +} + /** * hinic_pf_rx_tx_flush - clean up hardware resource * @hwdev: the hardware interface of a nic device * @return * 0 on success, * negative error value otherwise. 
- **/ + */ static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; @@ -523,7 +595,10 @@ static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev) int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev) { - return hinic_pf_rx_tx_flush(hwdev); + if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF) + return hinic_vf_rx_tx_flush(hwdev); + else + return hinic_pf_rx_tx_flush(hwdev); } /** @@ -531,7 +606,7 @@ int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev) * @hwdev: the hardware interface of a nic device * @interrupt_info: Information of Interrupt aggregation * Return: 0 on success, negative error value otherwise. - **/ + */ static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, struct nic_interrupt_info *interrupt_info) { @@ -567,7 +642,7 @@ static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, * @hwdev: the hardware interface of a nic device * @interrupt_info: Information of Interrupt aggregation * Return: 0 on success, negative error value otherwise. - **/ + */ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, struct nic_interrupt_info interrupt_info) { @@ -623,7 +698,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, * @return * 0 on success, * negative error value otherwise. 
- **/ + */ int init_aeqs_msix_attr(void *hwdev) { struct hinic_hwdev *nic_hwdev = hwdev; @@ -662,7 +737,7 @@ int init_aeqs_msix_attr(void *hwdev) * @ph: PCIE TLP Processing Hint field * @no_snooping: PCIE TLP No snooping * @tph_en: PCIE TLP Processing Hint Enable - **/ + */ static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx, u8 st, u8 at, u8 ph, enum hinic_pcie_nosnoop no_snooping, @@ -690,28 +765,74 @@ static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx, hinic_hwif_write_reg(hwdev->hwif, addr, val); } +static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + struct hinic_vf_dma_attr_table attr; + + memset(&attr, 0, sizeof(attr)); + attr.func_idx = hinic_global_func_id(hwdev); + attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev); + attr.entry_idx = entry_idx; + attr.st = st; + attr.at = at; + attr.ph = ph; + attr.no_snooping = no_snooping; + attr.tph_en = tph_en; + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_DMA_ATTR_SET, + &attr, sizeof(attr), NULL, NULL, 0); +} + /** * dma_attr_table_init - initialize the the default dma attributes * @hwdev: the pointer to the private hardware device object - **/ -static void dma_attr_table_init(struct hinic_hwdev *hwdev) + */ +static int dma_attr_table_init(struct hinic_hwdev *hwdev) { + int err = 0; + if (HINIC_IS_VF(hwdev)) - return; + err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE, + HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP, + HINIC_PCIE_TPH_DISABLE); + else + set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE, + HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP, + HINIC_PCIE_TPH_DISABLE); - set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, - HINIC_PCIE_ST_DISABLE, - HINIC_PCIE_AT_DISABLE, - 
HINIC_PCIE_PH_DISABLE, - HINIC_PCIE_SNOOP, - HINIC_PCIE_TPH_DISABLE); + return err; } +/** + * hinic_init_attr_table - init dma and aeq msix attribute table + * @hwdev: the pointer to the private hardware device object + */ int hinic_init_attr_table(struct hinic_hwdev *hwdev) { - dma_attr_table_init(hwdev); + int err; - return init_aeqs_msix_attr(hwdev); + err = dma_attr_table_init(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Initialize dma attribute table failed, err: %d", + err); + return err; + } + + err = init_aeqs_msix_attr(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Initialize aeqs msix attribute failed, err: %d", + err); + return err; + } + + return 0; } #define FAULT_SHOW_STR_LEN 16 @@ -826,7 +947,7 @@ static int resources_state_set(struct hinic_hwdev *hwdev, * @return * 0 on success, * negative error value otherwise. - **/ + */ int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev) { int rc = HINIC_OK; @@ -834,9 +955,7 @@ int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev) if (!hwdev) return -EINVAL; - if (!HINIC_IS_VF(hwdev)) - hinic_set_pf_status(hwdev->hwif, - HINIC_PF_STATUS_ACTIVE_FLAG); + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG); rc = resources_state_set(hwdev, HINIC_RES_ACTIVE); if (rc) { @@ -851,7 +970,7 @@ int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev) * hinic_deactivate_hwdev_state - Deactivate host nic state and notify mgmt * channel that host nic is not ready. 
* @hwdev: the pointer to the private hardware device object - **/ + */ void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev) { int rc = HINIC_OK; @@ -863,8 +982,7 @@ void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev) if (rc) PMD_DRV_LOG(ERR, "Deinit resources state failed"); - if (!HINIC_IS_VF(hwdev)) - hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); } int hinic_get_board_info(void *hwdev, struct hinic_board_info *info) @@ -898,7 +1016,7 @@ int hinic_get_board_info(void *hwdev, struct hinic_board_info *info) * @return * 0 on success, * negative error value otherwise. - **/ + */ int hinic_l2nic_reset(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.h b/drivers/net/hinic/base/hinic_pmd_hwdev.h index fc5bdf9..c37771b 100644 --- a/drivers/net/hinic/base/hinic_pmd_hwdev.h +++ b/drivers/net/hinic/base/hinic_pmd_hwdev.h @@ -485,4 +485,8 @@ void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd, int hinic_set_pagesize(void *hwdev, u8 page_size); +void hinic_cpu_to_be32(void *data, u32 len); + +void hinic_be32_to_cpu(void *data, u32 len); + #endif /* _HINIC_PMD_HWDEV_H_ */ diff --git a/drivers/net/hinic/base/hinic_pmd_hwif.c b/drivers/net/hinic/base/hinic_pmd_hwif.c index a5e223b..8a01fbc 100644 --- a/drivers/net/hinic/base/hinic_pmd_hwif.c +++ b/drivers/net/hinic/base/hinic_pmd_hwif.c @@ -13,6 +13,9 @@ #define HINIC_INTR_MSI_BAR 2 #define HINIC_DB_MEM_BAR 4 +#define PAGE_SIZE_4K 0x1000 +#define PAGE_SIZE_64K 0x10000 + #define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 #define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U @@ -24,17 +27,22 @@ * hwif_ready - test if the HW initialization passed * @hwdev: the pointer to the private hardware device object * Return: 0 - success, negative - failure - **/ + */ static int hwif_ready(struct hinic_hwdev *hwdev) { - u32 addr, attr1; + u32 addr, attr0, attr1; addr = 
HINIC_CSR_FUNC_ATTR1_ADDR; attr1 = hinic_hwif_read_reg(hwdev->hwif, addr); - if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS)) return -EBUSY; + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwdev->hwif, addr); + if ((HINIC_AF0_GET(attr0, FUNC_TYPE) == TYPE_VF) && + !HINIC_AF1_GET(attr1, PF_INIT_STATUS)) + return -EBUSY; + return 0; } @@ -44,7 +52,7 @@ static int hwif_ready(struct hinic_hwdev *hwdev) * @attr0: the first attribute that was read from the hw * @attr1: the second attribute that was read from the hw * @attr2: the third attribute that was read from the hw - **/ + */ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1, u32 attr2) { @@ -68,7 +76,7 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1, /** * get_hwif_attr - read and set the attributes as members in hwif * @hwif: the hardware interface of a pci function device - **/ + */ static void get_hwif_attr(struct hinic_hwif *hwif) { u32 addr, attr0, attr1, attr2; @@ -90,6 +98,11 @@ void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status) u32 attr5 = HINIC_AF5_SET(status, PF_STATUS); u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR; + if (hwif->attr.func_type == TYPE_VF) { + PMD_DRV_LOG(ERR, "VF doesn't support set attr5"); + return; + } + hinic_hwif_write_reg(hwif, addr, attr5); } @@ -145,7 +158,7 @@ void hinic_disable_doorbell(struct hinic_hwif *hwif) /** * set_ppf - try to set hwif as ppf and set the type of hwif in this case * @hwif: the hardware interface of a pci function device - **/ + */ static void set_ppf(struct hinic_hwif *hwif) { struct hinic_func_attr *attr = &hwif->attr; @@ -285,6 +298,30 @@ static void disable_all_msix(struct hinic_hwdev *hwdev) hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE); } +/** + * Wait for up enable or disable doorbell flush finished. + * @hwif: the hardware interface of a pci function device. + * @states: Disable or Enable. 
+ */ +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states) +{ + unsigned long end; + enum hinic_doorbell_ctrl db_ctrl; + + end = jiffies + + msecs_to_jiffies(HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT); + do { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + if (db_ctrl == states) + return 0; + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + return -EFAULT; +} + static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif) { unsigned long end; @@ -329,6 +366,17 @@ u8 hinic_ppf_idx(void *hwdev) } /** + * hinic_dma_attr_entry_num - get number id of DMA attribute table. + * @hwdev: the pointer to the private hardware device object. + * Return: The number id of DMA attribute table. + */ +u8 hinic_dma_attr_entry_num(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + return hwif->attr.num_dma_attr; +} + +/** * hinic_init_hwif - initialize the hw interface * @hwdev: the pointer to the private hardware device object * @cfg_reg_base: base physical address of configuration registers @@ -337,7 +385,7 @@ u8 hinic_ppf_idx(void *hwdev) * @db_base: base virtual address of doorbell registers * @dwqe_mapping: direct wqe io mapping address * Return: 0 - success, negative - failure - **/ + */ static int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, u64 db_base_phy, void *db_base, __rte_unused void *dwqe_mapping) @@ -371,6 +419,9 @@ static int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, if (!HINIC_IS_VF(hwdev)) set_ppf(hwif); + /* disable mgmt cpu report any event */ + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); + return 0; hwif_ready_err: @@ -403,10 +454,28 @@ static void hinic_get_mmio(struct hinic_hwdev *hwdev, void **cfg_regs_base, void **intr_base, void **db_base) { struct rte_pci_device *pci_dev = hwdev->pcidev_hdl; + uint64_t bar0_size; + uint64_t bar2_size; + uint64_t bar0_phy_addr; + uint64_t pagesize = 
sysconf(_SC_PAGESIZE); *cfg_regs_base = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].addr; *intr_base = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].addr; *db_base = pci_dev->mem_resource[HINIC_DB_MEM_BAR].addr; + + bar0_size = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].len; + bar2_size = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].len; + + if (pagesize == PAGE_SIZE_64K && (bar0_size % pagesize != 0)) { + bar0_phy_addr = + pci_dev->mem_resource[HINIC_CFG_REGS_BAR].phys_addr; + if (bar0_phy_addr % pagesize != 0 && + (bar0_size + bar2_size <= pagesize) && + bar2_size >= bar0_size) { + *cfg_regs_base = (void *)((uint8_t *)(*intr_base) + + bar2_size); + } + } } void hinic_hwif_res_free(struct hinic_hwdev *hwdev) @@ -459,7 +528,7 @@ int hinic_hwif_res_init(struct hinic_hwdev *hwdev) * @hwdev: the hardware interface of a nic device * @msix_idx: Index of msix interrupt * @clear_resend_en: enable flag of clear resend configuration - **/ + */ void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en) { diff --git a/drivers/net/hinic/base/hinic_pmd_hwif.h b/drivers/net/hinic/base/hinic_pmd_hwif.h index c1289b5..6752587 100644 --- a/drivers/net/hinic/base/hinic_pmd_hwif.h +++ b/drivers/net/hinic/base/hinic_pmd_hwif.h @@ -31,7 +31,7 @@ enum hinic_msix_state { HINIC_MSIX_DISABLE, }; -/* Defines the IRQ information structure*/ +/* Defines the IRQ information structure */ struct irq_info { u16 msix_entry_idx; /* IRQ corresponding index number */ u32 irq_id; /* the IRQ number from OS */ @@ -59,11 +59,11 @@ struct hinic_func_attr { u8 ppf_idx; - u16 num_irqs; /* max: 2 ^ 15 */ - u8 num_aeqs; /* max: 2 ^ 3 */ - u8 num_ceqs; /* max: 2 ^ 7 */ + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ - u8 num_dma_attr; /* max: 2 ^ 6 */ + u8 num_dma_attr; /* max: 2 ^ 6 */ u16 global_vf_id_of_pf; }; @@ -104,6 +104,9 @@ static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) void hinic_free_db_addr(void *hwdev, void 
__iomem *db_base); +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states); + void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag); @@ -116,4 +119,6 @@ void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, void hinic_hwif_res_free(struct hinic_hwdev *hwdev); +u8 hinic_dma_attr_entry_num(void *hwdev); + #endif /* _HINIC_PMD_HWIF_H_ */ diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.c b/drivers/net/hinic/base/hinic_pmd_mgmt.c index a18e567..eee50a8 100644 --- a/drivers/net/hinic/base/hinic_pmd_mgmt.c +++ b/drivers/net/hinic/base/hinic_pmd_mgmt.c @@ -7,6 +7,7 @@ #include "hinic_pmd_hwdev.h" #include "hinic_pmd_hwif.h" #include "hinic_pmd_mgmt.h" +#include "hinic_pmd_mbox.h" #define BUF_OUT_DEFAULT_SIZE 1 @@ -72,7 +73,7 @@ * mgmt_msg_len - calculate the total message length * @msg_data_len: the length of the message data * Return: the total message length - **/ + */ static u16 mgmt_msg_len(u16 msg_data_len) { /* u64 - the size of the header */ @@ -99,7 +100,7 @@ static u16 mgmt_msg_len(u16 msg_data_len) * @direction: the direction of the original message * @cmd: the command to do * @msg_id: message id - **/ + */ static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, u64 *header, int msg_len, enum hinic_mod_type mod, enum hinic_msg_ack_type ack_type, @@ -128,7 +129,7 @@ static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, * @header: pointer of the header to prepare * @msg: the data of the message * @msg_len: the length of the message - **/ + */ static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg, int msg_len) { @@ -149,7 +150,7 @@ static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg, * alloc_recv_msg - allocate received message memory * @recv_msg: pointer that will hold the allocated data * Return: 0 - success, negative - failure - **/ + */ static int alloc_recv_msg(struct hinic_recv_msg *recv_msg) { int err; @@ -177,7 +178,7 @@ 
static int alloc_recv_msg(struct hinic_recv_msg *recv_msg) /** * free_recv_msg - free received message memory * @recv_msg: pointer that holds the allocated data - **/ + */ static void free_recv_msg(struct hinic_recv_msg *recv_msg) { kfree(recv_msg->buf_out); @@ -188,7 +189,7 @@ static void free_recv_msg(struct hinic_recv_msg *recv_msg) * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel * @pf_to_mgmt: PF to MGMT channel * Return: 0 - success, negative - failure - **/ + */ static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) { int err; @@ -237,7 +238,7 @@ static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) * free_msg_buf - free all the message buffers of PF to MGMT channel * @pf_to_mgmt: PF to MGMT channel * Return: 0 - success, negative - failure - **/ + */ static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) { kfree(pf_to_mgmt->sync_msg_buf); @@ -257,7 +258,7 @@ static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) * @direction: the direction of the original message * @resp_msg_id: message id of response * Return: 0 - success, negative - failure - **/ + */ static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, void *msg, u16 msg_len, @@ -295,7 +296,7 @@ static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, * @direction: the direction of the original message * @resp_msg_id: msg id to response for * Return: 0 - success, negative - failure - **/ + */ static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, void *msg, u16 msg_len, @@ -327,7 +328,7 @@ static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, * hinic_pf_to_mgmt_init - initialize PF to MGMT channel * @hwdev: the pointer to the private hardware device object * Return: 0 - success, negative - failure - **/ + */ static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) { struct 
hinic_msg_pf_to_mgmt *pf_to_mgmt; @@ -342,7 +343,7 @@ static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) hwdev->pf_to_mgmt = pf_to_mgmt; pf_to_mgmt->hwdev = hwdev; - err = hinic_mutex_init(&pf_to_mgmt->sync_msg_lock, NULL); + err = hinic_mutex_init(&pf_to_mgmt->sync_msg_mutex, NULL); if (err) goto mutex_init_err; @@ -364,7 +365,7 @@ static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) free_msg_buf(pf_to_mgmt); alloc_msg_buf_err: - hinic_mutex_destroy(&pf_to_mgmt->sync_msg_lock); + hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex); mutex_init_err: kfree(pf_to_mgmt); @@ -375,14 +376,14 @@ static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) /** * hinic_pf_to_mgmt_free - free PF to MGMT channel * @hwdev: the pointer to the private hardware device object - **/ + */ static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) { struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; hinic_api_cmd_free(pf_to_mgmt->cmd_chain); free_msg_buf(pf_to_mgmt); - hinic_mutex_destroy(&pf_to_mgmt->sync_msg_lock); + hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex); kfree(pf_to_mgmt); } @@ -396,7 +397,9 @@ static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) u32 timeo; int err, i; - pthread_mutex_lock(&pf_to_mgmt->sync_msg_lock); + err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex); + if (err) + return err; SYNC_MSG_ID_INC(pf_to_mgmt); recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; @@ -455,7 +458,7 @@ static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) unlock_sync_msg: if (err && out_size) *out_size = 0; - pthread_mutex_unlock(&pf_to_mgmt->sync_msg_lock); + (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex); return err; } @@ -464,6 +467,9 @@ static int hinic_get_mgmt_channel_status(void *hwdev) struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; u32 val; + if (hinic_func_type((struct hinic_hwdev *)hwdev) == TYPE_VF) + return false; + val = hinic_hwif_read_reg(hwif, HINIC_ICPL_RESERVD_ADDR); return 
HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); @@ -482,9 +488,13 @@ int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, if (hinic_get_mgmt_channel_status(hwdev)) return -EPERM; - rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, - in_size, buf_out, out_size, - timeout); + if (hinic_func_type(hwdev) == TYPE_VF) { + rc = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } else { + rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } return rc; } @@ -502,13 +512,15 @@ int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, return err; } - pthread_mutex_lock(&pf_to_mgmt->sync_msg_lock); + err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex); + if (err) + return err; err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND, MSG_NO_RESP); - pthread_mutex_unlock(&pf_to_mgmt->sync_msg_lock); + (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex); return err; } @@ -537,7 +549,7 @@ static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg, * @pf_to_mgmt: PF to MGMT channel * @recv_msg: received message details * @param: customized parameter - **/ + */ static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, struct hinic_recv_msg *recv_msg, void *param) @@ -589,7 +601,7 @@ static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, * @param: customized parameter * Return: 0 when aeq is response message, -1 default result, * and when wrong message or not last message - **/ + */ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, u8 *header, struct hinic_recv_msg *recv_msg, void *param) @@ -609,7 +621,7 @@ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x", hinic_global_func_id(pf_to_mgmt->hwdev), recv_msg->sed_id, seq_id, seq_len); - 
return HINIC_RECV_NEXT_AEQE; + return HINIC_ERROR; } dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN; @@ -617,7 +629,7 @@ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, memcpy(dest_msg, msg_body, seq_len); if (!HINIC_MSG_HEADER_GET(msg_header, LAST)) - return HINIC_RECV_NEXT_AEQE; + return HINIC_ERROR; recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD); recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE); @@ -627,11 +639,11 @@ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID); if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE) - return HINIC_RECV_DONE; + return HINIC_OK; hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param); - return HINIC_RECV_NEXT_AEQE; + return HINIC_ERROR; } /** @@ -642,7 +654,7 @@ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, * @param: customized parameter * Return: 0 when aeq is response message, * -1 default result, and when wrong message or not last message - **/ + */ static int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, __rte_unused u8 size, void *param) { @@ -667,10 +679,13 @@ static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event, case HINIC_MSG_FROM_MGMT_CPU: rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param); break; + case HINIC_MBX_FROM_FUNC: + rc = hinic_mbox_func_aeqe_handler(handle, data, size, param); + break; default: PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d", event, size); - rc = HINIC_RECV_NEXT_AEQE; + rc = HINIC_ERROR; break; } @@ -685,7 +700,7 @@ static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event, * used in polling mode. 
* @param: customized parameter * Return: 0 - Success, EIO - poll timeout, ENODEV - swe not support - **/ + */ int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param) { struct hinic_aeq_elem *aeqe_pos; @@ -753,6 +768,10 @@ int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev) { int rc; + /* VF do not support send msg to mgmt directly */ + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + rc = hinic_pf_to_mgmt_init(hwdev); if (rc) return rc; @@ -764,6 +783,10 @@ int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev) void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev) { + /* VF do not support send msg to mgmt directly */ + if (hinic_func_type(hwdev) == TYPE_VF) + return; + hinic_pf_to_mgmt_free(hwdev); } diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.h b/drivers/net/hinic/base/hinic_pmd_mgmt.h index 7804708..cc18843 100644 --- a/drivers/net/hinic/base/hinic_pmd_mgmt.h +++ b/drivers/net/hinic/base/hinic_pmd_mgmt.h @@ -82,7 +82,7 @@ struct hinic_msg_pf_to_mgmt { struct hinic_hwdev *hwdev; /* mutex for sync message */ - pthread_mutex_t sync_msg_lock; + pthread_mutex_t sync_msg_mutex; void *async_msg_buf; void *sync_msg_buf; diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index 78012b8..d3bbc3d 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -10,6 +10,7 @@ #include "hinic_pmd_mgmt.h" #include "hinic_pmd_cmdq.h" #include "hinic_pmd_niccfg.h" +#include "hinic_pmd_mbox.h" #define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, \ in_size, buf_out, out_size) \ @@ -17,6 +18,18 @@ buf_in, in_size, \ buf_out, out_size, 0) +/** + * hinic_init_function_table - Initialize function table. + * + * @param hwdev + * The hardware interface of a nic device. + * @param rx_buf_sz + * Receive buffer size. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ int hinic_init_function_table(void *hwdev, u16 rx_buf_sz) { struct hinic_function_table function_table; @@ -40,8 +53,8 @@ int hinic_init_function_table(void *hwdev, u16 rx_buf_sz) &function_table, &out_size, 0); if (err || function_table.mgmt_msg_head.status || !out_size) { PMD_DRV_LOG(ERR, - "Failed to init func table, ret = %d", - function_table.mgmt_msg_head.status); + "Failed to init func table, err: %d, status: 0x%x, out size: 0x%x", + err, function_table.mgmt_msg_head.status, out_size); return -EFAULT; } @@ -49,13 +62,17 @@ int hinic_init_function_table(void *hwdev, u16 rx_buf_sz) } /** - * hinic_get_base_qpn - get global number of queue - * @hwdev: the hardware interface of a nic device - * @global_qpn: vat page size + * hinic_get_base_qpn - Get global queue number. + * + * @param hwdev + * The hardware interface of a nic device. + * @param global_qpn + * Global queue number. + * * @return - * 0 on success, + * 0 on success. * negative error value otherwise. - **/ + */ int hinic_get_base_qpn(void *hwdev, u16 *global_qpn) { struct hinic_cmd_qpn cmd_qpn; @@ -77,8 +94,8 @@ int hinic_get_base_qpn(void *hwdev, u16 *global_qpn) &out_size, 0); if (err || !out_size || cmd_qpn.mgmt_msg_head.status) { PMD_DRV_LOG(ERR, - "Failed to get base qpn, status(%d)", - cmd_qpn.mgmt_msg_head.status); + "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x", + err, cmd_qpn.mgmt_msg_head.status, out_size); return -EINVAL; } @@ -89,12 +106,18 @@ int hinic_get_base_qpn(void *hwdev, u16 *global_qpn) /** * hinic_set_mac - Init mac_vlan table in NIC. - * @hwdev: the hardware interface of a nic device - * @mac_addr: mac address - * @vlan_id: set 0 for mac_vlan table initialization - * @func_id: global function id of NIC + * + * @param hwdev + * The hardware interface of a nic device. + * @param mac_addr + * MAC address. + * @param vlan_id + * Set 0 for mac_vlan table initialization. + * @param func_id + * Global function id of NIC. 
+ * * @return - * 0 on success and stats is filled, + * 0 on success. * negative error value otherwise. */ int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id) @@ -122,6 +145,7 @@ int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id) err, mac_info.mgmt_msg_head.status, out_size); return -EINVAL; } + if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) { PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore set operation."); return HINIC_PF_SET_VF_ALREADY; @@ -132,16 +156,21 @@ int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id) /** * hinic_del_mac - Uninit mac_vlan table in NIC. - * @hwdev: the hardware interface of a nic device - * @mac_addr: mac address - * @vlan_id: set 0 for mac_vlan table initialization - * @func_id: global function id of NIC + * + * @param hwdev + * The hardware interface of a nic device. + * @param mac_addr + * MAC address. + * @param vlan_id + * Set 0 for mac_vlan table initialization. + * @param func_id + * Global function id of NIC. + * * @return - * 0 on success and stats is filled, + * 0 on success. * negative error value otherwise. */ -int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, - u16 func_id) +int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id) { struct hinic_port_mac_set mac_info; u16 out_size = sizeof(mac_info); @@ -179,6 +208,18 @@ int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, return 0; } +/** + * hinic_get_default_mac - Get default mac address from hardware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param mac_addr + * MAC address. + * + * @return + * 0 on success. + * negative error value otherwise. + */ int hinic_get_default_mac(void *hwdev, u8 *mac_addr) { struct hinic_port_mac_set mac_info; @@ -208,6 +249,18 @@ int hinic_get_default_mac(void *hwdev, u8 *mac_addr) return 0; } +/** + * hinic_set_port_mtu - Set MTU to port. + * + * @param hwdev + * The hardware interface of a nic device. 
+ * @param new_mtu + * MTU size. + * + * @return + * 0 on success. + * negative error value otherwise. + */ int hinic_set_port_mtu(void *hwdev, u32 new_mtu) { struct hinic_mtu mtu_info; @@ -236,6 +289,18 @@ int hinic_set_port_mtu(void *hwdev, u32 new_mtu) return 0; } +/** + * hinic_get_link_status - Get link status from hardware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param link_state + * Link status. + * + * @return + * 0 on success. + * negative error value otherwise. + */ int hinic_get_link_status(void *hwdev, u8 *link_state) { struct hinic_get_link get_link; @@ -267,10 +332,16 @@ int hinic_get_link_status(void *hwdev, u8 *link_state) /** * hinic_set_vport_enable - Notify firmware that driver is ready or not. - * @hwdev: the hardware interface of a nic device - * @enable: 1: driver is ready; 0: driver is not ok. - * Return: 0 on success and state is filled, negative error value otherwise. - **/ + * + * @param hwdev + * The hardware interface of a nic device. + * @param enable + * 1: driver is ready; 0: driver is not ok. + * + * @return + * 0 on success. + * negative error value otherwise. + */ int hinic_set_vport_enable(void *hwdev, bool enable) { struct hinic_vport_state en_state; @@ -300,11 +371,15 @@ int hinic_set_vport_enable(void *hwdev, bool enable) } /** - * hinic_set_port_enable - open MAG to receive packets. - * @hwdev: the hardware interface of a nic device - * @enable: 1: open MAG; 0: close MAG. + * hinic_set_port_enable - Open MAG to receive packets. + * + * @param hwdev + * The hardware interface of a nic device. + * @param enable + * 1: open MAG; 0: close MAG. + * * @return - * 0 on success and stats is filled, + * 0 on success. * negative error value otherwise. 
*/ int hinic_set_port_enable(void *hwdev, bool enable) @@ -318,6 +393,9 @@ int hinic_set_port_enable(void *hwdev, bool enable) return -EINVAL; } + if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) + return 0; + memset(&en_state, 0, sizeof(en_state)); en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; en_state.state = (enable ? HINIC_PORT_ENABLE : HINIC_PORT_DISABLE); @@ -432,7 +510,7 @@ int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, memset(&ets, 0, sizeof(ets)); ets.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; - ets.port_id = 0; /* reserved */ + ets.port_id = 0; /* reserved */ memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX); memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX); memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX); @@ -672,12 +750,17 @@ int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp) } /** - * hinic_rss_set_hash_engine - Init rss hash function . - * @hwdev: the hardware interface of a nic device - * @tmpl_idx: index of rss template from NIC. - * @type: hash function, such as Toeplitz or XOR. + * hinic_rss_set_hash_engine - Init rss hash function. + * + * @param hwdev + * The hardware interface of a nic device. + * @param tmpl_idx + * Index of rss template from NIC. + * @param type + * Hash function, such as Toeplitz or XOR. + * * @return - * 0 on success and stats is filled, + * 0 on success. * negative error value otherwise. */ int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type) @@ -845,12 +928,18 @@ int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc) } /** - * hinic_rss_template_alloc - get rss template id from the chip, - * all functions share 96 templates. - * @hwdev: the pointer to the private hardware device object - * @tmpl_idx: index of rss template from chip. - * Return: 0 on success and stats is filled, negative error value otherwise. - **/ + * hinic_rss_template_alloc - Get rss template id from the chip, + * all functions share 96 templates. + * + * @param hwdev + * The hardware interface of a nic device. 
+ * @param tmpl_idx + * Index of rss template from chip. + * + * @return + * 0 on success. + * negative error value otherwise. + */ int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx) { struct hinic_rss_template_mgmt template_mgmt; @@ -882,11 +971,17 @@ int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx) } /** - * hinic_rss_template_alloc - free rss template id to the chip - * @hwdev: the hardware interface of a nic device - * @tmpl_idx: index of rss template from NIC. - * Return: 0 on success and stats is filled, negative error value otherwise. - **/ + * hinic_rss_template_free - Free rss template id to the chip. + * + * @param hwdev + * The hardware interface of a nic device. + * @param tmpl_idx + * Index of rss template from chip. + * + * @return + * 0 on success. + * negative error value otherwise. + */ int hinic_rss_template_free(void *hwdev, u8 tmpl_idx) { struct hinic_rss_template_mgmt template_mgmt; @@ -917,12 +1012,17 @@ int hinic_rss_template_free(void *hwdev, u8 tmpl_idx) } /** - * hinic_set_rx_vhd_mode - change rx buffer size after initialization, - * @hwdev: the hardware interface of a nic device - * @mode: not needed. - * @rx_buf_sz: receive buffer size. + * hinic_set_rx_vhd_mode - Change rx buffer size after initialization. + * + * @param hwdev + * The hardware interface of a nic device. + * @param vhd_mode + * Not needed. + * @param rx_buf_sz + * receive buffer size. + * * @return - * 0 on success and stats is filled, + * 0 on success. * negative error value otherwise. */ int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz) @@ -1100,6 +1200,54 @@ int hinic_reset_port_link_cfg(void *hwdev) return 0; } +/** + * hinic_vf_func_init - Register VF to PF. + * + * @param hwdev + * The hardware interface of a nic device. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ +int hinic_vf_func_init(struct hinic_hwdev *hwdev) +{ + int err, state = 0; + + if (!HINIC_IS_VF(hwdev)) + return 0; + + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_VF_REGISTER, &state, sizeof(state), + NULL, NULL, 0); + if (err) { + PMD_DRV_LOG(ERR, "Fail to register vf"); + return err; + } + + return 0; +} + +/** + * hinic_vf_func_free - Unregister VF from PF. + * + * @param hwdev + * The hardware interface of a nic device. + */ +void hinic_vf_func_free(struct hinic_hwdev *hwdev) +{ + int err; + + if (hinic_func_type(hwdev) != TYPE_VF) + return; + + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_VF_UNREGISTER, &err, sizeof(err), + NULL, NULL, 0); + if (err) + PMD_DRV_LOG(ERR, "Fail to unregister VF, err: %d", err); +} + int hinic_set_fast_recycle_mode(void *hwdev, u8 mode) { struct hinic_fast_recycled_mode fast_recycled_mode; @@ -1199,6 +1347,9 @@ int hinic_set_link_status_follow(void *hwdev, if (!hwdev) return -EINVAL; + if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) + return 0; + if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) { PMD_DRV_LOG(ERR, "Invalid link follow status: %d", status); @@ -1254,11 +1405,16 @@ int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised) } /** - * hinic_flush_qp_res - Flush tx && rx chip resources in case of set vport fake - * failed when device start. - * @hwdev: the hardware interface of a nic device - * Return: 0 on success, negative error value otherwise. - **/ + * hinic_flush_qp_res - Flush tx && rx chip resources in case of set vport + * fake failed when device start. + * + * @param hwdev + * The hardware interface of a nic device. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ int hinic_flush_qp_res(void *hwdev) { struct hinic_clear_qp_resource qp_res; diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h index 174b40f..dcb9602 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.h +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -655,4 +655,8 @@ int hinic_set_link_status_follow(void *hwdev, int hinic_clear_phy_port_stats(struct hinic_hwdev *hwdev); +int hinic_vf_func_init(struct hinic_hwdev *hwdev); + +void hinic_vf_func_free(struct hinic_hwdev *hwdev); + #endif /* _HINIC_PMD_NICCFG_H_ */ diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c b/drivers/net/hinic/base/hinic_pmd_nicio.c index 248211f..f6cc033 100644 --- a/drivers/net/hinic/base/hinic_pmd_nicio.c +++ b/drivers/net/hinic/base/hinic_pmd_nicio.c @@ -442,7 +442,7 @@ static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io) * @rx_buf_sz: receive buffer size * @return * hw rx buffer size - **/ + */ static u16 get_hw_rx_buf_size(u32 rx_buf_sz) { u16 num_hw_types = sizeof(hinic_hw_rx_buf_size) @@ -466,7 +466,7 @@ static u16 get_hw_rx_buf_size(u32 rx_buf_sz) * @sq_depth: the depth of transmit queue * @rx_buf_sz: receive buffer size from app * Return: 0 on success, negative error value otherwise. - **/ + */ static int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz) { @@ -495,7 +495,7 @@ static u16 get_hw_rx_buf_size(u32 rx_buf_sz) * @return * 0 on success, * negative error value otherwise. 
- **/ + */ static int hinic_clean_root_ctxt(void *hwdev) { struct hinic_root_ctxt root_ctxt; @@ -618,6 +618,12 @@ static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev) goto err_init_nic_hwdev; } + err = hinic_vf_func_init(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init nic mbox"); + goto err_init_nic_hwdev; + } + err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK); if (err) { PMD_DRV_LOG(ERR, "Failed to set fast recycle mode"); @@ -632,6 +638,7 @@ static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev) static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev) { + hinic_vf_func_free(hwdev); hwdev->nic_io = NULL; } @@ -861,7 +868,7 @@ void hinic_deinit_nicio(struct hinic_hwdev *hwdev) * @return * 0 on success, * negative error value otherwise. - **/ + */ int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz) { u32 i, num_hw_types, best_match_sz; diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.h b/drivers/net/hinic/base/hinic_pmd_nicio.h index 487e440..9a487d0 100644 --- a/drivers/net/hinic/base/hinic_pmd_nicio.h +++ b/drivers/net/hinic/base/hinic_pmd_nicio.h @@ -217,7 +217,7 @@ struct hinic_nic_io { u16 vhd_mode; struct hinic_qp *qps; - /* sq ci mem base addr of the function*/ + /* sq ci mem base addr of the function */ void *ci_vaddr_base; dma_addr_t ci_dma_base; @@ -229,7 +229,6 @@ struct hinic_sq_db { u32 db_info; }; - int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev); void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev); From patchwork Mon Sep 30 14:00:41 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60190 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 939481B948; Mon, 30 Sep 2019 15:46:59 +0200 (CEST) Received: from huawei.com (szxga07-in.huawei.com 
[45.249.212.35]) by dpdk.org (Postfix) with ESMTP id 1C22A5B32 for ; Mon, 30 Sep 2019 15:46:58 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id B9683FCDCF90B4443ECB for ; Mon, 30 Sep 2019 21:46:56 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:46:47 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:41 +0800 Message-ID: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 03/19] net/hinic: add VF PMD operation interfaces X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds VF PMD operation interfaces to support SR-IOV. Signed-off-by: Xiaoyun wang --- drivers/net/hinic/base/hinic_pmd_niccfg.c | 35 ++++ drivers/net/hinic/base/hinic_pmd_niccfg.h | 14 ++ drivers/net/hinic/hinic_pmd_ethdev.c | 255 ++++++++++++++++++++---------- 3 files changed, 223 insertions(+), 81 deletions(-) diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index d3bbc3d..0a20ade 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -1436,3 +1436,38 @@ int hinic_flush_qp_res(void *hwdev) return 0; } + +/** + * hinic_vf_get_default_cos - Get default cos of VF. + * + * @param hwdev + * The hardware interface of a nic device. + * @param cos_id + * Cos value. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ +int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id) +{ + struct hinic_vf_default_cos vf_cos; + u16 out_size = sizeof(vf_cos); + int err; + + memset(&vf_cos, 0, sizeof(vf_cos)); + vf_cos.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_VF_COS, &vf_cos, + sizeof(vf_cos), &vf_cos, + &out_size, 0); + if (err || !out_size || vf_cos.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d, status: 0x%x, out size: 0x%x", + err, vf_cos.mgmt_msg_head.status, out_size); + return -EFAULT; + } + *cos_id = vf_cos.state.default_cos; + + return 0; +} diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h index dcb9602..94371bb 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.h +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -562,6 +562,18 @@ struct hinic_clear_qp_resource { u16 rsvd1; }; +struct hinic_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +struct hinic_vf_default_cos { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_dcb_state state; +}; + /* set physical port Anti-Attack rate */ struct hinic_port_anti_attack_rate { struct hinic_mgmt_msg_head mgmt_msg_head; @@ -659,4 +671,6 @@ int hinic_set_link_status_follow(void *hwdev, void hinic_vf_func_free(struct hinic_hwdev *hwdev); +int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id); + #endif /* _HINIC_PMD_NICCFG_H_ */ diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index c9a400e..2f413e3 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -20,6 +20,7 @@ #include "base/hinic_pmd_cmdq.h" #include "base/hinic_pmd_niccfg.h" #include "base/hinic_pmd_nicio.h" +#include "base/hinic_pmd_mbox.h" #include "hinic_pmd_ethdev.h" #include "hinic_pmd_tx.h" #include "hinic_pmd_rx.h" @@ -29,13 +30,21 @@ /* Hinic devices */ #define HINIC_DEV_ID_PRD 0x1822 
+#define HINIC_DEV_ID_VF 0x375E +#define HINIC_DEV_ID_VF_HV 0x379E + +/* Mezz card for Blade Server */ #define HINIC_DEV_ID_MEZZ_25GE 0x0210 #define HINIC_DEV_ID_MEZZ_40GE 0x020D #define HINIC_DEV_ID_MEZZ_100GE 0x0205 +/* 2*25G and 2*100G card */ +#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206 +#define HINIC_DEV_ID_1822_100GE 0x0200 + #define HINIC_SERVICE_MODE_NIC 2 -#define HINIC_INTR_CB_UNREG_MAX_RETRIES 10 +#define HINIC_INTR_CB_UNREG_MAX_RETRIES 10 #define DEFAULT_BASE_COS 4 #define NR_MAX_COS 8 @@ -43,7 +52,7 @@ #define HINIC_MIN_RX_BUF_SIZE 1024 #define HINIC_MAX_MAC_ADDRS 1 -/** Driver-specific log messages type. */ +/* Driver-specific log messages type */ int hinic_logtype; struct hinic_xstats_name_off { @@ -197,10 +206,16 @@ struct hinic_xstats_name_off { static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev) { - return (HINIC_VPORT_XSTATS_NUM + - HINIC_PHYPORT_XSTATS_NUM + - HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq + - HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq); + if (HINIC_IS_VF(nic_dev->hwdev)) { + return (HINIC_VPORT_XSTATS_NUM + + HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq + + HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq); + } else { + return (HINIC_VPORT_XSTATS_NUM + + HINIC_PHYPORT_XSTATS_NUM + + HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq + + HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq); + } } static const struct rte_eth_desc_lim hinic_rx_desc_lim = { @@ -599,7 +614,7 @@ static void hinic_reset_tx_queue(struct rte_eth_dev *dev) txq->wq->delta = txq->q_depth; txq->wq->mask = txq->q_depth - 1; - /*clear hardware ci*/ + /* clear hardware ci */ ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id); *ci_addr = 0; @@ -888,6 +903,9 @@ static int hinic_dev_start(struct rte_eth_dev *dev) goto cfg_rxtx_fail; } + /* reactive pf status, so that uP report asyn event */ + hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG); + /* open virtual port and ready to start packet receiving */ rc = hinic_set_vport_enable(nic_dev->hwdev, true); if (rc) { 
@@ -915,6 +933,8 @@ static int hinic_dev_start(struct rte_eth_dev *dev) (void)hinic_set_vport_enable(nic_dev->hwdev, false); en_vport_fail: + hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT); + /* Flush tx && rx chip resources in case of set vport fake fail */ (void)hinic_flush_qp_res(nic_dev->hwdev); rte_delay_ms(100); @@ -1233,7 +1253,7 @@ static int hinic_dev_stats_reset(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. - **/ + */ static int hinic_dev_xstats_reset(struct rte_eth_dev *dev) { struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); @@ -1697,6 +1717,9 @@ static int hinic_dev_xstats_get(struct rte_eth_dev *dev, count++; } + if (HINIC_IS_VF(nic_dev->hwdev)) + return count; + /* Get stats from hinic_phy_port_stats */ err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats); if (err) @@ -1765,6 +1788,9 @@ static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev, count++; } + if (HINIC_IS_VF(nic_dev->hwdev)) + return count; + /* get phy port stats */ for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { snprintf(xstats_names[count].name, @@ -1805,18 +1831,39 @@ static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev) up_pgid, up_bw, up_strict); } -static void hinic_init_default_cos(struct hinic_nic_dev *nic_dev) +static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev) { - nic_dev->default_cos = - (hinic_global_func_id(nic_dev->hwdev) + - DEFAULT_BASE_COS) % NR_MAX_COS; + u8 cos_id = 0; + int err; + + if (!HINIC_IS_VF(nic_dev->hwdev)) { + nic_dev->default_cos = + (hinic_global_func_id(nic_dev->hwdev) + + DEFAULT_BASE_COS) % NR_MAX_COS; + } else { + err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id); + if (err) { + PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d", + err); + return HINIC_ERROR; + } + + nic_dev->default_cos = cos_id; + } + + return 0; } static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) { int err; - 
hinic_init_default_cos(nic_dev); + err = hinic_init_default_cos(nic_dev); + if (err) + return err; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) + return 0; /* Restore DCB configure to default status */ err = hinic_set_default_dcb_feature(nic_dev); @@ -1852,6 +1899,9 @@ static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev) struct hinic_board_info info = { 0 }; int rc; + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) + return 0; + rc = hinic_get_board_info(nic_dev->hwdev, &info); if (rc) return rc; @@ -1867,7 +1917,7 @@ static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev) nic_dev->cpy_mpool = rte_pktmbuf_pool_create(nic_dev->proc_dev_name, HINIC_COPY_MEMPOOL_DEPTH, - RTE_CACHE_LINE_SIZE, 0, + 0, 0, HINIC_COPY_MBUF_SIZE, rte_socket_id()); if (!nic_dev->cpy_mpool) { @@ -1978,6 +2028,14 @@ static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev) goto init_pf_to_mgmt_fail; } + /* init mailbox */ + rc = hinic_comm_func_to_func_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s", + eth_dev->data->name); + goto init_func_to_func_fail; + } + rc = hinic_card_workmode_check(nic_dev); if (rc) { PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s", @@ -2077,6 +2135,9 @@ static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev) init_cmdq_fail: l2nic_reset_fail: workmode_check_fail: + hinic_comm_func_to_func_free(nic_dev->hwdev); + +init_func_to_func_fail: hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); init_pf_to_mgmt_fail: @@ -2110,6 +2171,7 @@ static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev) hinic_deinit_nicio(nic_dev->hwdev); hinic_deactivate_hwdev_state(nic_dev->hwdev); hinic_comm_cmdqs_free(nic_dev->hwdev); + hinic_comm_func_to_func_free(nic_dev->hwdev); hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); hinic_comm_aeqs_free(nic_dev->hwdev); free_cfg_mgmt(nic_dev->hwdev); @@ -2119,6 +2181,93 @@ static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev) nic_dev->hwdev = 
NULL; } +/** + * DPDK callback to close the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void hinic_dev_close(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (hinic_test_and_set_bit(HINIC_DEV_CLOSE, &nic_dev->dev_status)) { + PMD_DRV_LOG(WARNING, "Device %s already closed", + dev->data->name); + return; + } + + /* stop device first */ + hinic_dev_stop(dev); + + /* rx_cqe, rx_info */ + hinic_free_all_rx_resources(dev); + + /* tx_info */ + hinic_free_all_tx_resources(dev); + + /* free wq, pi_dma_addr */ + hinic_free_all_rq(nic_dev); + + /* free wq, db_addr */ + hinic_free_all_sq(nic_dev); + + /* deinit mac vlan tbl */ + hinic_deinit_mac_addr(dev); + + /* disable hardware and uio interrupt */ + hinic_disable_interrupt(dev); + + /* deinit nic hardware device */ + hinic_nic_dev_destroy(dev); +} + +static const struct eth_dev_ops hinic_pmd_ops = { + .dev_configure = hinic_dev_configure, + .dev_infos_get = hinic_dev_infos_get, + .rx_queue_setup = hinic_rx_queue_setup, + .tx_queue_setup = hinic_tx_queue_setup, + .dev_start = hinic_dev_start, + .link_update = hinic_link_update, + .rx_queue_release = hinic_rx_queue_release, + .tx_queue_release = hinic_tx_queue_release, + .dev_stop = hinic_dev_stop, + .dev_close = hinic_dev_close, + .promiscuous_enable = hinic_dev_promiscuous_enable, + .promiscuous_disable = hinic_dev_promiscuous_disable, + .rss_hash_update = hinic_rss_hash_update, + .rss_hash_conf_get = hinic_rss_conf_get, + .reta_update = hinic_rss_indirtbl_update, + .reta_query = hinic_rss_indirtbl_query, + .stats_get = hinic_dev_stats_get, + .stats_reset = hinic_dev_stats_reset, + .xstats_get = hinic_dev_xstats_get, + .xstats_reset = hinic_dev_xstats_reset, + .xstats_get_names = hinic_dev_xstats_get_names, +}; + +static const struct eth_dev_ops hinic_pmd_vf_ops = { + .dev_configure = hinic_dev_configure, + .dev_infos_get = hinic_dev_infos_get, + .rx_queue_setup = 
hinic_rx_queue_setup, + .tx_queue_setup = hinic_tx_queue_setup, + .dev_start = hinic_dev_start, + .link_update = hinic_link_update, + .rx_queue_release = hinic_rx_queue_release, + .tx_queue_release = hinic_tx_queue_release, + .dev_stop = hinic_dev_stop, + .dev_close = hinic_dev_close, + .rss_hash_update = hinic_rss_hash_update, + .rss_hash_conf_get = hinic_rss_conf_get, + .reta_update = hinic_rss_indirtbl_update, + .reta_query = hinic_rss_indirtbl_query, + .stats_get = hinic_dev_stats_get, + .stats_reset = hinic_dev_stats_reset, + .xstats_get = hinic_dev_xstats_get, + .xstats_reset = hinic_dev_xstats_reset, + .xstats_get_names = hinic_dev_xstats_get_names, +}; + static int hinic_func_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; @@ -2173,6 +2322,11 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) goto create_nic_dev_fail; } + if (HINIC_IS_VF(nic_dev->hwdev)) + eth_dev->dev_ops = &hinic_pmd_vf_ops; + else + eth_dev->dev_ops = &hinic_pmd_ops; + rc = hinic_init_mac_addr(eth_dev); if (rc) { PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s", @@ -2214,6 +2368,7 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) hinic_deinit_mac_addr(eth_dev); init_mac_fail: + eth_dev->dev_ops = NULL; hinic_nic_dev_destroy(eth_dev); create_nic_dev_fail: @@ -2226,71 +2381,6 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) return rc; } -/** - * DPDK callback to close the device. - * - * @param dev - * Pointer to Ethernet device structure. 
- */ -static void hinic_dev_close(struct rte_eth_dev *dev) -{ - struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); - - if (hinic_test_and_set_bit(HINIC_DEV_CLOSE, &nic_dev->dev_status)) { - PMD_DRV_LOG(WARNING, "Device %s already closed", - dev->data->name); - return; - } - - /* stop device first */ - hinic_dev_stop(dev); - - /* rx_cqe, rx_info */ - hinic_free_all_rx_resources(dev); - - /* tx_info */ - hinic_free_all_tx_resources(dev); - - /* free wq, pi_dma_addr */ - hinic_free_all_rq(nic_dev); - - /* free wq, db_addr */ - hinic_free_all_sq(nic_dev); - - /* deinit mac vlan tbl */ - hinic_deinit_mac_addr(dev); - - /* disable hardware and uio interrupt */ - hinic_disable_interrupt(dev); - - /* deinit nic hardware device */ - hinic_nic_dev_destroy(dev); -} - -static const struct eth_dev_ops hinic_pmd_ops = { - .dev_configure = hinic_dev_configure, - .dev_infos_get = hinic_dev_infos_get, - .rx_queue_setup = hinic_rx_queue_setup, - .tx_queue_setup = hinic_tx_queue_setup, - .dev_start = hinic_dev_start, - .link_update = hinic_link_update, - .rx_queue_release = hinic_rx_queue_release, - .tx_queue_release = hinic_tx_queue_release, - .dev_stop = hinic_dev_stop, - .dev_close = hinic_dev_close, - .promiscuous_enable = hinic_dev_promiscuous_enable, - .promiscuous_disable = hinic_dev_promiscuous_disable, - .rss_hash_update = hinic_rss_hash_update, - .rss_hash_conf_get = hinic_rss_conf_get, - .reta_update = hinic_rss_indirtbl_update, - .reta_query = hinic_rss_indirtbl_query, - .stats_get = hinic_dev_stats_get, - .stats_reset = hinic_dev_stats_reset, - .xstats_get = hinic_dev_xstats_get, - .xstats_reset = hinic_dev_xstats_reset, - .xstats_get_names = hinic_dev_xstats_get_names, -}; - static int hinic_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; @@ -2303,8 +2393,7 @@ static int hinic_dev_init(struct rte_eth_dev *eth_dev) (rte_eal_process_type() == RTE_PROC_PRIMARY) ? 
"primary" : "secondary"); - /* rte_eth_dev ops, rx_burst and tx_burst */ - eth_dev->dev_ops = &hinic_pmd_ops; + /* rte_eth_dev rx_burst and tx_burst */ eth_dev->rx_pkt_burst = hinic_recv_pkts; eth_dev->tx_pkt_burst = hinic_xmit_pkts; @@ -2338,6 +2427,10 @@ static int hinic_dev_uninit(struct rte_eth_dev *dev) { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) }, { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_40GE) }, { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) }, {.vendor_id = 0}, }; From patchwork Mon Sep 30 14:00:42 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60191 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BB1604CE4; Mon, 30 Sep 2019 15:47:36 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id BE01829C6 for ; Mon, 30 Sep 2019 15:47:33 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 346FA367E37564FAA78C for ; Mon, 30 Sep 2019 21:47:32 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:47:17 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:42 +0800 Message-ID: <03934d80c2f2dacbf05ebab1377205d88e487316.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 
In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 04/19] net/hinic: add VLAN filter and offload X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds support for VLAN filter and VLAN offload. Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 2 + doc/guides/nics/hinic.rst | 1 + drivers/net/hinic/base/hinic_pmd_cmd.h | 1 + drivers/net/hinic/base/hinic_pmd_niccfg.c | 136 +++++++++++++++++++++ drivers/net/hinic/base/hinic_pmd_niccfg.h | 29 +++++ drivers/net/hinic/hinic_pmd_ethdev.c | 194 +++++++++++++++++++++++++++++- drivers/net/hinic/hinic_pmd_ethdev.h | 15 ++- 7 files changed, 372 insertions(+), 6 deletions(-) diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index c858411..8b09509 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -20,6 +20,8 @@ RSS key update = Y RSS reta update = Y Inner RSS = Y SR-IOV = Y +VLAN filter = Y +VLAN offload = Y CRC offload = Y L3 checksum offload = Y L4 checksum offload = Y diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index c3ce101..881075a 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -25,6 +25,7 @@ Features - Link flow control - Scattered and gather for TX and RX - SR-IOV - Partially supported at this point, VFIO only +- VLAN filter and VLAN offload Prerequisites ------------- diff --git a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h index c8750b8..5c38b5f 100644 --- a/drivers/net/hinic/base/hinic_pmd_cmd.h +++ b/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -140,6 +140,7 @@ enum hinic_port_cmd { HINIC_PORT_CMD_SET_VHD_CFG = 0xF7, HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8, + 
HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF }; /* cmd of mgmt CPU message for HW module */ diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index 0a20ade..8bd7ed6 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -290,6 +290,142 @@ int hinic_set_port_mtu(void *hwdev, u32 new_mtu) } /** + * hinic_add_remove_vlan - Add or remove vlan id to vlan elb table. + * + * @param hwdev + * The hardware interface of a nic device. + * @param vlan_id + * Vlan id. + * @param func_id + * Global function id of NIC. + * @param add + * Add or remove operation. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_add_remove_vlan(void *hwdev, u16 vlan_id, u16 func_id, bool add) +{ + struct hinic_vlan_config vlan_info; + u16 out_size = sizeof(vlan_info); + u8 cmd; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + cmd = add ? HINIC_PORT_CMD_ADD_VLAN : HINIC_PORT_CMD_DEL_VLAN; + + memset(&vlan_info, 0, sizeof(vlan_info)); + vlan_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vlan_info.func_id = func_id; + vlan_info.vlan_id = vlan_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vlan_info, + sizeof(vlan_info), &vlan_info, + &out_size); + if (err || !out_size || vlan_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x\n", + add ? "add" : "remove", err, + vlan_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_config_vlan_filter - Enable or Disable vlan filter. + * + * @param hwdev + * The hardware interface of a nic device. + * @param vlan_filter_ctrl + * Enable or Disable. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ +int hinic_config_vlan_filter(void *hwdev, u32 vlan_filter_ctrl) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_filter vlan_filter; + u16 out_size = sizeof(vlan_filter); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&vlan_filter, 0, sizeof(vlan_filter)); + vlan_filter.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vlan_filter.func_id = hinic_global_func_id(nic_hwdev); + vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; + + err = l2nic_msg_to_mgmt_sync(nic_hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER, + &vlan_filter, sizeof(vlan_filter), + &vlan_filter, &out_size); + if (vlan_filter.mgmt_msg_head.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if ((err == HINIC_MBOX_VF_CMD_ERROR) && + (HINIC_IS_VF(nic_hwdev))) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if (err || !out_size || vlan_filter.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to config vlan filter, vlan_filter_ctrl: 0x%x, err: %d, status: 0x%x, out size: 0x%x\n", + vlan_filter_ctrl, err, + vlan_filter.mgmt_msg_head.status, out_size); + err = -EINVAL; + } + + return err; +} + +/** + * hinic_set_rx_vlan_offload - Enable or Disable vlan offload. + * + * @param hwdev + * The hardware interface of a nic device. + * @param en + * Enable or Disable. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ +int hinic_set_rx_vlan_offload(void *hwdev, u8 en) +{ + struct hinic_vlan_offload vlan_cfg; + u16 out_size = sizeof(vlan_cfg); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&vlan_cfg, 0, sizeof(vlan_cfg)); + vlan_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vlan_cfg.func_id = hinic_global_func_id(hwdev); + vlan_cfg.vlan_rx_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, + &vlan_cfg, sizeof(vlan_cfg), + &vlan_cfg, &out_size); + if (err || !out_size || vlan_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** * hinic_get_link_status - Get link status from hardware. * * @param hwdev diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h index 94371bb..d19a834 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.h +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -380,6 +380,29 @@ struct hinic_mtu { u32 mtu; }; +struct hinic_vlan_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; +}; + +struct hinic_vlan_filter { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rsvd1[2]; + u32 vlan_filter_ctrl; +}; + +struct hinic_vlan_offload { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 vlan_rx_offload; + u8 rsvd1[5]; +}; + struct hinic_get_link { struct hinic_mgmt_msg_head mgmt_msg_head; @@ -597,6 +620,12 @@ int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, int hinic_set_port_mtu(void *hwdev, u32 new_mtu); +int hinic_add_remove_vlan(void *hwdev, u16 vlan_id, u16 func_id, bool add); + +int hinic_config_vlan_filter(void *hwdev, u32 vlan_filter_ctrl); + +int hinic_set_rx_vlan_offload(void *hwdev, u8 en); + int hinic_set_vport_enable(void *hwdev, bool enable); int 
hinic_set_port_enable(void *hwdev, bool enable); diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 2f413e3..4e2a69c 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "base/hinic_compat.h" #include "base/hinic_pmd_hwdev.h" @@ -52,6 +53,17 @@ #define HINIC_MIN_RX_BUF_SIZE 1024 #define HINIC_MAX_MAC_ADDRS 1 +/* + * vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. + */ +#define HINIC_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define HINIC_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +#define HINIC_VLAN_FILTER_EN (1U << 0) + /* Driver-specific log messages type */ int hinic_logtype; @@ -230,6 +242,7 @@ static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev) .nb_align = HINIC_TXD_ALIGN, }; +static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask); /** * Interrupt handler triggered by NIC for handling @@ -313,6 +326,15 @@ static int hinic_dev_configure(struct rte_eth_dev *dev) return err; } + /* init vlan offoad */ + err = hinic_vlan_offload_set(dev, + ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK); + if (err) { + PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed\n"); + (void)hinic_config_mq_mode(dev, FALSE); + return err; + } + return HINIC_OK; } @@ -695,7 +717,8 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_VLAN_FILTER; info->tx_queue_offload_capa = 0; info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | @@ -1353,6 +1376,170 @@ static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev) 
eth_dev->data->name); } +static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev, + u16 vlan_id, bool on) +{ + u32 vid_idx, vid_bit; + + vid_idx = HINIC_VFTA_IDX(vlan_id); + vid_bit = HINIC_VFTA_BIT(vlan_id); + + if (on) + nic_dev->vfta[vid_idx] |= vid_bit; + else + nic_dev->vfta[vid_idx] &= ~vid_bit; +} + +static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev, + uint16_t vlan_id) +{ + u32 vid_idx, vid_bit; + + vid_idx = HINIC_VFTA_IDX(vlan_id); + vid_bit = HINIC_VFTA_BIT(vlan_id); + + return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE; +} + +/** + * DPDK callback to set vlan filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * vlan id is used to filter vlan packets + * @param enable + * enable disable or enable vlan filter function + */ +static int hinic_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int enable) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int err = 0; + u16 func_id; + + if (vlan_id > RTE_ETHER_MAX_VLAN_ID) + return -EINVAL; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + if (enable) { + /* If vlanid is already set, just return */ + if (hinic_find_vlan_filter(nic_dev, vlan_id)) { + PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s", + vlan_id, nic_dev->proc_dev_name); + return 0; + } + + err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id, + func_id, TRUE); + } else { + /* If vlanid can't be found, just return */ + if (!hinic_find_vlan_filter(nic_dev, vlan_id)) { + PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s", + vlan_id, nic_dev->proc_dev_name); + return 0; + } + + err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id, + func_id, FALSE); + } + + if (err) { + PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d", + enable ? 
"Add" : "Remove", func_id, vlan_id, err); + return err; + } + + hinic_store_vlan_filter(nic_dev, vlan_id, enable); + + PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s", + enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name); + return 0; +} + +/** + * DPDK callback to enable or disable vlan offload. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mask + * Definitions used for VLAN setting + */ +static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + bool on; + int err; + + /* Enable or disable VLAN filter */ + if (mask & ETH_VLAN_FILTER_MASK) { + on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ? + TRUE : FALSE; + err = hinic_config_vlan_filter(nic_dev->hwdev, on); + if (err == HINIC_MGMT_CMD_UNSUPPORTED) { + PMD_DRV_LOG(WARNING, + "Current matching version does not support vlan filter configuration, device: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + } else if (err) { + PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d", + on ? "enable" : "disable", + nic_dev->proc_dev_name, + dev->data->port_id, err); + return err; + } + + PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d", + on ? "Enable" : "Disable", + nic_dev->proc_dev_name, dev->data->port_id); + } + + /* Enable or disable VLAN stripping */ + if (mask & ETH_VLAN_STRIP_MASK) { + on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ? + TRUE : FALSE; + err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on); + if (err) { + PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d", + on ? "enable" : "disable", + nic_dev->proc_dev_name, + dev->data->port_id, err); + return err; + } + + PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d", + on ? 
"Enable" : "Disable", + nic_dev->proc_dev_name, dev->data->port_id); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + PMD_DRV_LOG(ERR, "Don't support vlan qinq, device: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return -ENOTSUP; + } + + return 0; +} + +static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + u16 func_id; + int i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) { + /* If can't find it, continue */ + if (!hinic_find_vlan_filter(nic_dev, i)) + continue; + + (void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE); + hinic_store_vlan_filter(nic_dev, i, false); + } +} + /** * DPDK callback to enable promiscuous mode. * @@ -2214,6 +2401,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev) /* deinit mac vlan tbl */ hinic_deinit_mac_addr(dev); + hinic_remove_all_vlanid(dev); /* disable hardware and uio interrupt */ hinic_disable_interrupt(dev); @@ -2233,6 +2421,8 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .tx_queue_release = hinic_tx_queue_release, .dev_stop = hinic_dev_stop, .dev_close = hinic_dev_close, + .vlan_filter_set = hinic_vlan_filter_set, + .vlan_offload_set = hinic_vlan_offload_set, .promiscuous_enable = hinic_dev_promiscuous_enable, .promiscuous_disable = hinic_dev_promiscuous_disable, .rss_hash_update = hinic_rss_hash_update, @@ -2257,6 +2447,8 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .tx_queue_release = hinic_tx_queue_release, .dev_stop = hinic_dev_stop, .dev_close = hinic_dev_close, + .vlan_filter_set = hinic_vlan_filter_set, + .vlan_offload_set = hinic_vlan_offload_set, .rss_hash_update = hinic_rss_hash_update, .rss_hash_conf_get = hinic_rss_conf_get, .reta_update = hinic_rss_indirtbl_update, diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index 4aeddc2..66eaf20 100644 --- 
a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -11,12 +11,12 @@ #include "base/hinic_compat.h" #include "base/hinic_pmd_cfg.h" -#define HINIC_DEV_NAME_LEN (32) -#define HINIC_MAX_RX_QUEUES (64) +#define HINIC_DEV_NAME_LEN 32 +#define HINIC_MAX_RX_QUEUES 64 /* mbuf pool for copy invalid mbuf segs */ -#define HINIC_COPY_MEMPOOL_DEPTH (128) -#define HINIC_COPY_MBUF_SIZE (4096) +#define HINIC_COPY_MEMPOOL_DEPTH 128 +#define HINIC_COPY_MBUF_SIZE 4096 #define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3) @@ -31,6 +31,9 @@ #define HINIC_TXD_ALIGN 1 #define HINIC_RXD_ALIGN 1 +#define HINIC_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define HINIC_VFTA_SIZE (4096 / HINIC_UINT32_BIT_SIZE) + enum hinic_dev_status { HINIC_DEV_INIT, HINIC_DEV_CLOSE, @@ -54,10 +57,12 @@ struct hinic_nic_dev { u8 num_rss; u8 rx_queue_list[HINIC_MAX_RX_QUEUES]; + u32 vfta[HINIC_VFTA_SIZE]; /* VLAN bitmap */ + /* info */ unsigned int flags; struct nic_service_cap nic_cap; - u32 rx_mode_status; /* promisc allmulticast */ + u32 rx_mode_status; /* promisc or allmulticast */ unsigned long dev_status; /* dpdk only */ From patchwork Mon Sep 30 14:00:43 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60192 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 180747CBC; Mon, 30 Sep 2019 15:47:50 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 08D191B948 for ; Mon, 30 Sep 2019 15:47:49 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 69091256D268B8180CC3 for ; Mon, 30 Sep 2019 21:47:47 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by 
DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:47:40 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:43 +0800 Message-ID: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 05/19] net/hinic: add allmulticast mode and MTU set X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" When enable allmulticast mode, all multicast packets can be received. This patch also adds support for mtu set, the range of MTU is from 256 to 9600. Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 4 +- doc/guides/nics/hinic.rst | 2 + drivers/net/hinic/hinic_pmd_ethdev.c | 110 ++++++++++++++++++++++++++++++++++- drivers/net/hinic/hinic_pmd_ethdev.h | 3 - 4 files changed, 114 insertions(+), 5 deletions(-) diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index 8b09509..1f9d62c 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -9,10 +9,12 @@ Link status = Y Link status event = Y Free Tx mbuf on demand = Y Queue start/stop = Y -Jumbo frame = N +MTU update = Y +Jumbo frame = Y Scattered Rx = Y TSO = Y Promiscuous mode = Y +Allmulticast mode = Y Unicast MAC filter = Y Multicast MAC filter = Y RSS hash = Y diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index 881075a..681519c 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -26,6 +26,8 @@ Features - Scattered and gather for TX and RX - SR-IOV - Partially supported at this point, VFIO only - VLAN filter and VLAN offload +- Allmulticast mode +- MTU update Prerequisites ------------- diff --git 
a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 4e2a69c..96967a3 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -64,6 +64,12 @@ #define HINIC_VLAN_FILTER_EN (1U << 0) +#define HINIC_MTU_TO_PKTLEN(mtu) \ + ((mtu) + ETH_HLEN + ETH_CRC_LEN) + +#define HINIC_PKTLEN_TO_MTU(pktlen) \ + ((pktlen) - (ETH_HLEN + ETH_CRC_LEN)) + /* Driver-specific log messages type */ int hinic_logtype; @@ -711,6 +717,8 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE; info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE; info->max_mac_addrs = HINIC_MAX_MAC_ADDRS; + info->min_mtu = HINIC_MIN_MTU_SIZE; + info->max_mtu = HINIC_MAX_MTU_SIZE; hinic_get_speed_capa(dev, &info->speed_capa); info->rx_queue_offload_capa = 0; @@ -718,7 +726,9 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_VLAN_FILTER; + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; info->tx_queue_offload_capa = 0; info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | @@ -1376,6 +1386,33 @@ static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev) eth_dev->data->name); } +static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + int ret = 0; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d", + dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu)); + + if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) { + PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d", + mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE); + return -EINVAL; + } + + ret = hinic_set_port_mtu(nic_dev->hwdev, mtu); + if (ret) { + PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret); + return ret; + } + + /* update max 
frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = HINIC_MTU_TO_PKTLEN(mtu); + nic_dev->mtu_size = mtu; + + return ret; +} + static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev, u16 vlan_id, bool on) { @@ -1540,6 +1577,71 @@ static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev) } } +static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev, + bool enable) +{ + u32 rx_mode_ctrl = nic_dev->rx_mode_status; + + if (enable) + rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL; + else + rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL); + + return hinic_config_rx_mode(nic_dev, rx_mode_ctrl); +} + +/** + * DPDK callback to enable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + int ret = HINIC_OK; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + ret = hinic_set_dev_allmulticast(nic_dev, true); + if (ret) { + PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret); + return ret; + } + + PMD_DRV_LOG(INFO, "Enable allmulticast succeed, nic_dev: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return 0; +} + +/** + * DPDK callback to disable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + int ret = HINIC_OK; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + ret = hinic_set_dev_allmulticast(nic_dev, false); + if (ret) { + PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret); + return ret; + } + + PMD_DRV_LOG(INFO, "Disable allmulticast succeed, nic_dev: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return 0; +} + /** * DPDK callback to enable promiscuous mode. 
* @@ -2421,8 +2523,11 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .tx_queue_release = hinic_tx_queue_release, .dev_stop = hinic_dev_stop, .dev_close = hinic_dev_close, + .mtu_set = hinic_dev_set_mtu, .vlan_filter_set = hinic_vlan_filter_set, .vlan_offload_set = hinic_vlan_offload_set, + .allmulticast_enable = hinic_dev_allmulticast_enable, + .allmulticast_disable = hinic_dev_allmulticast_disable, .promiscuous_enable = hinic_dev_promiscuous_enable, .promiscuous_disable = hinic_dev_promiscuous_disable, .rss_hash_update = hinic_rss_hash_update, @@ -2447,8 +2552,11 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .tx_queue_release = hinic_tx_queue_release, .dev_stop = hinic_dev_stop, .dev_close = hinic_dev_close, + .mtu_set = hinic_dev_set_mtu, .vlan_filter_set = hinic_vlan_filter_set, .vlan_offload_set = hinic_vlan_offload_set, + .allmulticast_enable = hinic_dev_allmulticast_enable, + .allmulticast_disable = hinic_dev_allmulticast_disable, .rss_hash_update = hinic_rss_hash_update, .rss_hash_conf_get = hinic_rss_conf_get, .reta_update = hinic_rss_indirtbl_update, diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index 66eaf20..f7a1167 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -20,9 +20,6 @@ #define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3) -#define HINIC_PKTLEN_TO_MTU(pktlen) \ - ((pktlen) - (ETH_HLEN + ETH_CRC_LEN)) - #define HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev) \ ((struct hinic_nic_dev *)(dev)->data->dev_private) From patchwork Mon Sep 30 14:00:44 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60193 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 367524CE4; Mon, 30 Sep 2019 
15:48:21 +0200 (CEST) Received: from huawei.com (szxga06-in.huawei.com [45.249.212.32]) by dpdk.org (Postfix) with ESMTP id 897644CA6 for ; Mon, 30 Sep 2019 15:48:19 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id D2C02B618AD21A1095FA for ; Mon, 30 Sep 2019 21:48:17 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:48:07 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:44 +0800 Message-ID: <9e7e0a9b186e756ac3099fd0c21dd33cc8a2ff02.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 06/19] net/hinic: add unicast and multicast MAC set X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds unicast and multicast set interfaces. Applications can add or remove unicast MAC addresses, and can also set multicast MAC addresses; the maximum multicast list size is 2048. 
Signed-off-by: Xiaoyun wang --- doc/guides/nics/hinic.rst | 2 + drivers/net/hinic/base/hinic_pmd_niccfg.c | 55 +++++++ drivers/net/hinic/hinic_pmd_ethdev.c | 260 +++++++++++++++++++++++++++--- drivers/net/hinic/hinic_pmd_ethdev.h | 2 + 4 files changed, 298 insertions(+), 21 deletions(-) diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index 681519c..4df5f16 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -28,6 +28,8 @@ Features - VLAN filter and VLAN offload - Allmulticast mode - MTU update +- Unicast MAC filter +- Multicast MAC filter Prerequisites ------------- diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index 8bd7ed6..054925c 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -250,6 +250,61 @@ int hinic_get_default_mac(void *hwdev, u8 *mac_addr) } /** +* hinic_update_mac - Update mac address to hardware. +* +* @param hwdev +* The hardware interface of a nic device. +* @param old_mac +* Old mac address. +* @param new_mac +* New mac address. +* @param vlan_id +* Set 0 for mac_vlan table initialization. +* @param func_id +* Global function id of NIC. +* +* @return +* 0 on success. +* negative error value otherwise. 
+*/ +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id) +{ + struct hinic_port_mac_update mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !old_mac || !new_mac) { + PMD_DRV_LOG(ERR, "Hwdev, old_mac or new_mac is NULL\n"); + return -EINVAL; + } + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.old_mac, old_mac, ETH_ALEN); + memcpy(mac_info.new_mac, new_mac, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || + (mac_info.mgmt_msg_head.status && + mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) { + PMD_DRV_LOG(ERR, "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) { + PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore update operation.\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + return 0; +} + +/** * hinic_set_port_mtu - Set MTU to port. * * @param hwdev diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 96967a3..91b4f98 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -51,8 +51,8 @@ #define NR_MAX_COS 8 #define HINIC_MIN_RX_BUF_SIZE 1024 -#define HINIC_MAX_MAC_ADDRS 1 - +#define HINIC_MAX_UC_MAC_ADDRS 128 +#define HINIC_MAX_MC_MAC_ADDRS 2048 /* * vlan_id is a 12 bit number. * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. 
@@ -716,7 +716,7 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) info->max_tx_queues = nic_dev->nic_cap.max_sqs; info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE; info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE; - info->max_mac_addrs = HINIC_MAX_MAC_ADDRS; + info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS; info->min_mtu = HINIC_MIN_MTU_SIZE; info->max_mtu = HINIC_MAX_MTU_SIZE; @@ -1342,21 +1342,41 @@ static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev) if (rc) return rc; - memmove(eth_dev->data->mac_addrs->addr_bytes, - addr_bytes, RTE_ETHER_ADDR_LEN); - - if (rte_is_zero_ether_addr(eth_dev->data->mac_addrs)) - hinic_gen_random_mac_addr(eth_dev->data->mac_addrs); + rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes, + ð_dev->data->mac_addrs[0]); + if (rte_is_zero_ether_addr(ð_dev->data->mac_addrs[0])) + hinic_gen_random_mac_addr(ð_dev->data->mac_addrs[0]); func_id = hinic_global_func_id(nic_dev->hwdev); - rc = hinic_set_mac(nic_dev->hwdev, eth_dev->data->mac_addrs->addr_bytes, - 0, func_id); + rc = hinic_set_mac(nic_dev->hwdev, + eth_dev->data->mac_addrs[0].addr_bytes, + 0, func_id); if (rc && rc != HINIC_PF_SET_VF_ALREADY) return rc; + rte_ether_addr_copy(ð_dev->data->mac_addrs[0], + &nic_dev->default_addr); + return 0; } +static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev) +{ + u16 func_id; + u32 i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) { + if (rte_is_zero_ether_addr(&nic_dev->mc_list[i])) + break; + + hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes, + 0, func_id); + memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr)); + } +} + /** * Deinit mac_vlan table in NIC. 
* @@ -1371,19 +1391,29 @@ static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev) { struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); - int rc; u16 func_id = 0; - - if (rte_is_zero_ether_addr(eth_dev->data->mac_addrs)) - return; + int rc; + int i; func_id = hinic_global_func_id(nic_dev->hwdev); - rc = hinic_del_mac(nic_dev->hwdev, - eth_dev->data->mac_addrs->addr_bytes, - 0, func_id); - if (rc && rc != HINIC_PF_SET_VF_ALREADY) - PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s", - eth_dev->data->name); + + for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) { + if (rte_is_zero_ether_addr(ð_dev->data->mac_addrs[i])) + continue; + + rc = hinic_del_mac(nic_dev->hwdev, + eth_dev->data->mac_addrs[i].addr_bytes, + 0, func_id); + if (rc && rc != HINIC_PF_SET_VF_ALREADY) + PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s", + eth_dev->data->name); + + memset(ð_dev->data->mac_addrs[i], 0, + sizeof(struct rte_ether_addr)); + } + + /* delete multicast mac addrs */ + hinic_delete_mc_addr_list(nic_dev); } static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) @@ -2091,6 +2121,169 @@ static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev, return count; } +/** + * DPDK callback to set mac address + * + * @param dev + * Pointer to Ethernet device structure. + * @param addr + * Pointer to mac address + * @return + * 0 on success, negative error value otherwise. 
+ */ +static int hinic_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 func_id; + int err; + + func_id = hinic_global_func_id(nic_dev->hwdev); + err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes, + addr->addr_bytes, 0, func_id); + if (err) + return err; + + rte_ether_addr_copy(addr, &nic_dev->default_addr); + + PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x\n", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 func_id; + int ret; + + if (index >= HINIC_MAX_UC_MAC_ADDRS) { + PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range", + index); + return; + } + + func_id = hinic_global_func_id(nic_dev->hwdev); + ret = hinic_del_mac(nic_dev->hwdev, + dev->data->mac_addrs[index].addr_bytes, 0, func_id); + if (ret) + return; + + memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ + +static int hinic_mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint32_t index, + __rte_unused uint32_t vmdq) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + unsigned int i; + u16 func_id; + int ret; + + if (index >= HINIC_MAX_UC_MAC_ADDRS) { + PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range,", index); + return -EINVAL; + } + + /* First, make sure this address isn't already configured. */ + for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) { + /* Skip this index, it's going to be reconfigured. */ + if (i == index) + continue; + + if (memcmp(&dev->data->mac_addrs[i], + mac_addr, sizeof(*mac_addr))) + continue; + + PMD_DRV_LOG(INFO, "MAC address already configured"); + return -EADDRINUSE; + } + + func_id = hinic_global_func_id(nic_dev->hwdev); + ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id); + if (ret) + return ret; + + dev->data->mac_addrs[index] = *mac_addr; + return 0; +} + +/** + * DPDK callback to set multicast mac address + * + * @param dev + * Pointer to Ethernet device structure. + * @param mc_addr_set + * Pointer to multicast mac address + * @param nb_mc_addr + * mc addr count + * @return + * 0 on success, negative error value otherwise. 
+ */ +static int hinic_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 func_id; + int ret; + u32 i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + /* delete old multi_cast addrs firstly */ + hinic_delete_mc_addr_list(nic_dev); + + if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS) + goto allmulti; + + for (i = 0; i < nb_mc_addr; i++) { + ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes, + 0, func_id); + /* if add mc addr failed, set all multi_cast */ + if (ret) { + hinic_delete_mc_addr_list(nic_dev); + goto allmulti; + } + + rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]); + } + + return 0; + +allmulti: + hinic_dev_allmulticast_enable(dev); + + return 0; +} static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev) { @@ -2539,6 +2732,10 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .xstats_get = hinic_dev_xstats_get, .xstats_reset = hinic_dev_xstats_reset, .xstats_get_names = hinic_dev_xstats_get_names, + .mac_addr_set = hinic_set_mac_addr, + .mac_addr_remove = hinic_mac_addr_remove, + .mac_addr_add = hinic_mac_addr_add, + .set_mc_addr_list = hinic_set_mc_addr_list, }; static const struct eth_dev_ops hinic_pmd_vf_ops = { @@ -2566,6 +2763,10 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .xstats_get = hinic_dev_xstats_get, .xstats_reset = hinic_dev_xstats_reset, .xstats_get_names = hinic_dev_xstats_get_names, + .mac_addr_set = hinic_set_mac_addr, + .mac_addr_remove = hinic_mac_addr_remove, + .mac_addr_add = hinic_mac_addr_add, + .set_mc_addr_list = hinic_set_mc_addr_list, }; static int hinic_func_init(struct rte_eth_dev *eth_dev) @@ -2573,6 +2774,7 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev; struct rte_ether_addr *eth_addr; struct hinic_nic_dev *nic_dev; + u32 mac_size; int rc; pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); @@ -2599,7 
+2801,8 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) pci_dev->addr.devid, pci_dev->addr.function); /* alloc mac_addrs */ - eth_addr = rte_zmalloc("hinic_mac", sizeof(*eth_addr), 0); + mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr); + eth_addr = rte_zmalloc("hinic_mac", mac_size, 0); if (!eth_addr) { PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s", eth_dev->data->name); @@ -2608,6 +2811,15 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) } eth_dev->data->mac_addrs = eth_addr; + mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr); + nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0); + if (!nic_dev->mc_list) { + PMD_DRV_LOG(ERR, "Allocate mcast address' memory failed, dev_name: %s", + eth_dev->data->name); + rc = -ENOMEM; + goto mc_addr_fail; + } + /* * Pass the information to the rte_eth_dev_close() that it should also * release the private port resources. @@ -2672,6 +2884,10 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) hinic_nic_dev_destroy(eth_dev); create_nic_dev_fail: + rte_free(nic_dev->mc_list); + nic_dev->mc_list = NULL; + +mc_addr_fail: rte_free(eth_addr); eth_dev->data->mac_addrs = NULL; @@ -2716,6 +2932,8 @@ static int hinic_dev_uninit(struct rte_eth_dev *dev) dev->rx_pkt_burst = NULL; dev->tx_pkt_burst = NULL; + rte_free(nic_dev->mc_list); + rte_free(dev->data->mac_addrs); dev->data->mac_addrs = NULL; diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index f7a1167..b4f93ad 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -56,6 +56,8 @@ struct hinic_nic_dev { u32 vfta[HINIC_VFTA_SIZE]; /* VLAN bitmap */ + struct rte_ether_addr default_addr; + struct rte_ether_addr *mc_list; /* info */ unsigned int flags; struct nic_service_cap nic_cap; From patchwork Mon Sep 30 14:00:45 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit 
X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60194 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id DE84C1B948; Mon, 30 Sep 2019 15:48:35 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id C747E1B948 for ; Mon, 30 Sep 2019 15:48:34 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id B6179E07508A0D5F1471 for ; Mon, 30 Sep 2019 21:48:33 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:48:25 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:45 +0800 Message-ID: <6c2f11797ea1236e086d38bc7322dc3aeac7fa39.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 07/19] net/hinic/base: add fdir config interface X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds fdir config operation, including set fdir filter, normal filter, set and clear fdir tcam. 
Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 1 + doc/guides/nics/hinic.rst | 1 + drivers/net/hinic/base/hinic_pmd_cmd.h | 2 + drivers/net/hinic/base/hinic_pmd_niccfg.c | 205 ++++++++++++++++++++++++++++++ drivers/net/hinic/base/hinic_pmd_niccfg.h | 134 +++++++++++++++++++ 5 files changed, 343 insertions(+) diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index 1f9d62c..a7cb33e 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -32,6 +32,7 @@ Inner L4 checksum = Y Basic stats = Y Extended stats = Y Stats per queue = Y +Flow director = Y Linux UIO = Y Linux VFIO = Y BSD nic_uio = N diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index 4df5f16..8ff4171 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -30,6 +30,7 @@ Features - MTU update - Unicast MAC filter - Multicast MAC filter +- Flow director Prerequisites ------------- diff --git a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h index 5c38b5f..6b3dcf3 100644 --- a/drivers/net/hinic/base/hinic_pmd_cmd.h +++ b/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -140,6 +140,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_SET_VHD_CFG = 0xF7, HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8, + HINIC_PORT_CMD_Q_FILTER = 0xFC, + HINIC_PORT_CMD_TCAM_FILTER = 0xFE, HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF }; diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index 054925c..53d981b 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -18,6 +18,23 @@ buf_in, in_size, \ buf_out, out_size, 0) + +#define TCAM_SET 0x1 +#define TCAM_CLEAR 0x2 + +struct hinic_port_qfilter_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 normal_type_enable; + u8 filter_type_enable; + u8 filter_enable; + u8 filter_type; + u8 qid; + u8 fdir_flag; + u32 key; +}; + /** * 
hinic_init_function_table - Initialize function table. * @@ -1662,3 +1679,191 @@ int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id) return 0; } + +/** + * hinic_set_fdir_filter - Set fdir filter for control path + * packet to notify firmware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param filter_type + * Packet type to filter. + * @param qid + * Rx qid to filter. + * @param type_enable + * The status of pkt type filter. + * @param enable + * Fdir function Enable or Disable. + * @return + * 0 on success, + * negative error value otherwise. + */ +int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid, u8 type_enable, + bool enable) +{ + struct hinic_port_qfilter_info port_filer_cmd; + u16 out_size = sizeof(port_filer_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_filer_cmd, 0, sizeof(port_filer_cmd)); + port_filer_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_filer_cmd.func_id = hinic_global_func_id(hwdev); + port_filer_cmd.filter_enable = (u8)enable; + port_filer_cmd.filter_type = filter_type; + port_filer_cmd.qid = qid; + port_filer_cmd.filter_type_enable = type_enable; + port_filer_cmd.fdir_flag = 0; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_Q_FILTER, + &port_filer_cmd, sizeof(port_filer_cmd), + &port_filer_cmd, &out_size); + if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set port Q filter failed, err: %d, status: 0x%x, out size: 0x%x, type: 0x%x," + " enable: 0x%x, qid: 0x%x, filter_type_enable: 0x%x\n", + err, port_filer_cmd.mgmt_msg_head.status, out_size, + filter_type, enable, qid, type_enable); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_set_normal_filter - Set fdir filter for IO path packet. + * + * @param hwdev + * The hardware interface of a nic device. + * @param qid + * Rx qid to filter. 
+ * @param normal_type_enable + * IO path packet function Enable or Disable + * @param key + * IO path packet filter key value, such as DIP from pkt. + * @param enable + * Fdir function Enable or Disable. + * @param flag + * Filter flag, such as dip or others. + * @return + * 0 on success, + * negative error value otherwise. + */ +int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable, + u32 key, bool enable, u8 flag) +{ + struct hinic_port_qfilter_info port_filer_cmd; + u16 out_size = sizeof(port_filer_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_filer_cmd, 0, sizeof(port_filer_cmd)); + port_filer_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_filer_cmd.func_id = hinic_global_func_id(hwdev); + port_filer_cmd.filter_enable = (u8)enable; + port_filer_cmd.qid = qid; + port_filer_cmd.normal_type_enable = normal_type_enable; + port_filer_cmd.fdir_flag = flag; /* fdir flag: support dip */ + port_filer_cmd.key = key; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_Q_FILTER, + &port_filer_cmd, sizeof(port_filer_cmd), + &port_filer_cmd, &out_size); + if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set normal filter failed, err: %d, status: 0x%x, out size: 0x%x, fdir_flag: 0x%x," + " enable: 0x%x, qid: 0x%x, normal_type_enable: 0x%x, key:0x%x\n", + err, port_filer_cmd.mgmt_msg_head.status, out_size, + flag, enable, qid, normal_type_enable, key); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_set_fdir_tcam - Set fdir filter for control packet + * by tcam table to notify hardware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param type_mask + * Index of TCAM. + * @param filter_rule + * TCAM rule for control packet, such as lacp or bgp. + * @param filter_action + * TCAM action for control packet, such as accept or drop. + * @return + * 0 on success, + * negative error value otherwise. 
+ */ +int hinic_set_fdir_tcam(void *hwdev, u16 type_mask, + struct tag_pa_rule *filter_rule, + struct tag_pa_action *filter_action) +{ + struct hinic_fdir_tcam_info port_tcam_cmd; + u16 out_size = sizeof(port_tcam_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); + port_tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_tcam_cmd.tcam_index = type_mask; + port_tcam_cmd.flag = TCAM_SET; + memcpy((void *)&port_tcam_cmd.filter_rule, + (void *)filter_rule, sizeof(struct tag_pa_rule)); + memcpy((void *)&port_tcam_cmd.filter_action, + (void *)filter_action, sizeof(struct tag_pa_action)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_TCAM_FILTER, + &port_tcam_cmd, sizeof(port_tcam_cmd), + &port_tcam_cmd, &out_size); + if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set tcam table failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_tcam_cmd.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_clear_fdir_tcam - Clear fdir filter TCAM table for control packet. + * + * @param hwdev + * The hardware interface of a nic device. + * @param type_mask + * Index of TCAM. + * @return + * 0 on success, + * negative error value otherwise. 
+ */ +int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask) +{ + struct hinic_fdir_tcam_info port_tcam_cmd; + u16 out_size = sizeof(port_tcam_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); + port_tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_tcam_cmd.tcam_index = type_mask; + port_tcam_cmd.flag = TCAM_CLEAR; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_TCAM_FILTER, + &port_tcam_cmd, sizeof(port_tcam_cmd), + &port_tcam_cmd, &out_size); + if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Clear tcam table failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_tcam_cmd.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h index d19a834..e8ce332 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.h +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -609,6 +609,129 @@ struct hinic_port_anti_attack_rate { u32 xbs; /* eXtended Burst Size */ }; +struct pa_u8_s { + u8 val8; + u8 mask8; +}; + +struct pa_u16_s { + u16 val16; + u16 mask16; +}; + +struct pa_u32_s { + u32 val32; + u32 mask32; +}; + +struct pa_u48_s { + u8 val8[6]; + u8 mask8[6]; +}; + +struct pa_u64_s { + u8 val8[8]; + u8 mask8[8]; +}; + +struct tag_pa_eth_ip_header { + struct pa_u8_s ip_ver; /* 3bit */ + struct pa_u8_s ipv4_option_flag; /* 1bit */ + /* 8bit ipv4 option or ipv6 next header */ + struct pa_u8_s protocol; + struct pa_u8_s dscp; /* 6bit DSCP */ +}; + +struct tag_pa_common_l2_header { + struct pa_u48_s dmac; /* dmac 48bit */ + struct pa_u16_s eth_type; /* ethernet type/length 16bit */ + struct pa_u8_s tag_flag; /* tag flag: 4bit */ + struct pa_u8_s np2np_hdr_qindex; /* NP2NP Header Qindex 4bit */ + struct pa_u8_s e_tag_pcp; /* 3bit */ + struct pa_u8_s vlan_layer; /* 2bit */ + struct pa_u8_s s_tag; /* 3bit */ + struct pa_u8_s c_tag; /* 3bit */ + struct pa_u16_s vlan_id; 
/* 12bit */ +}; + +struct tag_pa_tcp { + struct pa_u16_s sport; /* 16bit */ + struct pa_u16_s dport; /* 16bit */ + struct pa_u16_s tcp_flag; /* 6bit */ +}; + +struct tag_pa_udp { + struct pa_u16_s sport; /* 16bit */ + struct pa_u16_s dport; /* 16bit */ + /* 8bit : + * 1.udp dport=67/68 && ipv4 protocol=0x11 + * 2.udp dport=546/547 && ipv6 next header=0x11 + * 3. do not care + */ + struct pa_u8_s dhcp_op_or_msg_type; +}; + +/* ICMP: + * ipv4 protocol = 0x1 + * ipv6 next header = 0x3A + */ +struct tag_pa_icmp { + struct pa_u8_s type; /* 8bit */ + struct pa_u8_s code; /* 8bit */ +}; + +/* IGMP: + * ipv4 protocol = 0x2 + */ +struct tag_pa_ipv4_igmp { + struct pa_u32_s dip; /* 32bit */ + struct pa_u8_s type; /* 8bit */ +}; + +struct tag_pa_rule { + struct pa_u8_s ncsi_flag; /* 1bit valid */ + struct tag_pa_common_l2_header l2_header; + + u8 eth_type; + + struct pa_u64_s eth_other; /* eth_type=other 64bit */ + struct pa_u8_s eth_roce_opcode; /* eth_type=roce 8bit opcode */ + + struct tag_pa_eth_ip_header ip_header; /* eth_type=ip */ + + u8 ip_protocol_type; + + struct tag_pa_tcp eth_ip_tcp; /* eth_type=ip && ip_protocol = tcp */ + struct tag_pa_udp eth_ip_udp; /* eth_type=ip && ip_protocol = udp */ + struct tag_pa_icmp eth_ip_icmp; /* eth_type=ip && ip_protocol = icmp */ + + /* eth_type=ip && ip_protocol = ipv4_igmp */ + struct tag_pa_ipv4_igmp eth_ipv4_igmp; + + /* eth_type=ip && ip_protocol = sctp; + * 16bit ipv4 protocol=0x84 or ipv6 nhr=0x84 + */ + struct pa_u16_s eth_ip_sctp; +}; + +struct tag_pa_action { + u16 pkt_type; + u8 err_type; + u8 pri; + u8 fwd_action; + u8 push_len; +}; + +struct hinic_fdir_tcam_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 tcam_index; + u8 flag; /* clear or set tcam table flag */ + u8 rsvd1; + struct tag_pa_rule filter_rule; + struct tag_pa_action filter_action; +}; + int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id); int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id); @@ -702,4 
+825,15 @@ int hinic_set_link_status_follow(void *hwdev, int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id); +int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid, + u8 type_enable, bool enable); + +int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable, + u32 key, bool enable, u8 flag); + +int hinic_set_fdir_tcam(void *hwdev, u16 type_mask, + struct tag_pa_rule *filter_rule, struct tag_pa_action *filter_action); + +int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask); + #endif /* _HINIC_PMD_NICCFG_H_ */ From patchwork Mon Sep 30 14:00:46 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60195 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 1720D1BE81; Mon, 30 Sep 2019 15:48:41 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 271A71BE81 for ; Mon, 30 Sep 2019 15:48:39 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 1C57DC7A436DE3EB2A99 for ; Mon, 30 Sep 2019 21:48:38 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:48:31 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:46 +0800 Message-ID: <2dfcbd8366dbb2e1f6d110e60cddd94938c731d0.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 08/19] net/hinic: add fdir validate flow operations X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list 
List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch is to validate the filter rules, which includes ntuple filter, ethertype filter and fdir filter. The packets type that supported are BGP,VRRP,LACP,ARP and ICMP. Signed-off-by: Xiaoyun wang --- drivers/net/hinic/Makefile | 1 + drivers/net/hinic/hinic_pmd_ethdev.c | 42 ++ drivers/net/hinic/hinic_pmd_ethdev.h | 24 + drivers/net/hinic/hinic_pmd_flow.c | 1172 ++++++++++++++++++++++++++++++++++ drivers/net/hinic/meson.build | 1 + 5 files changed, 1240 insertions(+) create mode 100644 drivers/net/hinic/hinic_pmd_flow.c diff --git a/drivers/net/hinic/Makefile b/drivers/net/hinic/Makefile index 20a338e..b78fd8d 100644 --- a/drivers/net/hinic/Makefile +++ b/drivers/net/hinic/Makefile @@ -60,6 +60,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_niccfg.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_nicio.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_wq.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mbox.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_flow.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 91b4f98..f2f0fa9 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -2285,6 +2285,46 @@ static int hinic_set_mc_addr_list(struct rte_eth_dev *dev, return 0; } +/** + * DPDK callback to manage filter operations + * + * @param dev + * Pointer to Ethernet device structure. + * @param filter_type + * Filter type. + * @param filter_op + * Operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, negative errno value on failure. 
+ */ +static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int func_id = hinic_global_func_id(nic_dev->hwdev); + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &hinic_flow_ops; + break; + default: + PMD_DRV_LOG(INFO, "Filter type (%d) not supported", + filter_type); + return -EINVAL; + } + + PMD_DRV_LOG(INFO, "Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x," + "filter_op: 0x%x.", func_id, filter_type, filter_op); + return 0; +} + static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev) { struct nic_pause_config pause_config = {0}; @@ -2736,6 +2776,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .mac_addr_remove = hinic_mac_addr_remove, .mac_addr_add = hinic_mac_addr_add, .set_mc_addr_list = hinic_set_mc_addr_list, + .filter_ctrl = hinic_dev_filter_ctrl, }; static const struct eth_dev_ops hinic_pmd_vf_ops = { @@ -2767,6 +2808,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .mac_addr_remove = hinic_mac_addr_remove, .mac_addr_add = hinic_mac_addr_add, .set_mc_addr_list = hinic_set_mc_addr_list, + .filter_ctrl = hinic_dev_filter_ctrl, }; static int hinic_func_init(struct rte_eth_dev *eth_dev) diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index b4f93ad..58a38d9 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -38,6 +38,30 @@ enum hinic_dev_status { HINIC_DEV_INTR_EN, }; +/* Information about the fdir mode. 
*/ +struct hinic_hw_fdir_mask { + uint32_t src_ipv4_mask; + uint32_t dst_ipv4_mask; + uint16_t src_port_mask; + uint16_t dst_port_mask; +}; + +/* Flow Director attribute */ +struct hinic_atr_input { + u32 dst_ip; + u32 src_ip; + u16 src_port; + u16 dst_port; +}; + +struct hinic_fdir_rule { + struct hinic_hw_fdir_mask mask; + struct hinic_atr_input hinic_fdir; /* key of fdir filter */ + uint8_t queue; /* queue assigned when matched */ +}; + +extern const struct rte_flow_ops hinic_flow_ops; + /* hinic nic_device */ struct hinic_nic_dev { /* hardware device */ diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c new file mode 100644 index 0000000..cf9f105 --- /dev/null +++ b/drivers/net/hinic/hinic_pmd_flow.c @@ -0,0 +1,1172 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "base/hinic_compat.h" +#include "base/hinic_pmd_hwdev.h" +#include "base/hinic_pmd_hwif.h" +#include "base/hinic_pmd_wq.h" +#include "base/hinic_pmd_cmdq.h" +#include "base/hinic_pmd_niccfg.h" +#include "hinic_pmd_ethdev.h" + +#ifndef UINT8_MAX +#define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */ +#define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */ +#define UINT32_MAX (u32)(~((u32)0)) /* 0xFFFFFFFF */ +#define UINT64_MAX (u64)(~((u64)0)) /* 0xFFFFFFFFFFFFFFFF */ +#define ASCII_MAX (0x7F) +#endif + +#define HINIC_MIN_N_TUPLE_PRIO 1 +#define HINIC_MAX_N_TUPLE_PRIO 7 + +/** + * Endless loop will never happen with below assumption + * 1. there is at least one no-void item(END) + * 2. cur is before END. + */ +static inline const struct rte_flow_item * +next_no_void_pattern(const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + cur ? 
cur + 1 : &pattern[0]; + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_VOID) + return next; + next++; + } +} + +static inline const struct rte_flow_action * +next_no_void_action(const struct rte_flow_action actions[], + const struct rte_flow_action *cur) +{ + const struct rte_flow_action *next = + cur ? cur + 1 : &actions[0]; + while (1) { + if (next->type != RTE_FLOW_ACTION_TYPE_VOID) + return next; + next++; + } +} + +static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_filter_arg(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_ethertype_first_item(const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + /* The first non-void item should be MAC */ + if (item->type != 
RTE_FLOW_ITEM_TYPE_ETH) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + return 0; +} + +static int +hinic_parse_ethertype_aciton(const struct rte_flow_action *actions, + const struct rte_flow_action *act, + const struct rte_flow_action_queue *act_q, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + /* Parse action */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + } else { + filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + } + + /* Check if the next non-void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +/** + * Parse the rule to see if it is a ethertype rule. + * And get the ethertype filter info BTW. + * pattern: + * The first not void item can be ETH. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. 
+ * pattern example: + * ITEM Spec Mask + * ETH type 0x0807 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_ethertype_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act = NULL; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_action_queue *act_q = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + item = next_no_void_pattern(pattern, NULL); + if (hinic_check_ethertype_first_item(item, error)) + return -rte_errno; + + eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_mask = (const struct rte_flow_item_eth *)item->mask; + + /* + * Mask bits of source MAC address must be full of 0. + * Mask bits of destination MAC address must be full + * of 1 or full of 0. + */ + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ether address mask"); + return -rte_errno; + } + + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ethertype mask"); + return -rte_errno; + } + + /* + * If mask bits of destination MAC address + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + */ + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { + filter->mac_addr = eth_spec->dst; + filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + } else { + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + } + filter->ether_type = rte_be_to_cpu_16(eth_spec->type); + + /* Check if the next non-void item is END. 
*/ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter."); + return -rte_errno; + } + + if (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error)) + return -rte_errno; + + if (hinic_check_ethertype_attr_ele(attr, error)) + return -rte_errno; + + return 0; +} + +static int +hinic_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error)) + return -rte_errno; + + /* NIC doesn't support MAC address. */ + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Queue index much too big"); + return -rte_errno; + } + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "IPv4/IPv6 not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Drop option is unsupported"); + return -rte_errno; + } + + /* Hinic only support LACP/ARP for ether type */ + if (filter->ether_type != RTE_ETHER_TYPE_SLOW && + filter->ether_type != RTE_ETHER_TYPE_ARP) { 
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "only lacp/arp type supported by ethertype filter"); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr, + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + if (attr->priority < HINIC_MIN_N_TUPLE_PRIO || + attr->priority > HINIC_MAX_N_TUPLE_PRIO) + filter->priority = 1; + else + filter->priority = (uint16_t)attr->priority; + + return 0; +} + +static int +hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item, + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + /* + * n-tuple only supports forwarding, + * check if the first not void action is QUEUE. 
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Flow action type is not QUEUE."); + return -rte_errno; + } + filter->queue = + ((const struct rte_flow_action_queue *)act->conf)->index; + + /* Check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Next not void item is not END."); + return -rte_errno; + } + + return 0; +} + +static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item, + const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + + /* The first not void item can be MAC or IPv4 */ + item = next_no_void_pattern(pattern, NULL); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* if the first item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return 
-rte_errno; + } + } + + *ipv4_item = item; + return 0; +} + +static int +hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item *item = *in_out_item; + + /* Get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + /* + * Only support src & dst addresses, protocol, + * others should be masked. + */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum || + !ipv4_mask->hdr.next_proto_id) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; + + ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = ipv4_spec->hdr.next_proto_id; + + /* Get next no void item */ + *in_out_item = next_no_void_pattern(pattern, item); + return 0; +} + +static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + 
const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_icmp *icmp_mask; + const struct rte_flow_item *item = *in_out_item; + u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter); + + if (item->type == RTE_FLOW_ITEM_TYPE_END) + return 0; + + /* Get TCP or UDP info */ + if (item->type != RTE_FLOW_ITEM_TYPE_END && + (!item->spec || !item->mask)) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + + /* + * Only support src & dst ports, tcp flags, + * others should be masked. 
+ */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = tcp_mask->hdr.dst_port; + filter->src_port_mask = tcp_mask->hdr.src_port; + if (tcp_mask->hdr.tcp_flags == 0xFF) { + filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + } else if (!tcp_mask->hdr.tcp_flags) { + filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + } else { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + filter->dst_port = tcp_spec->hdr.dst_port; + filter->src_port = tcp_spec->hdr.src_port; + filter->tcp_flags = tcp_spec->hdr.tcp_flags; + } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) { + icmp_mask = (const struct rte_flow_item_icmp *)item->mask; + + /* ICMP all should be masked. 
*/ + if (icmp_mask->hdr.icmp_cksum || + icmp_mask->hdr.icmp_ident || + icmp_mask->hdr.icmp_seq_nb || + icmp_mask->hdr.icmp_type || + icmp_mask->hdr.icmp_code) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + /* Get next no void item */ + *in_out_item = next_no_void_pattern(pattern, item); + return 0; +} + +static int hinic_ntuple_item_check_end(const struct rte_flow_item *item, + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + /* Check if the next not void item is END */ + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + return 0; +} + +static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item, + const struct rte_flow_item pattern[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + if (hinic_ntuple_item_check_ether(&item, pattern, error) || + hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) || + hinic_ntuple_item_check_l4(&item, pattern, filter, error) || + hinic_ntuple_item_check_end(item, filter, error)) + return -rte_errno; + + return 0; +} + +/** + * Parse the rule to see if it is a n-tuple rule. + * And get the n-tuple filter info BTW. + * pattern: + * The first not void item can be ETH or IPV4. + * The second not void item must be IPV4 if the first one is ETH. + * The third not void item must be UDP or TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. 
+ * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * next_proto_id 17 0xFF + * UDP/TCP/ src_port 80 0xFFFF + * SCTP dst_port 80 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + * Please aware there's an asumption for all the parsers. + * rte_flow_item is using big endian, rte_flow_attr and + * rte_flow_action are using CPU order. + * Because the pattern is used to describe the packets, + * normally the packets should use network order. + */ +static int +cons_parse_ntuple_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + if (hinic_check_ntuple_item_ele(item, pattern, filter, error)) + return -rte_errno; + + if (hinic_check_ntuple_act_ele(item, actions, filter, error)) + return -rte_errno; + + if (hinic_check_ntuple_attr_ele(attr, filter, error)) + return -rte_errno; + + return 0; +} + +static int +hinic_parse_ntuple_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error); + if (ret) + return ret; + + /* Hinic doesn't support tcp flags */ + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* Hinic doesn't support many priorities */ + if (filter->priority < HINIC_MIN_N_TUPLE_PRIO || + filter->priority > 
HINIC_MAX_N_TUPLE_PRIO) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Priority not supported by ntuple filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + + /* Fixed value for hinic */ + filter->flags = RTE_5TUPLE_FLAGS; + return 0; +} + +static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item, + const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + + /* The first not void item can be MAC or IPv4 or TCP or UDP */ + item = next_no_void_pattern(pattern, NULL); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter,support mac,ipv4,tcp,udp"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item, + "Not supported last point for range"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* All should be masked. 
*/ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support mac"); + return -rte_errno; + } + /* Check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter,support mac,ipv4"); + return -rte_errno; + } + } + + *ip_item = item; + return 0; +} + +static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item *item = *in_out_item; + + /* Get the IPv4 info */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid fdir filter mask"); + return -rte_errno; + } + + ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + /* + * Only support src & dst addresses, + * others should be masked. 
+ */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, support src,dst ip"); + return -rte_errno; + } + + rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr; + rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr; + + if (item->spec) { + ipv4_spec = + (const struct rte_flow_item_ipv4 *)item->spec; + rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr; + rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr; + } + + /* + * Check if the next not void item is + * TCP or UDP or END. + */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, support tcp, udp, end"); + return -rte_errno; + } + } + + *in_out_item = item; + return 0; +} + +static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item *item = *in_out_item; + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get TCP/UDP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + /* + * Only care about 
src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter,support src,dst ports"); + return -rte_errno; + } + + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support tcp"); + return -rte_errno; + } + + rule->mask.src_port_mask = tcp_mask->hdr.src_port; + rule->mask.dst_port_mask = tcp_mask->hdr.dst_port; + + if (item->spec) { + tcp_spec = + (const struct rte_flow_item_tcp *) + item->spec; + rule->hinic_fdir.src_port = + tcp_spec->hdr.src_port; + rule->hinic_fdir.dst_port = + tcp_spec->hdr.dst_port; + } + + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /* + * Only care about src & dst ports, + * others should be masked. 
+ */ + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support src,dst ports"); + return -rte_errno; + } + + udp_mask = (const struct rte_flow_item_udp *)item->mask; + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support udp"); + return -rte_errno; + } + rule->mask.src_port_mask = udp_mask->hdr.src_port; + rule->mask.dst_port_mask = udp_mask->hdr.dst_port; + + if (item->spec) { + udp_spec = + (const struct rte_flow_item_udp *) + item->spec; + rule->hinic_fdir.src_port = + udp_spec->hdr.src_port; + rule->hinic_fdir.dst_port = + udp_spec->hdr.dst_port; + } + } else { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support tcp/udp"); + return -rte_errno; + } + + /* Get next no void item */ + *in_out_item = next_no_void_pattern(pattern, item); + } + + return 0; +} + +static int hinic_normal_item_check_end(const struct rte_flow_item *item, + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + /* Check if the next not void item is END */ + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support end"); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_normal_item_ele(const struct rte_flow_item *item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + if (hinic_normal_item_check_ether(&item, pattern, error) || + hinic_normal_item_check_ip(&item, pattern, rule, error) || + hinic_normal_item_check_l4(&item, pattern, rule, error) || + 
hinic_normal_item_check_end(item, rule, error)) + return -rte_errno; + + return 0; +} + +static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr, + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_normal_act_ele(const struct rte_flow_item *item, + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + + /* Check if the first not void action is QUEUE */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + item, "Not supported action."); + return -rte_errno; + } + + rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index; + + /* Check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +/** + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. 
+ * And get the flow director filter info BTW. + * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP(optional) + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * UDP/TCP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 1.2.3.6 0xFFFFFFFF + * dst_addr 1.2.3.5 0xFFFFFFFF + * UDP/TCP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + if (hinic_check_normal_item_ele(item, pattern, rule, error)) + return -rte_errno; + + if (hinic_check_normal_attr_ele(attr, rule, error)) + return -rte_errno; + + if (hinic_check_normal_act_ele(item, actions, rule, error)) + return -rte_errno; + + return 0; +} + +static int +hinic_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + int ret; + + ret = hinic_parse_fdir_filter_normal(attr, pattern, + actions, rule, error); + if (ret) + return ret; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + + return ret; +} + +/** + * Check if the flow rule is supported by nic. + * It only checkes the format. Don't guarantee the rule can be programmed into + * the HW. 
Because there can be no enough room for the rule. + */ +static int hinic_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_ntuple_filter ntuple_filter; + struct hinic_fdir_rule fdir_rule; + int ret; + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = hinic_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) + return 0; + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = hinic_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + + if (!ret) + return 0; + + memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule)); + ret = hinic_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + + return ret; +} + +const struct rte_flow_ops hinic_flow_ops = { + .validate = hinic_flow_validate, +}; diff --git a/drivers/net/hinic/meson.build b/drivers/net/hinic/meson.build index 87c8d16..bc7e246 100644 --- a/drivers/net/hinic/meson.build +++ b/drivers/net/hinic/meson.build @@ -8,6 +8,7 @@ sources = files( 'hinic_pmd_ethdev.c', 'hinic_pmd_rx.c', 'hinic_pmd_tx.c', + 'hinic_pmd_flow.c', ) includes += include_directories('base') From patchwork Mon Sep 30 14:00:47 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60196 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 322C71B9B5; Mon, 30 Sep 2019 15:48:47 +0200 (CEST) Received: from huawei.com (szxga05-in.huawei.com [45.249.212.191]) by dpdk.org (Postfix) with ESMTP id 6F2541BE8F for ; Mon, 30 Sep 2019 15:48:44 +0200 (CEST) Received: from 
DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.59]) by Forcepoint Email with ESMTP id 2F242E4DC933DDC150B1 for ; Mon, 30 Sep 2019 21:48:43 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:48:34 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:47 +0800 Message-ID: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 09/19] net/hinic: create and destroy ntuple filter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support for creating/destroying ntuple filter. The filter rule includes BGP and VRRP packet type. Signed-off-by: Xiaoyun wang --- drivers/net/hinic/hinic_pmd_ethdev.h | 75 ++++ drivers/net/hinic/hinic_pmd_flow.c | 684 +++++++++++++++++++++++++++++++++++ 2 files changed, 759 insertions(+) diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index 58a38d9..93cd6a7 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -38,6 +38,57 @@ enum hinic_dev_status { HINIC_DEV_INTR_EN, }; +#define HINIC_MAX_Q_FILTERS 64 /* hinic just support 64 filter types */ +#define HINIC_PKT_TYPE_FIND_ID(pkt_type) ((pkt_type) - HINIC_MAX_Q_FILTERS) + +/* 5tuple filter info */ +struct hinic_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; + uint16_t src_port; + uint8_t proto; /* l4 protocol. */ + /* + * seven levels (001b-111b), 111b is highest, + * used when more than one filter matches. + */ + uint8_t priority; + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. 
*/ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. */ +}; + +/* 5tuple filter structure */ +struct hinic_5tuple_filter { + TAILQ_ENTRY(hinic_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct hinic_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +TAILQ_HEAD(hinic_5tuple_filter_list, hinic_5tuple_filter); + +/* + * If this filter is added by configuration, + * it should not be removed. + */ +struct hinic_pkt_filter { + uint16_t pkt_proto; + uint8_t qid; + bool enable; +}; + +/* Structure to store filters' info. */ +struct hinic_filter_info { + uint8_t pkt_type; + uint8_t qid; + uint64_t type_mask; /* Bit mask for every used filter */ + struct hinic_5tuple_filter_list fivetuple_list; + struct hinic_pkt_filter pkt_filters[HINIC_MAX_Q_FILTERS]; +}; + /* Information about the fdir mode. 
*/ struct hinic_hw_fdir_mask { uint32_t src_ipv4_mask; @@ -60,6 +111,26 @@ struct hinic_fdir_rule { uint8_t queue; /* queue assigned when matched */ }; +/* ntuple filter list structure */ +struct hinic_ntuple_filter_ele { + TAILQ_ENTRY(hinic_ntuple_filter_ele) entries; + struct rte_eth_ntuple_filter filter_info; +}; + +struct rte_flow { + enum rte_filter_type filter_type; + void *rule; +}; + +/* hinic_flow memory list structure */ +struct hinic_flow_mem { + TAILQ_ENTRY(hinic_flow_mem) entries; + struct rte_flow *flow; +}; + +TAILQ_HEAD(hinic_ntuple_filter_list, hinic_ntuple_filter_ele); +TAILQ_HEAD(hinic_flow_mem_list, hinic_flow_mem); + extern const struct rte_flow_ops hinic_flow_ops; /* hinic nic_device */ @@ -94,6 +165,10 @@ struct hinic_nic_dev { * vf: the same with associate pf */ u32 default_cos; + + struct hinic_filter_info filter; + struct hinic_ntuple_filter_list filter_ntuple_list; + struct hinic_flow_mem_list hinic_flow_list; }; #endif /* _HINIC_PMD_ETHDEV_H_ */ diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c index cf9f105..73a336a 100644 --- a/drivers/net/hinic/hinic_pmd_flow.c +++ b/drivers/net/hinic/hinic_pmd_flow.c @@ -23,6 +23,8 @@ #include "base/hinic_pmd_niccfg.h" #include "hinic_pmd_ethdev.h" +#define HINIC_MAX_RX_QUEUE_NUM 64 + #ifndef UINT8_MAX #define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */ #define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */ @@ -31,9 +33,56 @@ #define ASCII_MAX (0x7F) #endif +/* IPSURX MACRO */ +#define PA_ETH_TYPE_ROCE 0 +#define PA_ETH_TYPE_IPV4 1 +#define PA_ETH_TYPE_IPV6 2 +#define PA_ETH_TYPE_OTHER 3 + +#define PA_IP_PROTOCOL_TYPE_TCP 1 +#define PA_IP_PROTOCOL_TYPE_UDP 2 +#define PA_IP_PROTOCOL_TYPE_ICMP 3 +#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP 4 +#define PA_IP_PROTOCOL_TYPE_SCTP 5 +#define PA_IP_PROTOCOL_TYPE_VRRP 112 + +#define IP_HEADER_PROTOCOL_TYPE_TCP 6 + #define HINIC_MIN_N_TUPLE_PRIO 1 #define HINIC_MAX_N_TUPLE_PRIO 7 +/* TCAM type mask in hardware */ +#define TCAM_PKT_BGP_SPORT 1 
+#define TCAM_PKT_VRRP 2 +#define TCAM_PKT_BGP_DPORT 3 +#define TCAM_PKT_LACP 4 + +#define BGP_DPORT_ID 179 +#define IPPROTO_VRRP 112 + +/* Packet type defined in hardware to perform filter */ +#define PKT_IGMP_IPV4_TYPE 64 +#define PKT_ICMP_IPV4_TYPE 65 +#define PKT_ICMP_IPV6_TYPE 66 +#define PKT_ICMP_IPV6RS_TYPE 67 +#define PKT_ICMP_IPV6RA_TYPE 68 +#define PKT_ICMP_IPV6NS_TYPE 69 +#define PKT_ICMP_IPV6NA_TYPE 70 +#define PKT_ICMP_IPV6RE_TYPE 71 +#define PKT_DHCP_IPV4_TYPE 72 +#define PKT_DHCP_IPV6_TYPE 73 +#define PKT_LACP_TYPE 74 +#define PKT_ARP_REQ_TYPE 79 +#define PKT_ARP_REP_TYPE 80 +#define PKT_ARP_TYPE 81 +#define PKT_BGPD_DPORT_TYPE 83 +#define PKT_BGPD_SPORT_TYPE 84 +#define PKT_VRRP_TYPE 85 + +#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \ + (&((struct hinic_nic_dev *)nic_dev)->filter) + + /** * Endless loop will never happen with below assumption * 1. there is at least one no-void item(END) @@ -1167,6 +1216,641 @@ static int hinic_flow_validate(struct rte_eth_dev *dev, return ret; } +static inline int +ntuple_ip_filter(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *filter_info) +{ + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + filter_info->dst_ip = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + filter_info->src_ip = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid src_ip mask."); + return -EINVAL; + } + return 0; +} + +static inline int +ntuple_port_filter(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *filter_info) +{ + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + 
break; + case 0: + filter_info->dst_port_mask = 1; + filter_info->dst_port = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + filter_info->src_port = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid src_port mask."); + return -EINVAL; + } + + return 0; +} + +static inline int +ntuple_proto_filter(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *filter_info) +{ + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + filter_info->proto = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid protocol mask."); + return -EINVAL; + } + + return 0; +} + +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *filter_info) +{ + if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM || + filter->priority > HINIC_MAX_N_TUPLE_PRIO || + filter->priority < HINIC_MIN_N_TUPLE_PRIO) + return -EINVAL; + + if (ntuple_ip_filter(filter, filter_info) || + ntuple_port_filter(filter, filter_info) || + ntuple_proto_filter(filter, filter_info)) + return -EINVAL; + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +static inline struct hinic_5tuple_filter * +hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list, + struct hinic_5tuple_filter_info *key) +{ + struct hinic_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct hinic_5tuple_filter_info)) == 0) { + return it; + } + } + + return NULL; +} + +static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule bgp_rule; + struct tag_pa_action bgp_action; + + memset(&bgp_rule, 0, sizeof(bgp_rule)); + 
memset(&bgp_action, 0, sizeof(bgp_action)); + /* BGP TCAM rule */ + bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */ + bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP; + bgp_rule.ip_header.protocol.mask8 = UINT8_MAX; + bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP; + bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */ + bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX; + + /* BGP TCAM action */ + bgp_action.err_type = 0x3f; /* err from ipsu, not convert */ + bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */ + bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */ + bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse + * results, not need to convert + */ + bgp_action.push_len = 0xf; /* push_len:0xf, not convert */ + + return hinic_set_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action); +} + +static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule bgp_rule; + struct tag_pa_action bgp_action; + + memset(&bgp_rule, 0, sizeof(bgp_rule)); + memset(&bgp_action, 0, sizeof(bgp_action)); + /* BGP TCAM rule */ + bgp_rule.eth_type = PA_ETH_TYPE_IPV4; + bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP; + bgp_rule.ip_header.protocol.mask8 = UINT8_MAX; + bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP; + bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID; + bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX; + + /* BGP TCAM action */ + bgp_action.err_type = 0x3f; /* err from ipsu, not convert */ + bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */ + bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */ + bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse + * results, not need to convert + */ + bgp_action.push_len = 0xf; /* push_len:0xf, not convert */ + + return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT, + &bgp_rule, &bgp_action); +} + +static int hinic_set_vrrp_tcam(struct hinic_nic_dev 
*nic_dev) +{ + struct tag_pa_rule vrrp_rule; + struct tag_pa_action vrrp_action; + + memset(&vrrp_rule, 0, sizeof(vrrp_rule)); + memset(&vrrp_action, 0, sizeof(vrrp_action)); + /* VRRP TCAM rule */ + vrrp_rule.eth_type = PA_ETH_TYPE_IPV4; + vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP; + vrrp_rule.ip_header.protocol.mask8 = 0xff; + vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP; + + /* VRRP TCAM action */ + vrrp_action.err_type = 0x3f; + vrrp_action.fwd_action = 0x7; + vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */ + vrrp_action.pri = 0xf; + vrrp_action.push_len = 0xf; + + return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP, + &vrrp_rule, &vrrp_action); +} + +static int +hinic_filter_info_init(struct hinic_5tuple_filter *filter, + struct hinic_filter_info *filter_info) +{ + switch (filter->filter_info.proto) { + case IPPROTO_TCP: + /* Filter type is bgp type if dst_port or src_port is 179 */ + if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) && + !(filter->filter_info.dst_port_mask)) { + filter_info->pkt_type = PKT_BGPD_DPORT_TYPE; + } else if (filter->filter_info.src_port == + RTE_BE16(BGP_DPORT_ID) && + !(filter->filter_info.src_port_mask)) { + filter_info->pkt_type = PKT_BGPD_SPORT_TYPE; + } else { + PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters" + " just support BGP now, proto:0x%x, " + "dst_port:0x%x, dst_port_mask:0x%x." + "src_port:0x%x, src_port_mask:0x%x.", + filter->filter_info.proto, + filter->filter_info.dst_port, + filter->filter_info.dst_port_mask, + filter->filter_info.src_port, + filter->filter_info.src_port_mask); + return -EINVAL; + } + break; + + case IPPROTO_VRRP: + filter_info->pkt_type = PKT_VRRP_TYPE; + break; + + case IPPROTO_ICMP: + filter_info->pkt_type = PKT_ICMP_IPV4_TYPE; + break; + + case IPPROTO_ICMPV6: + filter_info->pkt_type = PKT_ICMP_IPV6_TYPE; + break; + + default: + PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, " + "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x." 
+ "src_port: 0x%x, src_port_mask: 0x%x.", + filter->filter_info.proto, filter->filter_info.dst_port, + filter->filter_info.dst_port_mask, + filter->filter_info.src_port, + filter->filter_info.src_port_mask); + return -EINVAL; + } + + return 0; +} + +static int +hinic_lookup_new_filter(struct hinic_5tuple_filter *filter, + struct hinic_filter_info *filter_info, + int *index) +{ + int type_id; + + type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type); + + if (type_id > HINIC_MAX_Q_FILTERS - 1) { + PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type."); + return -EINVAL; + } + + if (!(filter_info->type_mask & (1 << type_id))) { + filter_info->type_mask |= 1 << type_id; + filter->index = type_id; + filter_info->pkt_filters[type_id].enable = true; + filter_info->pkt_filters[type_id].pkt_proto = + filter->filter_info.proto; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, entries); + } else { + PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id); + return -EIO; + } + + *index = type_id; + return 0; +} + +/* + * Add a 5tuple filter + * + * @param dev: + * Pointer to struct rte_eth_dev. + * @param filter: + * Pointer to the filter that will be added. + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +hinic_add_5tuple_filter(struct rte_eth_dev *dev, + struct hinic_5tuple_filter *filter) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, ret_fw; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (hinic_filter_info_init(filter, filter_info) || + hinic_lookup_new_filter(filter, filter_info, &i)) + return -EFAULT; + + ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type, + filter_info->qid, + filter_info->pkt_filters[i].enable, + true); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter_info->qid, + filter_info->pkt_filters[filter->index].enable); + + switch (filter->filter_info.proto) { + case IPPROTO_TCP: + if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) { + ret_fw = hinic_set_bgp_dport_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set dport bgp failed, " + "type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x", + filter->queue, + filter_info->pkt_filters[i].enable); + } else if (filter->filter_info.src_port == + RTE_BE16(BGP_DPORT_ID)) { + ret_fw = hinic_set_bgp_sport_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set sport bgp failed, " + "type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x", + filter->queue, + filter_info->pkt_filters[i].enable); + } + + break; + + case IPPROTO_VRRP: + ret_fw = hinic_set_vrrp_tcam(nic_dev); + if (ret_fw) { + 
PMD_DRV_LOG(ERR, "Set VRRP failed, " + "type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x", + filter->queue, + filter_info->pkt_filters[i].enable); + break; + + default: + break; + } + + return 0; +} + +/* + * Remove a 5tuple filter + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param filter + * The pointer of the filter will be removed. + */ +static void +hinic_remove_5tuple_filter(struct rte_eth_dev *dev, + struct hinic_5tuple_filter *filter) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + switch (filter->filter_info.proto) { + case IPPROTO_VRRP: + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP); + break; + + case IPPROTO_TCP: + if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_BGP_DPORT); + else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID)) + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_BGP_SPORT); + break; + + default: + break; + } + + hinic_filter_info_init(filter, filter_info); + + filter_info->pkt_filters[filter->index].enable = false; + filter_info->pkt_filters[filter->index].pkt_proto = 0; + + PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, + filter_info->pkt_filters[filter->index].qid, + filter_info->pkt_filters[filter->index].enable); + (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type, + filter_info->pkt_filters[filter->index].qid, + filter_info->pkt_filters[filter->index].enable, + true); + + filter_info->pkt_type = 0; + filter_info->qid = 0; + filter_info->pkt_filters[filter->index].qid = 0; + filter_info->type_mask &= ~(1 << (filter->index)); + TAILQ_REMOVE(&filter_info->fivetuple_list, 
filter, entries); + + rte_free(filter); +} + +/* + * Add or delete a ntuple filter + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param ntuple_filter + * Pointer to struct rte_eth_ntuple_filter + * @param add + * If true, add filter; if false, remove filter + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_5tuple_filter_info filter_5tuple; + struct hinic_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "Only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "Filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "Filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("hinic_5tuple_filter", + sizeof(struct hinic_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + rte_memcpy(&filter->filter_info, &filter_5tuple, + sizeof(struct hinic_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + + filter_info->qid = ntuple_filter->queue; + + ret = hinic_add_5tuple_filter(dev, filter); + if (ret) + rte_free(filter); + + return ret; + } + + hinic_remove_5tuple_filter(dev, filter); + + return 0; +} + +/** + * Create or destroy a flow rule. + * Theorically one rule can match more than one filters. + * We will let it use the filter which it hitt first. + * So, the sequence matters. 
+ */ +static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_flow *flow = NULL; + struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_flow_mem *hinic_flow_mem_ptr; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0); + if (!flow) { + PMD_DRV_LOG(ERR, "Failed to allocate flow memory"); + return NULL; + } + + hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem", + sizeof(struct hinic_flow_mem), 0); + if (!hinic_flow_mem_ptr) { + PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr"); + rte_free(flow); + return NULL; + } + + hinic_flow_mem_ptr->flow = flow; + TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, + entries); + + /* add ntuple filter */ + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = hinic_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (ret) + goto out; + + ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); + if (ret) + goto out; + ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter", + sizeof(struct hinic_ntuple_filter_ele), 0); + rte_memcpy(&ntuple_filter_ptr->filter_info, + &ntuple_filter, + sizeof(struct rte_eth_ntuple_filter)); + TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list, + ntuple_filter_ptr, entries); + flow->rule = ntuple_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_NTUPLE; + + PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + +out: + TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(hinic_flow_mem_ptr); + rte_free(flow); + 
return NULL; +} + +/* Destroy a flow rule on hinic. */ +static int hinic_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + struct rte_flow *pmd_flow = flow; + enum rte_filter_type filter_type = pmd_flow->filter_type; + struct rte_eth_ntuple_filter ntuple_filter; + struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_flow_mem *hinic_flow_mem_ptr; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *) + pmd_flow->rule; + rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info, + sizeof(struct rte_eth_ntuple_filter)); + ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&nic_dev->filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) { + if (hinic_flow_mem_ptr->flow == pmd_flow) { + TAILQ_REMOVE(&nic_dev->hinic_flow_list, + hinic_flow_mem_ptr, entries); + rte_free(hinic_flow_mem_ptr); + break; + } + } + rte_free(flow); + + PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + + return ret; +} + const struct rte_flow_ops hinic_flow_ops = { .validate = hinic_flow_validate, + .create = hinic_flow_create, + .destroy = hinic_flow_destroy, }; From patchwork Mon Sep 30 14:00:48 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60197 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org 
Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5A8B21B9A9; Mon, 30 Sep 2019 15:49:10 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 6BC7F4C90 for ; Mon, 30 Sep 2019 15:48:59 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 6922B6A7D52945CA91C0 for ; Mon, 30 Sep 2019 21:48:58 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:48:49 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:48 +0800 Message-ID: <2b6a6a19f9baf113d52c969c235339eb23cb570d.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 10/19] net/hinic: create and destroy fdir filter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support for creating/destroying fdir filter. The filter rule includes LACP,ARP and ICMP packet type. 
Signed-off-by: Xiaoyun wang --- drivers/net/hinic/hinic_pmd_ethdev.h | 16 ++ drivers/net/hinic/hinic_pmd_flow.c | 423 +++++++++++++++++++++++++++++++++-- 2 files changed, 422 insertions(+), 17 deletions(-) diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index 93cd6a7..b7fcef6 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -117,6 +117,18 @@ struct hinic_ntuple_filter_ele { struct rte_eth_ntuple_filter filter_info; }; +/* ethertype filter list structure */ +struct hinic_ethertype_filter_ele { + TAILQ_ENTRY(hinic_ethertype_filter_ele) entries; + struct rte_eth_ethertype_filter filter_info; +}; + +/* fdir filter list structure */ +struct hinic_fdir_rule_ele { + TAILQ_ENTRY(hinic_fdir_rule_ele) entries; + struct hinic_fdir_rule filter_info; +}; + struct rte_flow { enum rte_filter_type filter_type; void *rule; @@ -129,6 +141,8 @@ struct hinic_flow_mem { }; TAILQ_HEAD(hinic_ntuple_filter_list, hinic_ntuple_filter_ele); +TAILQ_HEAD(hinic_ethertype_filter_list, hinic_ethertype_filter_ele); +TAILQ_HEAD(hinic_fdir_rule_filter_list, hinic_fdir_rule_ele); TAILQ_HEAD(hinic_flow_mem_list, hinic_flow_mem); extern const struct rte_flow_ops hinic_flow_ops; @@ -168,6 +182,8 @@ struct hinic_nic_dev { struct hinic_filter_info filter; struct hinic_ntuple_filter_list filter_ntuple_list; + struct hinic_ethertype_filter_list filter_ethertype_list; + struct hinic_fdir_rule_filter_list filter_fdir_rule_list; struct hinic_flow_mem_list hinic_flow_list; }; diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c index 73a336a..b2eff2e 100644 --- a/drivers/net/hinic/hinic_pmd_flow.c +++ b/drivers/net/hinic/hinic_pmd_flow.c @@ -82,6 +82,19 @@ #define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \ (&((struct hinic_nic_dev *)nic_dev)->filter) +enum hinic_atr_flow_type { + HINIC_ATR_FLOW_TYPE_IPV4_DIP = 0x1, + HINIC_ATR_FLOW_TYPE_IPV4_SIP = 0x2, + HINIC_ATR_FLOW_TYPE_DPORT = 0x3, + 
HINIC_ATR_FLOW_TYPE_SPORT = 0x4, +}; + +/* Structure to store fdir's info. */ +struct hinic_fdir_info { + uint8_t fdir_flag; + uint8_t qid; + uint32_t fdir_key; +}; /** * Endless loop will never happen with below assumption @@ -1340,6 +1353,29 @@ static int hinic_flow_validate(struct rte_eth_dev *dev, return NULL; } +static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule lacp_rule; + struct tag_pa_action lacp_action; + + memset(&lacp_rule, 0, sizeof(lacp_rule)); + memset(&lacp_action, 0, sizeof(lacp_action)); + /* LACP TCAM rule */ + lacp_rule.eth_type = PA_ETH_TYPE_OTHER; + lacp_rule.l2_header.eth_type.val16 = 0x8809; + lacp_rule.l2_header.eth_type.mask16 = 0xffff; + + /* LACP TCAM action */ + lacp_action.err_type = 0x3f; /* err from ipsu, not convert */ + lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */ + lacp_action.pkt_type = PKT_LACP_TYPE; + lacp_action.pri = 0x0; + lacp_action.push_len = 0xf; /* push_len:0xf, not convert */ + + return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP, + &lacp_rule, &lacp_action); +} + static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev) { struct tag_pa_rule bgp_rule; @@ -1722,6 +1758,276 @@ static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev, return 0; } +static inline int +hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter) +{ + if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in" + " ethertype filter", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "Mac compare is not supported"); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "Drop option is not supported"); + return -EINVAL; + } + + return 0; +} + +static inline int +hinic_ethertype_filter_lookup(struct 
hinic_filter_info *filter_info, + struct hinic_pkt_filter *ethertype_filter) +{ + switch (ethertype_filter->pkt_proto) { + case RTE_ETHER_TYPE_SLOW: + filter_info->pkt_type = PKT_LACP_TYPE; + break; + + case RTE_ETHER_TYPE_ARP: + filter_info->pkt_type = PKT_ARP_TYPE; + break; + + default: + PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters"); + return -EIO; + } + + return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type); +} + +static inline int +hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info, + struct hinic_pkt_filter *ethertype_filter) +{ + int id; + + /* Find LACP or VRRP type id */ + id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter); + if (id < 0) + return -EINVAL; + + if (!(filter_info->type_mask & (1 << id))) { + filter_info->type_mask |= 1 << id; + filter_info->pkt_filters[id].pkt_proto = + ethertype_filter->pkt_proto; + filter_info->pkt_filters[id].enable = ethertype_filter->enable; + filter_info->qid = ethertype_filter->qid; + return id; + } + + PMD_DRV_LOG(ERR, "Filter type: %d exists", id); + return -EINVAL; +} + +static inline void +hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= HINIC_MAX_Q_FILTERS) + return; + + filter_info->pkt_type = 0; + filter_info->type_mask &= ~(1 << idx); + filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0; + filter_info->pkt_filters[idx].enable = FALSE; + filter_info->pkt_filters[idx].qid = 0; +} + +static inline int +hinic_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_pkt_filter ethertype_filter; + int i; + int ret_fw; + + if (hinic_check_ethertype_filter(filter)) + return -EINVAL; + + if (add) { + ethertype_filter.pkt_proto = filter->ether_type; + ethertype_filter.enable = TRUE; 
+ ethertype_filter.qid = (u8)filter->queue; + i = hinic_ethertype_filter_insert(filter_info, + ðertype_filter); + if (i < 0) + return -ENOSPC; + + ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, + filter_info->pkt_type, filter_info->qid, + filter_info->pkt_filters[i].enable, true); + if (ret_fw) { + PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + + hinic_ethertype_filter_remove(filter_info, i); + return -ENOENT; + } + PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + + switch (ethertype_filter.pkt_proto) { + case RTE_ETHER_TYPE_SLOW: + ret_fw = hinic_set_lacp_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Add lacp tcam failed"); + hinic_ethertype_filter_remove(filter_info, i); + return -ENOENT; + } + + PMD_DRV_LOG(INFO, "Add lacp tcam succeed"); + break; + default: + break; + } + + } else { + ethertype_filter.pkt_proto = filter->ether_type; + i = hinic_ethertype_filter_lookup(filter_info, + ðertype_filter); + + if ((filter_info->type_mask & (1 << i))) { + filter_info->pkt_filters[i].enable = FALSE; + (void)hinic_set_fdir_filter(nic_dev->hwdev, + filter_info->pkt_type, + filter_info->pkt_filters[i].qid, + filter_info->pkt_filters[i].enable, + true); + + PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, + filter_info->pkt_filters[i].qid, + filter_info->pkt_filters[i].enable); + + switch (ethertype_filter.pkt_proto) { + case RTE_ETHER_TYPE_SLOW: + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_LACP); + PMD_DRV_LOG(INFO, + "Del lacp tcam succeed"); + break; + default: + break; + } + + hinic_ethertype_filter_remove(filter_info, i); + + } else { + PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + 
filter_info->pkt_filters[i].enable); + return -ENOENT; + } + } + + return 0; +} + +static int +hinic_fdir_info_init(struct hinic_fdir_rule *rule, + struct hinic_fdir_info *fdir_info) +{ + switch (rule->mask.src_ipv4_mask) { + case UINT32_MAX: + fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP; + fdir_info->qid = rule->queue; + fdir_info->fdir_key = rule->hinic_fdir.src_ip; + return 0; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "Invalid src_ip mask."); + return -EINVAL; + } + + switch (rule->mask.dst_ipv4_mask) { + case UINT32_MAX: + fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP; + fdir_info->qid = rule->queue; + fdir_info->fdir_key = rule->hinic_fdir.dst_ip; + return 0; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "Invalid dst_ip mask."); + return -EINVAL; + } + + if (fdir_info->fdir_flag == 0) { + PMD_DRV_LOG(ERR, "All support mask is NULL."); + return -EINVAL; + } + + return 0; +} + +static inline int +hinic_add_del_fdir_filter(struct rte_eth_dev *dev, + struct hinic_fdir_rule *rule, + bool add) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_fdir_info fdir_info; + int ret; + + memset(&fdir_info, 0, sizeof(struct hinic_fdir_info)); + + ret = hinic_fdir_info_init(rule, &fdir_info); + if (ret) { + PMD_DRV_LOG(ERR, "Init hinic fdir info failed!"); + return ret; + } + + if (add) { + ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid, + true, fdir_info.fdir_key, + true, fdir_info.fdir_flag); + if (ret) { + PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + return -ENOENT; + } + PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + } else { + ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid, + false, fdir_info.fdir_key, true, + fdir_info.fdir_flag); + if (ret) { + PMD_DRV_LOG(ERR, "Del fdir filter ailed, 
flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + return -ENOENT; + } + PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + } + + return 0; +} + /** * Create or destroy a flow rule. * Theorically one rule can match more than one filters. @@ -1736,8 +2042,12 @@ static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev, { int ret; struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct hinic_fdir_rule fdir_rule; struct rte_flow *flow = NULL; + struct hinic_ethertype_filter_ele *ethertype_filter_ptr; struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_fdir_rule_ele *fdir_rule_ptr; struct hinic_flow_mem *hinic_flow_mem_ptr; struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); @@ -1759,29 +2069,78 @@ static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries); - /* add ntuple filter */ + /* Add ntuple filter */ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); ret = hinic_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); - if (ret) + if (!ret) { + ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); + if (!ret) { + ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter", + sizeof(struct hinic_ntuple_filter_ele), 0); + rte_memcpy(&ntuple_filter_ptr->filter_info, + &ntuple_filter, + sizeof(struct rte_eth_ntuple_filter)); + TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list, + ntuple_filter_ptr, entries); + flow->rule = ntuple_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_NTUPLE; + + PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + } + goto out; + } + + /* Add ethertype filter */ + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = 
hinic_parse_ethertype_filter(dev, attr, pattern, actions, + ðertype_filter, error); + if (!ret) { + ret = hinic_add_del_ethertype_filter(dev, ðertype_filter, + TRUE); + if (!ret) { + ethertype_filter_ptr = + rte_zmalloc("hinic_ethertype_filter", + sizeof(struct hinic_ethertype_filter_ele), 0); + rte_memcpy(ðertype_filter_ptr->filter_info, + ðertype_filter, + sizeof(struct rte_eth_ethertype_filter)); + TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list, + ethertype_filter_ptr, entries); + flow->rule = ethertype_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + + PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + } goto out; + } - ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); - if (ret) + /* Add fdir filter */ + memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule)); + ret = hinic_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + if (!ret) { + ret = hinic_add_del_fdir_filter(dev, &fdir_rule, TRUE); + if (!ret) { + fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule", + sizeof(struct hinic_fdir_rule_ele), 0); + rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule, + sizeof(struct hinic_fdir_rule)); + TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list, + fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + } goto out; - ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter", - sizeof(struct hinic_ntuple_filter_ele), 0); - rte_memcpy(&ntuple_filter_ptr->filter_info, - &ntuple_filter, - sizeof(struct rte_eth_ntuple_filter)); - TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list, - ntuple_filter_ptr, entries); - flow->rule = ntuple_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_NTUPLE; - - PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x", - hinic_global_func_id(nic_dev->hwdev)); - 
return flow; + } out: TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries); @@ -1802,7 +2161,11 @@ static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *pmd_flow = flow; enum rte_filter_type filter_type = pmd_flow->filter_type; struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct hinic_fdir_rule fdir_rule; struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_ethertype_filter_ele *ethertype_filter_ptr; + struct hinic_fdir_rule_ele *fdir_rule_ptr; struct hinic_flow_mem *hinic_flow_mem_ptr; struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); @@ -1819,6 +2182,32 @@ static int hinic_flow_destroy(struct rte_eth_dev *dev, rte_free(ntuple_filter_ptr); } break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *) + pmd_flow->rule; + rte_memcpy(ðertype_filter, + ðertype_filter_ptr->filter_info, + sizeof(struct rte_eth_ethertype_filter)); + ret = hinic_add_del_ethertype_filter(dev, + ðertype_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&nic_dev->filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + } + break; + case RTE_ETH_FILTER_FDIR: + fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule; + rte_memcpy(&fdir_rule, + &fdir_rule_ptr->filter_info, + sizeof(struct hinic_fdir_rule)); + ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE); + if (!ret) { + TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, + fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + } + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); From patchwork Mon Sep 30 14:00:49 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60198 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from 
[92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8AF231BE9D; Mon, 30 Sep 2019 15:49:12 +0200 (CEST) Received: from huawei.com (szxga05-in.huawei.com [45.249.212.191]) by dpdk.org (Postfix) with ESMTP id 807E51BE96 for ; Mon, 30 Sep 2019 15:49:09 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 85CEC8F211783D5E231B for ; Mon, 30 Sep 2019 21:49:08 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:49:00 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:49 +0800 Message-ID: <5af3e8a1552e9a2fb23522255c72ca02ca60df95.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 11/19] net/hinic: flush fdir filter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Supports to flush fdir filter. Destroy all flow rules associated with a port on hinic. 
Signed-off-by: Xiaoyun wang --- drivers/net/hinic/hinic_pmd_ethdev.c | 16 ++++ drivers/net/hinic/hinic_pmd_ethdev.h | 3 +- drivers/net/hinic/hinic_pmd_flow.c | 140 +++++++++++++++++++++++++++++++++++ 3 files changed, 158 insertions(+), 1 deletion(-) diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index f2f0fa9..d0bcbe5 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -341,6 +341,9 @@ static int hinic_dev_configure(struct rte_eth_dev *dev) return err; } + /*clear fdir filter flag in function table*/ + hinic_free_fdir_filter(nic_dev); + return HINIC_OK; } @@ -1115,6 +1118,8 @@ static void hinic_dev_stop(struct rte_eth_dev *dev) /* clean root context */ hinic_free_qp_ctxts(nic_dev->hwdev); + hinic_free_fdir_filter(nic_dev); + /* free mbuf */ hinic_free_all_rx_mbuf(dev); hinic_free_all_tx_mbuf(dev); @@ -2816,6 +2821,7 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev; struct rte_ether_addr *eth_addr; struct hinic_nic_dev *nic_dev; + struct hinic_filter_info *filter_info; u32 mac_size; int rc; @@ -2907,6 +2913,16 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev) } hinic_set_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status); + /* initialize filter info */ + filter_info = &nic_dev->filter; + memset(filter_info, 0, sizeof(struct hinic_filter_info)); + /* initialize 5tuple filter list */ + TAILQ_INIT(&filter_info->fivetuple_list); + TAILQ_INIT(&nic_dev->filter_ntuple_list); + TAILQ_INIT(&nic_dev->filter_ethertype_list); + TAILQ_INIT(&nic_dev->filter_fdir_rule_list); + TAILQ_INIT(&nic_dev->hinic_flow_list); + hinic_set_bit(HINIC_DEV_INIT, &nic_dev->dev_status); PMD_DRV_LOG(INFO, "Initialize %s in primary successfully", eth_dev->data->name); diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index b7fcef6..dd96667 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ 
-173,7 +173,6 @@ struct hinic_nic_dev { u32 rx_mode_status; /* promisc or allmulticast */ unsigned long dev_status; - /* dpdk only */ char proc_dev_name[HINIC_DEV_NAME_LEN]; /* PF0->COS4, PF1->COS5, PF2->COS6, PF3->COS7, * vf: the same with associate pf @@ -187,4 +186,6 @@ struct hinic_nic_dev { struct hinic_flow_mem_list hinic_flow_list; }; +void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev); + #endif /* _HINIC_PMD_ETHDEV_H_ */ diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c index b2eff2e..49c9d87 100644 --- a/drivers/net/hinic/hinic_pmd_flow.c +++ b/drivers/net/hinic/hinic_pmd_flow.c @@ -1456,6 +1456,40 @@ static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev) &vrrp_rule, &vrrp_action); } +/** + * Clear all fdir configuration. + * + * @param nic_dev + * The hardware interface of a Ethernet device. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev); + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_BGPD_DPORT_TYPE))) + hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT); + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_BGPD_SPORT_TYPE))) + hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT); + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_VRRP_TYPE))) + hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP); + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) + hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP); + + hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false); +} + static int hinic_filter_info_init(struct hinic_5tuple_filter *filter, struct hinic_filter_info *filter_info) @@ -2238,8 +2272,114 @@ static int hinic_flow_destroy(struct rte_eth_dev *dev, return ret; } +/* Remove all the n-tuple filters */ +static void 
hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_5tuple_filter *p_5tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + hinic_remove_5tuple_filter(dev, p_5tuple); +} + +/* Remove all the ether type filters */ +static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev); + int ret = 0; + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) { + hinic_ethertype_filter_remove(filter_info, + HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE)); + ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE, + filter_info->qid, false, true); + + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP); + } + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) { + hinic_ethertype_filter_remove(filter_info, + HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE)); + ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE, + filter_info->qid, false, true); + } + + if (ret) + PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x", + filter_info->pkt_type); +} + +/* Remove all the ether type filters */ +static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false); +} + +static void hinic_filterlist_flush(struct rte_eth_dev *dev) +{ + struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_ethertype_filter_ele *ethertype_filter_ptr; + struct hinic_fdir_rule_ele *fdir_rule_ptr; + struct hinic_flow_mem *hinic_flow_mem_ptr; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + while ((ntuple_filter_ptr = + TAILQ_FIRST(&nic_dev->filter_ntuple_list))) { + 
TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr, + entries); + rte_free(ntuple_filter_ptr); + } + + while ((ethertype_filter_ptr = + TAILQ_FIRST(&nic_dev->filter_ethertype_list))) { + TAILQ_REMOVE(&nic_dev->filter_ethertype_list, + ethertype_filter_ptr, + entries); + rte_free(ethertype_filter_ptr); + } + + while ((fdir_rule_ptr = + TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) { + TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr, + entries); + rte_free(fdir_rule_ptr); + } + + while ((hinic_flow_mem_ptr = + TAILQ_FIRST(&nic_dev->hinic_flow_list))) { + TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, + entries); + rte_free(hinic_flow_mem_ptr->flow); + rte_free(hinic_flow_mem_ptr); + } +} + +/* Destroy all flow rules associated with a port on hinic. */ +static int hinic_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + hinic_clear_all_ntuple_filter(dev); + hinic_clear_all_ethertype_filter(dev); + hinic_clear_all_fdir_filter(dev); + hinic_filterlist_flush(dev); + + PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return 0; +} + const struct rte_flow_ops hinic_flow_ops = { .validate = hinic_flow_validate, .create = hinic_flow_create, .destroy = hinic_flow_destroy, + .flush = hinic_flow_flush, }; From patchwork Mon Sep 30 14:00:50 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60199 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id ECE911BE3D; Mon, 30 Sep 2019 15:49:27 +0200 (CEST) Received: from huawei.com (szxga05-in.huawei.com [45.249.212.191]) by dpdk.org (Postfix) with ESMTP id 8B4727CBC for ; Mon, 30 
Sep 2019 15:49:25 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id C171155F6C44EC586924 for ; Mon, 30 Sep 2019 21:49:23 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:49:12 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:50 +0800 Message-ID: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 12/19] net/hinic: set link down and up X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch supports setting link down and up. Signed-off-by: Xiaoyun wang --- doc/guides/nics/hinic.rst | 1 + drivers/net/hinic/base/hinic_pmd_cmd.h | 2 + drivers/net/hinic/base/hinic_pmd_niccfg.c | 38 +++++++++++++++++++ drivers/net/hinic/base/hinic_pmd_niccfg.h | 9 +++++ drivers/net/hinic/hinic_pmd_ethdev.c | 62 +++++++++++++++++++++++++++++++ 5 files changed, 112 insertions(+) diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index 8ff4171..9a9fcbe 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -31,6 +31,7 @@ Features - Unicast MAC filter - Multicast MAC filter - Flow director +- Set Link down or up Prerequisites ------------- diff --git a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h index 6b3dcf3..55d4292 100644 --- a/drivers/net/hinic/base/hinic_pmd_cmd.h +++ b/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -119,6 +119,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb, HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc, + HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4, + HINIC_PORT_CMD_GET_LINK_MODE = 0xD9, 
HINIC_PORT_CMD_SET_SPEED = 0xDA, HINIC_PORT_CMD_SET_AUTONEG = 0xDB, diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index 53d981b..9f23fdd 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -1613,6 +1613,44 @@ int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised) } /** + * hinic_set_xsfp_tx_status - Enable or disable the fiber in + * tx direction when set link up or down. + * + * @param hwdev + * The hardware interface of a nic device. + * @param enable + * Enable or Disable. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_xsfp_tx_status(void *hwdev, bool enable) +{ + struct hinic_set_xsfp_status xsfp_status; + u16 out_size = sizeof(struct hinic_set_xsfp_status); + int err; + + memset(&xsfp_status, 0, sizeof(xsfp_status)); + xsfp_status.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + xsfp_status.port_id = hinic_global_func_id(hwdev); + xsfp_status.xsfp_tx_dis = ((enable == 0) ? 1 : 0); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_XSFP_STATUS, + &xsfp_status, sizeof(struct hinic_set_xsfp_status), + &xsfp_status, &out_size); + if (err || !out_size || xsfp_status.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to %s port xsfp status, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "Disable" : "Enable", err, + xsfp_status.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** * hinic_flush_qp_res - Flush tx && rx chip resources in case of set vport * fake failed when device start. 
* diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h index e8ce332..beab630 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.h +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -578,6 +578,13 @@ struct hinic_link_mode_cmd { u16 advertised; }; +struct hinic_set_xsfp_status { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u32 port_id; + u32 xsfp_tx_dis; /* 0: tx enable; 1: tx disable */ +}; + struct hinic_clear_qp_resource { struct hinic_mgmt_msg_head mgmt_msg_head; @@ -807,6 +814,8 @@ int hinic_set_link_status_follow(void *hwdev, int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised); +int hinic_set_xsfp_tx_status(void *hwdev, bool enable); + int hinic_flush_qp_res(void *hwdev); int hinic_init_function_table(void *hwdev, u16 rx_buf_sz); diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index d0bcbe5..ea39ff0 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -883,6 +883,66 @@ static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete) } /** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int hinic_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int ret; + + ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, true); + if (ret) { + PMD_DRV_LOG(ERR, "Enable port tx xsfp failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return ret; + } + + /* link status follow phy port status, up will open pma */ + ret = hinic_set_port_enable(nic_dev->hwdev, true); + if (ret) + PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + + return ret; +} + +/** + * DPDK callback to bring the link DOWN. 
+ * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int hinic_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int ret; + + ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, false); + if (ret) { + PMD_DRV_LOG(ERR, "Disable port tx xsfp failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return ret; + } + + /* link status follow phy port status, up will close pma */ + ret = hinic_set_port_enable(nic_dev->hwdev, false); + if (ret) + PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + + return ret; +} + +/** * DPDK callback to start the device. * * @param dev @@ -2756,6 +2816,8 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .rx_queue_setup = hinic_rx_queue_setup, .tx_queue_setup = hinic_tx_queue_setup, .dev_start = hinic_dev_start, + .dev_set_link_up = hinic_dev_set_link_up, + .dev_set_link_down = hinic_dev_set_link_down, .link_update = hinic_link_update, .rx_queue_release = hinic_rx_queue_release, .tx_queue_release = hinic_tx_queue_release, From patchwork Mon Sep 30 14:00:51 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60200 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BD9021BEB0; Mon, 30 Sep 2019 15:49:31 +0200 (CEST) Received: from huawei.com (szxga05-in.huawei.com [45.249.212.191]) by dpdk.org (Postfix) with ESMTP id 3D6FC1BEAE for ; Mon, 30 Sep 2019 15:49:30 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.59]) by Forcepoint Email with ESMTP id CBDDB8C87AC06EEA30D7 for ; Mon, 30 Sep 2019 
21:49:28 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:49:18 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:51 +0800 Message-ID: <78e7874ff668111d2230aa696e141baf0b1865f6.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 13/19] net/hinic: get firmware version X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds support for getting firmware version. Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 1 + doc/guides/nics/hinic.rst | 1 + drivers/net/hinic/base/hinic_pmd_cmd.h | 2 ++ drivers/net/hinic/base/hinic_pmd_niccfg.c | 40 +++++++++++++++++++++++++++++++ drivers/net/hinic/base/hinic_pmd_niccfg.h | 13 ++++++++++ drivers/net/hinic/hinic_pmd_ethdev.c | 23 ++++++++++++++++++ 6 files changed, 80 insertions(+) diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index a7cb33e..2c9d845 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -33,6 +33,7 @@ Basic stats = Y Extended stats = Y Stats per queue = Y Flow director = Y +FW version = Y Linux UIO = Y Linux VFIO = Y BSD nic_uio = N diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index 9a9fcbe..ed607eb 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -32,6 +32,7 @@ Features - Multicast MAC filter - Flow director - Set Link down or up +- FW version Prerequisites ------------- diff --git a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h index 
55d4292..e0633bd 100644 --- a/drivers/net/hinic/base/hinic_pmd_cmd.h +++ b/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -85,6 +85,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a, HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE, + HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58, + HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b, HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c, diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c index 9f23fdd..2dc431e 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.c +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -1293,6 +1293,46 @@ int hinic_set_rx_mode(void *hwdev, u32 enable) return 0; } +/** + * hinic_get_mgmt_version - Get mgmt module version from chip. + * + * @param hwdev + * The hardware interface of a nic device. + * @param fw + * Firmware version. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_get_mgmt_version(void *hwdev, char *fw) +{ + struct hinic_version_info fw_ver; + u16 out_size = sizeof(fw_ver); + int err; + + if (!hwdev || !fw) { + PMD_DRV_LOG(ERR, "Hwdev or fw is NULL"); + return -EINVAL; + } + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, + &fw_ver, sizeof(fw_ver), &fw_ver, + &out_size); + if (err || !out_size || fw_ver.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", + err, fw_ver.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + snprintf(fw, HINIC_MGMT_VERSION_MAX_LEN, "%s", fw_ver.ver); + + return 0; +} + int hinic_set_rx_csum_offload(void *hwdev, u32 en) { struct hinic_checksum_offload rx_csum_cfg; diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h index beab630..b9e037e 100644 --- a/drivers/net/hinic/base/hinic_pmd_niccfg.h +++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -471,6 +471,17 @@ struct 
hinic_rx_mode_config { u32 rx_mode; }; +#define HINIC_MGMT_VERSION_MAX_LEN 32 +#define HINIC_COMPILE_TIME_LEN 20 +#define HINIC_FW_VERSION_NAME 16 + +struct hinic_version_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 ver[HINIC_FW_VERSION_NAME]; + u8 time[HINIC_COMPILE_TIME_LEN]; +}; + /* rss */ struct nic_rss_indirect_tbl { u32 group_index; @@ -807,6 +818,8 @@ int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, int hinic_set_rx_mode(void *hwdev, u32 enable); +int hinic_get_mgmt_version(void *hwdev, char *fw); + int hinic_set_rx_csum_offload(void *hwdev, u32 en); int hinic_set_link_status_follow(void *hwdev, diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index ea39ff0..48ec467 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -752,6 +752,27 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) return 0; } +static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; + int err; + + err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver); + if (err) { + PMD_DRV_LOG(ERR, "Failed to get fw version\n"); + return -EINVAL; + } + + if (fw_size < strlen(fw_ver) + 1) + return (strlen(fw_ver) + 1); + + snprintf(fw_version, fw_size, "%s", fw_ver); + + return 0; +} + static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl) { int err; @@ -2813,6 +2834,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev) static const struct eth_dev_ops hinic_pmd_ops = { .dev_configure = hinic_dev_configure, .dev_infos_get = hinic_dev_infos_get, + .fw_version_get = hinic_fw_version_get, .rx_queue_setup = hinic_rx_queue_setup, .tx_queue_setup = hinic_tx_queue_setup, .dev_start = hinic_dev_start, @@ -2849,6 +2871,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev) static const struct 
eth_dev_ops hinic_pmd_vf_ops = { .dev_configure = hinic_dev_configure, .dev_infos_get = hinic_dev_infos_get, + .fw_version_get = hinic_fw_version_get, .rx_queue_setup = hinic_rx_queue_setup, .tx_queue_setup = hinic_tx_queue_setup, .dev_start = hinic_dev_start, From patchwork Mon Sep 30 14:00:52 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60201 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 9B2A71BEB5; Mon, 30 Sep 2019 15:49:37 +0200 (CEST) Received: from huawei.com (szxga06-in.huawei.com [45.249.212.32]) by dpdk.org (Postfix) with ESMTP id 5A7B91BEB5 for ; Mon, 30 Sep 2019 15:49:35 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.59]) by Forcepoint Email with ESMTP id D99CCC3294274DBD50EE for ; Mon, 30 Sep 2019 21:49:33 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:49:25 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:52 +0800 Message-ID: <65d97c88b84da4e12f68d521136e9f92189f0073.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 14/19] net/hinic: support inner L3 checksum offload X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch supports inner L3 checksum offload for VXLAN packets, modifies rx checksum offload. 
Signed-off-by: Xiaoyun wang --- drivers/net/hinic/hinic_pmd_ethdev.h | 1 + drivers/net/hinic/hinic_pmd_rx.c | 10 +- drivers/net/hinic/hinic_pmd_tx.c | 190 ++++++++++++++++++++++------------- 3 files changed, 127 insertions(+), 74 deletions(-) diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h index dd96667..3e3f3b3 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.h +++ b/drivers/net/hinic/hinic_pmd_ethdev.h @@ -178,6 +178,7 @@ struct hinic_nic_dev { * vf: the same with associate pf */ u32 default_cos; + u32 rx_csum_en; struct hinic_filter_info filter; struct hinic_ntuple_filter_list filter_ntuple_list; diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c index 08e02ae..37b4f5c 100644 --- a/drivers/net/hinic/hinic_pmd_rx.c +++ b/drivers/net/hinic/hinic_pmd_rx.c @@ -658,7 +658,6 @@ int hinic_rx_configure(struct rte_eth_dev *dev) struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); struct rte_eth_rss_conf rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; - u32 csum_en = 0; int err; if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) { @@ -678,9 +677,10 @@ int hinic_rx_configure(struct rte_eth_dev *dev) /* Enable both L3/L4 rx checksum offload */ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM) - csum_en = HINIC_RX_CSUM_OFFLOAD_EN; + nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN; - err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en); + err = hinic_set_rx_csum_offload(nic_dev->hwdev, + HINIC_RX_CSUM_OFFLOAD_EN); if (err) goto rx_csum_ofl_err; @@ -781,6 +781,10 @@ static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq) { uint32_t checksum_err; uint64_t flags; + struct hinic_nic_dev *nic_dev = rxq->nic_dev; + + if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN))) + return PKT_RX_IP_CKSUM_UNKNOWN; /* most case checksum is ok */ checksum_err = HINIC_GET_RX_CSUM_ERR(status); diff --git a/drivers/net/hinic/hinic_pmd_tx.c 
b/drivers/net/hinic/hinic_pmd_tx.c index 0ef7add..26f481f 100644 --- a/drivers/net/hinic/hinic_pmd_tx.c +++ b/drivers/net/hinic/hinic_pmd_tx.c @@ -20,6 +20,9 @@ #include "hinic_pmd_tx.h" /* packet header and tx offload info */ +#define ETHER_LEN_NO_VLAN 14 +#define ETHER_LEN_WITH_VLAN 18 +#define HEADER_LEN_OFFSET 2 #define VXLANLEN 8 #define MAX_PLD_OFFSET 221 #define MAX_SINGLE_SGE_SIZE 65536 @@ -34,6 +37,9 @@ #define HINIC_TSO_PKT_MAX_SGE 127 /* tso max sge 127 */ #define HINIC_TSO_SEG_NUM_INVALID(num) ((num) > HINIC_TSO_PKT_MAX_SGE) +#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET 1 +#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET 0 + /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */ #define HINIC_BUF_DESC_SIZE(nr_descs) (SIZE_8BYTES(((u32)nr_descs) << 4)) @@ -476,16 +482,16 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf, hinic_set_l4_csum_info(struct hinic_sq_task *task, u32 *queue_info, struct hinic_tx_offload_info *poff_info) { - u32 tcp_udp_cs, sctp; + u32 tcp_udp_cs, sctp = 0; u16 l2hdr_len; - sctp = 0; if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE)) sctp = 1; tcp_udp_cs = poff_info->inner_l4_tcp_udp; - if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) { + if (poff_info->tunnel_type == TUNNEL_UDP_CSUM || + poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) { l2hdr_len = poff_info->outer_l2_len; task->pkt_info2 |= @@ -665,50 +671,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi); } -static inline int -hinic_validate_tx_offload(const struct rte_mbuf *m) -{ - uint64_t ol_flags = m->ol_flags; - uint64_t inner_l3_offset = m->l2_len; - - /* just support vxlan offload */ - if ((ol_flags & PKT_TX_TUNNEL_MASK) && - !(ol_flags & PKT_TX_TUNNEL_VXLAN)) - return -ENOTSUP; - - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) - inner_l3_offset += m->outer_l2_len + m->outer_l3_len; - - /* Headers are fragmented */ - if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len) - 
return -ENOTSUP; - - /* IP checksum can be counted only for IPv4 packet */ - if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6)) - return -EINVAL; - - /* IP type not set when required */ - if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) { - if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6))) - return -EINVAL; - } - - /* Check requirements for TSO packet */ - if (ol_flags & PKT_TX_TCP_SEG) { - if (m->tso_segsz == 0 || - ((ol_flags & PKT_TX_IPV4) && - !(ol_flags & PKT_TX_IP_CKSUM))) - return -EINVAL; - } - - /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */ - if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) && - !(ol_flags & PKT_TX_OUTER_IPV4)) - return -EINVAL; - - return 0; -} - static inline uint16_t hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags) { @@ -760,6 +722,65 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) return __rte_raw_cksum_reduce(sum); } +static inline void +hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info, + int outer_cs_flag) +{ + uint64_t ol_flags = m->ol_flags; + + if (outer_cs_flag == 1) { + if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) { + off_info->payload_offset = m->outer_l2_len + + m->outer_l3_len + m->l2_len + m->l3_len; + } else if ((ol_flags & PKT_TX_TCP_CKSUM) || + (ol_flags & PKT_TX_TCP_SEG)) { + off_info->payload_offset = m->outer_l2_len + + m->outer_l3_len + m->l2_len + + m->l3_len + m->l4_len; + } + } else { + if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) { + off_info->payload_offset = m->l2_len + m->l3_len; + } else if ((ol_flags & PKT_TX_TCP_CKSUM) || + (ol_flags & PKT_TX_TCP_SEG)) { + off_info->payload_offset = m->l2_len + m->l3_len + + m->l4_len; + } + } +} + +static inline void +hinic_analyze_tx_info(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_vlan_hdr *vlan_hdr; + struct rte_ipv4_hdr *ip4h; + u16 pkt_type; + u8 *hdr; + + hdr = (u8 
*)rte_pktmbuf_mtod(mbuf, u8*); + eth_hdr = (struct rte_ether_hdr *)hdr; + pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type); + + if (pkt_type == RTE_ETHER_TYPE_VLAN) { + off_info->outer_l2_len = ETHER_LEN_WITH_VLAN; + vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1); + pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto); + } else { + off_info->outer_l2_len = ETHER_LEN_NO_VLAN; + } + + if (pkt_type == RTE_ETHER_TYPE_IPV4) { + ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len); + off_info->outer_l3_len = (ip4h->version_ihl & 0xf) << + HEADER_LEN_OFFSET; + } else if (pkt_type == RTE_ETHER_TYPE_IPV6) { + /* not support ipv6 extension header */ + off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr); + } +} + static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info) @@ -771,42 +792,66 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) struct rte_ether_hdr *eth_hdr; struct rte_vlan_hdr *vlan_hdr; u16 eth_type = 0; - uint64_t inner_l3_offset = m->l2_len; + uint64_t inner_l3_offset; uint64_t ol_flags = m->ol_flags; - /* Does packet set any of available offloads */ + /* Check if the packets set available offload flags */ if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)) return 0; - if (unlikely(hinic_validate_tx_offload(m))) + /* Support only vxlan offload */ + if ((ol_flags & PKT_TX_TUNNEL_MASK) && + !(ol_flags & PKT_TX_TUNNEL_VXLAN)) + return -ENOTSUP; + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + if (rte_validate_tx_offload(m) != 0) return -EINVAL; +#endif - if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || - (ol_flags & PKT_TX_OUTER_IPV6) || - (ol_flags & PKT_TX_TUNNEL_VXLAN)) { - inner_l3_offset += m->outer_l2_len + m->outer_l3_len; - off_info->outer_l2_len = m->outer_l2_len; - off_info->outer_l3_len = m->outer_l3_len; - /* just support vxlan tunneling pkt */ - off_info->inner_l2_len = m->l2_len - VXLANLEN - - sizeof(struct rte_udp_hdr); - off_info->inner_l3_len = m->l3_len; - off_info->inner_l4_len = m->l4_len; - 
off_info->tunnel_length = m->l2_len; - off_info->payload_offset = m->outer_l2_len + - m->outer_l3_len + m->l2_len + m->l3_len; - off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + if (ol_flags & PKT_TX_TUNNEL_VXLAN) { + if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_IPV6)) { + inner_l3_offset = m->l2_len + m->outer_l2_len + + m->outer_l3_len; + off_info->outer_l2_len = m->outer_l2_len; + off_info->outer_l3_len = m->outer_l3_len; + /* just support vxlan tunneling pkt */ + off_info->inner_l2_len = m->l2_len - VXLANLEN - + sizeof(*udp_hdr); + off_info->inner_l3_len = m->l3_len; + off_info->inner_l4_len = m->l4_len; + off_info->tunnel_length = m->l2_len; + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + + hinic_get_pld_offset(m, off_info, + HINIC_TX_OUTER_CHECKSUM_FLAG_SET); + } else { + inner_l3_offset = m->l2_len; + hinic_analyze_tx_info(m, off_info); + /* just support vxlan tunneling pkt */ + off_info->inner_l2_len = m->l2_len - VXLANLEN - + sizeof(*udp_hdr) - off_info->outer_l2_len - + off_info->outer_l3_len; + off_info->inner_l3_len = m->l3_len; + off_info->inner_l4_len = m->l4_len; + off_info->tunnel_length = m->l2_len - + off_info->outer_l2_len - off_info->outer_l3_len; + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + + hinic_get_pld_offset(m, off_info, + HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET); + } } else { + inner_l3_offset = m->l2_len; off_info->inner_l2_len = m->l2_len; off_info->inner_l3_len = m->l3_len; off_info->inner_l4_len = m->l4_len; off_info->tunnel_type = NOT_TUNNEL; - off_info->payload_offset = m->l2_len + m->l3_len; - } - if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) && - ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)) - off_info->payload_offset += m->l4_len; + hinic_get_pld_offset(m, off_info, + HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET); + } /* invalid udp or tcp header */ if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET)) @@ -855,6 +900,10 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) 
udp_hdr->dgram_cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); } + } else if (ol_flags & PKT_TX_OUTER_IPV4) { + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + off_info->inner_l4_tcp_udp = 1; + off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; } if (ol_flags & PKT_TX_IPV4) @@ -892,7 +941,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) off_info->inner_l4_type = UDP_OFFLOAD_ENABLE; off_info->inner_l4_tcp_udp = 1; - off_info->inner_l4_len = sizeof(struct rte_udp_hdr); } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG)) { if (ol_flags & PKT_TX_IPV4) { From patchwork Mon Sep 30 14:00:53 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60202 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 050EA1BEBB; Mon, 30 Sep 2019 15:49:52 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 72B731BEBB for ; Mon, 30 Sep 2019 15:49:50 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 1B015D35D3B56AF4475E for ; Mon, 30 Sep 2019 21:49:49 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:49:38 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:53 +0800 Message-ID: <53c8ab182b21c48588d67f6fe6bcdcbb02fb9fb6.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 15/19] net/hinic: support LRO offload X-BeenThere: 
dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch supports LRO offload. Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 1 + doc/guides/nics/hinic.rst | 1 + drivers/net/hinic/hinic_pmd_ethdev.c | 24 ++++++++++++++++++------ 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index 2c9d845..65a335a 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -13,6 +13,7 @@ MTU update = Y Jumbo frame = Y Scattered Rx = Y TSO = Y +LRO = Y Promiscuous mode = Y Allmulticast mode = Y Unicast MAC filter = Y diff --git a/doc/guides/nics/hinic.rst b/doc/guides/nics/hinic.rst index ed607eb..9fb80d4 100644 --- a/doc/guides/nics/hinic.rst +++ b/doc/guides/nics/hinic.rst @@ -33,6 +33,7 @@ Features - Flow director - Set Link down or up - FW version +- LRO Prerequisites ------------- diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 48ec467..765e661 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -70,6 +70,9 @@ #define HINIC_PKTLEN_TO_MTU(pktlen) \ ((pktlen) - (ETH_HLEN + ETH_CRC_LEN)) +/* lro numer limit for one packet */ +#define HINIC_LRO_WQE_NUM_DEFAULT 8 + /* Driver-specific log messages type */ int hinic_logtype; @@ -731,7 +734,8 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_VLAN_FILTER | DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_JUMBO_FRAME; + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_TCP_LRO; info->tx_queue_offload_capa = 0; info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | @@ -792,6 +796,7 @@ static int hinic_rxtx_configure(struct rte_eth_dev *dev) { int err; struct hinic_nic_dev *nic_dev = 
HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + bool lro_en; /* rx configure, if rss enable, need to init default configuration */ err = hinic_rx_configure(dev); @@ -808,6 +813,18 @@ static int hinic_rxtx_configure(struct rte_eth_dev *dev) goto set_rx_mode_fail; } + /* config lro */ + lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? + true : false; + + err = hinic_set_rx_lro(nic_dev->hwdev, lro_en, lro_en, + HINIC_LRO_WQE_NUM_DEFAULT); + if (err) { + PMD_DRV_LOG(ERR, "%s lro failed, err: %d", + lro_en ? "Enable" : "Disable", err); + goto set_rx_mode_fail; + } + return HINIC_OK; set_rx_mode_fail: @@ -2478,11 +2495,6 @@ static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) if (err) return err; - /* disable LRO */ - err = hinic_set_rx_lro(nic_dev->hwdev, 0, 0, (u8)0); - if (err) - return err; - /* Set pause enable, and up will disable pfc. */ err = hinic_set_default_pause_feature(nic_dev); if (err) From patchwork Mon Sep 30 14:00:54 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60203 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id EEC0F1BECE; Mon, 30 Sep 2019 15:50:11 +0200 (CEST) Received: from huawei.com (szxga07-in.huawei.com [45.249.212.35]) by dpdk.org (Postfix) with ESMTP id 441961BECE for ; Mon, 30 Sep 2019 15:50:10 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.59]) by Forcepoint Email with ESMTP id 6A74F295C597689F77C6 for ; Mon, 30 Sep 2019 21:50:09 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:50:02 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:54 +0800 
Message-ID: <8eb91972e38215dc935b8c82c1e6fb4cb7d74ad7.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 16/19] net/hinic: add hinic PMD doc files X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add new supported features to rst file and add features to ini file. Signed-off-by: Xiaoyun wang --- doc/guides/nics/features/hinic.ini | 2 ++ doc/guides/rel_notes/release_19_11.rst | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/doc/guides/nics/features/hinic.ini b/doc/guides/nics/features/hinic.ini index 65a335a..dc02b4b 100644 --- a/doc/guides/nics/features/hinic.ini +++ b/doc/guides/nics/features/hinic.ini @@ -34,7 +34,9 @@ Basic stats = Y Extended stats = Y Stats per queue = Y Flow director = Y +Flow control = Y FW version = Y +Multiprocess aware = Y Linux UIO = Y Linux VFIO = Y BSD nic_uio = N diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst index cd4e350..0a349b9 100644 --- a/doc/guides/rel_notes/release_19_11.rst +++ b/doc/guides/rel_notes/release_19_11.rst @@ -287,3 +287,12 @@ Tested Platforms * Added support for VLAN set PCP offload command. * Added support for VLAN set VID offload command. +* **Updated the Huawei hinic driver.** + + Updated the Huawei hinic driver with new features and improvements, including: + + * Enabled SR-IOV - Partially supported at this point, VFIO only. + * Supported VLAN filter and VLAN offload. + * Supported Unicast MAC filter and Multicast MAC filter. + * Supported FW version get. + * Supported Flow director for LACP, VRRP, BGP and so on. 
From patchwork Mon Sep 30 14:00:55 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60204 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 100931BEE0; Mon, 30 Sep 2019 15:50:29 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 8EAB01BEE0 for ; Mon, 30 Sep 2019 15:50:20 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 8303A40A7CEB052C776E for ; Mon, 30 Sep 2019 21:50:19 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:50:09 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:55 +0800 Message-ID: X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 17/19] net/hinic/base: optimize aeq interfaces X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch deletes ceq interfaces that are not needed, and fixes aeq bugs in some scenarios.
Signed-off-by: Xiaoyun wang --- drivers/net/hinic/base/hinic_csr.h | 29 +--- drivers/net/hinic/base/hinic_pmd_cmd.h | 5 +- drivers/net/hinic/base/hinic_pmd_eqs.c | 245 +++++++++------------------------ drivers/net/hinic/base/hinic_pmd_eqs.h | 5 +- 4 files changed, 67 insertions(+), 217 deletions(-) diff --git a/drivers/net/hinic/base/hinic_csr.h b/drivers/net/hinic/base/hinic_csr.h index b63e52b..2626f69 100644 --- a/drivers/net/hinic/base/hinic_csr.h +++ b/drivers/net/hinic/base/hinic_csr.h @@ -48,16 +48,12 @@ /* EQ registers */ #define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 -#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 #define HINIC_EQ_MTT_OFF_STRIDE 0x40 #define HINIC_CSR_AEQ_MTT_OFF(id) \ (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) -#define HINIC_CSR_CEQ_MTT_OFF(id) \ - (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) - #define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 #define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ @@ -68,23 +64,11 @@ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) -#define HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) - -#define HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) - #define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \ - ((u32)((type == HINIC_AEQ) ? \ - HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \ - HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) + ((u32)(HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) #define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \ - ((u32)((type == HINIC_AEQ) ? 
\ - HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \ - HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) + ((u32)(HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) #define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 #define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 @@ -105,15 +89,6 @@ #define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) -#define HINIC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008 -#define HINIC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C - -#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ - (HINIC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ - (HINIC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - /* API CMD registers */ #define HINIC_CSR_API_CMD_BASE 0xF000 diff --git a/drivers/net/hinic/base/hinic_pmd_cmd.h b/drivers/net/hinic/base/hinic_pmd_cmd.h index e0633bd..c025851 100644 --- a/drivers/net/hinic/base/hinic_pmd_cmd.h +++ b/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -5,10 +5,7 @@ #ifndef _HINIC_PORT_CMD_H_ #define _HINIC_PORT_CMD_H_ -enum hinic_eq_type { - HINIC_AEQ, - HINIC_CEQ -}; +#define HINIC_AEQ 0 enum hinic_resp_aeq_num { HINIC_AEQ0 = 0, diff --git a/drivers/net/hinic/base/hinic_pmd_eqs.c b/drivers/net/hinic/base/hinic_pmd_eqs.c index 8d216cf..abe0dae 100644 --- a/drivers/net/hinic/base/hinic_pmd_eqs.c +++ b/drivers/net/hinic/base/hinic_pmd_eqs.c @@ -43,32 +43,6 @@ ((val) & (~(AEQ_CTRL_1_##member##_MASK \ << AEQ_CTRL_1_##member##_SHIFT))) -#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 -#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 -#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU -#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU -#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU -#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U -#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U - -#define CEQ_CTRL_0_SET(val, member) \ - (((val) & CEQ_CTRL_0_##member##_MASK) << \ - CEQ_CTRL_0_##member##_SHIFT) - -#define CEQ_CTRL_1_LEN_SHIFT 0 
-#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU -#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU - -#define CEQ_CTRL_1_SET(val, member) \ - (((val) & CEQ_CTRL_1_##member##_MASK) << \ - CEQ_CTRL_1_##member##_SHIFT) - #define EQ_CONS_IDX_CONS_IDX_SHIFT 0 #define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24 #define EQ_CONS_IDX_INT_ARMED_SHIFT 31 @@ -90,13 +64,11 @@ #define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) -#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) :\ - HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) +#define EQ_CONS_IDX_REG_ADDR(eq) \ + (HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id)) -#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) :\ - HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) +#define EQ_PROD_IDX_REG_ADDR(eq) \ + (HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id)) #define GET_EQ_NUM_PAGES(eq, size) \ ((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \ @@ -111,9 +83,6 @@ #define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) #define AEQ_DMA_ATTR_DEFAULT 0 -#define CEQ_DMA_ATTR_DEFAULT 0 - -#define CEQ_LMT_KICK_DEFAULT 0 #define EQ_WRAPPED_SHIFT 20 @@ -137,7 +106,7 @@ static u8 eq_cons_idx_checksum_set(u32 val) * set_eq_cons_idx - write the cons idx to the hw * @eq: The event queue to update the cons idx for * @arm_state: indicate whether report interrupts when generate eq element - **/ + */ static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state) { u32 eq_cons_idx, eq_wrap_ci, val; @@ -172,125 +141,60 @@ static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state) /** * eq_update_ci - update the cons idx of event queue * @eq: the event queue to update the cons idx for - **/ + */ void eq_update_ci(struct hinic_eq *eq) { set_eq_cons_idx(eq, HINIC_EQ_ARMED); } -struct hinic_ceq_ctrl_reg { - struct hinic_mgmt_msg_head mgmt_msg_head; - - u16 func_id; - u16 q_id; - u32 ctrl0; - u32 
ctrl1; -}; - -static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id, - u32 ctrl0, u32 ctrl1) -{ - struct hinic_ceq_ctrl_reg ceq_ctrl; - u16 in_size = sizeof(ceq_ctrl); - - memset(&ceq_ctrl, 0, in_size); - ceq_ctrl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; - ceq_ctrl.func_id = hinic_global_func_id(hwdev); - ceq_ctrl.q_id = q_id; - ceq_ctrl.ctrl0 = ctrl0; - ceq_ctrl.ctrl1 = ctrl1; - - return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, - HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, - &ceq_ctrl, in_size, NULL, NULL, 0); -} - /** * set_eq_ctrls - setting eq's ctrls registers * @eq: the event queue for setting - **/ -static int set_eq_ctrls(struct hinic_eq *eq) + */ +static void set_aeq_ctrls(struct hinic_eq *eq) { - enum hinic_eq_type type = eq->type; struct hinic_hwif *hwif = eq->hwdev->hwif; struct irq_info *eq_irq = &eq->eq_irq; u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif); - int ret = 0; - - if (type == HINIC_AEQ) { - /* set ctrl0 */ - addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); - - val = hinic_hwif_read_reg(hwif, addr); - - val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & - AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - AEQ_CTRL_0_CLEAR(val, INTR_MODE); - - ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | - AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | - AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | - AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); - - val |= ctrl0; - hinic_hwif_write_reg(hwif, addr, val); + /* set ctrl0 */ + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); - /* set ctrl1 */ - addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + val = hinic_hwif_read_reg(hwif, addr); - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); - ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | - 
AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | - AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); - hinic_hwif_write_reg(hwif, addr, ctrl1); - } else { - ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | - CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | - CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | - CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | - CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); + val |= ctrl0; - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + hinic_hwif_write_reg(hwif, addr, val); - ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) | - CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + /* set ctrl1 */ + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); - /* set ceq ctrl reg through mgmt cpu */ - ret = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); - } - - return ret; -} - -/** - * ceq_elements_init - Initialize all the elements in the ceq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) -{ - u16 i; - u32 *ceqe; + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); - for (i = 0; i < eq->eq_len; i++) { - ceqe = GET_CEQ_ELEM(eq, i); - *(ceqe) = cpu_to_be32(init_val); - } + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - rte_wmb(); /* Write the init values */ + hinic_hwif_write_reg(hwif, addr, ctrl1); } /** * aeq_elements_init - initialize all the elements in the aeq * @eq: the event queue * @init_val: value to init with it the elements - **/ + */ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) { struct hinic_aeq_elem *aeqe; @@ -307,7 +211,7 @@ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) /** * alloc_eq_pages - 
allocate the pages for the queue * @eq: the event queue - **/ + */ static int alloc_eq_pages(struct hinic_eq *eq) { struct hinic_hwif *hwif = eq->hwdev->hwif; @@ -355,10 +259,7 @@ static int alloc_eq_pages(struct hinic_eq *eq) init_val = EQ_WRAPPED(eq); - if (eq->type == HINIC_AEQ) - aeq_elements_init(eq, init_val); - else - ceq_elements_init(eq, init_val); + aeq_elements_init(eq, init_val); return 0; @@ -375,7 +276,7 @@ static int alloc_eq_pages(struct hinic_eq *eq) /** * free_eq_pages - free the pages of the queue * @eq: the event queue - **/ + */ static void free_eq_pages(struct hinic_eq *eq) { struct hinic_hwdev *hwdev = eq->hwdev; @@ -393,7 +294,7 @@ static void free_eq_pages(struct hinic_eq *eq) #define MSIX_ENTRY_IDX_0 (0) /** - * init_eq - initialize eq + * init_aeq - initialize aeq * @eq: the event queue * @hwdev: the pointer to the private hardware device object * @q_id: Queue id number @@ -402,36 +303,31 @@ static void free_eq_pages(struct hinic_eq *eq) * @page_size: the page size of the event queue * @entry: msix entry associated with the event queue * Return: 0 - Success, Negative - failure - **/ -static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id, - u16 q_len, enum hinic_eq_type type, u32 page_size, + */ +static int init_aeq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id, + u16 q_len, u32 page_size, __rte_unused struct irq_info *entry) { int err = 0; eq->hwdev = hwdev; eq->q_id = q_id; - eq->type = type; + eq->type = HINIC_AEQ; eq->page_size = page_size; eq->eq_len = q_len; /* clear eq_len to force eqe drop in hardware */ - if (eq->type == HINIC_AEQ) { - hinic_hwif_write_reg(eq->hwdev->hwif, - HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); - } else { - err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); - if (err) { - PMD_DRV_LOG(ERR, "Set ceq control registers ctrl0[0] ctrl1[0] failed"); - return err; - } - } + hinic_hwif_write_reg(eq->hwdev->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + + /* Clear PI and CI, also 
clear the ARM bit */ + hinic_hwif_write_reg(eq->hwdev->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); + hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); eq->cons_idx = 0; eq->wrapped = 0; - eq->elem_size = (type == HINIC_AEQ) ? - HINIC_AEQE_SIZE : HINIC_CEQE_SIZE; + eq->elem_size = HINIC_AEQE_SIZE; eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size); @@ -452,16 +348,10 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id, return err; } - /* pmd use MSIX_ENTRY_IDX_0*/ + /* pmd use MSIX_ENTRY_IDX_0 */ eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0; - err = set_eq_ctrls(eq); - if (err) { - PMD_DRV_LOG(ERR, "Init eq control registers failed"); - goto init_eq_ctrls_err; - } - - hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); + set_aeq_ctrls(eq); set_eq_cons_idx(eq, HINIC_EQ_ARMED); if (eq->q_id == 0) @@ -470,32 +360,23 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id, eq->poll_retry_nr = HINIC_RETRY_NUM; return 0; - -init_eq_ctrls_err: - free_eq_pages(eq); - - return err; } /** - * remove_eq - remove eq + * remove_aeq - remove aeq * @eq: the event queue - **/ -static void remove_eq(struct hinic_eq *eq) + */ +static void remove_aeq(struct hinic_eq *eq) { struct irq_info *entry = &eq->eq_irq; - if (eq->type == HINIC_AEQ) { - if (eq->q_id == 0) - hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx, - HINIC_MSIX_DISABLE); + if (eq->q_id == 0) + hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx, + HINIC_MSIX_DISABLE); - /* clear eq_len to avoid hw access host memory */ - hinic_hwif_write_reg(eq->hwdev->hwif, - HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); - } else { - (void)set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); - } + /* clear eq_len to avoid hw access host memory */ + hinic_hwif_write_reg(eq->hwdev->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); /* update cons_idx to avoid invalid interrupt */ eq->cons_idx = 
(u16)hinic_hwif_read_reg(eq->hwdev->hwif, @@ -511,7 +392,7 @@ static void remove_eq(struct hinic_eq *eq) * @num_aeqs: number of aeq * @msix_entries: msix entries associated with the event queues * Return: 0 - Success, Negative - failure - **/ + */ static int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries) @@ -529,9 +410,9 @@ static void remove_eq(struct hinic_eq *eq) aeqs->num_aeqs = num_aeqs; for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) { - err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, - HINIC_DEFAULT_AEQ_LEN, HINIC_AEQ, - HINIC_EQ_PAGE_SIZE, &msix_entries[q_id]); + err = init_aeq(&aeqs->aeq[q_id], hwdev, q_id, + HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, + &msix_entries[q_id]); if (err) { PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id); goto init_aeq_err; @@ -542,7 +423,7 @@ static void remove_eq(struct hinic_eq *eq) init_aeq_err: for (i = 0; i < q_id; i++) - remove_eq(&aeqs->aeq[i]); + remove_aeq(&aeqs->aeq[i]); kfree(aeqs); @@ -552,7 +433,7 @@ static void remove_eq(struct hinic_eq *eq) /** * hinic_aeqs_free - free all the aeqs * @hwdev: the pointer to the private hardware device object - **/ + */ static void hinic_aeqs_free(struct hinic_hwdev *hwdev) { struct hinic_aeqs *aeqs = hwdev->aeqs; @@ -560,7 +441,7 @@ static void hinic_aeqs_free(struct hinic_hwdev *hwdev) /* hinic pmd use aeq[1~3], aeq[0] used in kernel only */ for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs ; q_id++) - remove_eq(&aeqs->aeq[q_id]); + remove_aeq(&aeqs->aeq[q_id]); kfree(aeqs); } diff --git a/drivers/net/hinic/base/hinic_pmd_eqs.h b/drivers/net/hinic/base/hinic_pmd_eqs.h index fdb9854..16046ec 100644 --- a/drivers/net/hinic/base/hinic_pmd_eqs.h +++ b/drivers/net/hinic/base/hinic_pmd_eqs.h @@ -21,9 +21,6 @@ #define HINIC_DEFAULT_AEQ_LEN 64 -#define HINIC_RECV_NEXT_AEQE HINIC_ERROR -#define HINIC_RECV_DONE HINIC_OK - #define GET_EQ_ELEMENT(eq, idx) \ (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \ (((u32)(idx) & 
((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) @@ -58,7 +55,7 @@ enum hinic_aeq_type { struct hinic_eq { struct hinic_hwdev *hwdev; u16 q_id; - enum hinic_eq_type type; + u16 type; u32 page_size; u16 eq_len; From patchwork Mon Sep 30 14:00:56 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60205 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id D280F1BEEB; Mon, 30 Sep 2019 15:50:32 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 140861BE3D for ; Mon, 30 Sep 2019 15:50:31 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id ABD207B2CE7AF707F419 for ; Mon, 30 Sep 2019 21:50:29 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:50:22 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:56 +0800 Message-ID: <8dea773bd37c783e349692afae166cfa1cf9a8b5.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 18/19] net/hinic: optimize RX performance X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch optimizes receive packet performance on the ARM platform. 
Signed-off-by: Xiaoyun wang --- drivers/net/hinic/hinic_pmd_rx.c | 5 +---- drivers/net/hinic/hinic_pmd_rx.h | 11 +++++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c index 37b4f5c..b3c2eb4 100644 --- a/drivers/net/hinic/hinic_pmd_rx.c +++ b/drivers/net/hinic/hinic_pmd_rx.c @@ -972,13 +972,10 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts) while (pkts < nb_pkts) { /* 2. current ci is done */ rx_cqe = &rxq->rx_cqe[sw_ci]; - status = rx_cqe->status; + status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE); if (!HINIC_GET_RX_DONE_BE(status)) break; - /* read other cqe member after status */ - rte_rmb(); - /* convert cqe and get packet length */ hinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe); vlan_len = cqe.vlan_len; diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h index fe2735b..fa27e91 100644 --- a/drivers/net/hinic/hinic_pmd_rx.h +++ b/drivers/net/hinic/hinic_pmd_rx.h @@ -28,6 +28,7 @@ struct hinic_rq_ctrl { u32 ctrl_fmt; }; +#if defined(__X86_64_SSE__) struct hinic_rq_cqe { u32 status; u32 vlan_len; @@ -36,6 +37,16 @@ struct hinic_rq_cqe { u32 rsvd[4]; }; +#elif defined(__ARM64_NEON__) +struct hinic_rq_cqe { + u32 status; + u32 vlan_len; + u32 offload_type; + u32 rss_hash; + + u32 rsvd[4]; +} __rte_cache_aligned; +#endif struct hinic_rq_cqe_sect { struct hinic_sge sge; From patchwork Mon Sep 30 14:00:57 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Wangxiaoyun (Cloud)" X-Patchwork-Id: 60206 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C1D061BE96; Mon, 30 Sep 2019 15:50:43 +0200 (CEST) Received: from huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org 
(Postfix) with ESMTP id 284694CA7 for ; Mon, 30 Sep 2019 15:50:41 +0200 (CEST) Received: from DGGEMS413-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id C9F2BEA6EDE5FF08ABD9 for ; Mon, 30 Sep 2019 21:50:39 +0800 (CST) Received: from tester.localdomain (10.175.119.39) by DGGEMS413-HUB.china.huawei.com (10.3.19.213) with Microsoft SMTP Server id 14.3.439.0; Mon, 30 Sep 2019 21:50:31 +0800 From: Xiaoyun wang To: CC: , , , , , , , Xiaoyun wang Date: Mon, 30 Sep 2019 22:00:57 +0800 Message-ID: <6acc3c19963cd2ea85d394102bd5d3a5f3aed0b7.1569850827.git.cloud.wangxiaoyun@huawei.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: References: MIME-Version: 1.0 X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v3 19/19] net/hinic: add support for getting rxq or txq info X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds support for getting rxq or txq info. Signed-off-by: Xiaoyun wang --- drivers/net/hinic/hinic_pmd_ethdev.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c index 765e661..1361a99 100644 --- a/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/drivers/net/hinic/hinic_pmd_ethdev.c @@ -53,6 +53,11 @@ #define HINIC_MIN_RX_BUF_SIZE 1024 #define HINIC_MAX_UC_MAC_ADDRS 128 #define HINIC_MAX_MC_MAC_ADDRS 2048 + +#define HINIC_DEFAULT_BURST_SIZE 32 +#define HINIC_DEFAULT_NB_QUEUES 1 +#define HINIC_DEFAULT_RING_SIZE 1024 + /* * vlan_id is a 12 bit number. * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. 
@@ -753,6 +758,14 @@ static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) info->rx_desc_lim = hinic_rx_desc_lim; info->tx_desc_lim = hinic_tx_desc_lim; + /* Driver-preferred Rx/Tx parameters */ + info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE; + info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE; + info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES; + info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES; + info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE; + info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE; + return 0; } @@ -2157,6 +2170,23 @@ static int hinic_dev_xstats_get(struct rte_eth_dev *dev, return count; } +static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct hinic_rxq *rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->nb_desc = rxq->q_depth; +} + +static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct hinic_txq *txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->q_depth; +} + /** * DPDK callback to retrieve names of extended device statistics * @@ -2873,6 +2903,8 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .xstats_get = hinic_dev_xstats_get, .xstats_reset = hinic_dev_xstats_reset, .xstats_get_names = hinic_dev_xstats_get_names, + .rxq_info_get = hinic_rxq_info_get, + .txq_info_get = hinic_txq_info_get, .mac_addr_set = hinic_set_mac_addr, .mac_addr_remove = hinic_mac_addr_remove, .mac_addr_add = hinic_mac_addr_add, @@ -2906,6 +2938,8 @@ static void hinic_dev_close(struct rte_eth_dev *dev) .xstats_get = hinic_dev_xstats_get, .xstats_reset = hinic_dev_xstats_reset, .xstats_get_names = hinic_dev_xstats_get_names, + .rxq_info_get = hinic_rxq_info_get, + .txq_info_get = hinic_txq_info_get, .mac_addr_set = hinic_set_mac_addr, .mac_addr_remove = hinic_mac_addr_remove, .mac_addr_add = 
hinic_mac_addr_add,