From patchwork Fri Dec 6 05:57:01 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149052 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2824845E16; Fri, 6 Dec 2024 07:05:16 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id AA83640E32; Fri, 6 Dec 2024 07:04:52 +0100 (CET) Received: from mxct.zte.com.cn (mxct.zte.com.cn [183.62.165.209]) by mails.dpdk.org (Postfix) with ESMTP id 109BC40A87 for ; Fri, 6 Dec 2024 07:04:49 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxct.zte.com.cn (FangMail) with ESMTPS id 4Y4LM01f96z50FY3; Fri, 6 Dec 2024 14:04:44 +0800 (CST) Received: from szxlzmapp04.zte.com.cn ([10.5.231.166]) by mse-fl2.zte.com.cn with SMTP id 4B664Ltn033144; Fri, 6 Dec 2024 14:04:21 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:23 +0800 X-Zmail-TransId: 3e81675293e7001-7138a From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 01/15] net/zxdh: zxdh np init implementation Date: Fri, 6 Dec 2024 13:57:01 +0800 Message-ID: <20241206055715.506961-2-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664Ltn033144 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 675293FC.002/4Y4LM01f96z50FY3 
X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org (np)network Processor initialize resources in host, and initialize a channel for some tables insert/get/del. Signed-off-by: Junlong Wang --- drivers/net/zxdh/meson.build | 1 + drivers/net/zxdh/zxdh_ethdev.c | 236 ++++++++++++++++++++-- drivers/net/zxdh/zxdh_ethdev.h | 28 +++ drivers/net/zxdh/zxdh_msg.c | 45 +++++ drivers/net/zxdh/zxdh_msg.h | 37 ++++ drivers/net/zxdh/zxdh_np.c | 347 +++++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_np.h | 198 +++++++++++++++++++ drivers/net/zxdh/zxdh_pci.c | 2 +- drivers/net/zxdh/zxdh_pci.h | 6 +- drivers/net/zxdh/zxdh_queue.c | 2 +- drivers/net/zxdh/zxdh_queue.h | 14 +- 11 files changed, 883 insertions(+), 33 deletions(-) create mode 100644 drivers/net/zxdh/zxdh_np.c create mode 100644 drivers/net/zxdh/zxdh_np.h -- 2.27.0 diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build index c9960f4c73..ab24a3145c 100644 --- a/drivers/net/zxdh/meson.build +++ b/drivers/net/zxdh/meson.build @@ -19,4 +19,5 @@ sources = files( 'zxdh_msg.c', 'zxdh_pci.c', 'zxdh_queue.c', + 'zxdh_np.c', ) diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index c786198535..740e579da8 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "zxdh_ethdev.h" #include "zxdh_logs.h" @@ -12,8 +13,15 @@ #include "zxdh_msg.h" #include "zxdh_common.h" #include "zxdh_queue.h" +#include "zxdh_np.h" struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS]; +struct zxdh_shared_data *zxdh_shared_data; +const char *ZXDH_PMD_SHARED_DATA_MZ = "zxdh_pmd_shared_data"; +rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER; +struct zxdh_dtb_shared_data g_dtb_data = {0}; + +#define ZXDH_INVALID_DTBQUE 0xFFFF uint16_t 
zxdh_vport_to_vfid(union zxdh_virport_num v) @@ -406,14 +414,14 @@ zxdh_features_update(struct zxdh_hw *hw, ZXDH_VTPCI_OPS(hw)->set_features(hw, req_features); if ((rx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) && - !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) { + !zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) { PMD_DRV_LOG(ERR, "rx checksum not available on this host"); return -ENOTSUP; } if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) && - (!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) || - !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) { + (!zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) || + !zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) { PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host"); return -ENOTSUP; } @@ -421,20 +429,20 @@ zxdh_features_update(struct zxdh_hw *hw, } static bool -rx_offload_enabled(struct zxdh_hw *hw) +zxdh_rx_offload_enabled(struct zxdh_hw *hw) { - return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) || - vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) || - vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6); + return zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) || + zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) || + zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6); } static bool -tx_offload_enabled(struct zxdh_hw *hw) +zxdh_tx_offload_enabled(struct zxdh_hw *hw) { - return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) || - vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) || - vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) || - vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO); + return zxdh_pci_with_feature(hw, ZXDH_NET_F_CSUM) || + zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) || + zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) || + zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_UFO); } static void @@ -466,7 +474,7 @@ zxdh_dev_free_mbufs(struct rte_eth_dev *dev) continue; PMD_DRV_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i); - while ((buf = 
zxdh_virtqueue_detach_unused(vq)) != NULL) + while ((buf = zxdh_queue_detach_unused(vq)) != NULL) rte_pktmbuf_free(buf); } } @@ -550,9 +558,9 @@ zxdh_init_vring(struct zxdh_virtqueue *vq) vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); vq->vq_free_cnt = vq->vq_nentries; memset(vq->vq_descx, 0, sizeof(struct zxdh_vq_desc_extra) * vq->vq_nentries); - vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size); - vring_desc_init_packed(vq, size); - virtqueue_disable_intr(vq); + zxdh_vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size); + zxdh_vring_desc_init_packed(vq, size); + zxdh_queue_disable_intr(vq); } static int32_t @@ -621,7 +629,7 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx) /* * Reserve a memzone for vring elements */ - size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN); + size = zxdh_vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN); vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN); PMD_DRV_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size); @@ -694,7 +702,8 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx) /* first indirect descriptor is always the tx header */ struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir; - vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir)); + zxdh_vring_desc_init_indirect_packed(start_dp, + RTE_DIM(txr[i].tx_packed_indir)); start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) + offsetof(struct zxdh_tx_region, tx_hdr); /* length will be updated to actual pi hdr size when xmit pkt */ @@ -792,8 +801,8 @@ zxdh_dev_configure(struct rte_eth_dev *dev) } } - hw->has_tx_offload = tx_offload_enabled(hw); - hw->has_rx_offload = rx_offload_enabled(hw); + hw->has_tx_offload = zxdh_tx_offload_enabled(hw); + hw->has_rx_offload = zxdh_rx_offload_enabled(hw); nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues; if (nr_vq == hw->queue_num) @@ -881,7 +890,7 @@ 
zxdh_init_device(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, ð_dev->data->mac_addrs[0]); /* If host does not support both status and MSI-X then disable LSC */ - if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE) + if (zxdh_pci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE) eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; else eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; @@ -913,6 +922,183 @@ zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw) return 0; } +static int +zxdh_np_dtb_res_init(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_bar_offset_params param = {0}; + struct zxdh_bar_offset_res res = {0}; + int ret = 0; + + if (g_dtb_data.init_done) { + PMD_DRV_LOG(DEBUG, "DTB res already init done, dev %s no need init", + dev->device->name); + return 0; + } + g_dtb_data.queueid = ZXDH_INVALID_DTBQUE; + g_dtb_data.bind_device = dev; + g_dtb_data.dev_refcnt++; + g_dtb_data.init_done = 1; + + ZXDH_DEV_INIT_CTRL_T *dpp_ctrl = rte_malloc(NULL, sizeof(*dpp_ctrl) + + sizeof(ZXDH_DTB_ADDR_INFO_T) * 256, 0); + + if (dpp_ctrl == NULL) { + PMD_DRV_LOG(ERR, "dev %s annot allocate memory for dpp_ctrl", dev->device->name); + ret = -ENOMEM; + goto free_res; + } + memset(dpp_ctrl, 0, sizeof(*dpp_ctrl) + sizeof(ZXDH_DTB_ADDR_INFO_T) * 256); + + dpp_ctrl->queue_id = 0xff; + dpp_ctrl->vport = hw->vport.vport; + dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC; + strcpy((char *)dpp_ctrl->port_name, dev->device->name); + dpp_ctrl->pcie_vir_addr = (uint32_t)hw->bar_addr[0]; + + param.pcie_id = hw->pcie_id; + param.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET; + param.type = ZXDH_URI_NP; + + ret = zxdh_get_bar_offset(¶m, &res); + if (ret) { + PMD_DRV_LOG(ERR, "dev %s get npbar offset failed", dev->device->name); + goto free_res; + } + dpp_ctrl->np_bar_len = res.bar_length; + dpp_ctrl->np_bar_offset = res.bar_offset; + + if 
(!g_dtb_data.dtb_table_conf_mz) { + const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz", + ZXDH_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE); + + if (conf_mz == NULL) { + PMD_DRV_LOG(ERR, + "dev %s annot allocate memory for dtb table conf", + dev->device->name); + ret = -ENOMEM; + goto free_res; + } + dpp_ctrl->down_vir_addr = conf_mz->addr_64; + dpp_ctrl->down_phy_addr = conf_mz->iova; + g_dtb_data.dtb_table_conf_mz = conf_mz; + } + + if (!g_dtb_data.dtb_table_dump_mz) { + const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz", + ZXDH_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE); + + if (dump_mz == NULL) { + PMD_DRV_LOG(ERR, + "dev %s Cannot allocate memory for dtb table dump", + dev->device->name); + ret = -ENOMEM; + goto free_res; + } + dpp_ctrl->dump_vir_addr = dump_mz->addr_64; + dpp_ctrl->dump_phy_addr = dump_mz->iova; + g_dtb_data.dtb_table_dump_mz = dump_mz; + } + + ret = zxdh_np_host_init(0, dpp_ctrl); + if (ret) { + PMD_DRV_LOG(ERR, "dev %s dpp host np init failed .ret %d", dev->device->name, ret); + goto free_res; + } + + PMD_DRV_LOG(DEBUG, "dev %s dpp host np init ok.dtb queue %d", + dev->device->name, dpp_ctrl->queue_id); + g_dtb_data.queueid = dpp_ctrl->queue_id; + rte_free(dpp_ctrl); + return 0; + +free_res: + rte_free(dpp_ctrl); + return ret; +} + +static int +zxdh_init_shared_data(void) +{ + const struct rte_memzone *mz; + int ret = 0; + + rte_spinlock_lock(&zxdh_shared_data_lock); + if (zxdh_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate shared memory. 
*/ + mz = rte_memzone_reserve(ZXDH_PMD_SHARED_DATA_MZ, + sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Cannot allocate zxdh shared data"); + ret = -rte_errno; + goto error; + } + zxdh_shared_data = mz->addr; + memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data)); + rte_spinlock_init(&zxdh_shared_data->lock); + } else { /* Lookup allocated shared memory. */ + mz = rte_memzone_lookup(ZXDH_PMD_SHARED_DATA_MZ); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Cannot attach zxdh shared data"); + ret = -rte_errno; + goto error; + } + zxdh_shared_data = mz->addr; + } + } + +error: + rte_spinlock_unlock(&zxdh_shared_data_lock); + return ret; +} + +static int +zxdh_init_once(void) +{ + struct zxdh_shared_data *sd = zxdh_shared_data; + int ret = 0; + + if (zxdh_init_shared_data()) + return -1; + + rte_spinlock_lock(&sd->lock); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (!sd->init_done) { + ++sd->secondary_cnt; + sd->init_done = true; + } + goto out; + } + /* RTE_PROC_PRIMARY */ + if (!sd->init_done) + sd->init_done = true; + sd->dev_refcnt++; +out: + rte_spinlock_unlock(&sd->lock); + return ret; +} + +static int +zxdh_np_init(struct rte_eth_dev *eth_dev) +{ + struct zxdh_hw *hw = eth_dev->data->dev_private; + int ret = 0; + + if (hw->is_pf) { + ret = zxdh_np_dtb_res_init(eth_dev); + if (ret) { + PMD_DRV_LOG(ERR, "np dtb init failed, ret:%d ", ret); + return ret; + } + } + if (zxdh_shared_data != NULL) + zxdh_shared_data->np_init_done = 1; + + PMD_DRV_LOG(DEBUG, "np init ok "); + return 0; +} + static int zxdh_eth_dev_init(struct rte_eth_dev *eth_dev) { @@ -950,6 +1136,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev) hw->is_pf = 1; } + ret = zxdh_init_once(); + if (ret != 0) + goto err_zxdh_init; + ret = zxdh_init_device(eth_dev); if (ret < 0) goto err_zxdh_init; @@ -977,6 +1167,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev) if (ret != 0) goto err_zxdh_init; + ret = zxdh_np_init(eth_dev); + if (ret) + goto 
err_zxdh_init; + ret = zxdh_configure_intr(eth_dev); if (ret != 0) goto err_zxdh_init; diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h index 7658cbb461..6fdb5fb767 100644 --- a/drivers/net/zxdh/zxdh_ethdev.h +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -35,6 +35,10 @@ #define ZXDH_MBUF_BURST_SZ 64 +#define ZXDH_MAX_BASE_DTB_TABLE_COUNT 30 +#define ZXDH_DTB_TABLE_DUMP_SIZE (32 * (16 + 16 * 1024)) +#define ZXDH_DTB_TABLE_CONF_SIZE (32 * (16 + 16 * 1024)) + union zxdh_virport_num { uint16_t vport; struct { @@ -89,6 +93,30 @@ struct zxdh_hw { uint8_t has_rx_offload; }; +struct zxdh_dtb_shared_data { + int init_done; + char name[32]; + uint16_t queueid; + uint16_t vport; + uint32_t vector; + const struct rte_memzone *dtb_table_conf_mz; + const struct rte_memzone *dtb_table_dump_mz; + const struct rte_memzone *dtb_table_bulk_dump_mz[ZXDH_MAX_BASE_DTB_TABLE_COUNT]; + struct rte_eth_dev *bind_device; + uint32_t dev_refcnt; +}; + +/* Shared data between primary and secondary processes. */ +struct zxdh_shared_data { + rte_spinlock_t lock; /* Global spinlock for primary and secondary processes. */ + int32_t init_done; /* Whether primary has done initialization. */ + unsigned int secondary_cnt; /* Number of secondary processes init'd. 
*/ + + int32_t np_init_done; + uint32_t dev_refcnt; + struct zxdh_dtb_shared_data *dtb_data; +}; + uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v); #endif /* ZXDH_ETHDEV_H */ diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c index 53cf972f86..a0a005b178 100644 --- a/drivers/net/zxdh/zxdh_msg.c +++ b/drivers/net/zxdh/zxdh_msg.c @@ -1035,3 +1035,48 @@ zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev) rte_free(recved_msg); return ZXDH_BAR_MSG_OK; } + +int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras, + struct zxdh_bar_offset_res *res) +{ + uint16_t check_token = 0; + uint16_t sum_res = 0; + int ret = 0; + + if (!paras) + return ZXDH_BAR_MSG_ERR_NULL; + + struct zxdh_offset_get_msg send_msg = { + .pcie_id = paras->pcie_id, + .type = paras->type, + }; + struct zxdh_pci_bar_msg in = {0}; + + in.payload_addr = &send_msg; + in.payload_len = sizeof(send_msg); + in.virt_addr = paras->virt_addr; + in.src = ZXDH_MSG_CHAN_END_PF; + in.dst = ZXDH_MSG_CHAN_END_RISC; + in.module_id = ZXDH_BAR_MODULE_OFFSET_GET; + in.src_pcieid = paras->pcie_id; + + struct zxdh_bar_recv_msg recv_msg = {0}; + struct zxdh_msg_recviver_mem result = { + .recv_buffer = &recv_msg, + .buffer_len = sizeof(recv_msg), + }; + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != ZXDH_BAR_MSG_OK) + return -ret; + + check_token = recv_msg.offset_reps.check; + sum_res = zxdh_bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg)); + + if (check_token != sum_res) { + PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x", sum_res, check_token); + return ZXDH_BAR_MSG_ERR_REPLY; + } + res->bar_offset = recv_msg.offset_reps.offset; + res->bar_length = recv_msg.offset_reps.length; + return ZXDH_BAR_MSG_OK; +} diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 530ee406b1..fbc79e8f9d 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -131,6 +131,26 @@ enum ZXDH_TBL_MSG_TYPE { ZXDH_TBL_TYPE_NON, }; 
+enum pciebar_layout_type { + ZXDH_URI_VQM = 0, + ZXDH_URI_SPINLOCK = 1, + ZXDH_URI_FWCAP = 2, + ZXDH_URI_FWSHR = 3, + ZXDH_URI_DRS_SEC = 4, + ZXDH_URI_RSV = 5, + ZXDH_URI_CTRLCH = 6, + ZXDH_URI_1588 = 7, + ZXDH_URI_QBV = 8, + ZXDH_URI_MACPCS = 9, + ZXDH_URI_RDMA = 10, + ZXDH_URI_MNP = 11, + ZXDH_URI_MSPM = 12, + ZXDH_URI_MVQM = 13, + ZXDH_URI_MDPI = 14, + ZXDH_URI_NP = 15, + ZXDH_URI_MAX, +}; + struct zxdh_msix_para { uint16_t pcie_id; uint16_t vector_risc; @@ -174,6 +194,17 @@ struct zxdh_bar_offset_reps { uint32_t length; } __rte_packed; +struct zxdh_bar_offset_params { + uint64_t virt_addr; /* Bar space control space virtual address */ + uint16_t pcie_id; + uint16_t type; /* Module types corresponding to PCIBAR planning */ +}; + +struct zxdh_bar_offset_res { + uint32_t bar_offset; + uint32_t bar_length; +}; + struct zxdh_bar_recv_msg { uint8_t reps_ok; uint16_t reps_len; @@ -204,9 +235,15 @@ struct zxdh_bar_msg_header { uint16_t dst_pcieid; /* used in PF-->VF */ }; +struct zxdh_offset_get_msg { + uint16_t pcie_id; + uint16_t type; +}; + typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, void *reps_buffer, uint16_t *reps_len, void *dev); +int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras, struct zxdh_bar_offset_res *res); int zxdh_msg_chan_init(void); int zxdh_bar_msg_chan_exit(void); int zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev); diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c new file mode 100644 index 0000000000..9c50039fb1 --- /dev/null +++ b/drivers/net/zxdh/zxdh_np.c @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include + +#include +#include + +#include "zxdh_np.h" +#include "zxdh_logs.h" + +static uint64_t g_np_bar_offset; +static ZXDH_DEV_MGR_T g_dev_mgr = {0}; +static ZXDH_SDT_MGR_T g_sdt_mgr = {0}; +ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX]; +ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX] = 
{NULL}; + +#define ZXDH_COMM_ASSERT(x) assert(x) +#define ZXDH_SDT_MGR_PTR_GET() (&g_sdt_mgr) +#define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id]) + +#define ZXDH_COMM_CHECK_DEV_POINT(dev_id, point)\ +do {\ + if (NULL == (point)) {\ + PMD_DRV_LOG(ERR, "dev: %d ZXIC %s:%d[Error:POINT NULL] !"\ + "FUNCTION : %s!", (dev_id), __FILE__, __LINE__, __func__);\ + ZXDH_COMM_ASSERT(0);\ + } \ +} while (0) + +#define ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, becall)\ +do {\ + if ((rc) != 0) {\ + PMD_DRV_LOG(ERR, "dev: %d ZXIC %s:%d !"\ + "-- %s Call %s Fail!", (dev_id), __FILE__, __LINE__, __func__, becall);\ + ZXDH_COMM_ASSERT(0);\ + } \ +} while (0) + +#define ZXDH_COMM_CHECK_POINT_NO_ASSERT(point)\ +do {\ + if ((point) == NULL) {\ + PMD_DRV_LOG(ERR, "ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!",\ + __FILE__, __LINE__, __func__);\ + } \ +} while (0) + +#define ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, becall)\ +do {\ + if ((rc) != 0) {\ + PMD_DRV_LOG(ERR, "ZXIC %s:%d !-- %s Call %s"\ + " Fail!", __FILE__, __LINE__, __func__, becall);\ + } \ +} while (0) + +#define ZXDH_COMM_CHECK_RC(rc, becall)\ +do {\ + if ((rc) != 0) {\ + PMD_DRV_LOG(ERR, "ZXIC %s:%d!-- %s Call %s "\ + "Fail!", __FILE__, __LINE__, __func__, becall);\ + ZXDH_COMM_ASSERT(0);\ + } \ +} while (0) + +static uint32_t +zxdh_np_dev_init(void) +{ + if (g_dev_mgr.is_init) { + PMD_DRV_LOG(ERR, "Dev is already initialized."); + return 0; + } + + g_dev_mgr.device_num = 0; + g_dev_mgr.is_init = 1; + + return 0; +} + +static uint32_t +zxdh_np_dev_add(uint32_t dev_id, ZXDH_DEV_TYPE_E dev_type, + ZXDH_DEV_ACCESS_TYPE_E access_type, uint64_t pcie_addr, + uint64_t riscv_addr, uint64_t dma_vir_addr, + uint64_t dma_phy_addr) +{ + ZXDH_DEV_CFG_T *p_dev_info = NULL; + ZXDH_DEV_MGR_T *p_dev_mgr = NULL; + + p_dev_mgr = &g_dev_mgr; + if (!p_dev_mgr->is_init) { + PMD_DRV_LOG(ERR, "ErrorCode[ 0x%x]: Device Manager is not init!!!", + ZXDH_RC_DEV_MGR_NOT_INIT); + return ZXDH_RC_DEV_MGR_NOT_INIT; + } + + if 
(p_dev_mgr->p_dev_array[dev_id] != NULL) { + /* device is already exist. */ + PMD_DRV_LOG(ERR, "Device is added again!!!"); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + } else { + /* device is new. */ + p_dev_info = (ZXDH_DEV_CFG_T *)malloc(sizeof(ZXDH_DEV_CFG_T)); + ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + p_dev_mgr->p_dev_array[dev_id] = p_dev_info; + p_dev_mgr->device_num++; + } + + p_dev_info->device_id = dev_id; + p_dev_info->dev_type = dev_type; + p_dev_info->access_type = access_type; + p_dev_info->pcie_addr = pcie_addr; + p_dev_info->riscv_addr = riscv_addr; + p_dev_info->dma_vir_addr = dma_vir_addr; + p_dev_info->dma_phy_addr = dma_phy_addr; + + return 0; +} + +static uint32_t +zxdh_np_dev_agent_status_set(uint32_t dev_id, uint32_t agent_flag) +{ + ZXDH_DEV_MGR_T *p_dev_mgr = NULL; + ZXDH_DEV_CFG_T *p_dev_info = NULL; + + p_dev_mgr = &g_dev_mgr; + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + + if (p_dev_info == NULL) + return ZXDH_DEV_TYPE_INVALID; + p_dev_info->agent_flag = agent_flag; + + return 0; +} + +static uint32_t +zxdh_np_sdt_mgr_init(void) +{ + if (!g_sdt_mgr.is_init) { + g_sdt_mgr.channel_num = 0; + g_sdt_mgr.is_init = 1; + memset(g_sdt_mgr.sdt_tbl_array, 0, ZXDH_DEV_CHANNEL_MAX * + sizeof(ZXDH_SDT_SOFT_TABLE_T *)); + } + + return 0; +} + +static uint32_t +zxdh_np_sdt_mgr_create(uint32_t dev_id) +{ + ZXDH_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL; + ZXDH_SDT_MGR_T *p_sdt_mgr = NULL; + + p_sdt_mgr = ZXDH_SDT_MGR_PTR_GET(); + + if (ZXDH_SDT_SOFT_TBL_GET(dev_id) == NULL) { + p_sdt_tbl_temp = malloc(sizeof(ZXDH_SDT_SOFT_TABLE_T)); + + p_sdt_tbl_temp->device_id = dev_id; + memset(p_sdt_tbl_temp->sdt_array, 0, ZXDH_DEV_SDT_ID_MAX * sizeof(ZXDH_SDT_ITEM_T)); + + ZXDH_SDT_SOFT_TBL_GET(dev_id) = p_sdt_tbl_temp; + + p_sdt_mgr->channel_num++; + } else { + PMD_DRV_LOG(ERR, "Error: %s for dev[%d]" + "is called repeatedly!", __func__, dev_id); + return -1; + } + + return 0; +} + +static uint32_t +zxdh_np_sdt_init(uint32_t dev_num, uint32_t 
*dev_id_array) +{ + uint32_t rc = 0; + uint32_t i = 0; + + zxdh_np_sdt_mgr_init(); + + for (i = 0; i < dev_num; i++) { + rc = zxdh_np_sdt_mgr_create(dev_id_array[i]); + ZXDH_COMM_CHECK_RC(rc, "zxdh_sdt_mgr_create"); + } + + return 0; +} + +static uint32_t +zxdh_np_ppu_parse_cls_bitmap(uint32_t dev_id, + uint32_t bitmap) +{ + uint32_t cls_id = 0; + uint32_t mem_id = 0; + uint32_t cls_use = 0; + uint32_t instr_mem = 0; + + for (cls_id = 0; cls_id < ZXDH_PPU_CLUSTER_NUM; cls_id++) { + cls_use = (bitmap >> cls_id) & 0x1; + g_ppu_cls_bit_map[dev_id].cls_use[cls_id] = cls_use; + } + + for (mem_id = 0; mem_id < ZXDH_PPU_INSTR_MEM_NUM; mem_id++) { + instr_mem = (bitmap >> (mem_id * 2)) & 0x3; + g_ppu_cls_bit_map[dev_id].instr_mem[mem_id] = ((instr_mem > 0) ? 1 : 0); + } + + return 0; +} + +static ZXDH_DTB_MGR_T * +zxdh_np_dtb_mgr_get(uint32_t dev_id) +{ + if (dev_id >= ZXDH_DEV_CHANNEL_MAX) + return NULL; + else + return p_dpp_dtb_mgr[dev_id]; +} + +static uint32_t +zxdh_np_dtb_soft_init(uint32_t dev_id) +{ + ZXDH_DTB_MGR_T *p_dtb_mgr = NULL; + + p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id); + if (p_dtb_mgr == NULL) { + p_dpp_dtb_mgr[dev_id] = (ZXDH_DTB_MGR_T *)malloc(sizeof(ZXDH_DTB_MGR_T)); + memset(p_dpp_dtb_mgr[dev_id], 0, sizeof(ZXDH_DTB_MGR_T)); + p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id); + if (p_dtb_mgr == NULL) + return 1; + } + + return 0; +} + +static unsigned int +zxdh_np_base_soft_init(unsigned int dev_id, ZXDH_SYS_INIT_CTRL_T *p_init_ctrl) +{ + unsigned int rt = 0; + unsigned int access_type = 0; + unsigned int dev_id_array[ZXDH_DEV_CHANNEL_MAX] = {0}; + unsigned int agent_flag = 0; + + rt = zxdh_np_dev_init(); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_init"); + + if (p_init_ctrl->flags & ZXDH_INIT_FLAG_ACCESS_TYPE) + access_type = ZXDH_DEV_ACCESS_TYPE_RISCV; + else + access_type = ZXDH_DEV_ACCESS_TYPE_PCIE; + + if (p_init_ctrl->flags & ZXDH_INIT_FLAG_AGENT_FLAG) + agent_flag = ZXDH_DEV_AGENT_ENABLE; + else + agent_flag = ZXDH_DEV_AGENT_DISABLE; + + rt = 
zxdh_np_dev_add(dev_id, + p_init_ctrl->device_type, + access_type, + p_init_ctrl->pcie_vir_baddr, + p_init_ctrl->riscv_vir_baddr, + p_init_ctrl->dma_vir_baddr, + p_init_ctrl->dma_phy_baddr); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_add"); + + rt = zxdh_np_dev_agent_status_set(dev_id, agent_flag); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_agent_status_set"); + + dev_id_array[0] = dev_id; + rt = zxdh_np_sdt_init(1, dev_id_array); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_sdt_init"); + + rt = zxdh_np_ppu_parse_cls_bitmap(dev_id, ZXDH_PPU_CLS_ALL_START); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_ppu_parse_cls_bitmap"); + + rt = zxdh_np_dtb_soft_init(dev_id); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dtb_soft_init"); + + return 0; +} + +static uint32_t +zxdh_np_dev_vport_set(uint32_t dev_id, uint32_t vport) +{ + ZXDH_DEV_MGR_T *p_dev_mgr = NULL; + ZXDH_DEV_CFG_T *p_dev_info = NULL; + + p_dev_mgr = &g_dev_mgr; + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + p_dev_info->vport = vport; + + return 0; +} + +static uint32_t +zxdh_np_dev_agent_addr_set(uint32_t dev_id, uint64_t agent_addr) +{ + ZXDH_DEV_MGR_T *p_dev_mgr = NULL; + ZXDH_DEV_CFG_T *p_dev_info = NULL; + + p_dev_mgr = &g_dev_mgr; + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + p_dev_info->agent_addr = agent_addr; + + return 0; +} + +static uint64_t +zxdh_np_addr_calc(uint64_t pcie_vir_baddr, uint32_t bar_offset) +{ + uint64_t np_addr = 0; + + np_addr = ((pcie_vir_baddr + bar_offset) > ZXDH_PCIE_NP_MEM_SIZE) + ? 
(pcie_vir_baddr + bar_offset - ZXDH_PCIE_NP_MEM_SIZE) : 0; + g_np_bar_offset = bar_offset; + + return np_addr; +} + +int +zxdh_np_host_init(uint32_t dev_id, + ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl) +{ + unsigned int rc = 0; + uint64_t agent_addr = 0; + ZXDH_SYS_INIT_CTRL_T sys_init_ctrl = {0}; + + ZXDH_COMM_CHECK_POINT_NO_ASSERT(p_dev_init_ctrl); + + sys_init_ctrl.flags = (ZXDH_DEV_ACCESS_TYPE_PCIE << 0) | (ZXDH_DEV_AGENT_ENABLE << 10); + sys_init_ctrl.pcie_vir_baddr = zxdh_np_addr_calc(p_dev_init_ctrl->pcie_vir_addr, + p_dev_init_ctrl->np_bar_offset); + sys_init_ctrl.device_type = ZXDH_DEV_TYPE_CHIP; + rc = zxdh_np_base_soft_init(dev_id, &sys_init_ctrl); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_base_soft_init"); + + rc = zxdh_np_dev_vport_set(dev_id, p_dev_init_ctrl->vport); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_vport_set"); + + agent_addr = ZXDH_PCIE_AGENT_ADDR_OFFSET + p_dev_init_ctrl->pcie_vir_addr; + rc = zxdh_np_dev_agent_addr_set(dev_id, agent_addr); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_agent_addr_set"); + return 0; +} diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h new file mode 100644 index 0000000000..573eafe796 --- /dev/null +++ b/drivers/net/zxdh/zxdh_np.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 ZTE Corporation + */ + +#ifndef ZXDH_NP_H +#define ZXDH_NP_H + +#include + +#define ZXDH_PORT_NAME_MAX (32) +#define ZXDH_DEV_CHANNEL_MAX (2) +#define ZXDH_DEV_SDT_ID_MAX (256U) +/*DTB*/ +#define ZXDH_DTB_QUEUE_ITEM_NUM_MAX (32) +#define ZXDH_DTB_QUEUE_NUM_MAX (128) + +#define ZXDH_PPU_CLS_ALL_START (0x3F) +#define ZXDH_PPU_CLUSTER_NUM (6) +#define ZXDH_PPU_INSTR_MEM_NUM (3) +#define ZXDH_SDT_CFG_LEN (2) + +#define ZXDH_RC_DEV_BASE (0x600) +#define ZXDH_RC_DEV_PARA_INVALID (ZXDH_RC_DEV_BASE | 0x0) +#define ZXDH_RC_DEV_RANGE_INVALID (ZXDH_RC_DEV_BASE | 0x1) +#define ZXDH_RC_DEV_CALL_FUNC_FAIL (ZXDH_RC_DEV_BASE | 0x2) +#define ZXDH_RC_DEV_TYPE_INVALID (ZXDH_RC_DEV_BASE | 0x3) +#define 
ZXDH_RC_DEV_CONNECT_FAIL (ZXDH_RC_DEV_BASE | 0x4) +#define ZXDH_RC_DEV_MSG_INVALID (ZXDH_RC_DEV_BASE | 0x5) +#define ZXDH_RC_DEV_NOT_EXIST (ZXDH_RC_DEV_BASE | 0x6) +#define ZXDH_RC_DEV_MGR_NOT_INIT (ZXDH_RC_DEV_BASE | 0x7) +#define ZXDH_RC_DEV_CFG_NOT_INIT (ZXDH_RC_DEV_BASE | 0x8) + +#define ZXDH_SYS_VF_NP_BASE_OFFSET 0 +#define ZXDH_PCIE_DTB4K_ADDR_OFFSET (0x6000) +#define ZXDH_PCIE_NP_MEM_SIZE (0x2000000) +#define ZXDH_PCIE_AGENT_ADDR_OFFSET (0x2000) + +#define ZXDH_INIT_FLAG_ACCESS_TYPE (1 << 0) +#define ZXDH_INIT_FLAG_SERDES_DOWN_TP (1 << 1) +#define ZXDH_INIT_FLAG_DDR_BACKDOOR (1 << 2) +#define ZXDH_INIT_FLAG_SA_MODE (1 << 3) +#define ZXDH_INIT_FLAG_SA_MESH (1 << 4) +#define ZXDH_INIT_FLAG_SA_SERDES_MODE (1 << 5) +#define ZXDH_INIT_FLAG_INT_DEST_MODE (1 << 6) +#define ZXDH_INIT_FLAG_LIF0_MODE (1 << 7) +#define ZXDH_INIT_FLAG_DMA_ENABLE (1 << 8) +#define ZXDH_INIT_FLAG_TM_IMEM_FLAG (1 << 9) +#define ZXDH_INIT_FLAG_AGENT_FLAG (1 << 10) + +typedef enum zxdh_module_init_e { + ZXDH_MODULE_INIT_NPPU = 0, + ZXDH_MODULE_INIT_PPU, + ZXDH_MODULE_INIT_SE, + ZXDH_MODULE_INIT_ETM, + ZXDH_MODULE_INIT_DLB, + ZXDH_MODULE_INIT_TRPG, + ZXDH_MODULE_INIT_TSN, + ZXDH_MODULE_INIT_MAX +} ZXDH_MODULE_INIT_E; + +typedef enum zxdh_dev_type_e { + ZXDH_DEV_TYPE_SIM = 0, + ZXDH_DEV_TYPE_VCS = 1, + ZXDH_DEV_TYPE_CHIP = 2, + ZXDH_DEV_TYPE_FPGA = 3, + ZXDH_DEV_TYPE_PCIE_ACC = 4, + ZXDH_DEV_TYPE_INVALID, +} ZXDH_DEV_TYPE_E; + +typedef enum zxdh_dev_access_type_e { + ZXDH_DEV_ACCESS_TYPE_PCIE = 0, + ZXDH_DEV_ACCESS_TYPE_RISCV = 1, + ZXDH_DEV_ACCESS_TYPE_INVALID, +} ZXDH_DEV_ACCESS_TYPE_E; + +typedef enum zxdh_dev_agent_flag_e { + ZXDH_DEV_AGENT_DISABLE = 0, + ZXDH_DEV_AGENT_ENABLE = 1, + ZXDH_DEV_AGENT_INVALID, +} ZXDH_DEV_AGENT_FLAG_E; + +typedef struct zxdh_dtb_tab_up_user_addr_t { + uint32_t user_flag; + uint64_t phy_addr; + uint64_t vir_addr; +} ZXDH_DTB_TAB_UP_USER_ADDR_T; + +typedef struct zxdh_dtb_tab_up_info_t { + uint64_t start_phy_addr; + uint64_t start_vir_addr; + uint32_t 
item_size; + uint32_t wr_index; + uint32_t rd_index; + uint32_t data_len[ZXDH_DTB_QUEUE_ITEM_NUM_MAX]; + ZXDH_DTB_TAB_UP_USER_ADDR_T user_addr[ZXDH_DTB_QUEUE_ITEM_NUM_MAX]; +} ZXDH_DTB_TAB_UP_INFO_T; + +typedef struct zxdh_dtb_tab_down_info_t { + uint64_t start_phy_addr; + uint64_t start_vir_addr; + uint32_t item_size; + uint32_t wr_index; + uint32_t rd_index; +} ZXDH_DTB_TAB_DOWN_INFO_T; + +typedef struct zxdh_dtb_queue_info_t { + uint32_t init_flag; + uint32_t vport; + uint32_t vector; + ZXDH_DTB_TAB_UP_INFO_T tab_up; + ZXDH_DTB_TAB_DOWN_INFO_T tab_down; +} ZXDH_DTB_QUEUE_INFO_T; + +typedef struct zxdh_dtb_mgr_t { + ZXDH_DTB_QUEUE_INFO_T queue_info[ZXDH_DTB_QUEUE_NUM_MAX]; +} ZXDH_DTB_MGR_T; + +typedef struct zxdh_ppu_cls_bitmap_t { + uint32_t cls_use[ZXDH_PPU_CLUSTER_NUM]; + uint32_t instr_mem[ZXDH_PPU_INSTR_MEM_NUM]; +} ZXDH_PPU_CLS_BITMAP_T; + +typedef struct dpp_sdt_item_t { + uint32_t valid; + uint32_t table_cfg[ZXDH_SDT_CFG_LEN]; +} ZXDH_SDT_ITEM_T; + +typedef struct dpp_sdt_soft_table_t { + uint32_t device_id; + ZXDH_SDT_ITEM_T sdt_array[ZXDH_DEV_SDT_ID_MAX]; +} ZXDH_SDT_SOFT_TABLE_T; + +typedef struct zxdh_sys_init_ctrl_t { + ZXDH_DEV_TYPE_E device_type; + uint32_t flags; + uint32_t sa_id; + uint32_t case_num; + uint32_t lif0_port_type; + uint32_t lif1_port_type; + uint64_t pcie_vir_baddr; + uint64_t riscv_vir_baddr; + uint64_t dma_vir_baddr; + uint64_t dma_phy_baddr; +} ZXDH_SYS_INIT_CTRL_T; + +typedef struct dpp_dev_cfg_t { + uint32_t device_id; + ZXDH_DEV_TYPE_E dev_type; + uint32_t chip_ver; + uint32_t access_type; + uint32_t agent_flag; + uint32_t vport; + uint64_t pcie_addr; + uint64_t riscv_addr; + uint64_t dma_vir_addr; + uint64_t dma_phy_addr; + uint64_t agent_addr; + uint32_t init_flags[ZXDH_MODULE_INIT_MAX]; +} ZXDH_DEV_CFG_T; + +typedef struct zxdh_dev_mngr_t { + uint32_t device_num; + uint32_t is_init; + ZXDH_DEV_CFG_T *p_dev_array[ZXDH_DEV_CHANNEL_MAX]; +} ZXDH_DEV_MGR_T; + +typedef struct zxdh_dtb_addr_info_t { + uint32_t sdt_no; + uint32_t 
size; + uint32_t phy_addr; + uint32_t vir_addr; +} ZXDH_DTB_ADDR_INFO_T; + +typedef struct zxdh_dev_init_ctrl_t { + uint32_t vport; + char port_name[ZXDH_PORT_NAME_MAX]; + uint32_t vector; + uint32_t queue_id; + uint32_t np_bar_offset; + uint32_t np_bar_len; + uint32_t pcie_vir_addr; + uint32_t down_phy_addr; + uint32_t down_vir_addr; + uint32_t dump_phy_addr; + uint32_t dump_vir_addr; + uint32_t dump_sdt_num; + ZXDH_DTB_ADDR_INFO_T dump_addr_info[]; +} ZXDH_DEV_INIT_CTRL_T; + +typedef struct zxdh_sdt_mgr_t { + uint32_t channel_num; + uint32_t is_init; + ZXDH_SDT_SOFT_TABLE_T *sdt_tbl_array[ZXDH_DEV_CHANNEL_MAX]; +} ZXDH_SDT_MGR_T; + +int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl); + +#endif /* ZXDH_NP_H */ diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c index 06d3f92b20..250e67d560 100644 --- a/drivers/net/zxdh/zxdh_pci.c +++ b/drivers/net/zxdh/zxdh_pci.c @@ -159,7 +159,7 @@ zxdh_setup_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq) desc_addr = vq->vq_ring_mem; avail_addr = desc_addr + vq->vq_nentries * sizeof(struct zxdh_vring_desc); - if (vtpci_packed_queue(vq->hw)) { + if (zxdh_pci_packed_queue(vq->hw)) { used_addr = RTE_ALIGN_CEIL((avail_addr + sizeof(struct zxdh_vring_packed_desc_event)), ZXDH_PCI_VRING_ALIGN); diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h index ed6fd89742..d6487a574f 100644 --- a/drivers/net/zxdh/zxdh_pci.h +++ b/drivers/net/zxdh/zxdh_pci.h @@ -114,15 +114,15 @@ struct zxdh_pci_common_cfg { }; static inline int32_t -vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit) +zxdh_pci_with_feature(struct zxdh_hw *hw, uint64_t bit) { return (hw->guest_features & (1ULL << bit)) != 0; } static inline int32_t -vtpci_packed_queue(struct zxdh_hw *hw) +zxdh_pci_packed_queue(struct zxdh_hw *hw) { - return vtpci_with_feature(hw, ZXDH_F_RING_PACKED); + return zxdh_pci_with_feature(hw, ZXDH_F_RING_PACKED); } struct zxdh_pci_ops { diff --git a/drivers/net/zxdh/zxdh_queue.c 
b/drivers/net/zxdh/zxdh_queue.c index 462a88b23c..b4ef90ea36 100644 --- a/drivers/net/zxdh/zxdh_queue.c +++ b/drivers/net/zxdh/zxdh_queue.c @@ -13,7 +13,7 @@ #include "zxdh_msg.h" struct rte_mbuf * -zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq) +zxdh_queue_detach_unused(struct zxdh_virtqueue *vq) { struct rte_mbuf *cookie = NULL; int32_t idx = 0; diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h index 1088bf08fc..1304d5e4ea 100644 --- a/drivers/net/zxdh/zxdh_queue.h +++ b/drivers/net/zxdh/zxdh_queue.h @@ -206,11 +206,11 @@ struct zxdh_tx_region { }; static inline size_t -vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align) +zxdh_vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align) { size_t size; - if (vtpci_packed_queue(hw)) { + if (zxdh_pci_packed_queue(hw)) { size = num * sizeof(struct zxdh_vring_packed_desc); size += sizeof(struct zxdh_vring_packed_desc_event); size = RTE_ALIGN_CEIL(size, align); @@ -226,7 +226,7 @@ vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align) } static inline void -vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p, +zxdh_vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p, unsigned long align, uint32_t num) { vr->num = num; @@ -238,7 +238,7 @@ vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p, } static inline void -vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n) +zxdh_vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n) { int32_t i = 0; @@ -251,7 +251,7 @@ vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n) } static inline void -vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n) +zxdh_vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n) { int32_t i = 0; @@ -262,7 +262,7 @@ vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n) } static inline void -virtqueue_disable_intr(struct zxdh_virtqueue *vq) +zxdh_queue_disable_intr(struct 
zxdh_virtqueue *vq) { if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) { vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE; @@ -270,7 +270,7 @@ virtqueue_disable_intr(struct zxdh_virtqueue *vq) } } -struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq); +struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq); int32_t zxdh_free_queues(struct rte_eth_dev *dev); int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx); From patchwork Fri Dec 6 05:57:02 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149056 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D7E9B45E16; Fri, 6 Dec 2024 07:06:00 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6FC6240E6E; Fri, 6 Dec 2024 07:05:01 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id 77C3B40E1D for ; Fri, 6 Dec 2024 07:04:54 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LM43x7qz8RTZS; Fri, 6 Dec 2024 14:04:48 +0800 (CST) Received: from szxlzmapp07.zte.com.cn ([10.5.230.251]) by mse-fl2.zte.com.cn with SMTP id 4B664MRf033155; Fri, 6 Dec 2024 14:04:22 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:24 +0800 X-Zmail-TransId: 3e81675293e8001-7138d From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 02/15] net/zxdh: zxdh np 
uninit implementation Date: Fri, 6 Dec 2024 13:57:02 +0800 Message-ID: <20241206055715.506961-3-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664MRf033155 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529400.002/4Y4LM43x7qz8RTZS X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org (np)network processor release resources in host. Signed-off-by: Junlong Wang --- drivers/net/zxdh/zxdh_ethdev.c | 48 ++++ drivers/net/zxdh/zxdh_np.c | 490 +++++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_np.h | 107 +++++++ 3 files changed, 645 insertions(+) -- 2.27.0 diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 740e579da8..df5b8b7d55 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -841,6 +841,51 @@ zxdh_dev_configure(struct rte_eth_dev *dev) return ret; } +static void +zxdh_np_dtb_data_res_free(struct zxdh_hw *hw) +{ + struct rte_eth_dev *dev = hw->eth_dev; + int ret = 0; + int i = 0; + + if (g_dtb_data.init_done && g_dtb_data.bind_device == dev) { + ret = zxdh_np_online_uninit(0, dev->data->name, g_dtb_data.queueid); + if (ret) + PMD_DRV_LOG(ERR, "%s dpp_np_online_uninstall failed", dev->data->name); + + if (g_dtb_data.dtb_table_conf_mz) + rte_memzone_free(g_dtb_data.dtb_table_conf_mz); + + if (g_dtb_data.dtb_table_dump_mz) { + rte_memzone_free(g_dtb_data.dtb_table_dump_mz); + g_dtb_data.dtb_table_dump_mz = NULL; + } + + for (i = 0; i < ZXDH_MAX_BASE_DTB_TABLE_COUNT; i++) { + if (g_dtb_data.dtb_table_bulk_dump_mz[i]) { + rte_memzone_free(g_dtb_data.dtb_table_bulk_dump_mz[i]); + 
g_dtb_data.dtb_table_bulk_dump_mz[i] = NULL; + } + } + g_dtb_data.init_done = 0; + g_dtb_data.bind_device = NULL; + } + if (zxdh_shared_data != NULL) + zxdh_shared_data->np_init_done = 0; +} + +static void +zxdh_np_uninit(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (!g_dtb_data.init_done && !g_dtb_data.dev_refcnt) + return; + + if (--g_dtb_data.dev_refcnt == 0) + zxdh_np_dtb_data_res_free(hw); +} + static int zxdh_dev_close(struct rte_eth_dev *dev) { @@ -848,6 +893,7 @@ zxdh_dev_close(struct rte_eth_dev *dev) int ret = 0; zxdh_intr_release(dev); + zxdh_np_uninit(dev); zxdh_pci_reset(hw); zxdh_dev_free_mbufs(dev); @@ -1013,6 +1059,7 @@ zxdh_np_dtb_res_init(struct rte_eth_dev *dev) return 0; free_res: + zxdh_np_dtb_data_res_free(hw); rte_free(dpp_ctrl); return ret; } @@ -1179,6 +1226,7 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev) err_zxdh_init: zxdh_intr_release(eth_dev); + zxdh_np_uninit(eth_dev); zxdh_bar_msg_chan_exit(); rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c index 9c50039fb1..a603c88049 100644 --- a/drivers/net/zxdh/zxdh_np.c +++ b/drivers/net/zxdh/zxdh_np.c @@ -16,11 +16,22 @@ static ZXDH_DEV_MGR_T g_dev_mgr = {0}; static ZXDH_SDT_MGR_T g_sdt_mgr = {0}; ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX]; ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; +ZXDH_RISCV_DTB_MGR *p_riscv_dtb_queue_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; +ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; +ZXDH_REG_T g_dpp_reg_info[4] = {0}; #define ZXDH_COMM_ASSERT(x) assert(x) #define ZXDH_SDT_MGR_PTR_GET() (&g_sdt_mgr) #define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id]) +#define ZXDH_COMM_MASK_BIT(_bitnum_)\ + (0x1U << (_bitnum_)) + +#define ZXDH_COMM_GET_BIT_MASK(_inttype_, _bitqnt_)\ + ((_inttype_)(((_bitqnt_) < 32))) + +#define ZXDH_REG_DATA_MAX (128) + #define ZXDH_COMM_CHECK_DEV_POINT(dev_id, 
point)\ do {\ if (NULL == (point)) {\ @@ -345,3 +356,482 @@ zxdh_np_host_init(uint32_t dev_id, ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_agent_addr_set"); return 0; } + +static ZXDH_RISCV_DTB_MGR * +zxdh_np_riscv_dtb_queue_mgr_get(uint32_t dev_id) +{ + if (dev_id >= ZXDH_DEV_CHANNEL_MAX) + return NULL; + else + return p_riscv_dtb_queue_mgr[dev_id]; +} + +static uint32_t +zxdh_np_riscv_dtb_mgr_queue_info_delete(uint32_t dev_id, uint32_t queue_id) +{ + ZXDH_RISCV_DTB_MGR *p_riscv_dtb_mgr = NULL; + + p_riscv_dtb_mgr = zxdh_np_riscv_dtb_queue_mgr_get(dev_id); + if (p_riscv_dtb_mgr == NULL) + return 1; + + p_riscv_dtb_mgr->queue_alloc_count--; + p_riscv_dtb_mgr->queue_user_info[queue_id].alloc_flag = 0; + p_riscv_dtb_mgr->queue_user_info[queue_id].queue_id = 0xFF; + p_riscv_dtb_mgr->queue_user_info[queue_id].vport = 0; + memset(p_riscv_dtb_mgr->queue_user_info[queue_id].user_name, 0, ZXDH_PORT_NAME_MAX); + + return 0; +} + +static uint32_t +zxdh_np_dev_get_dev_type(uint32_t dev_id) +{ + ZXDH_DEV_MGR_T *p_dev_mgr = NULL; + ZXDH_DEV_CFG_T *p_dev_info = NULL; + + p_dev_mgr = &g_dev_mgr; + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + + if (p_dev_info == NULL) + return 0xffff; + + return p_dev_info->dev_type; +} + +static uint32_t +zxdh_np_comm_read_bits(uint8_t *p_base, uint32_t base_size_bit, + uint32_t *p_data, uint32_t start_bit, uint32_t end_bit) +{ + uint32_t len = 0; + uint32_t start_byte_index = 0; + uint32_t end_byte_index = 0; + uint32_t byte_num = 0; + uint32_t buffer_size = 0; + + if (0 != (base_size_bit % 8)) + return 1; + + if (start_bit > end_bit) + return 1; + + if (base_size_bit < end_bit) + return 1; + + len = end_bit - start_bit + 1; + buffer_size = base_size_bit / 8; + while (0 != (buffer_size & (buffer_size - 1))) + buffer_size += 1; + + *p_data = 0; + end_byte_index = (end_bit >> 3); + start_byte_index = (start_bit >> 3); + + if (start_byte_index == end_byte_index) { + *p_data = (uint32_t)(((p_base[start_byte_index] >> (7U - (end_bit & 7))) + & (0xff 
>> (8U - len))) & 0xff); + return 0; + } + + if (start_bit & 7) { + *p_data = (p_base[start_byte_index] & (0xff >> (start_bit & 7))) & UINT8_MAX; + start_byte_index++; + } + + for (byte_num = start_byte_index; byte_num < end_byte_index; byte_num++) { + *p_data <<= 8; + *p_data += p_base[byte_num]; + } + + *p_data <<= 1 + (end_bit & 7); + *p_data += ((p_base[byte_num & (buffer_size - 1)] & (0xff << (7 - (end_bit & 7)))) >> + (7 - (end_bit & 7))) & 0xff; + + return 0; +} + +static uint32_t +zxdh_np_comm_read_bits_ex(uint8_t *p_base, uint32_t base_size_bit, + uint32_t *p_data, uint32_t msb_start_pos, uint32_t len) +{ + uint32_t rtn = 0; + + rtn = zxdh_np_comm_read_bits(p_base, + base_size_bit, + p_data, + (base_size_bit - 1 - msb_start_pos), + (base_size_bit - 1 - msb_start_pos + len - 1)); + return rtn; +} + +static uint32_t +zxdh_np_reg_read(uint32_t dev_id, uint32_t reg_no, + uint32_t m_offset, uint32_t n_offset, void *p_data) +{ + uint32_t rc = 0; + uint32_t i = 0; + uint32_t p_buff[ZXDH_REG_DATA_MAX] = {0}; + ZXDH_REG_T *p_reg_info = NULL; + ZXDH_FIELD_T *p_field_info = NULL; + + if (reg_no < 4) { + p_reg_info = &g_dpp_reg_info[reg_no]; + p_field_info = p_reg_info->p_fields; + for (i = 0; i < p_reg_info->field_num; i++) { + rc = zxdh_np_comm_read_bits_ex((uint8_t *)p_buff, + p_reg_info->width * 8, + (uint32_t *)p_data + i, + p_field_info[i].msb_pos, + p_field_info[i].len); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_read_bits_ex"); + PMD_DRV_LOG(ERR, "dev_id %d(%d)(%d)is ok!", dev_id, m_offset, n_offset); + } + } + return 0; +} + +static uint32_t +zxdh_np_dtb_queue_vm_info_get(uint32_t dev_id, + uint32_t queue_id, + ZXDH_DTB_QUEUE_VM_INFO_T *p_vm_info) +{ + uint32_t rc = 0; + + ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T vm_info = {0}; + + rc = zxdh_np_reg_read(dev_id, ZXDH_DTB_CFG_EPID_V_FUNC_NUM, + 0, queue_id, &vm_info); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_reg_read"); + + p_vm_info->dbi_en = vm_info.dbi_en; + p_vm_info->queue_en = vm_info.queue_en; 
+ p_vm_info->epid = vm_info.cfg_epid; + p_vm_info->vector = vm_info.cfg_vector; + p_vm_info->vfunc_num = vm_info.cfg_vfunc_num; + p_vm_info->func_num = vm_info.cfg_func_num; + p_vm_info->vfunc_active = vm_info.cfg_vfunc_active; + + return 0; +} + +static uint32_t +zxdh_np_comm_write_bits(uint8_t *p_base, uint32_t base_size_bit, + uint32_t data, uint32_t start_bit, uint32_t end_bit) +{ + uint32_t start_byte_index = 0; + uint32_t end_byte_index = 0; + uint8_t mask_value = 0; + uint32_t byte_num = 0; + uint32_t buffer_size = 0; + + if (0 != (base_size_bit % 8)) + return 1; + + if (start_bit > end_bit) + return 1; + + if (base_size_bit < end_bit) + return 1; + + buffer_size = base_size_bit / 8; + + while (0 != (buffer_size & (buffer_size - 1))) + buffer_size += 1; + + end_byte_index = (end_bit >> 3); + start_byte_index = (start_bit >> 3); + + if (start_byte_index == end_byte_index) { + mask_value = ((0xFE << (7 - (start_bit & 7))) & 0xff); + mask_value |= (((1 << (7 - (end_bit & 7))) - 1) & 0xff); + p_base[end_byte_index] &= mask_value; + p_base[end_byte_index] |= (((data << (7 - (end_bit & 7)))) & 0xff); + return 0; + } + + if (7 != (end_bit & 7)) { + mask_value = ((0x7f >> (end_bit & 7)) & 0xff); + p_base[end_byte_index] &= mask_value; + p_base[end_byte_index] |= ((data << (7 - (end_bit & 7))) & 0xff); + end_byte_index--; + data >>= 1 + (end_bit & 7); + } + + for (byte_num = end_byte_index; byte_num > start_byte_index; byte_num--) { + p_base[byte_num & (buffer_size - 1)] = data & 0xff; + data >>= 8; + } + + mask_value = ((0xFE << (7 - (start_bit & 7))) & 0xff); + p_base[byte_num] &= mask_value; + p_base[byte_num] |= data; + + return 0; +} + +static uint32_t +zxdh_np_comm_write_bits_ex(uint8_t *p_base, + uint32_t base_size_bit, + uint32_t data, + uint32_t msb_start_pos, + uint32_t len) +{ + uint32_t rtn = 0; + + rtn = zxdh_np_comm_write_bits(p_base, + base_size_bit, + data, + (base_size_bit - 1 - msb_start_pos), + (base_size_bit - 1 - msb_start_pos + len - 1)); + + 
return rtn; +} + +static uint32_t +zxdh_np_reg_write(uint32_t dev_id, uint32_t reg_no, + uint32_t m_offset, uint32_t n_offset, void *p_data) +{ + uint32_t rc = 0; + uint32_t i = 0; + uint32_t p_buff[ZXDH_REG_DATA_MAX] = {0}; + uint32_t temp_data = 0; + ZXDH_REG_T *p_reg_info = NULL; + ZXDH_FIELD_T *p_field_info = NULL; + + if (reg_no < 4) { + p_reg_info = &g_dpp_reg_info[reg_no]; + p_field_info = p_reg_info->p_fields; + + for (i = 0; i < p_reg_info->field_num; i++) { + if (p_field_info[i].len <= 32) { + temp_data = *((uint32_t *)p_data + i); + rc = zxdh_np_comm_write_bits_ex((uint8_t *)p_buff, + p_reg_info->width * 8, + temp_data, + p_field_info[i].msb_pos, + p_field_info[i].len); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_comm_write_bits_ex"); + PMD_DRV_LOG(ERR, "dev_id %d(%d)(%d)is ok!", + dev_id, m_offset, n_offset); + } + } + } + + return 0; +} + +static uint32_t +zxdh_np_dtb_queue_vm_info_set(uint32_t dev_id, + uint32_t queue_id, + ZXDH_DTB_QUEUE_VM_INFO_T *p_vm_info) +{ + uint32_t rc = 0; + ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T vm_info = {0}; + + vm_info.dbi_en = p_vm_info->dbi_en; + vm_info.queue_en = p_vm_info->queue_en; + vm_info.cfg_epid = p_vm_info->epid; + vm_info.cfg_vector = p_vm_info->vector; + vm_info.cfg_vfunc_num = p_vm_info->vfunc_num; + vm_info.cfg_func_num = p_vm_info->func_num; + vm_info.cfg_vfunc_active = p_vm_info->vfunc_active; + + rc = zxdh_np_reg_write(dev_id, ZXDH_DTB_CFG_EPID_V_FUNC_NUM, + 0, queue_id, &vm_info); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_reg_write"); + + return 0; +} + +static uint32_t +zxdh_np_dtb_queue_enable_set(uint32_t dev_id, + uint32_t queue_id, + uint32_t enable) +{ + uint32_t rc = 0; + ZXDH_DTB_QUEUE_VM_INFO_T vm_info = {0}; + + rc = zxdh_np_dtb_queue_vm_info_get(dev_id, queue_id, &vm_info); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_vm_info_get"); + + vm_info.queue_en = enable; + rc = zxdh_np_dtb_queue_vm_info_set(dev_id, queue_id, &vm_info); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, 
"zxdh_dtb_queue_vm_info_set"); + + return rc; +} + +static uint32_t +zxdh_np_riscv_dpp_dtb_queue_id_release(uint32_t dev_id, + char pName[ZXDH_PORT_NAME_MAX], uint32_t queue_id) +{ + ZXDH_RISCV_DTB_MGR *p_riscv_dtb_mgr = NULL; + + p_riscv_dtb_mgr = zxdh_np_riscv_dtb_queue_mgr_get(dev_id); + if (p_riscv_dtb_mgr == NULL) + return 1; + + if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM) + return 0; + + if (p_riscv_dtb_mgr->queue_user_info[queue_id].alloc_flag != 1) { + PMD_DRV_LOG(ERR, "queue %d not alloc!", queue_id); + return 2; + } + + if (strcmp(p_riscv_dtb_mgr->queue_user_info[queue_id].user_name, pName) != 0) { + PMD_DRV_LOG(ERR, "queue %d name %s error!", queue_id, pName); + return 3; + } + zxdh_np_dtb_queue_enable_set(dev_id, queue_id, 0); + zxdh_np_riscv_dtb_mgr_queue_info_delete(dev_id, queue_id); + + return 0; +} + +static uint32_t +zxdh_np_dtb_queue_unused_item_num_get(uint32_t dev_id, + uint32_t queue_id, + uint32_t *p_item_num) +{ + uint32_t rc = 0; + + if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM) { + *p_item_num = 32; + return 0; + } + + rc = zxdh_np_reg_read(dev_id, ZXDH_DTB_INFO_QUEUE_BUF_SPACE, + 0, queue_id, p_item_num); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + return 0; +} + +static uint32_t +zxdh_np_dtb_queue_id_free(uint32_t dev_id, + uint32_t queue_id) +{ + uint32_t rc = 0; + uint32_t item_num = 0; + ZXDH_DTB_MGR_T *p_dtb_mgr = NULL; + + p_dtb_mgr = p_dpp_dtb_mgr[dev_id]; + if (p_dtb_mgr == NULL) + return 1; + + rc = zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &item_num); + + p_dtb_mgr->queue_info[queue_id].init_flag = 0; + p_dtb_mgr->queue_info[queue_id].vport = 0; + p_dtb_mgr->queue_info[queue_id].vector = 0; + + return rc; +} + +static uint32_t +zxdh_np_dtb_queue_release(uint32_t devid, + char pname[32], + uint32_t queueid) +{ + uint32_t rc = 0; + + ZXDH_COMM_CHECK_DEV_POINT(devid, pname); + + rc = zxdh_np_riscv_dpp_dtb_queue_id_release(devid, pname, queueid); + 
ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_riscv_dpp_dtb_queue_id_release"); + + rc = zxdh_np_dtb_queue_id_free(devid, queueid); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_id_free"); + + return rc; +} + +static uint32_t +zxdh_np_dtb_mgr_destroy(uint32_t dev_id) +{ + if (p_dpp_dtb_mgr[dev_id] != NULL) { + free(p_dpp_dtb_mgr[dev_id]); + p_dpp_dtb_mgr[dev_id] = NULL; + } + + return 0; +} + +static uint32_t +zxdh_np_tlb_mgr_destroy(uint32_t dev_id) +{ + if (g_p_dpp_tlb_mgr[dev_id] != NULL) { + free(g_p_dpp_tlb_mgr[dev_id]); + g_p_dpp_tlb_mgr[dev_id] = NULL; + } + + return 0; +} + +static uint32_t +zxdh_np_sdt_mgr_destroy(uint32_t dev_id) +{ + ZXDH_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL; + ZXDH_SDT_MGR_T *p_sdt_mgr = NULL; + + p_sdt_tbl_temp = ZXDH_SDT_SOFT_TBL_GET(dev_id); + p_sdt_mgr = ZXDH_SDT_MGR_PTR_GET(); + + if (p_sdt_tbl_temp != NULL) + free(p_sdt_tbl_temp); + + ZXDH_SDT_SOFT_TBL_GET(dev_id) = NULL; + + p_sdt_mgr->channel_num--; + + return 0; +} + +static uint32_t +zxdh_np_dev_del(uint32_t dev_id) +{ + ZXDH_DEV_CFG_T *p_dev_info = NULL; + ZXDH_DEV_MGR_T *p_dev_mgr = NULL; + + p_dev_mgr = &g_dev_mgr; + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + + if (p_dev_info != NULL) { + free(p_dev_info); + p_dev_mgr->p_dev_array[dev_id] = NULL; + p_dev_mgr->device_num--; + } + + return 0; +} + +int +zxdh_np_online_uninit(uint32_t dev_id, + char *port_name, + uint32_t queue_id) +{ + uint32_t rc = 0; + + rc = zxdh_np_dtb_queue_release(dev_id, port_name, queue_id); + if (rc != 0) + PMD_DRV_LOG(ERR, "%s:dtb release error," + "port name %s queue id %d. 
", __func__, port_name, queue_id); + + rc = zxdh_np_dtb_mgr_destroy(dev_id); + if (rc != 0) + PMD_DRV_LOG(ERR, "zxdh_dtb_mgr_destroy error!"); + + rc = zxdh_np_tlb_mgr_destroy(dev_id); + if (rc != 0) + PMD_DRV_LOG(ERR, "zxdh_tlb_mgr_destroy error!"); + + rc = zxdh_np_sdt_mgr_destroy(dev_id); + if (rc != 0) + PMD_DRV_LOG(ERR, "zxdh_sdt_mgr_destroy error!"); + + rc = zxdh_np_dev_del(dev_id); + if (rc != 0) + PMD_DRV_LOG(ERR, "zxdh_dev_del error!"); + + return 0; +} diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h index 573eafe796..dc0e867827 100644 --- a/drivers/net/zxdh/zxdh_np.h +++ b/drivers/net/zxdh/zxdh_np.h @@ -47,6 +47,11 @@ #define ZXDH_INIT_FLAG_TM_IMEM_FLAG (1 << 9) #define ZXDH_INIT_FLAG_AGENT_FLAG (1 << 10) +#define ZXDH_ACL_TBL_ID_MIN (0) +#define ZXDH_ACL_TBL_ID_MAX (7) +#define ZXDH_ACL_TBL_ID_NUM (8U) +#define ZXDH_ACL_BLOCK_NUM (8U) + typedef enum zxdh_module_init_e { ZXDH_MODULE_INIT_NPPU = 0, ZXDH_MODULE_INIT_PPU, @@ -67,6 +72,15 @@ typedef enum zxdh_dev_type_e { ZXDH_DEV_TYPE_INVALID, } ZXDH_DEV_TYPE_E; +typedef enum zxdh_reg_info_e { + ZXDH_DTB_CFG_QUEUE_DTB_HADDR = 0, + ZXDH_DTB_CFG_QUEUE_DTB_LADDR = 1, + ZXDH_DTB_CFG_QUEUE_DTB_LEN = 2, + ZXDH_DTB_INFO_QUEUE_BUF_SPACE = 3, + ZXDH_DTB_CFG_EPID_V_FUNC_NUM = 4, + ZXDH_REG_ENUM_MAX_VALUE +} ZXDH_REG_INFO_E; + typedef enum zxdh_dev_access_type_e { ZXDH_DEV_ACCESS_TYPE_PCIE = 0, ZXDH_DEV_ACCESS_TYPE_RISCV = 1, @@ -79,6 +93,26 @@ typedef enum zxdh_dev_agent_flag_e { ZXDH_DEV_AGENT_INVALID, } ZXDH_DEV_AGENT_FLAG_E; +typedef enum zxdh_acl_pri_mode_e { + ZXDH_ACL_PRI_EXPLICIT = 1, + ZXDH_ACL_PRI_IMPLICIT, + ZXDH_ACL_PRI_SPECIFY, + ZXDH_ACL_PRI_INVALID, +} ZXDH_ACL_PRI_MODE_E; + +typedef struct zxdh_d_node { + void *data; + struct zxdh_d_node *prev; + struct zxdh_d_node *next; +} ZXDH_D_NODE; + +typedef struct zxdh_d_head { + uint32_t used; + uint32_t maxnum; + ZXDH_D_NODE *p_next; + ZXDH_D_NODE *p_prev; +} ZXDH_D_HEAD; + typedef struct zxdh_dtb_tab_up_user_addr_t { uint32_t user_flag; 
uint64_t phy_addr; @@ -193,6 +227,79 @@ typedef struct zxdh_sdt_mgr_t { ZXDH_SDT_SOFT_TABLE_T *sdt_tbl_array[ZXDH_DEV_CHANNEL_MAX]; } ZXDH_SDT_MGR_T; +typedef struct zxdh_riscv_dtb_queue_USER_info_t { + uint32_t alloc_flag; + uint32_t queue_id; + uint32_t vport; + char user_name[ZXDH_PORT_NAME_MAX]; +} ZXDH_RISCV_DTB_QUEUE_USER_INFO_T; + +typedef struct zxdh_riscv_dtb_mgr { + uint32_t queue_alloc_count; + uint32_t queue_index; + ZXDH_RISCV_DTB_QUEUE_USER_INFO_T queue_user_info[ZXDH_DTB_QUEUE_NUM_MAX]; +} ZXDH_RISCV_DTB_MGR; + +typedef struct zxdh_dtb_queue_vm_info_t { + uint32_t dbi_en; + uint32_t queue_en; + uint32_t epid; + uint32_t vfunc_num; + uint32_t vector; + uint32_t func_num; + uint32_t vfunc_active; +} ZXDH_DTB_QUEUE_VM_INFO_T; + +typedef struct zxdh_dtb4k_dtb_enq_cfg_epid_v_func_num_0_127_t { + uint32_t dbi_en; + uint32_t queue_en; + uint32_t cfg_epid; + uint32_t cfg_vfunc_num; + uint32_t cfg_vector; + uint32_t cfg_func_num; + uint32_t cfg_vfunc_active; +} ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T; + + +typedef uint32_t (*ZXDH_REG_WRITE)(uint32_t dev_id, uint32_t addr, uint32_t *p_data); +typedef uint32_t (*ZXDH_REG_READ)(uint32_t dev_id, uint32_t addr, uint32_t *p_data); + +typedef struct zxdh_field_t { + const char *p_name; + uint32_t flags; + uint16_t msb_pos; + + uint16_t len; + uint32_t default_value; + uint32_t default_step; +} ZXDH_FIELD_T; + +typedef struct zxdh_reg_t { + const char *reg_name; + uint32_t reg_no; + uint32_t module_no; + uint32_t flags; + uint32_t array_type; + uint32_t addr; + uint32_t width; + uint32_t m_size; + uint32_t n_size; + uint32_t m_step; + uint32_t n_step; + uint32_t field_num; + ZXDH_FIELD_T *p_fields; + + ZXDH_REG_WRITE p_write_fun; + ZXDH_REG_READ p_read_fun; +} ZXDH_REG_T; + +typedef struct zxdh_tlb_mgr_t { + uint32_t entry_num; + uint32_t va_width; + uint32_t pa_width; +} ZXDH_TLB_MGR_T; + int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl); +int zxdh_np_online_uninit(uint32_t dev_id, 
char *port_name, uint32_t queue_id); #endif /* ZXDH_NP_H */ From patchwork Fri Dec 6 05:57:03 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149057 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A775D45E16; Fri, 6 Dec 2024 07:06:10 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D740E410DD; Fri, 6 Dec 2024 07:05:02 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id 7743040E50 for ; Fri, 6 Dec 2024 07:04:55 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LM54YtHz8RTZH; Fri, 6 Dec 2024 14:04:49 +0800 (CST) Received: from szxlzmapp03.zte.com.cn ([10.5.231.207]) by mse-fl2.zte.com.cn with SMTP id 4B664N0i033166; Fri, 6 Dec 2024 14:04:23 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:25 +0800 X-Zmail-TransId: 3e81675293e9001-71392 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 03/15] net/zxdh: port tables init implementations Date: Fri, 6 Dec 2024 13:57:03 +0800 Message-ID: <20241206055715.506961-4-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664N0i033166 X-Fangmail-Anti-Spam-Filtered: 
true X-Fangmail-MID-QID: 67529401.002/4Y4LM54YtHz8RTZH X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org insert port tables in host. Signed-off-by: Junlong Wang --- drivers/net/zxdh/meson.build | 1 + drivers/net/zxdh/zxdh_ethdev.c | 24 ++ drivers/net/zxdh/zxdh_msg.c | 63 ++++ drivers/net/zxdh/zxdh_msg.h | 72 ++++ drivers/net/zxdh/zxdh_np.c | 666 ++++++++++++++++++++++++++++++++- drivers/net/zxdh/zxdh_np.h | 212 ++++++++++- drivers/net/zxdh/zxdh_pci.h | 2 + drivers/net/zxdh/zxdh_tables.c | 104 +++++ drivers/net/zxdh/zxdh_tables.h | 148 ++++++++ 9 files changed, 1289 insertions(+), 3 deletions(-) create mode 100644 drivers/net/zxdh/zxdh_tables.c create mode 100644 drivers/net/zxdh/zxdh_tables.h -- 2.27.0 diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build index ab24a3145c..5b3af87c5b 100644 --- a/drivers/net/zxdh/meson.build +++ b/drivers/net/zxdh/meson.build @@ -20,4 +20,5 @@ sources = files( 'zxdh_pci.c', 'zxdh_queue.c', 'zxdh_np.c', + 'zxdh_tables.c', ) diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index df5b8b7d55..9f3a5bcf9c 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -14,6 +14,7 @@ #include "zxdh_common.h" #include "zxdh_queue.h" #include "zxdh_np.h" +#include "zxdh_tables.h" struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS]; struct zxdh_shared_data *zxdh_shared_data; @@ -1146,6 +1147,25 @@ zxdh_np_init(struct rte_eth_dev *eth_dev) return 0; } +static int +zxdh_tables_init(struct rte_eth_dev *dev) +{ + int ret = 0; + + ret = zxdh_port_attr_init(dev); + if (ret) { + PMD_DRV_LOG(ERR, "zxdh_port_attr_init failed"); + return ret; + } + + ret = zxdh_panel_table_init(dev); + if (ret) { + PMD_DRV_LOG(ERR, " panel table init failed"); + return ret; + } + return ret; +} + static int zxdh_eth_dev_init(struct 
rte_eth_dev *eth_dev) { @@ -1222,6 +1242,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev) if (ret != 0) goto err_zxdh_init; + ret = zxdh_tables_init(eth_dev); + if (ret != 0) + goto err_zxdh_init; + return ret; err_zxdh_init: diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c index a0a005b178..1aed979de3 100644 --- a/drivers/net/zxdh/zxdh_msg.c +++ b/drivers/net/zxdh/zxdh_msg.c @@ -14,6 +14,7 @@ #include "zxdh_ethdev.h" #include "zxdh_logs.h" #include "zxdh_msg.h" +#include "zxdh_pci.h" #define ZXDH_REPS_INFO_FLAG_USABLE 0x00 #define ZXDH_BAR_SEQID_NUM_MAX 256 @@ -100,6 +101,7 @@ #define ZXDH_BAR_CHAN_MSG_EMEC 1 #define ZXDH_BAR_CHAN_MSG_NO_ACK 0 #define ZXDH_BAR_CHAN_MSG_ACK 1 +#define ZXDH_MSG_REPS_OK 0xff uint8_t subchan_id_tbl[ZXDH_BAR_MSG_SRC_NUM][ZXDH_BAR_MSG_DST_NUM] = { {ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND}, @@ -1080,3 +1082,64 @@ int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras, res->bar_length = recv_msg.offset_reps.length; return ZXDH_BAR_MSG_OK; } + +int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *msg_req, + uint16_t msg_req_len, void *reply, uint16_t reply_len) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_pci_bar_msg in = {0}; + struct zxdh_msg_recviver_mem result = {0}; + struct zxdh_msg_reply_info reply_info = {0}; + int ret = 0; + + if (reply) { + RTE_ASSERT(reply_len < sizeof(zxdh_msg_reply_info)); + result.recv_buffer = reply; + result.buffer_len = reply_len; + } else { + result.recv_buffer = &reply_info; + result.buffer_len = sizeof(reply_info); + } + + struct zxdh_msg_reply_head *reply_head = + &(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_head); + struct zxdh_msg_reply_body *reply_body = + &(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_body); + + in.virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET); + in.payload_addr = msg_req; + in.payload_len = msg_req_len; + in.src = 
ZXDH_MSG_CHAN_END_VF; + in.dst = ZXDH_MSG_CHAN_END_PF; + in.module_id = ZXDH_MODULE_BAR_MSG_TO_PF; + in.src_pcieid = hw->pcie_id; + in.dst_pcieid = ZXDH_PF_PCIE_ID(hw->pcie_id); + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != ZXDH_BAR_MSG_OK) { + PMD_MSG_LOG(ERR, + "vf[%d] send bar msg to pf failed.ret %d", hw->vport.vfid, ret); + return -EAGAIN; + } + if (reply_head->flag != ZXDH_MSG_REPS_OK) { + PMD_MSG_LOG(ERR, "vf[%d] get pf reply failed: reply_head flag : 0x%x(0xff is OK).replylen %d", + hw->vport.vfid, reply_head->flag, reply_head->reps_len); + return -EAGAIN; + } + if (reply_body->flag != ZXDH_REPS_SUCC) { + PMD_MSG_LOG(ERR, "vf[%d] msg processing failed", hw->vfid); + return -EAGAIN; + } + return 0; +} + +void zxdh_msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, + struct zxdh_msg_info *msg_info) +{ + struct zxdh_msg_head *msghead = &msg_info->msg_head; + + msghead->msg_type = type; + msghead->vport = hw->vport.vport; + msghead->vf_id = hw->vport.vfid; + msghead->pcieid = hw->pcie_id; +} diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index fbc79e8f9d..35ed5d1a1c 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -33,6 +33,19 @@ #define ZXDH_BAR_MSG_PAYLOAD_MAX_LEN \ (ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct zxdh_bar_msg_header)) +#define ZXDH_MSG_ADDR_CHAN_INTERVAL (2 * 1024) /* channel size */ +#define ZXDH_MSG_PAYLOAD_MAX_LEN \ + (ZXDH_MSG_ADDR_CHAN_INTERVAL - sizeof(struct zxdh_bar_msg_header)) + +#define ZXDH_MSG_REPLYBODY_HEAD sizeof(enum zxdh_reps_flag) +#define ZXDH_MSG_HEADER_SIZE 4 +#define ZXDH_MSG_REPLY_BODY_MAX_LEN \ + (ZXDH_MSG_PAYLOAD_MAX_LEN - sizeof(struct zxdh_msg_reply_head)) + +#define ZXDH_MSG_HEAD_LEN 8 +#define ZXDH_MSG_REQ_BODY_MAX_LEN \ + (ZXDH_MSG_PAYLOAD_MAX_LEN - ZXDH_MSG_HEAD_LEN) + enum ZXDH_DRIVER_TYPE { ZXDH_MSG_CHAN_END_MPF = 0, ZXDH_MSG_CHAN_END_PF, @@ -151,6 +164,13 @@ enum pciebar_layout_type { ZXDH_URI_MAX, }; +enum zxdh_msg_type { + 
ZXDH_NULL = 0, + ZXDH_VF_PORT_INIT = 1, + + ZXDH_MSG_TYPE_END, +} __rte_packed; + struct zxdh_msix_para { uint16_t pcie_id; uint16_t vector_risc; @@ -240,6 +260,54 @@ struct zxdh_offset_get_msg { uint16_t type; }; +struct zxdh_msg_reply_head { + uint8_t flag; + uint16_t reps_len; + uint8_t resvd; +} __rte_packed; + +enum zxdh_reps_flag { + ZXDH_REPS_FAIL, + ZXDH_REPS_SUCC = 0xaa, +} __rte_packed; + +struct zxdh_msg_reply_body { + enum zxdh_reps_flag flag; + union { + uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)]; + } __rte_packed; +} __rte_packed; + +struct zxdh_msg_reply_info { + struct zxdh_msg_reply_head reply_head; + struct zxdh_msg_reply_body reply_body; +} __rte_packed; + +struct zxdh_vf_init_msg { + uint8_t link_up; + uint8_t rsv; + uint16_t base_qid; + uint8_t rss_enable; +} __rte_packed; + +struct zxdh_msg_head { + enum zxdh_msg_type msg_type; + uint16_t vport; + uint16_t vf_id; + uint16_t pcieid; +} __rte_packed; + +struct zxdh_msg_info { + union { + uint8_t head_len[ZXDH_MSG_HEAD_LEN]; + struct zxdh_msg_head msg_head; + }; + union { + uint8_t datainfo[ZXDH_MSG_REQ_BODY_MAX_LEN]; + struct zxdh_vf_init_msg vf_init_msg; + } __rte_packed data; +} __rte_packed; + typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, void *reps_buffer, uint16_t *reps_len, void *dev); @@ -253,5 +321,9 @@ int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result); int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev); +void zxdh_msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, + struct zxdh_msg_info *msg_info); +int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *msg_req, + uint16_t msg_req_len, void *reply, uint16_t reply_len); #endif /* ZXDH_MSG_H */ diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c index a603c88049..6b8168da6f 100644 --- a/drivers/net/zxdh/zxdh_np.c +++ b/drivers/net/zxdh/zxdh_np.c @@ -7,18 +7,23 @@ 
#include #include +#include +#include #include "zxdh_np.h" #include "zxdh_logs.h" static uint64_t g_np_bar_offset; -static ZXDH_DEV_MGR_T g_dev_mgr = {0}; -static ZXDH_SDT_MGR_T g_sdt_mgr = {0}; +static ZXDH_DEV_MGR_T g_dev_mgr; +static ZXDH_SDT_MGR_T g_sdt_mgr; +static uint32_t g_dpp_dtb_int_enable; +static uint32_t g_table_type[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX]; ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX]; ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; ZXDH_RISCV_DTB_MGR *p_riscv_dtb_queue_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; ZXDH_REG_T g_dpp_reg_info[4] = {0}; +ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4] = {0}; #define ZXDH_COMM_ASSERT(x) assert(x) #define ZXDH_SDT_MGR_PTR_GET() (&g_sdt_mgr) @@ -75,6 +80,98 @@ do {\ } \ } while (0) +#define ZXDH_COMM_CHECK_POINT(point)\ +do {\ + if ((point) == NULL) {\ + PMD_DRV_LOG(ERR, "ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!",\ + __FILE__, __LINE__, __func__);\ + ZXDH_COMM_ASSERT(0);\ + } \ +} while (0) + + +#define ZXDH_COMM_CHECK_POINT_MEMORY_FREE(point, ptr)\ +do {\ + if ((point) == NULL) {\ + PMD_DRV_LOG(ERR, "ZXIC %s:%d[Error:POINT NULL] !"\ + "FUNCTION : %s!", __FILE__, __LINE__, __func__);\ + rte_free(ptr);\ + ZXDH_COMM_ASSERT(0);\ + } \ +} while (0) + +#define ZXDH_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, becall, ptr)\ +do {\ + if ((rc) != 0) {\ + PMD_DRV_LOG(ERR, "ZXICP %s:%d, %s Call"\ + " %s Fail!", __FILE__, __LINE__, __func__, becall);\ + rte_free(ptr);\ + } \ +} while (0) + +#define ZXDH_COMM_CONVERT16(w_data) \ + (((w_data) & 0xff) << 8) + +#define ZXDH_DTB_TAB_UP_VIR_ADDR_GET(DEV_ID, QUEUE_ID, INDEX) \ + ((INDEX) * p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.item_size) + +#define ZXDH_DTB_TAB_DOWN_VIR_ADDR_GET(DEV_ID, QUEUE_ID, INDEX) \ + ((INDEX) * p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_down.item_size) + +#define ZXDH_DTB_TAB_DOWN_WR_INDEX_GET(DEV_ID, QUEUE_ID) \ + 
(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_down.wr_index) + +#define ZXDH_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_down.start_phy_addr) + +#define ZXDH_DTB_QUEUE_INIT_FLAG_GET(DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].init_flag) + +static uint32_t +zxdh_np_comm_is_big_endian(void) +{ + ZXDH_ENDIAN_U c_data; + + c_data.a = 1; + + if (c_data.b == 1) + return 0; + else + return 1; +} + +static void +zxdh_np_comm_swap(uint8_t *p_uc_data, uint32_t dw_byte_len) +{ + uint32_t dw_byte_num = 0; + uint8_t uc_byte_mode = 0; + uint32_t uc_is_big_flag = 0; + uint32_t i = 0; + uint16_t *p_w_tmp = NULL; + uint32_t *p_dw_tmp = NULL; + + + p_dw_tmp = (uint32_t *)(p_uc_data); + + uc_is_big_flag = zxdh_np_comm_is_big_endian(); + + if (uc_is_big_flag) + return; + + dw_byte_num = dw_byte_len >> 2; + uc_byte_mode = dw_byte_len % 4 & 0xff; + + for (i = 0; i < dw_byte_num; i++) { + (*p_dw_tmp) = ZXDH_COMM_CONVERT16(*p_dw_tmp); + p_dw_tmp++; + } + + if (uc_byte_mode > 1) { + p_w_tmp = (uint16_t *)(p_dw_tmp); + (*p_w_tmp) = ZXDH_COMM_CONVERT16(*p_w_tmp); + } +} + static uint32_t zxdh_np_dev_init(void) { @@ -835,3 +932,568 @@ zxdh_np_online_uninit(uint32_t dev_id, return 0; } + +static uint32_t +zxdh_np_sdt_tbl_type_get(uint32_t dev_id, uint32_t sdt_no) +{ + return g_table_type[dev_id][sdt_no]; +} + + +static ZXDH_DTB_TABLE_T * +zxdh_np_table_info_get(uint32_t table_type) +{ + return &g_dpp_dtb_table_info[table_type]; +} + +static uint32_t +zxdh_np_dtb_write_table_cmd(uint32_t dev_id, + ZXDH_DTB_TABLE_INFO_E table_type, + void *p_cmd_data, + void *p_cmd_buff) +{ + uint32_t rc = 0; + uint32_t field_cnt = 0; + ZXDH_DTB_TABLE_T *p_table_info; + ZXDH_DTB_FIELD_T *p_field_info = NULL; + uint32_t temp_data = 0; + + ZXDH_COMM_CHECK_POINT(p_cmd_data); + ZXDH_COMM_CHECK_POINT(p_cmd_buff); + p_table_info = zxdh_np_table_info_get(table_type); + p_field_info = p_table_info->p_fields; + 
ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_table_info); + + for (field_cnt = 0; field_cnt < p_table_info->field_num; field_cnt++) { + temp_data = *((uint32_t *)p_cmd_data + field_cnt) & ZXDH_COMM_GET_BIT_MASK(uint32_t, + p_field_info[field_cnt].len); + + rc = zxdh_np_comm_write_bits_ex((uint8_t *)p_cmd_buff, + ZXDH_DTB_TABLE_CMD_SIZE_BIT, + temp_data, + p_field_info[field_cnt].lsb_pos, + p_field_info[field_cnt].len); + + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_write_bits"); + } + + return 0; +} + +static uint32_t +zxdh_np_dtb_smmu0_write_entry_data(uint32_t dev_id, + uint32_t mode, + uint32_t addr, + uint32_t *p_data, + ZXDH_DTB_ENTRY_T *p_entry) +{ + uint32_t rc = 0; + ZXDH_DTB_ERAM_TABLE_FORM_T dtb_eram_form_info = {0}; + + dtb_eram_form_info.valid = ZXDH_DTB_TABLE_VALID; + dtb_eram_form_info.type_mode = ZXDH_DTB_TABLE_MODE_ERAM; + dtb_eram_form_info.data_mode = mode; + dtb_eram_form_info.cpu_wr = 1; + dtb_eram_form_info.addr = addr; + dtb_eram_form_info.cpu_rd = 0; + dtb_eram_form_info.cpu_rd_mode = 0; + + if (ZXDH_ERAM128_OPR_128b == mode) { + p_entry->data_in_cmd_flag = 0; + p_entry->data_size = 128 / 8; + + rc = zxdh_np_dtb_write_table_cmd(dev_id, ZXDH_DTB_TABLE_ERAM_128, + &dtb_eram_form_info, p_entry->cmd); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd"); + + memcpy(p_entry->data, p_data, 128 / 8); + } else if (ZXDH_ERAM128_OPR_64b == mode) { + p_entry->data_in_cmd_flag = 1; + p_entry->data_size = 64 / 8; + dtb_eram_form_info.data_l = *(p_data + 1); + dtb_eram_form_info.data_h = *(p_data); + + rc = zxdh_np_dtb_write_table_cmd(dev_id, ZXDH_DTB_TABLE_ERAM_64, + &dtb_eram_form_info, p_entry->cmd); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd"); + + } else if (ZXDH_ERAM128_OPR_1b == mode) { + p_entry->data_in_cmd_flag = 1; + p_entry->data_size = 1; + dtb_eram_form_info.data_h = *(p_data); + + rc = zxdh_np_dtb_write_table_cmd(dev_id, ZXDH_DTB_TABLE_ERAM_1, + &dtb_eram_form_info, p_entry->cmd); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, 
"dpp_dtb_write_table_cmd"); + } + + return 0; +} + +static uint32_t +zxdh_np_dtb_se_smmu0_ind_write(uint32_t dev_id, + uint32_t base_addr, + uint32_t index, + uint32_t wrt_mode, + uint32_t *p_data, + ZXDH_DTB_ENTRY_T *p_entry) +{ + uint32_t rc = 0; + uint32_t temp_idx = 0; + uint32_t dtb_ind_addr = 0; + + switch (wrt_mode) { + case ZXDH_ERAM128_OPR_128b: + { + if ((0xFFFFFFFF - (base_addr)) < (index)) { + PMD_DRV_LOG(ERR, "ICM %s:%d[Error:VALUE[val0=0x%x]" + "INVALID] [val1=0x%x] ! FUNCTION :%s !", __FILE__, __LINE__, + base_addr, index, __func__); + + return ZXDH_PAR_CHK_INVALID_INDEX; + } + if (base_addr + index > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + PMD_DRV_LOG(ERR, "dpp_se_smmu0_ind_write : index out of range !"); + return 1; + } + temp_idx = index << 7; + break; + } + + case ZXDH_ERAM128_OPR_64b: + { + if ((base_addr + (index >> 1)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + PMD_DRV_LOG(ERR, "dpp_se_smmu0_ind_write : index out of range !"); + return 1; + } + temp_idx = index << 6; + break; + } + + case ZXDH_ERAM128_OPR_1b: + { + if ((base_addr + (index >> 7)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + PMD_DRV_LOG(ERR, "dpp_se_smmu0_ind_write : index out of range !"); + return 1; + } + + temp_idx = index; + } + } + + dtb_ind_addr = ((base_addr << 7) & ZXDH_ERAM128_BADDR_MASK) + temp_idx; + + PMD_DRV_LOG(INFO, " dtb eram item 1bit addr: 0x%x", dtb_ind_addr); + + rc = zxdh_np_dtb_smmu0_write_entry_data(dev_id, + wrt_mode, + dtb_ind_addr, + p_data, + p_entry); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_write_entry_data"); + + return 0; +} + +static uint32_t +zxdh_np_eram_dtb_len_get(uint32_t mode) +{ + uint32_t dtb_len = 0; + + switch (mode) { + case ZXDH_ERAM128_OPR_128b: + { + dtb_len += 2; + break; + } + case ZXDH_ERAM128_OPR_64b: + case ZXDH_ERAM128_OPR_1b: + { + dtb_len += 1; + break; + } + default: + break; + } + + return dtb_len; +} + +static uint32_t +zxdh_np_dtb_eram_one_entry(uint32_t dev_id, + uint32_t sdt_no, + uint32_t del_en, + void 
*pdata, + uint32_t *p_dtb_len, + ZXDH_DTB_ENTRY_T *p_dtb_one_entry) +{ + uint32_t rc = 0; + uint32_t base_addr = 0; + uint32_t index = 0; + uint32_t opr_mode = 0; + uint32_t buff[ZXDH_SMMU0_READ_REG_MAX_NUM] = {0}; + + ZXDH_SDTTBL_ERAM_T sdt_eram = {0}; + ZXDH_DTB_ERAM_ENTRY_INFO_T *peramdata = NULL; + + ZXDH_COMM_CHECK_POINT(pdata); + ZXDH_COMM_CHECK_POINT(p_dtb_one_entry); + ZXDH_COMM_CHECK_POINT(p_dtb_len); + + peramdata = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)pdata; + index = peramdata->index; + base_addr = sdt_eram.eram_base_addr; + opr_mode = sdt_eram.eram_mode; + + switch (opr_mode) { + case ZXDH_ERAM128_TBL_128b: + { + opr_mode = ZXDH_ERAM128_OPR_128b; + break; + } + case ZXDH_ERAM128_TBL_64b: + { + opr_mode = ZXDH_ERAM128_OPR_64b; + break; + } + + case ZXDH_ERAM128_TBL_1b: + { + opr_mode = ZXDH_ERAM128_OPR_1b; + break; + } + } + + if (del_en) { + memset((uint8_t *)buff, 0, sizeof(buff)); + rc = zxdh_np_dtb_se_smmu0_ind_write(dev_id, + base_addr, + index, + opr_mode, + buff, + p_dtb_one_entry); + ZXDH_COMM_CHECK_DEV_RC(sdt_no, rc, "zxdh_dtb_se_smmu0_ind_write"); + } else { + rc = zxdh_np_dtb_se_smmu0_ind_write(dev_id, + base_addr, + index, + opr_mode, + peramdata->p_data, + p_dtb_one_entry); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_dtb_se_smmu0_ind_write"); + } + *p_dtb_len = zxdh_np_eram_dtb_len_get(opr_mode); + + return 0; +} + +static uint32_t +zxdh_np_dtb_data_write(uint8_t *p_data_buff, + uint32_t addr_offset, + ZXDH_DTB_ENTRY_T *entry) +{ + ZXDH_COMM_CHECK_POINT(p_data_buff); + ZXDH_COMM_CHECK_POINT(entry); + + uint8_t *p_cmd = p_data_buff + addr_offset; + uint32_t cmd_size = ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8; + + uint8_t *p_data = p_cmd + cmd_size; + uint32_t data_size = entry->data_size; + + uint8_t *cmd = (uint8_t *)entry->cmd; + uint8_t *data = (uint8_t *)entry->data; + + rte_memcpy(p_cmd, cmd, cmd_size); + + if (!entry->data_in_cmd_flag) { + zxdh_np_comm_swap(data, data_size); + rte_memcpy(p_data, data, data_size); + } + + return 0; +} + +static uint32_t 
+zxdh_np_dtb_queue_enable_get(uint32_t dev_id, + uint32_t queue_id, + uint32_t *enable) +{ + uint32_t rc = 0; + ZXDH_DTB_QUEUE_VM_INFO_T vm_info = {0}; + + rc = zxdh_np_dtb_queue_vm_info_get(dev_id, queue_id, &vm_info); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_vm_info_get"); + + *enable = vm_info.queue_en; + return rc; +} + +static uint32_t +zxdh_np_dtb_item_buff_wr(uint32_t dev_id, + uint32_t queue_id, + uint32_t dir_flag, + uint32_t index, + uint32_t pos, + uint32_t len, + uint32_t *p_data) +{ + uint64_t addr = 0; + + if (dir_flag == 1) + addr = ZXDH_DTB_TAB_UP_VIR_ADDR_GET(dev_id, queue_id, index) + + ZXDH_DTB_ITEM_ACK_SIZE + pos * 4; + else + addr = ZXDH_DTB_TAB_DOWN_VIR_ADDR_GET(dev_id, queue_id, index) + + ZXDH_DTB_ITEM_ACK_SIZE + pos * 4; + + memcpy((uint8_t *)(addr), p_data, len * 4); + + return 0; +} + +static uint32_t +zxdh_np_dtb_item_ack_rd(uint32_t dev_id, + uint32_t queue_id, + uint32_t dir_flag, + uint32_t index, + uint32_t pos, + uint32_t *p_data) +{ + uint64_t addr = 0; + uint32_t val = 0; + + if (dir_flag == 1) + addr = ZXDH_DTB_TAB_UP_VIR_ADDR_GET(dev_id, queue_id, index) + pos * 4; + else + addr = ZXDH_DTB_TAB_DOWN_VIR_ADDR_GET(dev_id, queue_id, index) + pos * 4; + + val = *((volatile uint32_t *)(addr)); + + *p_data = val; + + return 0; +} + +static uint32_t +zxdh_np_dtb_item_ack_wr(uint32_t dev_id, + uint32_t queue_id, + uint32_t dir_flag, + uint32_t index, + uint32_t pos, + uint32_t data) +{ + uint64_t addr = 0; + + if (dir_flag == 1) + addr = ZXDH_DTB_TAB_UP_VIR_ADDR_GET(dev_id, queue_id, index) + pos * 4; + else + addr = ZXDH_DTB_TAB_DOWN_VIR_ADDR_GET(dev_id, queue_id, index) + pos * 4; + + *((volatile uint32_t *)(addr)) = data; + + return 0; +} + +static uint32_t +zxdh_np_dtb_queue_item_info_set(uint32_t dev_id, + uint32_t queue_id, + ZXDH_DTB_QUEUE_ITEM_INFO_T *p_item_info) +{ + uint32_t rc = 0; + ZXDH_DTB_QUEUE_LEN_T dtb_len = {0}; + + dtb_len.cfg_dtb_cmd_type = p_item_info->cmd_type; + dtb_len.cfg_dtb_cmd_int_en = 
p_item_info->int_en; + dtb_len.cfg_queue_dtb_len = p_item_info->data_len; + + rc = zxdh_np_reg_write(dev_id, ZXDH_DTB_CFG_QUEUE_DTB_LEN, + 0, queue_id, (void *)&dtb_len); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_write"); + return 0; +} + +static uint32_t +zxdh_np_dtb_tab_down_info_set(uint32_t dev_id, + uint32_t queue_id, + uint32_t int_flag, + uint32_t data_len, + uint32_t *p_data, + uint32_t *p_item_index) +{ + uint32_t rc = 0; + uint32_t i = 0; + uint32_t queue_en = 0; + uint32_t ack_vale = 0; + uint32_t item_index = 0; + uint32_t unused_item_num = 0; + ZXDH_DTB_QUEUE_ITEM_INFO_T item_info = {0}; + + if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) { + PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id); + return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (data_len % 4 != 0) + return ZXDH_RC_DTB_PARA_INVALID; + + rc = zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en); + if (!queue_en) { + PMD_DRV_LOG(ERR, "the queue %d is not enable!,rc=%d", queue_id, rc); + return ZXDH_RC_DTB_QUEUE_NOT_ENABLE; + } + + rc = zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num); + if (unused_item_num == 0) + return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY; + + for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) { + item_index = ZXDH_DTB_TAB_DOWN_WR_INDEX_GET(dev_id, queue_id) % + ZXDH_DTB_QUEUE_ITEM_NUM_MAX; + + rc = zxdh_np_dtb_item_ack_rd(dev_id, queue_id, 0, + item_index, 0, &ack_vale); + + ZXDH_DTB_TAB_DOWN_WR_INDEX_GET(dev_id, queue_id)++; + + if ((ack_vale >> 8) == ZXDH_DTB_TAB_ACK_UNUSED_MASK) + break; + } + + if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX) + return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY; + + rc = zxdh_np_dtb_item_buff_wr(dev_id, queue_id, 0, + item_index, 0, data_len, p_data); + + rc = zxdh_np_dtb_item_ack_wr(dev_id, queue_id, 0, + item_index, 0, ZXDH_DTB_TAB_ACK_IS_USING_MASK); + + item_info.cmd_vld = 1; + item_info.cmd_type = 0; + item_info.int_en = int_flag; + item_info.data_len = data_len / 4; + item_info.data_hddr = 
((ZXDH_DTB_TAB_DOWN_PHY_ADDR_GET(dev_id, queue_id, + item_index) >> 4) >> 32) & 0xffffffff; + item_info.data_laddr = (ZXDH_DTB_TAB_DOWN_PHY_ADDR_GET(dev_id, queue_id, + item_index) >> 4) & 0xffffffff; + + rc = zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info); + *p_item_index = item_index; + + return 0; +} + +static uint32_t +zxdh_np_dtb_write_down_table_data(uint32_t dev_id, + uint32_t queue_id, + uint32_t down_table_len, + uint8_t *p_down_table_buff, + uint32_t *p_element_id) +{ + uint32_t rc = 0; + uint32_t dtb_interrupt_status = 0; + + dtb_interrupt_status = g_dpp_dtb_int_enable; + + rc = zxdh_np_dtb_tab_down_info_set(dev_id, + queue_id, + dtb_interrupt_status, + down_table_len / 4, + (uint32_t *)p_down_table_buff, + p_element_id); + return rc; +} + +int +zxdh_np_dtb_table_entry_write(uint32_t dev_id, + uint32_t queue_id, + uint32_t entrynum, + ZXDH_DTB_USER_ENTRY_T *down_entries) +{ + uint32_t rc = 0; + uint32_t entry_index = 0; + uint32_t sdt_no = 0; + uint32_t tbl_type = 0; + uint32_t element_id = 0xff; + uint32_t one_dtb_len = 0; + uint32_t dtb_len = 0; + uint32_t addr_offset = 0; + uint32_t max_size = 0; + uint8_t *p_data_buff = NULL; + + uint8_t *p_data_buff_ex = NULL; + ZXDH_DTB_LPM_ENTRY_T lpm_entry = {0}; + + uint8_t entry_cmd[ZXDH_DTB_TABLE_CMD_SIZE_BIT] = {0}; + uint8_t entry_data[ZXDH_ETCAM_WIDTH_MAX] = {0}; + ZXDH_DTB_USER_ENTRY_T *pentry = NULL; + ZXDH_DTB_ENTRY_T dtb_one_entry = {0}; + + p_data_buff = (uint8_t *)rte_malloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0); + ZXDH_COMM_CHECK_POINT(p_data_buff); + memset(p_data_buff, 0, ZXDH_DTB_TABLE_DATA_BUFF_SIZE); + + p_data_buff_ex = (uint8_t *)rte_malloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0); + ZXDH_COMM_CHECK_POINT_MEMORY_FREE(p_data_buff_ex, p_data_buff); + memset(p_data_buff_ex, 0, ZXDH_DTB_TABLE_DATA_BUFF_SIZE); + + memset((uint8_t *)&lpm_entry, 0x0, sizeof(ZXDH_DTB_LPM_ENTRY_T)); + memset((uint8_t *)&dtb_one_entry, 0x0, sizeof(ZXDH_DTB_ENTRY_T)); + memset(entry_cmd, 0x0, 
sizeof(entry_cmd)); + memset(entry_data, 0x0, sizeof(entry_data)); + dtb_one_entry.cmd = entry_cmd; + dtb_one_entry.data = entry_data; + + max_size = (ZXDH_DTB_TABLE_DATA_BUFF_SIZE / 16) - 1; + + for (entry_index = 0; entry_index < entrynum; entry_index++) { + pentry = down_entries + entry_index; + sdt_no = pentry->sdt_no; + tbl_type = zxdh_np_sdt_tbl_type_get(dev_id, sdt_no); + switch (tbl_type) { + case ZXDH_SDT_TBLT_ERAM: + { + rc = zxdh_np_dtb_eram_one_entry(dev_id, sdt_no, ZXDH_DTB_ITEM_ADD_OR_UPDATE, + pentry->p_entry_data, &one_dtb_len, &dtb_one_entry); + break; + } + default: + { + PMD_DRV_LOG(ERR, "SDT table_type[ %d ] is invalid!", tbl_type); + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + return 1; + } + } + + addr_offset = dtb_len * ZXDH_DTB_LEN_POS_SETP; + dtb_len += one_dtb_len; + if (dtb_len > max_size) { + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + PMD_DRV_LOG(ERR, " %s error dtb_len>%u!", __func__, + max_size); + return ZXDH_RC_DTB_DOWN_LEN_INVALID; + } + rc = zxdh_np_dtb_data_write(p_data_buff, addr_offset, &dtb_one_entry); + memset(entry_cmd, 0x0, sizeof(entry_cmd)); + memset(entry_data, 0x0, sizeof(entry_data)); + } + + if (dtb_len == 0) { + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + return ZXDH_RC_DTB_DOWN_LEN_INVALID; + } + + rc = zxdh_np_dtb_write_down_table_data(dev_id, + queue_id, + dtb_len * 16, + p_data_buff, + &element_id); + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + + return rc; +} diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h index dc0e867827..02c27df887 100644 --- a/drivers/net/zxdh/zxdh_np.h +++ b/drivers/net/zxdh/zxdh_np.h @@ -7,6 +7,9 @@ #include +#define ZXDH_DISABLE (0) +#define ZXDH_ENABLE (1) + #define ZXDH_PORT_NAME_MAX (32) #define ZXDH_DEV_CHANNEL_MAX (2) #define ZXDH_DEV_SDT_ID_MAX (256U) @@ -52,6 +55,94 @@ #define ZXDH_ACL_TBL_ID_NUM (8U) #define ZXDH_ACL_BLOCK_NUM (8U) +#define ZXDH_SMMU0_READ_REG_MAX_NUM (4) + +#define ZXDH_DTB_ITEM_ACK_SIZE (16) +#define 
ZXDH_DTB_ITEM_BUFF_SIZE (16 * 1024) +#define ZXDH_DTB_ITEM_SIZE (16 + 16 * 1024) +#define ZXDH_DTB_TAB_UP_SIZE ((16 + 16 * 1024) * 32) +#define ZXDH_DTB_TAB_DOWN_SIZE ((16 + 16 * 1024) * 32) + +#define ZXDH_DTB_TAB_UP_ACK_VLD_MASK (0x555555) +#define ZXDH_DTB_TAB_DOWN_ACK_VLD_MASK (0x5a5a5a) +#define ZXDH_DTB_TAB_ACK_IS_USING_MASK (0x11111100) +#define ZXDH_DTB_TAB_ACK_UNUSED_MASK (0x0) +#define ZXDH_DTB_TAB_ACK_SUCCESS_MASK (0xff) +#define ZXDH_DTB_TAB_ACK_FAILED_MASK (0x1) +#define ZXDH_DTB_TAB_ACK_CHECK_VALUE (0x12345678) + +#define ZXDH_DTB_TAB_ACK_VLD_SHIFT (104) +#define ZXDH_DTB_TAB_ACK_STATUS_SHIFT (96) +#define ZXDH_DTB_LEN_POS_SETP (16) +#define ZXDH_DTB_ITEM_ADD_OR_UPDATE (0) +#define ZXDH_DTB_ITEM_DELETE (1) + +#define ZXDH_ETCAM_LEN_SIZE (6) +#define ZXDH_ETCAM_BLOCK_NUM (8) +#define ZXDH_ETCAM_TBLID_NUM (8) +#define ZXDH_ETCAM_RAM_NUM (8) +#define ZXDH_ETCAM_RAM_WIDTH (80U) +#define ZXDH_ETCAM_WR_MASK_MAX (((uint32_t)1 << ZXDH_ETCAM_RAM_NUM) - 1) +#define ZXDH_ETCAM_WIDTH_MIN (ZXDH_ETCAM_RAM_WIDTH) +#define ZXDH_ETCAM_WIDTH_MAX (ZXDH_ETCAM_RAM_NUM * ZXDH_ETCAM_RAM_WIDTH) + +#define ZXDH_DTB_TABLE_DATA_BUFF_SIZE (uint16_t)(1024 * 16) +#define ZXDH_DTB_TABLE_CMD_SIZE_BIT (128) + +#define ZXDH_SE_SMMU0_ERAM_BLOCK_NUM (32) +#define ZXDH_SE_SMMU0_ERAM_ADDR_NUM_PER_BLOCK (0x4000) +#define ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL \ + (ZXDH_SE_SMMU0_ERAM_BLOCK_NUM * ZXDH_SE_SMMU0_ERAM_ADDR_NUM_PER_BLOCK) + +/* error code */ +#define ZXDH_RC_BASE (0x1000U) +#define ZXDH_PARAMETER_CHK_BASE (ZXDH_RC_BASE | 0x200) +#define ZXDH_PAR_CHK_POINT_NULL (ZXDH_PARAMETER_CHK_BASE | 0x001) +#define ZXDH_PAR_CHK_ARGIN_ZERO (ZXDH_PARAMETER_CHK_BASE | 0x002) +#define ZXDH_PAR_CHK_ARGIN_OVERFLOW (ZXDH_PARAMETER_CHK_BASE | 0x003) +#define ZXDH_PAR_CHK_ARGIN_ERROR (ZXDH_PARAMETER_CHK_BASE | 0x004) +#define ZXDH_PAR_CHK_INVALID_INDEX (ZXDH_PARAMETER_CHK_BASE | 0x005) +#define ZXDH_PAR_CHK_INVALID_RANGE (ZXDH_PARAMETER_CHK_BASE | 0x006) +#define ZXDH_PAR_CHK_INVALID_DEV_ID
(ZXDH_PARAMETER_CHK_BASE | 0x007) +#define ZXDH_PAR_CHK_INVALID_PARA (ZXDH_PARAMETER_CHK_BASE | 0x008) + +#define ZXDH_ERAM128_BADDR_MASK (0x3FFFF80) + +#define ZXDH_DTB_TABLE_MODE_ERAM (0) +#define ZXDH_DTB_TABLE_MODE_DDR (1) +#define ZXDH_DTB_TABLE_MODE_ZCAM (2) +#define ZXDH_DTB_TABLE_MODE_ETCAM (3) +#define ZXDH_DTB_TABLE_MODE_MC_HASH (4) +#define ZXDH_DTB_TABLE_VALID (1) + +/* DTB module error code */ +#define ZXDH_RC_DTB_BASE (0xd00) +#define ZXDH_RC_DTB_MGR_EXIST (ZXDH_RC_DTB_BASE | 0x0) +#define ZXDH_RC_DTB_MGR_NOT_EXIST (ZXDH_RC_DTB_BASE | 0x1) +#define ZXDH_RC_DTB_QUEUE_RES_EMPTY (ZXDH_RC_DTB_BASE | 0x2) +#define ZXDH_RC_DTB_QUEUE_BUFF_SIZE_ERR (ZXDH_RC_DTB_BASE | 0x3) +#define ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY (ZXDH_RC_DTB_BASE | 0x4) +#define ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY (ZXDH_RC_DTB_BASE | 0x5) +#define ZXDH_RC_DTB_TAB_UP_BUFF_EMPTY (ZXDH_RC_DTB_BASE | 0x6) +#define ZXDH_RC_DTB_TAB_DOWN_BUFF_EMPTY (ZXDH_RC_DTB_BASE | 0x7) +#define ZXDH_RC_DTB_TAB_UP_TRANS_ERR (ZXDH_RC_DTB_BASE | 0x8) +#define ZXDH_RC_DTB_TAB_DOWN_TRANS_ERR (ZXDH_RC_DTB_BASE | 0x9) +#define ZXDH_RC_DTB_QUEUE_IS_WORKING (ZXDH_RC_DTB_BASE | 0xa) +#define ZXDH_RC_DTB_QUEUE_IS_NOT_INIT (ZXDH_RC_DTB_BASE | 0xb) +#define ZXDH_RC_DTB_MEMORY_ALLOC_ERR (ZXDH_RC_DTB_BASE | 0xc) +#define ZXDH_RC_DTB_PARA_INVALID (ZXDH_RC_DTB_BASE | 0xd) +#define ZXDH_RC_DMA_RANGE_INVALID (ZXDH_RC_DTB_BASE | 0xe) +#define ZXDH_RC_DMA_RCV_DATA_EMPTY (ZXDH_RC_DTB_BASE | 0xf) +#define ZXDH_RC_DTB_LPM_INSERT_FAIL (ZXDH_RC_DTB_BASE | 0x10) +#define ZXDH_RC_DTB_LPM_DELETE_FAIL (ZXDH_RC_DTB_BASE | 0x11) +#define ZXDH_RC_DTB_DOWN_LEN_INVALID (ZXDH_RC_DTB_BASE | 0x12) +#define ZXDH_RC_DTB_DOWN_HASH_CONFLICT (ZXDH_RC_DTB_BASE | 0x13) +#define ZXDH_RC_DTB_QUEUE_NOT_ALLOC (ZXDH_RC_DTB_BASE | 0x14) +#define ZXDH_RC_DTB_QUEUE_NAME_ERROR (ZXDH_RC_DTB_BASE | 0x15) +#define ZXDH_RC_DTB_DUMP_SIZE_SMALL (ZXDH_RC_DTB_BASE | 0x16) +#define ZXDH_RC_DTB_SEARCH_VPORT_QUEUE_ZERO (ZXDH_RC_DTB_BASE | 0x17) +#define 
ZXDH_RC_DTB_QUEUE_NOT_ENABLE (ZXDH_RC_DTB_BASE | 0x18) + typedef enum zxdh_module_init_e { ZXDH_MODULE_INIT_NPPU = 0, ZXDH_MODULE_INIT_PPU, @@ -260,7 +351,6 @@ typedef struct zxdh_dtb4k_dtb_enq_cfg_epid_v_func_num_0_127_t { uint32_t cfg_vfunc_active; } ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T; - typedef uint32_t (*ZXDH_REG_WRITE)(uint32_t dev_id, uint32_t addr, uint32_t *p_data); typedef uint32_t (*ZXDH_REG_READ)(uint32_t dev_id, uint32_t addr, uint32_t *p_data); @@ -299,7 +389,127 @@ typedef struct zxdh_tlb_mgr_t { uint32_t pa_width; } ZXDH_TLB_MGR_T; +typedef enum zxdh_eram128_tbl_mode_e { + ZXDH_ERAM128_TBL_1b = 0, + ZXDH_ERAM128_TBL_32b = 1, + ZXDH_ERAM128_TBL_64b = 2, + ZXDH_ERAM128_TBL_128b = 3, + ZXDH_ERAM128_TBL_2b = 4, + ZXDH_ERAM128_TBL_4b = 5, + ZXDH_ERAM128_TBL_8b = 6, + ZXDH_ERAM128_TBL_16b = 7 +} ZXDH_ERAM128_TBL_MODE_E; + +typedef enum zxdh_eram128_opr_mode_e { + ZXDH_ERAM128_OPR_128b = 0, + ZXDH_ERAM128_OPR_64b = 1, + ZXDH_ERAM128_OPR_1b = 2, + ZXDH_ERAM128_OPR_32b = 3 + +} ZXDH_ERAM128_OPR_MODE_E; + +typedef enum zxdh_dtb_table_info_e { + ZXDH_DTB_TABLE_DDR = 0, + ZXDH_DTB_TABLE_ERAM_1 = 1, + ZXDH_DTB_TABLE_ERAM_64 = 2, + ZXDH_DTB_TABLE_ERAM_128 = 3, + ZXDH_DTB_TABLE_ZCAM = 4, + ZXDH_DTB_TABLE_ETCAM = 5, + ZXDH_DTB_TABLE_MC_HASH = 6, + ZXDH_DTB_TABLE_ENUM_MAX +} ZXDH_DTB_TABLE_INFO_E; + +typedef enum zxdh_sdt_table_type_e { + ZXDH_SDT_TBLT_INVALID = 0, + ZXDH_SDT_TBLT_ERAM = 1, + ZXDH_SDT_TBLT_DDR3 = 2, + ZXDH_SDT_TBLT_HASH = 3, + ZXDH_SDT_TBLT_LPM = 4, + ZXDH_SDT_TBLT_ETCAM = 5, + ZXDH_SDT_TBLT_PORTTBL = 6, + ZXDH_SDT_TBLT_MAX = 7, +} ZXDH_SDT_TABLE_TYPE_E; + +typedef struct zxdh_dtb_lpm_entry_t { + uint32_t dtb_len0; + uint8_t *p_data_buff0; + uint32_t dtb_len1; + uint8_t *p_data_buff1; +} ZXDH_DTB_LPM_ENTRY_T; + +typedef struct zxdh_dtb_entry_t { + uint8_t *cmd; + uint8_t *data; + uint32_t data_in_cmd_flag; + uint32_t data_size; +} ZXDH_DTB_ENTRY_T; + +typedef struct zxdh_dtb_eram_table_form_t { + uint32_t valid; + uint32_t type_mode; + 
uint32_t data_mode; + uint32_t cpu_wr; + uint32_t cpu_rd; + uint32_t cpu_rd_mode; + uint32_t addr; + uint32_t data_h; + uint32_t data_l; +} ZXDH_DTB_ERAM_TABLE_FORM_T; + +typedef struct zxdh_sdt_tbl_eram_t { + uint32_t table_type; + uint32_t eram_mode; + uint32_t eram_base_addr; + uint32_t eram_table_depth; + uint32_t eram_clutch_en; +} ZXDH_SDTTBL_ERAM_T; + +typedef union zxdh_endian_u { + unsigned int a; + unsigned char b; +} ZXDH_ENDIAN_U; + +typedef struct zxdh_dtb_field_t { + const char *p_name; + uint16_t lsb_pos; + uint16_t len; +} ZXDH_DTB_FIELD_T; + +typedef struct zxdh_dtb_table_t { + const char *table_type; + uint32_t table_no; + uint32_t field_num; + ZXDH_DTB_FIELD_T *p_fields; +} ZXDH_DTB_TABLE_T; + +typedef struct zxdh_dtb_queue_item_info_t { + uint32_t cmd_vld; + uint32_t cmd_type; + uint32_t int_en; + uint32_t data_len; + uint32_t data_laddr; + uint32_t data_hddr; +} ZXDH_DTB_QUEUE_ITEM_INFO_T; + +typedef struct zxdh_dtb_queue_len_t { + uint32_t cfg_dtb_cmd_type; + uint32_t cfg_dtb_cmd_int_en; + uint32_t cfg_queue_dtb_len; +} ZXDH_DTB_QUEUE_LEN_T; + +typedef struct zxdh_dtb_eram_entry_info_t { + uint32_t index; + uint32_t *p_data; +} ZXDH_DTB_ERAM_ENTRY_INFO_T; + +typedef struct zxdh_dtb_user_entry_t { + uint32_t sdt_no; + void *p_entry_data; +} ZXDH_DTB_USER_ENTRY_T; + int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl); int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id); +int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id, + uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *down_entries); #endif /* ZXDH_NP_H */ diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h index d6487a574f..e3f13cb17d 100644 --- a/drivers/net/zxdh/zxdh_pci.h +++ b/drivers/net/zxdh/zxdh_pci.h @@ -12,6 +12,8 @@ #include "zxdh_ethdev.h" +#define ZXDH_PF_PCIE_ID(pcie_id) (((pcie_id) & 0xff00) | 1 << 11) + enum zxdh_msix_status { ZXDH_MSIX_NONE = 0, ZXDH_MSIX_DISABLED = 1, diff --git 
a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c new file mode 100644 index 0000000000..4284fefe3a --- /dev/null +++ b/drivers/net/zxdh/zxdh_tables.c @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include "zxdh_ethdev.h" +#include "zxdh_msg.h" +#include "zxdh_np.h" +#include "zxdh_tables.h" +#include "zxdh_logs.h" + +#define ZXDH_SDT_VPORT_ATT_TABLE 1 +#define ZXDH_SDT_PANEL_ATT_TABLE 2 + +int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) +{ + int ret = 0; + + ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vfid, (uint32_t *)port_attr}; + ZXDH_DTB_USER_ENTRY_T user_entry_write = {ZXDH_SDT_VPORT_ATT_TABLE, (void *)&entry}; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &user_entry_write); + if (ret != 0) + PMD_DRV_LOG(ERR, "write vport_att failed vfid:%d failed", vfid); + + return ret; +} + +int +zxdh_port_attr_init(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_port_attr_table port_attr = {0}; + struct zxdh_msg_info msg_info = {0}; + int ret = 0; + + if (hw->is_pf) { + port_attr.hit_flag = 1; + port_attr.phy_port = hw->phyport; + port_attr.pf_vfid = zxdh_vport_to_vfid(hw->vport); + port_attr.rss_enable = 0; + if (!hw->is_pf) + port_attr.is_vf = 1; + + port_attr.mtu = dev->data->mtu; + port_attr.mtu_enable = 1; + port_attr.is_up = 0; + if (!port_attr.rss_enable) + port_attr.port_base_qid = 0; + + ret = zxdh_set_port_attr(hw->vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "write port_attr failed"); + return -EAGAIN; + } + } else { + struct zxdh_vf_init_msg *vf_init_msg = &msg_info.data.vf_init_msg; + + zxdh_msg_head_build(hw, ZXDH_VF_PORT_INIT, &msg_info); + msg_info.msg_head.msg_type = ZXDH_VF_PORT_INIT; + vf_init_msg->link_up = 1; + vf_init_msg->base_qid = 0; + vf_init_msg->rss_enable = 0; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, 
"vf port_init failed"); + return -EAGAIN; + } + } + return ret; +}; + +int zxdh_panel_table_init(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (!hw->is_pf) + return 0; + + struct zxdh_panel_table panel; + + memset(&panel, 0, sizeof(panel)); + panel.hit_flag = 1; + panel.pf_vfid = zxdh_vport_to_vfid(hw->vport); + panel.mtu_enable = 1; + panel.mtu = dev->data->mtu; + + ZXDH_DTB_ERAM_ENTRY_INFO_T panel_entry = { + .index = hw->phyport, + .p_data = (uint32_t *)&panel + }; + ZXDH_DTB_USER_ENTRY_T entry = { + .sdt_no = ZXDH_SDT_PANEL_ATT_TABLE, + .p_entry_data = (void *)&panel_entry + }; + int ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, g_dtb_data.queueid, 1, &entry); + + if (ret) { + PMD_DRV_LOG(ERR, "Insert eram-panel failed, code:%u", ret); + return -EAGAIN; + } + + return ret; +} diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h new file mode 100644 index 0000000000..5d34af2f05 --- /dev/null +++ b/drivers/net/zxdh/zxdh_tables.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_TABLES_H +#define ZXDH_TABLES_H + +#include + +extern struct zxdh_dtb_shared_data g_dtb_data; + +#define ZXDH_DEVICE_NO 0 + +struct zxdh_port_attr_table { +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint8_t byte4_rsv1: 1; + uint8_t ingress_meter_enable: 1; + uint8_t egress_meter_enable: 1; + uint8_t byte4_rsv2: 2; + uint8_t fd_enable: 1; + uint8_t vepa_enable: 1; + uint8_t spoof_check_enable: 1; + + uint8_t inline_sec_offload: 1; + uint8_t ovs_enable: 1; + uint8_t lag_enable: 1; + uint8_t is_passthrough: 1; + uint8_t is_vf: 1; + uint8_t virtion_version: 2; + uint8_t virtio_enable: 1; + + uint8_t accelerator_offload_flag: 1; + uint8_t lro_offload: 1; + uint8_t ip_fragment_offload: 1; + uint8_t tcp_udp_checksum_offload: 1; + uint8_t ip_checksum_offload: 1; + uint8_t outer_ip_checksum_offload: 1; + uint8_t is_up: 1; + uint8_t rsv1: 1; + + uint8_t rsv3 : 1; + 
uint8_t rdma_offload_enable: 1; + uint8_t vlan_filter_enable: 1; + uint8_t vlan_strip_offload: 1; + uint8_t qinq_valn_strip_offload: 1; + uint8_t rss_enable: 1; + uint8_t mtu_enable: 1; + uint8_t hit_flag: 1; + + uint16_t mtu; + + uint16_t port_base_qid : 12; + uint16_t hash_search_index : 3; + uint16_t rsv: 1; + + uint8_t rss_hash_factor; + + uint8_t hash_alg: 4; + uint8_t phy_port: 4; + + uint16_t lag_id : 3; + uint16_t pf_vfid : 11; + uint16_t ingress_tm_enable : 1; + uint16_t egress_tm_enable : 1; + + uint16_t tpid; + + uint16_t vhca : 10; + uint16_t uplink_port : 6; +#else + uint8_t rsv3 : 1; + uint8_t rdma_offload_enable: 1; + uint8_t vlan_filter_enable: 1; + uint8_t vlan_strip_offload: 1; + uint8_t qinq_valn_strip_offload: 1; + uint8_t rss_enable: 1; + uint8_t mtu_enable: 1; + uint8_t hit_flag: 1; + + uint8_t accelerator_offload_flag: 1; + uint8_t lro_offload: 1; + uint8_t ip_fragment_offload: 1; + uint8_t tcp_udp_checksum_offload: 1; + uint8_t ip_checksum_offload: 1; + uint8_t outer_ip_checksum_offload: 1; + uint8_t is_up: 1; + uint8_t rsv1: 1; + + uint8_t inline_sec_offload: 1; + uint8_t ovs_enable: 1; + uint8_t lag_enable: 1; + uint8_t is_passthrough: 1; + uint8_t is_vf: 1; + uint8_t virtion_version: 2; + uint8_t virtio_enable: 1; + + uint8_t byte4_rsv1: 1; + uint8_t ingress_meter_enable: 1; + uint8_t egress_meter_enable: 1; + uint8_t byte4_rsv2: 2; + uint8_t fd_enable: 1; + uint8_t vepa_enable: 1; + uint8_t spoof_check_enable: 1; + + uint16_t port_base_qid : 12; + uint16_t hash_search_index : 3; + uint16_t rsv: 1; + + uint16_t mtu; + + uint16_t lag_id : 3; + uint16_t pf_vfid : 11; + uint16_t ingress_tm_enable : 1; + uint16_t egress_tm_enable : 1; + + uint8_t hash_alg: 4; + uint8_t phy_port: 4; + + uint8_t rss_hash_factor; + + uint16_t tpid; + + uint16_t vhca : 10; + uint16_t uplink_port : 6; +#endif +}; + +struct zxdh_panel_table { + uint16_t port_vfid_1588 : 11, + rsv2 : 5; + uint16_t pf_vfid : 11, + rsv1 : 1, + enable_1588_tc : 2, + trust_mode : 1, + 
hit_flag : 1; + uint32_t mtu : 16, + mtu_enable : 1, + rsv : 3, + tm_base_queue : 12; + uint32_t rsv_1; + uint32_t rsv_2; +}; /* 16B */ + +int zxdh_port_attr_init(struct rte_eth_dev *dev); +int zxdh_panel_table_init(struct rte_eth_dev *dev); +int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); + +#endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:04 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149053 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id DF54845E16; Fri, 6 Dec 2024 07:05:29 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2A40D40E4C; Fri, 6 Dec 2024 07:04:55 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.35]) by mails.dpdk.org (Postfix) with ESMTP id 76EA040E13 for ; Fri, 6 Dec 2024 07:04:52 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LM74z1Mz5B1L5; Fri, 6 Dec 2024 14:04:51 +0800 (CST) Received: from szxlzmapp01.zte.com.cn ([10.5.231.85]) by mse-fl2.zte.com.cn with SMTP id 4B664O8J033167; Fri, 6 Dec 2024 14:04:24 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:26 +0800 X-Zmail-TransId: 3e81675293e9001-71397 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 04/15] net/zxdh: port tables unint implementations Date: Fri, 6 Dec 2024 13:57:04 +0800 Message-ID: <20241206055715.506961-5-wang.junlong1@zte.com.cn> X-Mailer: 
git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664O8J033167 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529403.001/4Y4LM74z1Mz5B1L5 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org delete port tables in host. Signed-off-by: Junlong Wang --- drivers/net/zxdh/zxdh_ethdev.c | 19 ++++++ drivers/net/zxdh/zxdh_msg.h | 1 + drivers/net/zxdh/zxdh_np.c | 113 +++++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_np.h | 9 +++ drivers/net/zxdh/zxdh_tables.c | 33 +++++++++- drivers/net/zxdh/zxdh_tables.h | 1 + 6 files changed, 175 insertions(+), 1 deletion(-) -- 2.27.0 diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 9f3a5bcf9c..63eac7781c 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -842,6 +842,19 @@ zxdh_dev_configure(struct rte_eth_dev *dev) return ret; } +static int +zxdh_tables_uninit(struct rte_eth_dev *dev) +{ + int ret = 0; + + ret = zxdh_port_attr_uninit(dev); + if (ret) { + PMD_DRV_LOG(ERR, "zxdh_port_attr_uninit failed"); + return ret; + } + return ret; +} + static void zxdh_np_dtb_data_res_free(struct zxdh_hw *hw) { @@ -893,6 +906,12 @@ zxdh_dev_close(struct rte_eth_dev *dev) struct zxdh_hw *hw = dev->data->dev_private; int ret = 0; + ret = zxdh_tables_uninit(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "%s :unint port %s failed ", __func__, dev->device->name); + return -1; + } + zxdh_intr_release(dev); zxdh_np_uninit(dev); zxdh_pci_reset(hw); diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 35ed5d1a1c..9997417f28 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ 
-167,6 +167,7 @@ enum pciebar_layout_type { enum zxdh_msg_type { ZXDH_NULL = 0, ZXDH_VF_PORT_INIT = 1, + ZXDH_VF_PORT_UNINIT = 2, ZXDH_MSG_TYPE_END, } __rte_packed; diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c index 6b8168da6f..242a6901ed 100644 --- a/drivers/net/zxdh/zxdh_np.c +++ b/drivers/net/zxdh/zxdh_np.c @@ -22,6 +22,7 @@ ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX]; ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; ZXDH_RISCV_DTB_MGR *p_riscv_dtb_queue_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; +ZXDH_SDT_TBL_DATA_T g_sdt_info[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX]; ZXDH_REG_T g_dpp_reg_info[4] = {0}; ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4] = {0}; @@ -1497,3 +1498,115 @@ zxdh_np_dtb_table_entry_write(uint32_t dev_id, return rc; } + +static uint32_t +zxdh_np_sdt_tbl_data_get(uint32_t dev_id, uint32_t sdt_no, ZXDH_SDT_TBL_DATA_T *p_sdt_data) +{ + uint32_t rc = 0; + + p_sdt_data->data_high32 = g_sdt_info[dev_id][sdt_no].data_high32; + p_sdt_data->data_low32 = g_sdt_info[dev_id][sdt_no].data_low32; + + return rc; +} + +int +zxdh_np_dtb_table_entry_delete(uint32_t dev_id, + uint32_t queue_id, + uint32_t entrynum, + ZXDH_DTB_USER_ENTRY_T *delete_entries) +{ + uint32_t rc = 0; + uint32_t entry_index = 0; + uint32_t sdt_no = 0; + uint32_t tbl_type = 0; + uint32_t element_id = 0xff; + uint32_t one_dtb_len = 0; + uint32_t dtb_len = 0; + uint32_t addr_offset = 0; + uint32_t max_size = 0; + uint8_t *p_data_buff = NULL; + uint8_t *p_data_buff_ex = NULL; + ZXDH_DTB_LPM_ENTRY_T lpm_entry = {0}; + + uint8_t entry_cmd[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0}; + uint8_t entry_data[ZXDH_ETCAM_WIDTH_MAX / 8] = {0}; + ZXDH_SDT_TBL_DATA_T sdt_tbl = {0}; + ZXDH_DTB_USER_ENTRY_T *pentry = NULL; + ZXDH_DTB_ENTRY_T dtb_one_entry = {0}; + + ZXDH_COMM_CHECK_POINT(delete_entries); + + p_data_buff = (uint8_t *)rte_malloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0); + 
ZXDH_COMM_CHECK_POINT(p_data_buff); + memset(p_data_buff, 0, ZXDH_DTB_TABLE_DATA_BUFF_SIZE); + + p_data_buff_ex = + (uint8_t *)rte_malloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE * sizeof(uint8_t), 0); + memset(p_data_buff_ex, 0, ZXDH_DTB_TABLE_DATA_BUFF_SIZE); + + memset((uint8_t *)&lpm_entry, 0x0, sizeof(ZXDH_DTB_LPM_ENTRY_T)); + + memset((uint8_t *)&dtb_one_entry, 0x0, sizeof(ZXDH_DTB_ENTRY_T)); + memset(entry_cmd, 0x0, sizeof(entry_cmd)); + memset(entry_data, 0x0, sizeof(entry_data)); + dtb_one_entry.cmd = entry_cmd; + dtb_one_entry.data = entry_data; + + max_size = (ZXDH_DTB_TABLE_DATA_BUFF_SIZE / 16) - 1; + + for (entry_index = 0; entry_index < entrynum; entry_index++) { + pentry = delete_entries + entry_index; + + sdt_no = pentry->sdt_no; + rc = zxdh_np_sdt_tbl_data_get(dev_id, sdt_no, &sdt_tbl); + switch (tbl_type) { + case ZXDH_SDT_TBLT_ERAM: + { + rc = zxdh_np_dtb_eram_one_entry(dev_id, sdt_no, ZXDH_DTB_ITEM_DELETE, + pentry->p_entry_data, &one_dtb_len, &dtb_one_entry); + break; + } + + default: + { + PMD_DRV_LOG(ERR, "SDT table_type[ %d ] is invalid!", tbl_type); + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + return 1; + } + } + + addr_offset = dtb_len * ZXDH_DTB_LEN_POS_SETP; + dtb_len += one_dtb_len; + if (dtb_len > max_size) { + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + PMD_DRV_LOG(ERR, " %s error dtb_len>%u!", __func__, + max_size); + return ZXDH_RC_DTB_DOWN_LEN_INVALID; + } + + rc = zxdh_np_dtb_data_write(p_data_buff, addr_offset, &dtb_one_entry); + memset(entry_cmd, 0x0, sizeof(entry_cmd)); + memset(entry_data, 0x0, sizeof(entry_data)); + } + + if (dtb_len == 0) { + rte_free(p_data_buff); + rte_free(p_data_buff_ex); + return ZXDH_RC_DTB_DOWN_LEN_INVALID; + } + + rc = zxdh_np_dtb_write_down_table_data(dev_id, + queue_id, + dtb_len * 16, + p_data_buff, + &element_id); + rte_free(p_data_buff); + ZXDH_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, + "dpp_dtb_write_down_table_data", p_data_buff_ex); + + rte_free(p_data_buff_ex); + return 0; +} 
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h index 02c27df887..3cb9580254 100644 --- a/drivers/net/zxdh/zxdh_np.h +++ b/drivers/net/zxdh/zxdh_np.h @@ -54,6 +54,8 @@ #define ZXDH_ACL_TBL_ID_MAX (7) #define ZXDH_ACL_TBL_ID_NUM (8U) #define ZXDH_ACL_BLOCK_NUM (8U) +#define ZXDH_SDT_H_TBL_TYPE_BT_POS (29) +#define ZXDH_SDT_H_TBL_TYPE_BT_LEN (3) #define ZXDH_SMMU0_READ_REG_MAX_NUM (4) @@ -507,9 +509,16 @@ typedef struct zxdh_dtb_user_entry_t { void *p_entry_data; } ZXDH_DTB_USER_ENTRY_T; +typedef struct zxdh_sdt_tbl_data_t { + uint32_t data_high32; + uint32_t data_low32; +} ZXDH_SDT_TBL_DATA_T; + int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl); int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id); int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id, uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *down_entries); +int zxdh_np_dtb_table_entry_delete(uint32_t dev_id, uint32_t queue_id, + uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *delete_entries); #endif /* ZXDH_NP_H */ diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index 4284fefe3a..e28823c657 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -70,7 +70,38 @@ zxdh_port_attr_init(struct rte_eth_dev *dev) return ret; }; -int zxdh_panel_table_init(struct rte_eth_dev *dev) +int +zxdh_port_attr_uninit(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + struct zxdh_port_attr_table port_attr = {0}; + int ret = 0; + + if (hw->is_pf == 1) { + ZXDH_DTB_ERAM_ENTRY_INFO_T port_attr_entry = {hw->vfid, (uint32_t *)&port_attr}; + ZXDH_DTB_USER_ENTRY_T entry = { + .sdt_no = ZXDH_SDT_VPORT_ATT_TABLE, + .p_entry_data = (void *)&port_attr_entry + }; + ret = zxdh_np_dtb_table_entry_delete(ZXDH_DEVICE_NO, g_dtb_data.queueid, 1, &entry); + if (ret) { + PMD_DRV_LOG(ERR, "delete port attr table failed"); + return -ret; + } + } else { 
+ zxdh_msg_head_build(hw, ZXDH_VF_PORT_UNINIT, &msg_info); + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "vf port tables uninit failed"); + return -ret; + } + } + return ret; +} + +int +zxdh_panel_table_init(struct rte_eth_dev *dev) { struct zxdh_hw *hw = dev->data->dev_private; diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index 5d34af2f05..5e9b36faee 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -144,5 +144,6 @@ struct zxdh_panel_table { int zxdh_port_attr_init(struct rte_eth_dev *dev); int zxdh_panel_table_init(struct rte_eth_dev *dev); int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); +int zxdh_port_attr_uninit(struct rte_eth_dev *dev); #endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:05 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149050 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 86C8D45E16; Fri, 6 Dec 2024 07:04:45 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1DDF9402C9; Fri, 6 Dec 2024 07:04:45 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id CEEA3402A8 for ; Fri, 6 Dec 2024 07:04:42 +0100 (CET) Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LLx4GZbz8RTZR; Fri, 6 Dec 2024 14:04:41 +0800 (CST) Received: from szxlzmapp03.zte.com.cn ([10.5.231.207]) by mse-fl1.zte.com.cn with SMTP id 4B664OdE083481; Fri, 6 
Dec 2024 14:04:24 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:26 +0800 X-Zmail-TransId: 3e81675293ea001-7139a From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 05/15] net/zxdh: rx/tx queue setup and intr enable Date: Fri, 6 Dec 2024 13:57:05 +0800 Message-ID: <20241206055715.506961-6-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl1.zte.com.cn 4B664OdE083481 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 675293F9.003/4Y4LLx4GZbz8RTZR X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org rx/tx queue setup and intr enable implementations. 
Signed-off-by: Junlong Wang --- drivers/net/zxdh/zxdh_ethdev.c | 4 + drivers/net/zxdh/zxdh_queue.c | 149 +++++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_queue.h | 33 ++++++++ 3 files changed, 186 insertions(+) -- 2.27.0 diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 63eac7781c..f123e05ccf 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -934,6 +934,10 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .dev_configure = zxdh_dev_configure, .dev_close = zxdh_dev_close, .dev_infos_get = zxdh_dev_infos_get, + .rx_queue_setup = zxdh_dev_rx_queue_setup, + .tx_queue_setup = zxdh_dev_tx_queue_setup, + .rx_queue_intr_enable = zxdh_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = zxdh_dev_rx_queue_intr_disable, }; static int32_t diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c index b4ef90ea36..af21f046ad 100644 --- a/drivers/net/zxdh/zxdh_queue.c +++ b/drivers/net/zxdh/zxdh_queue.c @@ -12,6 +12,11 @@ #include "zxdh_common.h" #include "zxdh_msg.h" +#define ZXDH_MBUF_MIN_SIZE sizeof(struct zxdh_net_hdr_dl) +#define ZXDH_MBUF_SIZE_4K 4096 +#define ZXDH_RX_FREE_THRESH 32 +#define ZXDH_TX_FREE_THRESH 32 + struct rte_mbuf * zxdh_queue_detach_unused(struct zxdh_virtqueue *vq) { @@ -125,3 +130,147 @@ zxdh_free_queues(struct rte_eth_dev *dev) return 0; } + +static int +zxdh_check_mempool(struct rte_mempool *mp, uint16_t offset, uint16_t min_length) +{ + uint16_t data_room_size; + + if (mp == NULL) + return -EINVAL; + data_room_size = rte_pktmbuf_data_room_size(mp); + if (data_room_size < offset + min_length) { + PMD_RX_LOG(ERR, + "%s mbuf_data_room_size %u < %u (%u + %u)", + mp->name, data_room_size, + offset + min_length, offset, min_length); + return -EINVAL; + } + return 0; +} + +int32_t +zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + uint32_t socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, + struct 
rte_mempool *mp) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_RQ_QUEUE_IDX; + struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx]; + int32_t ret = 0; + + if (rx_conf->rx_deferred_start) { + PMD_RX_LOG(ERR, "Rx deferred start is not supported"); + return -EINVAL; + } + uint16_t rx_free_thresh = rx_conf->rx_free_thresh; + + if (rx_free_thresh == 0) + rx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_RX_FREE_THRESH); + + /* rx_free_thresh must be multiples of four. */ + if (rx_free_thresh & 0x3) { + PMD_RX_LOG(ERR, "(rx_free_thresh=%u port=%u queue=%u)", + rx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + /* rx_free_thresh must be less than the number of RX entries */ + if (rx_free_thresh >= vq->vq_nentries) { + PMD_RX_LOG(ERR, "RX entries (%u). (rx_free_thresh=%u port=%u queue=%u)", + vq->vq_nentries, rx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + vq->vq_free_thresh = rx_free_thresh; + nb_desc = ZXDH_QUEUE_DEPTH; + + vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); + struct zxdh_virtnet_rx *rxvq = &vq->rxq; + + rxvq->queue_id = vtpci_logic_qidx; + + int mbuf_min_size = ZXDH_MBUF_MIN_SIZE; + + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) + mbuf_min_size = ZXDH_MBUF_SIZE_4K; + + ret = zxdh_check_mempool(mp, RTE_PKTMBUF_HEADROOM, mbuf_min_size); + if (ret != 0) { + PMD_RX_LOG(ERR, + "rxq setup but mpool size too small(<%d) failed", mbuf_min_size); + return -EINVAL; + } + rxvq->mpool = mp; + if (queue_idx < dev->data->nb_rx_queues) + dev->data->rx_queues[queue_idx] = rxvq; + + return 0; +} + +int32_t +zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + uint32_t socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) +{ + uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX; + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx]; + struct 
zxdh_virtnet_tx *txvq = NULL; + uint16_t tx_free_thresh = 0; + + if (tx_conf->tx_deferred_start) { + PMD_TX_LOG(ERR, "Tx deferred start is not supported"); + return -EINVAL; + } + + nb_desc = ZXDH_QUEUE_DEPTH; + + vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); + + txvq = &vq->txq; + txvq->queue_id = vtpci_logic_qidx; + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_TX_FREE_THRESH); + + /* tx_free_thresh must be less than the number of TX entries minus 3 */ + if (tx_free_thresh >= (vq->vq_nentries - 3)) { + PMD_TX_LOG(ERR, "TX entries - 3 (%u). (tx_free_thresh=%u port=%u queue=%u)", + vq->vq_nentries - 3, tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + + vq->vq_free_thresh = tx_free_thresh; + + if (queue_idx < dev->data->nb_tx_queues) + dev->data->tx_queues[queue_idx] = txvq; + + return 0; +} + +int32_t +zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id]; + struct zxdh_virtqueue *vq = rxvq->vq; + + zxdh_queue_enable_intr(vq); + zxdh_mb(hw->weak_barriers); + return 0; +} + +int32_t +zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id]; + struct zxdh_virtqueue *vq = rxvq->vq; + + zxdh_queue_disable_intr(vq); + return 0; +} diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h index 1304d5e4ea..2f602d894f 100644 --- a/drivers/net/zxdh/zxdh_queue.h +++ b/drivers/net/zxdh/zxdh_queue.h @@ -8,6 +8,7 @@ #include #include +#include #include "zxdh_ethdev.h" #include "zxdh_rxtx.h" @@ -30,6 +31,7 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; #define ZXDH_RING_EVENT_FLAGS_DESC 0x2 #define ZXDH_VQ_RING_DESC_CHAIN_END 32768 +#define ZXDH_QUEUE_DEPTH 1024 /* * ring descriptors: 16 bytes. 
@@ -270,8 +272,39 @@ zxdh_queue_disable_intr(struct zxdh_virtqueue *vq) } } +static inline void +zxdh_queue_enable_intr(struct zxdh_virtqueue *vq) +{ + if (vq->vq_packed.event_flags_shadow == ZXDH_RING_EVENT_FLAGS_DISABLE) { + vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE; + vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow; + } +} + +static inline void +zxdh_mb(uint8_t weak_barriers) +{ + if (weak_barriers) + rte_atomic_thread_fence(rte_memory_order_seq_cst); + else + rte_mb(); +} + struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq); int32_t zxdh_free_queues(struct rte_eth_dev *dev); int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx); +int32_t zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + uint32_t socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf); +int32_t zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + uint32_t socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); #endif /* ZXDH_QUEUE_H */ From patchwork Fri Dec 6 05:57:06 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149051 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4719E45E16; Fri, 6 Dec 2024 07:05:03 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id F14D540E0B; Fri, 6 Dec 2024 07:04:49 +0100 (CET) Received: from mxct.zte.com.cn (mxct.zte.com.cn [183.62.165.209]) by mails.dpdk.org 
(Postfix) with ESMTP id C30E840DDB for ; Fri, 6 Dec 2024 07:04:47 +0100 (CET) Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxct.zte.com.cn (FangMail) with ESMTPS id 4Y4LLy2r36z50FXx; Fri, 6 Dec 2024 14:04:42 +0800 (CST) Received: from szxlzmapp05.zte.com.cn ([10.5.230.85]) by mse-fl1.zte.com.cn with SMTP id 4B664P5V083515; Fri, 6 Dec 2024 14:04:25 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:27 +0800 X-Zmail-TransId: 3e81675293eb001-7139d From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 06/15] net/zxdh: dev start/stop ops implementations Date: Fri, 6 Dec 2024 13:57:06 +0800 Message-ID: <20241206055715.506961-7-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl1.zte.com.cn 4B664P5V083515 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 675293FA.002/4Y4LLy2r36z50FXx X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org dev start/stop implementations, start/stop the rx/tx queues. 
Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 2 + doc/guides/nics/zxdh.rst | 2 + drivers/net/zxdh/zxdh_ethdev.c | 61 ++++++++++++++++++++ drivers/net/zxdh/zxdh_pci.c | 24 ++++++++ drivers/net/zxdh/zxdh_pci.h | 1 + drivers/net/zxdh/zxdh_queue.c | 93 +++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_queue.h | 68 ++++++++++++++++++++++ 7 files changed, 251 insertions(+) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index 05c8091ed7..874541c589 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -7,3 +7,5 @@ Linux = Y x86-64 = Y ARMv8 = Y +SR-IOV = Y +Multiprocess = Y diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index 2144753d75..eb970a888f 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -18,6 +18,8 @@ Features Features of the ZXDH PMD are: - Multi arch support: x86_64, ARMv8. +- Multiple queues for TX and RX +- SR-IOV VF Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index f123e05ccf..a9c0d083fe 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -900,12 +900,35 @@ zxdh_np_uninit(struct rte_eth_dev *dev) zxdh_np_dtb_data_res_free(hw); } +static int +zxdh_dev_stop(struct rte_eth_dev *dev) +{ + int ret = 0; + + if (dev->data->dev_started == 0) + return 0; + + ret = zxdh_intr_disable(dev); + if (ret) { + PMD_DRV_LOG(ERR, "intr disable failed"); + return -1; + } + + return 0; +} + static int zxdh_dev_close(struct rte_eth_dev *dev) { struct zxdh_hw *hw = dev->data->dev_private; int ret = 0; + ret = zxdh_dev_stop(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "%s stop port %s failed.", __func__, dev->device->name); + return -1; + } + ret = zxdh_tables_uninit(dev); if (ret != 0) { PMD_DRV_LOG(ERR, "%s :unint port %s failed ", __func__, dev->device->name); @@ -929,9 +952,47 @@ zxdh_dev_close(struct rte_eth_dev *dev) return ret; } 
+static int +zxdh_dev_start(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_virtqueue *vq; + int32_t ret; + uint16_t logic_qidx; + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + logic_qidx = 2 * i + ZXDH_RQ_QUEUE_IDX; + ret = zxdh_dev_rx_queue_setup_finish(dev, logic_qidx); + if (ret < 0) + return ret; + } + ret = zxdh_intr_enable(dev); + if (ret) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); + return -EIO; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + logic_qidx = 2 * i + ZXDH_RQ_QUEUE_IDX; + vq = hw->vqs[logic_qidx]; + /* Flush the old packets */ + zxdh_queue_rxvq_flush(vq); + zxdh_queue_notify(vq); + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + logic_qidx = 2 * i + ZXDH_TQ_QUEUE_IDX; + vq = hw->vqs[logic_qidx]; + zxdh_queue_notify(vq); + } + return 0; +} + /* dev_ops for zxdh, bare necessities for basic operation */ static const struct eth_dev_ops zxdh_eth_dev_ops = { .dev_configure = zxdh_dev_configure, + .dev_start = zxdh_dev_start, + .dev_stop = zxdh_dev_stop, .dev_close = zxdh_dev_close, .dev_infos_get = zxdh_dev_infos_get, .rx_queue_setup = zxdh_dev_rx_queue_setup, diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c index 250e67d560..83164a5c79 100644 --- a/drivers/net/zxdh/zxdh_pci.c +++ b/drivers/net/zxdh/zxdh_pci.c @@ -202,6 +202,29 @@ zxdh_del_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq) rte_write16(0, &hw->common_cfg->queue_enable); } +static void +zxdh_notify_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq) +{ + uint32_t notify_data = 0; + + if (!zxdh_pci_with_feature(hw, ZXDH_F_NOTIFICATION_DATA)) { + rte_write16(vq->vq_queue_index, vq->notify_addr); + return; + } + + if (zxdh_pci_with_feature(hw, ZXDH_F_RING_PACKED)) { + notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags & + ZXDH_VRING_PACKED_DESC_F_AVAIL)) << 31) | + ((uint32_t)vq->vq_avail_idx << 16) | + vq->vq_queue_index; + } else { + notify_data = ((uint32_t)vq->vq_avail_idx 
<< 16) | vq->vq_queue_index; + } + PMD_DRV_LOG(DEBUG, "queue:%d notify_data 0x%x notify_addr 0x%p", + vq->vq_queue_index, notify_data, vq->notify_addr); + rte_write32(notify_data, vq->notify_addr); +} + const struct zxdh_pci_ops zxdh_dev_pci_ops = { .read_dev_cfg = zxdh_read_dev_config, .write_dev_cfg = zxdh_write_dev_config, @@ -216,6 +239,7 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = { .set_queue_num = zxdh_set_queue_num, .setup_queue = zxdh_setup_queue, .del_queue = zxdh_del_queue, + .notify_queue = zxdh_notify_queue, }; uint8_t diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h index e3f13cb17d..5c5f72b90e 100644 --- a/drivers/net/zxdh/zxdh_pci.h +++ b/drivers/net/zxdh/zxdh_pci.h @@ -144,6 +144,7 @@ struct zxdh_pci_ops { int32_t (*setup_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq); void (*del_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq); + void (*notify_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq); }; struct zxdh_hw_internal { diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c index af21f046ad..d45fd78dad 100644 --- a/drivers/net/zxdh/zxdh_queue.c +++ b/drivers/net/zxdh/zxdh_queue.c @@ -274,3 +274,96 @@ zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) zxdh_queue_disable_intr(vq); return 0; } + +int32_t +zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq, struct rte_mbuf **cookie, uint16_t num) +{ + struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc; + struct zxdh_hw *hw = vq->hw; + struct zxdh_vq_desc_extra *dxp; + uint16_t flags = vq->vq_packed.cached_flags; + int32_t i; + uint16_t idx; + + for (i = 0; i < num; i++) { + idx = vq->vq_avail_idx; + dxp = &vq->vq_descx[idx]; + dxp->cookie = (void *)cookie[i]; + dxp->ndescs = 1; + /* rx pkt fill in data_off */ + start_dp[idx].addr = rte_mbuf_iova_get(cookie[i]) + RTE_PKTMBUF_HEADROOM; + start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM; + vq->vq_desc_head_idx = dxp->next; + if 
(vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = vq->vq_desc_head_idx; + zxdh_queue_store_flags_packed(&start_dp[idx], flags, hw->weak_barriers); + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED; + flags = vq->vq_packed.cached_flags; + } + } + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); + return 0; +} + +int32_t +zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_qidx) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_virtqueue *vq = hw->vqs[logic_qidx]; + struct zxdh_virtnet_rx *rxvq = &vq->rxq; + uint16_t desc_idx; + int32_t error = 0; + + /* Allocate blank mbufs for the each rx descriptor */ + memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf)); + for (desc_idx = 0; desc_idx < ZXDH_MBUF_BURST_SZ; desc_idx++) + vq->sw_ring[vq->vq_nentries + desc_idx] = &rxvq->fake_mbuf; + + while (!zxdh_queue_full(vq)) { + uint16_t free_cnt = vq->vq_free_cnt; + + free_cnt = RTE_MIN(ZXDH_MBUF_BURST_SZ, free_cnt); + struct rte_mbuf *new_pkts[free_cnt]; + + if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt) == 0)) { + error = zxdh_enqueue_recv_refill_packed(vq, new_pkts, free_cnt); + if (unlikely(error)) { + int32_t i; + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(new_pkts[i]); + } + } else { + PMD_DRV_LOG(ERR, "port %d rxq %d allocated bufs from %s failed", + hw->port_id, logic_qidx, rxvq->mpool->name); + break; + } + } + return 0; +} + +void +zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq) +{ + struct zxdh_vq_desc_extra *dxp = NULL; + uint16_t i = 0; + struct zxdh_vring_packed_desc *descs = vq->vq_packed.ring.desc; + int32_t cnt = 0; + + i = vq->vq_used_cons_idx; + while (zxdh_desc_used(&descs[i], vq) && cnt++ < vq->vq_nentries) { + dxp = &vq->vq_descx[descs[i].id]; + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + vq->vq_free_cnt++; + 
vq->vq_used_cons_idx++; + if (vq->vq_used_cons_idx >= vq->vq_nentries) { + vq->vq_used_cons_idx -= vq->vq_nentries; + vq->vq_packed.used_wrap_counter ^= 1; + } + i = vq->vq_used_cons_idx; + } +} diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h index 2f602d894f..343ab60c1a 100644 --- a/drivers/net/zxdh/zxdh_queue.h +++ b/drivers/net/zxdh/zxdh_queue.h @@ -25,6 +25,11 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; #define ZXDH_VRING_DESC_F_WRITE 2 /* This flag means the descriptor was made available by the driver */ #define ZXDH_VRING_PACKED_DESC_F_AVAIL (1 << (7)) +#define ZXDH_VRING_PACKED_DESC_F_USED (1 << (15)) + +/* Frequently used combinations */ +#define ZXDH_VRING_PACKED_DESC_F_AVAIL_USED \ + (ZXDH_VRING_PACKED_DESC_F_AVAIL | ZXDH_VRING_PACKED_DESC_F_USED) #define ZXDH_RING_EVENT_FLAGS_ENABLE 0x0 #define ZXDH_RING_EVENT_FLAGS_DISABLE 0x1 @@ -32,6 +37,8 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; #define ZXDH_VQ_RING_DESC_CHAIN_END 32768 #define ZXDH_QUEUE_DEPTH 1024 +#define ZXDH_RQ_QUEUE_IDX 0 +#define ZXDH_TQ_QUEUE_IDX 1 /* * ring descriptors: 16 bytes. 
@@ -290,6 +297,63 @@ zxdh_mb(uint8_t weak_barriers) rte_mb(); } +static inline int32_t +zxdh_queue_full(const struct zxdh_virtqueue *vq) +{ + return (vq->vq_free_cnt == 0); +} + +static inline void +zxdh_queue_store_flags_packed(struct zxdh_vring_packed_desc *dp, + uint16_t flags, uint8_t weak_barriers) +{ + if (weak_barriers) { + #ifdef RTE_ARCH_X86_64 + rte_io_wmb(); + dp->flags = flags; + #else + rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release); + #endif + } else { + rte_io_wmb(); + dp->flags = flags; + } +} + +static inline uint16_t +zxdh_queue_fetch_flags_packed(struct zxdh_vring_packed_desc *dp, + uint8_t weak_barriers) +{ + uint16_t flags; + if (weak_barriers) { + #ifdef RTE_ARCH_X86_64 + flags = dp->flags; + rte_io_rmb(); + #else + flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire); + #endif + } else { + flags = dp->flags; + rte_io_rmb(); + } + + return flags; +} + +static inline int32_t +zxdh_desc_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue *vq) +{ + uint16_t flags = zxdh_queue_fetch_flags_packed(desc, vq->hw->weak_barriers); + uint16_t used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED); + uint16_t avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL); + return avail == used && used == vq->vq_packed.used_wrap_counter; +} + +static inline void zxdh_queue_notify(struct zxdh_virtqueue *vq) +{ + ZXDH_VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); +} + struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq); int32_t zxdh_free_queues(struct rte_eth_dev *dev); int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx); @@ -306,5 +370,9 @@ int32_t zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev, struct rte_mempool *mp); int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_qidx); +void 
zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq); +int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq, + struct rte_mbuf **cookie, uint16_t num); #endif /* ZXDH_QUEUE_H */ From patchwork Fri Dec 6 05:57:07 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149058 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3CA8745E16; Fri, 6 Dec 2024 07:06:20 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0C21640E18; Fri, 6 Dec 2024 07:05:04 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id A8B3C40E20 for ; Fri, 6 Dec 2024 07:04:56 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LMC4pglz8RV6G; Fri, 6 Dec 2024 14:04:55 +0800 (CST) Received: from szxlzmapp01.zte.com.cn ([10.5.231.85]) by mse-fl2.zte.com.cn with SMTP id 4B664QAj033226; Fri, 6 Dec 2024 14:04:26 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:28 +0800 X-Zmail-TransId: 3e81675293ec001-713a0 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 07/15] net/zxdh: provided dev simple tx implementations Date: Fri, 6 Dec 2024 13:57:07 +0800 Message-ID: <20241206055715.506961-8-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> 
<20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664QAj033226 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529407.001/4Y4LMC4pglz8RV6G X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided dev simple tx implementations. Signed-off-by: Junlong Wang --- drivers/net/zxdh/meson.build | 1 + drivers/net/zxdh/zxdh_ethdev.c | 20 ++ drivers/net/zxdh/zxdh_queue.h | 25 +++ drivers/net/zxdh/zxdh_rxtx.c | 395 +++++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_rxtx.h | 4 + 5 files changed, 445 insertions(+) create mode 100644 drivers/net/zxdh/zxdh_rxtx.c -- 2.27.0 diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build index 5b3af87c5b..20b2cf484a 100644 --- a/drivers/net/zxdh/meson.build +++ b/drivers/net/zxdh/meson.build @@ -21,4 +21,5 @@ sources = files( 'zxdh_queue.c', 'zxdh_np.c', 'zxdh_tables.c', + 'zxdh_rxtx.c', ) diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index a9c0d083fe..c32de633db 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -15,6 +15,7 @@ #include "zxdh_queue.h" #include "zxdh_np.h" #include "zxdh_tables.h" +#include "zxdh_rxtx.h" struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS]; struct zxdh_shared_data *zxdh_shared_data; @@ -952,6 +953,24 @@ zxdh_dev_close(struct rte_eth_dev *dev) return ret; } +static int32_t +zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev) +{ + struct zxdh_hw *hw = eth_dev->data->dev_private; + + if (!zxdh_pci_packed_queue(hw)) { + PMD_DRV_LOG(ERR, " port %u not support packed queue", eth_dev->data->port_id); + return -1; + } + if (!zxdh_pci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) { + PMD_DRV_LOG(ERR, " port %u not support rx mergeable", eth_dev->data->port_id); + return -1; + } + eth_dev->tx_pkt_prepare = 
zxdh_xmit_pkts_prepare; + eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed; + return 0; +} + static int zxdh_dev_start(struct rte_eth_dev *dev) { @@ -967,6 +986,7 @@ zxdh_dev_start(struct rte_eth_dev *dev) if (ret < 0) return ret; } + zxdh_set_rxtx_funcs(dev); ret = zxdh_intr_enable(dev); if (ret) { PMD_DRV_LOG(ERR, "interrupt enable failed"); diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h index 343ab60c1a..1bd292e235 100644 --- a/drivers/net/zxdh/zxdh_queue.h +++ b/drivers/net/zxdh/zxdh_queue.h @@ -21,6 +21,15 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; #define ZXDH_TQ_QUEUE_IDX 1 #define ZXDH_MAX_TX_INDIRECT 8 +/* This marks a buffer as continuing via the next field. */ +#define ZXDH_VRING_DESC_F_NEXT 1 + +/* This marks a buffer as write-only (otherwise read-only). */ +#define ZXDH_VRING_DESC_F_WRITE 2 + +/* This means the buffer contains a list of buffer descriptors. */ +#define ZXDH_VRING_DESC_F_INDIRECT 4 + /* This marks a buffer as write-only (otherwise read-only). */ #define ZXDH_VRING_DESC_F_WRITE 2 /* This flag means the descriptor was made available by the driver */ @@ -34,11 +43,16 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; #define ZXDH_RING_EVENT_FLAGS_ENABLE 0x0 #define ZXDH_RING_EVENT_FLAGS_DISABLE 0x1 #define ZXDH_RING_EVENT_FLAGS_DESC 0x2 +#define ZXDH_RING_F_INDIRECT_DESC 28 #define ZXDH_VQ_RING_DESC_CHAIN_END 32768 #define ZXDH_QUEUE_DEPTH 1024 #define ZXDH_RQ_QUEUE_IDX 0 #define ZXDH_TQ_QUEUE_IDX 1 +#define ZXDH_TYPE_HDR_SIZE sizeof(struct zxdh_type_hdr) +#define ZXDH_PI_HDR_SIZE sizeof(struct zxdh_pi_hdr) +#define ZXDH_DL_NET_HDR_SIZE sizeof(struct zxdh_net_hdr_dl) +#define ZXDH_UL_NET_HDR_SIZE sizeof(struct zxdh_net_hdr_ul) /* * ring descriptors: 16 bytes. 
@@ -354,6 +368,17 @@ static inline void zxdh_queue_notify(struct zxdh_virtqueue *vq) ZXDH_VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); } +static inline int32_t +zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq) +{ + uint16_t flags = 0; + + zxdh_mb(vq->hw->weak_barriers); + flags = vq->vq_packed.ring.device->desc_event_flags; + + return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE); +} + struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq); int32_t zxdh_free_queues(struct rte_eth_dev *dev); int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx); diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c new file mode 100644 index 0000000000..01e9b19798 --- /dev/null +++ b/drivers/net/zxdh/zxdh_rxtx.c @@ -0,0 +1,395 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include + +#include + +#include "zxdh_logs.h" +#include "zxdh_pci.h" +#include "zxdh_queue.h" + +#define ZXDH_PKT_FORM_CPU 0x20 /* 1-cpu 0-np */ +#define ZXDH_NO_IP_FRAGMENT 0x2000 /* ip fragment flag */ +#define ZXDH_NO_IPID_UPDATE 0x4000 /* ipid update flag */ + +#define ZXDH_PI_L3TYPE_IP 0x00 +#define ZXDH_PI_L3TYPE_IPV6 0x40 +#define ZXDH_PI_L3TYPE_NOIP 0x80 +#define ZXDH_PI_L3TYPE_RSV 0xC0 +#define ZXDH_PI_L3TYPE_MASK 0xC0 + +#define ZXDH_PCODE_MASK 0x1F +#define ZXDH_PCODE_IP_PKT_TYPE 0x01 +#define ZXDH_PCODE_TCP_PKT_TYPE 0x02 +#define ZXDH_PCODE_UDP_PKT_TYPE 0x03 +#define ZXDH_PCODE_NO_IP_PKT_TYPE 0x09 +#define ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE 0x0C + +#define ZXDH_TX_MAX_SEGS 31 +#define ZXDH_RX_MAX_SEGS 31 + +static void +zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num) +{ + uint16_t used_idx = 0; + uint16_t id = 0; + uint16_t curr_id = 0; + uint16_t free_cnt = 0; + uint16_t size = vq->vq_nentries; + struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct zxdh_vq_desc_extra *dxp = NULL; + + used_idx = vq->vq_used_cons_idx; + /* desc_is_used has a load-acquire or rte_io_rmb 
inside + * and wait for used desc in virtqueue. + */ + while (num > 0 && zxdh_desc_used(&desc[used_idx], vq)) { + id = desc[used_idx].id; + do { + curr_id = used_idx; + dxp = &vq->vq_descx[used_idx]; + used_idx += dxp->ndescs; + free_cnt += dxp->ndescs; + num -= dxp->ndescs; + if (used_idx >= size) { + used_idx -= size; + vq->vq_packed.used_wrap_counter ^= 1; + } + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } while (curr_id != id); + } + vq->vq_used_cons_idx = used_idx; + vq->vq_free_cnt += free_cnt; +} + +static void +zxdh_ring_free_id_packed(struct zxdh_virtqueue *vq, uint16_t id) +{ + struct zxdh_vq_desc_extra *dxp = NULL; + + dxp = &vq->vq_descx[id]; + vq->vq_free_cnt += dxp->ndescs; + + if (vq->vq_desc_tail_idx == ZXDH_VQ_RING_DESC_CHAIN_END) + vq->vq_desc_head_idx = id; + else + vq->vq_descx[vq->vq_desc_tail_idx].next = id; + + vq->vq_desc_tail_idx = id; + dxp->next = ZXDH_VQ_RING_DESC_CHAIN_END; +} + +static void +zxdh_xmit_cleanup_normal_packed(struct zxdh_virtqueue *vq, int32_t num) +{ + uint16_t used_idx = 0; + uint16_t id = 0; + uint16_t size = vq->vq_nentries; + struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct zxdh_vq_desc_extra *dxp = NULL; + + used_idx = vq->vq_used_cons_idx; + /* desc_is_used has a load-acquire or rte_io_rmb inside + * and wait for used desc in virtqueue. 
+ */ + while (num-- && zxdh_desc_used(&desc[used_idx], vq)) { + id = desc[used_idx].id; + dxp = &vq->vq_descx[id]; + vq->vq_used_cons_idx += dxp->ndescs; + if (vq->vq_used_cons_idx >= size) { + vq->vq_used_cons_idx -= size; + vq->vq_packed.used_wrap_counter ^= 1; + } + zxdh_ring_free_id_packed(vq, id); + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + used_idx = vq->vq_used_cons_idx; + } +} + +static void +zxdh_xmit_cleanup_packed(struct zxdh_virtqueue *vq, int32_t num, int32_t in_order) +{ + if (in_order) + zxdh_xmit_cleanup_inorder_packed(vq, num); + else + zxdh_xmit_cleanup_normal_packed(vq, num); +} + +static uint8_t +zxdh_xmit_get_ptype(struct rte_mbuf *m) +{ + uint8_t pcode = ZXDH_PCODE_NO_IP_PKT_TYPE; + uint8_t l3_ptype = ZXDH_PI_L3TYPE_NOIP; + + if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV4 || + ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && + (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4)) { + l3_ptype = ZXDH_PI_L3TYPE_IP; + pcode = ZXDH_PCODE_IP_PKT_TYPE; + } else if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV6 || + ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && + (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6)) { + l3_ptype = ZXDH_PI_L3TYPE_IPV6; + pcode = ZXDH_PCODE_IP_PKT_TYPE; + } else { + goto end; + } + if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP || + ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && + (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)) + pcode = ZXDH_PCODE_TCP_PKT_TYPE; + else if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP || + ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && + (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)) + pcode = ZXDH_PCODE_UDP_PKT_TYPE; + +end: + return l3_ptype | ZXDH_PKT_FORM_CPU | pcode; +} + +static void zxdh_xmit_fill_net_hdr(struct rte_mbuf *cookie, + struct zxdh_net_hdr_dl *hdr) +{ + uint16_t pkt_flag_lw16 = 
ZXDH_NO_IPID_UPDATE; + uint16_t l3_offset; + uint32_t ol_flag = 0; + + hdr->pi_hdr.pkt_flag_lw16 = rte_be_to_cpu_16(pkt_flag_lw16); + + hdr->pi_hdr.pkt_type = zxdh_xmit_get_ptype(cookie); + l3_offset = ZXDH_DL_NET_HDR_SIZE + cookie->outer_l2_len + + cookie->outer_l3_len + cookie->l2_len; + hdr->pi_hdr.l3_offset = rte_be_to_cpu_16(l3_offset); + hdr->pi_hdr.l4_offset = rte_be_to_cpu_16(l3_offset + cookie->l3_len); + + hdr->pd_hdr.ol_flag = rte_be_to_cpu_32(ol_flag); +} + +static inline void zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq, + struct rte_mbuf *cookie, int32_t in_order) +{ + struct zxdh_virtqueue *vq = txvq->vq; + uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; + struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id]; + uint16_t flags = vq->vq_packed.cached_flags; + struct zxdh_net_hdr_dl *hdr = NULL; + + dxp->ndescs = 1; + dxp->cookie = cookie; + hdr = rte_pktmbuf_mtod_offset(cookie, struct zxdh_net_hdr_dl *, -ZXDH_DL_NET_HDR_SIZE); + zxdh_xmit_fill_net_hdr(cookie, hdr); + + uint16_t idx = vq->vq_avail_idx; + struct zxdh_vring_packed_desc *dp = &vq->vq_packed.ring.desc[idx]; + + dp->addr = rte_pktmbuf_iova(cookie) - ZXDH_DL_NET_HDR_SIZE; + dp->len = cookie->data_len + ZXDH_DL_NET_HDR_SIZE; + dp->id = id; + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED; + } + vq->vq_free_cnt--; + if (!in_order) { + vq->vq_desc_head_idx = dxp->next; + if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END; + } + zxdh_queue_store_flags_packed(dp, flags, vq->hw->weak_barriers); +} + +static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq, + struct rte_mbuf *cookie, + uint16_t needed, + int32_t use_indirect, + int32_t in_order) +{ + struct zxdh_tx_region *txr = txvq->zxdh_net_hdr_mz->addr; + struct zxdh_virtqueue *vq = txvq->vq; + struct zxdh_vring_packed_desc *start_dp = 
vq->vq_packed.ring.desc; + void *hdr = NULL; + uint16_t head_idx = vq->vq_avail_idx; + uint16_t idx = head_idx; + uint16_t prev = head_idx; + uint16_t head_flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0; + uint16_t seg_num = cookie->nb_segs; + uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; + struct zxdh_vring_packed_desc *head_dp = &vq->vq_packed.ring.desc[idx]; + struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id]; + + dxp->ndescs = needed; + dxp->cookie = cookie; + head_flags |= vq->vq_packed.cached_flags; + /* if offload disabled, it is not zeroed below, do it now */ + + if (use_indirect) { + /** + * setup tx ring slot to point to indirect + * descriptor list stored in reserved region. + * the first slot in indirect ring is already + * preset to point to the header in reserved region + **/ + start_dp[idx].addr = + txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr); + start_dp[idx].len = (seg_num + 1) * sizeof(struct zxdh_vring_packed_desc); + /* Packed descriptor id needs to be restored when inorder. */ + if (in_order) + start_dp[idx].id = idx; + + /* reset flags for indirect desc */ + head_flags = ZXDH_VRING_DESC_F_INDIRECT; + head_flags |= vq->vq_packed.cached_flags; + hdr = (void *)&txr[idx].tx_hdr; + /* loop below will fill in rest of the indirect elements */ + start_dp = txr[idx].tx_packed_indir; + start_dp->len = ZXDH_DL_NET_HDR_SIZE; /* update actual net or type hdr size */ + idx = 1; + } else { + /* setup first tx ring slot to point to header stored in reserved region. 
*/ + start_dp[idx].addr = txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); + start_dp[idx].len = ZXDH_DL_NET_HDR_SIZE; + head_flags |= ZXDH_VRING_DESC_F_NEXT; + hdr = (void *)&txr[idx].tx_hdr; + idx++; + if (idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED; + } + } + zxdh_xmit_fill_net_hdr(cookie, (struct zxdh_net_hdr_dl *)hdr); + + do { + start_dp[idx].addr = rte_pktmbuf_iova(cookie); + start_dp[idx].len = cookie->data_len; + if (likely(idx != head_idx)) { + uint16_t flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0; + flags |= vq->vq_packed.cached_flags; + start_dp[idx].flags = flags; + } + prev = idx; + idx++; + if (idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED; + } + } while ((cookie = cookie->next) != NULL); + start_dp[prev].id = id; + if (use_indirect) { + idx = head_idx; + if (++idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED; + } + } + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + vq->vq_avail_idx = idx; + if (!in_order) { + vq->vq_desc_head_idx = dxp->next; + if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END; + } + zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers); +} + +uint16_t +zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct zxdh_virtnet_tx *txvq = tx_queue; + struct zxdh_virtqueue *vq = txvq->vq; + struct zxdh_hw *hw = vq->hw; + uint16_t nb_tx = 0; + + bool in_order = zxdh_pci_with_feature(hw, ZXDH_F_IN_ORDER); + + if (nb_pkts > vq->vq_free_cnt) + zxdh_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt, in_order); + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]; + int32_t can_push = 0; + int32_t use_indirect = 0; + int32_t slots = 0; + int32_t need = 
0; + + /* optimize ring usage */ + if ((zxdh_pci_with_feature(hw, ZXDH_F_ANY_LAYOUT) || + zxdh_pci_with_feature(hw, ZXDH_F_VERSION_1)) && + rte_mbuf_refcnt_read(txm) == 1 && + RTE_MBUF_DIRECT(txm) && + txm->nb_segs == 1 && + rte_pktmbuf_headroom(txm) >= ZXDH_DL_NET_HDR_SIZE && + rte_is_aligned(rte_pktmbuf_mtod(txm, char *), + alignof(struct zxdh_net_hdr_dl))) { + can_push = 1; + } else if (zxdh_pci_with_feature(hw, ZXDH_RING_F_INDIRECT_DESC) && + txm->nb_segs < ZXDH_MAX_TX_INDIRECT) { + use_indirect = 1; + } + /** + * How many main ring entries are needed to this Tx? + * indirect => 1 + * any_layout => number of segments + * default => number of segments + 1 + **/ + slots = use_indirect ? 1 : (txm->nb_segs + !can_push); + need = slots - vq->vq_free_cnt; + /* Positive value indicates it need free vring descriptors */ + if (unlikely(need > 0)) { + zxdh_xmit_cleanup_packed(vq, need, in_order); + need = slots - vq->vq_free_cnt; + if (unlikely(need > 0)) { + PMD_TX_LOG(ERR, "port[ep:%d, pf:%d, vf:%d, vfid:%d, pcieid:%d], que:%d[pch:%d]. 
No free tx desc to xmit", + hw->vport.epid, hw->vport.pfid, hw->vport.vfid, + hw->vfid, hw->pcie_id, txvq->queue_id, + hw->channel_context[txvq->queue_id].ph_chno); + break; + } + } + /* Enqueue Packet buffers */ + if (can_push) + zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order); + else + zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order); + } + if (likely(nb_tx)) { + if (unlikely(zxdh_queue_kick_prepare_packed(vq))) { + zxdh_queue_notify(vq); + PMD_TX_LOG(DEBUG, "Notified backend after xmit"); + } + } + return nb_tx; +} + +uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *m = tx_pkts[nb_tx]; + int32_t error; + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + error = rte_validate_tx_offload(m); + if (unlikely(error)) { + rte_errno = -error; + break; + } +#endif + + error = rte_net_intel_cksum_prepare(m); + if (unlikely(error)) { + rte_errno = -error; + break; + } + } + return nb_tx; +} diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h index de9353b223..e07e01e821 100644 --- a/drivers/net/zxdh/zxdh_rxtx.h +++ b/drivers/net/zxdh/zxdh_rxtx.h @@ -44,4 +44,8 @@ struct zxdh_virtnet_tx { const struct rte_memzone *mz; /* mem zone to populate TX ring. 
*/ } __rte_packed; +uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + #endif /* ZXDH_RXTX_H */ From patchwork Fri Dec 6 05:57:08 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149060 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id C629945E16; Fri, 6 Dec 2024 07:06:46 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 83F00427A7; Fri, 6 Dec 2024 07:05:09 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id D270640ECF for ; Fri, 6 Dec 2024 07:05:01 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LMD095Qz8RV6H; Fri, 6 Dec 2024 14:04:56 +0800 (CST) Received: from szxlzmapp03.zte.com.cn ([10.5.231.207]) by mse-fl2.zte.com.cn with SMTP id 4B664R0h033228; Fri, 6 Dec 2024 14:04:27 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:29 +0800 X-Zmail-TransId: 3e81675293ed001-713a7 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 08/15] net/zxdh: provided dev simple rx implementations Date: Fri, 6 Dec 2024 13:57:08 +0800 Message-ID: <20241206055715.506961-9-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: 
<20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664R0h033228 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529408.000/4Y4LMD095Qz8RV6H X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided dev simple rx implementations. Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 1 + doc/guides/nics/zxdh.rst | 1 + drivers/net/zxdh/zxdh_ethdev.c | 2 + drivers/net/zxdh/zxdh_rxtx.c | 311 ++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_rxtx.h | 2 + 5 files changed, 317 insertions(+) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index 874541c589..85c5c8fd32 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -9,3 +9,4 @@ x86-64 = Y ARMv8 = Y SR-IOV = Y Multiprocess = Y +Scattered Rx = Y \ No newline at end of file diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index eb970a888f..f42db9c1f1 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -20,6 +20,7 @@ Features of the ZXDH PMD are: - Multi arch support: x86_64, ARMv8. 
- Multiple queues for TX and RX - SR-IOV VF +- Scattered and gather for TX and RX Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index c32de633db..226b9d6b67 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -968,6 +968,8 @@ zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev) } eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare; eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed; + eth_dev->rx_pkt_burst = &zxdh_recv_pkts_packed; + return 0; } diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c index 01e9b19798..07ef708112 100644 --- a/drivers/net/zxdh/zxdh_rxtx.c +++ b/drivers/net/zxdh/zxdh_rxtx.c @@ -31,6 +31,93 @@ #define ZXDH_TX_MAX_SEGS 31 #define ZXDH_RX_MAX_SEGS 31 +uint32_t zxdh_outer_l2_type[16] = { + 0, + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_TIMESYNC, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_NSH, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_QINQ, + RTE_PTYPE_L2_ETHER_PPPOE, + RTE_PTYPE_L2_ETHER_FCOE, + RTE_PTYPE_L2_ETHER_MPLS, +}; + +uint32_t zxdh_outer_l3_type[16] = { + 0, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, +}; + +uint32_t zxdh_outer_l4_type[16] = { + 0, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_IGMP, +}; + +uint32_t zxdh_tunnel_type[16] = { + 0, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_GTPC, + RTE_PTYPE_TUNNEL_GTPU, + RTE_PTYPE_TUNNEL_ESP, + RTE_PTYPE_TUNNEL_L2TP, + RTE_PTYPE_TUNNEL_VXLAN_GPE, + RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + RTE_PTYPE_TUNNEL_MPLS_IN_UDP, +}; + +uint32_t zxdh_inner_l2_type[16] = { + 0, + RTE_PTYPE_INNER_L2_ETHER, + 0, + 0, + 0, + 0, + 
RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L2_ETHER_QINQ, + 0, + 0, + 0, +}; + +uint32_t zxdh_inner_l3_type[16] = { + 0, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, +}; + +uint32_t zxdh_inner_l4_type[16] = { + 0, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_ICMP, + 0, + 0, +}; + static void zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num) { @@ -393,3 +480,227 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **t } return nb_tx; } + +static uint16_t zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq, + struct rte_mbuf **rx_pkts, + uint32_t *len, + uint16_t num) +{ + struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct rte_mbuf *cookie = NULL; + uint16_t i, used_idx; + uint16_t id; + + for (i = 0; i < num; i++) { + used_idx = vq->vq_used_cons_idx; + /** + * desc_is_used has a load-acquire or rte_io_rmb inside + * and wait for used desc in virtqueue. 
+ */ + if (!zxdh_desc_used(&desc[used_idx], vq)) + return i; + len[i] = desc[used_idx].len; + id = desc[used_idx].id; + cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie; + vq->vq_descx[id].cookie = NULL; + if (unlikely(cookie == NULL)) { + PMD_RX_LOG(ERR, + "vring descriptor with no mbuf cookie at %u", vq->vq_used_cons_idx); + break; + } + rx_pkts[i] = cookie; + vq->vq_free_cnt++; + vq->vq_used_cons_idx++; + if (vq->vq_used_cons_idx >= vq->vq_nentries) { + vq->vq_used_cons_idx -= vq->vq_nentries; + vq->vq_packed.used_wrap_counter ^= 1; + } + } + return i; +} + +static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr) +{ + struct zxdh_pd_hdr_ul *pd_hdr = &hdr->pd_hdr; + struct zxdh_pi_hdr *pi_hdr = &hdr->pi_hdr; + uint32_t idx = 0; + + m->pkt_len = rte_be_to_cpu_16(pi_hdr->ul.pkt_len); + + uint16_t pkt_type_outer = rte_be_to_cpu_16(pd_hdr->pkt_type_out); + + idx = (pkt_type_outer >> 12) & 0xF; + m->packet_type = zxdh_outer_l2_type[idx]; + idx = (pkt_type_outer >> 8) & 0xF; + m->packet_type |= zxdh_outer_l3_type[idx]; + idx = (pkt_type_outer >> 4) & 0xF; + m->packet_type |= zxdh_outer_l4_type[idx]; + idx = pkt_type_outer & 0xF; + m->packet_type |= zxdh_tunnel_type[idx]; + + uint16_t pkt_type_inner = rte_be_to_cpu_16(pd_hdr->pkt_type_in); + + if (pkt_type_inner) { + idx = (pkt_type_inner >> 12) & 0xF; + m->packet_type |= zxdh_inner_l2_type[idx]; + idx = (pkt_type_inner >> 8) & 0xF; + m->packet_type |= zxdh_inner_l3_type[idx]; + idx = (pkt_type_inner >> 4) & 0xF; + m->packet_type |= zxdh_inner_l4_type[idx]; + } + + return 0; +} + +static inline void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m) +{ + int32_t error = 0; + /* + * Requeue the discarded mbuf. This should always be + * successful since it was just dequeued. 
+ */ + error = zxdh_enqueue_recv_refill_packed(vq, &m, 1); + if (unlikely(error)) { + PMD_RX_LOG(ERR, "cannot enqueue discarded mbuf"); + rte_pktmbuf_free(m); + } +} + +uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct zxdh_virtnet_rx *rxvq = rx_queue; + struct zxdh_virtqueue *vq = rxvq->vq; + struct zxdh_hw *hw = vq->hw; + struct rte_eth_dev *dev = hw->eth_dev; + struct rte_mbuf *rxm = NULL; + struct rte_mbuf *prev = NULL; + uint32_t len[ZXDH_MBUF_BURST_SZ] = {0}; + struct rte_mbuf *rcv_pkts[ZXDH_MBUF_BURST_SZ] = {NULL}; + uint32_t nb_enqueued = 0; + uint32_t seg_num = 0; + uint32_t seg_res = 0; + uint16_t hdr_size = 0; + int32_t error = 0; + uint16_t nb_rx = 0; + uint16_t num = nb_pkts; + + if (unlikely(num > ZXDH_MBUF_BURST_SZ)) + num = ZXDH_MBUF_BURST_SZ; + + num = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); + uint16_t i; + uint16_t rcvd_pkt_len = 0; + + for (i = 0; i < num; i++) { + rxm = rcv_pkts[i]; + + struct zxdh_net_hdr_ul *header = + (struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr + + RTE_PKTMBUF_HEADROOM); + + seg_num = header->type_hdr.num_buffers; + if (seg_num == 0) { + PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num); + seg_num = 1; + } + /* bit[0:6]-pd_len unit:2B */ + uint16_t pd_len = header->type_hdr.pd_len << 1; + /* Private queue only handle type hdr */ + hdr_size = pd_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size; + rxm->nb_segs = seg_num; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + rcvd_pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); + rxm->port = rxvq->port_id; + rx_pkts[nb_rx] = rxm; + prev = rxm; + /* Update rte_mbuf according to pi/pd header */ + if (zxdh_rx_update_mbuf(rxm, header) < 0) { + zxdh_discard_rxbuf(vq, rxm); + continue; + } + seg_res = seg_num - 1; + /* Merge remaining segments */ + while (seg_res != 0 && i < (num - 1)) { + i++; + rxm = rcv_pkts[i]; + rxm->data_off = 
RTE_PKTMBUF_HEADROOM; + rxm->data_len = (uint16_t)(len[i]); + + rcvd_pkt_len += (uint32_t)(len[i]); + prev->next = rxm; + prev = rxm; + rxm->next = NULL; + seg_res -= 1; + } + + if (!seg_res) { + if (rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len) { + PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.", + rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len); + zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]); + continue; + } + nb_rx++; + } + } + /* Last packet still need merge segments */ + while (seg_res != 0) { + uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, ZXDH_MBUF_BURST_SZ); + uint16_t extra_idx = 0; + + rcv_cnt = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, len, rcv_cnt); + if (unlikely(rcv_cnt == 0)) { + PMD_RX_LOG(ERR, "No enough segments for packet."); + rte_pktmbuf_free(rx_pkts[nb_rx]); + break; + } + while (extra_idx < rcv_cnt) { + rxm = rcv_pkts[extra_idx]; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->pkt_len = (uint32_t)(len[extra_idx]); + rxm->data_len = (uint16_t)(len[extra_idx]); + prev->next = rxm; + prev = rxm; + rxm->next = NULL; + rcvd_pkt_len += len[extra_idx]; + extra_idx += 1; + } + seg_res -= rcv_cnt; + if (!seg_res) { + if (rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len) { + PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.", + rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len); + zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]); + continue; + } + nb_rx++; + } + } + + /* Allocate new mbuf for the used descriptor */ + if (likely(!zxdh_queue_full(vq))) { + /* free_cnt may include mrg descs */ + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *new_pkts[free_cnt]; + + if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { + error = zxdh_enqueue_recv_refill_packed(vq, new_pkts, free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(new_pkts[i]); + } + nb_enqueued += free_cnt; + } else { + dev->data->rx_mbuf_alloc_failed += free_cnt; + } + } + if (likely(nb_enqueued)) { + if (unlikely(zxdh_queue_kick_prepare_packed(vq))) + zxdh_queue_notify(vq); + } + 
return nb_rx; +} diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h index e07e01e821..6c1c132479 100644 --- a/drivers/net/zxdh/zxdh_rxtx.h +++ b/drivers/net/zxdh/zxdh_rxtx.h @@ -47,5 +47,7 @@ struct zxdh_virtnet_tx { uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); #endif /* ZXDH_RXTX_H */ From patchwork Fri Dec 6 05:57:09 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149055 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 45C4745E16; Fri, 6 Dec 2024 07:05:51 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0B9F640E7C; Fri, 6 Dec 2024 07:05:00 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id 18ADC40E3B for ; Fri, 6 Dec 2024 07:04:53 +0100 (CET) Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LM26kLNz8RTZF; Fri, 6 Dec 2024 14:04:46 +0800 (CST) Received: from szxlzmapp03.zte.com.cn ([10.5.231.207]) by mse-fl1.zte.com.cn with SMTP id 4B664Rg3083537; Fri, 6 Dec 2024 14:04:27 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:29 +0800 X-Zmail-TransId: 3e81675293ed001-713aa From: Junlong Wang To: ferruh.yigit@amd.com Cc: 
dev@dpdk.org, Junlong Wang Subject: [PATCH v1 09/15] net/zxdh: link info update, set link up/down Date: Fri, 6 Dec 2024 13:57:09 +0800 Message-ID: <20241206055715.506961-10-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl1.zte.com.cn 4B664Rg3083537 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 675293FE.003/4Y4LM26kLNz8RTZF X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided link info update, set link up /down, and link intr. Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 4 +- doc/guides/nics/zxdh.rst | 3 + drivers/net/zxdh/meson.build | 1 + drivers/net/zxdh/zxdh_ethdev.c | 13 ++ drivers/net/zxdh/zxdh_ethdev.h | 2 + drivers/net/zxdh/zxdh_ethdev_ops.c | 166 ++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 14 +++ drivers/net/zxdh/zxdh_msg.c | 58 ++++++++- drivers/net/zxdh/zxdh_msg.h | 41 +++++++ drivers/net/zxdh/zxdh_np.c | 183 +++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_np.h | 20 ++++ drivers/net/zxdh/zxdh_tables.c | 14 +++ drivers/net/zxdh/zxdh_tables.h | 2 + 13 files changed, 519 insertions(+), 2 deletions(-) create mode 100644 drivers/net/zxdh/zxdh_ethdev_ops.c create mode 100644 drivers/net/zxdh/zxdh_ethdev_ops.h -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index 85c5c8fd32..f052fde413 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -9,4 +9,6 @@ x86-64 = Y ARMv8 = Y SR-IOV = Y Multiprocess = Y -Scattered Rx = Y \ No newline at end of file +Scattered Rx = Y +Link status = Y +Link status event = Y diff --git a/doc/guides/nics/zxdh.rst 
b/doc/guides/nics/zxdh.rst index f42db9c1f1..fdbc3b3923 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -21,6 +21,9 @@ Features of the ZXDH PMD are: - Multiple queues for TX and RX - SR-IOV VF - Scattered and gather for TX and RX +- Link Auto-negotiation +- Link state information +- Set Link down or up Driver compilation and testing diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build index 20b2cf484a..48f8f5e1ee 100644 --- a/drivers/net/zxdh/meson.build +++ b/drivers/net/zxdh/meson.build @@ -22,4 +22,5 @@ sources = files( 'zxdh_np.c', 'zxdh_tables.c', 'zxdh_rxtx.c', + 'zxdh_ethdev_ops.c', ) diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 226b9d6b67..57ee2f7c55 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -16,6 +16,7 @@ #include "zxdh_np.h" #include "zxdh_tables.h" #include "zxdh_rxtx.h" +#include "zxdh_ethdev_ops.h" struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS]; struct zxdh_shared_data *zxdh_shared_data; @@ -105,9 +106,16 @@ static void zxdh_devconf_intr_handler(void *param) { struct rte_eth_dev *dev = param; + struct zxdh_hw *hw = dev->data->dev_private; + + uint8_t isr = zxdh_pci_isr(hw); if (zxdh_intr_unmask(dev) < 0) PMD_DRV_LOG(ERR, "interrupt enable failed"); + if (isr & ZXDH_PCI_ISR_CONFIG) { + if (zxdh_dev_link_update(dev, 0) == 0) + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } } @@ -1007,6 +1015,8 @@ zxdh_dev_start(struct rte_eth_dev *dev) vq = hw->vqs[logic_qidx]; zxdh_queue_notify(vq); } + zxdh_dev_set_link_up(dev); + return 0; } @@ -1021,6 +1031,9 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .tx_queue_setup = zxdh_dev_tx_queue_setup, .rx_queue_intr_enable = zxdh_dev_rx_queue_intr_enable, .rx_queue_intr_disable = zxdh_dev_rx_queue_intr_disable, + .link_update = zxdh_dev_link_update, + .dev_set_link_up = zxdh_dev_set_link_up, + .dev_set_link_down = zxdh_dev_set_link_down, }; static int32_t 
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h index 6fdb5fb767..cf2bc207e9 100644 --- a/drivers/net/zxdh/zxdh_ethdev.h +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -70,6 +70,7 @@ struct zxdh_hw { uint64_t guest_features; uint32_t max_queue_pairs; uint32_t speed; + uint32_t speed_mode; uint32_t notify_off_multiplier; uint16_t *notify_base; uint16_t pcie_id; @@ -91,6 +92,7 @@ struct zxdh_hw { uint8_t panel_id; uint8_t has_tx_offload; uint8_t has_rx_offload; + uint8_t admin_status; }; struct zxdh_dtb_shared_data { diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c new file mode 100644 index 0000000000..635868c4c0 --- /dev/null +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include "zxdh_ethdev.h" +#include "zxdh_pci.h" +#include "zxdh_msg.h" +#include "zxdh_ethdev_ops.h" +#include "zxdh_tables.h" +#include "zxdh_logs.h" + +static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_port_attr_table port_attr = {0}; + struct zxdh_msg_info msg_info = {0}; + int32_t ret = 0; + + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "write port_attr failed"); + return -EAGAIN; + } + port_attr.is_up = link_status; + + ret = zxdh_set_port_attr(hw->vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "write port_attr failed"); + return -EAGAIN; + } + } else { + struct zxdh_port_attr_set_msg *port_attr_msg = &msg_info.data.port_attr_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info); + port_attr_msg->mode = ZXDH_PORT_ATTR_IS_UP_FLAG; + port_attr_msg->value = link_status; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_PORT_ATTR_IS_UP_FLAG); + return 
ret; + } + } + return ret; +} + +static int32_t +zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + struct zxdh_msg_reply_info reply_info = {0}; + uint16_t status = 0; + int32_t ret = 0; + + if (zxdh_pci_with_feature(hw, ZXDH_NET_F_STATUS)) + zxdh_pci_read_dev_config(hw, offsetof(struct zxdh_net_config, status), + &status, sizeof(status)); + + link->link_status = status; + + if (status == RTE_ETH_LINK_DOWN) { + link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; + link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + } else { + zxdh_agent_msg_build(hw, ZXDH_MAC_LINK_GET, &msg_info); + + ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info), + &reply_info, sizeof(struct zxdh_msg_reply_info), + ZXDH_BAR_MODULE_MAC); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_MAC_LINK_GET); + return -EAGAIN; + } + link->link_speed = reply_info.reply_body.link_msg.speed; + hw->speed_mode = reply_info.reply_body.link_msg.speed_modes; + if ((reply_info.reply_body.link_msg.duplex & RTE_ETH_LINK_FULL_DUPLEX) == + RTE_ETH_LINK_FULL_DUPLEX) + link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + else + link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX; + } + hw->speed = link->link_speed; + + return 0; +} + +static int zxdh_set_link_status(struct rte_eth_dev *dev, uint8_t link_status) +{ + uint16_t curr_link_status = dev->data->dev_link.link_status; + + struct rte_eth_link link; + struct zxdh_hw *hw = dev->data->dev_private; + int32_t ret = 0; + + if (link_status == curr_link_status) { + PMD_DRV_LOG(INFO, "curr_link_status %u", curr_link_status); + return 0; + } + + hw->admin_status = link_status; + ret = zxdh_link_info_get(dev, &link); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to get link status from hw"); + return ret; + } + dev->data->dev_link.link_status = hw->admin_status & link.link_status; + + if 
(dev->data->dev_link.link_status == RTE_ETH_LINK_UP) { + dev->data->dev_link.link_speed = link.link_speed; + dev->data->dev_link.link_duplex = link.link_duplex; + } else { + dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; + dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + } + return zxdh_config_port_status(dev, dev->data->dev_link.link_status); +} + +int zxdh_dev_set_link_up(struct rte_eth_dev *dev) +{ + int ret = zxdh_set_link_status(dev, RTE_ETH_LINK_UP); + + if (ret) + PMD_DRV_LOG(ERR, "Set link up failed, code:%d", ret); + + return ret; +} + +int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused) +{ + struct rte_eth_link link; + struct zxdh_hw *hw = dev->data->dev_private; + int32_t ret = 0; + + memset(&link, 0, sizeof(link)); + link.link_duplex = hw->duplex; + link.link_speed = hw->speed; + link.link_autoneg = RTE_ETH_LINK_AUTONEG; + + ret = zxdh_link_info_get(dev, &link); + if (ret != 0) { + PMD_DRV_LOG(ERR, " Failed to get link status from hw"); + return ret; + } + link.link_status &= hw->admin_status; + if (link.link_status == RTE_ETH_LINK_DOWN) + link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; + + ret = zxdh_config_port_status(dev, link.link_status); + if (ret != 0) { + PMD_DRV_LOG(ERR, "set port attr %d failed.", link.link_status); + return ret; + } + return rte_eth_linkstatus_set(dev, &link); +} + +int zxdh_dev_set_link_down(struct rte_eth_dev *dev) +{ + int ret = zxdh_set_link_status(dev, RTE_ETH_LINK_DOWN); + + if (ret) + PMD_DRV_LOG(ERR, "Set link down failed"); + return ret; +} diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h new file mode 100644 index 0000000000..c6d6ca56fd --- /dev/null +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_ETHDEV_OPS_H +#define ZXDH_ETHDEV_OPS_H + +#include "zxdh_ethdev.h" + +int zxdh_dev_set_link_up(struct 
rte_eth_dev *dev); +int zxdh_dev_set_link_down(struct rte_eth_dev *dev); +int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused); + +#endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c index 1aed979de3..be7bf46728 100644 --- a/drivers/net/zxdh/zxdh_msg.c +++ b/drivers/net/zxdh/zxdh_msg.c @@ -1083,7 +1083,7 @@ int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras, return ZXDH_BAR_MSG_OK; } -int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *msg_req, +int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *msg_req, uint16_t msg_req_len, void *reply, uint16_t reply_len) { struct zxdh_hw *hw = dev->data->dev_private; @@ -1133,6 +1133,50 @@ int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *msg_req, return 0; } +int32_t zxdh_send_msg_to_riscv(struct rte_eth_dev *dev, void *msg_req, + uint16_t msg_req_len, void *reply, uint16_t reply_len, + enum ZXDH_BAR_MODULE_ID module_id) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_pci_bar_msg in = {0}; + struct zxdh_msg_recviver_mem result = {0}; + struct zxdh_msg_reply_info reply_info = {0}; + + if (reply) { + RTE_ASSERT(reply_len < sizeof(zxdh_msg_reply_info)); + result.recv_buffer = reply; + result.buffer_len = reply_len; + } else { + result.recv_buffer = &reply_info; + result.buffer_len = sizeof(reply_info); + } + struct zxdh_msg_reply_head *reply_head = + &(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_head); + struct zxdh_msg_reply_body *reply_body = + &(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_body); + in.payload_addr = &msg_req; + in.payload_len = msg_req_len; + in.virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET); + in.src = hw->is_pf ? 
ZXDH_MSG_CHAN_END_PF : ZXDH_MSG_CHAN_END_VF; + in.dst = ZXDH_MSG_CHAN_END_RISC; + in.module_id = module_id; + in.src_pcieid = hw->pcie_id; + if (zxdh_bar_chan_sync_msg_send(&in, &result) != ZXDH_BAR_MSG_OK) { + PMD_MSG_LOG(ERR, "Failed to send sync messages or receive response"); + return -EAGAIN; + } + if (reply_head->flag != ZXDH_MSG_REPS_OK) { + PMD_MSG_LOG(ERR, "vf[%d] get pf reply failed: reply_head flag : 0x%x(0xff is OK).replylen %d", + hw->vport.vfid, reply_head->flag, reply_head->reps_len); + return -EAGAIN; + } + if (reply_body->flag != ZXDH_REPS_SUCC) { + PMD_MSG_LOG(ERR, "vf[%d] msg processing failed", hw->vfid); + return -EAGAIN; + } + return 0; +} + void zxdh_msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, struct zxdh_msg_info *msg_info) { @@ -1143,3 +1187,15 @@ void zxdh_msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, msghead->vf_id = hw->vport.vfid; msghead->pcieid = hw->pcie_id; } + +void zxdh_agent_msg_build(struct zxdh_hw *hw, enum zxdh_agent_msg_type type, + struct zxdh_msg_info *msg_info) +{ + struct zxdh_agent_msg_head *agent_head = &msg_info->agent_msg_head; + + agent_head->msg_type = type; + agent_head->panel_id = hw->panel_id; + agent_head->phyport = hw->phyport; + agent_head->vf_id = hw->vfid; + agent_head->pcie_id = hw->pcie_id; +} diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 9997417f28..66c337443b 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -164,11 +164,18 @@ enum pciebar_layout_type { ZXDH_URI_MAX, }; +/* riscv msg opcodes */ +enum zxdh_agent_msg_type { + ZXDH_MAC_LINK_GET = 14, +} __rte_packed; + enum zxdh_msg_type { ZXDH_NULL = 0, ZXDH_VF_PORT_INIT = 1, ZXDH_VF_PORT_UNINIT = 2, + ZXDH_PORT_ATTRS_SET = 25, + ZXDH_MSG_TYPE_END, } __rte_packed; @@ -261,6 +268,16 @@ struct zxdh_offset_get_msg { uint16_t type; }; +struct zxdh_link_info_msg { + uint8_t autoneg; + uint8_t link_state; + uint8_t blink_enable; + uint8_t duplex; + uint32_t speed_modes; + 
uint32_t speed; +} __rte_packed; + + struct zxdh_msg_reply_head { uint8_t flag; uint16_t reps_len; @@ -276,6 +293,7 @@ struct zxdh_msg_reply_body { enum zxdh_reps_flag flag; union { uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)]; + struct zxdh_link_info_msg link_msg; } __rte_packed; } __rte_packed; @@ -291,6 +309,21 @@ struct zxdh_vf_init_msg { uint8_t rss_enable; } __rte_packed; +struct zxdh_port_attr_set_msg { + uint32_t mode; + uint32_t value; + uint8_t allmulti_follow; +} __rte_packed; + +struct zxdh_agent_msg_head { + enum zxdh_agent_msg_type msg_type; + uint8_t panel_id; + uint8_t phyport; + uint8_t rsv; + uint16_t vf_id; + uint16_t pcie_id; +} __rte_packed; + struct zxdh_msg_head { enum zxdh_msg_type msg_type; uint16_t vport; @@ -302,10 +335,13 @@ struct zxdh_msg_info { union { uint8_t head_len[ZXDH_MSG_HEAD_LEN]; struct zxdh_msg_head msg_head; + struct zxdh_agent_msg_head agent_msg_head; }; union { uint8_t datainfo[ZXDH_MSG_REQ_BODY_MAX_LEN]; struct zxdh_vf_init_msg vf_init_msg; + struct zxdh_port_attr_set_msg port_attr_msg; + struct zxdh_link_info_msg link_msg; } __rte_packed data; } __rte_packed; @@ -326,5 +362,10 @@ void zxdh_msg_head_build(struct zxdh_hw *hw, enum zxdh_msg_type type, struct zxdh_msg_info *msg_info); int zxdh_vf_send_msg_to_pf(struct rte_eth_dev *dev, void *msg_req, uint16_t msg_req_len, void *reply, uint16_t reply_len); +void zxdh_agent_msg_build(struct zxdh_hw *hw, enum zxdh_agent_msg_type type, + struct zxdh_msg_info *msg_info); +int32_t zxdh_send_msg_to_riscv(struct rte_eth_dev *dev, void *msg_req, + uint16_t msg_req_len, void *reply, uint16_t reply_len, + enum ZXDH_BAR_MODULE_ID module_id); #endif /* ZXDH_MSG_H */ diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c index 242a6901ed..2a4d38b846 100644 --- a/drivers/net/zxdh/zxdh_np.c +++ b/drivers/net/zxdh/zxdh_np.c @@ -36,6 +36,16 @@ ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4] = {0}; #define ZXDH_COMM_GET_BIT_MASK(_inttype_, _bitqnt_)\ 
((_inttype_)(((_bitqnt_) < 32))) +#define ZXDH_COMM_UINT32_GET_BITS(_uidst_, _uisrc_, _uistartpos_, _uilen_)\ + ((_uidst_) = (((_uisrc_) >> (_uistartpos_)) & \ + (ZXDH_COMM_GET_BIT_MASK(uint32_t, (_uilen_))))) + +#define ZXDH_COMM_UINT32_WRITE_BITS(_uidst_, _uisrc_, _uistartpos_, _uilen_)\ + (((_uidst_) & ~(ZXDH_COMM_GET_BIT_MASK(uint32_t, (_uilen_)) << (_uistartpos_)))) + +#define ZXDH_COMM_CONVERT32(dw_data) \ + (((dw_data) & 0xff) << 24) + #define ZXDH_REG_DATA_MAX (128) #define ZXDH_COMM_CHECK_DEV_POINT(dev_id, point)\ @@ -1610,3 +1620,176 @@ zxdh_np_dtb_table_entry_delete(uint32_t dev_id, rte_free(p_data_buff_ex); return 0; } + +static uint32_t +zxdh_np_sdt_tbl_data_parser(uint32_t sdt_hig32, uint32_t sdt_low32, void *p_sdt_info) +{ + uint32_t tbl_type = 0; + uint32_t clutch_en = 0; + + ZXDH_SDTTBL_ERAM_T *p_sdt_eram = NULL; + ZXDH_SDTTBL_PORTTBL_T *p_sdt_porttbl = NULL; + + + ZXDH_COMM_UINT32_GET_BITS(tbl_type, sdt_hig32, + ZXDH_SDT_H_TBL_TYPE_BT_POS, ZXDH_SDT_H_TBL_TYPE_BT_LEN); + ZXDH_COMM_UINT32_GET_BITS(clutch_en, sdt_low32, 0, 1); + + switch (tbl_type) { + case ZXDH_SDT_TBLT_ERAM: + { + p_sdt_eram = (ZXDH_SDTTBL_ERAM_T *)p_sdt_info; + p_sdt_eram->table_type = tbl_type; + p_sdt_eram->eram_clutch_en = clutch_en; + break; + } + + case ZXDH_SDT_TBLT_PORTTBL: + { + p_sdt_porttbl = (ZXDH_SDTTBL_PORTTBL_T *)p_sdt_info; + p_sdt_porttbl->table_type = tbl_type; + p_sdt_porttbl->porttbl_clutch_en = clutch_en; + break; + } + default: + { + PMD_DRV_LOG(ERR, "SDT table_type[ %d ] is invalid!", tbl_type); + return 1; + } + } + + return 0; +} + +static uint32_t +zxdh_np_soft_sdt_tbl_get(uint32_t dev_id, uint32_t sdt_no, void *p_sdt_info) +{ + uint32_t rc = 0; + ZXDH_SDT_TBL_DATA_T sdt_tbl = {0}; + + rc = zxdh_np_sdt_tbl_data_get(dev_id, sdt_no, &sdt_tbl); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_sdt_tbl_data_get"); + + rc = zxdh_np_sdt_tbl_data_parser(sdt_tbl.data_high32, sdt_tbl.data_low32, p_sdt_info); + + if (rc != 0) + PMD_DRV_LOG(ERR, "dpp sdt [%d] tbl_data_parser 
error.", sdt_no); + + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_sdt_tbl_data_parser"); + + return rc; +} + +static uint32_t +zxdh_np_eram_index_cal(uint32_t eram_mode, uint32_t index, + uint32_t *p_row_index, uint32_t *p_col_index) +{ + uint32_t rc = 0; + uint32_t row_index = 0; + uint32_t col_index = 0; + + switch (eram_mode) { + case ZXDH_ERAM128_TBL_128b: + { + row_index = index; + break; + } + case ZXDH_ERAM128_TBL_64b: + { + row_index = (index >> 1); + col_index = index & 0x1; + break; + } + case ZXDH_ERAM128_TBL_1b: + { + row_index = (index >> 7); + col_index = index & 0x7F; + break; + } + } + *p_row_index = row_index; + *p_col_index = col_index; + + return rc; +} + +static uint32_t +zxdh_np_dtb_eram_data_get(uint32_t dev_id, uint32_t queue_id, uint32_t sdt_no, + ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_eram_entry) +{ + uint32_t rc = 0; + uint32_t rd_mode = 0; + uint32_t row_index = 0; + uint32_t col_index = 0; + uint32_t temp_data[4] = {0}; + uint32_t index = p_dump_eram_entry->index; + uint32_t *p_data = p_dump_eram_entry->p_data; + + ZXDH_SDTTBL_ERAM_T sdt_eram_info = {0}; + + rc = zxdh_np_soft_sdt_tbl_get(queue_id, sdt_no, &sdt_eram_info); + ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + rd_mode = sdt_eram_info.eram_mode; + + rc = zxdh_np_eram_index_cal(rd_mode, index, &row_index, &col_index); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + + switch (rd_mode) { + case ZXDH_ERAM128_TBL_128b: + { + memcpy(p_data, temp_data, (128 / 8)); + break; + } + + case ZXDH_ERAM128_TBL_64b: + { + memcpy(p_data, temp_data + ((1 - col_index) << 1), (64 / 8)); + break; + } + + case ZXDH_ERAM128_TBL_1b: + { + ZXDH_COMM_UINT32_GET_BITS(p_data[0], *(temp_data + + (3 - col_index / 32)), (col_index % 32), 1); + break; + } + } + return rc; +} + +int +zxdh_np_dtb_table_entry_get(uint32_t dev_id, + uint32_t queue_id, + ZXDH_DTB_USER_ENTRY_T *get_entry, + uint32_t srh_mode) +{ + uint32_t rc = 0; + uint32_t sdt_no = 0; + uint32_t tbl_type = 0; + 
ZXDH_SDT_TBL_DATA_T sdt_tbl = {0}; + + memset(&sdt_tbl, 0x0, sizeof(ZXDH_SDT_TBL_DATA_T)); + sdt_no = get_entry->sdt_no; + rc = zxdh_np_sdt_tbl_data_get(srh_mode, sdt_no, &sdt_tbl); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_sdt_tbl_data_get"); + ZXDH_COMM_UINT32_GET_BITS(tbl_type, sdt_tbl.data_high32, + ZXDH_SDT_H_TBL_TYPE_BT_POS, ZXDH_SDT_H_TBL_TYPE_BT_LEN); + switch (tbl_type) { + case ZXDH_SDT_TBLT_ERAM: + { + rc = zxdh_np_dtb_eram_data_get(dev_id, + queue_id, + sdt_no, + (ZXDH_DTB_ERAM_ENTRY_INFO_T *)get_entry->p_entry_data); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_eram_data_get"); + break; + } + default: + { + PMD_DRV_LOG(ERR, "SDT table_type[ %d ] is invalid!", tbl_type); + return 1; + } + } + + return 0; +} diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h index 3cb9580254..3a7a830d7d 100644 --- a/drivers/net/zxdh/zxdh_np.h +++ b/drivers/net/zxdh/zxdh_np.h @@ -514,11 +514,31 @@ typedef struct zxdh_sdt_tbl_data_t { uint32_t data_low32; } ZXDH_SDT_TBL_DATA_T; +typedef struct zxdh_sdt_tbl_etcam_t { + uint32_t table_type; + uint32_t etcam_id; + uint32_t etcam_key_mode; + uint32_t etcam_table_id; + uint32_t no_as_rsp_mode; + uint32_t as_en; + uint32_t as_eram_baddr; + uint32_t as_rsp_mode; + uint32_t etcam_table_depth; + uint32_t etcam_clutch_en; +} ZXDH_SDTTBL_ETCAM_T; + +typedef struct zxdh_sdt_tbl_porttbl_t { + uint32_t table_type; + uint32_t porttbl_clutch_en; +} ZXDH_SDTTBL_PORTTBL_T; + int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl); int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id); int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id, uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *down_entries); int zxdh_np_dtb_table_entry_delete(uint32_t dev_id, uint32_t queue_id, uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *delete_entries); +int zxdh_np_dtb_table_entry_get(uint32_t dev_id, uint32_t queue_id, + ZXDH_DTB_USER_ENTRY_T *get_entry, uint32_t srh_mode); #endif /* ZXDH_NP_H 
*/ diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index e28823c657..15098e723d 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -26,6 +26,20 @@ int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) return ret; } +int zxdh_get_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) +{ + int ret = 0; + + ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vfid, (uint32_t *)port_attr}; + ZXDH_DTB_USER_ENTRY_T user_entry_get = {ZXDH_SDT_VPORT_ATT_TABLE, &entry}; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, &user_entry_get, 1); + if (ret != 0) + PMD_DRV_LOG(ERR, "get port_attr vfid:%d failed, ret:%d ", vfid, ret); + + return ret; +} + int zxdh_port_attr_init(struct rte_eth_dev *dev) { diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index 5e9b36faee..7f592beb3c 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -10,6 +10,7 @@ extern struct zxdh_dtb_shared_data g_dtb_data; #define ZXDH_DEVICE_NO 0 +#define ZXDH_PORT_ATTR_IS_UP_FLAG 35 struct zxdh_port_attr_table { #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN @@ -144,6 +145,7 @@ struct zxdh_panel_table { int zxdh_port_attr_init(struct rte_eth_dev *dev); int zxdh_panel_table_init(struct rte_eth_dev *dev); int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); +int zxdh_get_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); int zxdh_port_attr_uninit(struct rte_eth_dev *dev); #endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:10 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149059 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP 
id A7DFD45E16; Fri, 6 Dec 2024 07:06:35 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id DD6AE42796; Fri, 6 Dec 2024 07:05:07 +0100 (CET) Received: from mxct.zte.com.cn (mxct.zte.com.cn [183.62.165.209]) by mails.dpdk.org (Postfix) with ESMTP id F0C5C40E6E for ; Fri, 6 Dec 2024 07:04:57 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxct.zte.com.cn (FangMail) with ESMTPS id 4Y4LMB2Zf4z50FXx; Fri, 6 Dec 2024 14:04:54 +0800 (CST) Received: from szxlzmapp01.zte.com.cn ([10.5.231.85]) by mse-fl2.zte.com.cn with SMTP id 4B664SKl033233; Fri, 6 Dec 2024 14:04:28 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:30 +0800 X-Zmail-TransId: 3e81675293ee001-713ad From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 10/15] net/zxdh: mac set/add/remove ops implementations Date: Fri, 6 Dec 2024 13:57:10 +0800 Message-ID: <20241206055715.506961-11-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664SKl033233 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529406.000/4Y4LMB2Zf4z50FXx X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided mac set/add/remove ops. 
Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 2 + doc/guides/nics/zxdh.rst | 2 + drivers/net/zxdh/zxdh_common.c | 24 +++ drivers/net/zxdh/zxdh_common.h | 1 + drivers/net/zxdh/zxdh_ethdev.c | 28 ++++ drivers/net/zxdh/zxdh_ethdev.h | 3 + drivers/net/zxdh/zxdh_ethdev_ops.c | 233 +++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 4 + drivers/net/zxdh/zxdh_msg.h | 11 ++ drivers/net/zxdh/zxdh_np.h | 5 + drivers/net/zxdh/zxdh_tables.c | 196 ++++++++++++++++++++++++ drivers/net/zxdh/zxdh_tables.h | 36 +++++ 12 files changed, 545 insertions(+) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index f052fde413..d5f3bac917 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -12,3 +12,5 @@ Multiprocess = Y Scattered Rx = Y Link status = Y Link status event = Y +Unicast MAC filter = Y +Multicast MAC filter = Y \ No newline at end of file diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index fdbc3b3923..e0b0776aca 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -24,6 +24,8 @@ Features of the ZXDH PMD are: - Link Auto-negotiation - Link state information - Set Link down or up +- Unicast MAC filter +- Multicast MAC filter Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c index 4f18c97ed7..75883a8897 100644 --- a/drivers/net/zxdh/zxdh_common.c +++ b/drivers/net/zxdh/zxdh_common.c @@ -256,6 +256,30 @@ zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *panelid) return ret; } +static int +zxdh_get_res_hash_id(struct zxdh_res_para *in, uint8_t *hash_id) +{ + uint8_t reps = 0; + uint16_t reps_len = 0; + + if (zxdh_get_res_info(in, ZXDH_TBL_FIELD_HASHID, &reps, &reps_len) != ZXDH_BAR_MSG_OK) + return -1; + + *hash_id = reps; + return ZXDH_BAR_MSG_OK; +} + +int32_t +zxdh_hashidx_get(struct rte_eth_dev *dev, uint8_t *hash_idx) +{ + struct zxdh_res_para param; + + 
zxdh_fill_res_para(dev, ¶m); + int32_t ret = zxdh_get_res_hash_id(¶m, hash_idx); + + return ret; +} + uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg) { diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h index 72c29e1522..826f1fb95d 100644 --- a/drivers/net/zxdh/zxdh_common.h +++ b/drivers/net/zxdh/zxdh_common.h @@ -22,6 +22,7 @@ struct zxdh_res_para { int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport); int32_t zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *pannelid); +int32_t zxdh_hashidx_get(struct rte_eth_dev *dev, uint8_t *hash_idx); uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg); void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val); void zxdh_release_lock(struct zxdh_hw *hw); diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 57ee2f7c55..ad3eb85676 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -981,6 +981,23 @@ zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev) return 0; } +static int +zxdh_mac_config(struct rte_eth_dev *eth_dev) +{ + struct zxdh_hw *hw = eth_dev->data->dev_private; + int ret = 0; + + if (hw->is_pf) { + ret = zxdh_set_mac_table(hw->vport.vport, + ð_dev->data->mac_addrs[0], hw->hash_search_index); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add mac: port 0x%x", hw->vport.vport); + return ret; + } + } + return ret; +} + static int zxdh_dev_start(struct rte_eth_dev *dev) { @@ -1016,6 +1033,9 @@ zxdh_dev_start(struct rte_eth_dev *dev) zxdh_queue_notify(vq); } zxdh_dev_set_link_up(dev); + ret = zxdh_mac_config(hw->eth_dev); + if (ret) + PMD_DRV_LOG(ERR, " mac config failed"); return 0; } @@ -1034,6 +1054,9 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .link_update = zxdh_dev_link_update, .dev_set_link_up = zxdh_dev_set_link_up, .dev_set_link_down = zxdh_dev_set_link_down, + .mac_addr_add = zxdh_dev_mac_addr_add, + 
.mac_addr_remove = zxdh_dev_mac_addr_remove, + .mac_addr_set = zxdh_dev_mac_addr_set, }; static int32_t @@ -1079,6 +1102,11 @@ zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw) hw->vfid = zxdh_vport_to_vfid(hw->vport); + if (zxdh_hashidx_get(eth_dev, &hw->hash_search_index) != 0) { + PMD_DRV_LOG(ERR, "Failed to get hash idx"); + return -1; + } + if (zxdh_panelid_get(eth_dev, &hw->panel_id) != 0) { PMD_DRV_LOG(ERR, "Failed to get panel_id"); return -1; diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h index cf2bc207e9..3306fdfa99 100644 --- a/drivers/net/zxdh/zxdh_ethdev.h +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -78,6 +78,8 @@ struct zxdh_hw { uint16_t port_id; uint16_t vfid; uint16_t queue_num; + uint16_t mc_num; + uint16_t uc_num; uint8_t *isr; uint8_t weak_barriers; @@ -90,6 +92,7 @@ struct zxdh_hw { uint8_t msg_chan_init; uint8_t phyport; uint8_t panel_id; + uint8_t hash_search_index; uint8_t has_tx_offload; uint8_t has_rx_offload; uint8_t admin_status; diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c index 635868c4c0..d1d232b411 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.c +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -164,3 +164,236 @@ int zxdh_dev_set_link_down(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "Set link down failed"); return ret; } + +int zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private; + struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0]; + struct zxdh_msg_info msg_info = {0}; + uint16_t ret = 0; + + if (!rte_is_valid_assigned_ether_addr(addr)) { + PMD_DRV_LOG(ERR, "mac address is invalid!"); + return -EINVAL; + } + + if (hw->is_pf) { + ret = zxdh_del_mac_table(hw->vport.vport, old_addr, hw->hash_search_index); + if (ret) { + PMD_DRV_LOG(ERR, "mac_addr_add failed, code:%d", ret); + return -ret; + } + hw->uc_num--; + + ret = zxdh_set_mac_table(hw->vport.vport, addr, 
hw->hash_search_index); + if (ret) { + PMD_DRV_LOG(ERR, "mac_addr_add failed, code:%d", ret); + return -ret; + } + hw->uc_num++; + } else { + struct zxdh_mac_filter *mac_filter = &msg_info.data.mac_filter_msg; + + mac_filter->filter_flag = ZXDH_MAC_UNFILTER; + mac_filter->mac_flag = true; + rte_memcpy(&mac_filter->mac, old_addr, sizeof(struct rte_ether_addr)); + zxdh_msg_head_build(hw, ZXDH_MAC_DEL, &msg_info); + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ", + hw->vport.vport, ZXDH_MAC_DEL); + return ret; + } + hw->uc_num--; + PMD_DRV_LOG(INFO, "Success to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_MAC_DEL); + + mac_filter->filter_flag = ZXDH_MAC_UNFILTER; + rte_memcpy(&mac_filter->mac, addr, sizeof(struct rte_ether_addr)); + zxdh_msg_head_build(hw, ZXDH_MAC_ADD, &msg_info); + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ", + hw->vport.vport, ZXDH_MAC_ADD); + return ret; + } + hw->uc_num++; + } + rte_ether_addr_copy(addr, (struct rte_ether_addr *)hw->mac_addr); + return ret; +} + +int zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t vmdq __rte_unused) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + uint16_t i, ret; + + if (index >= ZXDH_MAX_MAC_ADDRS) { + PMD_DRV_LOG(ERR, "Add mac index (%u) is out of range", index); + return -EINVAL; + } + + for (i = 0; (i != ZXDH_MAX_MAC_ADDRS); ++i) { + if (memcmp(&dev->data->mac_addrs[i], mac_addr, sizeof(*mac_addr))) + continue; + + PMD_DRV_LOG(INFO, "MAC address already configured"); + return -EADDRINUSE; + } + + if (hw->is_pf) { + if (rte_is_unicast_ether_addr(mac_addr)) { + if (hw->uc_num < ZXDH_MAX_UC_MAC_ADDRS) { + ret = zxdh_set_mac_table(hw->vport.vport, + mac_addr, hw->hash_search_index); + if (ret) { 
+ PMD_DRV_LOG(ERR, "mac_addr_add failed, code:%d", ret); + return -ret; + } + hw->uc_num++; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return -EINVAL; + } + } else { + if (hw->mc_num < ZXDH_MAX_MC_MAC_ADDRS) { + ret = zxdh_set_mac_table(hw->vport.vport, + mac_addr, hw->hash_search_index); + if (ret) { + PMD_DRV_LOG(ERR, "mac_addr_add failed, code:%d", ret); + return -ret; + } + hw->mc_num++; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return -EINVAL; + } + } + } else { + struct zxdh_mac_filter *mac_filter = &msg_info.data.mac_filter_msg; + + mac_filter->filter_flag = ZXDH_MAC_FILTER; + rte_memcpy(&mac_filter->mac, mac_addr, sizeof(struct rte_ether_addr)); + zxdh_msg_head_build(hw, ZXDH_MAC_ADD, &msg_info); + if (rte_is_unicast_ether_addr(mac_addr)) { + if (hw->uc_num < ZXDH_MAX_UC_MAC_ADDRS) { + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, + sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_MAC_ADD); + return -ret; + } + hw->uc_num++; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return -EINVAL; + } + } else { + if (hw->mc_num < ZXDH_MAX_MC_MAC_ADDRS) { + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, + sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_MAC_ADD); + return -ret; + } + hw->mc_num++; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return -EINVAL; + } + } + } + dev->data->mac_addrs[index] = *mac_addr; + return 0; +} +/** + * Fun: + */ +void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev __rte_unused, uint32_t index __rte_unused) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index]; + 
uint16_t ret = 0; + + if (index >= ZXDH_MAX_MAC_ADDRS) + return; + + if (hw->is_pf) { + if (rte_is_unicast_ether_addr(mac_addr)) { + if (hw->uc_num <= ZXDH_MAX_UC_MAC_ADDRS) { + ret = zxdh_del_mac_table(hw->vport.vport, + mac_addr, hw->hash_search_index); + if (ret) { + PMD_DRV_LOG(ERR, "mac_addr_del failed, code:%d", ret); + return; + } + hw->uc_num--; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return; + } + } else { + if (hw->mc_num <= ZXDH_MAX_MC_MAC_ADDRS) { + ret = zxdh_del_mac_table(hw->vport.vport, + mac_addr, hw->hash_search_index); + if (ret) { + PMD_DRV_LOG(ERR, "mac_addr_del failed, code:%d", ret); + return; + } + hw->mc_num--; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return; + } + } + } else { + struct zxdh_mac_filter *mac_filter = &msg_info.data.mac_filter_msg; + + mac_filter->filter_flag = ZXDH_MAC_FILTER; + rte_memcpy(&mac_filter->mac, mac_addr, sizeof(struct rte_ether_addr)); + zxdh_msg_head_build(hw, ZXDH_MAC_DEL, &msg_info); + if (rte_is_unicast_ether_addr(mac_addr)) { + if (hw->uc_num <= ZXDH_MAX_UC_MAC_ADDRS) { + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, + sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_MAC_DEL); + return; + } + hw->uc_num--; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return; + } + } else { + if (hw->mc_num <= ZXDH_MAX_MC_MAC_ADDRS) { + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, + sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_MAC_DEL); + return; + } + hw->mc_num--; + } else { + PMD_DRV_LOG(ERR, "MC_MAC is out of range, MAX_MC_MAC:%d", + ZXDH_MAX_MC_MAC_ADDRS); + return; + } + } + } + memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); +} diff --git 
a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h index c6d6ca56fd..4630bb70db 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.h +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -10,5 +10,9 @@ int zxdh_dev_set_link_up(struct rte_eth_dev *dev); int zxdh_dev_set_link_down(struct rte_eth_dev *dev); int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused); +int zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t vmdq); +int zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); +void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); #endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 66c337443b..5b4af7d841 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -45,6 +45,8 @@ #define ZXDH_MSG_HEAD_LEN 8 #define ZXDH_MSG_REQ_BODY_MAX_LEN \ (ZXDH_MSG_PAYLOAD_MAX_LEN - ZXDH_MSG_HEAD_LEN) +#define ZXDH_MAC_FILTER 0xaa +#define ZXDH_MAC_UNFILTER 0xff enum ZXDH_DRIVER_TYPE { ZXDH_MSG_CHAN_END_MPF = 0, @@ -173,6 +175,8 @@ enum zxdh_msg_type { ZXDH_NULL = 0, ZXDH_VF_PORT_INIT = 1, ZXDH_VF_PORT_UNINIT = 2, + ZXDH_MAC_ADD = 3, + ZXDH_MAC_DEL = 4, ZXDH_PORT_ATTRS_SET = 25, @@ -315,6 +319,12 @@ struct zxdh_port_attr_set_msg { uint8_t allmulti_follow; } __rte_packed; +struct zxdh_mac_filter { + uint8_t mac_flag; + uint8_t filter_flag; + struct rte_ether_addr mac; +} __rte_packed; + struct zxdh_agent_msg_head { enum zxdh_agent_msg_type msg_type; uint8_t panel_id; @@ -342,6 +352,7 @@ struct zxdh_msg_info { struct zxdh_vf_init_msg vf_init_msg; struct zxdh_port_attr_set_msg port_attr_msg; struct zxdh_link_info_msg link_msg; + struct zxdh_mac_filter mac_filter_msg; } __rte_packed data; } __rte_packed; diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h index 3a7a830d7d..7295b709ce 100644 --- a/drivers/net/zxdh/zxdh_np.h +++ 
b/drivers/net/zxdh/zxdh_np.h @@ -509,6 +509,11 @@ typedef struct zxdh_dtb_user_entry_t { void *p_entry_data; } ZXDH_DTB_USER_ENTRY_T; +typedef struct zxdh_dtb_hash_entry_info_t { + uint8_t *p_actu_key; + uint8_t *p_rst; +} ZXDH_DTB_HASH_ENTRY_INFO_T; + typedef struct zxdh_sdt_tbl_data_t { uint32_t data_high32; uint32_t data_low32; diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index 15098e723d..117f3cf12e 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -11,6 +11,10 @@ #define ZXDH_SDT_VPORT_ATT_TABLE 1 #define ZXDH_SDT_PANEL_ATT_TABLE 2 +#define ZXDH_MAC_HASH_INDEX_BASE 64 +#define ZXDH_MAC_HASH_INDEX(index) (ZXDH_MAC_HASH_INDEX_BASE + (index)) +#define ZXDH_MC_GROUP_NUM 4 + int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) { int ret = 0; @@ -147,3 +151,195 @@ zxdh_panel_table_init(struct rte_eth_dev *dev) return ret; } + +int +zxdh_set_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx) +{ + struct zxdh_mac_unicast_table unicast_table = {0}; + struct zxdh_mac_multicast_table multicast_table = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + uint32_t ret; + uint16_t group_id = 0; + + if (rte_is_unicast_ether_addr(addr)) { + rte_memcpy(unicast_table.key.dmac_addr, addr, sizeof(struct rte_ether_addr)); + unicast_table.entry.hit_flag = 0; + unicast_table.entry.vfid = vport_num.vfid; + + ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = { + .p_actu_key = (uint8_t *)&unicast_table.key, + .p_rst = (uint8_t *)&unicast_table.entry + }; + ZXDH_DTB_USER_ENTRY_T entry_get = { + .sdt_no = ZXDH_MAC_HASH_INDEX(hash_search_idx), + .p_entry_data = (void *)&dtb_hash_entry + }; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_get); + if (ret) { + PMD_DRV_LOG(ERR, "Insert mac_table failed"); + return -ret; + } + } else { + for (group_id = 0; group_id < 4; group_id++) { + multicast_table.key.vf_group_id = 
group_id; + rte_memcpy(multicast_table.key.mac_addr, addr, + sizeof(struct rte_ether_addr)); + ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = { + .p_actu_key = (uint8_t *)&multicast_table.key, + .p_rst = (uint8_t *)&multicast_table.entry + }; + + ZXDH_DTB_USER_ENTRY_T entry_get = { + .sdt_no = ZXDH_MAC_HASH_INDEX(hash_search_idx), + .p_entry_data = (void *)&dtb_hash_entry + }; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, + &entry_get, 1); + uint8_t index = (vport_num.vfid % 64) / 32; + if (ret == 0) { + if (vport_num.vf_flag) { + if (group_id == vport_num.vfid / 64) + multicast_table.entry.mc_bitmap[index] |= + rte_cpu_to_be_32(UINT32_C(1) << + (31 - (vport_num.vfid % 64) % 32)); + } else { + if (group_id == vport_num.vfid / 64) + multicast_table.entry.mc_pf_enable = + rte_cpu_to_be_32((1 << 30)); + } + } else { + if (vport_num.vf_flag) { + if (group_id == vport_num.vfid / 64) + multicast_table.entry.mc_bitmap[index] |= + rte_cpu_to_be_32(UINT32_C(1) << + (31 - (vport_num.vfid % 64) % 32)); + else + multicast_table.entry.mc_bitmap[index] = false; + } else { + if (group_id == vport_num.vfid / 64) + multicast_table.entry.mc_pf_enable = + rte_cpu_to_be_32((1 << 30)); + else + multicast_table.entry.mc_pf_enable = false; + } + } + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 1, &entry_get); + if (ret) { + PMD_DRV_LOG(ERR, "add mac_table failed, code:%d", ret); + return -ret; + } + } + } + return 0; +} + +int +zxdh_del_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx) +{ + struct zxdh_mac_unicast_table unicast_table = {0}; + struct zxdh_mac_multicast_table multicast_table = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + uint32_t ret, del_flag = 0; + uint16_t group_id = 0; + + if (rte_is_unicast_ether_addr(addr)) { + rte_memcpy(unicast_table.key.dmac_addr, addr, sizeof(struct rte_ether_addr)); + unicast_table.entry.hit_flag = 0; + unicast_table.entry.vfid = 
vport_num.vfid; + + ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = { + .p_actu_key = (uint8_t *)&unicast_table.key, + .p_rst = (uint8_t *)&unicast_table.entry + }; + + ZXDH_DTB_USER_ENTRY_T entry_get = { + .sdt_no = ZXDH_MAC_HASH_INDEX(hash_search_idx), + .p_entry_data = (void *)&dtb_hash_entry + }; + + ret = zxdh_np_dtb_table_entry_delete(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_get); + if (ret) { + PMD_DRV_LOG(ERR, "delete l2_fwd_hash_table failed, code:%d", ret); + return -ret; + } + } else { + multicast_table.key.vf_group_id = vport_num.vfid / 64; + rte_memcpy(multicast_table.key.mac_addr, addr, sizeof(struct rte_ether_addr)); + + ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = { + .p_actu_key = (uint8_t *)&multicast_table.key, + .p_rst = (uint8_t *)&multicast_table.entry + }; + + ZXDH_DTB_USER_ENTRY_T entry_get = { + .sdt_no = ZXDH_MAC_HASH_INDEX(hash_search_idx), + .p_entry_data = (void *)&dtb_hash_entry + }; + uint8_t index = (vport_num.vfid % 64) / 32; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, + g_dtb_data.queueid, &entry_get, 1); + if (vport_num.vf_flag) + multicast_table.entry.mc_bitmap[index] &= + ~(rte_cpu_to_be_32(UINT32_C(1) << + (31 - (vport_num.vfid % 64) % 32))); + else + multicast_table.entry.mc_pf_enable = 0; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_get); + if (ret) { + PMD_DRV_LOG(ERR, "mac_addr_add mc_table failed, code:%d", ret); + return -ret; + } + + for (group_id = 0; group_id < ZXDH_MC_GROUP_NUM; group_id++) { + multicast_table.key.vf_group_id = group_id; + rte_memcpy(multicast_table.key.mac_addr, addr, + sizeof(struct rte_ether_addr)); + ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = { + .p_actu_key = (uint8_t *)&multicast_table.key, + .p_rst = (uint8_t *)&multicast_table.entry + }; + ZXDH_DTB_USER_ENTRY_T entry_get = { + .sdt_no = ZXDH_MAC_HASH_INDEX(hash_search_idx), + .p_entry_data = (void *)&dtb_hash_entry + }; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, 
g_dtb_data.queueid, + &entry_get, 1); + if (multicast_table.entry.mc_bitmap[0] == 0 && + multicast_table.entry.mc_bitmap[1] == 0 && + multicast_table.entry.mc_pf_enable == 0) { + if (group_id == (ZXDH_MC_GROUP_NUM - 1)) + del_flag = 1; + } else { + break; + } + } + if (del_flag) { + for (group_id = 0; group_id < ZXDH_MC_GROUP_NUM; group_id++) { + multicast_table.key.vf_group_id = group_id; + rte_memcpy(multicast_table.key.mac_addr, addr, + sizeof(struct rte_ether_addr)); + ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = { + .p_actu_key = (uint8_t *)&multicast_table.key, + .p_rst = (uint8_t *)&multicast_table.entry + }; + ZXDH_DTB_USER_ENTRY_T entry_get = { + .sdt_no = ZXDH_MAC_HASH_INDEX(hash_search_idx), + .p_entry_data = (void *)&dtb_hash_entry + }; + ret = zxdh_np_dtb_table_entry_delete(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_get); + } + } + } + return 0; +} diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index 7f592beb3c..a99eb2bec6 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -142,10 +142,46 @@ struct zxdh_panel_table { uint32_t rsv_2; }; /* 16B */ +struct zxdh_mac_unicast_key { + uint16_t rsv; + uint8_t dmac_addr[6]; +}; + +struct zxdh_mac_unicast_entry { + uint8_t rsv1 : 7, + hit_flag : 1; + uint8_t rsv; + uint16_t vfid; +}; + +struct zxdh_mac_unicast_table { + struct zxdh_mac_unicast_key key; + struct zxdh_mac_unicast_entry entry; +}; + +struct zxdh_mac_multicast_key { + uint8_t rsv; + uint8_t vf_group_id; + uint8_t mac_addr[6]; +}; + +struct zxdh_mac_multicast_entry { + uint32_t mc_pf_enable; + uint32_t rsv1; + uint32_t mc_bitmap[2]; +}; + +struct zxdh_mac_multicast_table { + struct zxdh_mac_multicast_key key; + struct zxdh_mac_multicast_entry entry; +}; + int zxdh_port_attr_init(struct rte_eth_dev *dev); int zxdh_panel_table_init(struct rte_eth_dev *dev); int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); int zxdh_get_port_attr(uint16_t vfid, struct 
zxdh_port_attr_table *port_attr); int zxdh_port_attr_uninit(struct rte_eth_dev *dev); +int zxdh_set_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx); +int zxdh_del_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx); #endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:11 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149061 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A50BA45E16; Fri, 6 Dec 2024 07:06:57 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 8F2AB427AE; Fri, 6 Dec 2024 07:05:10 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id 4B8F340EE1 for ; Fri, 6 Dec 2024 07:05:02 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LMK1Tvzz8RV6R; Fri, 6 Dec 2024 14:05:01 +0800 (CST) Received: from szxlzmapp01.zte.com.cn ([10.5.231.85]) by mse-fl2.zte.com.cn with SMTP id 4B664Tnx033258; Fri, 6 Dec 2024 14:04:29 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:31 +0800 X-Zmail-TransId: 3e81675293ef001-713b0 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 11/15] net/zxdh: promiscuous/allmulticast ops implementations Date: Fri, 6 Dec 2024 13:57:11 +0800 Message-ID: <20241206055715.506961-12-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: 
<20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664Tnx033258 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 6752940D.001/4Y4LMK1Tvzz8RV6R X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided promiscuous/allmulticast ops. Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 4 +- doc/guides/nics/zxdh.rst | 2 + drivers/net/zxdh/zxdh_ethdev.c | 15 ++ drivers/net/zxdh/zxdh_ethdev.h | 2 + drivers/net/zxdh/zxdh_ethdev_ops.c | 132 +++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 4 + drivers/net/zxdh/zxdh_msg.h | 10 ++ drivers/net/zxdh/zxdh_tables.c | 219 +++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_tables.h | 22 +++ 9 files changed, 409 insertions(+), 1 deletion(-) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index d5f3bac917..38b715aa7c 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -13,4 +13,6 @@ Scattered Rx = Y Link status = Y Link status event = Y Unicast MAC filter = Y -Multicast MAC filter = Y \ No newline at end of file +Multicast MAC filter = Y +Promiscuous mode = Y +Allmulticast mode = Y \ No newline at end of file diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index e0b0776aca..0399df1302 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -26,6 +26,8 @@ Features of the ZXDH PMD are: - Set Link down or up - Unicast MAC filter - Multicast MAC filter +- Promiscuous mode +- Multicast mode Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index ad3eb85676..b2c3b42176 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ 
b/drivers/net/zxdh/zxdh_ethdev.c @@ -861,6 +861,11 @@ zxdh_tables_uninit(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "zxdh_port_attr_uninit failed"); return ret; } + ret = zxdh_promisc_table_uninit(dev); + if (ret) { + PMD_DRV_LOG(ERR, "del promisc_table failed, code:%d", ret); + return ret; + } return ret; } @@ -1057,6 +1062,10 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .mac_addr_add = zxdh_dev_mac_addr_add, .mac_addr_remove = zxdh_dev_mac_addr_remove, .mac_addr_set = zxdh_dev_mac_addr_set, + .promiscuous_enable = zxdh_dev_promiscuous_enable, + .promiscuous_disable = zxdh_dev_promiscuous_disable, + .allmulticast_enable = zxdh_dev_allmulticast_enable, + .allmulticast_disable = zxdh_dev_allmulticast_disable, }; static int32_t @@ -1310,6 +1319,12 @@ zxdh_tables_init(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, " panel table init failed"); return ret; } + ret = zxdh_promisc_table_init(dev); + if (ret) { + PMD_DRV_LOG(ERR, "promisc_table_init failed"); + return ret; + } + return ret; } diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h index 3306fdfa99..76c5a37dfa 100644 --- a/drivers/net/zxdh/zxdh_ethdev.h +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -96,6 +96,8 @@ struct zxdh_hw { uint8_t has_tx_offload; uint8_t has_rx_offload; uint8_t admin_status; + uint8_t promisc_status; + uint8_t allmulti_status; }; struct zxdh_dtb_shared_data { diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c index d1d232b411..c8c54c07f1 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.c +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -397,3 +397,135 @@ void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev __rte_unused, uint32_t ind } memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); } + +int zxdh_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + int16_t ret = 0; + + if (hw->promisc_status == 0) { + if (hw->is_pf) { + ret = 
zxdh_dev_unicast_table_set(hw, hw->vport.vport, true); + if (hw->allmulti_status == 0) + ret = zxdh_dev_multicast_table_set(hw, hw->vport.vport, true); + + } else { + struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info); + promisc_msg->mode = ZXDH_PROMISC_MODE; + promisc_msg->value = true; + if (hw->allmulti_status == 0) + promisc_msg->mc_follow = true; + + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_PROMISC_MODE); + return ret; + } + } + hw->promisc_status = 1; + } + return ret; +} +/** + * Fun: + */ +int zxdh_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int16_t ret = 0; + struct zxdh_msg_info msg_info = {0}; + + if (hw->promisc_status == 1) { + if (hw->is_pf) { + ret = zxdh_dev_unicast_table_set(hw, hw->vport.vport, false); + if (hw->allmulti_status == 0) + ret = zxdh_dev_multicast_table_set(hw, hw->vport.vport, false); + + } else { + struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info); + promisc_msg->mode = ZXDH_PROMISC_MODE; + promisc_msg->value = false; + if (hw->allmulti_status == 0) + promisc_msg->mc_follow = true; + + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_PROMISC_MODE); + return ret; + } + } + hw->promisc_status = 0; + } + return ret; +} +/** + * Fun: + */ +int zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int16_t ret = 0; + struct zxdh_msg_info msg_info = {0}; + + if (hw->allmulti_status == 0) { + if (hw->is_pf) { + ret = zxdh_dev_multicast_table_set(hw, hw->vport.vport, true); + } else { + struct 
zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info); + + promisc_msg->mode = ZXDH_ALLMULTI_MODE; + promisc_msg->value = true; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_ALLMULTI_MODE); + return ret; + } + } + hw->allmulti_status = 1; + } + return ret; +} + +int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int16_t ret = 0; + struct zxdh_msg_info msg_info = {0}; + + if (hw->allmulti_status == 1) { + if (hw->is_pf) { + if (hw->promisc_status == 1) + goto end; + ret = zxdh_dev_multicast_table_set(hw, hw->vport.vport, false); + } else { + struct zxdh_port_promisc_msg *promisc_msg = &msg_info.data.port_promisc_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_PROMISC_SET, &msg_info); + if (hw->promisc_status == 1) + goto end; + promisc_msg->mode = ZXDH_ALLMULTI_MODE; + promisc_msg->value = false; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d", + hw->vport.vport, ZXDH_ALLMULTI_MODE); + return ret; + } + } + hw->allmulti_status = 0; + } + return ret; +end: + hw->allmulti_status = 0; + return ret; +} diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h index 4630bb70db..394ddedc0e 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.h +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -14,5 +14,9 @@ int zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_ad uint32_t index, uint32_t vmdq); int zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int zxdh_dev_promiscuous_enable(struct rte_eth_dev *dev); +int zxdh_dev_promiscuous_disable(struct rte_eth_dev *dev); +int 
zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev); +int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev); #endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 5b4af7d841..002314ef19 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -47,6 +47,8 @@ (ZXDH_MSG_PAYLOAD_MAX_LEN - ZXDH_MSG_HEAD_LEN) #define ZXDH_MAC_FILTER 0xaa #define ZXDH_MAC_UNFILTER 0xff +#define ZXDH_PROMISC_MODE 1 +#define ZXDH_ALLMULTI_MODE 2 enum ZXDH_DRIVER_TYPE { ZXDH_MSG_CHAN_END_MPF = 0, @@ -179,6 +181,7 @@ enum zxdh_msg_type { ZXDH_MAC_DEL = 4, ZXDH_PORT_ATTRS_SET = 25, + ZXDH_PORT_PROMISC_SET = 26, ZXDH_MSG_TYPE_END, } __rte_packed; @@ -325,6 +328,12 @@ struct zxdh_mac_filter { struct rte_ether_addr mac; } __rte_packed; +struct zxdh_port_promisc_msg { + uint8_t mode; + uint8_t value; + uint8_t mc_follow; +} __rte_packed; + struct zxdh_agent_msg_head { enum zxdh_agent_msg_type msg_type; uint8_t panel_id; @@ -353,6 +362,7 @@ struct zxdh_msg_info { struct zxdh_port_attr_set_msg port_attr_msg; struct zxdh_link_info_msg link_msg; struct zxdh_mac_filter mac_filter_msg; + struct zxdh_port_promisc_msg port_promisc_msg; } __rte_packed data; } __rte_packed; diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index 117f3cf12e..788df41d40 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -10,10 +10,15 @@ #define ZXDH_SDT_VPORT_ATT_TABLE 1 #define ZXDH_SDT_PANEL_ATT_TABLE 2 +#define ZXDH_SDT_BROCAST_ATT_TABLE 6 +#define ZXDH_SDT_UNICAST_ATT_TABLE 10 +#define ZXDH_SDT_MULTICAST_ATT_TABLE 11 #define ZXDH_MAC_HASH_INDEX_BASE 64 #define ZXDH_MAC_HASH_INDEX(index) (ZXDH_MAC_HASH_INDEX_BASE + (index)) #define ZXDH_MC_GROUP_NUM 4 +#define ZXDH_BASE_VFID 1152 +#define ZXDH_TABLE_HIT_FLAG 128 int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) { @@ -343,3 +348,217 @@ zxdh_del_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t 
hash_se } return 0; } + +int zxdh_promisc_table_init(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint32_t ret, vf_group_id = 0; + struct zxdh_brocast_table brocast_table = {0}; + struct zxdh_unitcast_table uc_table = {0}; + struct zxdh_multicast_table mc_table = {0}; + + if (!hw->is_pf) + return 0; + + for (; vf_group_id < 4; vf_group_id++) { + brocast_table.flag = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG); + ZXDH_DTB_ERAM_ENTRY_INFO_T eram_brocast_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id, + .p_data = (uint32_t *)&brocast_table + }; + ZXDH_DTB_USER_ENTRY_T entry_brocast = { + .sdt_no = ZXDH_SDT_BROCAST_ATT_TABLE, + .p_entry_data = (void *)&eram_brocast_entry + }; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_brocast); + if (ret) { + PMD_DRV_LOG(ERR, "write brocast table failed"); + return ret; + } + + uc_table.uc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG); + ZXDH_DTB_ERAM_ENTRY_INFO_T eram_uc_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id, + .p_data = (uint32_t *)&uc_table + }; + ZXDH_DTB_USER_ENTRY_T entry_unicast = { + .sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE, + .p_entry_data = (void *)&eram_uc_entry + }; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_unicast); + if (ret) { + PMD_DRV_LOG(ERR, "write unicast table failed"); + return ret; + } + + mc_table.mc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG); + ZXDH_DTB_ERAM_ENTRY_INFO_T eram_mc_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id, + .p_data = (uint32_t *)&mc_table + }; + ZXDH_DTB_USER_ENTRY_T entry_multicast = { + .sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE, + .p_entry_data = (void *)&eram_mc_entry + }; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 1, &entry_multicast); + if (ret) { + PMD_DRV_LOG(ERR, "write multicast table failed"); + return ret; + } + } + + return ret; +} + 
+int zxdh_promisc_table_uninit(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint32_t ret, vf_group_id = 0; + struct zxdh_brocast_table brocast_table = {0}; + struct zxdh_unitcast_table uc_table = {0}; + struct zxdh_multicast_table mc_table = {0}; + + if (!hw->is_pf) + return 0; + + for (; vf_group_id < 4; vf_group_id++) { + brocast_table.flag = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG); + ZXDH_DTB_ERAM_ENTRY_INFO_T eram_brocast_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id, + .p_data = (uint32_t *)&brocast_table + }; + ZXDH_DTB_USER_ENTRY_T entry_brocast = { + .sdt_no = ZXDH_SDT_BROCAST_ATT_TABLE, + .p_entry_data = (void *)&eram_brocast_entry + }; + + ret = zxdh_np_dtb_table_entry_delete(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_brocast); + if (ret) { + PMD_DRV_LOG(ERR, "write brocast table failed"); + return ret; + } + + uc_table.uc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG); + ZXDH_DTB_ERAM_ENTRY_INFO_T eram_uc_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id, + .p_data = (uint32_t *)&uc_table + }; + ZXDH_DTB_USER_ENTRY_T entry_unicast = { + .sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE, + .p_entry_data = (void *)&eram_uc_entry + }; + + ret = zxdh_np_dtb_table_entry_delete(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &entry_unicast); + if (ret) { + PMD_DRV_LOG(ERR, "write unicast table failed"); + return ret; + } + + mc_table.mc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG); + ZXDH_DTB_ERAM_ENTRY_INFO_T eram_mc_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vf_group_id, + .p_data = (uint32_t *)&mc_table + }; + ZXDH_DTB_USER_ENTRY_T entry_multicast = { + .sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE, + .p_entry_data = (void *)&eram_mc_entry + }; + + ret = zxdh_np_dtb_table_entry_delete(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 1, &entry_multicast); + if (ret) { + PMD_DRV_LOG(ERR, "write multicast table failed"); + return ret; + } + } + + return ret; +} + +int 
zxdh_dev_unicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable) +{ + int16_t ret = 0; + struct zxdh_unitcast_table uc_table = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + + ZXDH_DTB_ERAM_ENTRY_INFO_T uc_table_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vport_num.vfid / 64, + .p_data = (uint32_t *)&uc_table + }; + ZXDH_DTB_USER_ENTRY_T entry = { + .sdt_no = ZXDH_SDT_UNICAST_ATT_TABLE, + .p_entry_data = (void *)&uc_table_entry + }; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, &entry, 1); + if (ret) { + PMD_DRV_LOG(ERR, "unicast_table_get_failed:%d", hw->vfid); + return -ret; + } + + if (vport_num.vf_flag) { + if (enable) + uc_table.bitmap[(vport_num.vfid % 64) / 32] |= + UINT32_C(1) << (31 - (vport_num.vfid % 64) % 32); + else + uc_table.bitmap[(vport_num.vfid % 64) / 32] &= + ~(UINT32_C(1) << (31 - (vport_num.vfid % 64) % 32)); + } else { + uc_table.uc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG + (enable << 6)); + } + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, g_dtb_data.queueid, 1, &entry); + if (ret) { + PMD_DRV_LOG(ERR, "unicast_table_set_failed:%d", hw->vfid); + return -ret; + } + return 0; +} + +int zxdh_dev_multicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable) +{ + int16_t ret = 0; + struct zxdh_multicast_table mc_table = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + + ZXDH_DTB_ERAM_ENTRY_INFO_T mc_table_entry = { + .index = ((hw->vfid - ZXDH_BASE_VFID) << 2) + vport_num.vfid / 64, + .p_data = (uint32_t *)&mc_table + }; + ZXDH_DTB_USER_ENTRY_T entry = { + .sdt_no = ZXDH_SDT_MULTICAST_ATT_TABLE, + .p_entry_data = (void *)&mc_table_entry + }; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, &entry, 1); + if (ret) { + PMD_DRV_LOG(ERR, "allmulti_table_get_failed:%d", hw->vfid); + return -ret; + } + + if (vport_num.vf_flag) { + if (enable) + mc_table.bitmap[(vport_num.vfid % 64) / 32] |= + 
UINT32_C(1) << (31 - (vport_num.vfid % 64) % 32); + else + mc_table.bitmap[(vport_num.vfid % 64) / 32] &= + ~(UINT32_C(1) << (31 - (vport_num.vfid % 64) % 32)); + + } else { + mc_table.mc_flood_pf_enable = rte_be_to_cpu_32(ZXDH_TABLE_HIT_FLAG + (enable << 6)); + } + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, g_dtb_data.queueid, 1, &entry); + if (ret) { + PMD_DRV_LOG(ERR, "allmulti_table_set_failed:%d", hw->vfid); + return -ret; + } + return 0; +} diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index a99eb2bec6..f5767eb2ba 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -176,12 +176,34 @@ struct zxdh_mac_multicast_table { struct zxdh_mac_multicast_entry entry; }; +struct zxdh_brocast_table { + uint32_t flag; + uint32_t rsv; + uint32_t bitmap[2]; +}; + +struct zxdh_unitcast_table { + uint32_t uc_flood_pf_enable; + uint32_t rsv; + uint32_t bitmap[2]; +}; + +struct zxdh_multicast_table { + uint32_t mc_flood_pf_enable; + uint32_t rsv; + uint32_t bitmap[2]; +}; + int zxdh_port_attr_init(struct rte_eth_dev *dev); int zxdh_panel_table_init(struct rte_eth_dev *dev); int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); int zxdh_get_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); int zxdh_port_attr_uninit(struct rte_eth_dev *dev); +int zxdh_promisc_table_init(struct rte_eth_dev *dev); +int zxdh_promisc_table_uninit(struct rte_eth_dev *dev); int zxdh_set_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx); int zxdh_del_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx); +int zxdh_dev_unicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable); +int zxdh_dev_multicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable); #endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:12 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit 
X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149054 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 876E645E16; Fri, 6 Dec 2024 07:05:37 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 3A3BB40E50; Fri, 6 Dec 2024 07:04:57 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40]) by mails.dpdk.org (Postfix) with ESMTP id 8DC2040E2E for ; Fri, 6 Dec 2024 07:04:52 +0100 (CET) Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LM74rsHz8QrkZ; Fri, 6 Dec 2024 14:04:51 +0800 (CST) Received: from szxlzmapp03.zte.com.cn ([10.5.231.207]) by mse-fl1.zte.com.cn with SMTP id 4B664U2X083619; Fri, 6 Dec 2024 14:04:30 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:32 +0800 X-Zmail-TransId: 3e81675293f0001-713b3 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 12/15] net/zxdh: vlan filter, vlan offload ops implementations Date: Fri, 6 Dec 2024 13:57:12 +0800 Message-ID: <20241206055715.506961-13-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl1.zte.com.cn 4B664U2X083619 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529403.007/4Y4LM74rsHz8QrkZ X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions 
List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided vlan filter, vlan offload ops. Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 4 +- doc/guides/nics/zxdh.rst | 3 + drivers/net/zxdh/zxdh_ethdev.c | 40 +++++- drivers/net/zxdh/zxdh_ethdev_ops.c | 221 +++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 2 + drivers/net/zxdh/zxdh_msg.h | 22 +++ drivers/net/zxdh/zxdh_rxtx.c | 18 +++ drivers/net/zxdh/zxdh_tables.c | 94 ++++++++++++ drivers/net/zxdh/zxdh_tables.h | 10 +- 9 files changed, 410 insertions(+), 4 deletions(-) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index 38b715aa7c..d8d0261726 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -15,4 +15,6 @@ Link status event = Y Unicast MAC filter = Y Multicast MAC filter = Y Promiscuous mode = Y -Allmulticast mode = Y \ No newline at end of file +Allmulticast mode = Y +VLAN filter = Y +VLAN offload = Y \ No newline at end of file diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index 0399df1302..3a7585d123 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -28,6 +28,9 @@ Features of the ZXDH PMD are: - Multicast MAC filter - Promiscuous mode - Multicast mode +- VLAN filter and VLAN offload +- VLAN stripping and inserting +- QINQ stripping and inserting Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index b2c3b42176..3e6cfc1d6b 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -759,6 +759,34 @@ zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq) return 0; } +static int +zxdh_vlan_offload_configure(struct rte_eth_dev *dev) +{ + int ret; + int mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | RTE_ETH_QINQ_STRIP_MASK; + + ret = zxdh_dev_vlan_offload_set(dev, mask); + if (ret) { + 
PMD_DRV_LOG(ERR, "vlan offload set error"); + return -1; + } + + return 0; +} + +static int +zxdh_dev_conf_offload(struct rte_eth_dev *dev) +{ + int ret = 0; + + ret = zxdh_vlan_offload_configure(dev); + if (ret) { + PMD_DRV_LOG(ERR, "zxdh_vlan_offload_configure failed"); + return ret; + } + + return 0; +} static int32_t zxdh_dev_configure(struct rte_eth_dev *dev) @@ -816,7 +844,7 @@ zxdh_dev_configure(struct rte_eth_dev *dev) nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues; if (nr_vq == hw->queue_num) - return 0; + goto end; PMD_DRV_LOG(DEBUG, "queue changed need reset "); /* Reset the device although not necessary at startup */ @@ -848,6 +876,8 @@ zxdh_dev_configure(struct rte_eth_dev *dev) zxdh_pci_reinit_complete(hw); +end: + zxdh_dev_conf_offload(dev); return ret; } @@ -1066,6 +1096,8 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .promiscuous_disable = zxdh_dev_promiscuous_disable, .allmulticast_enable = zxdh_dev_allmulticast_enable, .allmulticast_disable = zxdh_dev_allmulticast_disable, + .vlan_filter_set = zxdh_dev_vlan_filter_set, + .vlan_offload_set = zxdh_dev_vlan_offload_set, }; static int32_t @@ -1325,6 +1357,12 @@ zxdh_tables_init(struct rte_eth_dev *dev) return ret; } + ret = zxdh_vlan_filter_table_init(dev); + if (ret) { + PMD_DRV_LOG(ERR, " panel table init failed"); + return ret; + } + return ret; } diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c index c8c54c07f1..094770984c 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.c +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -2,6 +2,8 @@ * Copyright(c) 2024 ZTE Corporation */ +#include + #include "zxdh_ethdev.h" #include "zxdh_pci.h" #include "zxdh_msg.h" @@ -9,6 +11,8 @@ #include "zxdh_tables.h" #include "zxdh_logs.h" +#define ZXDH_VLAN_FILTER_GROUPS 64 + static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status) { struct zxdh_hw *hw = dev->data->dev_private; @@ -529,3 +533,220 @@ int zxdh_dev_allmulticast_disable(struct 
rte_eth_dev *dev) hw->allmulti_status = 0; return ret; } + +int zxdh_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private; + uint16_t idx = 0; + uint16_t bit_idx = 0; + uint8_t msg_type = 0; + int ret = 0; + + vlan_id &= RTE_VLAN_ID_MASK; + if (vlan_id == 0 || vlan_id == RTE_ETHER_MAX_VLAN_ID) { + PMD_DRV_LOG(ERR, "vlan id (%d) is reserved", vlan_id); + return -EINVAL; + } + + if (dev->data->dev_started == 0) { + PMD_DRV_LOG(ERR, "vlan_filter dev not start"); + return -1; + } + + idx = vlan_id / ZXDH_VLAN_FILTER_GROUPS; + bit_idx = vlan_id % ZXDH_VLAN_FILTER_GROUPS; + + if (on) { + if (dev->data->vlan_filter_conf.ids[idx] & (1ULL << bit_idx)) { + PMD_DRV_LOG(ERR, "vlan:%d has already added.", vlan_id); + return 0; + } + msg_type = ZXDH_VLAN_FILTER_ADD; + } else { + if (!(dev->data->vlan_filter_conf.ids[idx] & (1ULL << bit_idx))) { + PMD_DRV_LOG(ERR, "vlan:%d has already deleted.", vlan_id); + return 0; + } + msg_type = ZXDH_VLAN_FILTER_DEL; + } + + if (hw->is_pf) { + ret = zxdh_vlan_filter_table_set(hw->vport.vport, vlan_id, on); + if (ret) { + PMD_DRV_LOG(ERR, "vlan_id:%d table set failed.", vlan_id); + return -1; + } + } else { + struct zxdh_msg_info msg = {0}; + zxdh_msg_head_build(hw, msg_type, &msg); + msg.data.vlan_filter_msg.vlan_id = vlan_id; + ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ", + hw->vport.vport, msg_type); + return ret; + } + } + + if (on) + dev->data->vlan_filter_conf.ids[idx] |= (1ULL << bit_idx); + else + dev->data->vlan_filter_conf.ids[idx] &= ~(1ULL << bit_idx); + + return 0; +} + +int zxdh_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct rte_eth_rxmode *rxmode; + struct zxdh_msg_info msg = {0}; + struct zxdh_port_attr_table port_attr = {0}; + int ret = 0; + + rxmode = 
&dev->data->dev_conf.rxmode; + if (mask & RTE_ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.vlan_filter_enable = true; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan filter offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } else { + msg.data.vlan_filter_set_msg.enable = true; + zxdh_msg_head_build(hw, ZXDH_VLAN_FILTER_SET, &msg); + ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan filter offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } + } else { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.vlan_filter_enable = false; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan filter offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } else { + msg.data.vlan_filter_set_msg.enable = true; + zxdh_msg_head_build(hw, ZXDH_VLAN_FILTER_SET, &msg); + ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan filter offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } + } + } + + if (mask & RTE_ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.vlan_strip_offload = true; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan strip offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } else { + msg.data.vlan_offload_msg.enable = true; + msg.data.vlan_offload_msg.type = ZXDH_VLAN_STRIP_MSG_TYPE; + zxdh_msg_head_build(hw, ZXDH_VLAN_OFFLOAD, &msg); + ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, &msg, + sizeof(struct zxdh_msg_info), 
NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan strip offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } + } else { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.vlan_strip_offload = false; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan strip offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } else { + msg.data.vlan_offload_msg.enable = false; + msg.data.vlan_offload_msg.type = ZXDH_VLAN_STRIP_MSG_TYPE; + zxdh_msg_head_build(hw, ZXDH_VLAN_OFFLOAD, &msg); + ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "port %d vlan strip offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } + } + } + + if (mask & RTE_ETH_QINQ_STRIP_MASK) { + memset(&msg, 0, sizeof(struct zxdh_msg_info)); + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.qinq_strip_offload = true; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "port %d qinq offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } else { + msg.data.vlan_offload_msg.enable = true; + msg.data.vlan_offload_msg.type = ZXDH_QINQ_STRIP_MSG_TYPE; + zxdh_msg_head_build(hw, ZXDH_VLAN_OFFLOAD, &msg); + ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "port %d qinq offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } + } else { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.qinq_strip_offload = true; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "port %d qinq offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } else { + msg.data.vlan_offload_msg.enable = false; + msg.data.vlan_offload_msg.type = 
ZXDH_QINQ_STRIP_MSG_TYPE; + zxdh_msg_head_build(hw, ZXDH_VLAN_OFFLOAD, &msg); + ret = zxdh_vf_send_msg_to_pf(hw->eth_dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "port %d qinq offload set failed", + hw->vport.vfid); + return -EAGAIN; + } + } + } + } + + return ret; +} diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h index 394ddedc0e..058d271ab3 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.h +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -18,5 +18,7 @@ int zxdh_dev_promiscuous_enable(struct rte_eth_dev *dev); int zxdh_dev_promiscuous_disable(struct rte_eth_dev *dev); int zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev); int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev); +int zxdh_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +int zxdh_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); #endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 002314ef19..bed16d31a0 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -49,6 +49,8 @@ #define ZXDH_MAC_UNFILTER 0xff #define ZXDH_PROMISC_MODE 1 #define ZXDH_ALLMULTI_MODE 2 +#define ZXDH_VLAN_STRIP_MSG_TYPE 0 +#define ZXDH_QINQ_STRIP_MSG_TYPE 1 enum ZXDH_DRIVER_TYPE { ZXDH_MSG_CHAN_END_MPF = 0, @@ -179,6 +181,10 @@ enum zxdh_msg_type { ZXDH_VF_PORT_UNINIT = 2, ZXDH_MAC_ADD = 3, ZXDH_MAC_DEL = 4, + ZXDH_VLAN_FILTER_SET = 17, + ZXDH_VLAN_FILTER_ADD = 18, + ZXDH_VLAN_FILTER_DEL = 19, + ZXDH_VLAN_OFFLOAD = 21, ZXDH_PORT_ATTRS_SET = 25, ZXDH_PORT_PROMISC_SET = 26, @@ -343,6 +349,19 @@ struct zxdh_agent_msg_head { uint16_t pcie_id; } __rte_packed; +struct zxdh_vlan_filter { + uint16_t vlan_id; +}; + +struct zxdh_vlan_filter_set { + uint8_t enable; +}; + +struct zxdh_vlan_offload { + uint8_t enable; + uint8_t type; +} __rte_packed; + struct zxdh_msg_head { enum zxdh_msg_type msg_type; uint16_t vport; @@ -363,6 +382,9 @@ struct 
zxdh_msg_info { struct zxdh_link_info_msg link_msg; struct zxdh_mac_filter mac_filter_msg; struct zxdh_port_promisc_msg port_promisc_msg; + struct zxdh_vlan_filter vlan_filter_msg; + struct zxdh_vlan_filter_set vlan_filter_set_msg; + struct zxdh_vlan_offload vlan_offload_msg; } __rte_packed data; } __rte_packed; diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c index 07ef708112..be5865ac85 100644 --- a/drivers/net/zxdh/zxdh_rxtx.c +++ b/drivers/net/zxdh/zxdh_rxtx.c @@ -11,6 +11,9 @@ #include "zxdh_pci.h" #include "zxdh_queue.h" +#define ZXDH_SVLAN_TPID 0x88a8 +#define ZXDH_CVLAN_TPID 0x8100 + #define ZXDH_PKT_FORM_CPU 0x20 /* 1-cpu 0-np */ #define ZXDH_NO_IP_FRAGMENT 0x2000 /* ip fragment flag */ #define ZXDH_NO_IPID_UPDATE 0x4000 /* ipid update flag */ @@ -21,6 +24,9 @@ #define ZXDH_PI_L3TYPE_RSV 0xC0 #define ZXDH_PI_L3TYPE_MASK 0xC0 +#define ZXDH_PD_OFFLOAD_SVLAN_INSERT (1 << 14) +#define ZXDH_PD_OFFLOAD_CVLAN_INSERT (1 << 13) + #define ZXDH_PCODE_MASK 0x1F #define ZXDH_PCODE_IP_PKT_TYPE 0x01 #define ZXDH_PCODE_TCP_PKT_TYPE 0x02 @@ -258,6 +264,18 @@ static void zxdh_xmit_fill_net_hdr(struct rte_mbuf *cookie, hdr->pi_hdr.l3_offset = rte_be_to_cpu_16(l3_offset); hdr->pi_hdr.l4_offset = rte_be_to_cpu_16(l3_offset + cookie->l3_len); + if (cookie->ol_flags & RTE_MBUF_F_TX_VLAN) { + ol_flag |= ZXDH_PD_OFFLOAD_CVLAN_INSERT; + hdr->pi_hdr.vlan_id = rte_be_to_cpu_16(cookie->vlan_tci); + hdr->pd_hdr.cvlan_insert = + rte_be_to_cpu_32((ZXDH_CVLAN_TPID << 16) | cookie->vlan_tci); + } + if (cookie->ol_flags & RTE_MBUF_F_TX_QINQ) { + ol_flag |= ZXDH_PD_OFFLOAD_SVLAN_INSERT; + hdr->pd_hdr.svlan_insert = + rte_be_to_cpu_32((ZXDH_SVLAN_TPID << 16) | cookie->vlan_tci_outer); + } + hdr->pd_hdr.ol_flag = rte_be_to_cpu_32(ol_flag); } diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index 788df41d40..93dc956597 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -10,6 +10,7 @@ #define 
ZXDH_SDT_VPORT_ATT_TABLE 1 #define ZXDH_SDT_PANEL_ATT_TABLE 2 +#define ZXDH_SDT_VLAN_ATT_TABLE 4 #define ZXDH_SDT_BROCAST_ATT_TABLE 6 #define ZXDH_SDT_UNICAST_ATT_TABLE 10 #define ZXDH_SDT_MULTICAST_ATT_TABLE 11 @@ -19,6 +20,10 @@ #define ZXDH_MC_GROUP_NUM 4 #define ZXDH_BASE_VFID 1152 #define ZXDH_TABLE_HIT_FLAG 128 +#define ZXDH_FIRST_VLAN_GROUP_BITS 23 +#define ZXDH_VLAN_GROUP_BITS 31 +#define ZXDH_VLAN_GROUP_NUM 35 +#define ZXDH_VLAN_FILTER_VLANID_STEP 120 int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) { @@ -562,3 +567,92 @@ int zxdh_dev_multicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable } return 0; } + +int zxdh_vlan_filter_table_init(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_vlan_filter_table vlan_table = {0}; + int16_t ret = 0; + + for (uint8_t vlan_group = 0; vlan_group < ZXDH_VLAN_GROUP_NUM; vlan_group++) { + if (vlan_group == 0) { + vlan_table.vlans[0] |= (1 << ZXDH_FIRST_VLAN_GROUP_BITS); + vlan_table.vlans[0] |= (1 << ZXDH_VLAN_GROUP_BITS); + + } else { + vlan_table.vlans[0] = 0; + } + uint32_t index = (vlan_group << 11) | hw->vport.vfid; + ZXDH_DTB_ERAM_ENTRY_INFO_T entry_data = { + .index = index, + .p_data = (uint32_t *)&vlan_table + }; + ZXDH_DTB_USER_ENTRY_T user_entry = {ZXDH_SDT_VLAN_ATT_TABLE, &entry_data}; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &user_entry); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "[vfid:%d], vlan_group:%d, init vlan filter table failed", + hw->vport.vfid, vlan_group); + ret = -1; + } + } + return ret; +} + +int zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable) +{ + struct zxdh_vlan_filter_table vlan_table = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + int ret = 0; + + memset(&vlan_table, 0, sizeof(struct zxdh_vlan_filter_table)); + int table_num = vlan_id / ZXDH_VLAN_FILTER_VLANID_STEP; + uint32_t index = (table_num << 11) | 
vport_num.vfid; + uint16_t group = (vlan_id - table_num * ZXDH_VLAN_FILTER_VLANID_STEP) / 8 + 1; + + uint8_t val = sizeof(struct zxdh_vlan_filter_table) / sizeof(uint32_t); + uint8_t vlan_tbl_index = group / + ((sizeof(struct zxdh_vlan_filter_table) / sizeof(uint32_t))); + uint16_t used_group = vlan_tbl_index * val; + + used_group = (used_group == 0 ? 0 : (used_group - 1)); + + ZXDH_DTB_ERAM_ENTRY_INFO_T entry_data = {index, (uint32_t *)&vlan_table}; + ZXDH_DTB_USER_ENTRY_T user_entry_get = {ZXDH_SDT_VLAN_ATT_TABLE, &entry_data}; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, &user_entry_get, 1); + if (ret) { + PMD_DRV_LOG(ERR, "get vlan table failed"); + return -1; + } + uint16_t relative_vlan_id = vlan_id - table_num * ZXDH_VLAN_FILTER_VLANID_STEP; + uint32_t *base_group = &vlan_table.vlans[0]; + + *base_group |= 1 << 31; + base_group = &vlan_table.vlans[vlan_tbl_index]; + uint8_t valid_bits = (vlan_tbl_index == 0 ? + ZXDH_FIRST_VLAN_GROUP_BITS : ZXDH_VLAN_GROUP_BITS) + 1; + + uint8_t shift_left = (valid_bits - (relative_vlan_id - used_group * 8) % valid_bits) - 1; + + if (enable) + *base_group |= 1 << shift_left; + else + *base_group &= ~(1 << shift_left); + + + ZXDH_DTB_USER_ENTRY_T user_entry_write = { + .sdt_no = ZXDH_SDT_VLAN_ATT_TABLE, + .p_entry_data = &entry_data + }; + + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &user_entry_write); + if (ret != 0) { + PMD_DRV_LOG(ERR, "write vlan table failed"); + return -1; + } + return 0; +} diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index f5767eb2ba..85e95a876f 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -43,7 +43,7 @@ struct zxdh_port_attr_table { uint8_t rdma_offload_enable: 1; uint8_t vlan_filter_enable: 1; uint8_t vlan_strip_offload: 1; - uint8_t qinq_valn_strip_offload: 1; + uint8_t qinq_strip_offload: 1; uint8_t rss_enable: 1; uint8_t mtu_enable: 1; uint8_t hit_flag: 1; @@ -73,7 
+73,7 @@ struct zxdh_port_attr_table { uint8_t rdma_offload_enable: 1; uint8_t vlan_filter_enable: 1; uint8_t vlan_strip_offload: 1; - uint8_t qinq_valn_strip_offload: 1; + uint8_t qinq_strip_offload: 1; uint8_t rss_enable: 1; uint8_t mtu_enable: 1; uint8_t hit_flag: 1; @@ -194,6 +194,10 @@ struct zxdh_multicast_table { uint32_t bitmap[2]; }; +struct zxdh_vlan_filter_table { + uint32_t vlans[4]; +}; + int zxdh_port_attr_init(struct rte_eth_dev *dev); int zxdh_panel_table_init(struct rte_eth_dev *dev); int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); @@ -205,5 +209,7 @@ int zxdh_set_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t has int zxdh_del_mac_table(uint16_t vport, struct rte_ether_addr *addr, uint8_t hash_search_idx); int zxdh_dev_unicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable); int zxdh_dev_multicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable); +int zxdh_vlan_filter_table_init(struct rte_eth_dev *dev); +int zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable); #endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:13 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149063 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 909C545E16; Fri, 6 Dec 2024 07:07:26 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2B31C427BC; Fri, 6 Dec 2024 07:05:17 +0100 (CET) Received: from mxct.zte.com.cn (mxct.zte.com.cn [183.62.165.209]) by mails.dpdk.org (Postfix) with ESMTP id E06B6427A8 for ; Fri, 6 Dec 2024 07:05:09 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher 
ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxct.zte.com.cn (FangMail) with ESMTPS id 4Y4LMQ1xz4z50LNb; Fri, 6 Dec 2024 14:05:06 +0800 (CST) Received: from szxlzmapp04.zte.com.cn ([10.5.231.166]) by mse-fl2.zte.com.cn with SMTP id 4B664Vw1033301; Fri, 6 Dec 2024 14:04:31 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:33 +0800 X-Zmail-TransId: 3e81675293f0001-713b8 From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 13/15] net/zxdh: rss hash config/update, reta update/get Date: Fri, 6 Dec 2024 13:57:13 +0800 Message-ID: <20241206055715.506961-14-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664Vw1033301 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529412.000/4Y4LMQ1xz4z50LNb X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org provided rss hash config/update, reta update/get ops. 
Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 5 +- doc/guides/nics/zxdh.rst | 1 + drivers/net/zxdh/zxdh_ethdev.c | 43 ++++ drivers/net/zxdh/zxdh_ethdev.h | 1 + drivers/net/zxdh/zxdh_ethdev_ops.c | 316 +++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 8 + drivers/net/zxdh/zxdh_msg.h | 22 ++ drivers/net/zxdh/zxdh_tables.c | 82 ++++++++ drivers/net/zxdh/zxdh_tables.h | 7 + 9 files changed, 484 insertions(+), 1 deletion(-) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index d8d0261726..5cae08f611 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -17,4 +17,7 @@ Multicast MAC filter = Y Promiscuous mode = Y Allmulticast mode = Y VLAN filter = Y -VLAN offload = Y \ No newline at end of file +VLAN offload = Y +RSS hash = Y +RSS reta update = Y +Inner RSS = Y diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index 3a7585d123..3cc6a1d348 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -31,6 +31,7 @@ Features of the ZXDH PMD are: - VLAN filter and VLAN offload - VLAN stripping and inserting - QINQ stripping and inserting +- Receive Side Scaling (RSS) Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 3e6cfc1d6b..87502adf74 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -788,6 +788,39 @@ zxdh_dev_conf_offload(struct rte_eth_dev *dev) return 0; } +static int +zxdh_rss_qid_config(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_port_attr_table port_attr = {0}; + struct zxdh_msg_info msg_info = {0}; + int ret = 0; + + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.port_base_qid = hw->channel_context[0].ph_chno & 0xfff; + + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "PF:%d port_base_qid insert failed", 
hw->vfid); + return ret; + } + } else { + struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info); + attr_msg->mode = ZXDH_PORT_BASE_QID_FLAG; + attr_msg->value = hw->channel_context[0].ph_chno & 0xfff; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ", + hw->vport.vport, ZXDH_PORT_BASE_QID_FLAG); + return ret; + } + } + return ret; +} + static int32_t zxdh_dev_configure(struct rte_eth_dev *dev) { @@ -874,6 +907,12 @@ zxdh_dev_configure(struct rte_eth_dev *dev) return -1; } + ret = zxdh_rss_qid_config(dev); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to configure base qid!"); + return -1; + } + zxdh_pci_reinit_complete(hw); end: @@ -1098,6 +1137,10 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .allmulticast_disable = zxdh_dev_allmulticast_disable, .vlan_filter_set = zxdh_dev_vlan_filter_set, .vlan_offload_set = zxdh_dev_vlan_offload_set, + .reta_update = zxdh_dev_rss_reta_update, + .reta_query = zxdh_dev_rss_reta_query, + .rss_hash_update = zxdh_rss_hash_update, + .rss_hash_conf_get = zxdh_rss_hash_conf_get, }; static int32_t diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h index 76c5a37dfa..f558a1502d 100644 --- a/drivers/net/zxdh/zxdh_ethdev.h +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -80,6 +80,7 @@ struct zxdh_hw { uint16_t queue_num; uint16_t mc_num; uint16_t uc_num; + uint16_t *rss_reta; uint8_t *isr; uint8_t weak_barriers; diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c index 094770984c..ef31957923 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.c +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -12,6 +12,29 @@ #include "zxdh_logs.h" #define ZXDH_VLAN_FILTER_GROUPS 64 +#define ZXDH_INVALID_LOGIC_QID 0xFFFFU + +#define ZXDH_ETH_RSS_L2 RTE_ETH_RSS_L2_PAYLOAD +#define ZXDH_ETH_RSS_IP \ + (RTE_ETH_RSS_IPV4 | \ + 
RTE_ETH_RSS_FRAG_IPV4 | \ + RTE_ETH_RSS_IPV6 | \ + RTE_ETH_RSS_FRAG_IPV6) +#define ZXDH_ETH_RSS_TCP (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP) +#define ZXDH_ETH_RSS_UDP (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP) +#define ZXDH_ETH_RSS_SCTP (RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP) + +#define ZXDH_HF_F5_ETH (ZXDH_ETH_RSS_TCP | ZXDH_ETH_RSS_UDP | ZXDH_ETH_RSS_SCTP) +#define ZXDH_HF_F3_ETH ZXDH_ETH_RSS_IP +#define ZXDH_HF_MAC_VLAN_ETH ZXDH_ETH_RSS_L2 + +/* Supported RSS */ +#define ZXDH_RSS_HF ((ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH)) +#define ZXDH_RSS_HF_MASK (~(ZXDH_RSS_HF)) +#define ZXDH_HF_F5 1 +#define ZXDH_HF_F3 2 +#define ZXDH_HF_MAC_VLAN 4 +#define ZXDH_HF_ALL 0 static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status) { @@ -750,3 +773,296 @@ int zxdh_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) return ret; } + +int +zxdh_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg = {0}; + uint16_t old_reta[RTE_ETH_RSS_RETA_SIZE_256]; + uint16_t idx; + uint16_t i; + uint16_t pos; + int ret; + + if (reta_size != RTE_ETH_RSS_RETA_SIZE_256) { + PMD_DRV_LOG(ERR, "reta_size is illegal(%u).reta_size should be 256", reta_size); + return -EINVAL; + } + if (!hw->rss_reta) { + hw->rss_reta = rte_zmalloc(NULL, RTE_ETH_RSS_RETA_SIZE_256 * sizeof(uint16_t), 4); + if (hw->rss_reta == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate RSS reta"); + return -ENOMEM; + } + } + for (idx = 0, i = 0; (i < reta_size); ++i) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + pos = i % RTE_ETH_RETA_GROUP_SIZE; + if (((reta_conf[idx].mask >> pos) & 0x1) == 0) + continue; + if (reta_conf[idx].reta[pos] > dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "reta table value err(%u >= %u)", + reta_conf[idx].reta[pos], dev->data->nb_rx_queues); + return -EINVAL; + } + 
if (hw->rss_reta[i] != reta_conf[idx].reta[pos]) + break; + } + if (i == reta_size) { + PMD_DRV_LOG(DEBUG, "reta table same with buffered table"); + return 0; + } + memcpy(old_reta, hw->rss_reta, sizeof(old_reta)); + + for (idx = 0, i = 0; i < reta_size; ++i) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + pos = i % RTE_ETH_RETA_GROUP_SIZE; + if (((reta_conf[idx].mask >> pos) & 0x1) == 0) + continue; + hw->rss_reta[i] = reta_conf[idx].reta[pos]; + } + + zxdh_msg_head_build(hw, ZXDH_RSS_RETA_SET, &msg); + for (i = 0; i < reta_size; i++) + msg.data.rss_reta.reta[i] = + (hw->channel_context[hw->rss_reta[i] * 2].ph_chno); + + + if (hw->is_pf) { + ret = zxdh_rss_table_set(hw->vport.vport, &msg.data.rss_reta); + if (ret) { + PMD_DRV_LOG(ERR, "rss reta table set failed"); + return -EINVAL; + } + } else { + ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "vf rss reta table set failed"); + return -EINVAL; + } + } + return ret; +} + +static uint16_t +zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid) +{ + struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private; + uint16_t rx_queues = dev->data->nb_rx_queues; + uint16_t i; + + for (i = 0; i < rx_queues; i++) { + if (qid == hw->channel_context[i * 2].ph_chno) + return i; + } + return ZXDH_INVALID_LOGIC_QID; +} + +int +zxdh_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private; + struct zxdh_msg_info msg = {0}; + struct zxdh_msg_reply_info reply_msg = {0}; + uint16_t idx; + uint16_t i; + int ret = 0; + uint16_t qid_logic; + + ret = (!reta_size || reta_size > RTE_ETH_RSS_RETA_SIZE_256); + if (ret) { + PMD_DRV_LOG(ERR, "request reta size(%u) not same with buffered(%u)", + reta_size, RTE_ETH_RSS_RETA_SIZE_256); + return -EINVAL; + } + + /* Fill each entry of the table even if its bit is not set. 
*/ + for (idx = 0, i = 0; (i != reta_size); ++i) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = hw->rss_reta[i]; + } + + + + zxdh_msg_head_build(hw, ZXDH_RSS_RETA_GET, &msg); + + if (hw->is_pf) { + ret = zxdh_rss_table_get(hw->vport.vport, &reply_msg.reply_body.rss_reta); + if (ret) { + PMD_DRV_LOG(ERR, "rss reta table set failed"); + return -EINVAL; + } + } else { + ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info), + &reply_msg, sizeof(struct zxdh_msg_reply_info)); + if (ret) { + PMD_DRV_LOG(ERR, "vf rss reta table get failed"); + return -EINVAL; + } + } + + struct zxdh_rss_reta *reta_table = &reply_msg.reply_body.rss_reta; + + for (idx = 0, i = 0; i < reta_size; ++i) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + + qid_logic = zxdh_hw_qid_to_logic_qid(dev, reta_table->reta[i]); + if (qid_logic == ZXDH_INVALID_LOGIC_QID) { + PMD_DRV_LOG(ERR, "rsp phy reta qid (%u) is illegal(%u)", + reta_table->reta[i], qid_logic); + return -EINVAL; + } + reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = qid_logic; + } + return 0; +} + +static uint32_t +zxdh_rss_hf_to_hw(uint64_t hf) +{ + uint32_t hw_hf = 0; + + if (hf & ZXDH_HF_MAC_VLAN_ETH) + hw_hf |= ZXDH_HF_MAC_VLAN; + if (hf & ZXDH_HF_F3_ETH) + hw_hf |= ZXDH_HF_F3; + if (hf & ZXDH_HF_F5_ETH) + hw_hf |= ZXDH_HF_F5; + + if (hw_hf == (ZXDH_HF_MAC_VLAN | ZXDH_HF_F3 | ZXDH_HF_F5)) + hw_hf = ZXDH_HF_ALL; + return hw_hf; +} + +static uint64_t +zxdh_rss_hf_to_eth(uint32_t hw_hf) +{ + uint64_t hf = 0; + + if (hw_hf == ZXDH_HF_ALL) + return (ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH); + + if (hw_hf & ZXDH_HF_MAC_VLAN) + hf |= ZXDH_HF_MAC_VLAN_ETH; + if (hw_hf & ZXDH_HF_F3) + hf |= ZXDH_HF_F3_ETH; + if (hw_hf & ZXDH_HF_F5) + hf |= ZXDH_HF_F5_ETH; + + return hf; +} + +int +zxdh_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct rte_eth_rss_conf *old_rss_conf = 
&dev->data->dev_conf.rx_adv_conf.rss_conf; + struct zxdh_msg_info msg = {0}; + struct zxdh_port_attr_table port_attr = {0}; + uint32_t hw_hf_new, hw_hf_old; + int need_update_hf = 0; + int ret = 0; + + ret = rss_conf->rss_hf & ZXDH_RSS_HF_MASK; + if (ret) { + PMD_DRV_LOG(ERR, "Not support some hash function (%08lx)", rss_conf->rss_hf); + return -EINVAL; + } + + hw_hf_new = zxdh_rss_hf_to_hw(rss_conf->rss_hf); + hw_hf_old = zxdh_rss_hf_to_hw(old_rss_conf->rss_hf); + + if ((hw_hf_new != hw_hf_old || !!rss_conf->rss_hf)) + need_update_hf = 1; + + if (need_update_hf) { + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.rss_enable = !!rss_conf->rss_hf; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "rss enable set failed"); + return -EINVAL; + } + } else { + msg.data.rss_enable.enable = !!rss_conf->rss_hf; + zxdh_msg_head_build(hw, ZXDH_RSS_ENABLE, &msg); + ret = zxdh_vf_send_msg_to_pf(dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "rss enable set failed"); + return -EINVAL; + } + } + + + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + port_attr.rss_hash_factor = hw_hf_new; + ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "rss hash factor set failed"); + return -EINVAL; + } + } else { + msg.data.rss_hf.rss_hf = hw_hf_new; + zxdh_msg_head_build(hw, ZXDH_RSS_HF_SET, &msg); + ret = zxdh_vf_send_msg_to_pf(dev, &msg, + sizeof(struct zxdh_msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "rss hash factor set failed"); + return -EINVAL; + } + } + old_rss_conf->rss_hf = rss_conf->rss_hf; + } + + return 0; +} + +int +zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) +{ + struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private; + struct rte_eth_rss_conf *old_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; + struct zxdh_msg_info msg = {0}; + struct 
zxdh_msg_reply_info reply_msg = {0}; + struct zxdh_port_attr_table port_attr = {0}; + int ret; + uint32_t hw_hf; + + if (rss_conf == NULL) { + PMD_DRV_LOG(ERR, "rss conf is NULL"); + return -ENOMEM; + } + + hw_hf = zxdh_rss_hf_to_hw(old_rss_conf->rss_hf); + rss_conf->rss_hf = zxdh_rss_hf_to_eth(hw_hf); + + zxdh_msg_head_build(hw, ZXDH_RSS_HF_GET, &msg); + if (hw->is_pf) { + ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr); + if (ret) { + PMD_DRV_LOG(ERR, "rss hash factor get failed"); + return -EINVAL; + } + reply_msg.reply_body.rss_hf.rss_hf = port_attr.rss_hash_factor; + } else { + zxdh_msg_head_build(hw, ZXDH_RSS_HF_GET, &msg); + ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info), + &reply_msg, sizeof(struct zxdh_msg_reply_info)); + if (ret) { + PMD_DRV_LOG(ERR, "rss hash factor get failed"); + return -EINVAL; + } + } + rss_conf->rss_hf = zxdh_rss_hf_to_eth(reply_msg.reply_body.rss_hf.rss_hf); + + return 0; +} diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h index 058d271ab3..ef89c0d325 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.h +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -20,5 +20,13 @@ int zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev); int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev); int zxdh_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); int zxdh_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); +int zxdh_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); +int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); #endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index bed16d31a0..57092abe92 100644 
--- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -181,6 +181,11 @@ enum zxdh_msg_type { ZXDH_VF_PORT_UNINIT = 2, ZXDH_MAC_ADD = 3, ZXDH_MAC_DEL = 4, + ZXDH_RSS_ENABLE = 7, + ZXDH_RSS_RETA_SET = 8, + ZXDH_RSS_RETA_GET = 9, + ZXDH_RSS_HF_SET = 15, + ZXDH_RSS_HF_GET = 16, ZXDH_VLAN_FILTER_SET = 17, ZXDH_VLAN_FILTER_ADD = 18, ZXDH_VLAN_FILTER_DEL = 19, @@ -290,6 +295,14 @@ struct zxdh_link_info_msg { uint32_t speed; } __rte_packed; +struct zxdh_rss_reta { + uint32_t reta[RTE_ETH_RSS_RETA_SIZE_256]; +}; + +struct zxdh_rss_hf { + uint32_t rss_hf; +}; + struct zxdh_msg_reply_head { uint8_t flag; @@ -307,6 +320,8 @@ struct zxdh_msg_reply_body { union { uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)]; struct zxdh_link_info_msg link_msg; + struct zxdh_rss_hf rss_hf; + struct zxdh_rss_reta rss_reta; } __rte_packed; } __rte_packed; @@ -362,6 +377,10 @@ struct zxdh_vlan_offload { uint8_t type; } __rte_packed; +struct zxdh_rss_enable { + uint8_t enable; +}; + struct zxdh_msg_head { enum zxdh_msg_type msg_type; uint16_t vport; @@ -385,6 +404,9 @@ struct zxdh_msg_info { struct zxdh_vlan_filter vlan_filter_msg; struct zxdh_vlan_filter_set vlan_filter_set_msg; struct zxdh_vlan_offload vlan_offload_msg; + struct zxdh_rss_reta rss_reta; + struct zxdh_rss_enable rss_enable; + struct zxdh_rss_hf rss_hf; } __rte_packed data; } __rte_packed; diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index 93dc956597..e8e483a02a 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -10,6 +10,7 @@ #define ZXDH_SDT_VPORT_ATT_TABLE 1 #define ZXDH_SDT_PANEL_ATT_TABLE 2 +#define ZXDH_SDT_RSS_ATT_TABLE 3 #define ZXDH_SDT_VLAN_ATT_TABLE 4 #define ZXDH_SDT_BROCAST_ATT_TABLE 6 #define ZXDH_SDT_UNICAST_ATT_TABLE 10 @@ -656,3 +657,84 @@ int zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable) } return 0; } + +int +zxdh_rss_table_set(uint16_t vport, struct zxdh_rss_reta 
*rss_reta) +{ + struct zxdh_rss_to_vqid_table rss_vqid = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + int ret = 0; + + for (uint16_t i = 0; i < RTE_ETH_RSS_RETA_SIZE_256 / 8; i++) { + for (uint16_t j = 0; j < 8; j++) { + #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + if (j % 2 == 0) + rss_vqid.vqm_qid[j + 1] = rss_reta->reta[i * 8 + j]; + else + rss_vqid.vqm_qid[j - 1] = rss_reta->reta[i * 8 + j]; + #else + rss_vqid.vqm_qid[j] = rss_reta->reta[i * 8 + j]; + #endif + } + + #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + rss_vqid.vqm_qid[1] |= 0x8000; + #else + rss_vqid.vqm_qid[0] |= 0x8000; + #endif + ZXDH_DTB_ERAM_ENTRY_INFO_T entry = { + .index = vport_num.vfid * 32 + i, + .p_data = (uint32_t *)&rss_vqid + }; + ZXDH_DTB_USER_ENTRY_T user_entry_write = { + .sdt_no = ZXDH_SDT_RSS_ATT_TABLE, + .p_entry_data = &entry + }; + ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, + g_dtb_data.queueid, 1, &user_entry_write); + if (ret != 0) { + PMD_DRV_LOG(ERR, "write rss base qid failed vfid:%d", vport_num.vfid); + return ret; + } + } + return 0; +} + +int +zxdh_rss_table_get(uint16_t vport, struct zxdh_rss_reta *rss_reta) +{ + struct zxdh_rss_to_vqid_table rss_vqid = {0}; + union zxdh_virport_num vport_num = (union zxdh_virport_num)vport; + int ret = 0; + + for (uint16_t i = 0; i < RTE_ETH_RSS_RETA_SIZE_256 / 8; i++) { + ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vport_num.vfid * 32 + i, (uint32_t *)&rss_vqid}; + ZXDH_DTB_USER_ENTRY_T user_entry = {ZXDH_SDT_RSS_ATT_TABLE, &entry}; + + ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, + g_dtb_data.queueid, &user_entry, 1); + if (ret != 0) { + PMD_DRV_LOG(ERR, "get rss tbl failed, vfid:%d", vport_num.vfid); + return -1; + } + + #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + rss_vqid.vqm_qid[1] &= 0x7FFF; + #else + rss_vqid.vqm_qid[0] &= 0x7FFF; + #endif + uint8_t size = sizeof(struct zxdh_rss_to_vqid_table) / sizeof(uint16_t); + + for (int j = 0; j < size; j++) { + #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + if (j % 2 
== 0) + rss_reta->reta[i * 8 + j] = rss_vqid.vqm_qid[j + 1]; + else + rss_reta->reta[i * 8 + j] = rss_vqid.vqm_qid[j - 1]; + #else + rss_reta->reta[i * 8 + j] = rss_vqid.vqm_qid[j]; + #endif + } + } + return 0; +} diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index 85e95a876f..649ede33e8 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -10,6 +10,7 @@ extern struct zxdh_dtb_shared_data g_dtb_data; #define ZXDH_DEVICE_NO 0 +#define ZXDH_PORT_BASE_QID_FLAG 10 #define ZXDH_PORT_ATTR_IS_UP_FLAG 35 struct zxdh_port_attr_table { @@ -198,6 +199,10 @@ struct zxdh_vlan_filter_table { uint32_t vlans[4]; }; +struct zxdh_rss_to_vqid_table { + uint16_t vqm_qid[8]; +}; + int zxdh_port_attr_init(struct rte_eth_dev *dev); int zxdh_panel_table_init(struct rte_eth_dev *dev); int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr); @@ -211,5 +216,7 @@ int zxdh_dev_unicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable); int zxdh_dev_multicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable); int zxdh_vlan_filter_table_init(struct rte_eth_dev *dev); int zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable); +int zxdh_rss_table_set(uint16_t vport, struct zxdh_rss_reta *rss_reta); +int zxdh_rss_table_get(uint16_t vport, struct zxdh_rss_reta *rss_reta); #endif /* ZXDH_TABLES_H */ From patchwork Fri Dec 6 05:57:14 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149062 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D042445E16; Fri, 6 Dec 2024 07:07:14 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2A5E1427A3; 
Fri, 6 Dec 2024 07:05:14 +0100 (CET) Received: from mxct.zte.com.cn (mxct.zte.com.cn [183.62.165.209]) by mails.dpdk.org (Postfix) with ESMTP id C33A74278F for ; Fri, 6 Dec 2024 07:05:07 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxct.zte.com.cn (FangMail) with ESMTPS id 4Y4LMN14lVz50FXn; Fri, 6 Dec 2024 14:05:04 +0800 (CST) Received: from szxlzmapp03.zte.com.cn ([10.5.231.207]) by mse-fl2.zte.com.cn with SMTP id 4B664Wch033303; Fri, 6 Dec 2024 14:04:32 +0800 (+08) (envelope-from wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:33 +0800 X-Zmail-TransId: 3e81675293f1001-713bb From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 14/15] net/zxdh: basic stats ops implementations Date: Fri, 6 Dec 2024 13:57:14 +0800 Message-ID: <20241206055715.506961-15-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664Wch033303 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529410.001/4Y4LMN14lVz50FXn X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org basic stats ops implementations. 
Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 2 + doc/guides/nics/zxdh.rst | 1 + drivers/net/zxdh/zxdh_ethdev.c | 2 + drivers/net/zxdh/zxdh_ethdev_ops.c | 353 +++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 27 +++ drivers/net/zxdh/zxdh_msg.h | 15 ++ drivers/net/zxdh/zxdh_np.c | 349 ++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_np.h | 30 +++ drivers/net/zxdh/zxdh_queue.h | 2 + drivers/net/zxdh/zxdh_rxtx.c | 82 ++++++- drivers/net/zxdh/zxdh_tables.h | 5 + 11 files changed, 866 insertions(+), 2 deletions(-) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index 5cae08f611..39c2473652 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -21,3 +21,5 @@ VLAN offload = Y RSS hash = Y RSS reta update = Y Inner RSS = Y +Basic stats = Y +Stats per queue = Y \ No newline at end of file diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index 3cc6a1d348..c8a52b587c 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -32,6 +32,7 @@ Features of the ZXDH PMD are: - VLAN stripping and inserting - QINQ stripping and inserting - Receive Side Scaling (RSS) +- Port hardware statistics Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 87502adf74..82f81d1ded 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -1141,6 +1141,8 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .reta_query = zxdh_dev_rss_reta_query, .rss_hash_update = zxdh_rss_hash_update, .rss_hash_conf_get = zxdh_rss_hash_conf_get, + .stats_get = zxdh_dev_stats_get, + .stats_reset = zxdh_dev_stats_reset, }; static int32_t diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c index ef31957923..6156c94f2c 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.c +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -10,6 +10,8 @@ #include 
"zxdh_ethdev_ops.h" #include "zxdh_tables.h" #include "zxdh_logs.h" +#include "zxdh_rxtx.h" +#include "zxdh_np.h" #define ZXDH_VLAN_FILTER_GROUPS 64 #define ZXDH_INVALID_LOGIC_QID 0xFFFFU @@ -36,6 +38,108 @@ #define ZXDH_HF_MAC_VLAN 4 #define ZXDH_HF_ALL 0 +struct zxdh_hw_mac_stats { + uint64_t rx_total; + uint64_t rx_pause; + uint64_t rx_unicast; + uint64_t rx_multicast; + uint64_t rx_broadcast; + uint64_t rx_vlan; + uint64_t rx_size_64; + uint64_t rx_size_65_127; + uint64_t rx_size_128_255; + uint64_t rx_size_256_511; + uint64_t rx_size_512_1023; + uint64_t rx_size_1024_1518; + uint64_t rx_size_1519_mru; + uint64_t rx_undersize; + uint64_t rx_oversize; + uint64_t rx_fragment; + uint64_t rx_jabber; + uint64_t rx_control; + uint64_t rx_eee; + + uint64_t tx_total; + uint64_t tx_pause; + uint64_t tx_unicast; + uint64_t tx_multicast; + uint64_t tx_broadcast; + uint64_t tx_vlan; + uint64_t tx_size_64; + uint64_t tx_size_65_127; + uint64_t tx_size_128_255; + uint64_t tx_size_256_511; + uint64_t tx_size_512_1023; + uint64_t tx_size_1024_1518; + uint64_t tx_size_1519_mtu; + uint64_t tx_undersize; + uint64_t tx_oversize; + uint64_t tx_fragment; + uint64_t tx_jabber; + uint64_t tx_control; + uint64_t tx_eee; + + uint64_t rx_error; + uint64_t rx_fcs_error; + uint64_t rx_drop; + + uint64_t tx_error; + uint64_t tx_fcs_error; + uint64_t tx_drop; + +} __rte_packed; + +struct zxdh_hw_mac_bytes { + uint64_t rx_total_bytes; + uint64_t rx_good_bytes; + uint64_t tx_total_bytes; + uint64_t tx_good_bytes; +} __rte_packed; + +struct zxdh_np_stats_data { + uint64_t n_pkts_dropped; + uint64_t n_bytes_dropped; +}; + +struct zxdh_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct zxdh_xstats_name_off zxdh_rxq_stat_strings[] = { + {"good_packets", offsetof(struct zxdh_virtnet_rx, stats.packets)}, + {"good_bytes", offsetof(struct zxdh_virtnet_rx, stats.bytes)}, + {"errors", offsetof(struct zxdh_virtnet_rx, stats.errors)}, + 
{"multicast_packets", offsetof(struct zxdh_virtnet_rx, stats.multicast)}, + {"broadcast_packets", offsetof(struct zxdh_virtnet_rx, stats.broadcast)}, + {"truncated_err", offsetof(struct zxdh_virtnet_rx, stats.truncated_err)}, + {"undersize_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[0])}, + {"size_64_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[1])}, + {"size_65_127_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[2])}, + {"size_128_255_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[3])}, + {"size_256_511_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[4])}, + {"size_512_1023_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[5])}, + {"size_1024_1518_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[6])}, + {"size_1519_max_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[7])}, +}; + +static const struct zxdh_xstats_name_off zxdh_txq_stat_strings[] = { + {"good_packets", offsetof(struct zxdh_virtnet_tx, stats.packets)}, + {"good_bytes", offsetof(struct zxdh_virtnet_tx, stats.bytes)}, + {"errors", offsetof(struct zxdh_virtnet_tx, stats.errors)}, + {"multicast_packets", offsetof(struct zxdh_virtnet_tx, stats.multicast)}, + {"broadcast_packets", offsetof(struct zxdh_virtnet_tx, stats.broadcast)}, + {"truncated_err", offsetof(struct zxdh_virtnet_tx, stats.truncated_err)}, + {"undersize_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[0])}, + {"size_64_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[1])}, + {"size_65_127_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[2])}, + {"size_128_255_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[3])}, + {"size_256_511_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[4])}, + {"size_512_1023_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[5])}, + {"size_1024_1518_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[6])}, + {"size_1519_max_packets", offsetof(struct 
zxdh_virtnet_tx, stats.size_bins[7])}, +}; + static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status) { struct zxdh_hw *hw = dev->data->dev_private; @@ -1066,3 +1170,252 @@ zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_con return 0; } + +static int32_t +zxdh_hw_vqm_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode, + struct zxdh_hw_vqm_stats *hw_stats) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + struct zxdh_msg_reply_info reply_info = {0}; + enum ZXDH_BAR_MODULE_ID module_id; + int ret = 0; + + switch (opcode) { + case ZXDH_VQM_DEV_STATS_GET: + case ZXDH_VQM_QUEUE_STATS_GET: + case ZXDH_VQM_QUEUE_STATS_RESET: + module_id = ZXDH_BAR_MODULE_VQM; + break; + case ZXDH_MAC_STATS_GET: + case ZXDH_MAC_STATS_RESET: + module_id = ZXDH_BAR_MODULE_MAC; + break; + default: + PMD_DRV_LOG(ERR, "invalid opcode %u", opcode); + return -1; + } + + zxdh_agent_msg_build(hw, opcode, &msg_info); + + ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info), + &reply_info, sizeof(struct zxdh_msg_reply_info), module_id); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get hw stats"); + return -EAGAIN; + } + struct zxdh_msg_reply_body *reply_body = &reply_info.reply_body; + + rte_memcpy(hw_stats, &reply_body->vqm_stats, sizeof(struct zxdh_hw_vqm_stats)); + return 0; +} + +static int zxdh_hw_mac_stats_get(struct rte_eth_dev *dev, + struct zxdh_hw_mac_stats *mac_stats, + struct zxdh_hw_mac_bytes *mac_bytes) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MAC_OFFSET); + uint64_t stats_addr = 0; + uint64_t bytes_addr = 0; + + if (hw->speed <= RTE_ETH_SPEED_NUM_25G) { + stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * (hw->phyport % 4); + bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * (hw->phyport % 4); + } else { + stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * 4; + 
bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * 4; + } + + rte_memcpy(mac_stats, (void *)stats_addr, sizeof(struct zxdh_hw_mac_stats)); + rte_memcpy(mac_bytes, (void *)bytes_addr, sizeof(struct zxdh_hw_mac_bytes)); + return 0; +} + +static void zxdh_data_hi_to_lo(uint64_t *data) +{ + uint32_t n_data_hi; + uint32_t n_data_lo; + + n_data_lo = *data >> 32; + n_data_hi = *data; + *data = (uint64_t)(rte_le_to_cpu_32(n_data_hi)) << 32 | + rte_le_to_cpu_32(n_data_lo); +} + +static int zxdh_np_stats_get(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_np_stats_data stats_data; + uint32_t stats_id = zxdh_vport_to_vfid(hw->vport); + uint32_t idx = 0; + int ret = 0; + + idx = stats_id + ZXDH_BROAD_STATS_EGRESS_BASE; + ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 0, idx, (uint32_t *)&np_stats->np_tx_broadcast); + if (ret) + return ret; + zxdh_data_hi_to_lo(&np_stats->np_tx_broadcast); + + idx = stats_id + ZXDH_BROAD_STATS_INGRESS_BASE; + memset(&stats_data, 0, sizeof(stats_data)); + ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 0, idx, (uint32_t *)&np_stats->np_rx_broadcast); + if (ret) + return ret; + zxdh_data_hi_to_lo(&np_stats->np_rx_broadcast); + + idx = stats_id + ZXDH_MTU_STATS_EGRESS_BASE; + memset(&stats_data, 0, sizeof(stats_data)); + ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 1, idx, (uint32_t *)&stats_data); + if (ret) + return ret; + + np_stats->np_tx_mtu_drop_pkts = stats_data.n_pkts_dropped; + np_stats->np_tx_mtu_drop_bytes = stats_data.n_bytes_dropped; + zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_pkts); + zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_bytes); + + idx = stats_id + ZXDH_MTU_STATS_INGRESS_BASE; + memset(&stats_data, 0, sizeof(stats_data)); + ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, + 1, idx, (uint32_t *)&stats_data); + if (ret) + return ret; + np_stats->np_rx_mtu_drop_pkts = 
stats_data.n_pkts_dropped; + np_stats->np_rx_mtu_drop_bytes = stats_data.n_bytes_dropped; + zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_pkts); + zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_bytes); + + return 0; +} + +static int +zxdh_hw_np_stats_get(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + struct zxdh_msg_reply_info reply_info = {0}; + int ret = 0; + + if (hw->is_pf) { + ret = zxdh_np_stats_get(dev, np_stats); + if (ret) { + PMD_DRV_LOG(ERR, "get np stats failed"); + return -1; + } + } else { + zxdh_msg_head_build(hw, ZXDH_GET_NP_STATS, &msg_info); + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info), + &reply_info, sizeof(struct zxdh_msg_reply_info)); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to send msg: port 0x%x msg type ZXDH_PORT_METER_STAT_GET", + hw->vport.vport); + return -1; + } + memcpy(np_stats, &reply_info.reply_body.np_stats, sizeof(struct zxdh_hw_np_stats)); + } + return ret; +} + +int +zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_hw_vqm_stats vqm_stats = {0}; + struct zxdh_hw_np_stats np_stats = {0}; + struct zxdh_hw_mac_stats mac_stats = {0}; + struct zxdh_hw_mac_bytes mac_bytes = {0}; + uint32_t i = 0; + + zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET, &vqm_stats); + if (hw->is_pf) + zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes); + + zxdh_hw_np_stats_get(dev, &np_stats); + + stats->ipackets = vqm_stats.rx_total; + stats->opackets = vqm_stats.tx_total; + stats->ibytes = vqm_stats.rx_bytes; + stats->obytes = vqm_stats.tx_bytes; + stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop; + stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.np_rx_mtu_drop_pkts; + stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.np_tx_mtu_drop_pkts; + + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 
+ for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) { + struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[i]; + + if (rxvq == NULL) + continue; + stats->q_ipackets[i] = *(uint64_t *)(((char *)rxvq) + + zxdh_rxq_stat_strings[0].offset); + stats->q_ibytes[i] = *(uint64_t *)(((char *)rxvq) + + zxdh_rxq_stat_strings[1].offset); + stats->q_errors[i] = *(uint64_t *)(((char *)rxvq) + + zxdh_rxq_stat_strings[2].offset); + stats->q_errors[i] += *(uint64_t *)(((char *)rxvq) + + zxdh_rxq_stat_strings[5].offset); + } + + for (i = 0; (i < dev->data->nb_tx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) { + struct zxdh_virtnet_tx *txvq = dev->data->tx_queues[i]; + + if (txvq == NULL) + continue; + stats->q_opackets[i] = *(uint64_t *)(((char *)txvq) + + zxdh_txq_stat_strings[0].offset); + stats->q_obytes[i] = *(uint64_t *)(((char *)txvq) + + zxdh_txq_stat_strings[1].offset); + stats->q_errors[i] += *(uint64_t *)(((char *)txvq) + + zxdh_txq_stat_strings[2].offset); + stats->q_errors[i] += *(uint64_t *)(((char *)txvq) + + zxdh_txq_stat_strings[5].offset); + } + return 0; +} + +static int zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msg_info msg_info = {0}; + struct zxdh_msg_reply_info reply_info = {0}; + enum ZXDH_BAR_MODULE_ID module_id; + int ret = 0; + + switch (opcode) { + case ZXDH_VQM_DEV_STATS_RESET: + module_id = ZXDH_BAR_MODULE_VQM; + break; + case ZXDH_MAC_STATS_RESET: + module_id = ZXDH_BAR_MODULE_MAC; + break; + default: + PMD_DRV_LOG(ERR, "invalid opcode %u", opcode); + return -1; + } + + zxdh_agent_msg_build(hw, opcode, &msg_info); + + ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info), + &reply_info, sizeof(struct zxdh_msg_reply_info), module_id); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to reset hw stats"); + return -EAGAIN; + } + return 0; +} + +int zxdh_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct 
zxdh_hw *hw = dev->data->dev_private; + + zxdh_hw_stats_reset(dev, ZXDH_VQM_DEV_STATS_RESET); + if (hw->is_pf) + zxdh_hw_stats_reset(dev, ZXDH_MAC_STATS_RESET); + + return 0; +} diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h index ef89c0d325..dad84934fc 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.h +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -5,8 +5,33 @@ #ifndef ZXDH_ETHDEV_OPS_H #define ZXDH_ETHDEV_OPS_H +#include + #include "zxdh_ethdev.h" +struct zxdh_hw_vqm_stats { + uint64_t rx_total; + uint64_t tx_total; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t rx_error; + uint64_t tx_error; + uint64_t rx_drop; +} __rte_packed; + +struct zxdh_hw_np_stats { + uint64_t np_rx_broadcast; + uint64_t np_tx_broadcast; + uint64_t np_rx_mtu_drop_pkts; + uint64_t np_tx_mtu_drop_pkts; + uint64_t np_rx_mtu_drop_bytes; + uint64_t np_tx_mtu_drop_bytes; + uint64_t np_rx_mtr_drop_pkts; + uint64_t np_tx_mtr_drop_pkts; + uint64_t np_rx_mtr_drop_bytes; + uint64_t np_tx_mtr_drop_bytes; +}; + int zxdh_dev_set_link_up(struct rte_eth_dev *dev); int zxdh_dev_set_link_down(struct rte_eth_dev *dev); int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused); @@ -28,5 +53,7 @@ int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev, uint16_t reta_size); int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); +int zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +int zxdh_dev_stats_reset(struct rte_eth_dev *dev); #endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h index 57092abe92..5530fc70a7 100644 --- a/drivers/net/zxdh/zxdh_msg.h +++ b/drivers/net/zxdh/zxdh_msg.h @@ -9,8 +9,13 @@ #include +#include "zxdh_ethdev_ops.h" + #define ZXDH_BAR0_INDEX 0 #define ZXDH_CTRLCH_OFFSET (0x2000) +#define ZXDH_MAC_OFFSET (0x24000) +#define 
ZXDH_MAC_STATS_OFFSET (0x1408) +#define ZXDH_MAC_BYTES_OFFSET (0xb000) #define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET (ZXDH_CTRLCH_OFFSET + 0x1000) #define ZXDH_MSIX_INTR_MSG_VEC_BASE 1 @@ -172,7 +177,13 @@ enum pciebar_layout_type { /* riscv msg opcodes */ enum zxdh_agent_msg_type { + ZXDH_MAC_STATS_GET = 10, + ZXDH_MAC_STATS_RESET, ZXDH_MAC_LINK_GET = 14, + ZXDH_VQM_DEV_STATS_GET = 21, + ZXDH_VQM_DEV_STATS_RESET, + ZXDH_VQM_QUEUE_STATS_GET = 24, + ZXDH_VQM_QUEUE_STATS_RESET, } __rte_packed; enum zxdh_msg_type { @@ -194,6 +205,8 @@ enum zxdh_msg_type { ZXDH_PORT_ATTRS_SET = 25, ZXDH_PORT_PROMISC_SET = 26, + ZXDH_GET_NP_STATS = 31, + ZXDH_MSG_TYPE_END, } __rte_packed; @@ -322,6 +335,8 @@ struct zxdh_msg_reply_body { struct zxdh_link_info_msg link_msg; struct zxdh_rss_hf rss_hf; struct zxdh_rss_reta rss_reta; + struct zxdh_hw_vqm_stats vqm_stats; + struct zxdh_hw_np_stats np_stats; } __rte_packed; } __rte_packed; diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c index 2a4d38b846..34b7732105 100644 --- a/drivers/net/zxdh/zxdh_np.c +++ b/drivers/net/zxdh/zxdh_np.c @@ -25,6 +25,7 @@ ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX] = {NULL}; ZXDH_SDT_TBL_DATA_T g_sdt_info[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX]; ZXDH_REG_T g_dpp_reg_info[4] = {0}; ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4] = {0}; +ZXDH_PPU_STAT_CFG_T g_ppu_stat_cfg = {0}; #define ZXDH_COMM_ASSERT(x) assert(x) #define ZXDH_SDT_MGR_PTR_GET() (&g_sdt_mgr) @@ -46,6 +47,18 @@ ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4] = {0}; #define ZXDH_COMM_CONVERT32(dw_data) \ (((dw_data) & 0xff) << 24) +#define ZXDH_DTB_TAB_UP_WR_INDEX_GET(DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.wr_index) + +#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].user_flag) + +#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(DEV_ID, QUEUE_ID, INDEX) \ + 
(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].phy_addr) + +#define ZXDH_DTB_TAB_UP_DATA_LEN_GET(DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.data_len[(INDEX)]) + #define ZXDH_REG_DATA_MAX (128) #define ZXDH_COMM_CHECK_DEV_POINT(dev_id, point)\ @@ -1793,3 +1806,339 @@ zxdh_np_dtb_table_entry_get(uint32_t dev_id, return 0; } + +static uint32_t +zxdh_np_stat_cfg_soft_get(uint32_t dev_id, + ZXDH_PPU_STAT_CFG_T *p_stat_cfg) +{ + ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_stat_cfg); + + p_stat_cfg->ddr_base_addr = g_ppu_stat_cfg.ddr_base_addr; + p_stat_cfg->eram_baddr = g_ppu_stat_cfg.eram_baddr; + p_stat_cfg->eram_depth = g_ppu_stat_cfg.eram_depth; + p_stat_cfg->ppu_addr_offset = g_ppu_stat_cfg.ppu_addr_offset; + + return 0; +} + +static uint32_t +zxdh_np_dtb_tab_up_info_set(uint32_t dev_id, + uint32_t queue_id, + uint32_t item_index, + uint32_t int_flag, + uint32_t data_len, + uint32_t desc_len, + uint32_t *p_desc_data) +{ + uint32_t queue_en = 0; + ZXDH_DTB_QUEUE_ITEM_INFO_T item_info = {0}; + + zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en); + if (!queue_en) { + PMD_DRV_LOG(ERR, "the queue %d is not enable!", queue_id); + return ZXDH_RC_DTB_QUEUE_NOT_ENABLE; + } + + if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) { + PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id); + return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (desc_len % 4 != 0) + return ZXDH_RC_DTB_PARA_INVALID; + + zxdh_np_dtb_item_buff_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, + item_index, 0, desc_len, p_desc_data); + + ZXDH_DTB_TAB_UP_DATA_LEN_GET(dev_id, queue_id, item_index) = data_len; + + item_info.cmd_vld = 1; + item_info.cmd_type = ZXDH_DTB_DIR_UP_TYPE; + item_info.int_en = int_flag; + item_info.data_len = desc_len / 4; + + if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM) + return 0; + + zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info); + + return 0; +} + +static uint32_t 
+zxdh_np_dtb_write_dump_desc_info(uint32_t dev_id, + uint32_t queue_id, + uint32_t queue_element_id, + uint32_t *p_dump_info, + uint32_t data_len, + uint32_t desc_len, + uint32_t *p_dump_data) +{ + uint32_t rc = 0; + uint32_t dtb_interrupt_status = 0; + + ZXDH_COMM_CHECK_POINT(p_dump_data); + rc = zxdh_np_dtb_tab_up_info_set(dev_id, + queue_id, + queue_element_id, + dtb_interrupt_status, + data_len, + desc_len, + p_dump_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, "the queue %d element id %d dump" + " info set failed!", queue_id, queue_element_id); + zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, + queue_element_id, 0, ZXDH_DTB_TAB_ACK_UNUSED_MASK); + } + + return 0; +} + +static uint32_t +zxdh_np_dtb_tab_up_free_item_get(uint32_t dev_id, + uint32_t queue_id, + uint32_t *p_item_index) +{ + uint32_t i = 0; + uint32_t ack_vale = 0; + uint32_t item_index = 0; + uint32_t unused_item_num = 0; + + if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) { + PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id); + return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT; + } + + zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num); + + if (unused_item_num == 0) + return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY; + + for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) { + item_index = ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id) % + ZXDH_DTB_QUEUE_ITEM_NUM_MAX; + + zxdh_np_dtb_item_ack_rd(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index, + 0, &ack_vale); + + ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id)++; + + if ((ack_vale >> 8) == ZXDH_DTB_TAB_ACK_UNUSED_MASK) + break; + } + + if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX) + return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY; + + zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index, + 0, ZXDH_DTB_TAB_ACK_IS_USING_MASK); + + *p_item_index = item_index; + + + return 0; +} + +static uint32_t +zxdh_np_dtb_tab_up_item_addr_get(uint32_t dev_id, + uint32_t queue_id, + uint32_t item_index, + uint32_t *p_phy_haddr, + 
uint32_t *p_phy_laddr) +{ + uint64_t addr = 0; + + if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) { + PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id); + return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(dev_id, queue_id, item_index) == + ZXDH_DTB_TAB_UP_USER_ADDR_TYPE) + addr = ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(dev_id, queue_id, item_index); + else + addr = ZXDH_DTB_ITEM_ACK_SIZE; + + *p_phy_haddr = (addr >> 32) & 0xffffffff; + *p_phy_laddr = addr & 0xffffffff; + + return 0; +} + +static uint32_t +zxdh_np_dtb_se_smmu0_dma_dump(uint32_t dev_id, + uint32_t queue_id, + uint32_t base_addr, + uint32_t depth, + uint32_t *p_data, + uint32_t *element_id) +{ + uint32_t rc = 0; + uint32_t dump_dst_phy_haddr = 0; + uint32_t dump_dst_phy_laddr = 0; + uint32_t queue_item_index = 0; + uint32_t data_len = 0; + uint32_t desc_len = 0; + + uint8_t form_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0}; + + rc = zxdh_np_dtb_tab_up_free_item_get(dev_id, queue_id, &queue_item_index); + if (rc != 0) { + PMD_DRV_LOG(ERR, "dpp_dtb_tab_up_free_item_get failed = %d!", base_addr); + return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY; + } + + *element_id = queue_item_index; + + rc = zxdh_np_dtb_tab_up_item_addr_get(dev_id, queue_id, queue_item_index, + &dump_dst_phy_haddr, &dump_dst_phy_laddr); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get"); + + data_len = depth * 128 / 32; + desc_len = ZXDH_DTB_LEN_POS_SETP / 4; + + + rc = zxdh_np_dtb_write_dump_desc_info(dev_id, queue_id, queue_item_index, + (uint32_t *)form_buff, data_len, desc_len, p_data); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info"); + + return 0; +} + +static uint32_t +zxdh_np_dtb_se_smmu0_ind_read(uint32_t dev_id, + uint32_t queue_id, + uint32_t base_addr, + uint32_t index, + uint32_t rd_mode, + uint32_t *p_data) +{ + uint32_t rc = 0; + + uint32_t row_index = 0; + uint32_t col_index = 0; + uint32_t temp_data[4] = {0}; + uint32_t eram_dump_base_addr = 0; + 
uint32_t element_id = 0; + + switch (rd_mode) { + case ZXDH_ERAM128_OPR_128b: + { + row_index = index; + break; + } + + case ZXDH_ERAM128_OPR_64b: + { + row_index = (index >> 1); + col_index = index & 0x1; + break; + } + + case ZXDH_ERAM128_OPR_1b: + { + row_index = (index >> 7); + col_index = index & 0x7F; + break; + } + } + + eram_dump_base_addr = base_addr + row_index; + + rc = zxdh_np_dtb_se_smmu0_dma_dump(dev_id, + queue_id, + eram_dump_base_addr, + 1, + temp_data, + &element_id); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_dma_dump"); + + switch (rd_mode) { + case ZXDH_ERAM128_OPR_128b: + { + memcpy(p_data, temp_data, (128 / 8)); + break; + } + + case ZXDH_ERAM128_OPR_64b: + { + memcpy(p_data, temp_data + ((1 - col_index) << 1), (64 / 8)); + break; + } + + case ZXDH_ERAM128_OPR_1b: + { + ZXDH_COMM_UINT32_GET_BITS(p_data[0], *(temp_data + + (3 - col_index / 32)), (col_index % 32), 1); + break; + } + } + + return rc; +} + +static uint32_t +zxdh_np_dtb_stat_smmu0_int_read(uint32_t dev_id, + uint32_t queue_id, + uint32_t smmu0_base_addr, + ZXDH_STAT_CNT_MODE_E rd_mode, + uint32_t index, + uint32_t *p_data) +{ + uint32_t rc = 0; + + uint32_t eram_rd_mode = 0; + + ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data); + + if (rd_mode == ZXDH_STAT_128_MODE) + eram_rd_mode = ZXDH_ERAM128_OPR_128b; + else + eram_rd_mode = ZXDH_ERAM128_OPR_64b; + + rc = zxdh_np_dtb_se_smmu0_ind_read(dev_id, + queue_id, + smmu0_base_addr, + index, + eram_rd_mode, + p_data); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_ind_read"); + + return rc; +} + +int +zxdh_np_dtb_stats_get(uint32_t dev_id, + uint32_t queue_id, + ZXDH_STAT_CNT_MODE_E rd_mode, + uint32_t index, + uint32_t *p_data) +{ + uint32_t rc = 0; + uint32_t ppu_eram_baddr = 0; + uint32_t ppu_eram_depth = 0; + ZXDH_PPU_STAT_CFG_T stat_cfg = {0}; + + ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data); + + memset(&stat_cfg, 0x0, sizeof(stat_cfg)); + + rc = zxdh_np_stat_cfg_soft_get(dev_id, &stat_cfg); + 
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_stat_cfg_soft_get"); + + ppu_eram_depth = stat_cfg.eram_depth; + ppu_eram_baddr = stat_cfg.eram_baddr; + + if ((index >> (ZXDH_STAT_128_MODE - rd_mode)) < ppu_eram_depth) { + rc = zxdh_np_dtb_stat_smmu0_int_read(dev_id, + queue_id, + ppu_eram_baddr, + rd_mode, + index, + p_data); + ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_stat_smmu0_int_read"); + } + + return rc; +} diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h index 7295b709ce..fd59a46491 100644 --- a/drivers/net/zxdh/zxdh_np.h +++ b/drivers/net/zxdh/zxdh_np.h @@ -432,6 +432,18 @@ typedef enum zxdh_sdt_table_type_e { ZXDH_SDT_TBLT_MAX = 7, } ZXDH_SDT_TABLE_TYPE_E; +typedef enum zxdh_dtb_dir_type_e { + ZXDH_DTB_DIR_DOWN_TYPE = 0, + ZXDH_DTB_DIR_UP_TYPE = 1, + ZXDH_DTB_DIR_TYPE_MAX, +} ZXDH_DTB_DIR_TYPE_E; + +typedef enum zxdh_dtb_tab_up_user_addr_type_e { + ZXDH_DTB_TAB_UP_NOUSER_ADDR_TYPE = 0, + ZXDH_DTB_TAB_UP_USER_ADDR_TYPE = 1, + ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_MAX, +} ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_E; + typedef struct zxdh_dtb_lpm_entry_t { uint32_t dtb_len0; uint8_t *p_data_buff0; @@ -537,6 +549,19 @@ typedef struct zxdh_sdt_tbl_porttbl_t { uint32_t porttbl_clutch_en; } ZXDH_SDTTBL_PORTTBL_T; +typedef struct zxdh_ppu_stat_cfg_t { + uint32_t eram_baddr; + uint32_t eram_depth; + uint32_t ddr_base_addr; + uint32_t ppu_addr_offset; +} ZXDH_PPU_STAT_CFG_T; + +typedef enum zxdh_stat_cnt_mode_e { + ZXDH_STAT_64_MODE = 0, + ZXDH_STAT_128_MODE = 1, + ZXDH_STAT_MAX_MODE, +} ZXDH_STAT_CNT_MODE_E; + int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl); int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id); int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id, @@ -545,5 +570,10 @@ int zxdh_np_dtb_table_entry_delete(uint32_t dev_id, uint32_t queue_id, uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *delete_entries); int zxdh_np_dtb_table_entry_get(uint32_t dev_id, uint32_t queue_id, 
ZXDH_DTB_USER_ENTRY_T *get_entry, uint32_t srh_mode); +int zxdh_np_dtb_stats_get(uint32_t dev_id, + uint32_t queue_id, + ZXDH_STAT_CNT_MODE_E rd_mode, + uint32_t index, + uint32_t *p_data); #endif /* ZXDH_NP_H */ diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h index 1bd292e235..af616d115b 100644 --- a/drivers/net/zxdh/zxdh_queue.h +++ b/drivers/net/zxdh/zxdh_queue.h @@ -53,6 +53,8 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; #define ZXDH_PI_HDR_SIZE sizeof(struct zxdh_pi_hdr) #define ZXDH_DL_NET_HDR_SIZE sizeof(struct zxdh_net_hdr_dl) #define ZXDH_UL_NET_HDR_SIZE sizeof(struct zxdh_net_hdr_ul) +#define ZXDH_PD_HDR_SIZE_MAX 256 +#define ZXDH_PD_HDR_SIZE_MIN ZXDH_TYPE_HDR_SIZE /* * ring descriptors: 16 bytes. diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c index be5865ac85..c7a765a881 100644 --- a/drivers/net/zxdh/zxdh_rxtx.c +++ b/drivers/net/zxdh/zxdh_rxtx.c @@ -405,6 +405,40 @@ static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq, zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers); } +static void +zxdh_update_packet_stats(struct zxdh_virtnet_stats *stats, struct rte_mbuf *mbuf) +{ + uint32_t s = mbuf->pkt_len; + struct rte_ether_addr *ea = NULL; + + stats->bytes += s; + + if (s == 64) { + stats->size_bins[1]++; + } else if (s > 64 && s < 1024) { + uint32_t bin; + + /* count zeros, and offset into correct bin */ + bin = (sizeof(s) * 8) - rte_clz32(s) - 5; + stats->size_bins[bin]++; + } else { + if (s < 64) + stats->size_bins[0]++; + else if (s < 1519) + stats->size_bins[6]++; + else + stats->size_bins[7]++; + } + + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *); + if (rte_is_multicast_ether_addr(ea)) { + if (rte_is_broadcast_ether_addr(ea)) + stats->broadcast++; + else + stats->multicast++; + } +} + uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -458,12 +492,19 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct 
rte_mbuf **tx_pkts, uint16_t nb_pkt break; } } + if (txm->nb_segs > ZXDH_TX_MAX_SEGS) { + PMD_TX_LOG(ERR, "%dsegs dropped", txm->nb_segs); + txvq->stats.truncated_err += nb_pkts - nb_tx; + break; + } /* Enqueue Packet buffers */ if (can_push) zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order); else zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order); + zxdh_update_packet_stats(&txvq->stats, txm); } + txvq->stats.packets += nb_tx; if (likely(nb_tx)) { if (unlikely(zxdh_queue_kick_prepare_packed(vq))) { zxdh_queue_notify(vq); @@ -473,9 +514,10 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt return nb_tx; } -uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, +uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + struct zxdh_virtnet_tx *txvq = tx_queue; uint16_t nb_tx; for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { @@ -495,6 +537,12 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **t rte_errno = -error; break; } + if (m->nb_segs > ZXDH_TX_MAX_SEGS) { + PMD_TX_LOG(ERR, "%d segs dropped", m->nb_segs); + txvq->stats.truncated_err += nb_pkts - nb_tx; + rte_errno = ENOMEM; + break; + } } return nb_tx; } @@ -570,7 +618,7 @@ static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *h return 0; } -static inline void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m) +static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m) { int32_t error = 0; /* @@ -612,6 +660,13 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, for (i = 0; i < num; i++) { rxm = rcv_pkts[i]; + if (unlikely(len[i] < ZXDH_UL_NET_HDR_SIZE)) { + nb_enqueued++; + PMD_RX_LOG(ERR, "RX, len:%u err", len[i]); + zxdh_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } struct zxdh_net_hdr_ul *header = (struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr + @@ -622,8 +677,22 
@@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num); seg_num = 1; } + if (seg_num > ZXDH_RX_MAX_SEGS) { + PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num); + nb_enqueued++; + zxdh_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } /* bit[0:6]-pd_len unit:2B */ uint16_t pd_len = header->type_hdr.pd_len << 1; + if (pd_len > ZXDH_PD_HDR_SIZE_MAX || pd_len < ZXDH_PD_HDR_SIZE_MIN) { + PMD_RX_LOG(ERR, "pd_len:%d is invalid", pd_len); + nb_enqueued++; + zxdh_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } /* Private queue only handle type hdr */ hdr_size = pd_len; rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size; @@ -638,6 +707,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, /* Update rte_mbuf according to pi/pd header */ if (zxdh_rx_update_mbuf(rxm, header) < 0) { zxdh_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } seg_res = seg_num - 1; @@ -660,8 +730,11 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.", rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len); zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]); + rxvq->stats.errors++; + rxvq->stats.truncated_err++; continue; } + zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]); nb_rx++; } } @@ -674,6 +747,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, if (unlikely(rcv_cnt == 0)) { PMD_RX_LOG(ERR, "No enough segments for packet."); rte_pktmbuf_free(rx_pkts[nb_rx]); + rxvq->stats.errors++; break; } while (extra_idx < rcv_cnt) { @@ -693,11 +767,15 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.", rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len); zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]); + rxvq->stats.errors++; + rxvq->stats.truncated_err++; continue; } + 
zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]); nb_rx++; } } + rxvq->stats.packets += nb_rx; /* Allocate new mbuf for the used descriptor */ if (likely(!zxdh_queue_full(vq))) { diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index 649ede33e8..675c7871ae 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -13,6 +13,11 @@ extern struct zxdh_dtb_shared_data g_dtb_data; #define ZXDH_PORT_BASE_QID_FLAG 10 #define ZXDH_PORT_ATTR_IS_UP_FLAG 35 +#define ZXDH_MTU_STATS_EGRESS_BASE 0x8481 +#define ZXDH_MTU_STATS_INGRESS_BASE 0x8981 +#define ZXDH_BROAD_STATS_EGRESS_BASE 0xC902 +#define ZXDH_BROAD_STATS_INGRESS_BASE 0xD102 + struct zxdh_port_attr_table { #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN uint8_t byte4_rsv1: 1; From patchwork Fri Dec 6 05:57:15 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junlong Wang X-Patchwork-Id: 149064 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id DB73245E16; Fri, 6 Dec 2024 07:07:38 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 90B60427C8; Fri, 6 Dec 2024 07:05:18 +0100 (CET) Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.35]) by mails.dpdk.org (Postfix) with ESMTP id 9EC4840E5E for ; Fri, 6 Dec 2024 07:05:13 +0100 (CET) Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mxhk.zte.com.cn (FangMail) with ESMTPS id 4Y4LMW5H2vz5B1Gw; Fri, 6 Dec 2024 14:05:11 +0800 (CST) Received: from szxlzmapp07.zte.com.cn ([10.5.230.251]) by mse-fl2.zte.com.cn with SMTP id 4B664WiF033304; Fri, 6 Dec 2024 14:04:32 +0800 (+08) (envelope-from 
wang.junlong1@zte.com.cn) Received: from localhost.localdomain (unknown [192.168.6.15]) by smtp (Zmail) with SMTP; Fri, 6 Dec 2024 14:04:34 +0800 X-Zmail-TransId: 3e81675293f2001-713be From: Junlong Wang To: ferruh.yigit@amd.com Cc: dev@dpdk.org, Junlong Wang Subject: [PATCH v1 15/15] net/zxdh: mtu update ops implementations Date: Fri, 6 Dec 2024 13:57:15 +0800 Message-ID: <20241206055715.506961-16-wang.junlong1@zte.com.cn> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20241206055715.506961-1-wang.junlong1@zte.com.cn> References: <20241104115856.2795213-2-wang.junlong1@zte.com.cn> <20241206055715.506961-1-wang.junlong1@zte.com.cn> MIME-Version: 1.0 X-MAIL: mse-fl2.zte.com.cn 4B664WiF033304 X-Fangmail-Anti-Spam-Filtered: true X-Fangmail-MID-QID: 67529417.005/4Y4LMW5H2vz5B1Gw X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org mtu update ops implementations. 
Signed-off-by: Junlong Wang --- doc/guides/nics/features/zxdh.ini | 3 +- doc/guides/nics/zxdh.rst | 2 + drivers/net/zxdh/zxdh_ethdev.c | 1 + drivers/net/zxdh/zxdh_ethdev_ops.c | 79 ++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev_ops.h | 1 + drivers/net/zxdh/zxdh_tables.c | 42 ++++++++++++++++ drivers/net/zxdh/zxdh_tables.h | 4 ++ 7 files changed, 131 insertions(+), 1 deletion(-) -- 2.27.0 diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini index 39c2473652..b9bdb73ddf 100644 --- a/doc/guides/nics/features/zxdh.ini +++ b/doc/guides/nics/features/zxdh.ini @@ -22,4 +22,5 @@ RSS hash = Y RSS reta update = Y Inner RSS = Y Basic stats = Y -Stats per queue = Y \ No newline at end of file +Stats per queue = Y +MTU update = Y \ No newline at end of file diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst index c8a52b587c..58e0c49a2e 100644 --- a/doc/guides/nics/zxdh.rst +++ b/doc/guides/nics/zxdh.rst @@ -33,6 +33,8 @@ Features of the ZXDH PMD are: - QINQ stripping and inserting - Receive Side Scaling (RSS) - Port hardware statistics +- MTU update +- Jumbo frames Driver compilation and testing diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c index 82f81d1ded..8f39f41c4e 100644 --- a/drivers/net/zxdh/zxdh_ethdev.c +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -1143,6 +1143,7 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = { .rss_hash_conf_get = zxdh_rss_hash_conf_get, .stats_get = zxdh_dev_stats_get, .stats_reset = zxdh_dev_stats_reset, + .mtu_set = zxdh_dev_mtu_set, }; static int32_t diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c index 6156c94f2c..cca16001f7 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.c +++ b/drivers/net/zxdh/zxdh_ethdev_ops.c @@ -12,6 +12,7 @@ #include "zxdh_logs.h" #include "zxdh_rxtx.h" #include "zxdh_np.h" +#include "zxdh_queue.h" #define ZXDH_VLAN_FILTER_GROUPS 64 #define ZXDH_INVALID_LOGIC_QID 0xFFFFU @@ -37,6 +38,7 @@ #define 
ZXDH_HF_F3 2 #define ZXDH_HF_MAC_VLAN 4 #define ZXDH_HF_ALL 0 +#define ZXDH_ETHER_MIN_MTU 68 struct zxdh_hw_mac_stats { uint64_t rx_total; @@ -1419,3 +1421,80 @@ int zxdh_dev_stats_reset(struct rte_eth_dev *dev) return 0; } + +int zxdh_dev_mtu_set(struct rte_eth_dev *dev, uint16_t new_mtu) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_panel_table panel = {0}; + struct zxdh_port_attr_table vport_att = {0}; + uint16_t vfid = zxdh_vport_to_vfid(hw->vport); + uint16_t max_mtu = 0; + int ret = 0; + + max_mtu = ZXDH_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN - RTE_VLAN_HLEN - ZXDH_DL_NET_HDR_SIZE; + if (new_mtu < ZXDH_ETHER_MIN_MTU || new_mtu > max_mtu) { + PMD_DRV_LOG(ERR, "invalid mtu:%d, range[%d, %d]", + new_mtu, ZXDH_ETHER_MIN_MTU, max_mtu); + return -EINVAL; + } + + if (dev->data->mtu == new_mtu) + return 0; + + if (hw->is_pf) { + memset(&panel, 0, sizeof(panel)); + memset(&vport_att, 0, sizeof(vport_att)); + ret = zxdh_get_panel_attr(dev, &panel); + if (ret != 0) { + PMD_DRV_LOG(ERR, "get_panel_attr ret:%d", ret); + return -1; + } + + ret = zxdh_get_port_attr(vfid, &vport_att); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "[vfid:%d] zxdh_dev_mtu, get vport dpp_ret:%d", vfid, ret); + return -1; + } + + panel.mtu = new_mtu; + panel.mtu_enable = 1; + ret = zxdh_set_panel_attr(dev, &panel); + if (ret != 0) { + PMD_DRV_LOG(ERR, "set zxdh_dev_mtu failed, ret:%u", ret); + return ret; + } + + vport_att.mtu_enable = 1; + vport_att.mtu = new_mtu; + ret = zxdh_set_port_attr(vfid, &vport_att); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "[vfid:%d] zxdh_dev_mtu, set vport dpp_ret:%d", vfid, ret); + return ret; + } + } else { + struct zxdh_msg_info msg_info = {0}; + struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg; + + zxdh_msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info); + attr_msg->mode = ZXDH_PORT_MTU_EN_FLAG; + attr_msg->value = 1; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to 
send msg: port 0x%x msg type %d ", + hw->vport.vport, ZXDH_PORT_MTU_EN_FLAG); + return ret; + } + attr_msg->mode = ZXDH_PORT_MTU_FLAG; + attr_msg->value = new_mtu; + ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ", + hw->vport.vport, ZXDH_PORT_MTU_FLAG); + return ret; + } + } + dev->data->mtu = new_mtu; + return 0; +} diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h index dad84934fc..3f37c35178 100644 --- a/drivers/net/zxdh/zxdh_ethdev_ops.h +++ b/drivers/net/zxdh/zxdh_ethdev_ops.h @@ -55,5 +55,6 @@ int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_c int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); int zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); int zxdh_dev_stats_reset(struct rte_eth_dev *dev); +int zxdh_dev_mtu_set(struct rte_eth_dev *dev, uint16_t new_mtu); #endif /* ZXDH_ETHDEV_OPS_H */ diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c index e8e483a02a..6587c868c7 100644 --- a/drivers/net/zxdh/zxdh_tables.c +++ b/drivers/net/zxdh/zxdh_tables.c @@ -55,6 +55,48 @@ int zxdh_get_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr) return ret; } +int zxdh_get_panel_attr(struct rte_eth_dev *dev, struct zxdh_panel_table *panel_att) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint8_t index_phy_port = hw->phyport; + + ZXDH_DTB_ERAM_ENTRY_INFO_T panel_entry = { + .index = index_phy_port, + .p_data = (uint32_t *)panel_att + }; + ZXDH_DTB_USER_ENTRY_T entry = { + .sdt_no = ZXDH_SDT_PANEL_ATT_TABLE, + .p_entry_data = (void *)&panel_entry + }; + int ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO, g_dtb_data.queueid, &entry, 1); + + if (ret != 0) + PMD_DRV_LOG(ERR, "get eram-panel failed, ret:%d ", ret); + + return ret; +} + +int zxdh_set_panel_attr(struct rte_eth_dev *dev, struct 
zxdh_panel_table *panel_att) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint8_t index_phy_port = hw->phyport; + + ZXDH_DTB_ERAM_ENTRY_INFO_T panel_entry = { + .index = index_phy_port, + .p_data = (uint32_t *)panel_att + }; + ZXDH_DTB_USER_ENTRY_T entry = { + .sdt_no = ZXDH_SDT_PANEL_ATT_TABLE, + .p_entry_data = (void *)&panel_entry + }; + int ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO, g_dtb_data.queueid, 1, &entry); + + if (ret) + PMD_DRV_LOG(ERR, "Insert eram-panel failed, code:%u", ret); + + return ret; +} + int zxdh_port_attr_init(struct rte_eth_dev *dev) { diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h index 675c7871ae..d176ec2ed3 100644 --- a/drivers/net/zxdh/zxdh_tables.h +++ b/drivers/net/zxdh/zxdh_tables.h @@ -10,8 +10,10 @@ extern struct zxdh_dtb_shared_data g_dtb_data; #define ZXDH_DEVICE_NO 0 +#define ZXDH_PORT_MTU_FLAG 9 #define ZXDH_PORT_BASE_QID_FLAG 10 #define ZXDH_PORT_ATTR_IS_UP_FLAG 35 +#define ZXDH_PORT_MTU_EN_FLAG 42 #define ZXDH_MTU_STATS_EGRESS_BASE 0x8481 #define ZXDH_MTU_STATS_INGRESS_BASE 0x8981 @@ -223,5 +225,7 @@ int zxdh_vlan_filter_table_init(struct rte_eth_dev *dev); int zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable); int zxdh_rss_table_set(uint16_t vport, struct zxdh_rss_reta *rss_reta); int zxdh_rss_table_get(uint16_t vport, struct zxdh_rss_reta *rss_reta); +int zxdh_get_panel_attr(struct rte_eth_dev *dev, struct zxdh_panel_table *panel_att); +int zxdh_set_panel_attr(struct rte_eth_dev *dev, struct zxdh_panel_table *panel_att); #endif /* ZXDH_TABLES_H */