From patchwork Fri Sep 1 09:34:47 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Renyong Wan
X-Patchwork-Id: 131051
X-Patchwork-Delegate: thomas@monjalon.net
From: Renyong Wan <wanry@3snic.com>
To: dev@dpdk.org
CC: Renyong Wan, Steven Song
Subject: [PATCH v4 05/32] net/sssnic: add event queue
Date: Fri, 1 Sep 2023 17:34:47 +0800
Message-ID: <20230901093514.224824-6-wanry@3snic.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230901093514.224824-1-wanry@3snic.com>
References: <20230901093514.224824-1-wanry@3snic.com>
List-Id: DPDK patches and discussions

From: Renyong Wan <wanry@3snic.com>

The event queue is intended for receiving events from hardware as well
as mailbox response messages.

Signed-off-by: Steven Song
Signed-off-by: Renyong Wan <wanry@3snic.com>
---
v4:
* Fixed dereferencing type-punned pointer.
* Fixed coding style issue of COMPLEX_MACRO.
---
 drivers/net/sssnic/base/meson.build     |   1 +
 drivers/net/sssnic/base/sssnic_eventq.c | 432 ++++++++++++++++++++++++
 drivers/net/sssnic/base/sssnic_eventq.h |  84 +++++
 drivers/net/sssnic/base/sssnic_hw.c     |   9 +-
 drivers/net/sssnic/base/sssnic_hw.h     |   5 +
 drivers/net/sssnic/base/sssnic_reg.h    |  51 +++
 drivers/net/sssnic/sssnic_ethdev.c      |   1 +
 7 files changed, 582 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/sssnic/base/sssnic_eventq.c
 create mode 100644 drivers/net/sssnic/base/sssnic_eventq.h

diff --git a/drivers/net/sssnic/base/meson.build b/drivers/net/sssnic/base/meson.build
index 3e64112c72..7758faa482 100644
--- a/drivers/net/sssnic/base/meson.build
+++ b/drivers/net/sssnic/base/meson.build
@@ -3,6 +3,7 @@
 
 sources = [
         'sssnic_hw.c',
+        'sssnic_eventq.c'
 ]
 
 c_args = cflags
diff --git a/drivers/net/sssnic/base/sssnic_eventq.c b/drivers/net/sssnic/base/sssnic_eventq.c
new file mode 100644
index 0000000000..a74b74f756
--- /dev/null
+++ b/drivers/net/sssnic/base/sssnic_eventq.c
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_bitops.h>
+
+#include "../sssnic_log.h"
+#include "sssnic_hw.h"
+#include "sssnic_reg.h"
+#include "sssnic_eventq.h"
+
+#define SSSNIC_EVENTQ_DEF_DEPTH 64
+#define SSSNIC_EVENTQ_NUM_PAGES 4
+#define SSSNIC_EVENTQ_MAX_PAGE_SZ 0x400000
+#define SSSNIC_EVENTQ_MIN_PAGE_SZ 0x1000
+
+#define SSSNIC_EVENT_ADDR(base_addr, event_sz, idx) \
+	((struct sssnic_event *)(((uint8_t *)(base_addr)) + ((idx) * (event_sz))))
+
+static inline struct sssnic_event *
+sssnic_eventq_peek(struct sssnic_eventq *eq)
+{
+	uint16_t page = eq->ci / eq->page_len;
+	uint16_t idx = eq->ci % eq->page_len;
+
+	return SSSNIC_EVENT_ADDR(eq->pages[page]->addr, eq->entry_size, idx);
+}
+
+static inline void
+sssnic_eventq_reg_write(struct sssnic_eventq *eq, uint32_t reg, uint32_t val)
+{
+	sssnic_cfg_reg_write(eq->hw, reg, val);
+}
+
+static inline uint32_t
+sssnic_eventq_reg_read(struct sssnic_eventq *eq, uint32_t reg)
+{
+	return sssnic_cfg_reg_read(eq->hw, reg);
+}
+
+static inline void
+sssnic_eventq_reg_write64(struct sssnic_eventq *eq, uint32_t reg, uint64_t val)
+{
+	sssnic_cfg_reg_write(eq->hw, reg, (uint32_t)((val >> 16) >> 16));
+	sssnic_cfg_reg_write(eq->hw, reg + sizeof(uint32_t), (uint32_t)val);
+}
+
+/* An eventq must be selected before any of its registers can be accessed. */
+static inline void
+sssnic_eventq_reg_select(struct sssnic_eventq *eq)
+{
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_IDX_SEL_REG, eq->qid);
+}
+
+static const struct rte_memzone *
+sssnic_eventq_page_alloc(struct sssnic_eventq *eq, int page_idx)
+{
+	const struct rte_memzone *mz = NULL;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+
+	snprintf(mz_name, sizeof(mz_name), "sssnic%u_eq%d_page%d",
+		SSSNIC_ETH_PORT_ID(eq->hw), eq->qid, page_idx);
+	mz = rte_memzone_reserve_aligned(mz_name, eq->page_size, SOCKET_ID_ANY,
+		RTE_MEMZONE_IOVA_CONTIG, eq->page_size);
+	return mz;
+}
+
+static uint32_t
+sssnic_eventq_page_size_calc(uint32_t depth, uint32_t entry_size)
+{
+	uint32_t pages = SSSNIC_EVENTQ_NUM_PAGES;
+	uint32_t size;
+
+	size = RTE_ALIGN(depth * entry_size, SSSNIC_EVENTQ_MIN_PAGE_SZ);
+	if (size <= pages * SSSNIC_EVENTQ_MIN_PAGE_SZ) {
+		/* use minimum page size */
+		return SSSNIC_EVENTQ_MIN_PAGE_SZ;
+	}
+
+	/* Calculate how many minimum-size pages each queue page must cover. */
+	size = RTE_ALIGN(size / pages, SSSNIC_EVENTQ_MIN_PAGE_SZ);
+	pages = rte_fls_u32(size / SSSNIC_EVENTQ_MIN_PAGE_SZ);
+
+	return SSSNIC_EVENTQ_MIN_PAGE_SZ * pages;
+}
+
+static int
+sssnic_eventq_pages_setup(struct sssnic_eventq *eq)
+{
+	const struct rte_memzone *mz;
+	struct sssnic_event *ev;
+	int i, j;
+
+	eq->pages = rte_zmalloc(NULL,
+		eq->num_pages * sizeof(struct rte_memzone *), 1);
+	if (eq->pages == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc memory for pages");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < eq->num_pages; i++) {
+		mz = sssnic_eventq_page_alloc(eq, i);
+		if (mz == NULL) {
+			PMD_DRV_LOG(ERR,
+				"Could not alloc DMA memory for eventq page %d",
+				i);
+			goto alloc_dma_fail;
+		}
+		/* init eventq entries */
+		for (j = 0; j < eq->page_len; j++) {
+			ev = SSSNIC_EVENT_ADDR(mz->addr, eq->entry_size, j);
+			ev->desc.u32 = 0;
+		}
+		eq->pages[i] = mz;
+		sssnic_eventq_reg_write64(eq,
+			SSSNIC_EVENTQ_PAGE_ADDR_REG + i * sizeof(uint64_t),
+			mz->iova);
+	}
+
+	return 0;
+
+alloc_dma_fail:
+	while (i--)
+		rte_memzone_free(eq->pages[i]);
+	rte_free(eq->pages);
+	return -ENOMEM;
+}
+
+static void
+sssnic_eventq_pages_cleanup(struct sssnic_eventq *eq)
+{
+	int i;
+
+	if (eq->pages == NULL)
+		return;
+
+	for (i = 0; i < eq->num_pages; i++)
+		rte_memzone_free(eq->pages[i]);
+	rte_free(eq->pages);
+	eq->pages = NULL;
+}
+
+static void
+sssnic_eventq_ctrl_setup(struct sssnic_eventq *eq)
+{
+	struct sssnic_hw *hw = eq->hw;
+	struct sssnic_eventq_ctrl0_reg ctrl_0;
+	struct sssnic_eventq_ctrl1_reg ctrl_1;
+
+	ctrl_0.u32 = sssnic_eventq_reg_read(eq, SSSNIC_EVENTQ_CTRL0_REG);
+	ctrl_0.intr_idx = eq->msix_entry;
+	ctrl_0.dma_attr = SSSNIC_REG_EVENTQ_DEF_DMA_ATTR;
+	ctrl_0.pci_idx = hw->attr.pci_idx;
+	ctrl_0.intr_mode = SSSNIC_REG_EVENTQ_INTR_MODE_0;
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL0_REG, ctrl_0.u32);
+
+	ctrl_1.page_size = rte_log2_u32(eq->page_size >> 12);
+	ctrl_1.depth = eq->depth;
+	ctrl_1.entry_size = rte_log2_u32(eq->entry_size >> 5);
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL1_REG, ctrl_1.u32);
+}
+
+/* Synchronize the current software CI to hardware.
+ * @informed: indicates whether events are signalled by interrupt.
+ *  0: not to be informed
+ *  1: informed by interrupt
+ */
+static void
+sssnic_eventq_ci_update(struct sssnic_eventq *eq, int informed)
+{
+	struct sssnic_eventq_ci_ctrl_reg reg;
+
+	reg.u32 = 0;
+	if (eq->qid == 0)
+		reg.informed = !!informed;
+	reg.qid = eq->qid;
+	reg.ci = eq->ci_wrapped;
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CI_CTRL_REG, reg.u32);
+}
+
+static int
+sssnic_eventq_init(struct sssnic_hw *hw, struct sssnic_eventq *eq, uint16_t qid)
+{
+	int ret;
+
+	if (hw == NULL || eq == NULL) {
+		PMD_DRV_LOG(ERR,
+			"Bad parameter for event queue initialization.");
+		return -EINVAL;
+	}
+
+	eq->hw = hw;
+	eq->msix_entry = 0; /* eventq uses msix 0 in PMD driver */
+	eq->qid = qid;
+	eq->depth = SSSNIC_EVENTQ_DEF_DEPTH;
+	eq->entry_size = SSSNIC_EVENT_SIZE;
+	eq->page_size = sssnic_eventq_page_size_calc(eq->depth, eq->entry_size);
+	eq->page_len = eq->page_size / eq->entry_size;
+	if (eq->page_len & (eq->page_len - 1)) {
+		PMD_DRV_LOG(ERR, "Invalid page length: %d, must be power of 2",
+			eq->page_len);
+		return -EINVAL;
+	}
+	eq->num_pages = RTE_ALIGN((eq->depth * eq->entry_size), eq->page_size) /
+			eq->page_size;
+	if (eq->num_pages > SSSNIC_EVENTQ_NUM_PAGES) {
+		PMD_DRV_LOG(ERR,
+			"Invalid number of pages: %d, can't be more than %d pages.",
+			eq->num_pages, SSSNIC_EVENTQ_NUM_PAGES);
+		return -EINVAL;
+	}
+
+	/* Select the eventq whose registers are to be accessed. */
+	sssnic_eventq_reg_select(eq);
+	rte_wmb();
+	/* clear entries in eventq */
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL1_REG, 0);
+	rte_wmb();
+	/* reset pi to 0 */
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_PROD_IDX_REG, 0);
+
+	ret = sssnic_eventq_pages_setup(eq);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to setup eventq pages!");
+		return ret;
+	}
+	sssnic_eventq_ctrl_setup(eq);
+	sssnic_eventq_ci_update(eq, 1);
+	if (qid == 0)
+		sssnic_msix_state_set(eq->hw, 0, SSSNIC_MSIX_ENABLE);
+
+	PMD_DRV_LOG(DEBUG,
+		"eventq %u: q_depth=%u, entry_size=%u, num_pages=%u, page_size=%u, page_len=%u",
+		qid, eq->depth, eq->entry_size, eq->num_pages, eq->page_size,
+		eq->page_len);
+
+	return 0;
+}
+
+static void
+sssnic_eventq_shutdown(struct sssnic_eventq *eq)
+{
+	if (eq->qid == 0)
+		sssnic_msix_state_set(eq->hw, 0, SSSNIC_MSIX_DISABLE);
+
+	sssnic_eventq_reg_select(eq);
+	rte_wmb();
+
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL1_REG, 0);
+	eq->ci = sssnic_eventq_reg_read(eq, SSSNIC_EVENTQ_PROD_IDX_REG);
+	sssnic_eventq_ci_update(eq, 0);
+	sssnic_eventq_pages_cleanup(eq);
+}
+
+static void
+sssnic_event_be_to_cpu_32(struct sssnic_event *in, struct sssnic_event *out)
+{
+	uint32_t i;
+	uint32_t count;
+	uint32_t *dw_in = (uint32_t *)in;
+	uint32_t *dw_out = (uint32_t *)out;
+
+	count = SSSNIC_EVENT_SIZE / sizeof(uint32_t);
+	for (i = 0; i < count; i++) {
+		*dw_out = rte_be_to_cpu_32(*dw_in);
+		dw_out++;
+		dw_in++;
+	}
+}
+
+static int
+sssnic_event_handle(struct sssnic_eventq *eq, struct sssnic_event *event)
+{
+	struct sssnic_event ev;
+	sssnic_event_handler_func_t *func;
+	void *data;
+
+	sssnic_event_be_to_cpu_32(event, &ev);
+	if (ev.desc.code < SSSNIC_EVENT_CODE_MIN ||
+		ev.desc.code > SSSNIC_EVENT_CODE_MAX) {
+		PMD_DRV_LOG(ERR, "Event code %d is not supported",
+			ev.desc.code);
+		return -1;
+	}
+
+	func = eq->handlers[ev.desc.code].func;
+	data = eq->handlers[ev.desc.code].data;
+	if (func == NULL) {
+		PMD_DRV_LOG(NOTICE,
+			"Could not find handler for event qid:%u code:%d",
+			eq->qid, ev.desc.code);
+		return -1;
+	}
+
+	return func(eq, &ev, data);
+}
+
+/* Poll for one valid event within timeout_ms. */
+static struct sssnic_event *
+sssnic_eventq_poll(struct sssnic_eventq *eq, uint32_t timeout_ms)
+{
+	struct sssnic_event *event;
+	struct sssnic_eventd desc;
+	uint64_t end;
+
+	if (timeout_ms > 0)
+		end = rte_get_timer_cycles() +
+			rte_get_timer_hz() * timeout_ms / 1000;
+
+	do {
+		event = sssnic_eventq_peek(eq);
+		desc.u32 = rte_be_to_cpu_32(event->desc.u32);
+		if (desc.wrapped != eq->wrapped)
+			return event;
+
+		if (timeout_ms > 0)
+			rte_delay_us_sleep(1000);
+	} while ((timeout_ms > 0) &&
+		(((long)(rte_get_timer_cycles() - end)) < 0));
+
+	return NULL;
+}
+
+/* Take one or more events and handle them. */
+int
+sssnic_eventq_flush(struct sssnic_hw *hw, uint16_t qid, uint32_t timeout_ms)
+{
+	int found = 0;
+	uint32_t i = 0;
+	int done = 0;
+	struct sssnic_event *event;
+	struct sssnic_eventq *eq;
+
+	if (qid >= hw->num_eventqs) {
+		PMD_DRV_LOG(ERR,
+			"Bad parameter, event queue id must be less than %u",
+			hw->num_eventqs);
+		return -EINVAL;
+	}
+
+	eq = &hw->eventqs[qid];
+	for (i = 0; i < eq->depth; i++) {
+		event = sssnic_eventq_poll(eq, timeout_ms);
+		if (event == NULL)
+			break;
+		done = sssnic_event_handle(eq, event);
+		eq->ci++;
+		if (eq->ci == eq->depth) {
+			eq->ci = 0;
+			eq->wrapped = !eq->wrapped;
+		}
+
+		found++;
+		if (done == SSSNIC_EVENT_DONE)
+			break;
+	}
+
+	SSSNIC_DEBUG("found:%d, done:%d, ci:%u, depth:%u, wrapped:%u", found,
+		done, eq->ci, eq->depth, eq->wrapped);
+
+	if (!found)
+		return -ETIME;
+
+	sssnic_eventq_ci_update(eq, 1);
+
+	if (event == NULL || done != SSSNIC_EVENT_DONE)
+		return -ETIME;
+
+	return 0;
+}
+
+int
+sssnic_eventq_all_init(struct sssnic_hw *hw)
+{
+	struct sssnic_eventq *eventqs;
+	int num_eventqs;
+	int i = 0;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	num_eventqs = hw->attr.num_aeq;
+	eventqs = rte_zmalloc(NULL, sizeof(struct sssnic_eventq) * num_eventqs,
+		1);
+	if (eventqs == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc memory for event queue");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_eventqs; i++) {
+		ret = sssnic_eventq_init(hw, &eventqs[i], i);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to initialize event queue: %d",
+				i);
+			goto init_eventq_fail;
+		}
+	}
+	hw->eventqs = eventqs;
+	hw->num_eventqs = num_eventqs;
+
+	PMD_DRV_LOG(INFO, "Initialized %d event queues", num_eventqs);
+
+	return 0;
+
+init_eventq_fail:
+	while (i--)
+		sssnic_eventq_shutdown(&eventqs[i]);
+	rte_free(eventqs);
+	return ret;
+}
+
+void
+sssnic_eventq_all_shutdown(struct sssnic_hw *hw)
+{
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->eventqs == NULL)
+		return;
+
+	for (i = 0; i < hw->num_eventqs; i++)
+		sssnic_eventq_shutdown(&hw->eventqs[i]);
+
+	rte_free(hw->eventqs);
+	hw->eventqs = NULL;
+}
diff --git a/drivers/net/sssnic/base/sssnic_eventq.h b/drivers/net/sssnic/base/sssnic_eventq.h
new file mode 100644
index 0000000000..a196c10f48
--- /dev/null
+++ b/drivers/net/sssnic/base/sssnic_eventq.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#ifndef _SSSNIC_EVENTQ_H_
+#define _SSSNIC_EVENTQ_H_
+
+#define SSSNIC_MAX_NUM_EVENTQ 4
+#define SSSNIC_MIN_NUM_EVENTQ 2
+
+#define SSSNIC_EVENT_DESC_SIZE sizeof(uint32_t)
+#define SSSNIC_EVENT_SIZE 64
+#define SSSNIC_EVENT_DATA_SIZE (SSSNIC_EVENT_SIZE - SSSNIC_EVENT_DESC_SIZE)
+
+enum sssnic_event_code {
+	SSSNIC_EVENT_CODE_RESVD = 0,
+	SSSNIC_EVENT_FROM_FUNC = 1, /* event from PF or VF */
+	SSSNIC_EVENT_FROM_MPU = 2, /* event from management processor unit */
+};
+#define SSSNIC_EVENT_CODE_MIN SSSNIC_EVENT_FROM_FUNC
+#define SSSNIC_EVENT_CODE_MAX SSSNIC_EVENT_FROM_MPU
+
+struct sssnic_eventq;
+struct sssnic_event;
+
+/* Indicates that the sssnic event has been handled. */
+#define SSSNIC_EVENT_DONE 1
+
+typedef int sssnic_event_handler_func_t(struct sssnic_eventq *eq,
+	struct sssnic_event *ev, void *data);
+
+struct sssnic_event_handler {
+	sssnic_event_handler_func_t *func;
+	void *data;
+};
+
+struct sssnic_eventq {
+	struct sssnic_hw *hw;
+	uint16_t qid;
+	uint16_t entry_size;
+	uint32_t depth; /* max number of entries in eventq */
+	uint16_t page_len; /* number of entries in a page */
+	uint16_t num_pages; /* number of pages used to store event entries */
+	uint32_t page_size;
+	const struct rte_memzone **pages;
+	union {
+		uint32_t ci_wrapped;
+		struct {
+			uint32_t ci : 19;
+			uint32_t wrapped : 1;
+			uint32_t resvd : 12;
+		};
+	};
+	uint16_t msix_entry;
+	struct sssnic_event_handler handlers[SSSNIC_EVENT_CODE_MAX + 1];
+};
+
+/* event descriptor */
+struct sssnic_eventd {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t code : 7;
+			uint32_t src : 1;
+			uint32_t size : 8;
+			uint32_t resvd : 15;
+			uint32_t wrapped : 1;
+		};
+	};
+};
+
+/* event entry */
+struct sssnic_event {
+	uint8_t data[SSSNIC_EVENT_DATA_SIZE];
+	struct sssnic_eventd desc;
+};
+
+int sssnic_eventq_flush(struct sssnic_hw *hw, uint16_t qid,
+	uint32_t timeout_ms);
+
+int sssnic_eventq_all_init(struct sssnic_hw *hw);
+void sssnic_eventq_all_shutdown(struct sssnic_hw *hw);
+
+#endif /* _SSSNIC_EVENTQ_H_ */
diff --git a/drivers/net/sssnic/base/sssnic_hw.c b/drivers/net/sssnic/base/sssnic_hw.c
index 8b7bba7644..44e04486a5 100644
--- a/drivers/net/sssnic/base/sssnic_hw.c
+++ b/drivers/net/sssnic/base/sssnic_hw.c
@@ -9,6 +9,7 @@
 #include "../sssnic_log.h"
 #include "sssnic_hw.h"
 #include "sssnic_reg.h"
+#include "sssnic_eventq.h"
 
 static int
 wait_for_sssnic_hw_ready(struct sssnic_hw *hw)
@@ -196,12 +197,18 @@ sssnic_hw_init(struct sssnic_hw *hw)
 		return ret;
 	}
 
+	ret = sssnic_eventq_all_init(hw);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to initialize event queues");
+		return ret;
+	}
+
 	return -EINVAL;
 }
 
 void
 sssnic_hw_shutdown(struct sssnic_hw *hw)
 {
-	RTE_SET_USED(hw);
 	PMD_INIT_FUNC_TRACE();
+	sssnic_eventq_all_shutdown(hw);
 }
diff --git a/drivers/net/sssnic/base/sssnic_hw.h b/drivers/net/sssnic/base/sssnic_hw.h
index 65d4d562b4..6caf3a6d66 100644
--- a/drivers/net/sssnic/base/sssnic_hw.h
+++ b/drivers/net/sssnic/base/sssnic_hw.h
@@ -51,8 +51,13 @@ struct sssnic_hw {
 	uint8_t *db_base_addr;
 	uint8_t *db_mem_len;
 	struct sssnic_hw_attr attr;
+	struct sssnic_eventq *eventqs;
+	uint8_t num_eventqs;
+	uint16_t eth_port_id;
 };
 
+#define SSSNIC_ETH_PORT_ID(hw) ((hw)->eth_port_id)
+
 int sssnic_hw_init(struct sssnic_hw *hw);
 void sssnic_hw_shutdown(struct sssnic_hw *hw);
 void sssnic_msix_state_set(struct sssnic_hw *hw, uint16_t msix_id, int state);
diff --git a/drivers/net/sssnic/base/sssnic_reg.h b/drivers/net/sssnic/base/sssnic_reg.h
index 77d83292eb..e38d39a691 100644
--- a/drivers/net/sssnic/base/sssnic_reg.h
+++ b/drivers/net/sssnic/base/sssnic_reg.h
@@ -18,6 +18,14 @@
 
 #define SSSNIC_MSIX_CTRL_REG 0x58
 
+#define SSSNIC_EVENTQ_CI_CTRL_REG 0x50
+#define SSSNIC_EVENTQ_IDX_SEL_REG 0x210
+#define SSSNIC_EVENTQ_CTRL0_REG 0x200
+#define SSSNIC_EVENTQ_CTRL1_REG 0x204
+#define SSSNIC_EVENTQ_CONS_IDX_REG 0x208
+#define SSSNIC_EVENTQ_PROD_IDX_REG 0x20c
+#define SSSNIC_EVENTQ_PAGE_ADDR_REG 0x240
+
 /* registers of mgmt */
 #define SSSNIC_AF_ELECTION_REG 0x6000
 #define SSSNIC_MF_ELECTION_REG 0x6020
@@ -142,6 +150,49 @@ struct sssnic_msix_ctrl_reg {
 	};
 };
 
+#define SSSNIC_REG_EVENTQ_INTR_MODE_0 0 /* armed mode */
+#define SSSNIC_REG_EVENTQ_INTR_MODE_1 1 /* always mode */
+#define SSSNIC_REG_EVENTQ_DEF_DMA_ATTR 0
+struct sssnic_eventq_ctrl0_reg {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t intr_idx : 10;
+			uint32_t resvd_0 : 2;
+			uint32_t dma_attr : 6;
+			uint32_t resvd_1 : 2;
+			uint32_t pci_idx : 1;
+			uint32_t resvd_2 : 8;
+			uint32_t intr_mode : 1;
+		};
+	};
+};
+
+struct sssnic_eventq_ctrl1_reg {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t depth : 21;
+			uint32_t resvd_0 : 3;
+			uint32_t entry_size : 2;
+			uint32_t resvd_1 : 2;
+			uint32_t page_size : 4;
+		};
+	};
+};
+
+struct sssnic_eventq_ci_ctrl_reg {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t ci : 21;
+			uint32_t informed : 1;
+			uint32_t resvd_0 : 8;
+			uint32_t qid : 2;
+		};
+	};
+};
+
 static inline uint32_t
 sssnic_cfg_reg_read(struct sssnic_hw *hw, uint32_t reg)
 {
diff --git a/drivers/net/sssnic/sssnic_ethdev.c b/drivers/net/sssnic/sssnic_ethdev.c
index e198b1e1d0..460ff604aa 100644
--- a/drivers/net/sssnic/sssnic_ethdev.c
+++ b/drivers/net/sssnic/sssnic_ethdev.c
@@ -40,6 +40,7 @@ sssnic_ethdev_init(struct rte_eth_dev *ethdev)
 	}
 	netdev->hw = hw;
 	hw->pci_dev = pci_dev;
+	hw->eth_port_id = ethdev->data->port_id;
 	ret = sssnic_hw_init(hw);
 	if (ret != 0) {
 		rte_free(hw);
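
For reference, a minimal usage sketch of the event queue API introduced above: it registers a handler for SSSNIC_EVENT_FROM_MPU events on queue 0 and then drains the queue with sssnic_eventq_flush(). Only structures and functions declared in sssnic_eventq.h are used; the example_* names are hypothetical and exist purely for illustration.

/* Hypothetical handler: consumes one MPU event and stops the flush loop. */
static int
example_mpu_event_handle(struct sssnic_eventq *eq, struct sssnic_event *ev,
	void *data)
{
	RTE_SET_USED(eq);
	RTE_SET_USED(data);
	/* ev->data carries up to SSSNIC_EVENT_DATA_SIZE bytes of payload. */
	RTE_SET_USED(ev);
	return SSSNIC_EVENT_DONE;
}

/* Hypothetical caller: waits up to 1000 ms for one MPU event on queue 0. */
static int
example_wait_mpu_event(struct sssnic_hw *hw)
{
	struct sssnic_eventq *eq = &hw->eventqs[0];

	eq->handlers[SSSNIC_EVENT_FROM_MPU].func = example_mpu_event_handle;
	eq->handlers[SSSNIC_EVENT_FROM_MPU].data = NULL;

	/* Returns 0 once the handler reports SSSNIC_EVENT_DONE, -ETIME on timeout. */
	return sssnic_eventq_flush(hw, 0, 1000);
}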