From patchwork Mon Feb 12 13:47:39 2024
X-Patchwork-Submitter: Nagadheeraj Rottela
X-Patchwork-Id: 136594
X-Patchwork-Delegate: gakhil@marvell.com
From: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
Subject: [PATCH v2 3/7] common/nitrox: add compress hardware queue management
Date: Mon, 12 Feb 2024 19:17:39 +0530
Message-ID: <20240212134743.15153-4-rnagadheeraj@marvell.com>
In-Reply-To: <20240212134743.15153-1-rnagadheeraj@marvell.com>
References: <20240212134743.15153-1-rnagadheeraj@marvell.com>
List-Id: DPDK patches and discussions

Add compress device ring initialization and cleanup code.
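The new HAL helpers follow the same bring-up pattern as the existing NPS
packet rings: quiesce the ring, clear the doorbell and next-command
pointers, program the ring size and base address, then enable the queue
and poll with a bounded retry until the enable takes effect. A minimal
caller-side sketch of the intended life cycle (illustrative only, not
part of this patch; zip_ring_lifecycle() is a hypothetical wrapper, the
HAL calls are the ones added below):

#include "nitrox_hal.h"

static int
zip_ring_lifecycle(uint8_t *bar_addr, uint16_t ring, uint32_t ring_size,
		   phys_addr_t ring_iova)
{
	int err;

	/* Quiesces the ring, clears ZQMQ_DRBLX/ZQMQ_NXT_CMDX, programs
	 * ZQMQ_QSZX/ZQMQ_BADRX and polls ZQMQ_ENX until queue_enable
	 * reads back as set; returns -EBUSY/-EFAULT if the hardware
	 * does not respond.
	 */
	err = setup_zqmq_input_ring(bar_addr, ring, ring_size, ring_iova);
	if (err)
		return err;

	/*
	 * ... submit instructions; each submission advances ZQMQ_NXT_CMDX
	 * through inc_zqmq_next_cmd(), see nitrox_qp_enqueue_sr() ...
	 */

	/* Disable the ring and wait (up to 5 x 10 ms) for it to go idle. */
	return zqmq_input_ring_disable(bar_addr, ring);
}

For SE queues the doorbell CSR points at NPS_PKT_IN_INSTR_BAOFF_DBELLX,
while the ZIP enqueue path advances ZQMQ_NXT_CMDX one command at a time
via inc_zqmq_next_cmd() and verifies the write, which is why
nitrox_qp_enqueue_sr() can return -EIO.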
Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
---
 drivers/common/nitrox/nitrox_csr.h |  12 +++
 drivers/common/nitrox/nitrox_hal.c | 116 +++++++++++++++++++++++++++++
 drivers/common/nitrox/nitrox_hal.h | 115 ++++++++++++++++++++++++++++
 drivers/common/nitrox/nitrox_qp.c  |  53 +++++++++++--
 drivers/common/nitrox/nitrox_qp.h  |  35 ++++++++-
 5 files changed, 322 insertions(+), 9 deletions(-)

diff --git a/drivers/common/nitrox/nitrox_csr.h b/drivers/common/nitrox/nitrox_csr.h
index de7a3c6713..97c797c2e2 100644
--- a/drivers/common/nitrox/nitrox_csr.h
+++ b/drivers/common/nitrox/nitrox_csr.h
@@ -25,6 +25,18 @@
 /* AQM Virtual Function Registers */
 #define AQMQ_QSZX(_i)			(0x20008UL + ((_i) * 0x40000UL))
 
+/* ZQM virtual function registers */
+#define ZQMQ_DRBLX(_i)			(0x30000UL + ((_i) * 0x40000UL))
+#define ZQMQ_QSZX(_i)			(0x30008UL + ((_i) * 0x40000UL))
+#define ZQMQ_BADRX(_i)			(0x30010UL + ((_i) * 0x40000UL))
+#define ZQMQ_NXT_CMDX(_i)		(0x30018UL + ((_i) * 0x40000UL))
+#define ZQMQ_CMD_CNTX(_i)		(0x30020UL + ((_i) * 0x40000UL))
+#define ZQMQ_CMP_THRX(_i)		(0x30028UL + ((_i) * 0x40000UL))
+#define ZQMQ_CMP_CNTX(_i)		(0x30030UL + ((_i) * 0x40000UL))
+#define ZQMQ_TIMER_LDX(_i)		(0x30038UL + ((_i) * 0x40000UL))
+#define ZQMQ_ENX(_i)			(0x30048UL + ((_i) * 0x40000UL))
+#define ZQMQ_ACTIVITY_STATX(_i)		(0x30050UL + ((_i) * 0x40000UL))
+
 static inline uint64_t
 nitrox_read_csr(uint8_t *bar_addr, uint64_t offset)
 {
diff --git a/drivers/common/nitrox/nitrox_hal.c b/drivers/common/nitrox/nitrox_hal.c
index 433f3adb20..451549a664 100644
--- a/drivers/common/nitrox/nitrox_hal.c
+++ b/drivers/common/nitrox/nitrox_hal.c
@@ -9,6 +9,7 @@
 
 #include "nitrox_hal.h"
 #include "nitrox_csr.h"
+#include "nitrox_logs.h"
 
 #define MAX_VF_QUEUES	8
 #define MAX_PF_QUEUES	64
@@ -164,6 +165,121 @@ setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port)
 	}
 }
 
+int
+zqmq_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
+{
+	union zqmq_activity_stat zqmq_activity_stat;
+	union zqmq_en zqmq_en;
+	union zqmq_cmp_cnt zqmq_cmp_cnt;
+	uint64_t reg_addr;
+	int max_retries = 5;
+
+	/* clear queue enable */
+	reg_addr = ZQMQ_ENX(ring);
+	zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	zqmq_en.s.queue_enable = 0;
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_en.u64);
+	rte_delay_us_block(100);
+
+	/* wait for queue active to clear */
+	reg_addr = ZQMQ_ACTIVITY_STATX(ring);
+	zqmq_activity_stat.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	while (zqmq_activity_stat.s.queue_active && max_retries--) {
+		rte_delay_ms(10);
+		zqmq_activity_stat.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	}
+
+	if (zqmq_activity_stat.s.queue_active) {
+		NITROX_LOG(ERR, "Failed to disable zqmq ring %d\n", ring);
+		return -EBUSY;
+	}
+
+	/* clear commands completed count */
+	reg_addr = ZQMQ_CMP_CNTX(ring);
+	zqmq_cmp_cnt.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_cmp_cnt.u64);
+	rte_delay_us_block(CSR_DELAY);
+	return 0;
+}
+
+int
+setup_zqmq_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
+		      phys_addr_t raddr)
+{
+	union zqmq_drbl zqmq_drbl;
+	union zqmq_qsz zqmq_qsz;
+	union zqmq_en zqmq_en;
+	union zqmq_cmp_thr zqmq_cmp_thr;
+	union zqmq_timer_ld zqmq_timer_ld;
+	uint64_t reg_addr = 0;
+	int max_retries = 5;
+	int err = 0;
+
+	err = zqmq_input_ring_disable(bar_addr, ring);
+	if (err)
+		return err;
+
+	/* clear doorbell count */
+	reg_addr = ZQMQ_DRBLX(ring);
+	zqmq_drbl.u64 = 0;
+	zqmq_drbl.s.dbell_count = 0xFFFFFFFF;
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_drbl.u64);
+	rte_delay_us_block(CSR_DELAY);
+
+	reg_addr = ZQMQ_NXT_CMDX(ring);
+	nitrox_write_csr(bar_addr, reg_addr, 0);
+	rte_delay_us_block(CSR_DELAY);
+
+	/* write queue length */
+	reg_addr = ZQMQ_QSZX(ring);
+	zqmq_qsz.u64 = 0;
+	zqmq_qsz.s.host_queue_size = rsize;
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_qsz.u64);
+	rte_delay_us_block(CSR_DELAY);
+
+	/* write queue base address */
+	reg_addr = ZQMQ_BADRX(ring);
+	nitrox_write_csr(bar_addr, reg_addr, raddr);
+	rte_delay_us_block(CSR_DELAY);
+
+	/* write commands completed threshold */
+	reg_addr = ZQMQ_CMP_THRX(ring);
+	zqmq_cmp_thr.u64 = 0;
+	zqmq_cmp_thr.s.commands_completed_threshold = 0;
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_cmp_thr.u64);
+	rte_delay_us_block(CSR_DELAY);
+
+	/* write timer load value */
+	reg_addr = ZQMQ_TIMER_LDX(ring);
+	zqmq_timer_ld.u64 = 0;
+	zqmq_timer_ld.s.timer_load_value = 0;
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_timer_ld.u64);
+	rte_delay_us_block(CSR_DELAY);
+
+	/* enable queue */
+	reg_addr = ZQMQ_ENX(ring);
+	zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	zqmq_en.s.queue_enable = 1;
+	nitrox_write_csr(bar_addr, reg_addr, zqmq_en.u64);
+	rte_delay_us_block(100);
+
+	/* wait for queue enable to take effect */
+	zqmq_en.u64 = 0;
+	zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	while (!zqmq_en.s.queue_enable && max_retries--) {
+		rte_delay_ms(10);
+		zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+	}
+
+	if (!zqmq_en.s.queue_enable) {
+		NITROX_LOG(ERR, "Failed to enable zqmq ring %d\n", ring);
+		err = -EFAULT;
+	} else {
+		err = 0;
+	}
+
+	return err;
+}
+
 int
 vf_get_vf_config_mode(uint8_t *bar_addr)
 {
diff --git a/drivers/common/nitrox/nitrox_hal.h b/drivers/common/nitrox/nitrox_hal.h
index dcfbd11d85..2367b967e5 100644
--- a/drivers/common/nitrox/nitrox_hal.h
+++ b/drivers/common/nitrox/nitrox_hal.h
@@ -146,6 +146,101 @@ union aqmq_qsz {
 	} s;
 };
 
+union zqmq_activity_stat {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 63;
+		uint64_t queue_active : 1;
+#else
+		uint64_t queue_active : 1;
+		uint64_t raz : 63;
+#endif
+	} s;
+};
+
+union zqmq_en {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 63;
+		uint64_t queue_enable : 1;
+#else
+		uint64_t queue_enable : 1;
+		uint64_t raz : 63;
+#endif
+	} s;
+};
+
+union zqmq_cmp_cnt {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 30;
+		uint64_t resend : 1;
+		uint64_t completion_status : 1;
+		uint64_t commands_completed_count: 32;
+#else
+		uint64_t commands_completed_count: 32;
+		uint64_t completion_status : 1;
+		uint64_t resend : 1;
+		uint64_t raz : 30;
+#endif
+	} s;
+};
+
+union zqmq_drbl {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 32;
+		uint64_t dbell_count : 32;
+#else
+		uint64_t dbell_count : 32;
+		uint64_t raz : 32;
+#endif
+	} s;
+};
+
+union zqmq_qsz {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 32;
+		uint64_t host_queue_size: 32;
+#else
+		uint64_t host_queue_size: 32;
+		uint64_t raz : 32;
+#endif
+	} s;
+};
+
+union zqmq_cmp_thr {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 32;
+		uint64_t commands_completed_threshold : 32;
+#else
+		uint64_t commands_completed_threshold : 32;
+		uint64_t raz : 32;
+#endif
+	} s;
+};
+
+union zqmq_timer_ld {
+	uint64_t u64;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz : 32;
+		uint64_t timer_load_value: 32;
+#else
+		uint64_t timer_load_value: 32;
+		uint64_t raz : 32;
+#endif
+	} s;
+};
+
 enum nitrox_vf_mode {
 	NITROX_MODE_PF = 0x0,
 	NITROX_MODE_VF16 = 0x1,
@@ -154,6 +249,23 @@ enum nitrox_vf_mode {
 	NITROX_MODE_VF128 = 0x4,
 };
 
+static inline int
+inc_zqmq_next_cmd(uint8_t *bar_addr, uint16_t ring)
+{
+	uint64_t reg_addr = 0;
+	uint64_t val;
+
+	reg_addr = ZQMQ_NXT_CMDX(ring);
+	val = nitrox_read_csr(bar_addr, reg_addr);
+	val++;
+	nitrox_write_csr(bar_addr, reg_addr, val);
+	rte_delay_us_block(CSR_DELAY);
+	if (nitrox_read_csr(bar_addr, reg_addr) != val)
+		return -EIO;
+
+	return 0;
+}
+
 int vf_get_vf_config_mode(uint8_t *bar_addr);
 int vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode);
 void setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
@@ -161,5 +273,8 @@ void setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
 void setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port);
 void nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring);
 void nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port);
+int setup_zqmq_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
+			  phys_addr_t raddr);
+int zqmq_input_ring_disable(uint8_t *bar_addr, uint16_t ring);
 
 #endif /* _NITROX_HAL_H_ */
diff --git a/drivers/common/nitrox/nitrox_qp.c b/drivers/common/nitrox/nitrox_qp.c
index 5e85ccbd51..6ec0781f1a 100644
--- a/drivers/common/nitrox/nitrox_qp.c
+++ b/drivers/common/nitrox/nitrox_qp.c
@@ -2,7 +2,7 @@
  * Copyright(C) 2019 Marvell International Ltd.
  */
 
-#include 
+#include 
 
 #include "nitrox_qp.h"
@@ -20,6 +20,7 @@ nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
 	const struct rte_memzone *mz;
 	size_t cmdq_size = qp->count * instr_size;
 	uint64_t offset;
+	int err = 0;
 
 	snprintf(mz_name, sizeof(mz_name), "%s_cmdq_%d", dev_name, qp->qno);
 	mz = rte_memzone_reserve_aligned(mz_name, cmdq_size, socket_id,
@@ -32,14 +33,34 @@ nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
 		return -ENOMEM;
 	}
 
+	switch (qp->type) {
+	case NITROX_QUEUE_SE:
+		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(qp->qno);
+		qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
+		setup_nps_pkt_input_ring(bar_addr, qp->qno, qp->count,
+					 mz->iova);
+		setup_nps_pkt_solicit_output_port(bar_addr, qp->qno);
+		break;
+	case NITROX_QUEUE_ZIP:
+		offset = ZQMQ_DRBLX(qp->qno);
+		qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
+		err = setup_zqmq_input_ring(bar_addr, qp->qno, qp->count,
+					    mz->iova);
+		break;
+	default:
+		NITROX_LOG(ERR, "Invalid queue type %d\n", qp->type);
+		err = -EINVAL;
+		break;
+	}
+
+	if (err) {
+		rte_memzone_free(mz);
+		return err;
+	}
+
 	qp->cmdq.mz = mz;
-	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(qp->qno);
-	qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
 	qp->cmdq.ring = mz->addr;
 	qp->cmdq.instr_size = instr_size;
-	setup_nps_pkt_input_ring(bar_addr, qp->qno, qp->count, mz->iova);
-	setup_nps_pkt_solicit_output_port(bar_addr, qp->qno);
-
 	return 0;
 }
 
@@ -62,8 +83,23 @@ nitrox_setup_ridq(struct nitrox_qp *qp, int socket_id)
 static int
 nitrox_release_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr)
 {
-	nps_pkt_solicited_port_disable(bar_addr, qp->qno);
-	nps_pkt_input_ring_disable(bar_addr, qp->qno);
+	int err = 0;
+
+	switch (qp->type) {
+	case NITROX_QUEUE_SE:
+		nps_pkt_solicited_port_disable(bar_addr, qp->qno);
+		nps_pkt_input_ring_disable(bar_addr, qp->qno);
+		break;
+	case NITROX_QUEUE_ZIP:
+		err = zqmq_input_ring_disable(bar_addr, qp->qno);
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	if (err)
+		return err;
+
 	return rte_memzone_free(qp->cmdq.mz);
 }
 
@@ -83,6 +119,7 @@ nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr, const char *dev_name,
 		return -EINVAL;
 	}
 
+	qp->bar_addr = bar_addr;
 	qp->count = count;
 	qp->head = qp->tail = 0;
 	rte_atomic16_init(&qp->pending_count);
diff --git a/drivers/common/nitrox/nitrox_qp.h b/drivers/common/nitrox/nitrox_qp.h
index d42d53f92b..177bcd7705 100644
--- a/drivers/common/nitrox/nitrox_qp.h
+++ b/drivers/common/nitrox/nitrox_qp.h
@@ -8,9 +8,16 @@
 
 #include 
 #include 
+#include "nitrox_hal.h"
 
 struct nitrox_softreq;
 
+enum nitrox_queue_type {
+	NITROX_QUEUE_SE,
+	NITROX_QUEUE_AE,
+	NITROX_QUEUE_ZIP,
+};
+
 struct command_queue {
 	const struct rte_memzone *mz;
 	uint8_t *dbell_csr_addr;
@@ -22,14 +29,23 @@ struct rid {
 	struct nitrox_softreq *sr;
 };
 
+struct nitrox_qp_stats {
+	uint64_t enqueued_count;
+	uint64_t dequeued_count;
+	uint64_t enqueue_err_count;
+	uint64_t dequeue_err_count;
+};
+
 struct nitrox_qp {
+	enum nitrox_queue_type type;
+	uint8_t *bar_addr;
 	struct command_queue cmdq;
 	struct rid *ridq;
 	uint32_t count;
 	uint32_t head;
 	uint32_t tail;
 	struct rte_mempool *sr_mp;
-	struct rte_cryptodev_stats stats;
+	struct nitrox_qp_stats stats;
 	uint16_t qno;
 	rte_atomic16_t pending_count;
 };
@@ -89,6 +105,23 @@ nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
 	rte_atomic16_inc(&qp->pending_count);
 }
 
+static inline int
+nitrox_qp_enqueue_sr(struct nitrox_qp *qp, struct nitrox_softreq *sr)
+{
+	uint32_t head = qp->head % qp->count;
+	int err;
+
+	err = inc_zqmq_next_cmd(qp->bar_addr, qp->qno);
+	if (unlikely(err))
+		return err;
+
+	qp->head++;
+	qp->ridq[head].sr = sr;
+	rte_smp_wmb();
+	rte_atomic16_inc(&qp->pending_count);
+	return 0;
+}
+
 static inline void
 nitrox_qp_dequeue(struct nitrox_qp *qp)
 {