From patchwork Thu Apr 1 12:37:37 2021
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 90385
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram
Date: Thu, 1 Apr 2021 18:07:37 +0530
Message-ID: <20210401123817.14348-13-ndabilpuram@marvell.com>
In-Reply-To: <20210401123817.14348-1-ndabilpuram@marvell.com>
References: <20210305133918.8005-1-ndabilpuram@marvell.com>
 <20210401123817.14348-1-ndabilpuram@marvell.com>
Subject: [dpdk-dev] [PATCH v3 12/52] common/cnxk: add npa pool HW ops

From: Ashwin Sekhar T K

Add APIs for creating, destroying, and modifying NPA pools.
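The expected call flow, as a sketch (the wrapper function, its buf_iova
argument and the pre-allocated buffer memory behind it are assumptions
made for illustration, not part of this patch):

	#include "roc_api.h"

	int
	example_npa_pool(uint64_t buf_iova) /* IOVA of buffer memory */
	{
		uint64_t aura_handle, buf;
		int rc;

		/* 4096 buffers of 2048B each; NULL aura/pool selects the
		 * library defaults (nat_align = 1, buf_offset = 1).
		 */
		rc = roc_npa_pool_create(&aura_handle, 2048, 4096, NULL, NULL);
		if (rc)
			return rc;

		/* Advertise the IOVA range backing the buffers */
		roc_npa_aura_op_range_set(aura_handle, buf_iova,
					  buf_iova + 4096 * 2048);

		/* Fast-path free and alloc through the aura handle */
		roc_npa_aura_op_free(aura_handle, 0, buf_iova);
		buf = roc_npa_aura_op_alloc(aura_handle, 0);
		(void)buf;

		return roc_npa_pool_destroy(aura_handle);
	}

roc_npa_pool_range_update_check() can then be used to verify that the
range programmed above has reached the pool context.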
Signed-off-by: Ashwin Sekhar T K
---
 drivers/common/cnxk/roc_npa.c   | 421 ++++++++++++++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_npa.h   | 146 ++++++++++++++
 drivers/common/cnxk/version.map |   5 +
 3 files changed, 572 insertions(+)

diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 0d4a56a..80f5a78 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -5,6 +5,427 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+			  uint64_t end_iova)
+{
+	const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
+			       NPA_LF_POOL_OP_PTR_START0;
+	const uint64_t end = roc_npa_aura_handle_to_base(aura_handle) +
+			     NPA_LF_POOL_OP_PTR_END0;
+	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf = idev_npa_obj_get();
+	struct npa_aura_lim *lim;
+
+	PLT_ASSERT(lf);
+	lim = lf->aura_lim;
+
+	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+
+	roc_store_pair(lim[reg].ptr_start, reg, start);
+	roc_store_pair(lim[reg].ptr_end, reg, end);
+}
+
+static int
+npa_aura_pool_init(struct mbox *mbox, uint32_t aura_id, struct npa_aura_s *aura,
+		   struct npa_pool_s *pool)
+{
+	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
+	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
+	struct mbox_dev *mdev = &mbox->dev[0];
+	int rc = -ENOSPC, off;
+
+	aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (aura_init_req == NULL)
+		return rc;
+	aura_init_req->aura_id = aura_id;
+	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+	aura_init_req->op = NPA_AQ_INSTOP_INIT;
+	mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+	pool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (pool_init_req == NULL)
+		return rc;
+	pool_init_req->aura_id = aura_id;
+	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+	pool_init_req->op = NPA_AQ_INSTOP_INIT;
+	mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));
+
+	rc = mbox_process(mbox);
+	if (rc < 0)
+		return rc;
+
+	off = mbox->rx_start +
+	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+	aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+	off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
+	pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+	if (aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
+		return 0;
+	else
+		return NPA_ERR_AURA_POOL_INIT;
+}
+
+static int
+npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle)
+{
+	struct npa_aq_enq_req *aura_req, *pool_req;
+	struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
+	struct mbox_dev *mdev = &mbox->dev[0];
+	struct ndc_sync_op *ndc_req;
+	int rc = -ENOSPC, off;
+	uint64_t ptr;
+
+	/* Procedure for disabling an aura/pool */
+	plt_delay_us(10);
+
+	/* Clear all the pointers from the aura */
+	do {
+		ptr = roc_npa_aura_op_alloc(aura_handle, 0);
+	} while (ptr);
+
+	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (pool_req == NULL)
+		return rc;
+	pool_req->aura_id = aura_id;
+	pool_req->ctype = NPA_AQ_CTYPE_POOL;
+	pool_req->op = NPA_AQ_INSTOP_WRITE;
+	pool_req->pool.ena = 0;
+	pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;
+
+	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (aura_req == NULL)
+		return rc;
+	aura_req->aura_id = aura_id;
+	aura_req->ctype = NPA_AQ_CTYPE_AURA;
+	aura_req->op = NPA_AQ_INSTOP_WRITE;
+	aura_req->aura.ena = 0;
+	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+	rc = mbox_process(mbox);
+	if (rc < 0)
+		return rc;
+
+	off = mbox->rx_start +
+	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+	off = mbox->rx_start + pool_rsp->hdr.next_msgoff;
+	aura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+	if (aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0)
+		return NPA_ERR_AURA_POOL_FINI;
+
+	/* Sync NDC-NPA for LF */
+	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+	if (ndc_req == NULL)
+		return -ENOSPC;
+	ndc_req->npa_lf_sync = 1;
+	rc = mbox_process(mbox);
+	if (rc) {
+		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+		return NPA_ERR_AURA_POOL_FINI;
+	}
+	return 0;
+}
+
+static inline char *
+npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
+{
+	snprintf(name, PLT_MEMZONE_NAMESIZE, "roc_npa_stack_%x_%d", lf->pf_func,
+		 pool_id);
+	return name;
+}
+
+static inline const struct plt_memzone *
+npa_stack_dma_alloc(struct npa_lf *lf, char *name, int pool_id, size_t size)
+{
+	const char *mz_name = npa_stack_memzone_name(lf, pool_id, name);
+
+	return plt_memzone_reserve_cache_align(mz_name, size);
+}
+
+static inline int
+npa_stack_dma_free(struct npa_lf *lf, char *name, int pool_id)
+{
+	const struct plt_memzone *mz;
+
+	mz = plt_memzone_lookup(npa_stack_memzone_name(lf, pool_id, name));
+	if (mz == NULL)
+		return NPA_ERR_PARAM;
+
+	return plt_memzone_free(mz);
+}
+
+static inline int
+bitmap_ctzll(uint64_t slab)
+{
+	if (slab == 0)
+		return 0;
+
+	return __builtin_ctzll(slab);
+}
+
+static int
+npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
+			 const uint32_t block_count, struct npa_aura_s *aura,
+			 struct npa_pool_s *pool, uint64_t *aura_handle)
+{
+	int rc, aura_id, pool_id, stack_size, alloc_size;
+	char name[PLT_MEMZONE_NAMESIZE];
+	const struct plt_memzone *mz;
+	uint64_t slab;
+	uint32_t pos;
+
+	/* Sanity check */
+	if (!lf || !block_size || !block_count || !pool || !aura ||
+	    !aura_handle)
+		return NPA_ERR_PARAM;
+
+	/* Block size must be cache-line aligned and in the range 128B-128KB */
+	if (block_size % ROC_ALIGN || block_size < 128 ||
+	    block_size > 128 * 1024)
+		return NPA_ERR_INVALID_BLOCK_SZ;
+
+	pos = 0;
+	slab = 0;
+	/* Scan from the beginning */
+	plt_bitmap_scan_init(lf->npa_bmp);
+	/* Scan bitmap to get the free pool */
+	rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
+	/* Empty bitmap */
+	if (rc == 0) {
+		plt_err("Mempools exhausted");
+		return NPA_ERR_AURA_ID_ALLOC;
+	}
+
+	/* Get aura_id from resource bitmap */
+	aura_id = pos + bitmap_ctzll(slab);
+	/* Mark pool as reserved */
+	plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+	/* Each aura is configured with its own pool (aura-pool pair) */
+	pool_id = aura_id;
+	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+	      aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+			   NPA_ERR_AURA_ID_ALLOC :
+			   0;
+	if (rc)
+		goto exit;
+
+	/* Allocate stack memory */
+	stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
+	alloc_size = stack_size * lf->stack_pg_bytes;
+
+	mz = npa_stack_dma_alloc(lf, name, pool_id, alloc_size);
+	if (mz == NULL) {
+		rc = NPA_ERR_ALLOC;
+		goto aura_res_put;
+	}
+
+	/* Update aura fields */
+	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+	aura->ena = 1;
+	aura->shift = __builtin_clz(block_count) - 8;
+	aura->limit = block_count;
+	aura->pool_caching = 1;
+	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+	/* Many to one reduction */
+	aura->err_qint_idx = aura_id % lf->qints;
+
+	/* Update pool fields */
+	pool->stack_base = mz->iova;
+	pool->ena = 1;
+	pool->buf_size = block_size / ROC_ALIGN;
+	pool->stack_max_pages = stack_size;
+	pool->shift = __builtin_clz(block_count) - 8;
+	pool->ptr_start = 0;
+	pool->ptr_end = ~0;
+	pool->stack_caching = 1;
+	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
+	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
+	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
+
+	/* Many to one reduction */
+	pool->err_qint_idx = pool_id % lf->qints;
+
+	/* Issue AURA_INIT and POOL_INIT op */
+	rc = npa_aura_pool_init(lf->mbox, aura_id, aura, pool);
+	if (rc)
+		goto stack_mem_free;
+
+	*aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
+	/* Update aura count */
+	roc_npa_aura_op_cnt_set(*aura_handle, 0, block_count);
+	/* Read it back to make sure aura count is updated */
+	roc_npa_aura_op_cnt_get(*aura_handle);
+
+	return 0;
+
+stack_mem_free:
+	plt_memzone_free(mz);
+aura_res_put:
+	plt_bitmap_set(lf->npa_bmp, aura_id);
+exit:
+	return rc;
+}
+
+int
+roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
+		    uint32_t block_count, struct npa_aura_s *aura,
+		    struct npa_pool_s *pool)
+{
+	struct npa_aura_s defaura;
+	struct npa_pool_s defpool;
+	struct idev_cfg *idev;
+	struct npa_lf *lf;
+	int rc;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL) {
+		rc = NPA_ERR_DEVICE_NOT_BOUNDED;
+		goto error;
+	}
+
+	idev = idev_get_cfg();
+	if (idev == NULL) {
+		rc = NPA_ERR_ALLOC;
+		goto error;
+	}
+
+	if (aura == NULL) {
+		memset(&defaura, 0, sizeof(struct npa_aura_s));
+		aura = &defaura;
+	}
+	if (pool == NULL) {
+		memset(&defpool, 0, sizeof(struct npa_pool_s));
+		defpool.nat_align = 1;
+		defpool.buf_offset = 1;
+		pool = &defpool;
+	}
+
+	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
+				      aura_handle);
+	if (rc) {
+		plt_err("Failed to alloc pool or aura rc=%d", rc);
+		goto error;
+	}
+
+	plt_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%" PRIx64,
+		    lf, block_size, block_count, *aura_handle);
+
+	/* Just hold the reference of the object */
+	__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
+error:
+	return rc;
+}
+
+int
+roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
+{
+	struct npa_aq_enq_req *aura_req;
+	struct npa_lf *lf;
+	int rc;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+	aura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
+	if (aura_req == NULL)
+		return -ENOMEM;
+	aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	aura_req->ctype = NPA_AQ_CTYPE_AURA;
+	aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+	aura_req->aura.limit = aura_limit;
+	aura_req->aura_mask.limit =
+		~(aura_req->aura_mask.limit);
+	rc = mbox_process(lf->mbox);
+
+	return rc;
+}
+
+static int
+npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
+{
+	char name[PLT_MEMZONE_NAMESIZE];
+	int aura_id, pool_id, rc;
+
+	if (!lf || !aura_handle)
+		return NPA_ERR_PARAM;
+
+	aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	pool_id = aura_id;
+	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
+	rc |= npa_stack_dma_free(lf, name, pool_id);
+
+	plt_bitmap_set(lf->npa_bmp, aura_id);
+
+	return rc;
+}
+
+int
+roc_npa_pool_destroy(uint64_t aura_handle)
+{
+	struct npa_lf *lf = idev_npa_obj_get();
+	int rc = 0;
+
+	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+	rc = npa_aura_pool_pair_free(lf, aura_handle);
+	if (rc)
+		plt_err("Failed to destroy pool or aura rc=%d", rc);
+
+	/* Release the reference of npa */
+	rc |= npa_lf_fini();
+	return rc;
+}
+
+int
+roc_npa_pool_range_update_check(uint64_t aura_handle)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf;
+	struct npa_aura_lim *lim;
+	__io struct npa_pool_s *pool;
+	struct npa_aq_enq_req *req;
+	struct npa_aq_enq_rsp *rsp;
+	int rc;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	lim = lf->aura_lim;
+
+	req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
+	if (req == NULL)
+		return -ENOSPC;
+
+	req->aura_id = aura_id;
+	req->ctype = NPA_AQ_CTYPE_POOL;
+	req->op = NPA_AQ_INSTOP_READ;
+
+	rc = mbox_process_msg(lf->mbox, (void *)&rsp);
+	if (rc) {
+		plt_err("Failed to get pool(0x%" PRIx64 ") context", aura_id);
+		return rc;
+	}
+
+	pool = &rsp->pool;
+	if (lim[aura_id].ptr_start != pool->ptr_start ||
+	    lim[aura_id].ptr_end != pool->ptr_end) {
+		plt_err("Range update failed on pool(0x%" PRIx64 ")", aura_id);
+		return NPA_ERR_PARAM;
+	}
+
+	return 0;
+}
+
 static inline int
 npa_attach(struct mbox *mbox)
 {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 029f966..6983849 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -6,6 +6,140 @@
 #define _ROC_NPA_H_
 
 #define ROC_AURA_ID_MASK       (BIT_ULL(16) - 1)
+#define ROC_AURA_OP_LIMIT_MASK (BIT_ULL(36) - 1)
+
+/*
+ * Generate a 64-bit handle so that aura alloc/free operations stay
+ * optimized:
+ * [0, ROC_AURA_ID_MASK] stores the aura_id.
+ * [ROC_AURA_ID_MASK + 1, 2^64 - 1] stores the lf base address.
+ * This scheme is valid only when the OS gives an lf base address
+ * aligned to (ROC_AURA_ID_MASK + 1).
+ */
+static inline uint64_t
+roc_npa_aura_handle_gen(uint32_t aura_id, uintptr_t addr)
+{
+	uint64_t val;
+
+	val = aura_id & ROC_AURA_ID_MASK;
+	return (uint64_t)addr | val;
+}
+
+static inline uint64_t
+roc_npa_aura_handle_to_aura(uint64_t aura_handle)
+{
+	return aura_handle & ROC_AURA_ID_MASK;
+}
+
+static inline uintptr_t
+roc_npa_aura_handle_to_base(uint64_t aura_handle)
+{
+	return (uintptr_t)(aura_handle & ~ROC_AURA_ID_MASK);
+}
+
+static inline uint64_t
+roc_npa_aura_op_alloc(uint64_t aura_handle, const int drop)
+{
+	uint64_t wdata = roc_npa_aura_handle_to_aura(aura_handle);
+	int64_t *addr;
+
+	if (drop)
+		wdata |= BIT_ULL(63); /* DROP */
+
+	addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
+			   NPA_LF_AURA_OP_ALLOCX(0));
+	return roc_atomic64_add_nosync(wdata, addr);
+}
+
+static inline void
+roc_npa_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)
+{
+	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+	const uint64_t addr =
+		roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0;
+	if (fabs)
+		reg |= BIT_ULL(63); /* FABS */
+
+	roc_store_pair(iova, reg, addr);
+}
+
+static inline uint64_t
+roc_npa_aura_op_cnt_get(uint64_t aura_handle)
+{
+	uint64_t wdata;
+	int64_t *addr;
+	uint64_t reg;
+
+	wdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;
+	addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
+			   NPA_LF_AURA_OP_CNT);
+	reg = roc_atomic64_add_nosync(wdata, addr);
+
+	if (reg & BIT_ULL(42) /* OP_ERR */)
+		return 0;
+	else
+		return reg & 0xFFFFFFFFF;
+}
+
+static inline void
+roc_npa_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)
+{
+	uint64_t reg = count & (BIT_ULL(36) - 1);
+
+	if (sign)
+		reg |= BIT_ULL(43); /* CNT_ADD */
+
+	reg |= (roc_npa_aura_handle_to_aura(aura_handle) << 44);
+
+	plt_write64(reg, roc_npa_aura_handle_to_base(aura_handle) +
+				 NPA_LF_AURA_OP_CNT);
+}
+
+static inline uint64_t
+roc_npa_aura_op_limit_get(uint64_t aura_handle)
+{
+	uint64_t wdata;
+	int64_t *addr;
+	uint64_t reg;
+
+	wdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;
+	addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
+			   NPA_LF_AURA_OP_LIMIT);
+	reg = roc_atomic64_add_nosync(wdata, addr);
+
+	if (reg & BIT_ULL(42) /* OP_ERR */)
+		return 0;
+	else
+		return reg & ROC_AURA_OP_LIMIT_MASK;
+}
+
+static inline void
+roc_npa_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)
+{
+	uint64_t reg = limit & ROC_AURA_OP_LIMIT_MASK;
+
+	reg |= (roc_npa_aura_handle_to_aura(aura_handle) << 44);
+
+	plt_write64(reg, roc_npa_aura_handle_to_base(aura_handle) +
+				 NPA_LF_AURA_OP_LIMIT);
+}
+
+static inline uint64_t
+roc_npa_aura_op_available(uint64_t aura_handle)
+{
+	uint64_t wdata;
+	uint64_t reg;
+	int64_t *addr;
+
+	wdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;
+	addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
+			   NPA_LF_POOL_OP_AVAILABLE);
+	reg = roc_atomic64_add_nosync(wdata, addr);
+
+	if (reg & BIT_ULL(42) /* OP_ERR */)
+		return 0;
+	else
+		return reg & 0xFFFFFFFFF;
+}
 
 struct roc_npa {
 	struct plt_pci_device *pci_dev;
@@ -17,6 +151,18 @@ struct roc_npa {
 int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);
 int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 
+/* NPA pool */
+int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
+				  uint32_t block_count, struct npa_aura_s *aura,
+				  struct npa_pool_s *pool);
+int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,
+					uint16_t aura_limit);
+int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);
+int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
+void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
+					 uint64_t start_iova,
+					 uint64_t end_iova);
+
 /* Debug */
 int __roc_api roc_npa_ctx_dump(void);
 int __roc_api roc_npa_dump(void);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 3571db3..e2c0de9 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -11,10 +11,15 @@ INTERNAL {
 	roc_idev_npa_maxpools_set;
 	roc_idev_num_lmtlines_get;
 	roc_model;
+	roc_npa_aura_limit_modify;
+	roc_npa_aura_op_range_set;
 	roc_npa_ctx_dump;
 	roc_npa_dev_fini;
 	roc_npa_dev_init;
 	roc_npa_dump;
+	roc_npa_pool_create;
+	roc_npa_pool_destroy;
+	roc_npa_pool_range_update_check;
 	roc_plt_init;
 
 	local: *;
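
As a reviewer aid, a worked example of the aura handle encoding added in
roc_npa.h (the base address value below is made up; it only needs the
alignment the comment requires):

	/* The lf base is assumed aligned to ROC_AURA_ID_MASK + 1 (64KB),
	 * so the low 16 bits are free to carry the aura id.
	 */
	uintptr_t lf_base = 0x840000000000;	/* hypothetical aligned base */
	uint64_t handle = roc_npa_aura_handle_gen(37 /* 0x25 */, lf_base);

	/* handle == 0x840000000025 */
	uint64_t aura_id = roc_npa_aura_handle_to_aura(handle);	/* 0x25 */
	uintptr_t base = roc_npa_aura_handle_to_base(handle);	/* lf_base */

This packing is what lets the fast-path helpers such as
roc_npa_aura_op_alloc() derive both the atomic-op wdata (aura id) and
the register address (lf base) from a single handle value.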