From patchwork Fri Mar 5 13:38:41 2021
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 88542
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram
Date: Fri, 5 Mar 2021 19:08:41 +0530
Message-ID: <20210305133918.8005-16-ndabilpuram@marvell.com>
In-Reply-To: <20210305133918.8005-1-ndabilpuram@marvell.com>
References: <20210305133918.8005-1-ndabilpuram@marvell.com>
Subject: [dpdk-dev] [PATCH 15/52] common/cnxk: add npa batch alloc/free support

From: Ashwin Sekhar T K

Add APIs to do allocations and frees in batch from an NPA pool.
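A minimal usage sketch of the two top-level helpers added below
(illustrative only, not part of this patch; batch_alloc_free_example()
is a hypothetical caller, and aura_handle/lmt_addr/lmt_id are assumed
to come from the usual ROC aura and per-core LMT line setup):

static void
batch_alloc_free_example(uint64_t aura_handle, uint64_t lmt_addr,
                         uint64_t lmt_id)
{
        /* Scratch area must be ROC_ALIGN (128 byte) aligned: the NPA
         * writes a status word into the first 64 bits of every cache
         * line of this buffer.
         */
        uint64_t aligned_buf[256] __attribute__((aligned(ROC_ALIGN)));
        uint64_t bufs[256];
        unsigned int got;

        /* dis_wait = 0, drop = 0, partial = 1: on a shortfall, keep
         * whatever was allocated instead of freeing it all back.
         */
        got = roc_npa_aura_op_batch_alloc(aura_handle, bufs, aligned_buf,
                                          256, 0, 0, 1);

        /* Return the pointers; fabs = 0 leaves the FABS bit clear.
         * Frees are chunked internally to 15 pointers per LMTST
         * (ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS).
         */
        roc_npa_aura_op_batch_free(aura_handle, bufs, got, 0, lmt_addr,
                                   lmt_id);
}

The 'partial' flag decides whether a short allocation is kept or rolled
back, which is why the sketch passes 1 there.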
Signed-off-by: Ashwin Sekhar T K
---
 drivers/common/cnxk/roc_npa.h | 217 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 217 insertions(+)

diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index a7815e5..ab0b135 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -8,6 +8,9 @@
 #define ROC_AURA_ID_MASK       (BIT_ULL(16) - 1)
 #define ROC_AURA_OP_LIMIT_MASK (BIT_ULL(36) - 1)
 
+#define ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS 512
+#define ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS  15
+
 /* 16 CASP instructions can be outstanding in CN9k, but we use only 15
  * outstanding CASPs as we run out of registers.
  */
@@ -180,6 +183,114 @@ roc_npa_pool_op_performance_counter(uint64_t aura_handle, const int drop)
         return reg & 0xFFFFFFFFFFFF;
 }
 
+static inline int
+roc_npa_aura_batch_alloc_issue(uint64_t aura_handle, uint64_t *buf,
+                               unsigned int num, const int dis_wait,
+                               const int drop)
+{
+        unsigned int i;
+        int64_t *addr;
+        uint64_t res;
+        union {
+                uint64_t u;
+                struct npa_batch_alloc_compare_s compare_s;
+        } cmp;
+
+        if (num > ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS)
+                return -1;
+
+        /* Zero first word of every cache line */
+        for (i = 0; i < num; i += (ROC_ALIGN / sizeof(uint64_t)))
+                buf[i] = 0;
+
+        addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
+                           NPA_LF_AURA_BATCH_ALLOC);
+        cmp.u = 0;
+        cmp.compare_s.aura = roc_npa_aura_handle_to_aura(aura_handle);
+        cmp.compare_s.drop = drop;
+        cmp.compare_s.stype = ALLOC_STYPE_STSTP;
+        cmp.compare_s.dis_wait = dis_wait;
+        cmp.compare_s.count = num;
+
+        res = roc_atomic64_cas(cmp.u, (uint64_t)buf, addr);
+        if (res != ALLOC_RESULT_ACCEPTED && res != ALLOC_RESULT_NOCORE)
+                return -1;
+
+        return 0;
+}
+
+static inline unsigned int
+roc_npa_aura_batch_alloc_count(uint64_t *aligned_buf, unsigned int num)
+{
+        unsigned int count, i;
+
+        if (num > ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS)
+                return 0;
+
+        count = 0;
+        /* Check each ROC cache line one by one */
+        for (i = 0; i < num; i += (ROC_ALIGN >> 3)) {
+                struct npa_batch_alloc_status_s *status;
+                int ccode;
+
+                status = (struct npa_batch_alloc_status_s *)&aligned_buf[i];
+
+                /* Status is updated in first 7 bits of each 128 byte cache
+                 * line. Wait until the status gets updated.
+                 */
+                do {
+                        ccode = (volatile int)status->ccode;
+                } while (ccode == ALLOC_CCODE_INVAL);
+
+                count += status->count;
+        }
+
+        return count;
+}
+
+static inline unsigned int
+roc_npa_aura_batch_alloc_extract(uint64_t *buf, uint64_t *aligned_buf,
+                                 unsigned int num)
+{
+        unsigned int count, i;
+
+        if (num > ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS)
+                return 0;
+
+        count = 0;
+        /* Check each ROC cache line one by one */
+        for (i = 0; i < num; i += (ROC_ALIGN >> 3)) {
+                struct npa_batch_alloc_status_s *status;
+                int line_count, ccode;
+
+                status = (struct npa_batch_alloc_status_s *)&aligned_buf[i];
+
+                /* Status is updated in first 7 bits of each 128 byte cache
+                 * line. Wait until the status gets updated.
+                 */
+                do {
+                        ccode = (volatile int)status->ccode;
+                } while (ccode == ALLOC_CCODE_INVAL);
+
+                line_count = status->count;
+
+                /* Clear the status from the cache line */
+                status->ccode = 0;
+                status->count = 0;
+
+                /* 'Compress' the allocated buffers as there can
+                 * be 'holes' at the end of the 128 byte cache
+                 * lines.
+                 */
+                memmove(&buf[count], &aligned_buf[i],
+                        line_count * sizeof(uint64_t));
+
+                count += line_count;
+        }
+
+        return count;
+}
+
 static inline void
 roc_npa_aura_op_bulk_free(uint64_t aura_handle, uint64_t const *buf,
                           unsigned int num, const int fabs)
@@ -194,6 +305,112 @@ roc_npa_aura_op_bulk_free(uint64_t aura_handle, uint64_t const *buf,
 }
 
 static inline unsigned int
+roc_npa_aura_op_batch_alloc(uint64_t aura_handle, uint64_t *buf,
+                            uint64_t *aligned_buf, unsigned int num,
+                            const int dis_wait, const int drop,
+                            const int partial)
+{
+        unsigned int count, chunk, num_alloc;
+
+        /* The buffer should be 128 byte cache line aligned */
+        if (((uint64_t)aligned_buf & (ROC_ALIGN - 1)) != 0)
+                return 0;
+
+        count = 0;
+        while (num) {
+                chunk = (num > ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS) ?
+                                ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS :
+                                num;
+
+                if (roc_npa_aura_batch_alloc_issue(aura_handle, aligned_buf,
+                                                   chunk, dis_wait, drop))
+                        break;
+
+                num_alloc = roc_npa_aura_batch_alloc_extract(buf, aligned_buf,
+                                                             chunk);
+
+                count += num_alloc;
+                buf += num_alloc;
+                num -= num_alloc;
+
+                if (num_alloc != chunk)
+                        break;
+        }
+
+        /* If the requested number of pointers was not allocated and if partial
+         * alloc is not desired, then free allocated pointers.
+         */
+        if (unlikely(num != 0 && !partial)) {
+                roc_npa_aura_op_bulk_free(aura_handle, buf - count, count, 1);
+                count = 0;
+        }
+
+        return count;
+}
+
+static inline void
+roc_npa_aura_batch_free(uint64_t aura_handle, uint64_t const *buf,
+                        unsigned int num, const int fabs, uint64_t lmt_addr,
+                        uint64_t lmt_id)
+{
+        uint64_t addr, tar_addr, free0;
+        volatile uint64_t *lmt_data;
+        unsigned int i;
+
+        if (num > ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS)
+                return;
+
+        lmt_data = (uint64_t *)lmt_addr;
+
+        addr = roc_npa_aura_handle_to_base(aura_handle) +
+               NPA_LF_AURA_BATCH_FREE0;
+
+        /*
+         * NPA_LF_AURA_BATCH_FREE0
+         *
+         * 63   63  62  33  32       32  31  20  19    0
+         * -----------------------------------------
+         * | FABS | Rsvd | COUNT_EOT | Rsvd | AURA |
+         * -----------------------------------------
+         */
+        free0 = roc_npa_aura_handle_to_aura(aura_handle);
+        if (fabs)
+                free0 |= (0x1UL << 63);
+        if (num & 0x1)
+                free0 |= (0x1UL << 32);
+
+        /* tar_addr[4:6] is LMTST size-1 in units of 128b */
+        tar_addr = addr | ((num >> 1) << 4);
+
+        lmt_data[0] = free0;
+        for (i = 0; i < num; i++)
+                lmt_data[i + 1] = buf[i];
+
+        roc_lmt_submit_steorl(lmt_id, tar_addr);
+        plt_io_wmb();
+}
+
+static inline void
+roc_npa_aura_op_batch_free(uint64_t aura_handle, uint64_t const *buf,
+                           unsigned int num, const int fabs, uint64_t lmt_addr,
+                           uint64_t lmt_id)
+{
+        unsigned int chunk;
+
+        while (num) {
+                chunk = (num >= ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS) ?
+                                ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS :
+                                num;
+
+                roc_npa_aura_batch_free(aura_handle, buf, chunk, fabs, lmt_addr,
+                                        lmt_id);
+
+                buf += chunk;
+                num -= chunk;
+        }
+}
+
+static inline unsigned int
 roc_npa_aura_bulk_alloc(uint64_t aura_handle, uint64_t *buf, unsigned int num,
                         const int drop)
 {