From patchwork Sun Jan 7 15:40:11 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Srikanth Yalavarthi X-Patchwork-Id: 135788 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id CFD3F43857; Sun, 7 Jan 2024 16:40:31 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0FCC8406B4; Sun, 7 Jan 2024 16:40:28 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 1D9FC402A9 for ; Sun, 7 Jan 2024 16:40:25 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 407EEcgV028095 for ; Sun, 7 Jan 2024 07:40:25 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding:content-type; s= pfpt0220; bh=RxWqyrecLz/13LtKbTmvH1e09KdJ+eKpBkoHlm66UxE=; b=ClC UtGAmthtyStyo3JTpDDew3ikjLVMlpN6bBatM7acZ+ZkiiY19DASY2K/0F9050+b n1PsxDjIMG490Q12fsa4U2uRVgmstCBUC6VGi5/dkIOHc00joUi9D+Za/lJuhUbo NZzVrOJTyBMekHx6pj44eb7AW846LEJpOTIcxPVk3LeoVKkXBN9WKkpx148WQwxI 5Mu2/zgEVV/Yr22ZVH+ud+3/ZVWodCErP5muWMGlLtyP2CuqQZlCnk4RZ/otKfRp PAkbGQuglR+pavaiDtKzv8p7ObfXbYbPWfwewACEcSSpxyoG5gcfNk640Mc5gnai bLpv7TP7ptN4rg/GaGg== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3vf53qjkkh-2 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT) for ; Sun, 07 Jan 2024 07:40:25 -0800 (PST) Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48; Sun, 7 Jan 2024 07:40:23 -0800 
Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend Transport; Sun, 7 Jan 2024 07:40:23 -0800 Received: from ml-host-33.caveonetworks.com (unknown [10.110.143.233]) by maili.marvell.com (Postfix) with ESMTP id 906153F70A1; Sun, 7 Jan 2024 07:40:23 -0800 (PST) From: Srikanth Yalavarthi To: Pavan Nikhilesh , Shijith Thotton , Srikanth Yalavarthi CC: , , , , Subject: [PATCH 2/4] event/cnxk: implement queue pair add and delete Date: Sun, 7 Jan 2024 07:40:11 -0800 Message-ID: <20240107154013.4676-3-syalavarthi@marvell.com> X-Mailer: git-send-email 2.42.0 In-Reply-To: <20240107154013.4676-1-syalavarthi@marvell.com> References: <20240107154013.4676-1-syalavarthi@marvell.com> MIME-Version: 1.0 X-Proofpoint-ORIG-GUID: FNl0i5ZN7ythL_6OGByV4MWtzaQU-biF X-Proofpoint-GUID: FNl0i5ZN7ythL_6OGByV4MWtzaQU-biF X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.997,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2023-12-09_02,2023-12-07_01,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Added structures for ML event adapter. Implemented ML event adapter queue-pair add and delete functions. 
Signed-off-by: Srikanth Yalavarthi --- drivers/event/cnxk/cn10k_eventdev.c | 103 ++++++++++++++++++++++++++++ drivers/event/cnxk/cnxk_eventdev.h | 4 ++ drivers/ml/cnxk/cnxk_ml_ops.h | 12 ++++ 3 files changed, 119 insertions(+) diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c index 09eff569052..201972cec9e 100644 --- a/drivers/event/cnxk/cn10k_eventdev.c +++ b/drivers/event/cnxk/cn10k_eventdev.c @@ -1033,6 +1033,107 @@ cn10k_ml_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte return 0; } +static int +ml_adapter_qp_free(struct cnxk_ml_qp *qp) +{ + rte_mempool_free(qp->mla.req_mp); + qp->mla.enabled = false; + + return 0; +} + +static int +ml_adapter_qp_setup(const struct rte_ml_dev *mldev, struct cnxk_ml_qp *qp) +{ + char name[RTE_MEMPOOL_NAMESIZE]; + uint32_t cache_size, nb_req; + unsigned int req_size; + + snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_mla_req_%u_%u", mldev->data->dev_id, qp->id); + req_size = sizeof(struct cn10k_ml_req); + cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE; + nb_req = cache_size * rte_lcore_count(); + qp->mla.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL, + NULL, rte_socket_id(), 0); + if (qp->mla.req_mp == NULL) + return -ENOMEM; + + qp->mla.enabled = true; + + return 0; +} + +static int +cn10k_ml_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_ml_dev *mldev, + int32_t queue_pair_id) +{ + struct cnxk_ml_qp *qp; + + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(mldev->device, "ml_cn10k", EINVAL); + + if (queue_pair_id == -1) { + uint16_t qp_id; + + for (qp_id = 0; qp_id < mldev->data->nb_queue_pairs; qp_id++) { + qp = mldev->data->queue_pairs[qp_id]; + if (qp->mla.enabled) + ml_adapter_qp_free(qp); + } + } else { + qp = mldev->data->queue_pairs[queue_pair_id]; + if (qp->mla.enabled) + ml_adapter_qp_free(qp); + } + + return 0; +} + +static int +cn10k_ml_adapter_qp_add(const struct 
rte_eventdev *event_dev, const struct rte_ml_dev *mldev, + int32_t queue_pair_id, const struct rte_event *event) +{ + struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev); + uint32_t adptr_xae_cnt = 0; + struct cnxk_ml_qp *qp; + int ret; + + PLT_SET_USED(event); + + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(mldev->device, "ml_cn10k", EINVAL); + + sso_evdev->is_mla_internal_port = 1; + cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); + + if (queue_pair_id == -1) { + uint16_t qp_id; + + for (qp_id = 0; qp_id < mldev->data->nb_queue_pairs; qp_id++) { + qp = mldev->data->queue_pairs[qp_id]; + ret = ml_adapter_qp_setup(mldev, qp); + if (ret != 0) { + cn10k_ml_adapter_qp_del(event_dev, mldev, -1); + return ret; + } + adptr_xae_cnt += qp->mla.req_mp->size; + } + } else { + qp = mldev->data->queue_pairs[queue_pair_id]; + ret = ml_adapter_qp_setup(mldev, qp); + if (ret != 0) + return ret; + + adptr_xae_cnt = qp->mla.req_mp->size; + } + + /* Update ML adapter XAE count */ + sso_evdev->adptr_xae_cnt += adptr_xae_cnt; + cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev); + + return ret; +} + static struct eventdev_ops cn10k_sso_dev_ops = { .dev_infos_get = cn10k_sso_info_get, .dev_configure = cn10k_sso_dev_configure, @@ -1075,6 +1176,8 @@ static struct eventdev_ops cn10k_sso_dev_ops = { .crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits, .ml_adapter_caps_get = cn10k_ml_adapter_caps_get, + .ml_adapter_queue_pair_add = cn10k_ml_adapter_qp_add, + .ml_adapter_queue_pair_del = cn10k_ml_adapter_qp_del, .xstats_get = cnxk_sso_xstats_get, .xstats_reset = cnxk_sso_xstats_reset, diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h index d42d1afa1a1..bc51e952c9a 100644 --- a/drivers/event/cnxk/cnxk_eventdev.h +++ b/drivers/event/cnxk/cnxk_eventdev.h @@ -124,6 +124,10 @@ struct cnxk_sso_evdev { uint32_t gw_mode; uint16_t stash_cnt; struct 
/* Per-queue-pair ML event adapter state. */
struct cnxk_ml_adapter_info {
	/** Set if queue pair is added to ML adapter */
	bool enabled;

	/** ML in-flight request mempool */
	struct rte_mempool *req_mp;
};