From patchwork Tue Sep 12 16:26:31 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131349 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BD9E14257B; Tue, 12 Sep 2023 10:08:11 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E6EA7402DE; Tue, 12 Sep 2023 10:08:06 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 282094027E for ; Tue, 12 Sep 2023 10:08:05 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506085; x=1726042085; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=cy5tZuQcX9ZESbAJTmfFDW+3ldw7/RdQOttB0i4KnzY=; b=aY7dRNJI382PVA/T+vSO7RRsStyweoyWxM5SSEApdPTjD57YNHIhHQA8 LcsIz/5v8m5R6oI6ojMFPhqTM8SlAxIk4Qs3HYhPxgZJEYbwpWQCEoguj t/9ubPoKmbd+IJLVw59T7VOm5RJiU1veq7vUkNHdCMgHYAUKOMjrJtb0X 66WRJ6oXDZRl1XHtSOTHhEuRQC98icweORc0PnlKlKSg9LEhzjecIRzEL XKnRTSRbwogPBVzZu19SktmzD6BmGpzO0Oyvqm+wemDOxnXRq3kRq4eGK RSi5uJ0H9Sl7YQCJiooUJ0ws2acA2ZdizFBpChRH9R1q45EfB5Vbgew6s w==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639558" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639558" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:04 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702555" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702555" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:02 -0700 
From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 01/10] net/cpfl: refine devargs parse and process Date: Tue, 12 Sep 2023 16:26:31 +0000 Message-Id: <20230912162640.1439383-2-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing 1. Keep devargs in adapter. 2. Refine handling the case with no vport be specified in devargs. 3. Separate devargs parse and devargs process Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++--------------- drivers/net/cpfl/cpfl_ethdev.h | 1 + 2 files changed, 84 insertions(+), 71 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index c4ca9343c3..46b3a52e49 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args) } static int -cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, - struct cpfl_devargs *cpfl_args) +cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { struct rte_devargs *devargs = pci_dev->device.devargs; + struct cpfl_devargs *cpfl_args = &adapter->devargs; struct rte_kvargs *kvlist; - int i, ret; + int ret; cpfl_args->req_vport_nb = 0; @@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap if (ret != 0) goto fail; - /* check parsed devargs */ - if (adapter->cur_vport_nb + cpfl_args->req_vport_nb > - 
adapter->max_vport_nb) { - PMD_INIT_LOG(ERR, "Total vport number can't be > %d", - adapter->max_vport_nb); - ret = -EINVAL; - goto fail; - } - - for (i = 0; i < cpfl_args->req_vport_nb; i++) { - if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) { - PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d", - cpfl_args->req_vports[i], adapter->max_vport_nb - 1); - ret = -EINVAL; - goto fail; - } - - if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) { - PMD_INIT_LOG(ERR, "Vport %d has been requested", - cpfl_args->req_vports[i]); - ret = -EINVAL; - goto fail; - } - } - fail: rte_kvargs_free(kvlist); return ret; @@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter) adapter->vports = NULL; } +static int +cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_devargs *devargs = &adapter->devargs; + int i; + + /* refine vport number, at least 1 vport */ + if (devargs->req_vport_nb == 0) { + devargs->req_vport_nb = 1; + devargs->req_vports[0] = 0; + } + + /* check parsed devargs */ + if (adapter->cur_vport_nb + devargs->req_vport_nb > + adapter->max_vport_nb) { + PMD_INIT_LOG(ERR, "Total vport number can't be > %d", + adapter->max_vport_nb); + return -EINVAL; + } + + for (i = 0; i < devargs->req_vport_nb; i++) { + if (devargs->req_vports[i] > adapter->max_vport_nb - 1) { + PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d", + devargs->req_vports[i], adapter->max_vport_nb - 1); + return -EINVAL; + } + + if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) { + PMD_INIT_LOG(ERR, "Vport %d has been requested", + devargs->req_vports[i]); + return -EINVAL; + } + } + + return 0; +} + +static int +cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) +{ + struct cpfl_vport_param vport_param; + char name[RTE_ETH_NAME_MAX_LEN]; + int ret, i; + + for (i = 0; i < adapter->devargs.req_vport_nb; i++) { + vport_param.adapter = adapter; + vport_param.devarg_id = 
adapter->devargs.req_vports[i]; + vport_param.idx = cpfl_vport_idx_alloc(adapter); + if (vport_param.idx == CPFL_INVALID_VPORT_IDX) { + PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id); + break; + } + snprintf(name, sizeof(name), "net_%s_vport_%d", + pci_dev->device.name, + adapter->devargs.req_vports[i]); + ret = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct cpfl_vport), + NULL, NULL, cpfl_dev_vport_init, + &vport_param); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to create vport %d", + vport_param.devarg_id); + } + + return 0; +} + static int cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct cpfl_vport_param vport_param; struct cpfl_adapter_ext *adapter; - struct cpfl_devargs devargs; - char name[RTE_ETH_NAME_MAX_LEN]; - int i, retval; + int retval; if (!cpfl_adapter_list_init) { rte_spinlock_init(&cpfl_adapter_lock); @@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -ENOMEM; } + retval = cpfl_parse_devargs(pci_dev, adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to parse private devargs"); + return retval; + } + retval = cpfl_adapter_ext_init(pci_dev, adapter); if (retval != 0) { PMD_INIT_LOG(ERR, "Failed to init adapter."); @@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next); rte_spinlock_unlock(&cpfl_adapter_lock); - retval = cpfl_parse_devargs(pci_dev, adapter, &devargs); + retval = cpfl_vport_devargs_process(adapter); if (retval != 0) { - PMD_INIT_LOG(ERR, "Failed to parse private devargs"); + PMD_INIT_LOG(ERR, "Failed to process vport devargs"); goto err; } - if (devargs.req_vport_nb == 0) { - /* If no vport devarg, create vport 0 by default. 
*/ - vport_param.adapter = adapter; - vport_param.devarg_id = 0; - vport_param.idx = cpfl_vport_idx_alloc(adapter); - if (vport_param.idx == CPFL_INVALID_VPORT_IDX) { - PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id); - return 0; - } - snprintf(name, sizeof(name), "cpfl_%s_vport_0", - pci_dev->device.name); - retval = rte_eth_dev_create(&pci_dev->device, name, - sizeof(struct cpfl_vport), - NULL, NULL, cpfl_dev_vport_init, - &vport_param); - if (retval != 0) - PMD_DRV_LOG(ERR, "Failed to create default vport 0"); - } else { - for (i = 0; i < devargs.req_vport_nb; i++) { - vport_param.adapter = adapter; - vport_param.devarg_id = devargs.req_vports[i]; - vport_param.idx = cpfl_vport_idx_alloc(adapter); - if (vport_param.idx == CPFL_INVALID_VPORT_IDX) { - PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id); - break; - } - snprintf(name, sizeof(name), "cpfl_%s_vport_%d", - pci_dev->device.name, - devargs.req_vports[i]); - retval = rte_eth_dev_create(&pci_dev->device, name, - sizeof(struct cpfl_vport), - NULL, NULL, cpfl_dev_vport_init, - &vport_param); - if (retval != 0) - PMD_DRV_LOG(ERR, "Failed to create vport %d", - vport_param.devarg_id); - } + retval = cpfl_vport_create(pci_dev, adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to create vports."); + goto err; } return 0; diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 2e42354f70..b637bf2e45 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -115,6 +115,7 @@ struct cpfl_adapter_ext { uint16_t cur_vport_nb; uint16_t used_vecs_num; + struct cpfl_devargs devargs; }; TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext); From patchwork Tue Sep 12 16:26:32 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131350 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org 
Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6DFA64257B; Tue, 12 Sep 2023 10:08:18 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 10180402EC; Tue, 12 Sep 2023 10:08:09 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id D4A76402DB for ; Tue, 12 Sep 2023 10:08:06 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506087; x=1726042087; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=QcB+av6GqqsA0/+lfYG8YXW8db//SwqmhxfB3QnruHQ=; b=j6sN/4Bi77SQgSuH8R9QG0kgWRreQhnTkle6lefzgrkcZS2cnk0dFsvm ptRiPrD7XQRAbCFDCeQY6hRkLNP+/R1JbCbNssoEfTatyy2nMRCpQajBG Vj1z/3iK5fYG5oIr1IfRL/X8oyQAceoEU5G2jEsOjIQILH6u+bqFsiGGU aDCUF2sG7sV4muD9zdMRBI4ExKVPQp6W6A26iZssYQao6I+I+DmjhZLxA xLjBT3BuTWvN+5bGx47ke0FBY9qrA7+y1tSXkoqfIKHZKrbGsTQmBYRXB f+2AvbzVTz+juY2wJTriSrkAyC7B0Wd26UK7Wta+c5IgqMGGv0hjlftTU w==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639559" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639559" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:06 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702559" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702559" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:04 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 02/10] net/cpfl: introduce interface structure Date: Tue, 12 Sep 2023 16:26:32 +0000 Message-Id: <20230912162640.1439383-3-beilei.xing@intel.com> 
X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Introduce cpfl interface structure to distinguish vport and port representor. Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 3 +++ drivers/net/cpfl/cpfl_ethdev.h | 15 +++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 46b3a52e49..92fe92c00f 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) goto err; } + cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT; + cpfl_vport->itf.adapter = adapter; + cpfl_vport->itf.data = dev->data; adapter->vports[param->idx] = cpfl_vport; adapter->cur_vports |= RTE_BIT32(param->devarg_id); adapter->cur_vport_nb++; diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index b637bf2e45..feb1edc4b8 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -86,7 +86,18 @@ struct p2p_queue_chunks_info { uint32_t rx_buf_qtail_spacing; }; +enum cpfl_itf_type { + CPFL_ITF_TYPE_VPORT, +}; + +struct cpfl_itf { + enum cpfl_itf_type type; + struct cpfl_adapter_ext *adapter; + void *data; +}; + struct cpfl_vport { + struct cpfl_itf itf; struct idpf_vport base; struct p2p_queue_chunks_info *p2p_q_chunks_info; @@ -124,5 +135,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext); RTE_DEV_TO_PCI((eth_dev)->device) #define CPFL_ADAPTER_TO_EXT(p) \ container_of((p), struct cpfl_adapter_ext, base) +#define CPFL_DEV_TO_VPORT(dev) \ + 
((struct cpfl_vport *)((dev)->data->dev_private)) +#define CPFL_DEV_TO_ITF(dev) \ + ((struct cpfl_itf *)((dev)->data->dev_private)) #endif /* _CPFL_ETHDEV_H_ */ From patchwork Tue Sep 12 16:26:33 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131351 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6A9144257B; Tue, 12 Sep 2023 10:08:25 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 430D8402F2; Tue, 12 Sep 2023 10:08:11 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id D6F49402E6 for ; Tue, 12 Sep 2023 10:08:08 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506089; x=1726042089; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=p+0fVlUA/ASwEbyoRIPBJBIjItOVPlAJA1wjm1OOETg=; b=Ls6BsMrR2IsS4Uv2Ow8KIgMDWZkWRUN9z0yj7RG4LEsfVHXctJD5i0QF uHYhTIoQbP7alEUEjgGW9KyH9ovlyXQ78DhVsQYG5D1j9Woh725Rn0hq2 brUdjCmAb1Gi/jHeBoKIO0HR71HfRgfYfeQL/yN53FHIRdJ9OMl50vl1/ T7cQ+JE8UaQRzi/7YIJHrDRM7mPphMLRfvLIpmPmMml8EmM/Z1mZVtjLF C+eNXmGObEtVnKHwmS7dNsIXP/XmRJIbIyDogrf+wnwqragWGhM6tdBNe 0HpbMfYawS1CWDz2nWBbE9tnZBhZ2/bUSRSz9pOnurYWAoDrOxQgiypDB A==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639560" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639560" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:08 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702564" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; 
d="scan'208";a="858702564" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:06 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 03/10] net/cpfl: refine handle virtual channel message Date: Tue, 12 Sep 2023 16:26:33 +0000 Message-Id: <20230912162640.1439383-4-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Refine handle virtual channel event message. Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 48 +++++++++++++++++----------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 92fe92c00f..31a5822d2c 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1450,40 +1450,52 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap return ret; } -static struct idpf_vport * +static struct cpfl_vport * cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id) { - struct idpf_vport *vport = NULL; + struct cpfl_vport *vport = NULL; int i; for (i = 0; i < adapter->cur_vport_nb; i++) { - vport = &adapter->vports[i]->base; - if (vport->vport_id != vport_id) + vport = adapter->vports[i]; + if (vport == NULL) + continue; + if (vport->base.vport_id != vport_id) continue; else return vport; } - return vport; + return NULL; } static void -cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, 
uint16_t msglen) +cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen) { struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg; - struct rte_eth_dev_data *data = vport->dev_data; - struct rte_eth_dev *dev = &rte_eth_devices[data->port_id]; + struct cpfl_vport *vport; + struct rte_eth_dev_data *data; + struct rte_eth_dev *dev; if (msglen < sizeof(struct virtchnl2_event)) { PMD_DRV_LOG(ERR, "Error event"); return; } + vport = cpfl_find_vport(adapter, vc_event->vport_id); + if (!vport) { + PMD_DRV_LOG(ERR, "Can't find vport."); + return; + } + + data = vport->itf.data; + dev = &rte_eth_devices[data->port_id]; + switch (vc_event->event) { case VIRTCHNL2_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE"); - vport->link_up = !!(vc_event->link_status); - vport->link_speed = vc_event->link_speed; + vport->base.link_up = !!(vc_event->link_status); + vport->base.link_speed = vc_event->link_speed; cpfl_dev_link_update(dev, 0); break; default: @@ -1498,10 +1510,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) struct idpf_adapter *base = &adapter->base; struct idpf_dma_mem *dma_mem = NULL; struct idpf_hw *hw = &base->hw; - struct virtchnl2_event *vc_event; struct idpf_ctlq_msg ctlq_msg; enum idpf_mbx_opc mbx_op; - struct idpf_vport *vport; uint16_t pending = 1; uint32_t vc_op; int ret; @@ -1523,18 +1533,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) switch (mbx_op) { case idpf_mbq_opc_send_msg_to_peer_pf: if (vc_op == VIRTCHNL2_OP_EVENT) { - if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) { - PMD_DRV_LOG(ERR, "Error event"); - return; - } - vc_event = (struct virtchnl2_event *)base->mbx_resp; - vport = cpfl_find_vport(adapter, vc_event->vport_id); - if (!vport) { - PMD_DRV_LOG(ERR, "Can't find vport."); - return; - } - cpfl_handle_event_msg(vport, base->mbx_resp, - ctlq_msg.data_len); + cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp, + ctlq_msg.data_len); } 
else { if (vc_op == base->pend_cmd) notify_cmd(base, base->cmd_retval); From patchwork Tue Sep 12 16:26:34 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131352 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4AAAE4257B; Tue, 12 Sep 2023 10:08:34 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id CFAFF4068A; Tue, 12 Sep 2023 10:08:12 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id D5333402EF for ; Tue, 12 Sep 2023 10:08:10 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506091; x=1726042091; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=S5/WvtXWXZH/xh+ewEwC3knyw1KE33oBNQsKyT3KzV0=; b=Zdc2Si1YtvzW3aQXFOqRSoxvnqz4+UtF31d0HXIN+J/CaMYKqhqUL5WR 7oJtq03+SDJhgF5+jGGnRlbgiQLSQXd+dmAsx0a8/t1zEspoMmUv5GIkp Js2I46VGcjAYhGqMEat5oWa8G9NA3UZT71R3/YMYNOnk0ND72s34aJNbO WwlnFApDdqaeQxYgNOE3qj+kLDFWtNPu2klDDp7VtLkJIW5CP3nKXHgQg HwU2GBJeoQDpXrrFGO0FjA26tum7yToRqIPm26dHLgkSroWC4s7nGdD9H a3XdgTcErAQQ95iMO4F9PENqrBaM+1pxYDFJz/GVBFc8Nx0ynZXxCRfqj Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639563" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639563" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:10 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702569" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702569" Received: from dpdk-beileix-icelake.sh.intel.com 
([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:08 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing Subject: [PATCH v5 04/10] net/cpfl: introduce CP channel API Date: Tue, 12 Sep 2023 16:26:34 +0000 Message-Id: <20230912162640.1439383-5-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing The CPCHNL2 defines the API (v2) used for communication between the CPF driver and its on-chip management software. The CPFL PMD is a specific CPF driver to utilize CPCHNL2 for device configuration and event probing. Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_cpchnl.h | 340 +++++++++++++++++++++++++++++++++ 1 file changed, 340 insertions(+) create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h new file mode 100644 index 0000000000..2eefcbcc10 --- /dev/null +++ b/drivers/net/cpfl/cpfl_cpchnl.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_CPCHNL_H_ +#define _CPFL_CPCHNL_H_ + +/** @brief Command Opcodes + * Values are to be different from virtchnl.h opcodes + */ +enum cpchnl2_ops { + /* vport info */ + CPCHNL2_OP_GET_VPORT_LIST = 0x8025, + CPCHNL2_OP_GET_VPORT_INFO = 0x8026, + + /* DPHMA Event notifications */ + CPCHNL2_OP_EVENT = 0x8050, +}; + +/* Note! 
This affects the size of structs below */ +#define CPCHNL2_MAX_TC_AMOUNT 8 + +#define CPCHNL2_ETH_LENGTH_OF_ADDRESS 6 + +#define CPCHNL2_FUNC_TYPE_PF 0 +#define CPCHNL2_FUNC_TYPE_SRIOV 1 + +/* vport statuses - must match the DB ones - see enum cp_vport_status*/ +#define CPCHNL2_VPORT_STATUS_CREATED 0 +#define CPCHNL2_VPORT_STATUS_ENABLED 1 +#define CPCHNL2_VPORT_STATUS_DISABLED 2 +#define CPCHNL2_VPORT_STATUS_DESTROYED 3 + +/* Queue Groups Extension */ +/**************************************************/ + +#define MAX_Q_REGIONS 16 +/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer + * no more than 11 queue groups are allowed per a single vport.. + * More will be possible only with future msg fragmentation. + */ +#define MAX_Q_VPORT_GROUPS 11 + +#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \ + { static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) } + +struct cpchnl2_queue_chunk { + u32 type; /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */ + u32 start_queue_id; + u32 num_queues; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk); + +/* structure to specify several chunks of contiguous queues */ +struct cpchnl2_queue_grp_chunks { + u16 num_chunks; + u8 reserved[6]; + struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS]; +}; +CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks); + +struct cpchnl2_rx_queue_group_info { + /* User can ask to update rss_lut size originally allocated + * by CreateVport command. New size will be returned if allocation succeeded, + * otherwise original rss_size from CreateVport will be returned. + */ + u16 rss_lut_size; + u8 pad[6]; /*Future extension purpose*/ +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info); + +struct cpchnl2_tx_queue_group_info { + u8 tx_tc; /*TX TC queue group will be connected to*/ + /* Each group can have its own priority, value 0-7, while each group with unique + * priority is strict priority. 
It can be single set of queue groups which configured with + * same priority, then they are assumed part of WFQ arbitration group and are expected to be + * assigned with weight. + */ + u8 priority; + /* Determines if queue group is expected to be Strict Priority according to its priority */ + u8 is_sp; + u8 pad; + /* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set. + * The weights of the groups are independent of each other. Possible values: 1-200. + */ + u16 pir_weight; + /* Future extension purpose for CIR only */ + u8 cir_pad[2]; + u8 pad2[8]; /* Future extension purpose*/ +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info); + +struct cpchnl2_queue_group_id { + /* Queue group ID - depended on it's type: + * Data & p2p - is an index which is relative to Vport. + * Config & Mailbox - is an ID which is relative to func. + * This ID is used in future calls, i.e. delete. + * Requested by host and assigned by Control plane. + */ + u16 queue_group_id; + /* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */ + u16 queue_group_type; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id); + +struct cpchnl2_queue_group_info { + /* IN */ + struct cpchnl2_queue_group_id qg_id; + + /* IN, Number of queues of different types in the group. 
*/ + u16 num_tx_q; + u16 num_tx_complq; + u16 num_rx_q; + u16 num_rx_bufq; + + struct cpchnl2_tx_queue_group_info tx_q_grp_info; + struct cpchnl2_rx_queue_group_info rx_q_grp_info; + + u8 egress_port; + u8 pad[39]; /*Future extension purpose*/ + struct cpchnl2_queue_grp_chunks chunks; +}; +CPCHNL2_CHECK_STRUCT_LEN(344, cpchnl2_queue_group_info); + +struct cpchnl2_queue_groups { + u16 num_queue_groups; /* Number of queue groups in struct below */ + u8 pad[6]; + /* group information , number is determined by param above */ + struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS]; +}; +CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups); + +/** + * @brief function types + */ +enum cpchnl2_func_type { + CPCHNL2_FTYPE_LAN_PF = 0, + CPCHNL2_FTYPE_LAN_VF = 1, + CPCHNL2_FTYPE_LAN_MAX +}; + +/** + * @brief containing vport id & type + */ +struct cpchnl2_vport_id { + u32 vport_id; + u16 vport_type; + u8 pad[2]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_vport_id); + +struct cpchnl2_func_id { + /* Function type: 0 - LAN PF, 1 - LAN VF, Rest - "reserved" */ + u8 func_type; + /* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs + * and 8-12 CPFs are valid + */ + u8 pf_id; + /* Valid only if "type" above is VF, indexing is relative to PF specified above. */ + u16 vf_id; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_func_id); + +/* Note! Do not change the fields and especially their order as should eventually + * be aligned to 32bit. Must match the virtchnl structure definition. + * If should change, change also the relevant FAS and virtchnl code, under permission. + */ +struct cpchnl2_vport_info { + u16 vport_index; + /* VSI index, global indexing aligned to HW. + * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command. + * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev". + */ + u16 vsi_id; + u8 vport_status; /* enum cpchnl2_vport_status */ + /* 0 - LAN PF, 1 - LAN VF. Rest - reserved. 
Can be later expanded to other PEs */ + u8 func_type; + /* Valid only if "type" above is VF, indexing is relative to PF specified above. */ + u16 vf_id; + /* Always relevant, indexing is according to LAN PE 0-15, + * while only 0-4 APFs and 8-12 CPFs are valid. + */ + u8 pf_id; + u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */ + /* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */ + u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS]; + u16 vmrl_id; + /* Indicates if IMC created SEM MAC rule for this Vport. + * Currently this is done by IMC for all Vport of type "Default" only, + * but can be different in the future. + */ + u8 sem_mac_rule_exist; + /* Bitmask to inform which TC is valid. + * 0x1 << TCnum. 1b: valid else 0. + * Driven by Node Policy on system level, then System level TCs are + * reported to IDPF and it can enable Vport level TCs on TX according + * to System enabled ones. + * If TC aware mode - bit set for valid TC. + * otherwise =1 (only bit 0 is set. represents the VSI + */ + u8 tx_tc_bitmask; + /* For each valid TC, TEID of VPORT node over TC in TX LAN WS. + * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID + */ + u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT]; + /* For each valid TC, bandwidth in mbps. + * Default BW per Vport is from Node policy + * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth + */ + u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT]; + /* From Node Policy. */ + u16 max_mtu; + u16 default_rx_qid; /* Default LAN RX Queue ID */ + u16 vport_flags; /* see: VPORT_FLAGS */ + u8 egress_port; + u8 pad_reserved[5]; +}; +CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info); + +/* + * CPCHNL2_OP_GET_VPORT_LIST + */ + +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request + * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. 
Rest - reserved (see enum cpchnl2_func_type) + * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 + * CPFs are valid + * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above + */ +struct cpchnl2_get_vport_list_request { + u8 func_type; + u8 pf_id; + u16 vf_id; + u8 pad[4]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request); + +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response + * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to + * other PE types + * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 + * CPFs are valid + * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above + * @param nof_vports Number of vports created on the function + * @param vports array of the IDs and types. vport ID is elative to its func (PF/VF). same as in + * Create Vport + * vport_type: Aligned to VirtChnl types: Default, SIOV, etc. 
+ */ +struct cpchnl2_get_vport_list_response { + u8 func_type; + u8 pf_id; + u16 vf_id; + u16 nof_vports; + u8 pad[2]; + struct cpchnl2_vport_id vports[]; +}; +CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_response); + +/* + * CPCHNL2_OP_GET_VPORT_INFO + */ +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request + * @param vport a structure containing vport_id (relative to function) and type + * @param func a structure containing function type, pf_id, vf_id + */ +struct cpchnl2_get_vport_info_request { + struct cpchnl2_vport_id vport; + struct cpchnl2_func_id func; +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_get_vport_info_request); + +/** + * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response + * @param vport a structure containing vport_id (relative to function) and type to get info for + * @param info a structure all the information for a given vport + * @param queue_groups a structure containing all the queue groups of the given vport + */ +struct cpchnl2_get_vport_info_response { + struct cpchnl2_vport_id vport; + struct cpchnl2_vport_info info; + struct cpchnl2_queue_groups queue_groups; +}; +CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_get_vport_info_response); + + /* Cpchnl events + * Sends event message to inform the peer of notification that may affect it. + * No direct response is expected from the peer, though it may generate other + * messages in response to this one. 
+ */ +enum cpchnl2_event { + CPCHNL2_EVENT_UNKNOWN = 0, + CPCHNL2_EVENT_VPORT_CREATED, + CPCHNL2_EVENT_VPORT_DESTROYED, + CPCHNL2_EVENT_VPORT_ENABLED, + CPCHNL2_EVENT_VPORT_DISABLED, + CPCHNL2_PKG_EVENT, + CPCHNL2_EVENT_ADD_QUEUE_GROUPS, + CPCHNL2_EVENT_DEL_QUEUE_GROUPS, + CPCHNL2_EVENT_ADD_QUEUES, + CPCHNL2_EVENT_DEL_QUEUES +}; + +/* + * This is for CPCHNL2_EVENT_VPORT_CREATED + */ +struct cpchnl2_event_vport_created { + struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */ + struct cpchnl2_vport_info info; /* Vport configuration info */ + struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */ +}; +CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_event_vport_created); + +/* + * This is for CPCHNL2_EVENT_VPORT_DESTROYED + */ +struct cpchnl2_event_vport_destroyed { + /* Vport identifier to point to specific Vport */ + struct cpchnl2_vport_id vport; + struct cpchnl2_func_id func; +}; +CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_event_vport_destroyed); + +struct cpchnl2_event_info { + struct { + s32 type; /* See enum cpchnl2_event */ + uint8_t reserved[4]; /* Reserved */ + } header; + union { + struct cpchnl2_event_vport_created vport_created; + struct cpchnl2_event_vport_destroyed vport_destroyed; + } data; +}; + +#endif /* _CPFL_CPCHNL_H_ */ From patchwork Tue Sep 12 16:26:35 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131353 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 200C84257B; Tue, 12 Sep 2023 10:08:41 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 240B0402F1; Tue, 12 Sep 2023 10:08:16 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com 
[192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id AA7A84067E for ; Tue, 12 Sep 2023 10:08:12 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506092; x=1726042092; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=fku0Odt8rIXRqThBWMru9DZoE4mMDac2rDFyHqwMyJQ=; b=FoG7T5rxyMV7KIUg+jnln4DdAsBzphDOXeQJ4/K82JRmsXmOVKYAJjNy q26+9brUE3DFUUtpHpFPoKRc4MsAjUsstMElnvuhjUPLoodEU/51qGFb2 9o9ls/ZUeDfmoVDjbP4Q56J6M1NcjPQH9+mSqCHfcft4vXx36MDR9v+/U V26kOGLKuPZTm5sntDnSbBIkG5Vov2c1wX5FRgHr9XsCJDm/uwhlTg4Z5 9vx5AMXgY2Jc/fe9//wFp6ZMSuQIAOQx7vqUWjUexNTXKYsrkxmVPGiUf Aut9MBBcmgksLH1P1ywIQmD83BQ2DDY0+rB3sS7dsQawQe0x8aIqpetPX A==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639565" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639565" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:12 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702573" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702573" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:10 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 05/10] net/cpfl: enable vport mapping Date: Tue, 12 Sep 2023 16:26:35 +0000 Message-Id: <20230912162640.1439383-6-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , 
Errors-To: dev-bounces@dpdk.org From: Beilei Xing 1. Handle cpchnl event for vport create/destroy 2. Use hash table to store vport_id to vport_info mapping 3. Use spinlock for thread safe. Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_ethdev.h | 21 ++++- drivers/net/cpfl/meson.build | 2 +- 3 files changed, 177 insertions(+), 3 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 31a5822d2c..ad21f901bb 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "cpfl_ethdev.h" #include "cpfl_rxtx.h" @@ -1504,6 +1505,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint } } +static int +cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vport_identity, + struct cpchnl2_vport_info *vport_info) +{ + struct cpfl_vport_info *info = NULL; + int ret; + + rte_spinlock_lock(&adapter->vport_map_lock); + ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info); + if (ret >= 0) { + PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway"); + /* overwrite info */ + if (info) + info->vport_info = *vport_info; + goto fini; + } + + info = rte_zmalloc(NULL, sizeof(*info), 0); + if (info == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info"); + ret = -ENOMEM; + goto err; + } + + info->vport_info = *vport_info; + + ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add vport map into hash"); + rte_free(info); + goto err; + } + +fini: + rte_spinlock_unlock(&adapter->vport_map_lock); + return 0; +err: + rte_spinlock_unlock(&adapter->vport_map_lock); + return ret; +} + +static int +cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id 
*vport_identity) +{ + struct cpfl_vport_info *info; + int ret; + + rte_spinlock_lock(&adapter->vport_map_lock); + ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "vport id not exist"); + goto err; + } + + rte_hash_del_key(adapter->vport_map_hash, vport_identity); + rte_spinlock_unlock(&adapter->vport_map_lock); + rte_free(info); + + return 0; + +err: + rte_spinlock_unlock(&adapter->vport_map_lock); + return ret; +} + +static void +cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen) +{ + struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg; + struct cpchnl2_vport_info *info; + struct cpfl_vport_id vport_identity = { 0 }; + + if (msglen < sizeof(struct cpchnl2_event_info)) { + PMD_DRV_LOG(ERR, "Error event"); + return; + } + + switch (cpchnl2_event->header.type) { + case CPCHNL2_EVENT_VPORT_CREATED: + vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id; + info = &cpchnl2_event->data.vport_created.info; + vport_identity.func_type = info->func_type; + vport_identity.pf_id = info->pf_id; + vport_identity.vf_id = info->vf_id; + if (cpfl_vport_info_create(adapter, &vport_identity, info)) + PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED"); + break; + case CPCHNL2_EVENT_VPORT_DESTROYED: + vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id; + vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type; + vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id; + vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id; + if (cpfl_vport_info_destroy(adapter, &vport_identity)) + PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY"); + break; + default: + PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type); + break; + } +} + static void cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter) { @@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) if (vc_op == VIRTCHNL2_OP_EVENT) { cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp, ctlq_msg.data_len); + } else if (vc_op == CPCHNL2_OP_EVENT) { + cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp, + ctlq_msg.data_len); } else { if (vc_op == base->pend_cmd) notify_cmd(base, base->cmd_retval); @@ -1610,6 +1716,48 @@ static struct virtchnl2_get_capabilities req_caps = { .other_caps = VIRTCHNL2_CAP_WB_ON_ITR }; +static int +cpfl_vport_map_init(struct cpfl_adapter_ext *adapter) +{ + char hname[32]; + + snprintf(hname, 32, "%s-vport", adapter->name); + + rte_spinlock_init(&adapter->vport_map_lock); + +#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048 + + struct rte_hash_parameters params = { + .name = adapter->name, + .entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM, + .key_len = sizeof(struct cpfl_vport_id), + .hash_func = rte_hash_crc, + .socket_id = SOCKET_ID_ANY, + }; + + adapter->vport_map_hash = rte_hash_create(¶ms); + + if (adapter->vport_map_hash == NULL) { + PMD_INIT_LOG(ERR, "Failed to create vport map hash"); + return -EINVAL; + } + + return 0; +} + +static void +cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter) +{ + const void *key = NULL; + struct cpfl_vport_map_info *info; + uint32_t iter = 0; + + while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0) + rte_free(info); + + rte_hash_free(adapter->vport_map_hash); +} + static int cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -1634,6 +1782,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a goto err_adapter_init; } + ret = cpfl_vport_map_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init vport map"); + goto err_vport_map_init; + } + rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter); adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ? 
@@ -1658,6 +1812,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a err_vports_alloc: rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); + cpfl_vport_map_uninit(adapter); +err_vport_map_init: idpf_adapter_deinit(base); err_adapter_init: return ret; @@ -1887,6 +2043,7 @@ static void cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter) { rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); + cpfl_vport_map_uninit(adapter); idpf_adapter_deinit(&adapter->base); rte_free(adapter->vports); diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index feb1edc4b8..de86c49016 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -10,16 +10,18 @@ #include #include #include +#include #include #include -#include "cpfl_logs.h" - #include #include #include #include +#include "cpfl_logs.h" +#include "cpfl_cpchnl.h" + /* Currently, backend supports up to 8 vports */ #define CPFL_MAX_VPORT_NUM 8 @@ -86,6 +88,18 @@ struct p2p_queue_chunks_info { uint32_t rx_buf_qtail_spacing; }; +struct cpfl_vport_id { + uint32_t vport_id; + uint8_t func_type; + uint8_t pf_id; + uint16_t vf_id; +}; + +struct cpfl_vport_info { + struct cpchnl2_vport_info vport_info; + bool enabled; +}; + enum cpfl_itf_type { CPFL_ITF_TYPE_VPORT, }; @@ -127,6 +141,9 @@ struct cpfl_adapter_ext { uint16_t used_vecs_num; struct cpfl_devargs devargs; + + rte_spinlock_t vport_map_lock; + struct rte_hash *vport_map_hash; }; TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext); diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 8d62ebfd77..28167bb81d 100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0 subdir_done() endif -deps += ['common_idpf'] +deps += ['hash', 'common_idpf'] sources = files( 'cpfl_ethdev.c', From patchwork Tue Sep 12 16:26:36 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 
Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131354 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 280294257B; Tue, 12 Sep 2023 10:08:48 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5C0E440647; Tue, 12 Sep 2023 10:08:18 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id E5FF940693 for ; Tue, 12 Sep 2023 10:08:14 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506095; x=1726042095; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=9JFeyaIM8QJCwDjJb+6HGRyI/IrnpSUF5+EwuRDHvLQ=; b=PZ2hefxQfjFXTIAVxLYJdtdEAYFDmQaL2WAZbIAQEiKdM9hK4//ILmgA ijJhE0YdDM9AjcSzCwhnTcekvmq4ZQXbvNRRot/ZQp4hkEHHtymhzBn4F Lsy6GCkmXGOXZq+fyANybriPWdeQn80Y3o0iMfh8au5kCbdg+bZLKHlTL WGQtTMUPQeV+//j0q6KMe4sgGFZXxMZc1mQmqsICXwnnUTmOFYkpn0Oke ENdY9tj7VM29rl5pndxxgDK8N0u+pfELIwe1wUQn1K2p6WS4jzol04pbz naRvx0p9sjlmPy7YjhGTqbFkJaFdpQKoF6ALSG2U9Tek13LRfMQksEOUp A==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639571" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639571" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:14 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702577" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702577" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:12 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei 
Xing Subject: [PATCH v5 06/10] net/cpfl: support vport list/info get Date: Tue, 12 Sep 2023 16:26:36 +0000 Message-Id: <20230912162640.1439383-7-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and CPCHNL2_OP_CPF_GET_VPORT_INFO. Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.h | 8 ++++ drivers/net/cpfl/cpfl_vchnl.c | 72 ++++++++++++++++++++++++++++++++++ drivers/net/cpfl/meson.build | 1 + 3 files changed, 81 insertions(+) create mode 100644 drivers/net/cpfl/cpfl_vchnl.c diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index de86c49016..4975c05a55 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -148,6 +148,14 @@ struct cpfl_adapter_ext { TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext); +int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_list_response *response); +int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter, + struct cpchnl2_vport_id *vport_id, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_info_response *response); + #define CPFL_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) #define CPFL_ADAPTER_TO_EXT(p) \ diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c new file mode 100644 index 0000000000..a21a4a451f --- /dev/null +++ b/drivers/net/cpfl/cpfl_vchnl.c @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#include "cpfl_ethdev.h" +#include + 
+int +cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_list_response *response) +{ + struct cpchnl2_get_vport_list_request request; + struct idpf_cmd_info args; + int err; + + memset(&request, 0, sizeof(request)); + request.func_type = vi->func_type; + request.pf_id = vi->pf_id; + request.vf_id = vi->vf_id; + + memset(&args, 0, sizeof(args)); + args.ops = CPCHNL2_OP_GET_VPORT_LIST; + args.in_args = (uint8_t *)&request; + args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request); + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST"); + return err; + } + + rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE); + + return 0; +} + +int +cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter, + struct cpchnl2_vport_id *vport_id, + struct cpfl_vport_id *vi, + struct cpchnl2_get_vport_info_response *response) +{ + struct cpchnl2_get_vport_info_request request; + struct idpf_cmd_info args; + int err; + + request.vport.vport_id = vport_id->vport_id; + request.vport.vport_type = vport_id->vport_type; + request.func.func_type = vi->func_type; + request.func.pf_id = vi->pf_id; + request.func.vf_id = vi->vf_id; + + memset(&args, 0, sizeof(args)); + args.ops = CPCHNL2_OP_GET_VPORT_INFO; + args.in_args = (uint8_t *)&request; + args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request); + args.out_buffer = adapter->base.mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(&adapter->base, &args); + if (err != 0) { + PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO"); + return err; + } + + rte_memcpy(response, args.out_buffer, sizeof(*response)); + + return 0; +} diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 28167bb81d..2f0f5d8434 
100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf'] sources = files( 'cpfl_ethdev.c', 'cpfl_rxtx.c', + 'cpfl_vchnl.c', ) if arch_subdir == 'x86' From patchwork Tue Sep 12 16:26:37 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131355 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A1D444257B; Tue, 12 Sep 2023 10:08:56 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 047D6406B8; Tue, 12 Sep 2023 10:08:22 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 6B5E840698 for ; Tue, 12 Sep 2023 10:08:17 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506097; x=1726042097; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=X//ZrO/Nll0/JMjFpxi32wiAdHBbsHQo3FKb8TqXLHo=; b=godM4j8bOV42IazTXsXRoZ595b0eQdvH928ypwd/XHDZFn8WnLWfn42A aQXLRuFGwo/gi1Je1HelQtJMUMt0bwArPvznQsQnJpV7XMvvHggptWDVA 3uwVA/wPSX3287nu+Q2uSBlCOmI0xacLQvyp7ASH/xnv40sCd428loris L19Um6mVeT/MGIr6GzkA7UzKE713v5k4ldNWHEtTMTcs5QYeE6B3KZKcA VdjkDgZM2JvfkOGokG6ahj1dZ5ZM0ReTbR9vbRUsm0UBD6ttUfX9GkjB6 27ohkYqA4V39IV/F+P40zTjEY7AEwSi4PCYjxLbl0mYtZhXG7uotAccG7 w==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639575" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639575" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:17 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; 
a="858702582" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702582" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:14 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 07/10] net/cpfl: parse representor devargs Date: Tue, 12 Sep 2023 16:26:37 +0000 Message-Id: <20230912162640.1439383-8-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Format: [[c]pf]vf controller_id: 0 : host (default) 1: acc pf_id: 0 : apf (default) 1 : cpf Example: representor=c0pf0vf[0-3] -- host > apf > vf 0,1,2,3 same as pf0vf[0-3] and vf[0-3] if omit default value. representor=c0pf0 -- host > apf same as pf0 if omit default value. representor=c1pf0 -- accelerator core > apf multiple representor devargs are supported. e.g.: create 4 representors for 4 vfs on host APF and one representor for APF on accelerator core. -- representor=vf[0-3],representor=c1pf0 Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- doc/guides/nics/cpfl.rst | 36 +++++ doc/guides/rel_notes/release_23_11.rst | 3 + drivers/net/cpfl/cpfl_ethdev.c | 179 +++++++++++++++++++++++++ drivers/net/cpfl/cpfl_ethdev.h | 8 ++ 4 files changed, 226 insertions(+) diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst index 39a2b603f3..83a18c3f2e 100644 --- a/doc/guides/nics/cpfl.rst +++ b/doc/guides/nics/cpfl.rst @@ -92,6 +92,42 @@ Runtime Configuration Then the PMD will configure Tx queue with single queue mode. 
Otherwise, split queue mode is chosen by default. +- ``representor`` (default ``not enabled``) + + The cpfl PMD supports the creation of APF/CPF/VF port representors. + Each port representor corresponds to a single function of that device. + Using the ``devargs`` option ``representor`` the user can specify + which functions to create port representors. + + Format is:: + + [[c]pf]vf + + Controller_id 0 is host (default), while 1 is accelerator core. + Pf_id 0 is APF (default), while 1 is CPF. + Default value can be omitted. + + Create 4 representors for 4 vfs on host APF:: + + -a BDF,representor=c0pf0vf[0-3] + + Or:: + + -a BDF,representor=pf0vf[0-3] + + Or:: + + -a BDF,representor=vf[0-3] + + Create a representor for CPF on accelerator core:: + + -a BDF,representor=c1pf1 + + Multiple representor devargs are supported. Create 4 representors for 4 + vfs on host APF and one representor for CPF on accelerator core:: + + -a BDF,representor=vf[0-3],representor=c1pf1 + Driver compilation and testing ------------------------------ diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst index 333e1d95a2..3d9be208d0 100644 --- a/doc/guides/rel_notes/release_23_11.rst +++ b/doc/guides/rel_notes/release_23_11.rst @@ -78,6 +78,9 @@ New Features * build: Optional libraries can now be selected with the new ``enable_libs`` build option similarly to the existing ``enable_drivers`` build option. +* **Updated Intel cpfl driver.** + + * Added support for port representor. 
Removed Items ------------- diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index ad21f901bb..eb57e355d2 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -13,8 +13,10 @@ #include #include "cpfl_ethdev.h" +#include #include "cpfl_rxtx.h" +#define CPFL_REPRESENTOR "representor" #define CPFL_TX_SINGLE_Q "tx_single" #define CPFL_RX_SINGLE_Q "rx_single" #define CPFL_VPORT "vport" @@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list; bool cpfl_adapter_list_init; static const char * const cpfl_valid_args[] = { + CPFL_REPRESENTOR, CPFL_TX_SINGLE_Q, CPFL_RX_SINGLE_Q, CPFL_VPORT, @@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args) return 0; } +static int +enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val) +{ + uint16_t i; + + for (i = 0; i < *len_list; i++) { + if (list[i] == val) + return 0; + } + if (*len_list >= max_list) + return -1; + list[(*len_list)++] = val; + return 0; +} + +static const char * +process_range(const char *str, uint16_t *list, uint16_t *len_list, + const uint16_t max_list) +{ + uint16_t lo, hi, val; + int result, n = 0; + const char *pos = str; + + result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n); + if (result == 1) { + if (enlist(list, len_list, max_list, lo) != 0) + return NULL; + } else if (result == 2) { + if (lo > hi) + return NULL; + for (val = lo; val <= hi; val++) { + if (enlist(list, len_list, max_list, val) != 0) + return NULL; + } + } else { + return NULL; + } + return pos + n; +} + +static const char * +process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list) +{ + const char *pos = str; + + if (*pos == '[') + pos++; + while (1) { + pos = process_range(pos, list, len_list, max_list); + if (pos == NULL) + return NULL; + if (*pos != ',') /* end of list */ + break; + pos++; + } + if (*str == '[' && *pos != ']') + return NULL; + if (*pos == ']') + pos++; + return pos; +} + 
+static int +parse_repr(const char *key __rte_unused, const char *value, void *args) +{ + struct cpfl_devargs *devargs = args; + struct rte_eth_devargs *eth_da; + const char *str = value; + + if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX) + return -EINVAL; + + eth_da = &devargs->repr_args[devargs->repr_args_num]; + + if (str[0] == 'c') { + str += 1; + str = process_list(str, eth_da->mh_controllers, + ð_da->nb_mh_controllers, + RTE_DIM(eth_da->mh_controllers)); + if (str == NULL) + goto done; + } + if (str[0] == 'p' && str[1] == 'f') { + eth_da->type = RTE_ETH_REPRESENTOR_PF; + str += 2; + str = process_list(str, eth_da->ports, + ð_da->nb_ports, RTE_DIM(eth_da->ports)); + if (str == NULL || str[0] == '\0') + goto done; + } else if (eth_da->nb_mh_controllers > 0) { + /* 'c' must followed by 'pf'. */ + str = NULL; + goto done; + } + if (str[0] == 'v' && str[1] == 'f') { + eth_da->type = RTE_ETH_REPRESENTOR_VF; + str += 2; + } else if (str[0] == 's' && str[1] == 'f') { + eth_da->type = RTE_ETH_REPRESENTOR_SF; + str += 2; + } else { + /* 'pf' must followed by 'vf' or 'sf'. 
*/ + if (eth_da->type == RTE_ETH_REPRESENTOR_PF) { + str = NULL; + goto done; + } + eth_da->type = RTE_ETH_REPRESENTOR_VF; + } + str = process_list(str, eth_da->representor_ports, + ð_da->nb_representor_ports, + RTE_DIM(eth_da->representor_ports)); +done: + if (str == NULL) { + RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str); + return -1; + } + + devargs->repr_args_num++; + + return 0; +} + static int cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap return -EINVAL; } + cpfl_args->repr_args_num = 0; + ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args); + + if (ret != 0) + goto fail; + ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport, cpfl_args); if (ret != 0) @@ -2087,6 +2218,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter) return 0; } +static int +cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_devargs *devargs = &adapter->devargs; + int i, j; + + /* check and refine repr args */ + for (i = 0; i < devargs->repr_args_num; i++) { + struct rte_eth_devargs *eth_da = &devargs->repr_args[i]; + + /* set default host_id to xeon host */ + if (eth_da->nb_mh_controllers == 0) { + eth_da->nb_mh_controllers = 1; + eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST; + } else { + for (j = 0; j < eth_da->nb_mh_controllers; j++) { + if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) { + PMD_INIT_LOG(ERR, "Invalid Host ID %d", + eth_da->mh_controllers[j]); + return -EINVAL; + } + } + } + + /* set default pf to APF */ + if (eth_da->nb_ports == 0) { + eth_da->nb_ports = 1; + eth_da->ports[0] = CPFL_PF_TYPE_APF; + } else { + for (j = 0; j < eth_da->nb_ports; j++) { + if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) { + PMD_INIT_LOG(ERR, "Invalid Host ID %d", + eth_da->ports[j]); + return -EINVAL; + } + } + } + } + + return 0; +} + static int cpfl_vport_create(struct 
rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -2165,6 +2338,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, goto err; } + retval = cpfl_repr_devargs_process(adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to process repr devargs"); + goto err; + } + return 0; err: diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 4975c05a55..b03666f5ea 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -60,16 +60,24 @@ #define IDPF_DEV_ID_CPF 0x1453 #define VIRTCHNL2_QUEUE_GROUP_P2P 0x100 +#define CPFL_HOST_ID_HOST 0 +#define CPFL_HOST_ID_ACC 1 +#define CPFL_PF_TYPE_APF 0 +#define CPFL_PF_TYPE_CPF 1 + struct cpfl_vport_param { struct cpfl_adapter_ext *adapter; uint16_t devarg_id; /* arg id from user */ uint16_t idx; /* index in adapter->vports[]*/ }; +#define CPFL_REPR_ARG_NUM_MAX 4 /* Struct used when parse driver specific devargs */ struct cpfl_devargs { uint16_t req_vports[CPFL_MAX_VPORT_NUM]; uint16_t req_vport_nb; + uint8_t repr_args_num; + struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX]; }; struct p2p_queue_chunks_info { From patchwork Tue Sep 12 16:26:38 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131356 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 838E54257B; Tue, 12 Sep 2023 10:09:02 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2DFD240A6C; Tue, 12 Sep 2023 10:08:23 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id C5512406B8 for ; Tue, 12 Sep 2023 10:08:19 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; 
d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506099; x=1726042099; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=f4ZOyKdAdAjFQkg5HnCqqMbHp7bys2IqFDgHZZqZX84=; b=JM+xPxHtWrRRyVSwgwYP4yXIX2J6rJ4LzvTw1BpRyg+ITFjBtdZtJIsL 1hjNJ5UQybb+Hr2qNJSVlauLYekwoaZEY4Qs0K0LtGlqyOFrYqyIeGY94 LOB3pKJfjb2ILTGrdzqO9TnZb6DJK3GDDJLCYFAiLJwZ4ttLzkBBOLTEu iAjjwdmY7IDckY6izlHKxk1HA+irnFmBU8sML1vHYevpO7OaucJGQXzyh FeY2+DpU6EO+YOApqOoZihg7tXiFSVacQEaJWyWKy7gwXeba7s6RaamT/ SUcTR3gd53TlI+t7nmU/JaCKdSseMEeD8IDxNUFipAahq16I+VHNvEOsO Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639596" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639596" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:19 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702589" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702589" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:17 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 08/10] net/cpfl: support probe again Date: Tue, 12 Sep 2023 16:26:38 +0000 Message-Id: <20230912162640.1439383-9-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Only representor will be parsed for probe again. 
Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++------- 1 file changed, 56 insertions(+), 13 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index eb57e355d2..47c4c5c796 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock; struct cpfl_adapter_list cpfl_adapter_list; bool cpfl_adapter_list_init; -static const char * const cpfl_valid_args[] = { +static const char * const cpfl_valid_args_first[] = { CPFL_REPRESENTOR, CPFL_TX_SINGLE_Q, CPFL_RX_SINGLE_Q, @@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = { NULL }; +static const char * const cpfl_valid_args_again[] = { + CPFL_REPRESENTOR, + NULL +}; + uint32_t cpfl_supported_speeds[] = { RTE_ETH_SPEED_NUM_NONE, RTE_ETH_SPEED_NUM_10M, @@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args) } static int -cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) +cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first) { struct rte_devargs *devargs = pci_dev->device.devargs; struct cpfl_devargs *cpfl_args = &adapter->devargs; @@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap if (devargs == NULL) return 0; - kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args); + kvlist = rte_kvargs_parse(devargs->args, + first ? 
cpfl_valid_args_first : cpfl_valid_args_again); if (kvlist == NULL) { PMD_INIT_LOG(ERR, "invalid kvargs key"); return -EINVAL; @@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap if (ret != 0) goto fail; + if (!first) + return 0; + ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport, cpfl_args); if (ret != 0) @@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt } static int -cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) +cpfl_pci_probe_first(struct rte_pci_device *pci_dev) { struct cpfl_adapter_ext *adapter; int retval; - if (!cpfl_adapter_list_init) { - rte_spinlock_init(&cpfl_adapter_lock); - TAILQ_INIT(&cpfl_adapter_list); - cpfl_adapter_list_init = true; - } - adapter = rte_zmalloc("cpfl_adapter_ext", sizeof(struct cpfl_adapter_ext), 0); if (adapter == NULL) { @@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return -ENOMEM; } - retval = cpfl_parse_devargs(pci_dev, adapter); + retval = cpfl_parse_devargs(pci_dev, adapter, true); if (retval != 0) { PMD_INIT_LOG(ERR, "Failed to parse private devargs"); return retval; @@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, return retval; } +static int +cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) +{ + int ret; + + ret = cpfl_parse_devargs(pci_dev, adapter, false); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to parse private devargs"); + return ret; + } + + ret = cpfl_repr_devargs_process(adapter); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs"); + return ret; + } + + return 0; +} + +static int +cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct cpfl_adapter_ext *adapter; + + if (!cpfl_adapter_list_init) { + rte_spinlock_init(&cpfl_adapter_lock); + 
TAILQ_INIT(&cpfl_adapter_list); + cpfl_adapter_list_init = true; + } + + adapter = cpfl_find_adapter_ext(pci_dev); + + if (adapter == NULL) + return cpfl_pci_probe_first(pci_dev); + else + return cpfl_pci_probe_again(pci_dev, adapter); +} + static int cpfl_pci_remove(struct rte_pci_device *pci_dev) { @@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_cpfl_pmd = { .id_table = pci_id_cpfl_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | + RTE_PCI_DRV_PROBE_AGAIN, .probe = cpfl_pci_probe, .remove = cpfl_pci_remove, }; From patchwork Tue Sep 12 16:26:39 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131357 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 227694257B; Tue, 12 Sep 2023 10:09:08 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 66CEC4069F; Tue, 12 Sep 2023 10:08:25 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id B508340A67 for ; Tue, 12 Sep 2023 10:08:22 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506102; x=1726042102; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=U8cuU/bTNF4ORu2CMSJfjHD22ub9JpuEDzHCHRQMMSs=; b=Ri8SV+HfD5IByiVnLInnjraFS9EadD2aPDXBbDcFfyrKssFDJAJdwusg OwD0TkN9ZPD51CGtPCMazfy2WCveDugPRIean7pu20DapuuO5fib47lM3 +hocl/o5gl0xq0ddDlv2x09TyeAwJPEpLo4x3Oa9byYYMDfJUBFC6h2zJ il8isIHnL2mQ5CCtCoynK5qArfs5vWcaLL04FI5+xPgBiYaKuQll5Ja0P pWIIrSpcCe3DX671CWE7lOw2myLB7yU6M7xMBZQrVvqpqaSKX0GVlCvnJ 
MXcJSOGbuoUlreTo80DDdrOxD3x8t8CqOhsCSFbPZkHW8G9CZBnG9J5jP A==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639599" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639599" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:22 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702593" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702593" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:19 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing , Qi Zhang Subject: [PATCH v5 09/10] net/cpfl: create port representor Date: Tue, 12 Sep 2023 16:26:39 +0000 Message-Id: <20230912162640.1439383-10-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Track representor request in the allowlist. Representor will only be created for active vport. 
Signed-off-by: Jingjing Wu Signed-off-by: Qi Zhang Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 117 +++--- drivers/net/cpfl/cpfl_ethdev.h | 39 +- drivers/net/cpfl/cpfl_representor.c | 581 ++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_representor.h | 26 ++ drivers/net/cpfl/meson.build | 1 + 5 files changed, 715 insertions(+), 49 deletions(-) create mode 100644 drivers/net/cpfl/cpfl_representor.c create mode 100644 drivers/net/cpfl/cpfl_representor.h diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 47c4c5c796..375bc8098c 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1645,10 +1645,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint } } -static int +int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity, - struct cpchnl2_vport_info *vport_info) + struct cpchnl2_event_vport_created *vport_created) { struct cpfl_vport_info *info = NULL; int ret; @@ -1659,7 +1659,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway"); /* overwrite info */ if (info) - info->vport_info = *vport_info; + info->vport = *vport_created; goto fini; } @@ -1670,7 +1670,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, goto err; } - info->vport_info = *vport_info; + info->vport = *vport_created; ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info); if (ret < 0) { @@ -1696,7 +1696,7 @@ cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id * rte_spinlock_lock(&adapter->vport_map_lock); ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info); if (ret < 0) { - PMD_DRV_LOG(ERR, "vport id not exist"); + PMD_DRV_LOG(ERR, "vport id does not exist"); goto err; } @@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter) rte_hash_free(adapter->vport_map_hash); 
} +static int +cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter) +{ + char hname[32]; + + snprintf(hname, 32, "%s-repr_al", adapter->name); + + rte_spinlock_init(&adapter->repr_lock); + +#define CPFL_REPR_HASH_ENTRY_NUM 2048 + + struct rte_hash_parameters params = { + .name = hname, + .entries = CPFL_REPR_HASH_ENTRY_NUM, + .key_len = sizeof(struct cpfl_repr_id), + .hash_func = rte_hash_crc, + .socket_id = SOCKET_ID_ANY, + }; + + adapter->repr_allowlist_hash = rte_hash_create(¶ms); + + if (adapter->repr_allowlist_hash == NULL) { + PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash"); + return -EINVAL; + } + + return 0; +} + +static void +cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter) +{ + rte_hash_free(adapter->repr_allowlist_hash); +} + + static int cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a goto err_vport_map_init; } + ret = cpfl_repr_allowlist_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init representor allowlist"); + goto err_repr_allowlist_init; + } + rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter); adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ? 
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a err_vports_alloc: rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); + cpfl_repr_allowlist_uninit(adapter); +err_repr_allowlist_init: cpfl_vport_map_uninit(adapter); err_vport_map_init: idpf_adapter_deinit(base); @@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter) return 0; } -static int -cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter) -{ - struct cpfl_devargs *devargs = &adapter->devargs; - int i, j; - - /* check and refine repr args */ - for (i = 0; i < devargs->repr_args_num; i++) { - struct rte_eth_devargs *eth_da = &devargs->repr_args[i]; - - /* set default host_id to xeon host */ - if (eth_da->nb_mh_controllers == 0) { - eth_da->nb_mh_controllers = 1; - eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST; - } else { - for (j = 0; j < eth_da->nb_mh_controllers; j++) { - if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) { - PMD_INIT_LOG(ERR, "Invalid Host ID %d", - eth_da->mh_controllers[j]); - return -EINVAL; - } - } - } - - /* set default pf to APF */ - if (eth_da->nb_ports == 0) { - eth_da->nb_ports = 1; - eth_da->ports[0] = CPFL_PF_TYPE_APF; - } else { - for (j = 0; j < eth_da->nb_ports; j++) { - if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) { - PMD_INIT_LOG(ERR, "Invalid Host ID %d", - eth_da->ports[j]); - return -EINVAL; - } - } - } - } - - return 0; -} - static int cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) { @@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev) { struct cpfl_adapter_ext *adapter; int retval; + uint16_t port_id; adapter = rte_zmalloc("cpfl_adapter_ext", sizeof(struct cpfl_adapter_ext), 0); @@ -2343,11 +2346,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev) retval = cpfl_repr_devargs_process(adapter); if (retval != 0) { PMD_INIT_LOG(ERR, "Failed to process repr devargs"); - goto err; + goto close_ethdev; } + retval = 
cpfl_repr_create(pci_dev, adapter); + if (retval != 0) { + PMD_INIT_LOG(ERR, "Failed to create representors "); + goto close_ethdev; + } + + return 0; +close_ethdev: + /* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */ + RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { + rte_eth_dev_close(port_id); + } err: rte_spinlock_lock(&cpfl_adapter_lock); TAILQ_REMOVE(&cpfl_adapter_list, adapter, next); @@ -2374,6 +2389,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad return ret; } + ret = cpfl_repr_create(pci_dev, adapter); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to create representors "); + return ret; + } + return 0; } diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index b03666f5ea..a4ffd51fb3 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -21,6 +21,7 @@ #include "cpfl_logs.h" #include "cpfl_cpchnl.h" +#include "cpfl_representor.h" /* Currently, backend supports up to 8 vports */ #define CPFL_MAX_VPORT_NUM 8 @@ -60,11 +61,31 @@ #define IDPF_DEV_ID_CPF 0x1453 #define VIRTCHNL2_QUEUE_GROUP_P2P 0x100 +#define CPFL_HOST_ID_NUM 2 +#define CPFL_PF_TYPE_NUM 2 #define CPFL_HOST_ID_HOST 0 #define CPFL_HOST_ID_ACC 1 #define CPFL_PF_TYPE_APF 0 #define CPFL_PF_TYPE_CPF 1 +/* Function IDs on IMC side */ +#define CPFL_HOST0_APF 0 +#define CPFL_ACC_APF_ID 4 +#define CPFL_HOST0_CPF_ID 8 +#define CPFL_ACC_CPF_ID 12 + +#define CPFL_VPORT_LAN_PF 0 +#define CPFL_VPORT_LAN_VF 1 + +/* bit[15:14] type + * bit[13] host/accelerator core + * bit[12] apf/cpf + * bit[11:0] vf + */ +#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id) \ + ((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) + \ + (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff)) + struct cpfl_vport_param { struct cpfl_adapter_ext *adapter; uint16_t devarg_id; /* arg id from user */ @@ -104,12 +125,13 @@ struct cpfl_vport_id { }; struct cpfl_vport_info { - struct cpchnl2_vport_info vport_info; + 
struct cpchnl2_event_vport_created vport; bool enabled; }; enum cpfl_itf_type { CPFL_ITF_TYPE_VPORT, + CPFL_ITF_TYPE_REPRESENTOR, }; struct cpfl_itf { @@ -135,6 +157,13 @@ struct cpfl_vport { bool p2p_manual_bind; }; +struct cpfl_repr { + struct cpfl_itf itf; + struct cpfl_repr_id repr_id; + struct rte_ether_addr mac_addr; + struct cpfl_vport_info *vport_info; +}; + struct cpfl_adapter_ext { TAILQ_ENTRY(cpfl_adapter_ext) next; struct idpf_adapter base; @@ -152,10 +181,16 @@ struct cpfl_adapter_ext { rte_spinlock_t vport_map_lock; struct rte_hash *vport_map_hash; + + rte_spinlock_t repr_lock; + struct rte_hash *repr_allowlist_hash; }; TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext); +int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter, + struct cpfl_vport_id *vport_identity, + struct cpchnl2_event_vport_created *vport); int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vi, struct cpchnl2_get_vport_list_response *response); @@ -170,6 +205,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter, container_of((p), struct cpfl_adapter_ext, base) #define CPFL_DEV_TO_VPORT(dev) \ ((struct cpfl_vport *)((dev)->data->dev_private)) +#define CPFL_DEV_TO_REPR(dev) \ + ((struct cpfl_repr *)((dev)->data->dev_private)) #define CPFL_DEV_TO_ITF(dev) \ ((struct cpfl_itf *)((dev)->data->dev_private)) diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c new file mode 100644 index 0000000000..d2558c39a8 --- /dev/null +++ b/drivers/net/cpfl/cpfl_representor.c @@ -0,0 +1,581 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Intel Corporation + */ + +#include "cpfl_representor.h" +#include "cpfl_rxtx.h" + +static int +cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter, + struct cpfl_repr_id *repr_id, + struct rte_eth_dev *dev) +{ + int ret; + + if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0) + return -ENOENT; + + ret = 
rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev); + + return ret; +} + +static int +cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter, + struct cpfl_repr_id *repr_id) +{ + int ret; + + rte_spinlock_lock(&adapter->repr_lock); + if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) { + ret = -EEXIST; + goto err; + } + + ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id); + if (ret < 0) + goto err; + + rte_spinlock_unlock(&adapter->repr_lock); + return 0; +err: + rte_spinlock_unlock(&adapter->repr_lock); + return ret; +} + +static int +cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter, + struct rte_eth_devargs *eth_da) +{ + struct cpfl_repr_id repr_id; + int ret, c, p, v; + + for (c = 0; c < eth_da->nb_mh_controllers; c++) { + for (p = 0; p < eth_da->nb_ports; p++) { + repr_id.type = eth_da->type; + if (eth_da->type == RTE_ETH_REPRESENTOR_PF) { + repr_id.host_id = eth_da->mh_controllers[c]; + repr_id.pf_id = eth_da->ports[p]; + repr_id.vf_id = 0; + ret = cpfl_repr_allowlist_add(adapter, &repr_id); + if (ret == -EEXIST) + continue; + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, " + "host_id = %d, pf_id = %d.", + repr_id.host_id, repr_id.pf_id); + return ret; + } + } else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) { + for (v = 0; v < eth_da->nb_representor_ports; v++) { + repr_id.host_id = eth_da->mh_controllers[c]; + repr_id.pf_id = eth_da->ports[p]; + repr_id.vf_id = eth_da->representor_ports[v]; + ret = cpfl_repr_allowlist_add(adapter, &repr_id); + if (ret == -EEXIST) + continue; + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, " + "host_id = %d, pf_id = %d, vf_id = %d.", + repr_id.host_id, + repr_id.pf_id, + repr_id.vf_id); + return ret; + } + } + } + } + } + + return 0; +} + +int +cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_devargs *devargs = &adapter->devargs; + int ret, i, j; + + /* check and refine repr args */ + for (i = 0; i < 
devargs->repr_args_num; i++) { + struct rte_eth_devargs *eth_da = &devargs->repr_args[i]; + + /* set default host_id to host */ + if (eth_da->nb_mh_controllers == 0) { + eth_da->nb_mh_controllers = 1; + eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST; + } else { + for (j = 0; j < eth_da->nb_mh_controllers; j++) { + if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) { + PMD_INIT_LOG(ERR, "Invalid Host ID %d", + eth_da->mh_controllers[j]); + return -EINVAL; + } + } + } + + /* set default pf to APF */ + if (eth_da->nb_ports == 0) { + eth_da->nb_ports = 1; + eth_da->ports[0] = CPFL_PF_TYPE_APF; + } else { + for (j = 0; j < eth_da->nb_ports; j++) { + if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) { + PMD_INIT_LOG(ERR, "Invalid Host ID %d", + eth_da->ports[j]); + return -EINVAL; + } + } + } + + ret = cpfl_repr_devargs_process_one(adapter, eth_da); + if (ret != 0) + return ret; + } + + return 0; +} + +static int +cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter, + struct cpfl_repr_id *repr_id) +{ + int ret; + + rte_spinlock_lock(&adapter->repr_lock); + + ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist." 
+ "host_id = %d, type = %d, pf_id = %d, vf_id = %d", + repr_id->host_id, repr_id->type, + repr_id->pf_id, repr_id->vf_id); + goto err; + } + + rte_spinlock_unlock(&adapter->repr_lock); + return 0; +err: + rte_spinlock_unlock(&adapter->repr_lock); + return ret; +} + +static int +cpfl_repr_uninit(struct rte_eth_dev *eth_dev) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev); + struct cpfl_adapter_ext *adapter = repr->itf.adapter; + + eth_dev->data->mac_addrs = NULL; + + cpfl_repr_allowlist_del(adapter, &repr->repr_id); + + return 0; +} + +static int +cpfl_repr_dev_configure(struct rte_eth_dev *dev) +{ + /* now only 1 RX queue is supported */ + if (dev->data->nb_rx_queues > 1) + return -EINVAL; + + return 0; +} + +static int +cpfl_repr_dev_close(struct rte_eth_dev *dev) +{ + return cpfl_repr_uninit(dev); +} + +static int +cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev); + + dev_info->device = ethdev->device; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE; + dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE; + + dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL; + + dev_info->rx_offload_capa = + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_QINQ_STRIP | + RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_RSS_HASH | + RTE_ETH_RX_OFFLOAD_TIMESTAMP; + + dev_info->tx_offload_capa = + RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; + + 
dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = CPFL_MAX_RING_DESC, + .nb_min = CPFL_MIN_RING_DESC, + .nb_align = CPFL_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = CPFL_MAX_RING_DESC, + .nb_min = CPFL_MIN_RING_DESC, + .nb_align = CPFL_ALIGN_RING_DESC, + }; + + dev_info->switch_info.name = ethdev->device->name; + dev_info->switch_info.domain_id = 0; /* the same domain*/ + dev_info->switch_info.port_id = repr->vport_info->vport.info.vsi_id; + + return 0; +} + +static int +cpfl_repr_dev_start(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +cpfl_repr_dev_stop(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + dev->data->dev_started = 0; + return 0; +} + +static int +cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *conf, + __rte_unused struct rte_mempool *pool) +{ + /* Dummy */ + return 0; +} + +static int +cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + 
__rte_unused const struct rte_eth_txconf *conf) +{ + /* Dummy */ + return 0; +} + +static const struct eth_dev_ops cpfl_repr_dev_ops = { + .dev_start = cpfl_repr_dev_start, + .dev_stop = cpfl_repr_dev_stop, + .dev_configure = cpfl_repr_dev_configure, + .dev_close = cpfl_repr_dev_close, + .dev_infos_get = cpfl_repr_dev_info_get, + + .rx_queue_setup = cpfl_repr_rx_queue_setup, + .tx_queue_setup = cpfl_repr_tx_queue_setup, +}; + +static int +cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev); + struct cpfl_repr_param *param = init_param; + struct cpfl_adapter_ext *adapter = param->adapter; + + repr->repr_id = param->repr_id; + repr->vport_info = param->vport_info; + repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR; + repr->itf.adapter = adapter; + repr->itf.data = eth_dev->data; + + eth_dev->dev_ops = &cpfl_repr_dev_ops; + + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + + eth_dev->data->representor_id = + CPFL_REPRESENTOR_ID(repr->repr_id.type, + repr->repr_id.host_id, + repr->repr_id.pf_id, + repr->repr_id.vf_id); + + eth_dev->data->mac_addrs = &repr->mac_addr; + + rte_eth_random_addr(repr->mac_addr.addr_bytes); + + return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev); +} + +static int +cpfl_func_id_get(uint8_t host_id, uint8_t pf_id) +{ + if ((host_id != CPFL_HOST_ID_HOST && + host_id != CPFL_HOST_ID_ACC) || + (pf_id != CPFL_PF_TYPE_APF && + pf_id != CPFL_PF_TYPE_CPF)) + return -EINVAL; + + static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = { + [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF, + [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID, + [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID, + [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID, + }; + + return func_id_map[host_id][pf_id]; +} + +static bool +cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id, + struct cpchnl2_vport_info *info) +{ + int func_id; + + if 
(repr_id->type == RTE_ETH_REPRESENTOR_PF && + info->func_type == CPFL_VPORT_LAN_PF) { + func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + if (func_id < 0 || func_id != info->pf_id) + return false; + else + return true; + } else if (repr_id->type == RTE_ETH_REPRESENTOR_VF && + info->func_type == CPFL_VPORT_LAN_VF) { + if (repr_id->vf_id == info->vf_id) + return true; + } + + return false; +} + +static int +cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter, + const struct cpfl_repr_id *repr_id, + struct cpchnl2_get_vport_list_response *response) +{ + struct cpfl_vport_id vi; + int ret; + + if (repr_id->type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + vi.vf_id = 0; + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr_id->vf_id; + } + + ret = cpfl_cc_vport_list_get(adapter, &vi, response); + + return ret; +} + +static int +cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter, + const struct cpfl_repr_id *repr_id, + struct cpchnl2_vport_id *vport_id, + struct cpchnl2_get_vport_info_response *response) +{ + struct cpfl_vport_id vi; + int ret; + + if (repr_id->type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + vi.vf_id = 0; + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr_id->vf_id; + } + + ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response); + + return ret; +} + +static int +cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter, + const struct cpfl_repr_id *repr_id, uint32_t vport_id, + struct cpchnl2_get_vport_info_response *response) +{ + struct cpfl_vport_id vi; + int ret; + + vi.vport_id = vport_id; + if (repr_id->type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + 
vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id); + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr_id->vf_id; + } + + ret = cpfl_vport_info_create(adapter, &vi, (struct cpchnl2_event_vport_created *)response); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor."); + return ret; + } + + return 0; +} + +int +cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter) +{ + struct rte_eth_dev *dev; + uint32_t iter = 0; + const struct cpfl_repr_id *repr_id; + const struct cpfl_vport_id *vp_id; + struct cpchnl2_get_vport_list_response *vlist_resp; + struct cpchnl2_get_vport_info_response vinfo_resp; + int ret; + + vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0); + if (vlist_resp == NULL) + return -ENOMEM; + + rte_spinlock_lock(&adapter->repr_lock); + + while (rte_hash_iterate(adapter->repr_allowlist_hash, + (const void **)&repr_id, (void **)&dev, &iter) >= 0) { + struct cpfl_vport_info *vi; + char name[RTE_ETH_NAME_MAX_LEN]; + uint32_t iter_iter = 0; + int i; + + /* skip representor already be created */ + if (dev != NULL) + continue; + + if (repr_id->type == RTE_ETH_REPRESENTOR_VF) + snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d", + pci_dev->name, + repr_id->host_id, + repr_id->pf_id, + repr_id->vf_id); + else + snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d", + pci_dev->name, + repr_id->host_id, + repr_id->pf_id); + + /* get vport list for the port representor */ + ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list", + repr_id->host_id, repr_id->pf_id, repr_id->vf_id); + goto err; + } + + if (vlist_resp->nof_vports == 0) { + PMD_INIT_LOG(WARNING, "No matched vport for representor %s", name); + continue; + } + + /* get all vport info for the port representor */ + for (i = 0; i < vlist_resp->nof_vports; i++) { + 
ret = cpfl_repr_vport_info_query(adapter, repr_id, + &vlist_resp->vports[i], &vinfo_resp); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info", + repr_id->host_id, repr_id->pf_id, repr_id->vf_id, + vlist_resp->vports[i].vport_id); + goto err; + } + + ret = cpfl_repr_vport_map_update(adapter, repr_id, + vlist_resp->vports[i].vport_id, &vinfo_resp); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to update host%d pf%d vf%d vport[%d]'s info to vport_map_hash", + repr_id->host_id, repr_id->pf_id, repr_id->vf_id, + vlist_resp->vports[i].vport_id); + goto err; + } + } + + /* find the matched vport */ + rte_spinlock_lock(&adapter->vport_map_lock); + + while (rte_hash_iterate(adapter->vport_map_hash, + (const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) { + struct cpfl_repr_param param; + + if (!cpfl_match_repr_with_vport(repr_id, &vi->vport.info)) + continue; + + param.adapter = adapter; + param.repr_id = *repr_id; + param.vport_info = vi; + + ret = rte_eth_dev_create(&pci_dev->device, + name, + sizeof(struct cpfl_repr), + NULL, NULL, cpfl_repr_init, + ¶m); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to create representor %s", name); + rte_spinlock_unlock(&adapter->vport_map_lock); + goto err; + } + break; + } + + rte_spinlock_unlock(&adapter->vport_map_lock); + } + +err: + rte_spinlock_unlock(&adapter->repr_lock); + rte_free(vlist_resp); + return ret; +} diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h new file mode 100644 index 0000000000..d3a4de531e --- /dev/null +++ b/drivers/net/cpfl/cpfl_representor.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_REPRESENTOR_H_ +#define _CPFL_REPRESENTOR_H_ + +#include +#include + +struct cpfl_repr_id { + uint8_t host_id; + uint8_t pf_id; + uint8_t type; + uint8_t vf_id; +}; + +struct cpfl_repr_param { + struct cpfl_adapter_ext *adapter; + struct cpfl_repr_id repr_id; + struct 
cpfl_vport_info *vport_info; +}; + +int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter); +int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter); +#endif diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 2f0f5d8434..d8b92ae16a 100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -17,6 +17,7 @@ sources = files( 'cpfl_ethdev.c', 'cpfl_rxtx.c', 'cpfl_vchnl.c', + 'cpfl_representor.c', ) if arch_subdir == 'x86' From patchwork Tue Sep 12 16:26:40 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 131358 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id F12E94257B; Tue, 12 Sep 2023 10:09:16 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E99FA40A81; Tue, 12 Sep 2023 10:08:26 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 74FFF40A6E for ; Tue, 12 Sep 2023 10:08:24 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1694506104; x=1726042104; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=5PGir8RBOi2aGxQjfVmOGQWDWEe8YDzvZbMflOjWVuA=; b=iS2zPpuKGIzhQWQzFKF6KFDPWy2jDmvuqd8K0jLQRRQcsNg65hjsp89F iRFdM7zLGiQzL3uj0CGU9A1PE3sdHnfZ+RBW0M0L9v5pIoJ/TfjK/S0RC 79KykxsfK+NfwAPhPtAxwfnffw0YHKGBNT0avgkAm4iFkPYZdwgmQDI7F +Yhdqetqw4P2OrVOgA5zVLZsIMf0V5Sw0SGCI+7DawqhQS457d+z92Uq6 Kih8nl8d+1xYQcC+QslWEFW5xgS8PY8cM5q1fbc1/RliT7aRkndz/Zvvl goA2Bk/jrH/DjtG22v+DplQuYVtJbxqkc/wZItT9aL8aBvR5qkAD645m/ Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="375639606" 
X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="375639606" Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Sep 2023 01:08:24 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10830"; a="858702597" X-IronPort-AV: E=Sophos;i="6.02,245,1688454000"; d="scan'208";a="858702597" Received: from dpdk-beileix-icelake.sh.intel.com ([10.67.116.248]) by fmsmga002.fm.intel.com with ESMTP; 12 Sep 2023 01:08:22 -0700 From: beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing Subject: [PATCH v5 10/10] net/cpfl: support link update for representor Date: Tue, 12 Sep 2023 16:26:40 +0000 Message-Id: <20230912162640.1439383-11-beilei.xing@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com> References: <20230908111701.1022724-1-beilei.xing@intel.com> <20230912162640.1439383-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing Add link update ops for representor. 
Signed-off-by: Jingjing Wu Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.h | 1 + drivers/net/cpfl/cpfl_representor.c | 89 +++++++++++++++++++++++------ 2 files changed, 71 insertions(+), 19 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index a4ffd51fb3..d0dcc0cc05 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -162,6 +162,7 @@ struct cpfl_repr { struct cpfl_repr_id repr_id; struct rte_ether_addr mac_addr; struct cpfl_vport_info *vport_info; + bool func_up; /* If the represented function is up */ }; struct cpfl_adapter_ext { diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c index d2558c39a8..4d15a26c80 100644 --- a/drivers/net/cpfl/cpfl_representor.c +++ b/drivers/net/cpfl/cpfl_representor.c @@ -308,6 +308,72 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, return 0; } +static int +cpfl_func_id_get(uint8_t host_id, uint8_t pf_id) +{ + if ((host_id != CPFL_HOST_ID_HOST && + host_id != CPFL_HOST_ID_ACC) || + (pf_id != CPFL_PF_TYPE_APF && + pf_id != CPFL_PF_TYPE_CPF)) + return -EINVAL; + + static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = { + [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF, + [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID, + [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID, + [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID, + }; + + return func_id_map[host_id][pf_id]; +} + +static int +cpfl_repr_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev); + struct rte_eth_link *dev_link = ðdev->data->dev_link; + struct cpfl_adapter_ext *adapter = repr->itf.adapter; + struct cpchnl2_get_vport_info_response response; + struct cpfl_vport_id vi; + int ret; + + if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) { + PMD_INIT_LOG(ERR, "This ethdev is not representor."); + return -EINVAL; + } + + 
if (wait_to_complete) { + if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) { + /* PF */ + vi.func_type = CPCHNL2_FUNC_TYPE_PF; + vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id); + vi.vf_id = 0; + } else { + /* VF */ + vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV; + vi.pf_id = CPFL_HOST0_APF; + vi.vf_id = repr->repr_id.vf_id; + } + ret = cpfl_cc_vport_info_get(adapter, &repr->vport_info->vport.vport, + &vi, &response); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Fail to get vport info."); + return ret; + } + + if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED) + repr->func_up = true; + else + repr->func_up = false; + } + + dev_link->link_status = repr->func_up ? + RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; + + return 0; +} + static const struct eth_dev_ops cpfl_repr_dev_ops = { .dev_start = cpfl_repr_dev_start, .dev_stop = cpfl_repr_dev_stop, @@ -317,6 +383,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = { .rx_queue_setup = cpfl_repr_rx_queue_setup, .tx_queue_setup = cpfl_repr_tx_queue_setup, + + .link_update = cpfl_repr_link_update, }; static int @@ -331,6 +399,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param) repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR; repr->itf.adapter = adapter; repr->itf.data = eth_dev->data; + if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED) + repr->func_up = true; eth_dev->dev_ops = &cpfl_repr_dev_ops; @@ -349,25 +419,6 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param) return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev); } -static int -cpfl_func_id_get(uint8_t host_id, uint8_t pf_id) -{ - if ((host_id != CPFL_HOST_ID_HOST && - host_id != CPFL_HOST_ID_ACC) || - (pf_id != CPFL_PF_TYPE_APF && - pf_id != CPFL_PF_TYPE_CPF)) - return -EINVAL; - - static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = { - [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF, - [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID, - 
[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID, - [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID, - }; - - return func_id_map[host_id][pf_id]; -} - static bool cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id, struct cpchnl2_vport_info *info)