From patchwork Fri Feb 3 09:43:40 2023
X-Patchwork-Submitter: "Xing, Beilei" <beilei.xing@intel.com>
X-Patchwork-Id: 123034
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v6 19/19] common/idpf: refine API name for virtual channel functions
Date: Fri, 3 Feb 2023 09:43:40 +0000
Message-Id: <20230203094340.8103-20-beilei.xing@intel.com>
In-Reply-To: <20230203094340.8103-1-beilei.xing@intel.com>
References: <20230202095357.37929-1-beilei.xing@intel.com>
 <20230203094340.8103-1-beilei.xing@intel.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

From: Beilei Xing <beilei.xing@intel.com>

This patch refines the names of all virtual channel functions exported by
the idpf common module: each name now puts the object before the verb
(for example, idpf_vc_create_vport becomes idpf_vc_vport_create, and
idpf_vc_set_rss_key becomes idpf_vc_rss_key_set), so that related
operations sort and group together.
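To illustrate the convention, below is a minimal, hypothetical caller
sketch (not part of this patch): it condenses the real idpf_adapter_init()
and idpf_vport_rss_config() flows, shortens their error handling, and uses
only functions renamed by this patch.

    /* Hypothetical example, not in the tree: condensed init flow
     * showing the object-first names introduced by this patch. */
    static int
    example_vc_init(struct idpf_adapter *adapter, struct idpf_vport *vport)
    {
    	int ret;

    	ret = idpf_vc_api_version_check(adapter); /* was idpf_vc_check_api_version */
    	if (ret != 0)
    		return ret;

    	ret = idpf_vc_caps_get(adapter);          /* was idpf_vc_get_caps */
    	if (ret != 0)
    		return ret;

    	ret = idpf_vc_rss_key_set(vport);         /* was idpf_vc_set_rss_key */
    	if (ret != 0)
    		return ret;

    	return idpf_vc_rss_hash_set(vport);       /* was idpf_vc_set_rss_hash */
    }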
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_device.c   | 24 ++++----
 drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++-----------
 drivers/common/idpf/idpf_common_virtchnl.h | 36 +++++------
 drivers/common/idpf/version.map            | 38 ++++++------
 drivers/net/idpf/idpf_ethdev.c             | 10 ++--
 drivers/net/idpf/idpf_rxtx.c               | 12 ++--
 6 files changed, 95 insertions(+), 95 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index f17b7736ae..6c5f10a8ce 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -104,7 +104,7 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
 	uint16_t ptype_recvd = 0;
 	int ret;
 
-	ret = idpf_vc_query_ptype_info(adapter);
+	ret = idpf_vc_ptype_info_query(adapter);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Fail to query packet type information");
 		return ret;
@@ -115,7 +115,7 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
 		return -ENOMEM;
 
 	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
-		ret = idpf_vc_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+		ret = idpf_vc_one_msg_read(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
					   IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
 		if (ret != 0) {
 			DRV_LOG(ERR, "Fail to get packet type information");
@@ -333,13 +333,13 @@ idpf_adapter_init(struct idpf_adapter *adapter)
 		goto err_mbx_resp;
 	}
 
-	ret = idpf_vc_check_api_version(adapter);
+	ret = idpf_vc_api_version_check(adapter);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to check api version");
 		goto err_check_api;
 	}
 
-	ret = idpf_vc_get_caps(adapter);
+	ret = idpf_vc_caps_get(adapter);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to get capabilities");
 		goto err_check_api;
@@ -382,7 +382,7 @@ idpf_vport_init(struct idpf_vport *vport,
 	struct virtchnl2_create_vport *vport_info;
 	int i, type, ret;
 
-	ret = idpf_vc_create_vport(vport, create_vport_info);
+	ret = idpf_vc_vport_create(vport, create_vport_info);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to create vport.");
 		goto err_create_vport;
@@ -483,7 +483,7 @@ idpf_vport_init(struct idpf_vport *vport,
 	rte_free(vport->rss_key);
 	vport->rss_key = NULL;
 err_rss_key:
-	idpf_vc_destroy_vport(vport);
+	idpf_vc_vport_destroy(vport);
 err_create_vport:
 	return ret;
 }
@@ -500,7 +500,7 @@ idpf_vport_deinit(struct idpf_vport *vport)
 
 	vport->dev_data = NULL;
 
-	idpf_vc_destroy_vport(vport);
+	idpf_vc_vport_destroy(vport);
 
 	return 0;
 }
@@ -509,19 +509,19 @@ idpf_vport_rss_config(struct idpf_vport *vport)
 {
 	int ret;
 
-	ret = idpf_vc_set_rss_key(vport);
+	ret = idpf_vc_rss_key_set(vport);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to configure RSS key");
 		return ret;
 	}
 
-	ret = idpf_vc_set_rss_lut(vport);
+	ret = idpf_vc_rss_lut_set(vport);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to configure RSS lut");
 		return ret;
 	}
 
-	ret = idpf_vc_set_rss_hash(vport);
+	ret = idpf_vc_rss_hash_set(vport);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to configure RSS hash");
 		return ret;
@@ -589,7 +589,7 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
 	}
 	vport->qv_map = qv_map;
 
-	ret = idpf_vc_config_irq_map_unmap(vport, nb_rx_queues, true);
+	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
 	if (ret != 0) {
 		DRV_LOG(ERR, "config interrupt mapping failed");
 		goto config_irq_map_err;
@@ -608,7 +608,7 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
 int
 idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
 {
-	idpf_vc_config_irq_map_unmap(vport, nb_rx_queues, false);
+	idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, false);
 
 	rte_free(vport->qv_map);
 	vport->qv_map = NULL;
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 299caa19f1..50e2ade89e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -159,7 +159,7 @@ idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
 #define ASQ_DELAY_MS 10
 
 int
-idpf_vc_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
+idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
 		     uint8_t *buf)
 {
 	int err = 0;
@@ -183,7 +183,7 @@ idpf_vc_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_le
 }
 
 int
-idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
+idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 {
 	int err = 0;
 	int i = 0;
@@ -218,7 +218,7 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	case VIRTCHNL2_OP_ALLOC_VECTORS:
 	case VIRTCHNL2_OP_DEALLOC_VECTORS:
 		/* for init virtchnl ops, need to poll the response */
-		err = idpf_vc_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
+		err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
 		clear_cmd(adapter);
 		break;
 	case VIRTCHNL2_OP_GET_PTYPE_INFO:
@@ -251,7 +251,7 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 }
 
 int
-idpf_vc_check_api_version(struct idpf_adapter *adapter)
+idpf_vc_api_version_check(struct idpf_adapter *adapter)
 {
 	struct virtchnl2_version_info version, *pver;
 	struct idpf_cmd_info args;
@@ -267,7 +267,7 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0) {
 		DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL_OP_VERSION");
@@ -291,7 +291,7 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
 }
 
 int
-idpf_vc_get_caps(struct idpf_adapter *adapter)
+idpf_vc_caps_get(struct idpf_adapter *adapter)
 {
 	struct virtchnl2_get_capabilities caps_msg;
 	struct idpf_cmd_info args;
@@ -341,7 +341,7 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0) {
 		DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
@@ -354,7 +354,7 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
 }
 
 int
-idpf_vc_create_vport(struct idpf_vport *vport,
+idpf_vc_vport_create(struct idpf_vport *vport,
 		     struct virtchnl2_create_vport *create_vport_info)
 {
 	struct idpf_adapter *adapter = vport->adapter;
@@ -378,7 +378,7 @@ idpf_vc_create_vport(struct idpf_vport *vport,
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0) {
 		DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
@@ -390,7 +390,7 @@ idpf_vc_create_vport(struct idpf_vport *vport,
 }
 
 int
-idpf_vc_destroy_vport(struct idpf_vport *vport)
+idpf_vc_vport_destroy(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_vport vc_vport;
@@ -406,7 +406,7 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
@@ -414,7 +414,7 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)
 }
 
 int
-idpf_vc_set_rss_key(struct idpf_vport *vport)
+idpf_vc_rss_key_set(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_rss_key *rss_key;
@@ -439,7 +439,7 @@ idpf_vc_set_rss_key(struct idpf_vport *vport)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
@@ -448,7 +448,7 @@ idpf_vc_set_rss_key(struct idpf_vport *vport)
 }
 
 int
-idpf_vc_set_rss_lut(struct idpf_vport *vport)
+idpf_vc_rss_lut_set(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_rss_lut *rss_lut;
@@ -473,7 +473,7 @@ idpf_vc_set_rss_lut(struct idpf_vport *vport)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR,
 			"Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
@@ -482,7 +482,7 @@ idpf_vc_set_rss_lut(struct idpf_vport *vport)
 }
 
 int
-idpf_vc_set_rss_hash(struct idpf_vport *vport)
+idpf_vc_rss_hash_set(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_rss_hash rss_hash;
@@ -500,7 +500,7 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
 
@@ -508,7 +508,7 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
 }
 
 int
-idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
+idpf_vc_irq_map_unmap_config(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_queue_vector_maps *map_info;
@@ -539,7 +539,7 @@ idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map
 	args.in_args_size = len;
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
			map ? "MAP" : "UNMAP");
@@ -549,7 +549,7 @@ idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map
 }
 
 int
-idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
+idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_alloc_vectors *alloc_vec;
@@ -569,7 +569,7 @@ idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
 	args.in_args_size = len;
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR,
 			"Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
@@ -579,7 +579,7 @@ idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
 }
 
 int
-idpf_vc_dealloc_vectors(struct idpf_vport *vport)
+idpf_vc_vectors_dealloc(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_alloc_vectors *alloc_vec;
@@ -598,7 +598,7 @@ idpf_vc_dealloc_vectors(struct idpf_vport *vport)
 	args.in_args_size = len;
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR,
			"Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
@@ -634,7 +634,7 @@ idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
			on ? "ENABLE" : "DISABLE");
@@ -644,7 +644,7 @@ idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
 }
 
 int
-idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
+idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
 		     bool rx, bool on)
 {
 	uint32_t type;
@@ -688,7 +688,7 @@ idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
 #define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
 
 int
-idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
+idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_del_ena_dis_queues *queue_select;
@@ -746,7 +746,7 @@ idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
 	args.in_args_size = len;
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
			enable ? "ENABLE" : "DISABLE");
@@ -756,7 +756,7 @@ idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
 }
 
 int
-idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
+idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_vport vc_vport;
@@ -771,7 +771,7 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0) {
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
			enable ? "ENABLE" : "DISABLE");
@@ -781,7 +781,7 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 }
 
 int
-idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
+idpf_vc_ptype_info_query(struct idpf_adapter *adapter)
 {
 	struct virtchnl2_get_ptype_info *ptype_info;
 	struct idpf_cmd_info args;
@@ -798,7 +798,7 @@ idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
 	args.in_args = (uint8_t *)ptype_info;
 	args.in_args_size = len;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	if (err != 0)
 		DRV_LOG(ERR,
			"Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
@@ -808,7 +808,7 @@ idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
 
 #define IDPF_RX_BUF_STRIDE 64
 int
-idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
+idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
@@ -887,7 +887,7 @@ idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	rte_free(vc_rxqs);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
@@ -896,7 +896,7 @@ idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
 }
 
 int
-idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
+idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
@@ -958,7 +958,7 @@ idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
 	args.out_buffer = adapter->mbx_resp;
 	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
 
-	err = idpf_execute_vc_cmd(adapter, &args);
+	err = idpf_vc_cmd_execute(adapter, &args);
 	rte_free(vc_txqs);
 	if (err != 0)
 		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 07755d4923..dcd855c08c 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -9,44 +9,44 @@
 #include <idpf_common_device.h>
 
 __rte_internal
-int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_vc_api_version_check(struct idpf_adapter *adapter);
 __rte_internal
-int idpf_vc_get_caps(struct idpf_adapter *adapter);
+int idpf_vc_caps_get(struct idpf_adapter *adapter);
 __rte_internal
-int idpf_vc_create_vport(struct idpf_vport *vport,
+int idpf_vc_vport_create(struct idpf_vport *vport,
			 struct virtchnl2_create_vport *vport_info);
 __rte_internal
-int idpf_vc_destroy_vport(struct idpf_vport *vport);
+int idpf_vc_vport_destroy(struct idpf_vport *vport);
 __rte_internal
-int idpf_vc_set_rss_key(struct idpf_vport *vport);
+int idpf_vc_rss_key_set(struct idpf_vport *vport);
 __rte_internal
-int idpf_vc_set_rss_lut(struct idpf_vport *vport);
+int idpf_vc_rss_lut_set(struct idpf_vport *vport);
 __rte_internal
-int idpf_vc_set_rss_hash(struct idpf_vport *vport);
+int idpf_vc_rss_hash_set(struct idpf_vport *vport);
 __rte_internal
-int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
+int idpf_vc_irq_map_unmap_config(struct idpf_vport *vport,
				 uint16_t nb_rxq, bool map);
 __rte_internal
-int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
+int idpf_vc_cmd_execute(struct idpf_adapter *adapter,
			struct idpf_cmd_info *args);
 __rte_internal
-int idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
+int idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
			 bool rx, bool on);
 __rte_internal
-int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
+int idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable);
 __rte_internal
-int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable);
 __rte_internal
-int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
+int idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors);
 __rte_internal
-int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
+int idpf_vc_vectors_dealloc(struct idpf_vport *vport);
 __rte_internal
-int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
+int idpf_vc_ptype_info_query(struct idpf_adapter *adapter);
 __rte_internal
-int idpf_vc_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
+int idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops,
			 uint16_t buf_len, uint8_t *buf);
 __rte_internal
-int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
+int idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
 __rte_internal
-int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
+int idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index e37a40771b..1c35761611 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -31,6 +31,25 @@ INTERNAL {
 	idpf_qc_tx_thresh_check;
 	idpf_qc_txq_mbufs_release;
 
+	idpf_vc_api_version_check;
+	idpf_vc_caps_get;
+	idpf_vc_cmd_execute;
+	idpf_vc_irq_map_unmap_config;
+	idpf_vc_one_msg_read;
+	idpf_vc_ptype_info_query;
+	idpf_vc_queue_switch;
+	idpf_vc_queues_ena_dis;
+	idpf_vc_rss_hash_set;
+	idpf_vc_rss_key_set;
+	idpf_vc_rss_lut_set;
+	idpf_vc_rxq_config;
+	idpf_vc_txq_config;
+	idpf_vc_vectors_alloc;
+	idpf_vc_vectors_dealloc;
+	idpf_vc_vport_create;
+	idpf_vc_vport_destroy;
+	idpf_vc_vport_ena_dis;
+
 	idpf_vport_deinit;
 	idpf_vport_info_init;
 	idpf_vport_init;
@@ -38,24 +57,5 @@ INTERNAL {
 	idpf_vport_irq_unmap_config;
 	idpf_vport_rss_config;
 
-	idpf_execute_vc_cmd;
-	idpf_vc_alloc_vectors;
-	idpf_vc_check_api_version;
-	idpf_vc_config_irq_map_unmap;
-	idpf_vc_config_rxq;
-	idpf_vc_config_txq;
-	idpf_vc_create_vport;
-	idpf_vc_dealloc_vectors;
-	idpf_vc_destroy_vport;
-	idpf_vc_ena_dis_queues;
-	idpf_vc_ena_dis_vport;
-	idpf_vc_get_caps;
-	idpf_vc_query_ptype_info;
-	idpf_vc_read_one_msg;
-	idpf_vc_set_rss_hash;
-	idpf_vc_set_rss_key;
-	idpf_vc_set_rss_lut;
-	idpf_vc_switch_queue;
-
 	local: *;
 };
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index b324c0dc83..33f5e90743 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -299,7 +299,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
 		goto err_vec;
 	}
 
-	ret = idpf_vc_alloc_vectors(vport, req_vecs_num);
+	ret = idpf_vc_vectors_alloc(vport, req_vecs_num);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
 		goto err_vec;
@@ -321,7 +321,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
 	idpf_set_rx_function(dev);
 	idpf_set_tx_function(dev);
 
-	ret = idpf_vc_ena_dis_vport(vport, true);
+	ret = idpf_vc_vport_ena_dis(vport, true);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
 		goto err_vport;
@@ -336,7 +336,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
 err_startq:
 	idpf_vport_irq_unmap_config(vport, dev->data->nb_rx_queues);
 err_irq:
-	idpf_vc_dealloc_vectors(vport);
+	idpf_vc_vectors_dealloc(vport);
 err_vec:
 	return ret;
 }
@@ -349,13 +349,13 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 	if (vport->stopped == 1)
 		return 0;
 
-	idpf_vc_ena_dis_vport(vport, false);
+	idpf_vc_vport_ena_dis(vport, false);
 
 	idpf_stop_queues(dev);
 
 	idpf_vport_irq_unmap_config(vport, dev->data->nb_rx_queues);
 
-	idpf_vc_dealloc_vectors(vport);
+	idpf_vc_vectors_dealloc(vport);
 
 	vport->stopped = 1;
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 41e91b16b6..f41783daea 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -566,7 +566,7 @@ idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		dev->data->rx_queues[rx_queue_id];
 	int err = 0;
 
-	err = idpf_vc_config_rxq(vport, rxq);
+	err = idpf_vc_rxq_config(vport, rxq);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Fail to configure Rx queue %u", rx_queue_id);
 		return err;
@@ -580,7 +580,7 @@ idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	/* Ready to switch the queue on */
-	err = idpf_vc_switch_queue(vport, rx_queue_id, true, true);
+	err = idpf_vc_queue_switch(vport, rx_queue_id, true, true);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
@@ -617,7 +617,7 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		dev->data->tx_queues[tx_queue_id];
 	int err = 0;
 
-	err = idpf_vc_config_txq(vport, txq);
+	err = idpf_vc_txq_config(vport, txq);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
 		return err;
@@ -631,7 +631,7 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	/* Ready to switch the queue on */
-	err = idpf_vc_switch_queue(vport, tx_queue_id, false, true);
+	err = idpf_vc_queue_switch(vport, tx_queue_id, false, true);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
@@ -654,7 +654,7 @@ idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (rx_queue_id >= dev->data->nb_rx_queues)
 		return -EINVAL;
 
-	err = idpf_vc_switch_queue(vport, rx_queue_id, true, false);
+	err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
@@ -685,7 +685,7 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -EINVAL;
 
-	err = idpf_vc_switch_queue(vport, tx_queue_id, false, false);
+	err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);