From patchwork Sun Sep 27 12:42:28 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 78917
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com, Ting Xu
Date: Sun, 27 Sep 2020 20:42:28 +0800
Message-Id: <20200927124229.595-2-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200927124229.595-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com> <20200927124229.595-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/2] net/iavf: add IAVF request queues function

Add a new virtchnl function to request additional queues from the PF.
Currently, the default number of queue pairs assigned when a VF is
created is 16. To support up to 256 queue pairs, enable this request
queues function. Since the request queues command may return an event
message rather than a direct command reply, modify
iavf_read_msg_from_pf() to identify event opcodes and mark the VF
reset status.
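To make the new read-message contract concrete before the diff: a request-queues command is answered either by an asynchronous command reply, or, on success, by a VIRTCHNL_EVENT_RESET_IMPENDING system event. Below is a minimal stand-alone sketch of that polling logic, not the driver code itself; read_one_msg(), vf_reset and MAX_TRY_TIMES are simplified stand-ins for iavf_read_msg_from_pf() and the driver's state and retry budget.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_TRY_TIMES 200	/* illustrative retry budget */
#define ASQ_DELAY_MS  10

enum iavf_aq_result {
	IAVF_MSG_ERR = -1,	/* error while accessing the admin queue */
	IAVF_MSG_NON,		/* nothing to read from the admin queue */
	IAVF_MSG_SYS,		/* system (event) message */
	IAVF_MSG_CMD,		/* async reply to a previously issued command */
};

static bool vf_reset;

/* Stand-in for iavf_read_msg_from_pf(): pretend the PF signals an
 * impending reset (the success path for request queues) on the third poll. */
static enum iavf_aq_result read_one_msg(void)
{
	static int calls;

	if (++calls < 3)
		return IAVF_MSG_NON;
	vf_reset = true;
	return IAVF_MSG_SYS;
}

static int wait_for_request_queues_reply(void)
{
	int i = 0;

	do {
		enum iavf_aq_result result = read_one_msg();

		if (result == IAVF_MSG_SYS && vf_reset)
			return 0;	/* PF granted the request; VF reset follows */
		if (result == IAVF_MSG_CMD || result == IAVF_MSG_ERR)
			return -1;	/* direct reply or AQ error: request failed */
		usleep(ASQ_DELAY_MS * 1000);	/* nothing read yet, retry */
	} while (i++ < MAX_TRY_TIMES);

	return -1;	/* timed out */
}

int main(void)
{
	printf("request queues %s\n",
	       wait_for_request_queues_reply() == 0 ? "accepted" : "failed");
	return 0;
}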
Signed-off-by: Ting Xu --- drivers/net/iavf/iavf.h | 17 ++++ drivers/net/iavf/iavf_ethdev.c | 12 ++- drivers/net/iavf/iavf_vchnl.c | 145 ++++++++++++++++++++++++++++++--- 3 files changed, 158 insertions(+), 16 deletions(-) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index d56611608..1c40f9bdf 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -107,6 +107,21 @@ struct iavf_fdir_info { /* TODO: is that correct to assume the max number to be 16 ?*/ #define IAVF_MAX_MSIX_VECTORS 16 +/* Event status from PF */ +enum pending_msg { + PFMSG_LINK_CHANGE = 0x1, + PFMSG_RESET_IMPENDING = 0x2, + PFMSG_DRIVER_CLOSE = 0x4, +}; + +/* Message type read in admin queue from PF */ +enum iavf_aq_result { + IAVF_MSG_ERR = -1, /* Error while accessing admin queue */ + IAVF_MSG_NON, /* Read nothing from admin queue */ + IAVF_MSG_SYS, /* Read system msg from admin queue */ + IAVF_MSG_CMD, /* Read async command result */ +}; + /* Structure to store private data specific for VF instance. */ struct iavf_info { uint16_t num_queue_pairs; @@ -123,6 +138,7 @@ struct iavf_info { volatile enum virtchnl_ops pend_cmd; /* pending command not finished */ uint32_t cmd_retval; /* return value of the cmd response from PF */ uint8_t *aq_resp; /* buffer to store the adminq response from PF */ + uint16_t pend_msg; /* flags indicating events from PF not yet handled */ /* Event from pf */ bool dev_closed; @@ -301,4 +317,5 @@ int iavf_add_del_rss_cfg(struct iavf_adapter *adapter, int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, struct rte_ether_addr *mc_addrs, uint32_t mc_addrs_num, bool add); +int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num); #endif /* _IAVF_ETHDEV_H_ */ diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index a88d53ab0..33745a7b2 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -1236,7 +1236,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) } static int -iavf_check_vf_reset_done(struct iavf_hw *hw) +iavf_check_vf_reset_done(struct iavf_hw *hw, struct iavf_info *vf) { int i, reset; @@ -1253,6 +1253,10 @@ iavf_check_vf_reset_done(struct iavf_hw *hw) if (i >= IAVF_RESET_WAIT_CNT) return -1; + /* VF is not in reset or reset is completed */ + vf->vf_reset = false; + vf->pend_msg &= ~PFMSG_RESET_IMPENDING; + return 0; } @@ -1620,7 +1624,7 @@ iavf_init_vf(struct rte_eth_dev *dev) goto err; } - err = iavf_check_vf_reset_done(hw); + err = iavf_check_vf_reset_done(hw, vf); if (err) { PMD_INIT_LOG(ERR, "VF is still resetting"); goto err; @@ -1869,7 +1873,9 @@ iavf_dev_close(struct rte_eth_dev *dev) iavf_dev_stop(dev); iavf_flow_flush(dev, NULL); - iavf_flow_uninit(adapter); + /* if VF is in reset, the adminq is disabled; skip operations via adminq */ + if (!vf->vf_reset) + iavf_flow_uninit(adapter); iavf_shutdown_adminq(hw); /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index 7981dfa30..b1149ef4d 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "iavf.h" @@ -26,13 +27,14 @@ #define ASQ_DELAY_MS 10 /* Read data in admin queue to get msg from pf driver */ -static enum iavf_status +static enum iavf_aq_result iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, uint8_t *buf) { struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); struct iavf_info *vf =
IAVF_DEV_PRIVATE_TO_VF(adapter); struct iavf_arq_event_info event; + enum iavf_aq_result result = IAVF_MSG_NON; enum virtchnl_ops opcode; int ret; @@ -42,7 +44,9 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, /* Can't read any msg from adminQ */ if (ret) { PMD_DRV_LOG(DEBUG, "Can't read msg from AQ"); - return ret; + if (ret != IAVF_ERR_ADMIN_QUEUE_NO_WORK) + result = IAVF_MSG_ERR; + return result; } opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high); @@ -52,16 +56,46 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d", opcode, vf->cmd_retval); - if (opcode != vf->pend_cmd) { - if (opcode != VIRTCHNL_OP_EVENT) { - PMD_DRV_LOG(WARNING, - "command mismatch, expect %u, get %u", - vf->pend_cmd, opcode); + if (opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)event.msg_buf; + + result = IAVF_MSG_SYS; + switch (vpe->event) { + case VIRTCHNL_EVENT_LINK_CHANGE: + vf->link_up = + vpe->event_data.link_event.link_status; + vf->link_speed = + vpe->event_data.link_event.link_speed; + vf->pend_msg |= PFMSG_LINK_CHANGE; + PMD_DRV_LOG(INFO, "Link status update:%s", + vf->link_up ? "up" : "down"); + break; + case VIRTCHNL_EVENT_RESET_IMPENDING: + vf->vf_reset = true; + vf->pend_msg |= PFMSG_RESET_IMPENDING; + PMD_DRV_LOG(INFO, "vf is resetting"); + break; + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + vf->dev_closed = true; + vf->pend_msg |= PFMSG_DRIVER_CLOSE; + PMD_DRV_LOG(INFO, "PF driver closed"); + break; + default: + PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf", + __func__, vpe->event); + } + } else { + /* async reply msg on command issued by vf previously */ + result = IAVF_MSG_CMD; + if (opcode != vf->pend_cmd) { + PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u", + vf->pend_cmd, opcode); + result = IAVF_MSG_ERR; } - return IAVF_ERR_OPCODE_MISMATCH; } - return IAVF_SUCCESS; + return result; } static int @@ -69,6 +103,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args) { struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + enum iavf_aq_result result; enum iavf_status ret; int err = 0; int i = 0; @@ -97,9 +132,9 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args) case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: /* for init virtchnl ops, need to poll the response */ do { - ret = iavf_read_msg_from_pf(adapter, args->out_size, + result = iavf_read_msg_from_pf(adapter, args->out_size, args->out_buffer); - if (ret == IAVF_SUCCESS) + if (result == IAVF_MSG_CMD) break; rte_delay_ms(ASQ_DELAY_MS); } while (i++ < MAX_TRY_TIMES); @@ -111,7 +146,33 @@ } _clear_cmd(vf); break; - + case VIRTCHNL_OP_REQUEST_QUEUES: + /* + * Ignore the async reply and wait only for the system event message: + * vf_reset is set to true if VIRTCHNL_EVENT_RESET_IMPENDING is + * received; otherwise, the request queues command failed. + */ + do { + result = iavf_read_msg_from_pf(adapter, args->out_size, + args->out_buffer); + if (result == IAVF_MSG_SYS && vf->vf_reset) { + break; + } else if (result == IAVF_MSG_CMD || + result == IAVF_MSG_ERR) { + err = -1; + break; + } + rte_delay_ms(ASQ_DELAY_MS); + /* If no msg is read, or a sys event other than reset is read, continue */ + } while (i++ < MAX_TRY_TIMES); + if (i >= MAX_TRY_TIMES || + vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, "No response or return failure (%d)" " for cmd %d", vf->cmd_retval, args->ops); + } + _clear_cmd(vf); + break; default: /* For other virtchnl ops in running time, * wait for the cmd done flag. @@ -388,7 +449,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC | VIRTCHNL_VF_OFFLOAD_FDIR_PF | - VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF; + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; args.in_args = (uint8_t *)&caps; args.in_args_size = sizeof(caps); @@ -1138,3 +1200,60 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, return 0; } + +int +iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) +{ + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct virtchnl_vf_res_request vfres; + struct iavf_cmd_info args; + uint16_t num_queue_pairs; + int err; + + if (!(vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { + PMD_DRV_LOG(ERR, "request queues not supported"); + return -1; + } + + if (num == 0) { + PMD_DRV_LOG(ERR, "queue number cannot be zero"); + return -1; + } + vfres.num_queue_pairs = num; + + args.ops = VIRTCHNL_OP_REQUEST_QUEUES; + args.in_args = (u8 *)&vfres; + args.in_args_size = sizeof(vfres); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + /* + * Disable the interrupt to avoid the admin queue message being read + * before iavf_read_msg_from_pf() polls it.
+ */ + rte_intr_disable(&pci_dev->intr_handle); + err = iavf_execute_vf_cmd(ad, &args); + rte_intr_enable(&pci_dev->intr_handle); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES"); + return err; + } + + /* request queues succeeded, vf is resetting */ + if (vf->vf_reset) { + PMD_DRV_LOG(INFO, "vf is resetting"); + return 0; + } + + /* request additional queues failed, return available number */ + num_queue_pairs = + ((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs; + PMD_DRV_LOG(ERR, "request queues failed, only %u queues " "available", num_queue_pairs); + + return -1; +}

From patchwork Sun Sep 27 12:42:29 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 78918
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com, Ting Xu
Date: Sun, 27 Sep 2020 20:42:29 +0800
Message-Id: <20200927124229.595-3-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200927124229.595-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com> <20200927124229.595-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/2] net/iavf: enable large VF configuration

Add support to negotiate the large VF capability, and to configure VSI
queues, enable/disable queues and map IRQ vectors for a large VF. Use
the new virtchnl opcodes and structures to support up to 256 queue
pairs. If the currently allocated queues are not enough, request
additional queues from the PF first. Query the max RSS queue region for
future RSS configuration.
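Some context on the queue configuration below: a VIRTCHNL_OP_CONFIG_VSI_QUEUES buffer carries at most IAVF_CFG_Q_NUM_PER_BUF (32) queue pairs, so configuring up to 256 pairs takes several messages. The following stand-alone sketch shows only the chunking arithmetic the patch uses in iavf_configure_queues(); printf() stands in for building and sending each virtchnl message.

#include <stdint.h>
#include <stdio.h>

#define IAVF_CFG_Q_NUM_PER_BUF 32

static void plan_queue_cfg_msgs(uint16_t num_queue_pairs)
{
	/* Full buffers first, then one trailing partial buffer if needed */
	unsigned int nb_cmd_full = num_queue_pairs / IAVF_CFG_Q_NUM_PER_BUF;
	unsigned int nbq_inc = num_queue_pairs % IAVF_CFG_Q_NUM_PER_BUF;
	unsigned int i;

	for (i = 0; i < nb_cmd_full; i++)
		printf("msg %u: queue pairs [%u, %u)\n", i,
		       i * IAVF_CFG_Q_NUM_PER_BUF,
		       (i + 1) * IAVF_CFG_Q_NUM_PER_BUF);
	if (nbq_inc)
		printf("msg %u: queue pairs [%u, %u)\n", i,
		       i * IAVF_CFG_Q_NUM_PER_BUF,
		       i * IAVF_CFG_Q_NUM_PER_BUF + nbq_inc);
}

int main(void)
{
	plan_queue_cfg_msgs(70);	/* 2 full messages + 6 trailing pairs */
	plan_queue_cfg_msgs(256);	/* 8 full messages, no remainder */
	return 0;
}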
Signed-off-by: Ting Xu --- drivers/net/iavf/iavf.h | 23 ++- drivers/net/iavf/iavf_ethdev.c | 90 ++++++++-- drivers/net/iavf/iavf_rxtx.c | 27 ++- drivers/net/iavf/iavf_vchnl.c | 301 ++++++++++++++++++++++++++++++--- 4 files changed, 401 insertions(+), 40 deletions(-) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 1c40f9bdf..b2e896598 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -19,7 +19,10 @@ #define IAVF_FRAME_SIZE_MAX 9728 #define IAVF_QUEUE_BASE_ADDR_UNIT 128 -#define IAVF_MAX_NUM_QUEUES 16 +#define IAVF_MAX_NUM_QUEUES_DFLT 16 +#define IAVF_MAX_NUM_QUEUES_LV 256 +#define IAVF_RXTX_QUEUE_CHUNKS_NUM 2 +#define IAVF_CFG_Q_NUM_PER_BUF 32 #define IAVF_NUM_MACADDR_MAX 64 @@ -104,8 +107,10 @@ struct iavf_fdir_info { struct iavf_fdir_conf conf; }; -/* TODO: is that correct to assume the max number to be 16 ?*/ -#define IAVF_MAX_MSIX_VECTORS 16 +struct iavf_qv_map { + uint16_t queue_id; + uint16_t vector_id; +}; /* Event status from PF */ enum pending_msg { @@ -157,14 +162,16 @@ struct iavf_info { uint8_t *rss_key; uint16_t nb_msix; /* number of MSI-X interrupts on Rx */ uint16_t msix_base; /* msix vector base from */ - /* queue bitmask for each vector */ - uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS]; + uint16_t max_rss_qregion; /* max RSS queue region supported by PF */ + struct iavf_qv_map *qv_map; /* queue vector mapping */ struct iavf_flow_list flow_list; rte_spinlock_t flow_ops_lock; struct iavf_parser_list rss_parser_list; struct iavf_parser_list dist_parser_list; struct iavf_fdir_info fdir; /* flow director info */ + /* indicate large VF support enabled or not */ + bool lv_enabled; }; #define IAVF_MAX_PKT_TYPE 1024 @@ -291,13 +298,18 @@ int iavf_enable_vlan_strip(struct iavf_adapter *adapter); int iavf_disable_vlan_strip(struct iavf_adapter *adapter); int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, bool rx, bool on); +int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, + bool rx, bool on); int iavf_enable_queues(struct iavf_adapter *adapter); +int iavf_enable_queues_lv(struct iavf_adapter *adapter); int iavf_disable_queues(struct iavf_adapter *adapter); +int iavf_disable_queues_lv(struct iavf_adapter *adapter); int iavf_configure_rss_lut(struct iavf_adapter *adapter); int iavf_configure_rss_key(struct iavf_adapter *adapter); int iavf_configure_queues(struct iavf_adapter *adapter); int iavf_get_supported_rxdid(struct iavf_adapter *adapter); int iavf_config_irq_map(struct iavf_adapter *adapter); +int iavf_config_irq_map_lv(struct iavf_adapter *adapter); void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add); int iavf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete); @@ -318,4 +330,5 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, struct rte_ether_addr *mc_addrs, uint32_t mc_addrs_num, bool add); int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num); +int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter); #endif /* _IAVF_ETHDEV_H_ */ diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 33745a7b2..fdb2294a8 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -205,7 +205,7 @@ iavf_init_rss(struct iavf_adapter *adapter) rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf; nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues, - IAVF_MAX_NUM_QUEUES); + vf->max_rss_qregion); if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) { PMD_DRV_LOG(DEBUG, "RSS is not 
supported"); @@ -258,6 +258,9 @@ iavf_dev_configure(struct rte_eth_dev *dev) IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + int ret = 0; ad->rx_bulk_alloc_allowed = true; /* Initialize to TRUE. If any of Rx queues doesn't meet the @@ -269,6 +272,45 @@ iavf_dev_configure(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + /* Large VF setting */ + if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) { + if (!(vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_LARGE_NUM_QPAIRS)) { + PMD_DRV_LOG(ERR, "large VF is not supported"); + return -1; + } + + if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) { + PMD_DRV_LOG(ERR, "queue pairs number cannot be larger " + "than %u", IAVF_MAX_NUM_QUEUES_LV); + return -1; + } + + ret = iavf_request_queues(dev, num_queue_pairs); + if (ret != 0) { + PMD_DRV_LOG(ERR, "request queues from PF failed"); + return ret; + } + PMD_DRV_LOG(INFO, "change queue pairs from %u to %u", + vf->vsi_res->num_queue_pairs, num_queue_pairs); + + ret = iavf_dev_reset(dev); + if (ret != 0) + return ret; + + vf->lv_enabled = true; + } + + /* Set max RSS queue region */ + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_LARGE_NUM_QPAIRS) { + if (iavf_get_max_rss_queue_region(ad) != 0) { + PMD_INIT_LOG(ERR, "get max rss queue region failed"); + return -1; + } + } else { + vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; + } + /* Vlan stripping setting */ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) { if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) @@ -283,6 +325,7 @@ iavf_dev_configure(struct rte_eth_dev *dev) return -1; } } + return 0; } @@ -365,6 +408,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + struct iavf_qv_map *qv_map; uint16_t interval, i; int vec; @@ -385,6 +429,14 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, } } + qv_map = rte_zmalloc("qv_map", + dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0); + if (!qv_map) { + PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map", + dev->data->nb_rx_queues); + return -1; + } + if (!dev->data->dev_conf.intr_conf.rxq || !rte_intr_dp_is_en(intr_handle)) { /* Rx interrupt disabled, Map interrupt only for writeback */ @@ -415,16 +467,21 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, } IAVF_WRITE_FLUSH(hw); /* map all queues to the same interrupt */ - for (i = 0; i < dev->data->nb_rx_queues; i++) - vf->rxq_map[vf->msix_base] |= 1 << i; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + qv_map[i].queue_id = i; + qv_map[i].vector_id = vf->msix_base; + } + vf->qv_map = qv_map; } else { if (!rte_intr_allow_others(intr_handle)) { vf->nb_msix = 1; vf->msix_base = IAVF_MISC_VEC_ID; for (i = 0; i < dev->data->nb_rx_queues; i++) { - vf->rxq_map[vf->msix_base] |= 1 << i; + qv_map[i].queue_id = i; + qv_map[i].vector_id = vf->msix_base; intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID; } + vf->qv_map = qv_map; PMD_DRV_LOG(DEBUG, "vector %u are mapping to all Rx queues", vf->msix_base); @@ -437,21 +494,32 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, vf->msix_base = IAVF_RX_VEC_START; vec = IAVF_RX_VEC_START; 
for (i = 0; i < dev->data->nb_rx_queues; i++) { - vf->rxq_map[vec] |= 1 << i; + qv_map[i].queue_id = i; + qv_map[i].vector_id = vec; intr_handle->intr_vec[i] = vec++; if (vec >= vf->nb_msix) vec = IAVF_RX_VEC_START; } + vf->qv_map = qv_map; PMD_DRV_LOG(DEBUG, "%u vectors are mapping to %u Rx queues", vf->nb_msix, dev->data->nb_rx_queues); } } - if (iavf_config_irq_map(adapter)) { - PMD_DRV_LOG(ERR, "config interrupt mapping failed"); - return -1; + if (!vf->lv_enabled) { + if (iavf_config_irq_map(adapter)) { + PMD_DRV_LOG(ERR, "config interrupt mapping failed"); + return -1; + } + } else { + if (iavf_config_irq_map_lv(adapter)) { + PMD_DRV_LOG(ERR, "config interrupt mapping " + "for large VF failed"); + return -1; + } } + return 0; } @@ -515,6 +583,7 @@ iavf_dev_start(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "configure irq failed"); goto err_queue; } + /* re-enable intr again, because efd assign may change */ if (dev->data->dev_conf.intr_conf.rxq != 0) { rte_intr_disable(intr_handle); @@ -579,8 +648,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; - dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; + dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV; + dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV; dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN; dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX; dev_info->hash_key_size = vf->vf_res->rss_key_size; @@ -1658,6 +1727,7 @@ iavf_init_vf(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "iavf_get_vf_config failed"); goto err_alloc; } + /* Allocate memort for RSS info */ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { vf->rss_key = rte_zmalloc("rss_key", diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index 7b81bf8ad..67300db5b 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -720,6 +720,7 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_rx_queue *rxq; int err = 0; @@ -743,7 +744,11 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) IAVF_WRITE_FLUSH(hw); /* Ready to switch the queue on */ - err = iavf_switch_queue(adapter, rx_queue_id, true, true); + if (!vf->lv_enabled) + err = iavf_switch_queue(adapter, rx_queue_id, true, true); + else + err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true); + if (err) PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", rx_queue_id); @@ -760,6 +765,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_tx_queue *txq; int err = 0; @@ -775,7 +781,10 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) IAVF_WRITE_FLUSH(hw); /* Ready to switch the queue on */ - err = iavf_switch_queue(adapter, tx_queue_id, false, true); + if (!vf->lv_enabled) + err = iavf_switch_queue(adapter, tx_queue_id, false, true); + else + err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true); if (err) PMD_DRV_LOG(ERR, "Failed to switch TX queue %u 
on", @@ -876,14 +885,22 @@ iavf_stop_queues(struct rte_eth_dev *dev) { struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_rx_queue *rxq; struct iavf_tx_queue *txq; int ret, i; /* Stop All queues */ - ret = iavf_disable_queues(adapter); - if (ret) - PMD_DRV_LOG(WARNING, "Fail to stop queues"); + if (!vf->lv_enabled) { + ret = iavf_disable_queues(adapter); + if (ret) + PMD_DRV_LOG(WARNING, "Fail to stop queues"); + } else { + ret = iavf_disable_queues_lv(adapter); + if (ret) + PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF"); + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index b1149ef4d..2e7d6e28d 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -450,7 +450,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC | VIRTCHNL_VF_OFFLOAD_FDIR_PF | VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | + VIRTCHNL_VF_LARGE_NUM_QPAIRS; args.in_args = (uint8_t *)&caps; args.in_args_size = sizeof(caps); @@ -600,6 +601,138 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, return err; } +int +iavf_enable_queues_lv(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_del_ena_dis_queues *queue_select; + struct virtchnl_queue_chunk *queue_chunk; + struct iavf_cmd_info args; + int err, len; + + len = sizeof(struct virtchnl_del_ena_dis_queues) + + sizeof(struct virtchnl_queue_chunk) * + (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1); + queue_select = rte_zmalloc("queue_select", len, 0); + if (!queue_select) + return -ENOMEM; + + queue_chunk = queue_select->chunks.chunks; + queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM; + queue_select->vport_id = vf->vsi_res->vsi_id; + + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX; + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0; + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues = + adapter->eth_dev->data->nb_tx_queues; + + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX; + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0; + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues = + adapter->eth_dev->data->nb_rx_queues; + + args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2; + args.in_args = (u8 *)queue_select; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES_V2"); + return err; + } + return 0; +} + +int +iavf_disable_queues_lv(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_del_ena_dis_queues *queue_select; + struct virtchnl_queue_chunk *queue_chunk; + struct iavf_cmd_info args; + int err, len; + + len = sizeof(struct virtchnl_del_ena_dis_queues) + + sizeof(struct virtchnl_queue_chunk) * + (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1); + queue_select = rte_zmalloc("queue_select", len, 0); + if (!queue_select) + return -ENOMEM; + + queue_chunk = queue_select->chunks.chunks; + queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM; + queue_select->vport_id = vf->vsi_res->vsi_id; + + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX; + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0; +
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues = + adapter->eth_dev->data->nb_tx_queues; + + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX; + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0; + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues = + adapter->eth_dev->data->nb_rx_queues; + + args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2; + args.in_args = (u8 *)queue_select; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES_V2"); + return err; + } + return 0; +} + +int +iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, + bool rx, bool on) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_del_ena_dis_queues *queue_select; + struct virtchnl_queue_chunk *queue_chunk; + struct iavf_cmd_info args; + int err, len; + + len = sizeof(struct virtchnl_del_ena_dis_queues); + queue_select = rte_zmalloc("queue_select", len, 0); + if (!queue_select) + return -ENOMEM; + + queue_chunk = queue_select->chunks.chunks; + queue_select->chunks.num_chunks = 1; + queue_select->vport_id = vf->vsi_res->vsi_id; + + if (rx) { + queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX; + queue_chunk->start_queue_id = qid; + queue_chunk->num_queues = 1; + } else { + queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX; + queue_chunk->start_queue_id = qid; + queue_chunk->num_queues = 1; + } + + if (on) + args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2; + else + args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2; + args.in_args = (u8 *)queue_select; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); + return err; +} + int iavf_configure_rss_lut(struct iavf_adapter *adapter) { @@ -664,32 +797,26 @@ iavf_configure_rss_key(struct iavf_adapter *adapter) return err; } -int -iavf_configure_queues(struct iavf_adapter *adapter) +static int +iavf_exec_queue_cfg(struct iavf_adapter *adapter, + struct virtchnl_vsi_queue_config_info *vc_config, uint16_t count) { struct iavf_rx_queue **rxq = (struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues; struct iavf_tx_queue **txq = (struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues; struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); - struct virtchnl_vsi_queue_config_info *vc_config; struct virtchnl_queue_pair_info *vc_qp; struct iavf_cmd_info args; uint16_t i, size; - int err; + int err = 0; size = sizeof(*vc_config) + - sizeof(vc_config->qpair[0]) * vf->num_queue_pairs; - vc_config = rte_zmalloc("cfg_queue", size, 0); - if (!vc_config) - return -ENOMEM; + sizeof(vc_config->qpair[0]) * vc_config->num_queue_pairs; - vc_config->vsi_id = vf->vsi_res->vsi_id; - vc_config->num_queue_pairs = vf->num_queue_pairs; - - for (i = 0, vc_qp = vc_config->qpair; - i < vf->num_queue_pairs; - i++, vc_qp++) { + for (i = count * IAVF_CFG_Q_NUM_PER_BUF, vc_qp = vc_config->qpair; + i < count * IAVF_CFG_Q_NUM_PER_BUF + vc_config->num_queue_pairs; + i++, vc_qp++) { vc_qp->txq.vsi_id = vf->vsi_res->vsi_id; vc_qp->txq.queue_id = i; /* Virtchnnl configure queues by pairs */ @@ -745,8 +872,71 @@ err = iavf_execute_vf_cmd(adapter, &args); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of" - " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); + " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); + + return err; +} + +/* Configure VSI queues. The max VF queue pairs number is 256; this + * command may need to be sent multiple times to configure all queues. + */ +int +iavf_configure_queues(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_vsi_queue_config_info *vc_config = NULL; + uint16_t i, size_full, size_inc; + uint16_t nb_cmd_full, nbq_inc; + int err = 0; + + /* Compute how many times the command should be sent, + * including the commands with a full buffer and with an + * incomplete buffer. + */ + nbq_inc = vf->num_queue_pairs % IAVF_CFG_Q_NUM_PER_BUF; + nb_cmd_full = vf->num_queue_pairs / IAVF_CFG_Q_NUM_PER_BUF; + + size_full = sizeof(*vc_config) + + sizeof(vc_config->qpair[0]) * IAVF_CFG_Q_NUM_PER_BUF; + size_inc = sizeof(*vc_config) + + sizeof(vc_config->qpair[0]) * nbq_inc; + + if (!nb_cmd_full) { + vc_config = rte_zmalloc("cfg_queue", size_inc, 0); + if (!vc_config) + return -ENOMEM; + + vc_config->vsi_id = vf->vsi_res->vsi_id; + vc_config->num_queue_pairs = nbq_inc; + err = iavf_exec_queue_cfg(adapter, vc_config, 0); + goto free; + } + + vc_config = rte_zmalloc("cfg_queue", size_full, 0); + if (!vc_config) + return -ENOMEM; + vc_config->vsi_id = vf->vsi_res->vsi_id; + vc_config->num_queue_pairs = IAVF_CFG_Q_NUM_PER_BUF; + + for (i = 0; i < nb_cmd_full + (nbq_inc ?
1 : 0); i++) { + if (i >= nb_cmd_full) { + /* re-allocate virtchnl msg for fewer queues */ + rte_free(vc_config); + vc_config = rte_zmalloc("cfg_queue", size_inc, 0); + if (!vc_config) + return -ENOMEM; + + vc_config->vsi_id = vf->vsi_res->vsi_id; + vc_config->num_queue_pairs = nbq_inc; + } + + err = iavf_exec_queue_cfg(adapter, vc_config, i); + if (err) + break; + } + +free: rte_free(vc_config); return err; } @@ -768,13 +958,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter) return -ENOMEM; map_info->num_vectors = vf->nb_msix; - for (i = 0; i < vf->nb_msix; i++) { - vecmap = &map_info->vecmap[i]; + for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) { + vecmap = + &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base]; vecmap->vsi_id = vf->vsi_res->vsi_id; vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT; - vecmap->vector_id = vf->msix_base + i; + vecmap->vector_id = vf->qv_map[i].vector_id; vecmap->txq_map = 0; - vecmap->rxq_map = vf->rxq_map[vf->msix_base + i]; + vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id; } args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP; @@ -790,6 +981,46 @@ return err; } +int +iavf_config_irq_map_lv(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_queue_vector_maps *map_info; + struct virtchnl_queue_vector *qv_maps; + struct iavf_cmd_info args; + int len, i, err; + + len = sizeof(struct virtchnl_queue_vector_maps) + + sizeof(struct virtchnl_queue_vector) * + (adapter->eth_dev->data->nb_rx_queues - 1); + + map_info = rte_zmalloc("map_info", len, 0); + if (!map_info) + return -ENOMEM; + + map_info->vport_id = vf->vsi_res->vsi_id; + map_info->num_qv_maps = adapter->eth_dev->data->nb_rx_queues; + for (i = 0; i < map_info->num_qv_maps; i++) { + qv_maps = &map_info->qv_maps[i]; + qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0; + qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX; + qv_maps->queue_id = vf->qv_map[i].queue_id; + qv_maps->vector_id = vf->qv_map[i].vector_id; + } + + args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR; + args.in_args = (u8 *)map_info; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); + + rte_free(map_info); + return err; +} + void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) { @@ -1257,3 +1488,33 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) return -1; } + +int +iavf_get_max_rss_queue_region(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + uint16_t qregion_width; + int err; + + args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of " + "VIRTCHNL_OP_GET_MAX_RSS_QREGION"); + return err; + } + + qregion_width = + ((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width; + + vf->max_rss_qregion = (uint16_t)(1 << qregion_width); + + return 0; +}
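One closing note on VIRTCHNL_OP_GET_MAX_RSS_QREGION above: the PF reports qregion_width as a power-of-two exponent rather than a queue count, so the driver derives the region size as 1 << qregion_width (a width of 4 means a 16-queue RSS region, 8 means the full 256). A tiny stand-alone sketch of that conversion, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the conversion at the end of iavf_get_max_rss_queue_region() */
static uint16_t max_rss_qregion(uint16_t qregion_width)
{
	return (uint16_t)(1u << qregion_width);
}

int main(void)
{
	unsigned int w;

	for (w = 0; w <= 8; w++)
		printf("qregion_width=%u -> %u queues\n", w,
		       (unsigned int)max_rss_qregion((uint16_t)w));
	return 0;
}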