From patchwork Thu Mar 23 11:24:05 2017
X-Patchwork-Submitter: Allain Legacy
X-Patchwork-Id: 22167
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Allain Legacy
Date: Thu, 23 Mar 2017 07:24:05 -0400
Message-ID: <20170323112413.175202-7-allain.legacy@windriver.com>
X-Mailer: git-send-email 2.12.1
In-Reply-To: <20170323112413.175202-1-allain.legacy@windriver.com>
References: <1489432593-32390-1-git-send-email-allain.legacy@windriver.com>
 <20170323112413.175202-1-allain.legacy@windriver.com>
Subject: [dpdk-dev] [PATCH v5 06/14] net/avp: device configuration
List-Id: DPDK patches and discussions

Adds support for the "dev_configure" operation to allow an application to
configure the device.

Signed-off-by: Allain Legacy
Signed-off-by: Matt Peters
---
 doc/guides/nics/features/avp.ini |   4 +
 drivers/net/avp/avp_ethdev.c     | 241 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 245 insertions(+)

diff --git a/doc/guides/nics/features/avp.ini b/doc/guides/nics/features/avp.ini
index 4353929..45a2185 100644
--- a/doc/guides/nics/features/avp.ini
+++ b/doc/guides/nics/features/avp.ini
@@ -4,3 +4,7 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Link status          = Y
+VLAN offload         = Y
+Linux UIO            = Y
+x86-64               = Y
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index e937fb52..d9ad3f1 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -61,6 +61,13 @@
 
+static int avp_dev_configure(struct rte_eth_dev *dev);
+static void avp_dev_info_get(struct rte_eth_dev *dev,
+			     struct rte_eth_dev_info *dev_info);
+static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_dev_link_update(struct rte_eth_dev *dev,
+			       __rte_unused int wait_to_complete);
+
 #define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
 
@@ -98,6 +105,15 @@
 	},
 };
 
+/*
+ * dev_ops for avp, bare necessities for basic operation
+ */
+static const struct eth_dev_ops avp_eth_dev_ops = {
+	.dev_configure       = avp_dev_configure,
+	.dev_infos_get       = avp_dev_info_get,
+	.vlan_offload_set    = avp_vlan_offload_set,
+	.link_update         = avp_dev_link_update,
+};
 
 /**@{ AVP device flags */
 #define AVP_F_PROMISC (1 << 1)
@@ -183,6 +199,91 @@ struct avp_queue {
 	uint64_t errors;
 };
 
+/* send a request and wait for a response
+ *
+ * @warning must be called while holding the avp->lock spinlock.
+ */
+static int
+avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
+{
+	unsigned int retry = AVP_MAX_REQUEST_RETRY;
+	void *resp_addr = NULL;
+	unsigned int count;
+	int ret;
+
+	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
+
+	request->result = -ENOTSUP;
+
+	/* Discard any stale responses before starting a new request */
+	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
+		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
+
+	rte_memcpy(avp->sync_addr, request, sizeof(*request));
+	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
+	if (count < 1) {
+		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
+			    request->req_id);
+		ret = -EBUSY;
+		goto done;
+	}
+
+	while (retry--) {
+		/* wait for a response */
+		usleep(AVP_REQUEST_DELAY_USECS);
+
+		count = avp_fifo_count(avp->resp_q);
+		if (count >= 1) {
+			/* response received */
+			break;
+		}
+
+		if ((count < 1) && (retry == 0)) {
+			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
+				    request->req_id);
+			ret = -ETIME;
+			goto done;
+		}
+	}
+
+	/* retrieve the response */
+	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
+	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
+		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
+			    count, resp_addr, avp->host_sync_addr);
+		ret = -ENODATA;
+		goto done;
+	}
+
+	/* copy to user buffer */
+	rte_memcpy(request, avp->sync_addr, sizeof(*request));
+	ret = 0;
+
+	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
+		    request->result, request->req_id);
+
+done:
+	return ret;
+}
+
+static int
+avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
+			struct rte_avp_device_config *config)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_request request;
+	int ret;
+
+	/* setup a configure request */
+	memset(&request, 0, sizeof(request));
+	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
+	memcpy(&request.config, config, sizeof(request.config));
+
+	ret = avp_dev_process_request(avp, &request);
+
+	return ret == 0 ? request.result : ret;
+}
+
 /* translate from host physical address to guest virtual address */
 static void *
 avp_dev_translate_address(struct rte_eth_dev *eth_dev,
@@ -298,6 +399,38 @@ struct avp_queue {
 	return 0;
 }
 
+static void
+_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_device_info *host_info;
+	void *addr;
+
+	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+	host_info = (struct rte_avp_device_info *)addr;
+
+	/*
+	 * the transmit direction is not negotiated beyond respecting the max
+	 * number of queues because the host can handle arbitrary guest tx
+	 * queues (host rx queues).
+	 */
+	avp->num_tx_queues = eth_dev->data->nb_tx_queues;
+
+	/*
+	 * the receive direction is more restrictive. The host requires a
+	 * minimum number of guest rx queues (host tx queues) therefore
+	 * negotiate a value that is at least as large as the host minimum
+	 * requirement. If the host and guest values are not identical then a
+	 * mapping will be established in the receive_queue_setup function.
+	 */
+	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
+				     eth_dev->data->nb_rx_queues);
+
+	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
+		    avp->num_tx_queues, avp->num_rx_queues);
+}
+
 /*
  * create a AVP device using the supplied device info by first translating it
  * to guest address space(s).
@@ -440,6 +573,7 @@ struct avp_queue {
 	int ret;
 
 	pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	eth_dev->dev_ops = &avp_eth_dev_ops;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		/*
@@ -515,6 +649,113 @@ struct avp_queue {
 };
 
+static int
+avp_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_device_info *host_info;
+	struct rte_avp_device_config config;
+	int mask = 0;
+	void *addr;
+	int ret;
+
+	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+	host_info = (struct rte_avp_device_info *)addr;
+
+	/* Setup required number of queues */
+	_avp_set_queue_counts(eth_dev);
+
+	mask = (ETH_VLAN_STRIP_MASK |
+		ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK);
+	avp_vlan_offload_set(eth_dev, mask);
+
+	/* update device config */
+	memset(&config, 0, sizeof(config));
+	config.device_id = host_info->device_id;
+	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+	config.driver_version = AVP_DPDK_DRIVER_VERSION;
+	config.features = avp->features;
+	config.num_tx_queues = avp->num_tx_queues;
+	config.num_rx_queues = avp->num_rx_queues;
+
+	ret = avp_dev_ctrl_set_config(eth_dev, &config);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+			    ret);
+		goto unlock;
+	}
+
+	avp->flags |= AVP_F_CONFIGURED;
+	ret = 0;
+
+unlock:
+	return ret;
+}
+
+
+static int
+avp_dev_link_update(struct rte_eth_dev *eth_dev,
+		    __rte_unused int wait_to_complete)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_eth_link *link = &eth_dev->data->dev_link;
+
+	link->link_speed = ETH_SPEED_NUM_10G;
+	link->link_duplex = ETH_LINK_FULL_DUPLEX;
+	link->link_status = !!(avp->flags & AVP_F_LINKUP);
+
+	return -1;
+}
+
+
+static void
+avp_dev_info_get(struct rte_eth_dev *eth_dev,
+		 struct rte_eth_dev_info *dev_info)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+	dev_info->driver_name = "rte_avp_pmd";
+	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	dev_info->max_rx_queues = avp->max_rx_queues;
+	dev_info->max_tx_queues = avp->max_tx_queues;
+	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
+	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
+	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
+	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+	}
+}
+
+static void
+avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
+			else
+				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
+		} else {
+			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
+		}
+	}
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
+	}
+
+	if (mask & ETH_VLAN_EXTEND_MASK) {
+		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
+	}
+}
+
 RTE_PMD_REGISTER_PCI(rte_avp, rte_avp_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(rte_avp, pci_id_avp_map);
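
Note for reviewers: the new callbacks are not called directly by applications; they are
only reached through the generic ethdev API. The sketch below is illustrative and not
part of this patch; the function name, port id, and queue counts are placeholders, and
it assumes the ethdev API of the DPDK release this series targets (uint8_t port ids,
rxmode.hw_vlan_strip).

/* Illustrative usage sketch (not part of this patch): drives the new AVP
 * callbacks through the generic ethdev API.  Port id 0 and the queue counts
 * are arbitrary placeholders.
 */
#include <string.h>
#include <stdint.h>

#include <rte_ethdev.h>

static int
example_configure_avp_port(uint8_t port_id)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;
	int ret;

	/* reaches avp_dev_info_get(): queue limits and VLAN offload capabilities */
	rte_eth_dev_info_get(port_id, &dev_info);

	memset(&port_conf, 0, sizeof(port_conf));
	/* request VLAN stripping; avp_dev_configure() picks this up through
	 * avp_vlan_offload_set() and forwards it to the host in the
	 * RTE_AVP_REQ_CFG_DEVICE request
	 */
	port_conf.rxmode.hw_vlan_strip = 1;

	/* reaches avp_dev_configure(): negotiates queue counts and sends the
	 * device configuration to the host
	 */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* reaches avp_dev_link_update(): 10G full duplex, status taken from
	 * the driver's AVP_F_LINKUP flag
	 */
	rte_eth_link_get_nowait(port_id, &link);

	return link.link_status == ETH_LINK_UP ? 0 : -1;
}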