From patchwork Fri Mar 30 05:39:56 2018
X-Patchwork-Submitter: Yanglong Wu
X-Patchwork-Id: 36764
From: Yanglong Wu <yanglong.wu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, wei.dai@intel.com, beilei.xing@intel.com,
	Yanglong Wu
Date: Fri, 30 Mar 2018 13:39:56 +0800
Message-Id: <20180330053957.160367-2-yanglong.wu@intel.com>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20180330053957.160367-1-yanglong.wu@intel.com>
References: <20180328020432.169643-1-yanglong.wu@intel.com>
	<20180330053957.160367-1-yanglong.wu@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/2] net/i40e: convert to new Rx offloads API

Ethdev Rx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")

This commit supports the new Rx offloads API.
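
For context, the short sketch below shows roughly how an application is
expected to request Rx offloads under the new API: port-level offloads go
into rte_eth_conf.rxmode.offloads and per-queue offloads into
rte_eth_rxconf.offloads, replacing the legacy rxmode bit-fields
(hw_vlan_strip, hw_strip_crc, jumbo_frame, ...). It is illustrative only
and not part of this patch; port_id, mbuf_pool, the descriptor count and
the chosen DEV_RX_OFFLOAD_* flags are placeholders.

/*
 * Illustrative only: configure a port and one Rx queue through the
 * offloads-based API that this patch converts the i40e PMD to use.
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_rx_with_offloads(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_rxconf rxq_conf;
	uint64_t want = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_CRC_STRIP;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request only what the port advertises in rx_offload_capa. */
	port_conf.rxmode.offloads = want & dev_info.rx_offload_capa;
	/* In this DPDK generation this flag selects the new offloads
	 * field over the legacy rxmode bit-fields.
	 */
	port_conf.rxmode.ignore_offload_bitfield = 1;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* The check added by this patch expects the queue-level offloads
	 * to match the port-level configuration for port-only offloads;
	 * i40e reports rx_queue_offload_capa = 0 here, so pass the port
	 * value straight through.
	 */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxq_conf, mbuf_pool);
}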
Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
---
v2: Add offload request checking and rework the patch according to
    review comments.
---
v3: fix error
---
 drivers/net/i40e/i40e_ethdev.c    | 27 +++++++++++++++++++--------
 drivers/net/i40e/i40e_ethdev_vf.c | 21 +++++++++++++--------
 drivers/net/i40e/i40e_flow.c      |  3 ++-
 drivers/net/i40e/i40e_rxtx.c      | 33 ++++++++++++++++++++++++++++++---
 drivers/net/i40e/i40e_rxtx.h      |  1 +
 5 files changed, 65 insertions(+), 20 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dc473d701..06f11dd23 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
 	dev_info->max_mac_addrs = vsi->max_macaddrs;
 	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_VLAN_STRIP |
 		DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_CRC_STRIP;
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_VLAN_EXTEND |
+		DEV_RX_OFFLOAD_VLAN_FILTER;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+	int qinq = dev->data->dev_conf.rxmode.offloads &
+		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 	int ret = 0;
 
 	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_vsi *vsi = pf->main_vsi;
+	struct rte_eth_rxmode *rxmode;
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			i40e_vsi_config_vlan_filter(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
 		else
 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
 			i40e_vsi_config_double_vlan(vsi, TRUE);
 			/* Set global registers with default ethertype. */
 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_mac_filter_info mac_filter;
 	struct i40e_vsi *vsi;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	int ret;
 
 	/* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	}
 
 	rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 	else
 		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev_data->dev_conf.rxmode.jumbo_frame = 1;
+		dev_data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev_data->dev_conf.rxmode.jumbo_frame = 0;
+		dev_data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c4dae4262..cb338def9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1527,7 +1527,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
 	/* For non-DPDK PF drivers, VF has no ability to disable HW
 	 * CRC strip, and is implicitly enabled by the PF.
 	 */
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 		if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
 		    (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1561,7 +1561,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	/* Vlan stripping setting */
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
-		if (dev_conf->rxmode.hw_vlan_strip)
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			i40evf_enable_vlan_strip(dev);
 		else
 			i40evf_disable_vlan_strip(dev);
@@ -1718,7 +1718,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 	/**
 	 * Check if the jumbo frame and maximum packet length are set correctly
 	 */
-	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1738,7 +1738,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 		}
 	}
 
-	if (dev_data->dev_conf.rxmode.enable_scatter ||
+	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
 	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
@@ -2174,6 +2174,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
 	dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
 	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_VLAN_STRIP |
 		DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2181,7 +2182,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_CRC_STRIP;
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2204,6 +2207,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2634,10 +2638,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev_data->dev_conf.rxmode.jumbo_frame = 1;
+		dev_data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+		dev_data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
 	return ret;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b88baa9cd..aa658eb14 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1939,7 +1939,8 @@ static uint16_t
 i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+	int qinq = dev->data->dev_conf.rxmode.offloads &
+		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 	uint64_t reg_r = 0;
 	uint16_t reg_id;
 	uint16_t tpid;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..4fdef69c1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	return NULL;
 }
 
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	struct rte_eth_dev_info dev_info;
+	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+	uint64_t supported; /* All per port offloads */
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+	if ((requested & dev_info.rx_offload_capa) != requested)
+		return 0; /* requested range check */
+	return !((mandatory ^ requested) & supported);
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
+	struct rte_eth_dev_info dev_info;
+
+	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		dev->dev_ops->dev_infos_get(dev, &dev_info);
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-		0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 		len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
 
 	rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
-	if (data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd79233..cb5f8c714 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
 	bool rx_deferred_start; /**< don't start this queue in dev start */
 	uint16_t rx_using_sse; /**