[dpdk-dev,1/2] net/dpaa: Changes to support ethdev offload APIs

Message ID 20180409131952.20948-1-sunil.kori@nxp.com (mailing list archive)
State Superseded, archived

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      Compilation issues

Commit Message

Sunil Kumar Kori April 9, 2018, 1:19 p.m. UTC
  Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 46 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)
  

Comments

Ferruh Yigit April 10, 2018, 4:40 p.m. UTC | #1
On 4/9/2018 2:19 PM, Sunil Kumar Kori wrote:
> Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
> ---
>  drivers/net/dpaa/dpaa_ethdev.c | 46 ++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 42 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
> index db49364..efef62c 100644
> --- a/drivers/net/dpaa/dpaa_ethdev.c
> +++ b/drivers/net/dpaa/dpaa_ethdev.c
> @@ -95,6 +95,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
>  
>  static struct rte_dpaa_driver rte_dpaa_pmd;
>  
> +static void
> +dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
> +
>  static inline void
>  dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
>  {
> @@ -134,13 +137,42 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>  }
>  
>  static int
> -dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
> +dpaa_eth_dev_configure(struct rte_eth_dev *dev)
>  {
>  	struct dpaa_if *dpaa_intf = dev->data->dev_private;
> +	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t rx_offloads = eth_conf->rxmode.offloads;
> +	uint64_t tx_offloads = eth_conf->txmode.offloads;
>  
>  	PMD_INIT_FUNC_TRACE();
>  
> -	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
> +	dpaa_eth_dev_info(dev, &dev_info);

It is up to you, but you may prefer to keep [rt]x_offload_capa in a variable or
macro so that you can use it here directly without needing to call dev_info.
The current approach is also OK if you prefer it.
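
For illustration only, a macro-based variant of that suggestion might look
roughly like the following sketch; DPAA_RX_OFFLOAD_CAPA and
DPAA_TX_OFFLOAD_CAPA are hypothetical names, not part of the patch:

	/* Hypothetical: advertised offload capabilities kept in one place so
	 * both dpaa_eth_dev_info() and dpaa_eth_dev_configure() can use them
	 * without an extra dev_info call.
	 */
	#define DPAA_RX_OFFLOAD_CAPA \
		(DEV_RX_OFFLOAD_IPV4_CKSUM | \
		 DEV_RX_OFFLOAD_UDP_CKSUM | \
		 DEV_RX_OFFLOAD_TCP_CKSUM | \
		 DEV_RX_OFFLOAD_JUMBO_FRAME | \
		 DEV_RX_OFFLOAD_SCATTER)

	#define DPAA_TX_OFFLOAD_CAPA \
		(DEV_TX_OFFLOAD_IPV4_CKSUM | \
		 DEV_TX_OFFLOAD_UDP_CKSUM | \
		 DEV_TX_OFFLOAD_TCP_CKSUM | \
		 DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
		 DEV_TX_OFFLOAD_MULTI_SEGS)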

> +	if (dev_info.rx_offload_capa != rx_offloads) {
> +		DPAA_PMD_ERR("Some Rx offloads are not supported "
> +			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			rx_offloads, dev_info.rx_offload_capa);
> +		return -ENOTSUP;
> +	}
> +
> +	if (dev_info.tx_offload_capa != tx_offloads) {
> +		DPAA_PMD_ERR("Some Tx offloads are not supported "
> +			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			tx_offloads, dev_info.tx_offload_capa);
> +		return -ENOTSUP;
> +	}


dev_info.rx_offload_capa is your device's offload capability. The user may
choose to use any of these offloads or none of them, so you cannot return an
error when the requested offloads are not equal to the capability; this part
is wrong.

You only need to make sure the user is not asking for more than what is
supported.
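
As a rough sketch of that check (not the actual fix, which landed in a later
revision of this patch), the configure path only needs to reject offload bits
that fall outside the capability mask:

	/* Inside dpaa_eth_dev_configure(): reject only offloads the device
	 * cannot do; requesting a subset of the capabilities is fine.
	 */
	if (rx_offloads & ~dev_info.rx_offload_capa) {
		DPAA_PMD_ERR("Unsupported Rx offloads requested 0x%" PRIx64
			     " supported 0x%" PRIx64,
			     rx_offloads, dev_info.rx_offload_capa);
		return -ENOTSUP;
	}

	if (tx_offloads & ~dev_info.tx_offload_capa) {
		DPAA_PMD_ERR("Unsupported Tx offloads requested 0x%" PRIx64
			     " supported 0x%" PRIx64,
			     tx_offloads, dev_info.tx_offload_capa);
		return -ENOTSUP;
	}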

<...>
  

Patch

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index db49364..efef62c 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -95,6 +95,9 @@  static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -134,13 +137,42 @@  dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	dpaa_eth_dev_info(dev, &dev_info);
+	if (dev_info.rx_offload_capa != rx_offloads) {
+		DPAA_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (dev_info.tx_offload_capa != tx_offloads) {
+		DPAA_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) {
+			DPAA_PMD_ERR(" Cksum offloading is enabled by default "
+			" Cannot be disabled. So ignoring this configuration ");
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 		    DPAA_MAX_RX_PKT_LEN) {
 			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -259,11 +291,17 @@  static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa =
 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
+		DEV_RX_OFFLOAD_TCP_CKSUM)  |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+		DEV_TX_OFFLOAD_TCP_CKSUM)  |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	dev_info->default_rxconf.rx_drop_en = true;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,