[dpdk-dev] ethdev: convert Tx offloads to Tx queue config
Checks
Commit Message
Tx offloads will be converted to txq_flags automatically during
rte_eth_dev_info_get and rte_eth_tx_queue_info_get, so a PMD can
remove its txq_flags handling entirely while keeping old
applications unaffected.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
lib/librte_ethdev/rte_ethdev.c | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
Comments
On 05/03/2018 09:03 AM, Qi Zhang wrote:
> Tx offload will be converted to txq_flags automatically during
> rte_eth_dev_info_get and rte_eth_tx_queue_info_get. So PMD can
> clean the code to get rid of txq_flags at all while keep old APP
> not be impacted.
>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
It is a step in the right direction. It allows net/sfc to cut the code which
fills in txq_flags in the default TxQ config.
Tested-by: Andrew Rybchenko <arybchenko@solarflare.com>
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Thursday, May 3, 2018 7:03 AM
> To: Yigit, Ferruh <ferruh.yigit@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH] ethdev: convert Tx offloads to Tx queue config
>
> Tx offload will be converted to txq_flags automatically during
> rte_eth_dev_info_get and rte_eth_tx_queue_info_get. So PMD can
> clean the code to get rid of txq_flags at all while keep old APP
> not be impacted.
>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> lib/librte_ethdev/rte_ethdev.c | 32 ++++++++++++++++++++++++++++++++
> 1 file changed, 32 insertions(+)
>
> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
> index e5605242d..a357ee09f 100644
> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> @@ -1516,6 +1516,30 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
> }
>
> /**
> + * Convert from tx offloads to txq_flags.
> + */
> +static void
> +rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
> +{
> + uint32_t flags = 0;
> +
> + if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> + flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
> + if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
> + flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
> + if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
> + flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
> + if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
> + flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
> + if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
> + flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
> + if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
> + flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;
> +
> + *txq_flags = flags;
> +}
> +
> +/**
> * A conversion function from txq_flags API.
> */
> static void
> @@ -2359,6 +2383,7 @@ void
> rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
> {
> struct rte_eth_dev *dev;
> + struct rte_eth_txconf *txconf;
> const struct rte_eth_desc_lim lim = {
> .nb_max = UINT16_MAX,
> .nb_min = 0,
> @@ -2380,6 +2405,9 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
> dev_info->nb_tx_queues = dev->data->nb_tx_queues;
>
> dev_info->dev_flags = &dev->data->dev_flags;
> + txconf = &dev_info->default_txconf;
> + /* convert offload to txq_flags to support legacy app */
> + rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
> }
>
> int
> @@ -3799,6 +3827,7 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
> struct rte_eth_txq_info *qinfo)
> {
> struct rte_eth_dev *dev;
> + struct rte_eth_txconf *txconf = &qinfo->conf;
>
> RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
>
> @@ -3815,6 +3844,9 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
>
> memset(qinfo, 0, sizeof(*qinfo));
> dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
> + /* convert offload to txq_flags to support legacy app */
> + rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
> +
> return 0;
> }
>
> --
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> 2.13.6
On 5/3/2018 10:24 AM, Andrew Rybchenko wrote:
> On 05/03/2018 09:03 AM, Qi Zhang wrote:
>> Tx offload will be converted to txq_flags automatically during
>> rte_eth_dev_info_get and rte_eth_tx_queue_info_get. So PMD can
>> clean the code to get rid of txq_flags at all while keep old APP
>> not be impacted.
>>
>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>
> It is a step in right direction. It allows net/sfc to cut the code which
> fills in txq_flags in default TxQ config.
>
> Tested-by: Andrew Rybchenko <arybchenko@solarflare.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
On 5/4/2018 2:59 PM, Ferruh Yigit wrote:
> On 5/3/2018 10:24 AM, Andrew Rybchenko wrote:
>> On 05/03/2018 09:03 AM, Qi Zhang wrote:
>>> Tx offload will be converted to txq_flags automatically during
>>> rte_eth_dev_info_get and rte_eth_tx_queue_info_get. So PMD can
>>> clean the code to get rid of txq_flags at all while keep old APP
>>> not be impacted.
>>>
>>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>>
>> It is a step in right direction. It allows net/sfc to cut the code which
>> fills in txq_flags in default TxQ config.
>>
>> Tested-by: Andrew Rybchenko <arybchenko@solarflare.com>
>
>> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
>
> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Applied to dpdk-next-net/master, thanks.
@@ -1516,6 +1516,30 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
}
/**
+ * Convert from tx offloads to txq_flags.
+ */
+static void
+rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
+{
+ uint32_t flags = 0;
+
+ if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+ flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
+ flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
+ if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;
+
+ *txq_flags = flags;
+}
+
+/**
* A conversion function from txq_flags API.
*/
static void
@@ -2359,6 +2383,7 @@ void
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
struct rte_eth_dev *dev;
+ struct rte_eth_txconf *txconf;
const struct rte_eth_desc_lim lim = {
.nb_max = UINT16_MAX,
.nb_min = 0,
@@ -2380,6 +2405,9 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
dev_info->dev_flags = &dev->data->dev_flags;
+ txconf = &dev_info->default_txconf;
+ /* convert offload to txq_flags to support legacy app */
+ rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
}
int
@@ -3799,6 +3827,7 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo)
{
struct rte_eth_dev *dev;
+ struct rte_eth_txconf *txconf = &qinfo->conf;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -3815,6 +3844,9 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+ /* convert offload to txq_flags to support legacy app */
+ rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
+
return 0;
}