[dpdk-dev,v2,5/6] ixgbe: add Tx preparation
Commit Message
Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 3 ++
drivers/net/ixgbe/ixgbe_ethdev.h | 8 +++-
drivers/net/ixgbe/ixgbe_rxtx.c | 83 +++++++++++++++++++++++++++++++++++++-
drivers/net/ixgbe/ixgbe_rxtx.h | 2 +
4 files changed, 94 insertions(+), 2 deletions(-)
Comments
Hi Tomasz,
> ---
> drivers/net/ixgbe/ixgbe_ethdev.c | 3 ++
> drivers/net/ixgbe/ixgbe_ethdev.h | 8 +++-
> drivers/net/ixgbe/ixgbe_rxtx.c | 83 +++++++++++++++++++++++++++++++++++++-
> drivers/net/ixgbe/ixgbe_rxtx.h | 2 +
> 4 files changed, 94 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index fb618ef..1509979 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -515,6 +515,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
> .nb_max = IXGBE_MAX_RING_DESC,
> .nb_min = IXGBE_MIN_RING_DESC,
> .nb_align = IXGBE_TXD_ALIGN,
> + .nb_seg_max = IXGBE_TX_MAX_SEG,
> + .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
> };
>
> static const struct eth_dev_ops ixgbe_eth_dev_ops = { @@ -1101,6 +1103,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
> eth_dev->dev_ops = &ixgbe_eth_dev_ops;
> eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
> eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
> + eth_dev->tx_pkt_prep = &ixgbe_prep_pkts;
>
> /*
> * For secondary processes, we don't initialise any further as primary diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h
> b/drivers/net/ixgbe/ixgbe_ethdev.h
> index 4ff6338..09d96de 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> @@ -1,7 +1,7 @@
> /*-
> * BSD LICENSE
> *
> - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
> + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
> * All rights reserved.
> *
> * Redistribution and use in source and binary forms, with or without
> @@ -396,6 +396,12 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t ixgbe_xmit_pkts_simple(void
> *tx_queue, struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts);
>
> +uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts);
> +
> +uint16_t ixgbe_prep_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts);
> +
> int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
> struct rte_eth_rss_conf *rss_conf);
>
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 8a306b0..87defa0 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -1,7 +1,7 @@
> /*-
> * BSD LICENSE
> *
> - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
> + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
> * Copyright 2014 6WIND S.A.
> * All rights reserved.
> *
> @@ -71,6 +71,7 @@
> #include <rte_string_fns.h>
> #include <rte_errno.h>
> #include <rte_ip.h>
> +#include <rte_pkt.h>
>
> #include "ixgbe_logs.h"
> #include "base/ixgbe_api.h"
> @@ -906,6 +907,84 @@ end_of_tx:
>
> /*********************************************************************
> *
> + * TX prep functions
> + *
> +
> +**********************************************************************/
> +uint16_t
> +ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t
> +nb_pkts) {
> + int i, ret;
> + struct rte_mbuf *m;
> + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
> +
> + for (i = 0; i < nb_pkts; i++) {
> + m = tx_pkts[i];
> +
> + /**
> + * Check if packet meets requirements for number of segments
> + *
> + * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and non-TSO
> + */
> +
> + if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
> + rte_errno = -EINVAL;
> + return i;
> + }
> +
> + if ((m->ol_flags & PKT_TX_OFFLOAD_MASK) !=
> + (m->ol_flags & IXGBE_TX_OFFLOAD_MASK)) {
As a nit, it probably makes sense to:
#define IXGBE_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
and then here:
(m->ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK)
Might help to save few cycles.
> + rte_errno = -EINVAL;
> + return i;
> + }
> +
> +#ifdef RTE_LIBRTE_ETHDEV_DEBUG
> + ret = rte_validate_tx_offload(m);
> + if (ret != 0) {
> + rte_errno = ret;
> + return i;
> + }
> +#endif
> + ret = rte_phdr_cksum_fix(m);
We probably need to update rte_phdr_cksum_fix() to take
into account tx_offload outer lengths in case
PKT_TX_OUTER_IP_CKSUM is defined.
As both ixgbe and i40e can do it these days.
Sorry for not spotting that earlier.
> + if (ret != 0) {
> + rte_errno = ret;
> + return i;
> + }
> + }
> +
> + return i;
> +}
> +
> +/* ixgbe simple path as well as vector TX doesn't support tx offloads
> +*/ uint16_t ixgbe_prep_pkts_simple(void *tx_queue __rte_unused, struct
> +rte_mbuf **tx_pkts,
> + uint16_t nb_pkts)
> +{
> + int i;
> + struct rte_mbuf *m;
> + uint64_t ol_flags;
> +
> + for (i = 0; i < nb_pkts; i++) {
> + m = tx_pkts[i];
> + ol_flags = m->ol_flags;
> +
> + /* simple tx path doesn't support multi-segments */
> + if (m->nb_segs != 1) {
> + rte_errno = -EINVAL;
> + return i;
> + }
> +
> + /* For simple path (simple and vector) no tx offloads are supported */
> + if (ol_flags & PKT_TX_OFFLOAD_MASK) {
> + rte_errno = -EINVAL;
> + return i;
> + }
> + }
> +
> + return i;
> +}
> +
> +/*********************************************************************
> + *
> * RX functions
> *
> **********************************************************************/
> @@ -2290,6 +2369,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
> } else
> #endif
> dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> + dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
Shouldn't we setup ixgbe_prep_pkts_simple when vTX is selected too?
> } else {
> PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
> PMD_INIT_LOG(DEBUG,
> @@ -2301,6 +2381,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
> (unsigned long)txq->tx_rs_thresh,
> (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
> dev->tx_pkt_burst = ixgbe_xmit_pkts;
> + dev->tx_pkt_prep = ixgbe_prep_pkts;
> }
> }
>
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 2608b36..7bbd9b8 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -80,6 +80,8 @@
> #define RTE_IXGBE_WAIT_100_US 100
> #define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
>
> +#define IXGBE_TX_MAX_SEG 40
> +
> #define IXGBE_PACKET_TYPE_MASK_82599 0X7F
> #define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
> #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
> --
> 1.7.9.5
Hi Konstantin,
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Monday, September 19, 2016 14:55
> To: Kulasek, TomaszX <tomaszx.kulasek@intel.com>; dev@dpdk.org
> Cc: jerin.jacob@caviumnetworks.com
> Subject: RE: [dpdk-dev] [PATCH v2 5/6] ixgbe: add Tx preparation
>
>
> Hi Tomasz,
>
[...]
> > +uint16_t
> > +ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t
> > +nb_pkts) {
> > + int i, ret;
> > + struct rte_mbuf *m;
> > + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
> > +
> > + for (i = 0; i < nb_pkts; i++) {
> > + m = tx_pkts[i];
> > +
> > + /**
> > + * Check if packet meets requirements for number of segments
> > + *
> > + * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
> non-TSO
> > + */
> > +
> > + if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
> > + rte_errno = -EINVAL;
> > + return i;
> > + }
> > +
> > + if ((m->ol_flags & PKT_TX_OFFLOAD_MASK) !=
> > + (m->ol_flags & IXGBE_TX_OFFLOAD_MASK)) {
>
>
> As a nit, it probably makes sense to:
> #define IXGBE_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^
> IXGBE_TX_OFFLOAD_MASK)
>
> and then here:
> (m->ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK)
>
> Might help to save few cycles.
>
Ok.
>
> > + rte_errno = -EINVAL;
> > + return i;
> > + }
> > +
> > +#ifdef RTE_LIBRTE_ETHDEV_DEBUG
> > + ret = rte_validate_tx_offload(m);
> > + if (ret != 0) {
> > + rte_errno = ret;
> > + return i;
> > + }
> > +#endif
> > + ret = rte_phdr_cksum_fix(m);
>
> We probably need to update rte_phdr_cksum_fix() to take into account
> tx_offload outer lengths in case PKT_TX_OUTER_IP_CKSUM is defined.
> As both ixgbe and i40e can do it these days.
> Sorry for not spotting that earlier.
>
Ok.
>
> > + if (ret != 0) {
> > + rte_errno = ret;
> > + return i;
> > + }
> > + }
> > +
> > + return i;
> > +}
> > +
[...]
> >
> > **********************************************************************
> > / @@ -2290,6 +2369,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev,
> > struct ixgbe_tx_queue *txq)
> > } else
> > #endif
> > dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> > + dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
>
> Shouldn't we setup ixgbe_prep_pkts_simple when vTX is selected too?
>
It is, but source code is formatted like below:
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
> > } else {
> > PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
> > PMD_INIT_LOG(DEBUG,
> > @@ -2301,6 +2381,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev,
> struct ixgbe_tx_queue *txq)
> > (unsigned long)txq->tx_rs_thresh,
> > (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
> > dev->tx_pkt_burst = ixgbe_xmit_pkts;
> > + dev->tx_pkt_prep = ixgbe_prep_pkts;
> > }
> > }
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h
> > b/drivers/net/ixgbe/ixgbe_rxtx.h index 2608b36..7bbd9b8 100644
> > --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> > +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> > @@ -80,6 +80,8 @@
> > #define RTE_IXGBE_WAIT_100_US 100
> > #define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
> >
> > +#define IXGBE_TX_MAX_SEG 40
> > +
> > #define IXGBE_PACKET_TYPE_MASK_82599 0X7F
> > #define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
> > #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
> > --
> > 1.7.9.5
Tomasz.
>
> [...]
>
> > >
> > > ********************************************************************
> > > ** / @@ -2290,6 +2369,7 @@ ixgbe_set_tx_function(struct rte_eth_dev
> > > *dev, struct ixgbe_tx_queue *txq)
> > > } else
> > > #endif
> > > dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> > > + dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
> >
> > Shouldn't we setup ixgbe_prep_pkts_simple when vTX is selected too?
> >
>
>
> It is, but source code is formatted like below:
>
> #ifdef RTE_IXGBE_INC_VECTOR
> if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
> (rte_eal_process_type() != RTE_PROC_PRIMARY ||
> ixgbe_txq_vec_setup(txq) == 0)) {
> PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
> dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
Yep, so I thought we need a:
dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
here too, no?
Konstantin
> } else
> #endif
> dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
>
>
> > > } else {
> > > PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
> > > PMD_INIT_LOG(DEBUG,
> > > @@ -2301,6 +2381,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev,
> > struct ixgbe_tx_queue *txq)
> > > (unsigned long)txq->tx_rs_thresh,
> > > (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
> > > dev->tx_pkt_burst = ixgbe_xmit_pkts;
> > > + dev->tx_pkt_prep = ixgbe_prep_pkts;
> > > }
> > > }
> > >
> > > diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h
> > > b/drivers/net/ixgbe/ixgbe_rxtx.h index 2608b36..7bbd9b8 100644
> > > --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> > > +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> > > @@ -80,6 +80,8 @@
> > > #define RTE_IXGBE_WAIT_100_US 100
> > > #define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
> > >
> > > +#define IXGBE_TX_MAX_SEG 40
> > > +
> > > #define IXGBE_PACKET_TYPE_MASK_82599 0X7F
> > > #define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
> > > #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
> > > --
> > > 1.7.9.5
>
> Tomasz.
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Monday, September 19, 2016 4:24 PM
> To: Kulasek, TomaszX <tomaszx.kulasek@intel.com>; dev@dpdk.org
> Cc: jerin.jacob@caviumnetworks.com
> Subject: RE: [dpdk-dev] [PATCH v2 5/6] ixgbe: add Tx preparation
>
> >
> > [...]
> >
> > > >
> > > > ******************************************************************
> > > > **
> > > > ** / @@ -2290,6 +2369,7 @@ ixgbe_set_tx_function(struct
> > > > rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
> > > > } else
> > > > #endif
> > > > dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> > > > + dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
> > >
> > > Shouldn't we setup ixgbe_prep_pkts_simple when vTX is selected too?
> > >
> >
> >
> > It is, but source code is formatted like below:
> >
> > #ifdef RTE_IXGBE_INC_VECTOR
> > if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
> > (rte_eal_process_type() != RTE_PROC_PRIMARY ||
> > ixgbe_txq_vec_setup(txq) == 0)) {
> > PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
> > dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
>
> Yep, so I thought we need a:
> dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
> here too, no?
Or if we decide not to set up tx_pkt_prep at all for the simple TX path -
set NULL for both?
But I think for both simple and vector TX the tx_prep value should be the same.
Konstantin
>
> Konstantin
>
>
> > } else
> > #endif
> > dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> > dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
> >
> >
> > > > } else {
> > > > PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
> > > > PMD_INIT_LOG(DEBUG,
> > > > @@ -2301,6 +2381,7 @@ ixgbe_set_tx_function(struct rte_eth_dev
> > > > *dev,
> > > struct ixgbe_tx_queue *txq)
> > > > (unsigned long)txq->tx_rs_thresh,
> > > > (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
> > > > dev->tx_pkt_burst = ixgbe_xmit_pkts;
> > > > + dev->tx_pkt_prep = ixgbe_prep_pkts;
> > > > }
> > > > }
> > > >
> > > > diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h
> > > > b/drivers/net/ixgbe/ixgbe_rxtx.h index 2608b36..7bbd9b8 100644
> > > > --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> > > > +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> > > > @@ -80,6 +80,8 @@
> > > > #define RTE_IXGBE_WAIT_100_US 100
> > > > #define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
> > > >
> > > > +#define IXGBE_TX_MAX_SEG 40
> > > > +
> > > > #define IXGBE_PACKET_TYPE_MASK_82599 0X7F
> > > > #define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
> > > > #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
> > > > --
> > > > 1.7.9.5
> >
> > Tomasz.
@@ -515,6 +515,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = IXGBE_MAX_RING_DESC,
.nb_min = IXGBE_MIN_RING_DESC,
.nb_align = IXGBE_TXD_ALIGN,
+ .nb_seg_max = IXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -1101,6 +1103,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+ eth_dev->tx_pkt_prep = &ixgbe_prep_pkts;
/*
* For secondary processes, we don't initialise any further as primary
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -396,6 +396,12 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_prep_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* Copyright 2014 6WIND S.A.
* All rights reserved.
*
@@ -71,6 +71,7 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
+#include <rte_pkt.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -906,6 +907,84 @@ end_of_tx:
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and non-TSO
+ */
+
+ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if ((m->ol_flags & PKT_TX_OFFLOAD_MASK) !=
+ (m->ol_flags & IXGBE_TX_OFFLOAD_MASK)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_phdr_cksum_fix(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/* ixgbe simple path as well as vector TX doesn't support tx offloads */
+uint16_t
+ixgbe_prep_pkts_simple(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i;
+ struct rte_mbuf *m;
+ uint64_t ol_flags;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* simple tx path doesn't support multi-segments */
+ if (m->nb_segs != 1) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* For simple path (simple and vector) no tx offloads are supported */
+ if (ol_flags & PKT_TX_OFFLOAD_MASK) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -2290,6 +2369,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
} else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ dev->tx_pkt_prep = ixgbe_prep_pkts_simple;
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
@@ -2301,6 +2381,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_prep = ixgbe_prep_pkts;
}
}
@@ -80,6 +80,8 @@
#define RTE_IXGBE_WAIT_100_US 100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
+#define IXGBE_TX_MAX_SEG 40
+
#define IXGBE_PACKET_TYPE_MASK_82599 0X7F
#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF