[v6] net/i40e: add diagnostic support in Tx path

Message ID 20240304093321.592061-1-mingjinx.ye@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Bruce Richardson
Series [v6] net/i40e: add diagnostic support in Tx path

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/github-robot: build success github build: passed
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-sample-apps-testing success Testing PASS

Commit Message

Mingjin Ye March 4, 2024, 9:33 a.m. UTC
  Implemented a Tx wrapper to perform a thorough check on mbufs,
categorizing and counting invalid cases by type for diagnostic
purposes. The count of invalid cases is accessible through xstats_get.

Also, the devarg option "mbuf_check" is introduced to configure which
diagnostic checks are enabled.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: Check for corrupted mbuf.
 2. size: Check min/max packet length according to the HW spec.
 3. segment: Check that the number of mbuf segments does not exceed HW limits.
 4. offload: Check for use of an unsupported offload flag.

Parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
e.g.: dpdk-testpmd -a 0000:87:00.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: remove strict.
---
v3: optimised.
---
v4: rebase.
---
v5: fix ci error.
---
v6: Changes the commit log.
---
 doc/guides/nics/i40e.rst       |  14 +++
 drivers/net/i40e/i40e_ethdev.c | 138 ++++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h |  28 ++++++
 drivers/net/i40e/i40e_rxtx.c   | 153 +++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_rxtx.h   |   2 +
 5 files changed, 327 insertions(+), 8 deletions(-)
  

Comments

Bruce Richardson March 4, 2024, 11:47 a.m. UTC | #1
On Mon, Mar 04, 2024 at 09:33:21AM +0000, Mingjin Ye wrote:
> Implemented a Tx wrapper to perform a thorough check on mbufs,
> categorizing and counting invalid cases by type for diagnostic
> purposes. The count of invalid cases is accessible through xstats_get.
> 
> Also, the devarg option "mbuf_check" was introduced to configure the
> diagnostic parameters to enable the appropriate diagnostic features.
> 
> supported cases: mbuf, size, segment, offload.
>  1. mbuf: Check for corrupted mbuf.
>  2. size: Check min/max packet length according to HW spec.
>  3. segment: Check number of mbuf segments not exceed HW limits.
>  4. offload: Check for use of an unsupported offload flag.
> 
> parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
> eg: dpdk-testpmd -a 0000:87:00.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>

Review comments inline below, thanks.

This implementation seems more complex than the iavf one that I
previously reviewed and merged. In particular it makes more changes to the
TX path selection logic, so it would be worthwhile including a note about
that in the commit log.

/Bruce

> ---
> v2: remove strict.
> ---
> v3: optimised.
> ---
> v4: rebase.
> ---
> v5: fix ci error.
> ---
> v6: Changes the commit log.
> ---
>  doc/guides/nics/i40e.rst       |  14 +++
>  drivers/net/i40e/i40e_ethdev.c | 138 ++++++++++++++++++++++++++++-
>  drivers/net/i40e/i40e_ethdev.h |  28 ++++++
>  drivers/net/i40e/i40e_rxtx.c   | 153 +++++++++++++++++++++++++++++++--
>  drivers/net/i40e/i40e_rxtx.h   |   2 +
>  5 files changed, 327 insertions(+), 8 deletions(-)
> 
> diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
> index 15689ac958..91b45e1d40 100644
> --- a/doc/guides/nics/i40e.rst
> +++ b/doc/guides/nics/i40e.rst
> @@ -275,6 +275,20 @@ Runtime Configuration
>  
>    -a 84:00.0,vf_msg_cfg=80@120:180
>  
> +- ``Support TX diagnostics`` (default ``not enabled``)
> +
> +  Set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics.
> +  For example, ``-a 87:00.0,mbuf_check=<case>`` or ``-a 87:00.0,mbuf_check=[<case1>,<case2>...]``.
> +  Thereafter, ``rte_eth_xstats_get()`` can be used to get the error counts,
> +  which are collected in ``tx_mbuf_error_packets`` xstats.
> +  In testpmd these can be shown via: ``testpmd> show port xstats all``.
> +  Supported values for the ``case`` parameter are:
> +
> +  *   mbuf: Check for corrupted mbuf.
> +  *   size: Check min/max packet length according to HW spec.
> +  *   segment: Check number of mbuf segments does not exceed HW limits.
> +  *   offload: Check for use of an unsupported offload flag.
> +
>  Vector RX Pre-conditions
>  ~~~~~~~~~~~~~~~~~~~~~~~~
>  For Vector RX it is assumed that the number of descriptor rings will be a power
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 4d21341382..3e2ddcaa3e 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -48,6 +48,7 @@
>  #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
>  #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
>  #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
> +#define ETH_I40E_MBUF_CHECK_ARG       "mbuf_check"
>  
>  #define I40E_CLEAR_PXE_WAIT_MS     200
>  #define I40E_VSI_TSR_QINQ_STRIP		0x4010
> @@ -412,6 +413,7 @@ static const char *const valid_keys[] = {
>  	ETH_I40E_SUPPORT_MULTI_DRIVER,
>  	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
>  	ETH_I40E_VF_MSG_CFG,
> +	ETH_I40E_MBUF_CHECK_ARG,
>  	NULL};
>  
>  static const struct rte_pci_id pci_id_i40e_map[] = {
> @@ -545,6 +547,14 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
>  #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
>  		sizeof(rte_i40e_stats_strings[0]))
>  
> +static const struct rte_i40e_xstats_name_off i40e_mbuf_strings[] = {
> +	{"tx_mbuf_error_packets", offsetof(struct i40e_mbuf_stats,
> +		tx_pkt_errors)},
> +};
> +
> +#define I40E_NB_MBUF_XSTATS (sizeof(i40e_mbuf_strings) / \
> +		sizeof(i40e_mbuf_strings[0]))
> +
>  static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
>  	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
>  		tx_dropped_link_down)},
> @@ -1373,6 +1383,88 @@ read_vf_msg_config(__rte_unused const char *key,
>  	return 0;
>  }
>  
> +static int
> +read_mbuf_check_config(__rte_unused const char *key, const char *value, void *args)
> +{
> +	char *cur;
> +	char *tmp;
> +	int str_len;
> +	int valid_len;
> +
> +	int ret = 0;
> +	uint64_t *mc_flags = args;
> +	char *str2 = strdup(value);
> +	if (str2 == NULL)
> +		return -1;
> +
> +	str_len = strlen(str2);
> +	if (str2[0] == '[' && str2[str_len - 1] == ']') {
> +		if (str_len < 3) {
> +			ret = -1;
> +			goto mdd_end;
> +		}
> +		valid_len = str_len - 2;
> +		memmove(str2, str2 + 1, valid_len);
> +		memset(str2 + valid_len, '\0', 2);
> +	}
> +	cur = strtok_r(str2, ",", &tmp);
> +	while (cur != NULL) {
> +		if (!strcmp(cur, "mbuf"))
> +			*mc_flags |= I40E_MBUF_CHECK_F_TX_MBUF;
> +		else if (!strcmp(cur, "size"))
> +			*mc_flags |= I40E_MBUF_CHECK_F_TX_SIZE;
> +		else if (!strcmp(cur, "segment"))
> +			*mc_flags |= I40E_MBUF_CHECK_F_TX_SEGMENT;
> +		else if (!strcmp(cur, "offload"))
> +			*mc_flags |= I40E_MBUF_CHECK_F_TX_OFFLOAD;
> +		else
> +			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
> +		cur = strtok_r(NULL, ",", &tmp);
> +	}
> +
> +mdd_end:
> +	free(str2);
> +	return ret;
> +}
> +
> +static int
> +i40e_parse_mbuf_check(struct rte_eth_dev *dev)
> +{
> +	struct i40e_adapter *ad =
> +		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	struct rte_kvargs *kvlist;
> +	int kvargs_count;
> +	int ret = 0;
> +
> +	if (!dev->device->devargs)
> +		return ret;
> +
> +	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
> +	if (!kvlist)
> +		return -EINVAL;
> +
> +	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_MBUF_CHECK_ARG);
> +	if (!kvargs_count)
> +		goto free_end;
> +
> +	if (kvargs_count > 1)
> +		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
> +			    "the first invalid or last valid one is used !",
> +			    ETH_I40E_MBUF_CHECK_ARG);

Don't split error strings across lines, since it means you can't easily
grep for them when you see the error in the output.
Also, this warning message doesn't make sense to me: "first invalid or last valid"?
Can you simplify and clarify it a bit?
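
As a rough sketch of what I mean - keeping the string greppable on one
line, with wording that is just a suggestion (if I'm reading the OR-ing
handler right, values from repeated arguments simply get combined):

	if (kvargs_count > 1)
		PMD_DRV_LOG(WARNING, "Multiple \"%s\" devargs given, the parsed values are combined",
			    ETH_I40E_MBUF_CHECK_ARG);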

> +
> +	ret = rte_kvargs_process(kvlist, ETH_I40E_MBUF_CHECK_ARG,
> +				read_mbuf_check_config, &ad->mc_flags);
> +	if (ret)
> +		goto free_end;
> +
> +	if (ad->mc_flags)
> +		ad->devargs.mbuf_check = 1;
> +
> +free_end:
> +	rte_kvargs_free(kvlist);
> +	return ret;
> +}
> +
>  static int
>  i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
>  		struct i40e_vf_msg_cfg *msg_cfg)
> @@ -1488,6 +1580,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
>  	}
>  
>  	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
> +	i40e_parse_mbuf_check(dev);
>  	/* Check if need to support multi-driver */
>  	i40e_support_multi_driver(dev);
>  
> @@ -2324,6 +2417,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
>  	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>  	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>  	struct i40e_vsi *main_vsi = pf->main_vsi;
> +	struct i40e_adapter *ad =
> +		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>  	int ret, i;
>  	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
>  	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
> @@ -2483,6 +2578,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
>  	max_frame_size = dev->data->mtu ?
>  		dev->data->mtu + I40E_ETH_OVERHEAD :
>  		I40E_FRAME_SIZE_MAX;
> +	ad->max_pkt_len = max_frame_size;
>  
>  	/* Set the max frame size to HW*/
>  	i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
> @@ -3502,13 +3598,17 @@ i40e_dev_stats_reset(struct rte_eth_dev *dev)
>  	/* read the stats, reading current register values into offset */
>  	i40e_read_stats_registers(pf, hw);
>  
> +	memset(&pf->mbuf_stats, 0,
> +		sizeof(struct i40e_mbuf_stats));
> +

This line surely fits in 100 chars, and so shouldn't need to be split?
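
For example, simply:

	memset(&pf->mbuf_stats, 0, sizeof(struct i40e_mbuf_stats));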

>  	return 0;
>  }
>  
>  static uint32_t
>  i40e_xstats_calc_num(void)
>  {
> -	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
> +	return I40E_NB_ETH_XSTATS + I40E_NB_MBUF_XSTATS +
> +		I40E_NB_HW_PORT_XSTATS +
>  		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
>  		(I40E_NB_TXQ_PRIO_XSTATS * 8);
>  }
> @@ -3533,6 +3633,14 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
>  		count++;
>  	}
>  
> +	/* Get stats from i40e_mbuf_stats struct */
> +	for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
> +		strlcpy(xstats_names[count].name,
> +			i40e_mbuf_strings[i].name,
> +			sizeof(xstats_names[count].name));
> +		count++;
> +	}
> +
>  	/* Get individual stats from i40e_hw_port struct */
>  	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
>  		strlcpy(xstats_names[count].name,
> @@ -3563,12 +3671,28 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
>  	return count;
>  }
>  
> +static void
> +i40e_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
> +		struct i40e_mbuf_stats *mbuf_stats)
> +{
> +	uint16_t idx;
> +	struct i40e_tx_queue *txq;
> +
> +	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
> +		txq = ethdev->data->tx_queues[idx];
> +		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
> +	}
> +}
> +
>  static int
>  i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  		    unsigned n)
>  {
>  	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>  	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct i40e_adapter *adapter =
> +		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	struct i40e_mbuf_stats mbuf_stats = {0};
>  	unsigned i, count, prio;
>  	struct i40e_hw_port_stats *hw_stats = &pf->stats;
>  
> @@ -3583,6 +3707,9 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  
>  	count = 0;
>  
> +	if (adapter->devargs.mbuf_check)
> +		i40e_dev_update_mbuf_stats(dev, &mbuf_stats);
> +
>  	/* Get stats from i40e_eth_stats struct */
>  	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
>  		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
> @@ -3591,6 +3718,15 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  		count++;
>  	}
>  
> +	/* Get stats from i40e_mbuf_stats struct */
> +	for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
> +		xstats[count].value =
> +			*(uint64_t *)((char *)&mbuf_stats +
> +					i40e_mbuf_strings[i].offset);
> +		xstats[count].id = count;
> +		count++;
> +	}
> +
>  	/* Get individual stats from i40e_hw_port struct */
>  	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
>  		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index 445e1c0b38..ae3f2f60ac 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -1109,6 +1109,10 @@ struct i40e_vf_msg_cfg {
>  	uint32_t ignore_second;
>  };
>  
> +struct i40e_mbuf_stats {
> +	uint64_t tx_pkt_errors;
> +};
> +
>  /*
>   * Structure to store private data specific for PF instance.
>   */
> @@ -1123,6 +1127,7 @@ struct i40e_pf {
>  
>  	struct i40e_hw_port_stats stats_offset;
>  	struct i40e_hw_port_stats stats;
> +	struct i40e_mbuf_stats mbuf_stats;
>  	u64 rx_err1;	/* rxerr1 */
>  	u64 rx_err1_offset;
>  
> @@ -1225,6 +1230,25 @@ struct i40e_vsi_vlan_pvid_info {
>  #define I40E_MAX_PKT_TYPE  256
>  #define I40E_FLOW_TYPE_MAX 64
>  
> +#define I40E_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
> +#define I40E_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
> +#define I40E_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
> +#define I40E_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
> +
> +enum i40e_tx_burst_type {
> +	I40E_TX_DEFAULT,
> +	I40E_TX_SIMPLE,
> +	I40E_TX_SSE,
> +	I40E_TX_AVX2,
> +	I40E_TX_AVX512,
> +};
> +
> +/**
> + * Cache devargs parse result.
> + */
> +struct i40e_devargs {
> +	int mbuf_check;
> +};
>  /*
>   * Structure to store private data for each PF/VF instance.
>   */
> @@ -1241,6 +1265,10 @@ struct i40e_adapter {
>  	bool tx_simple_allowed;
>  	bool tx_vec_allowed;
>  
> +	struct i40e_devargs devargs;
> +	uint64_t mc_flags; /* mbuf check flags. */
> +	uint16_t max_pkt_len; /* Maximum packet length */
> +	enum i40e_tx_burst_type tx_burst_type;
>  	/* For PTP */
>  	struct rte_timecounter systime_tc;
>  	struct rte_timecounter rx_tstamp_tc;
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
> index a0bc30d45b..c7c9c945e4 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -1536,6 +1536,138 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  	return nb_tx;
>  }
>  
> +static
> +const eth_tx_burst_t i40e_tx_pkt_burst_ops[] = {
> +	[I40E_TX_DEFAULT] = i40e_xmit_pkts,
> +	[I40E_TX_SIMPLE] = i40e_xmit_pkts_simple,
> +	[I40E_TX_SSE] = i40e_xmit_pkts_vec,
> +#ifdef RTE_ARCH_X86
> +	[I40E_TX_AVX2] = i40e_xmit_pkts_vec_avx2,
> +#ifdef CC_AVX512_SUPPORT
> +	[I40E_TX_AVX512] = i40e_xmit_pkts_vec_avx512,
> +#endif
> +#endif
> +};
> +
> +/* Tx mbuf check */
> +static uint16_t
> +i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
> +	      uint16_t nb_pkts)

Strange indentation, mix of tabs and spaces that is neither a double-indent
nor lines up with the opening bracket.
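
For example, aligned with the opening bracket (or double-indented,
whichever the rest of the file uses):

static uint16_t
i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)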

> +{
> +	struct i40e_tx_queue *txq = tx_queue;
> +	uint16_t idx;
> +	uint64_t ol_flags;
> +	struct rte_mbuf *mb;
> +	bool pkt_error = false;
> +	const char *reason = NULL;
> +	uint16_t good_pkts = nb_pkts;
> +	struct i40e_adapter *adapter = txq->vsi->adapter;
> +	enum i40e_tx_burst_type tx_burst_type =
> +		txq->vsi->adapter->tx_burst_type;
> +
> +

One blank line should be enough here.

> +	for (idx = 0; idx < nb_pkts; idx++) {
> +		mb = tx_pkts[idx];
> +		ol_flags = mb->ol_flags;
> +
> +		if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_MBUF) &&
> +			(rte_mbuf_check(mb, 0, &reason) != 0)) {

This continuation indent lines up with the first line of the block and so
is really unclear. Double indent it or align with opening bracket -
whichever style is already used in this file.
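
e.g. with a double indent for the continuation (untested sketch):

		if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_MBUF) &&
				(rte_mbuf_check(mb, 0, &reason) != 0)) {
			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
			pkt_error = true;
			break;
		}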

> +			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
> +			pkt_error = true;
> +			break;
> +		}
> +
> +		if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_SIZE) &&
> +			(mb->data_len > mb->pkt_len ||
> +			mb->data_len < I40E_TX_MIN_PKT_LEN ||
> +			mb->data_len > adapter->max_pkt_len)) {

Same issue. Line continuations should never line up with the block body.

> +			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
> +			"of range, reasonable range (%d - %u)\n", mb->data_len,
> +			I40E_TX_MIN_PKT_LEN, adapter->max_pkt_len);

Don't split error messages. Also indent the line continuations.
The same comments apply to the further error messages below too.
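
e.g. something like this (untested), keeping the string whole even
though it runs long:

			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %u)\n",
					mb->data_len, I40E_TX_MIN_PKT_LEN, adapter->max_pkt_len);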

> +			pkt_error = true;
> +			break;
> +		}
> +
> +		if (adapter->mc_flags & I40E_MBUF_CHECK_F_TX_SEGMENT) {
> +			if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
> +				/**
> +				 * No TSO case: nb->segs, pkt_len to not exceed
> +				 * the limites.
> +				 */
> +				if (mb->nb_segs > I40E_TX_MAX_MTU_SEG) {
> +					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
> +					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
> +					I40E_TX_MAX_MTU_SEG);
> +					pkt_error = true;
> +					break;
> +				}
> +				if (mb->pkt_len > I40E_FRAME_SIZE_MAX) {
> +					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%d) exceeds "
> +					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
> +					I40E_FRAME_SIZE_MAX);
> +					pkt_error = true;
> +					break;
> +				}
> +			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
> +				/** TSO case: tso_segsz, nb_segs, pkt_len not exceed
> +				 * the limits.
> +				 */
> +				if (mb->tso_segsz < I40E_MIN_TSO_MSS ||
> +					mb->tso_segsz > I40E_MAX_TSO_MSS) {
> +					/**
> +					 * MSS outside the range are considered malicious
> +					 */
> +					PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
> +					"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
> +					I40E_MIN_TSO_MSS, I40E_MAX_TSO_MSS);
> +					pkt_error = true;
> +					break;
> +				}
> +				if (mb->nb_segs >
> +					((struct i40e_tx_queue *)tx_queue)->nb_tx_desc) {
> +					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
> +					"of ring length\n");
> +					pkt_error = true;
> +					break;
> +				}
> +				if (mb->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
> +					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%d) exceeds "
> +					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
> +					I40E_TSO_FRAME_SIZE_MAX);
> +					pkt_error = true;
> +					break;
> +				}
> +			}
> +		}
> +
> +		if (adapter->mc_flags & I40E_MBUF_CHECK_F_TX_OFFLOAD) {
> +			if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
> +				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
> +				"is not supported\n");
> +				pkt_error = true;
> +				break;
> +			}
> +
> +			if (!rte_validate_tx_offload(mb)) {
> +				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
> +				"setup error\n");
> +				pkt_error = true;
> +				break;
> +			}
> +		}
> +	}
> +
> +	if (pkt_error) {
> +		txq->mbuf_errors++;
> +		good_pkts = idx;
> +		if (good_pkts == 0)
> +			return 0;
> +	}
> +
> +	return i40e_tx_pkt_burst_ops[tx_burst_type](tx_queue,
> +								tx_pkts, good_pkts);
> +}
> +
>  /*********************************************************************
>   *
>   *  TX simple prep functions
> @@ -3468,6 +3600,8 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
>  {
>  	struct i40e_adapter *ad =
>  		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	enum i40e_tx_burst_type tx_burst_type = I40E_TX_DEFAULT;
> +	int mbuf_check = ad->devargs.mbuf_check;
>  	int i;
>  
>  	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> @@ -3502,34 +3636,39 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
>  #ifdef CC_AVX512_SUPPORT
>  				PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
>  					    dev->data->port_id);
> -				dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512;
> +				tx_burst_type = I40E_TX_AVX512;
>  #endif
>  			} else {
>  				PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
>  					     ad->tx_use_avx2 ? "avx2 " : "",
>  					     dev->data->port_id);
> -				dev->tx_pkt_burst = ad->tx_use_avx2 ?
> -						    i40e_xmit_pkts_vec_avx2 :
> -						    i40e_xmit_pkts_vec;
> +				tx_burst_type = ad->tx_use_avx2 ? I40E_TX_AVX2 : I40E_TX_SSE;
>  				dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
>  			}
>  #else /* RTE_ARCH_X86 */
>  			PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
>  				     dev->data->port_id);
> -			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
> +			tx_burst_type = I40E_TX_SSE;
>  			dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
>  #endif /* RTE_ARCH_X86 */
>  		} else {
>  			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
> -			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
> +			tx_burst_type = I40E_TX_SIMPLE;
>  			dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
>  		}
>  		dev->tx_pkt_prepare = i40e_simple_prep_pkts;
>  	} else {
>  		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
> -		dev->tx_pkt_burst = i40e_xmit_pkts;
> +		tx_burst_type = I40E_TX_DEFAULT;
>  		dev->tx_pkt_prepare = i40e_prep_pkts;
>  	}
> +
> +	if (mbuf_check) {
> +		ad->tx_burst_type = tx_burst_type;
> +		dev->tx_pkt_burst = i40e_xmit_pkts_check;
> +	} else {
> +		dev->tx_pkt_burst = i40e_tx_pkt_burst_ops[tx_burst_type];
> +	}
>  }
>  
>  static const struct {
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
> index 31dd947222..70320cf25e 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -162,6 +162,8 @@ struct i40e_tx_queue {
>  	uint16_t tx_next_dd;
>  	uint16_t tx_next_rs;
>  	bool q_set; /**< indicate if tx queue has been configured */
> +	uint64_t mbuf_errors;
> +
>  	bool tx_deferred_start; /**< don't start this queue in dev start */
>  	uint8_t dcb_tc;         /**< Traffic class of tx queue */
>  	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
> -- 
> 2.25.1
>
  

Patch

diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 15689ac958..91b45e1d40 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -275,6 +275,20 @@  Runtime Configuration
 
   -a 84:00.0,vf_msg_cfg=80@120:180
 
+- ``Support TX diagnostics`` (default ``not enabled``)
+
+  Set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics.
+  For example, ``-a 87:00.0,mbuf_check=<case>`` or ``-a 87:00.0,mbuf_check=[<case1>,<case2>...]``.
+  Thereafter, ``rte_eth_xstats_get()`` can be used to get the error counts,
+  which are collected in ``tx_mbuf_error_packets`` xstats.
+  In testpmd these can be shown via: ``testpmd> show port xstats all``.
+  Supported values for the ``case`` parameter are:
+
+  *   mbuf: Check for corrupted mbuf.
+  *   size: Check min/max packet length according to HW spec.
+  *   segment: Check number of mbuf segments does not exceed HW limits.
+  *   offload: Check for use of an unsupported offload flag.
+
 Vector RX Pre-conditions
 ~~~~~~~~~~~~~~~~~~~~~~~~
 For Vector RX it is assumed that the number of descriptor rings will be a power
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4d21341382..3e2ddcaa3e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -48,6 +48,7 @@ 
 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
 #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
+#define ETH_I40E_MBUF_CHECK_ARG       "mbuf_check"
 
 #define I40E_CLEAR_PXE_WAIT_MS     200
 #define I40E_VSI_TSR_QINQ_STRIP		0x4010
@@ -412,6 +413,7 @@  static const char *const valid_keys[] = {
 	ETH_I40E_SUPPORT_MULTI_DRIVER,
 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
 	ETH_I40E_VF_MSG_CFG,
+	ETH_I40E_MBUF_CHECK_ARG,
 	NULL};
 
 static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -545,6 +547,14 @@  static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
 		sizeof(rte_i40e_stats_strings[0]))
 
+static const struct rte_i40e_xstats_name_off i40e_mbuf_strings[] = {
+	{"tx_mbuf_error_packets", offsetof(struct i40e_mbuf_stats,
+		tx_pkt_errors)},
+};
+
+#define I40E_NB_MBUF_XSTATS (sizeof(i40e_mbuf_strings) / \
+		sizeof(i40e_mbuf_strings[0]))
+
 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
 	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
 		tx_dropped_link_down)},
@@ -1373,6 +1383,88 @@  read_vf_msg_config(__rte_unused const char *key,
 	return 0;
 }
 
+static int
+read_mbuf_check_config(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= I40E_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= I40E_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= I40E_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= I40E_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
+static int
+i40e_parse_mbuf_check(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct rte_kvargs *kvlist;
+	int kvargs_count;
+	int ret = 0;
+
+	if (!dev->device->devargs)
+		return ret;
+
+	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+	if (!kvlist)
+		return -EINVAL;
+
+	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_MBUF_CHECK_ARG);
+	if (!kvargs_count)
+		goto free_end;
+
+	if (kvargs_count > 1)
+		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+			    "the first invalid or last valid one is used !",
+			    ETH_I40E_MBUF_CHECK_ARG);
+
+	ret = rte_kvargs_process(kvlist, ETH_I40E_MBUF_CHECK_ARG,
+				read_mbuf_check_config, &ad->mc_flags);
+	if (ret)
+		goto free_end;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
+free_end:
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
 static int
 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
 		struct i40e_vf_msg_cfg *msg_cfg)
@@ -1488,6 +1580,7 @@  eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	}
 
 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
+	i40e_parse_mbuf_check(dev);
 	/* Check if need to support multi-driver */
 	i40e_support_multi_driver(dev);
 
@@ -2324,6 +2417,8 @@  i40e_dev_start(struct rte_eth_dev *dev)
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_vsi *main_vsi = pf->main_vsi;
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	int ret, i;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
@@ -2483,6 +2578,7 @@  i40e_dev_start(struct rte_eth_dev *dev)
 	max_frame_size = dev->data->mtu ?
 		dev->data->mtu + I40E_ETH_OVERHEAD :
 		I40E_FRAME_SIZE_MAX;
+	ad->max_pkt_len = max_frame_size;
 
 	/* Set the max frame size to HW*/
 	i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
@@ -3502,13 +3598,17 @@  i40e_dev_stats_reset(struct rte_eth_dev *dev)
 	/* read the stats, reading current register values into offset */
 	i40e_read_stats_registers(pf, hw);
 
+	memset(&pf->mbuf_stats, 0,
+		sizeof(struct i40e_mbuf_stats));
+
 	return 0;
 }
 
 static uint32_t
 i40e_xstats_calc_num(void)
 {
-	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
+	return I40E_NB_ETH_XSTATS + I40E_NB_MBUF_XSTATS +
+		I40E_NB_HW_PORT_XSTATS +
 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
 }
@@ -3533,6 +3633,14 @@  static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 		count++;
 	}
 
+	/* Get stats from i40e_mbuf_stats struct */
+	for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
+		strlcpy(xstats_names[count].name,
+			i40e_mbuf_strings[i].name,
+			sizeof(xstats_names[count].name));
+		count++;
+	}
+
 	/* Get individual stats from i40e_hw_port struct */
 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
 		strlcpy(xstats_names[count].name,
@@ -3563,12 +3671,28 @@  static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return count;
 }
 
+static void
+i40e_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct i40e_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct i40e_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int
 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned n)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_adapter *adapter =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct i40e_mbuf_stats mbuf_stats = {0};
 	unsigned i, count, prio;
 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
 
@@ -3583,6 +3707,9 @@  i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
 	count = 0;
 
+	if (adapter->devargs.mbuf_check)
+		i40e_dev_update_mbuf_stats(dev, &mbuf_stats);
+
 	/* Get stats from i40e_eth_stats struct */
 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
@@ -3591,6 +3718,15 @@  i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* Get stats from i40e_mbuf_stats struct */
+	for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
+		xstats[count].value =
+			*(uint64_t *)((char *)&mbuf_stats +
+					i40e_mbuf_strings[i].offset);
+		xstats[count].id = count;
+		count++;
+	}
+
 	/* Get individual stats from i40e_hw_port struct */
 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 445e1c0b38..ae3f2f60ac 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1109,6 +1109,10 @@  struct i40e_vf_msg_cfg {
 	uint32_t ignore_second;
 };
 
+struct i40e_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -1123,6 +1127,7 @@  struct i40e_pf {
 
 	struct i40e_hw_port_stats stats_offset;
 	struct i40e_hw_port_stats stats;
+	struct i40e_mbuf_stats mbuf_stats;
 	u64 rx_err1;	/* rxerr1 */
 	u64 rx_err1_offset;
 
@@ -1225,6 +1230,25 @@  struct i40e_vsi_vlan_pvid_info {
 #define I40E_MAX_PKT_TYPE  256
 #define I40E_FLOW_TYPE_MAX 64
 
+#define I40E_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define I40E_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define I40E_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define I40E_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
+enum i40e_tx_burst_type {
+	I40E_TX_DEFAULT,
+	I40E_TX_SIMPLE,
+	I40E_TX_SSE,
+	I40E_TX_AVX2,
+	I40E_TX_AVX512,
+};
+
+/**
+ * Cache devargs parse result.
+ */
+struct i40e_devargs {
+	int mbuf_check;
+};
 /*
  * Structure to store private data for each PF/VF instance.
  */
@@ -1241,6 +1265,10 @@  struct i40e_adapter {
 	bool tx_simple_allowed;
 	bool tx_vec_allowed;
 
+	struct i40e_devargs devargs;
+	uint64_t mc_flags; /* mbuf check flags. */
+	uint16_t max_pkt_len; /* Maximum packet length */
+	enum i40e_tx_burst_type tx_burst_type;
 	/* For PTP */
 	struct rte_timecounter systime_tc;
 	struct rte_timecounter rx_tstamp_tc;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index a0bc30d45b..c7c9c945e4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1536,6 +1536,138 @@  i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+static
+const eth_tx_burst_t i40e_tx_pkt_burst_ops[] = {
+	[I40E_TX_DEFAULT] = i40e_xmit_pkts,
+	[I40E_TX_SIMPLE] = i40e_xmit_pkts_simple,
+	[I40E_TX_SSE] = i40e_xmit_pkts_vec,
+#ifdef RTE_ARCH_X86
+	[I40E_TX_AVX2] = i40e_xmit_pkts_vec_avx2,
+#ifdef CC_AVX512_SUPPORT
+	[I40E_TX_AVX512] = i40e_xmit_pkts_vec_avx512,
+#endif
+#endif
+};
+
+/* Tx mbuf check */
+static uint16_t
+i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	struct i40e_tx_queue *txq = tx_queue;
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	bool pkt_error = false;
+	const char *reason = NULL;
+	uint16_t good_pkts = nb_pkts;
+	struct i40e_adapter *adapter = txq->vsi->adapter;
+	enum i40e_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 0, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len > mb->pkt_len ||
+			mb->data_len < I40E_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			I40E_TX_MIN_PKT_LEN, adapter->max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & I40E_MBUF_CHECK_F_TX_SEGMENT) {
+			if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+				/**
+				 * No TSO case: nb->segs, pkt_len to not exceed
+				 * the limites.
+				 */
+				if (mb->nb_segs > I40E_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					I40E_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+				if (mb->pkt_len > I40E_FRAME_SIZE_MAX) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					I40E_FRAME_SIZE_MAX);
+					pkt_error = true;
+					break;
+				}
+			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+				/** TSO case: tso_segsz, nb_segs, pkt_len not exceed
+				 * the limits.
+				 */
+				if (mb->tso_segsz < I40E_MIN_TSO_MSS ||
+					mb->tso_segsz > I40E_MAX_TSO_MSS) {
+					/**
+					 * MSS outside the range are considered malicious
+					 */
+					PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+					"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+					I40E_MIN_TSO_MSS, I40E_MAX_TSO_MSS);
+					pkt_error = true;
+					break;
+				}
+				if (mb->nb_segs >
+					((struct i40e_tx_queue *)tx_queue)->nb_tx_desc) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+					"of ring length\n");
+					pkt_error = true;
+					break;
+				}
+				if (mb->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					I40E_TSO_FRAME_SIZE_MAX);
+					pkt_error = true;
+					break;
+				}
+			}
+		}
+
+		if (adapter->mc_flags & I40E_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return i40e_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /*********************************************************************
  *
  *  TX simple prep functions
@@ -3468,6 +3600,8 @@  i40e_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum i40e_tx_burst_type tx_burst_type = I40E_TX_DEFAULT;
+	int mbuf_check = ad->devargs.mbuf_check;
 	int i;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -3502,34 +3636,39 @@  i40e_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 				PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512;
+				tx_burst_type = I40E_TX_AVX512;
 #endif
 			} else {
 				PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
 					     ad->tx_use_avx2 ? "avx2 " : "",
 					     dev->data->port_id);
-				dev->tx_pkt_burst = ad->tx_use_avx2 ?
-						    i40e_xmit_pkts_vec_avx2 :
-						    i40e_xmit_pkts_vec;
+				tx_burst_type = ad->tx_use_avx2 ? I40E_TX_AVX2 : I40E_TX_SSE;
 				dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
 			}
 #else /* RTE_ARCH_X86 */
 			PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
 				     dev->data->port_id);
-			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+			tx_burst_type = I40E_TX_SSE;
 			dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
 #endif /* RTE_ARCH_X86 */
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
-			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+			tx_burst_type = I40E_TX_SIMPLE;
 			dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
 		}
 		dev->tx_pkt_prepare = i40e_simple_prep_pkts;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
-		dev->tx_pkt_burst = i40e_xmit_pkts;
+		tx_burst_type = I40E_TX_DEFAULT;
 		dev->tx_pkt_prepare = i40e_prep_pkts;
 	}
+
+	if (mbuf_check) {
+		ad->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = i40e_xmit_pkts_check;
+	} else {
+		dev->tx_pkt_burst = i40e_tx_pkt_burst_ops[tx_burst_type];
+	}
 }
 
 static const struct {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 31dd947222..70320cf25e 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -162,6 +162,8 @@  struct i40e_tx_queue {
 	uint16_t tx_next_dd;
 	uint16_t tx_next_rs;
 	bool q_set; /**< indicate if tx queue has been configured */
+	uint64_t mbuf_errors;
+
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
 	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */