@@ -28,7 +28,7 @@
* Request TX completion every time descriptors reach this threshold since
* the previous request. Must be a power of two for performance reasons.
*/
-#define MLX5_TX_COMP_THRESH 32
+#define MLX5_TX_COMP_THRESH 32u
/*
* Request TX completion every time the total number of WQEBBs used for inlining
@@ -72,7 +72,7 @@
 * boundary, taking into account the leading Control and Ethernet
 * segments.
*/
-#define MLX5_EMPW_DEF_INLINE_LEN (3U * MLX5_WQE_SIZE + \
+#define MLX5_EMPW_DEF_INLINE_LEN (3u * MLX5_WQE_SIZE + \
MLX5_DSEG_MIN_INLINE_SIZE - \
MLX5_WQE_DSEG_SIZE)
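A worked example, assuming the usual PRM sizes (MLX5_WQE_SIZE = 64,
MLX5_WQE_DSEG_SIZE = 16 and MLX5_DSEG_MIN_INLINE_SIZE = 12; none of these
values appear in this hunk, so treat them as assumptions): the default
eMPW inline length evaluates to 3 * 64 + 12 - 16 = 188 bytes.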
/*
@@ -90,11 +90,16 @@
 * If there are not enough resources to build a minimal
 * eMPW, the sending loop exits.
*/
-#define MLX5_EMPW_MIN_PACKETS (2 + 3 * 4)
-#define MLX5_EMPW_MAX_PACKETS ((MLX5_WQE_SIZE_MAX - \
- MLX5_WQE_CSEG_SIZE - \
- MLX5_WQE_ESEG_SIZE) / \
- MLX5_WSEG_SIZE)
+#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
+/*
+ * Maximal number of packets to be sent with eMPW.
+ * This value is not recommended to exceed MLX5_TX_COMP_THRESH;
+ * otherwise up to MLX5_EMPW_MAX_PACKETS mbufs may accumulate
+ * without a CQE generation request. Multiplied by
+ * MLX5_TX_COMP_MAX_CQE, this may cause significant latency in
+ * the tx burst routine when freeing multiple mbufs at once.
+ */
+#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
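The latency concern above can be made concrete. MLX5_TX_COMP_MAX_CQE is
defined elsewhere; assuming it allows, say, 2 outstanding completion
requests (an illustrative value only), a single tx burst may have to free
up to 32 * 2 = 64 mbufs in one go. Letting an eMPW batch exceed the
threshold would raise that bound proportionally, which is why
MLX5_EMPW_MAX_PACKETS is simply tied to MLX5_TX_COMP_THRESH.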
/*
* Default packet length threshold to be inlined with
* ordinary SEND. Inlining saves the MR key search
@@ -2063,8 +2063,6 @@ enum mlx5_txcmp_code {
*
* @param txq
* Pointer to TX queue structure.
- * @param n_mbuf
- * Number of mbuf not stored yet in elts array.
* @param loc
* Pointer to burst routine local context.
* @param olx
@@ -2073,18 +2071,23 @@ enum mlx5_txcmp_code {
*/
static __rte_always_inline void
mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
- unsigned int n_mbuf,
struct mlx5_txq_local *restrict loc,
- unsigned int olx __rte_unused)
+ unsigned int olx)
{
- uint16_t head = txq->elts_head + n_mbuf;
+ uint16_t head = txq->elts_head;
+ unsigned int part;
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc->pkts_sent -
+ (MLX5_TXOFF_CONFIG(MULTI) ? loc->pkts_copy : 0);
+ head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
- (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres) {
+ (MLX5_TXOFF_CONFIG(INLINE) &&
+ (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
volatile struct mlx5_wqe *last = loc->wqe_last;
txq->elts_comp = head;
- txq->wqe_comp = txq->wqe_ci;
+ if (MLX5_TXOFF_CONFIG(INLINE))
+ txq->wqe_comp = txq->wqe_ci;
/* Request unconditional completion on last WQE. */
last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
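A minimal, self-contained sketch of the completion policy above (the
struct below is a hypothetical stand-in, not the real mlx5_txq_data;
field names mirror the ones used in the hunk):

    #include <stdint.h>
    #include <stdbool.h>

    #define SKETCH_TX_COMP_THRESH 32u /* stands in for MLX5_TX_COMP_THRESH */

    struct sketch_txq {
        uint16_t elts_head; /* mbufs stored into elts so far */
        uint16_t elts_comp; /* elts index at the last completion request */
        uint16_t wqe_ci;    /* WQEBB consumer index */
        uint16_t wqe_comp;  /* WQEBB index at the last completion request */
        uint16_t wqe_thres; /* WQEBB threshold, used when inlining */
    };

    /* Returns true when a CQE should be requested on the last WQE:
     * enough packets accumulated since the previous request, or, when
     * inlining is enabled, enough WQEBBs were consumed (an inlined
     * packet may occupy many WQEBBs, so packet count alone is not a
     * sufficient bound). The uint16_t casts keep the subtractions
     * correct across index wraparound. */
    static bool
    sketch_completion_due(const struct sketch_txq *txq, bool inline_en)
    {
        if ((uint16_t)(txq->elts_head - txq->elts_comp) >=
            SKETCH_TX_COMP_THRESH)
            return true;
        return inline_en &&
               (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres;
    }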
@@ -3023,6 +3026,8 @@ enum mlx5_txcmp_code {
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
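The (ds + 3) / 4 seen here, and repeated in the hunks below, is a ceiling
division from WQE segments to WQEBBs; a small sketch, assuming the
standard PRM sizes of 16-byte segments and 64-byte WQEBBs:

    /* Convert a data-segment count to WQEBBs: a WQEBB holds four
     * 16-byte segments, so round up. E.g. ds = 5 -> (5 + 3) / 4 = 2. */
    static inline unsigned int
    wsegs_to_wqebbs(unsigned int ds)
    {
        return (ds + 3) / 4;
    }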
@@ -3131,6 +3136,8 @@ enum mlx5_txcmp_code {
} while (true);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
@@ -3287,6 +3294,8 @@ enum mlx5_txcmp_code {
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
@@ -3496,6 +3505,8 @@ enum mlx5_txcmp_code {
--loc->elts_free;
++loc->pkts_sent;
--pkts_n;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
@@ -3637,7 +3648,7 @@ enum mlx5_txcmp_code {
struct mlx5_txq_local *restrict loc,
unsigned int ds,
unsigned int slen,
- unsigned int olx __rte_unused)
+ unsigned int olx)
{
assert(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -3652,6 +3663,8 @@ enum mlx5_txcmp_code {
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
}
/*
@@ -3694,6 +3707,8 @@ enum mlx5_txcmp_code {
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
txq->wqe_ci += (len + 3) / 4;
loc->wqe_free -= (len + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
}
/**
@@ -3865,6 +3880,7 @@ enum mlx5_txcmp_code {
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
+ pkts_n -= part;
goto next_empw;
}
/* Packet attributes match, continue the same eMPW. */
@@ -3884,6 +3900,8 @@ enum mlx5_txcmp_code {
txq->wqe_ci += (2 + part + 3) / 4;
loc->wqe_free -= (2 + part + 3) / 4;
pkts_n -= part;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
@@ -3922,10 +3940,14 @@ enum mlx5_txcmp_code {
struct mlx5_wqe_dseg *restrict dseg;
struct mlx5_wqe_eseg *restrict eseg;
enum mlx5_txcmp_code ret;
- unsigned int room, part;
+ unsigned int room, part, nlim;
unsigned int slen = 0;
-next_empw:
+	/*
+	 * Limit the number of packets in one WQE
+	 * to reduce CQE generation latency.
+	 */
+ nlim = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
	/* Check whether we have the minimal number of WQEs. */
if (unlikely(loc->wqe_free <
((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
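This bound is consistent with the definitions above: with
MLX5_EMPW_MIN_PACKETS = 2 + 3 * 4 = 14, the check requires
(2 + 14 + 3) / 4 = 4 free WQEBBs, i.e. one WQEBB carrying the Control and
Ethernet segments plus two data segments, and three more WQEBBs holding
four data segments each.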
@@ -4044,12 +4066,6 @@ enum mlx5_txcmp_code {
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_EXIT;
}
- /* Check if we have minimal room left. */
- if (room < MLX5_WQE_DSEG_SIZE) {
- part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
- goto next_empw;
- }
loc->mbuf = *pkts++;
if (likely(pkts_n > 1))
rte_prefetch0(*pkts);
@@ -4089,6 +4105,10 @@ enum mlx5_txcmp_code {
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_ERROR;
}
+ /* Check if we have minimal room left. */
+ nlim--;
+ if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
+ break;
/*
* Check whether packet parameters coincide
* within assumed eMPW batch:
@@ -4114,7 +4134,7 @@ enum mlx5_txcmp_code {
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
- goto next_empw;
+ /* Continue the loop with new eMPW session. */
}
assert(false);
}
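Removing the next_empw label turns the backward goto into a fall-through
to the next iteration of the enclosing loop, with each pass now capped at
MLX5_EMPW_MAX_PACKETS packets per eMPW session. A toy model of the
resulting batching (hypothetical names, runnable stand-alone):

    #include <stdio.h>

    #define SKETCH_EMPW_MAX 32u /* stands in for MLX5_EMPW_MAX_PACKETS */

    /* Count the eMPW sessions needed for pkts_n packets when every
     * session is capped at SKETCH_EMPW_MAX, mirroring the nlim bound. */
    static unsigned int
    sketch_empw_sessions(unsigned int pkts_n)
    {
        unsigned int sessions = 0;

        while (pkts_n) {
            unsigned int nlim =
                pkts_n < SKETCH_EMPW_MAX ? pkts_n : SKETCH_EMPW_MAX;

            pkts_n -= nlim; /* one session, up to nlim packets */
            sessions++;
        }
        return sessions;
    }

    int main(void)
    {
        /* 100 packets -> 4 sessions (32 + 32 + 32 + 4). */
        printf("%u\n", sketch_empw_sessions(100));
        return 0;
    }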
@@ -4355,6 +4375,8 @@ enum mlx5_txcmp_code {
}
++loc->pkts_sent;
--pkts_n;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
@@ -4630,9 +4652,6 @@ enum mlx5_txcmp_code {
/* Take a shortcut if nothing is sent. */
if (unlikely(loc.pkts_sent == 0))
return 0;
- /* Not all of the mbufs may be stored into elts yet. */
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
- mlx5_tx_request_completion(txq, part, &loc, olx);
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
@@ -4640,10 +4659,13 @@ enum mlx5_txcmp_code {
* processed in this thread only by the polling.
*/
mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
+ /* Not all of the mbufs may be stored into elts yet. */
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent -
+ (MLX5_TXOFF_CONFIG(MULTI) ? loc.pkts_copy : 0);
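+	/*
+	 * loc.pkts_copy counts the packets already stored into elts by
+	 * the multi-segment path, so the difference above is the tail
+	 * of single-segment packets still pending the bulk copy below.
+	 */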
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
* There are some single-segment mbufs not stored in elts.
-	 * It can be only if last packet was single-segment.
+	 * This can only happen if the last packet was single-segment.
	 * The copying is gathered into one place because it is
	 * a good opportunity to optimize it with SIMD.
* Unfortunately if inlining is enabled the gaps in