[v4,5/8] net/mlx5: add Tx datapath configuration and setup
Commit Message
This patch updates the Tx datapath control and configuration
structures and code for managing Tx datapath settings.
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/net/mlx5/mlx5.c | 123 +++++++++++++++++++++++++
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_rxtx.c | 4 +-
drivers/net/mlx5/mlx5_rxtx.h | 56 ++++++++---
drivers/net/mlx5/mlx5_txq.c | 215 ++++++++++++++++++++++++++++++++++++++++---
5 files changed, 370 insertions(+), 30 deletions(-)
Comments
> On Jul 21, 2019, at 7:24 AM, Viacheslav Ovsiienko <viacheslavo@mellanox.com> wrote:
>
> This patch updates the Tx datapath control and configuration
> structures and code for managing Tx datapath settings.
>
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>
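
For reference, the minimal inline data size resolved by mlx5_set_min_inline() in this patch can still be overridden through the txq_inline_min devargs key mentioned in its comment. A purely illustrative testpmd invocation (the PCI address and value are placeholders, not taken from this patch) would look like:

    testpmd -w 0000:82:00.0,txq_inline_min=18 -- --txq=4 --rxq=4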
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index bbf2583..37d3c08 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1185,6 +1185,127 @@ struct mlx5_dev_spawn_data {
}
/**
+ * Configures the minimal amount of data to inline into WQE
+ * while sending packets.
+ *
+ * - the txq_inline_min has the maximal priority, if this
+ * key is specified in devargs
+ * - if DevX is enabled the inline mode is queried from the
+ * device (HCA attributes and NIC vport context if needed).
+ * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX
+ * and none (0 bytes) for other NICs
+ *
+ * @param spawn
+ * Verbs device parameters (name, port, switch_info) to spawn.
+ * @param config
+ * Device configuration parameters.
+ */
+static void
+mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
+ struct mlx5_dev_config *config)
+{
+ if (config->txq_inline_min != MLX5_ARG_UNSET) {
+ /* Application defines size of inlined data explicitly. */
+ switch (spawn->pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
+ if (config->txq_inline_min <
+ (int)MLX5_INLINE_HSIZE_L2) {
+ DRV_LOG(DEBUG,
+ "txq_inline_mix aligned to minimal"
+ " ConnectX-4 required value %d",
+ (int)MLX5_INLINE_HSIZE_L2);
+ config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
+ }
+ break;
+ }
+ goto exit;
+ }
+ if (config->hca_attr.eth_net_offloads) {
+ /* We have DevX enabled, inline mode queried successfully. */
+ switch (config->hca_attr.wqe_inline_mode) {
+ case MLX5_CAP_INLINE_MODE_L2:
+ /* outer L2 header must be inlined. */
+ config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
+ goto exit;
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ /* No inline data are required by NIC. */
+ config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
+ config->hw_vlan_insert =
+ config->hca_attr.wqe_vlan_insert;
+ DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
+ goto exit;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ /* inline mode is defined by NIC vport context. */
+ if (!config->hca_attr.eth_virt)
+ break;
+ switch (config->hca_attr.vport_inline_mode) {
+ case MLX5_INLINE_MODE_NONE:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_NONE;
+ goto exit;
+ case MLX5_INLINE_MODE_L2:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_L2;
+ goto exit;
+ case MLX5_INLINE_MODE_IP:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_L3;
+ goto exit;
+ case MLX5_INLINE_MODE_TCP_UDP:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_L4;
+ goto exit;
+ case MLX5_INLINE_MODE_INNER_L2:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_INNER_L2;
+ goto exit;
+ case MLX5_INLINE_MODE_INNER_IP:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_INNER_L3;
+ goto exit;
+ case MLX5_INLINE_MODE_INNER_TCP_UDP:
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_INNER_L4;
+ goto exit;
+ }
+ }
+ }
+ /*
+ * We get here if we are unable to deduce
+ * inline data size with DevX. Try PCI ID
+ * to determine old NICs.
+ */
+ switch (spawn->pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
+ config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
+ config->hw_vlan_insert = 0;
+ break;
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ /*
+ * These NICs support VLAN insertion from WQE and
+ * report the wqe_vlan_insert flag. But there is the bug
+ * and PFC control may be broken, so disable feature.
+ */
+ config->hw_vlan_insert = 0;
+ break;
+ default:
+ config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
+ break;
+ }
+exit:
+ DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
+}
+
+/**
* Spawn an Ethernet device from Verbs information.
*
* @param dpdk_dev
@@ -1677,6 +1798,8 @@ struct mlx5_dev_spawn_data {
#else
config.dv_esw_en = 0;
#endif
+ /* Detect minimal data bytes to inline. */
+ mlx5_set_min_inline(spawn, &config);
/* Store device configuration on private structure. */
priv->config = config;
if (config.dv_flow_en) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6ea9d4e..7aad94d 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -194,6 +194,7 @@ struct mlx5_hca_attr {
struct mlx5_dev_config {
unsigned int hw_csum:1; /* Checksum offload is supported. */
unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+ unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int vf:1; /* This is a VF. */
@@ -203,7 +204,6 @@ struct mlx5_dev_config {
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int cqe_pad:1; /* CQE padding is enabled. */
unsigned int tso:1; /* Whether TSO is supported. */
- unsigned int tx_inline:1; /* Engage TX data inlining. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int mr_ext_memseg_en:1;
/* Whether memseg should be extended for MR creation. */
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index f2d6918..13f9431 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -507,7 +507,7 @@
MKSTR(err_str, "Unexpected CQE error syndrome "
"0x%02x CQN = %u SQN = %u wqe_counter = %u "
"wq_ci = %u cq_ci = %u", err_cqe->syndrome,
- txq_ctrl->cqn, txq->qp_num_8s >> 8,
+ txq->cqe_s, txq->qp_num_8s >> 8,
rte_be_to_cpu_16(err_cqe->wqe_counter),
txq->wqe_ci, txq->cq_ci);
MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
@@ -516,7 +516,7 @@
mlx5_dump_debug_information(name, NULL, err_str, 0);
mlx5_dump_debug_information(name, "MLX5 Error CQ:",
(const void *)((uintptr_t)
- &(*txq->cqes)[0]),
+ txq->cqes),
sizeof(*err_cqe) *
(1 << txq->cqe_n));
mlx5_dump_debug_information(name, "MLX5 Error SQ:",
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index acde09d..03ddd9e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -188,37 +188,61 @@ struct mlx5_hrxq {
uint8_t rss_key[]; /* Hash key. */
};
+/* TX queue send local data. */
+__extension__
+struct mlx5_txq_local {
+ struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
+ struct rte_mbuf *mbuf; /* first mbuf to process. */
+ uint16_t pkts_copy; /* packets copied to elts. */
+ uint16_t pkts_sent; /* packets sent. */
+ uint16_t elts_free; /* available elts remain. */
+ uint16_t wqe_free; /* available wqe remain. */
+ uint16_t mbuf_off; /* data offset in current mbuf. */
+ uint16_t mbuf_nseg; /* number of remaining mbuf. */
+};
+
/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
uint16_t elts_head; /* Current counter in (*elts)[]. */
uint16_t elts_tail; /* Counter of first element awaiting completion. */
- uint16_t elts_comp; /* Counter since last completion request. */
- uint16_t mpw_comp; /* WQ index since last completion request. */
+ uint16_t elts_comp; /* elts index since last completion request. */
+ uint16_t elts_s; /* Number of mbuf elements. */
+ uint16_t elts_m; /* Mask for mbuf elements indices. */
+ /* Fields related to elts mbuf storage. */
+ uint16_t wqe_ci; /* Consumer index for work queue. */
+ uint16_t wqe_pi; /* Producer index for work queue. */
+ uint16_t wqe_s; /* Number of WQ elements. */
+ uint16_t wqe_m; /* Mask Number for WQ elements. */
+ uint16_t wqe_comp; /* WQE index since last completion request. */
+ uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
+ /* WQ related fields. */
uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
- uint16_t cq_pi; /* Producer index for completion queue. */
+ uint16_t cq_pi; /* Counter of issued CQE "always" requests. */
#endif
- uint16_t wqe_ci; /* Consumer index for work queue. */
- uint16_t wqe_pi; /* Producer index for work queue. */
- uint16_t elts_n:4; /* (*elts)[] length (in log2). */
+ uint16_t cqe_s; /* Number of CQ elements. */
+ uint16_t cqe_m; /* Mask for CQ indices. */
+ /* CQ related fields. */
+ uint16_t elts_n:4; /* elts[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
- uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
+ uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set TX offload for tunneled packets are supported. */
uint16_t swp_en:1; /* Whether SW parser is enabled. */
- uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
- uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
- uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
+ uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
+ uint16_t inlen_send; /* Ordinary send data inline size. */
+ uint16_t inlen_empw; /* eMPW max packet size to inline. */
+ uint16_t inlen_mode; /* Minimal data length to inline. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
uint64_t offloads; /* Offloads for Tx Queue. */
struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
- volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
- volatile void *wqes; /* Work queue (use volatile to write into). */
+ struct mlx5_wqe *wqes; /* Work queue. */
+ struct mlx5_wqe *wqes_end; /* Work queue array limit. */
+ volatile struct mlx5_cqe *cqes; /* Completion queue. */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
- struct rte_mbuf *(*elts)[]; /* TX elements. */
uint16_t port_id; /* Port ID of device. */
uint16_t idx; /* Queue index. */
struct mlx5_txq_stats stats; /* TX queue counters. */
@@ -226,6 +250,8 @@ struct mlx5_txq_data {
rte_spinlock_t *uar_lock;
/* UAR access lock required for 32bit implementations */
#endif
+ struct rte_mbuf *elts[0];
+ /* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;
/* Verbs Rx queue elements. */
@@ -239,7 +265,6 @@ struct mlx5_txq_ibv {
/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
- struct mlx5_txq_data txq; /* Data path structure. */
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
@@ -249,8 +274,9 @@ struct mlx5_txq_ctrl {
struct mlx5_priv *priv; /* Back pointer to private data. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
void *bf_reg; /* BlueFlame register from Verbs. */
- uint32_t cqn; /* CQ number. */
uint16_t dump_file_n; /* Number of dump files. */
+ struct mlx5_txq_data txq; /* Data path structure. */
+ /* Must be the last field in the structure, contains elts[]. */
};
#define MLX5_TX_BFREG(txq) \
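
The structure reordering above makes elts[] the trailing flexible array of mlx5_txq_data and txq the last member of mlx5_txq_ctrl, so the control block, the datapath block and the mbuf pointer storage can share one contiguous allocation. A minimal sketch of the size computation this enables, assuming the definitions above; the helper name is illustrative and not part of the patch:

static inline size_t
mlx5_txq_ctrl_alloc_size(uint16_t desc)
{
	/* Control block ends with txq; txq ends with elts[desc]. */
	return sizeof(struct mlx5_txq_ctrl) +
	       (size_t)desc * sizeof(struct rte_mbuf *);
}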
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 55892e2..2f3aa5b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -47,7 +47,7 @@
unsigned int i;
for (i = 0; (i != elts_n); ++i)
- (*txq_ctrl->txq.elts)[i] = NULL;
+ txq_ctrl->txq.elts[i] = NULL;
DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
txq_ctrl->txq.elts_head = 0;
@@ -68,7 +68,7 @@
const uint16_t elts_m = elts_n - 1;
uint16_t elts_head = txq_ctrl->txq.elts_head;
uint16_t elts_tail = txq_ctrl->txq.elts_tail;
- struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
+ struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
@@ -411,7 +411,8 @@ struct mlx5_txq_ibv *
attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
- cqe_n = desc / MLX5_TX_COMP_THRESH + 1;
+ cqe_n = desc / MLX5_TX_COMP_THRESH +
+ 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
@@ -449,7 +450,7 @@ struct mlx5_txq_ibv *
.pd = priv->sh->pd,
.comp_mask = IBV_QP_INIT_ATTR_PD,
};
- if (txq_data->max_inline)
+ if (txq_data->inlen_send)
attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
if (txq_data->tso_en) {
attr.init.max_tso_header = txq_ctrl->max_tso_header;
@@ -523,25 +524,29 @@ struct mlx5_txq_ibv *
goto error;
}
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ txq_data->cqe_s = 1 << txq_data->cqe_n;
+ txq_data->cqe_m = txq_data->cqe_s - 1;
txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
txq_data->wqes = qp.sq.buf;
txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
+ txq_data->wqe_s = 1 << txq_data->wqe_n;
+ txq_data->wqe_m = txq_data->wqe_s - 1;
+ txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
txq_data->cq_db = cq_info.dbrec;
- txq_data->cqes =
- (volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq_info.buf;
+ txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
txq_data->cq_ci = 0;
#ifndef NDEBUG
txq_data->cq_pi = 0;
#endif
txq_data->wqe_ci = 0;
txq_data->wqe_pi = 0;
+ txq_data->wqe_comp = 0;
+ txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
txq_ibv->qp = tmpl.qp;
txq_ibv->cq = tmpl.cq;
rte_atomic32_inc(&txq_ibv->refcnt);
txq_ctrl->bf_reg = qp.bf.reg;
- txq_ctrl->cqn = cq_info.cqn;
txq_uar_init(txq_ctrl);
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
@@ -663,7 +668,11 @@ struct mlx5_txq_ibv *
unsigned int wqe_size;
const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
- wqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;
+ wqe_size = MLX5_WQE_CSEG_SIZE +
+ MLX5_WQE_ESEG_SIZE +
+ MLX5_WSEG_SIZE -
+ MLX5_ESEG_MIN_INLINE_SIZE +
+ txq_ctrl->max_inline_data;
return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}
@@ -676,7 +685,189 @@ struct mlx5_txq_ibv *
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
- (void)txq_ctrl;
+ struct mlx5_priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
+ unsigned int inlen_send; /* Inline data for ordinary SEND.*/
+ unsigned int inlen_empw; /* Inline data for enhanced MPW. */
+ unsigned int inlen_mode; /* Minimal required Inline data. */
+ unsigned int txqs_inline; /* Min Tx queues to enable inline. */
+ uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
+ bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ bool vlan_inline;
+ unsigned int temp;
+
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline =
+#if defined(RTE_ARCH_ARM64)
+ (priv->sh->pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
+ MLX5_INLINE_MAX_TXQS_BLUEFIELD :
+#endif
+ MLX5_INLINE_MAX_TXQS;
+ else
+ txqs_inline = (unsigned int)config->txqs_inline;
+ inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
+ MLX5_SEND_DEF_INLINE_LEN :
+ (unsigned int)config->txq_inline_max;
+ inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
+ MLX5_EMPW_DEF_INLINE_LEN :
+ (unsigned int)config->txq_inline_mpw;
+ inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
+ 0 : (unsigned int)config->txq_inline_min;
+ if (config->mps != MLX5_MPW_ENHANCED)
+ inlen_empw = 0;
+ /*
+ * If there is requested minimal amount of data to inline
+ * we MUST enable inlining. This is a case for ConnectX-4
+ * which usually requires L2 inlined for correct operating
+ * and ConnectX-4LX which requires L2-L4 inlined to
+ * support E-Switch Flows.
+ */
+ if (inlen_mode) {
+ if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
+ /*
+ * Optimize minimal inlining for single
+ * segment packets to fill one WQEBB
+ * without gaps.
+ */
+ temp = MLX5_ESEG_MIN_INLINE_SIZE;
+ } else {
+ temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
+ temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
+ MLX5_ESEG_MIN_INLINE_SIZE;
+ temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
+ }
+ if (temp != inlen_mode) {
+ DRV_LOG(INFO,
+ "port %u minimal required inline setting"
+ " aligned from %u to %u",
+ PORT_ID(priv), inlen_mode, temp);
+ inlen_mode = temp;
+ }
+ }
+ /*
+ * If port is configured to support VLAN insertion and device
+ * does not support this feature by HW (for NICs before ConnectX-5
+ * or in case of wqe_vlan_insert flag is not set) we must enable
+ * data inline on all queues because it is supported by single
+ * tx_burst routine.
+ */
+ txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
+ vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+ !config->hw_vlan_insert;
+ if (vlan_inline)
+ inlen_send = RTE_MAX(inlen_send, MLX5_ESEG_MIN_INLINE_SIZE);
+ /*
+ * If there are few Tx queues it is prioritized
+ * to save CPU cycles and disable data inlining at all.
+ */
+ if ((inlen_send && priv->txqs_n >= txqs_inline) || vlan_inline) {
+ /*
+ * The data sent with ordinal MLX5_OPCODE_SEND
+ * may be inlined in Ethernet Segment, align the
+ * length accordingly to fit entire WQEBBs.
+ */
+ temp = (inlen_send / MLX5_WQE_SIZE) * MLX5_WQE_SIZE +
+ MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
+ temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
+ MLX5_ESEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE * 2);
+ temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
+ temp = RTE_MAX(temp, inlen_mode);
+ if (temp != inlen_send) {
+ DRV_LOG(INFO,
+ "port %u ordinary send inline setting"
+ " aligned from %u to %u",
+ PORT_ID(priv), inlen_send, temp);
+ inlen_send = temp;
+ }
+ /*
+ * Not aligned to cache lines, but to WQEs.
+ * First bytes of data (initial alignment)
+ * is going to be copied explicitly at the
+ * beginning of inlining buffer in Ethernet
+ * Segment.
+ */
+ assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ assert(inlen_send <= MLX5_WQE_SIZE_MAX +
+ MLX5_ESEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE * 2);
+ txq_ctrl->txq.inlen_send = inlen_send;
+ txq_ctrl->txq.inlen_mode = inlen_mode;
+ txq_ctrl->txq.inlen_empw = 0;
+ } else {
+ /*
+ * If minimal inlining is requested we must
+ * enable inlining in general, despite the
+ * number of configured queues.
+ */
+ inlen_send = inlen_mode;
+ if (inlen_mode) {
+ /*
+ * Extend space for inline data to allow
+ * optional alignment of data buffer
+ * start address, it may improve PCIe
+ * performance.
+ */
+ inlen_send = RTE_MIN(inlen_send + MLX5_WQE_SIZE,
+ MLX5_SEND_MAX_INLINE_LEN);
+ }
+ txq_ctrl->txq.inlen_send = inlen_send;
+ txq_ctrl->txq.inlen_mode = inlen_mode;
+ txq_ctrl->txq.inlen_empw = 0;
+ inlen_send = 0;
+ inlen_empw = 0;
+ }
+ if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
+ /*
+ * The data sent with MLX5_OPCODE_ENHANCED_MPSW
+ * may be inlined in Data Segment, align the
+ * length accordingly to fit entire WQEBBs.
+ */
+ temp = (inlen_empw + MLX5_WQE_SIZE - 1) / MLX5_WQE_SIZE;
+ temp = temp * MLX5_WQE_SIZE +
+ MLX5_DSEG_MIN_INLINE_SIZE - MLX5_WQE_DSEG_SIZE;
+ temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
+ MLX5_DSEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE);
+ temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
+ if (temp != inlen_empw) {
+ DRV_LOG(INFO,
+ "port %u enhanced empw inline setting"
+ " aligned from %u to %u",
+ PORT_ID(priv), inlen_empw, temp);
+ inlen_empw = temp;
+ }
+ assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+ assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
+ MLX5_DSEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE);
+ txq_ctrl->txq.inlen_empw = inlen_empw;
+ }
+ txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
+ if (tso) {
+ txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
+ txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
+ MLX5_MAX_TSO_HEADER);
+ txq_ctrl->txq.tso_en = 1;
+ }
+ txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
+ txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+ txq_ctrl->txq.offloads) && config->swp;
}
/**
@@ -724,6 +915,8 @@ struct mlx5_txq_ctrl *
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
+ tmpl->txq.elts_s = desc;
+ tmpl->txq.elts_m = desc - 1;
tmpl->txq.port_id = dev->data->port_id;
tmpl->txq.idx = idx;
txq_set_params(tmpl);
@@ -737,8 +930,6 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- tmpl->txq.elts =
- (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
rte_atomic32_inc(&tmpl->refcnt);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
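
As a worked example of the ordinary SEND inline rounding performed in txq_set_params(), the sketch below restates the arithmetic with typical mlx5 segment sizes; the numeric constants are assumptions here, not values taken from this patch:

/* Illustrative only: rounding of a requested txq_inline_max value. */
#define WQE_SIZE        64u /* assumed MLX5_WQE_SIZE */
#define ESEG_MIN_INLINE 18u /* assumed MLX5_ESEG_MIN_INLINE_SIZE */
#define WQE_DSEG_SIZE   16u /* assumed MLX5_WQE_DSEG_SIZE */

static inline unsigned int
round_inlen_send(unsigned int inlen_send)
{
	return (inlen_send / WQE_SIZE) * WQE_SIZE +
	       ESEG_MIN_INLINE + WQE_DSEG_SIZE;
}
/* round_inlen_send(200) == 226; the driver then clamps the result against
 * the maximal WQE size and raises it to at least inlen_mode. */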