@@ -106,17 +106,6 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
-struct mlx5_args {
- int cqe_comp;
- int txq_inline;
- int txqs_inline;
- int mps;
- int mpw_hdr_dseg;
- int inline_max_packet_sz;
- int tso;
- int tx_vec_en;
- int rx_vec_en;
-};
/**
* Retrieve integer value from environment variable.
*
@@ -489,35 +478,91 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
static struct rte_pci_driver mlx5_driver;
/**
- * Assign parameters from args into priv, only non default
- * values are considered.
+ * Update runtime parameters from the application-provided device arguments,
+ * using capability-derived defaults for those left unset.
*
- * @param[out] priv
+ * @param[in, out] priv
* Pointer to private structure.
- * @param[in] args
- * Pointer to args values.
*/
static void
-mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
+mlx5_args_update(struct priv *priv)
{
- if (args->cqe_comp != MLX5_ARG_UNSET)
- priv->cqe_comp = args->cqe_comp;
- if (args->txq_inline != MLX5_ARG_UNSET)
- priv->txq_inline = args->txq_inline;
- if (args->txqs_inline != MLX5_ARG_UNSET)
- priv->txqs_inline = args->txqs_inline;
- if (args->mps != MLX5_ARG_UNSET)
- priv->mps = args->mps ? priv->mps : 0;
- if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
- priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
- if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
- priv->inline_max_packet_sz = args->inline_max_packet_sz;
- if (args->tso != MLX5_ARG_UNSET)
- priv->tso = args->tso;
- if (args->tx_vec_en != MLX5_ARG_UNSET)
- priv->tx_vec_en = args->tx_vec_en;
- if (args->rx_vec_en != MLX5_ARG_UNSET)
- priv->rx_vec_en = args->rx_vec_en;
+ struct mlx5_args *args_def = &priv->args_default;
+ struct mlx5_args *args = &priv->args;
+
+ if (args_def->cqe_comp != MLX5_ARG_UNSET) {
+ if (!priv->cqe_comp && args_def->cqe_comp) {
+ WARN("Rx CQE compression is not supported");
+ args_def->cqe_comp = 0;
+ }
+ args->cqe_comp = args_def->cqe_comp;
+ } else {
+ args->cqe_comp = priv->cqe_comp;
+ }
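+ /* TSO stays off unless explicitly requested and supported. */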
+ if (args_def->tso != MLX5_ARG_UNSET) {
+ if (!priv->tso && args_def->tso) {
+ WARN("TSO is not supported");
+ args_def->tso = 0;
+ }
+ args->tso = args_def->tso;
+ } else {
+ args->tso = 0;
+ }
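+ /* MPS and TSO cannot be enabled together; TSO takes precedence. */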
+ if (args_def->mps != MLX5_ARG_UNSET) {
+ if (!priv->mps && args_def->mps) {
+ WARN("multi-packet send not supported");
+ args_def->mps = MLX5_MPW_DISABLED;
+ }
+ if (args->tso && args_def->mps) {
+ WARN("multi-packet send not supported in conjunction "
+ "with TSO. MPS disabled");
+ args->mps = MLX5_MPW_DISABLED;
+ } else {
+ args->mps = args_def->mps ? priv->mps :
+ MLX5_MPW_DISABLED;
+ }
+ } else {
+ if (args->tso)
+ args->mps = MLX5_MPW_DISABLED;
+ else
+ args->mps = priv->mps;
+ }
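+ /* Default inline settings depend on whether Enhanced MPW is active. */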
+ if (args_def->txq_inline != MLX5_ARG_UNSET) {
+ args->txq_inline = args_def->txq_inline;
+ } else {
+ if (args->mps == MLX5_MPW_ENHANCED)
+ args->txq_inline = MLX5_WQE_SIZE_MAX -
+ MLX5_WQE_SIZE;
+ else
+ args->txq_inline = 0;
+ }
+ if (args_def->txqs_inline != MLX5_ARG_UNSET) {
+ args->txqs_inline = args_def->txqs_inline;
+ } else {
+ if (args->mps == MLX5_MPW_ENHANCED)
+ args->txqs_inline = MLX5_EMPW_MIN_TXQS;
+ else
+ args->txqs_inline = 0;
+ }
+ if (args_def->mpw_hdr_dseg != MLX5_ARG_UNSET)
+ args->mpw_hdr_dseg = args_def->mpw_hdr_dseg;
+ else
+ args->mpw_hdr_dseg = 0;
+ if (args_def->inline_max_packet_sz != MLX5_ARG_UNSET) {
+ args->inline_max_packet_sz = args_def->inline_max_packet_sz;
+ } else {
+ if (args->mps == MLX5_MPW_ENHANCED)
+ args->inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ else
+ args->inline_max_packet_sz = 0;
+ }
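+ /* Vectorized Rx/Tx are on by default; devargs may disable them. */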
+ if (args_def->tx_vec_en != MLX5_ARG_UNSET)
+ args->tx_vec_en = args_def->tx_vec_en;
+ else
+ args->tx_vec_en = 1;
+ if (args_def->rx_vec_en != MLX5_ARG_UNSET)
+ args->rx_vec_en = args_def->rx_vec_en;
+ else
+ args->rx_vec_en = 1;
}
/**
@@ -774,19 +819,16 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
- priv->mps = mps; /* Enable MPW by default if supported. */
+ priv->mps = mps;
priv->cqe_comp = cqe_comp;
priv->tunnel_en = tunnel_en;
- /* Enable vector by default if supported. */
- priv->tx_vec_en = 1;
- priv->rx_vec_en = 1;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
- mlx5_args_assign(priv, &args);
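+ /* Effective values are derived later by mlx5_args_update(). */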
+ priv->args_default = args;
if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
goto port_error;
@@ -847,34 +889,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (priv->tso)
priv->max_tso_payload_sz =
device_attr_ex.tso_caps.max_tso;
- if (priv->mps && !mps) {
- ERROR("multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
- err = ENOTSUP;
- goto port_error;
- } else if (priv->mps && priv->tso) {
- WARN("multi-packet send not supported in conjunction "
- "with TSO. MPS disabled");
- priv->mps = 0;
- }
- INFO("%sMPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- /* Set default values for Enhanced MPW, a.k.a MPWv2. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (args.txqs_inline == MLX5_ARG_UNSET)
- priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
- if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
- priv->inline_max_packet_sz =
- MLX5_EMPW_MAX_INLINE_LEN;
- if (args.txq_inline == MLX5_ARG_UNSET)
- priv->txq_inline = MLX5_WQE_SIZE_MAX -
- MLX5_WQE_SIZE;
- }
- if (priv->cqe_comp && !cqe_comp) {
- WARN("Rx CQE compression isn't supported");
- priv->cqe_comp = 0;
- }
+ mlx5_args_update(priv);
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
@@ -90,6 +90,18 @@ struct mlx5_xstats_ctrl {
/* Flow list . */
TAILQ_HEAD(mlx5_flows, rte_flow);
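+/* Storage for device arguments and derived runtime parameters. */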
+struct mlx5_args {
+ int cqe_comp;
+ int txq_inline;
+ int txqs_inline;
+ int mps;
+ int mpw_hdr_dseg;
+ int inline_max_packet_sz;
+ int tso;
+ int tx_vec_en;
+ int rx_vec_en;
+};
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
@@ -108,21 +120,15 @@ struct priv {
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
- unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
- unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
+ unsigned int mps:2; /* Multi-packet send mode supported by the device. */
+ unsigned int cqe_comp:1; /* Whether CQE compression is supported. */
unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tunnel_en:1;
unsigned int isolated:1; /* Whether isolated mode is enabled. */
- unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
- unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
unsigned int counter_set_supported:1; /* Counter set is supported. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
- unsigned int txq_inline; /* Maximum packet size for inlining. */
- unsigned int txqs_inline; /* Queue number threshold for inlining. */
- unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -149,6 +155,8 @@ struct priv {
rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+ struct mlx5_args args_default; /* Args as set by the application. */
+ struct mlx5_args args; /* Args values in effect at runtime. */
};
/**
@@ -701,14 +701,14 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
DEV_RX_OFFLOAD_TIMESTAMP;
- if (!priv->mps)
+ if (!priv->args.mps)
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
if (priv->hw_csum)
info->tx_offload_capa |=
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->tso)
+ if (priv->args.tso)
info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
if (priv->tunnel_en)
info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
@@ -1434,7 +1434,7 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
assert(dev != NULL);
dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (priv->args.mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
@@ -1445,10 +1445,10 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
dev->tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
- } else if (priv->mps && priv->txq_inline) {
+ } else if (priv->args.mps && priv->args.txq_inline) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if (priv->mps) {
+ } else if (priv->args.mps) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
@@ -612,7 +612,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+ if (priv->args.cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
@@ -622,7 +622,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
*/
if (rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
- } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ } else if (priv->args.cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
@@ -885,7 +885,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
const uint16_t desc_n =
- desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ desc + priv->args.rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
tmpl = rte_calloc_socket("RXQ", 1,
@@ -329,10 +329,10 @@ priv_check_raw_vec_tx_support(struct priv *priv)
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
- if (!priv->tx_vec_en ||
+ if (!priv->args.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
- priv->mps != MLX5_MPW_ENHANCED ||
- priv->tso)
+ priv->args.mps != MLX5_MPW_ENHANCED ||
+ priv->args.tso)
return -ENOTSUP;
return 1;
}
@@ -352,7 +352,7 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ if (!ctrl->priv->args.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
}
@@ -371,7 +371,7 @@ priv_check_vec_rx_support(struct priv *priv)
{
uint16_t i;
- if (!priv->rx_vec_en)
+ if (!priv->args.rx_vec_en)
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
@@ -322,7 +322,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
- if (priv->mps == MLX5_MPW_ENHANCED)
+ if (priv->args.mps == MLX5_MPW_ENHANCED)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
@@ -582,34 +582,35 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+ if (priv->args.mps == MLX5_MPW_ENHANCED)
+ tmpl->txq.mpw_hdr_dseg = priv->args.mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
DEBUG("priv->device_attr.max_qp_wr is %d",
priv->device_attr.orig_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.orig_attr.max_sge);
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ if (priv->args.txq_inline &&
+ priv->txqs_n >= (unsigned int)priv->args.txqs_inline) {
unsigned int ds_cnt;
tmpl->txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ ((priv->args.txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
tmpl->txq.inline_en = 1;
/* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
+ assert(!priv->args.tso || !priv->args.mps);
+ if (priv->args.mps == MLX5_MPW_ENHANCED) {
tmpl->txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
+ priv->args.inline_max_packet_sz;
/* To minimize the size of data set, avoid requesting
* too large WQ.
*/
tmpl->max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
+ ((RTE_MIN(priv->args.txq_inline,
+ priv->args.inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
+ } else if (priv->args.tso) {
int inline_diff = tmpl->txq.max_inline - max_tso_inline;
/*
@@ -641,11 +642,11 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
RTE_CACHE_LINE_SIZE);
WARN("txq inline is too large (%d) setting it to "
"the maximum possible: %d\n",
- priv->txq_inline, max_inline);
+ priv->args.txq_inline, max_inline);
tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
}
}
- if (priv->tso) {
+ if (priv->args.tso) {
tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
max_tso_inline);