@@ -999,8 +999,6 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
* Backing DPDK device.
* @param spawn
* Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
* @param eth_da
* Device arguments.
*
@@ -1014,12 +1012,10 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config,
struct rte_eth_devargs *eth_da)
{
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
- struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
struct rte_eth_dev *eth_dev = NULL;
struct mlx5_priv *priv = NULL;
@@ -1029,7 +1025,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
int own_domain_id = 0;
uint16_t port_id;
struct mlx5_port_info vport_info = { .query_flags = 0 };
- int nl_rdma = -1;
+ int nl_rdma;
int i;
/* Determine if this port representor is supposed to be spawned. */
@@ -1107,13 +1103,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_dev_close(eth_dev);
return NULL;
}
- /* Process parameters. */
- err = mlx5_args(config, dpdk_dev->devargs);
- if (err) {
- DRV_LOG(ERR, "failed to process device arguments: %s",
- strerror(rte_errno));
- return NULL;
- }
sh = mlx5_alloc_shared_dev_ctx(spawn);
if (!sh)
return NULL;
@@ -1269,41 +1258,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
priv->dev_port, priv->domain_id);
}
- if (config->hw_padding && !sh->dev_cap.hw_padding) {
- DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
- config->hw_padding = 0;
- } else if (config->hw_padding) {
- DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
- }
- /*
- * MPW is disabled by default, while the Enhanced MPW is enabled
- * by default.
- */
- if (config->mps == MLX5_ARG_UNSET)
- config->mps = (sh->dev_cap.mps == MLX5_MPW_ENHANCED) ?
- MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
- else
- config->mps = config->mps ? sh->dev_cap.mps : MLX5_MPW_DISABLED;
- DRV_LOG(INFO, "%sMPS is %s",
- config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
- config->mps == MLX5_MPW ? "legacy " : "",
- config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
if (sh->cdev->config.devx) {
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+
sh->steering_format_version = hca_attr->steering_format_version;
- /* LRO is supported only when DV flow enabled. */
- if (sh->dev_cap.lro_supported && sh->config.dv_flow_en)
- sh->dev_cap.lro_supported = 0;
- if (sh->dev_cap.lro_supported) {
- /*
- * If LRO timeout is not configured by application,
- * use the minimal supported value.
- */
- if (!config->lro_timeout)
- config->lro_timeout =
- hca_attr->lro_timer_supported_periods[0];
- DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
- config->lro_timeout);
- }
#if defined(HAVE_MLX5DV_DR) && \
(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
@@ -1395,39 +1353,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
#endif
}
- if (config->cqe_comp && !sh->dev_cap.cqe_comp) {
- DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
- config->cqe_comp = 0;
- }
- if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
- (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_flow_tag)) {
- DRV_LOG(WARNING, "Flow Tag CQE compression"
- " format isn't supported.");
- config->cqe_comp = 0;
- }
- if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
- (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
- DRV_LOG(WARNING, "L3/L4 Header CQE compression"
- " format isn't supported.");
- config->cqe_comp = 0;
- }
- DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
- config->cqe_comp ? "" : "not ");
- if (config->std_delay_drop || config->hp_delay_drop) {
- if (!hca_attr->rq_delay_drop) {
- config->std_delay_drop = 0;
- config->hp_delay_drop = 0;
- DRV_LOG(WARNING,
- "dev_port-%u: Rxq delay drop is not supported",
- priv->dev_port);
- }
- }
- if (config->mprq.enabled && !sh->dev_cap.mprq.enabled) {
- DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
- config->mprq.enabled = 0;
+ /* Process parameters and store port configuration on priv structure. */
+ err = mlx5_port_args_config(priv, dpdk_dev->devargs, &priv->config);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "Failed to process port configure: %s",
+ strerror(rte_errno));
+ goto error;
}
- if (config->max_dump_files_num == 0)
- config->max_dump_files_num = 128;
eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -1528,10 +1461,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
* Verbs context returned by ibv_open_device().
*/
mlx5_link_update(eth_dev, 0);
- /* Detect minimal data bytes to inline. */
- mlx5_set_min_inline(spawn, config);
- /* Store device configuration on private structure. */
- priv->config = *config;
for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
@@ -1899,25 +1828,6 @@ mlx5_device_bond_pci_match(const char *ibdev_name,
return pf;
}
-static void
-mlx5_os_config_default(struct mlx5_dev_config *config)
-{
- memset(config, 0, sizeof(*config));
- config->mps = MLX5_ARG_UNSET;
- config->cqe_comp = 1;
- config->rx_vec_en = 1;
- config->txq_inline_max = MLX5_ARG_UNSET;
- config->txq_inline_min = MLX5_ARG_UNSET;
- config->txq_inline_mpw = MLX5_ARG_UNSET;
- config->txqs_inline = MLX5_ARG_UNSET;
- config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
- config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
- config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
- config->log_hp_size = MLX5_ARG_UNSET;
- config->std_delay_drop = 0;
- config->hp_delay_drop = 0;
-}
-
/**
* Register a PCI device within bonding.
*
@@ -1966,7 +1876,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
int bd = -1;
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
struct mlx5_dev_spawn_data *list = NULL;
- struct mlx5_dev_config dev_config;
struct rte_eth_devargs eth_da = *req_eth_da;
struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
struct mlx5_bond_info bond_info;
@@ -2308,10 +2217,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
for (i = 0; i != ns; ++i) {
uint32_t restore;
- /* Default configuration. */
- mlx5_os_config_default(&dev_config);
- list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
- &dev_config, ð_da);
+ list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], ð_da);
if (!list[i].eth_dev) {
if (rte_errno != EBUSY && rte_errno != EEXIST)
break;
@@ -2466,7 +2372,6 @@ static int
mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
{
struct rte_eth_devargs eth_da = { .nb_ports = 0 };
- struct mlx5_dev_config config;
struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
struct rte_device *dev = cdev->dev;
struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
@@ -2477,8 +2382,6 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
ret = mlx5_os_parse_eth_devargs(dev, ð_da);
if (ret != 0)
return ret;
- /* Set default config data. */
- mlx5_os_config_default(&config);
/* Init spawn data. */
spawn.max_port = 1;
spawn.phys_port = 1;
@@ -2491,7 +2394,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
spawn.ifindex = ret;
spawn.cdev = cdev;
/* Spawn device. */
- eth_dev = mlx5_dev_spawn(dev, &spawn, &config, ð_da);
+ eth_dev = mlx5_dev_spawn(dev, &spawn, ð_da);
if (eth_dev == NULL)
return -rte_errno;
/* Post create. */
@@ -2101,9 +2101,9 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_args_check(const char *key, const char *val, void *opaque)
+mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
{
- struct mlx5_dev_config *config = opaque;
+ struct mlx5_port_config *config = opaque;
signed long tmp;
/* No-op, port representors are processed in mlx5_dev_spawn(). */
@@ -2197,38 +2197,156 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
}
/**
- * Parse device parameters.
+ * Parse user port parameters and adjust them according to device capabilities.
*
- * @param config
- * Pointer to device configuration structure.
+ * @param priv
+ * Pointer to the private device data structure.
* @param devargs
* Device arguments structure.
+ * @param config
+ * Pointer to port configuration structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
+mlx5_port_args_config(struct mlx5_priv *priv, struct rte_devargs *devargs,
+ struct mlx5_port_config *config)
{
struct rte_kvargs *kvlist;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+ struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
+ bool devx = priv->sh->cdev->config.devx;
int ret = 0;
- if (devargs == NULL)
- return 0;
- /* Following UGLY cast is done to pass checkpatch. */
- kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL) {
- rte_errno = EINVAL;
- return -rte_errno;
+ /* Default configuration. */
+ memset(config, 0, sizeof(*config));
+ config->mps = MLX5_ARG_UNSET;
+ config->cqe_comp = 1;
+ config->rx_vec_en = 1;
+ config->txq_inline_max = MLX5_ARG_UNSET;
+ config->txq_inline_min = MLX5_ARG_UNSET;
+ config->txq_inline_mpw = MLX5_ARG_UNSET;
+ config->txqs_inline = MLX5_ARG_UNSET;
+ config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+ config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
+ config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ config->log_hp_size = MLX5_ARG_UNSET;
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ /* Parse device parameters. */
+ if (devargs != NULL) {
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL) {
+ DRV_LOG(ERR,
+ "Failed to parse device arguments.");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Process parameters. */
+ ret = rte_kvargs_process(kvlist, NULL,
+ mlx5_port_args_check_handler, config);
+ rte_kvargs_free(kvlist);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to process port arguments: %s",
+ strerror(rte_errno));
+ return -rte_errno;
+ }
}
- /* Process parameters. */
- ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
- if (ret) {
- rte_errno = EINVAL;
- ret = -rte_errno;
+ /* Adjust parameters according to device capabilities. */
+ if (config->hw_padding && !dev_cap->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
+ config->hw_padding = 0;
+ } else if (config->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
}
- rte_kvargs_free(kvlist);
- return ret;
+ /*
+ * MPW is disabled by default, while the Enhanced MPW is enabled
+ * by default.
+ */
+ if (config->mps == MLX5_ARG_UNSET)
+ config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
+ MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
+ else
+ config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
+ DRV_LOG(INFO, "%sMPS is %s",
+ config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
+ config->mps == MLX5_MPW ? "legacy " : "",
+ config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ /* LRO is supported only when DV flow enabled. */
+ if (dev_cap->lro_supported && !priv->sh->config.dv_flow_en)
+ dev_cap->lro_supported = 0;
+ if (dev_cap->lro_supported) {
+ /*
+ * If LRO timeout is not configured by application,
+ * use the minimal supported value.
+ */
+ if (!config->lro_timeout)
+ config->lro_timeout =
+ hca_attr->lro_timer_supported_periods[0];
+ DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
+ config->lro_timeout);
+ }
+ if (config->cqe_comp && !dev_cap->cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+ DRV_LOG(WARNING,
+ "Flow Tag CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+ DRV_LOG(WARNING,
+ "L3/L4 Header CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
+ config->cqe_comp ? "" : "not ");
+ if ((config->std_delay_drop || config->hp_delay_drop) &&
+ !dev_cap->rq_delay_drop_en) {
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
+ priv->dev_port);
+ }
+ if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
+ config->mprq.enabled = 0;
+ }
+ if (config->max_dump_files_num == 0)
+ config->max_dump_files_num = 128;
+ /* Detect minimal data bytes to inline. */
+ mlx5_set_min_inline(priv);
+ DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
+ config->hw_vlan_insert ? "" : "not ");
+ DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
+ DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
+ DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
+ DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
+ DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
+ config->std_delay_drop);
+ DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
+ DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
+ config->max_dump_files_num);
+ DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
+ DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
+ config->mprq.log_stride_num);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
+ config->mprq.log_stride_size);
+ DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
+ DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
+ DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
+ DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
+ DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
+ DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
+ DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
+ return 0;
}
/**
@@ -2366,21 +2484,19 @@ mlx5_probe_again_args_validate(struct mlx5_common_device *cdev)
* - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
* and none (0 bytes) for other NICs
*
- * @param spawn
- * Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
+ * @param priv
+ * Pointer to the private device data structure.
*/
void
-mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config)
+mlx5_set_min_inline(struct mlx5_priv *priv)
{
- struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+ struct mlx5_port_config *config = &priv->config;
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
- if (spawn->pci_dev != NULL) {
- switch (spawn->pci_dev->id.device_id) {
+ if (priv->pci_dev != NULL) {
+ switch (priv->pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
if (config->txq_inline_min <
@@ -2446,7 +2562,7 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
}
}
}
- if (spawn->pci_dev == NULL) {
+ if (priv->pci_dev == NULL) {
config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
goto exit;
}
@@ -2455,7 +2571,7 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
* inline data size with DevX. Try PCI ID
* to determine old NICs.
*/
- switch (spawn->pci_dev->id.device_id) {
+ switch (priv->pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
@@ -243,14 +243,13 @@ struct mlx5_stats_ctrl {
#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
/*
- * Device configuration structure.
- *
- * Merged configuration from:
- *
- * - Device capabilities,
- * - User device parameters disabled features.
+ * Port configuration structure.
+ * Features disabled by user device parameters.
+ * This structure contains all configurations coming from devargs that are
+ * oriented to the port. When probing again, devargs do not have to match the
+ * primary devargs. The structure is updated for each port in spawn function.
*/
-struct mlx5_dev_config {
+struct mlx5_port_config {
unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
@@ -1450,7 +1449,7 @@ struct mlx5_priv {
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
- struct mlx5_dev_config config; /* Device configuration. */
+ struct mlx5_port_config config; /* Port configuration. */
/* Context for Verbs allocator. */
int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
@@ -1539,7 +1538,6 @@ void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
for (port_id = mlx5_eth_find_next(0, dev); \
port_id < RTE_MAX_ETHPORTS; \
port_id = mlx5_eth_find_next(port_id + 1, dev))
-int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
struct mlx5_hca_attr *hca_attr);
struct mlx5_dev_ctx_shared *
@@ -1548,10 +1546,11 @@ void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
void mlx5_free_table_hash_list(struct mlx5_priv *priv);
int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
-void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config);
+void mlx5_set_min_inline(struct mlx5_priv *priv);
void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
int mlx5_probe_again_args_validate(struct mlx5_common_device *cdev);
+int mlx5_port_args_config(struct mlx5_priv *priv, struct rte_devargs *devargs,
+ struct mlx5_port_config *config);
bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
void mlx5_flow_counter_mode_config(struct rte_eth_dev *dev);
@@ -766,8 +766,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
tir_attr->indirect_table = ind_tbl->rqt->id;
if (dev->data->dev_conf.lpbk_mode)
- tir_attr->self_lb_block =
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
if (lro) {
tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
@@ -266,7 +266,7 @@ static void
mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
unsigned int inlen;
uint16_t nb_max;
@@ -302,7 +302,6 @@ int
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
unsigned int max;
/* FIXME: we should ask the device for these values. */
@@ -321,8 +320,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
- info->rx_seg_capa.multi_pools = !config->mprq.enabled;
- info->rx_seg_capa.offset_allowed = !config->mprq.enabled;
+ info->rx_seg_capa.multi_pools = !priv->config.mprq.enabled;
+ info->rx_seg_capa.offset_allowed = !priv->config.mprq.enabled;
info->rx_seg_capa.offset_align_log2 = 0;
info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
info->rx_queue_offload_capa);
@@ -1562,7 +1562,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
uint32_t *actual_log_stride_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
@@ -1681,7 +1681,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
@@ -517,7 +517,7 @@ eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
unsigned int diff = 0, olx = 0, i, m;
@@ -100,7 +100,7 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
if (dev_cap->hw_csum)
@@ -741,7 +741,7 @@ static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_priv *priv = txq_ctrl->priv;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
unsigned int inlen_send; /* Inline data for ordinary SEND.*/
unsigned int inlen_empw; /* Inline data for enhanced MPW. */
@@ -960,7 +960,7 @@ static int
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_priv *priv = txq_ctrl->priv;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
unsigned int max_inline;
max_inline = txq_calc_inline_max(txq_ctrl);
@@ -284,8 +284,6 @@ mlx5_os_set_nonblock_channel_fd(int fd)
* Backing DPDK device.
* @param spawn
* Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
*
* @return
* A valid Ethernet device object on success, NULL otherwise and rte_errno
@@ -295,8 +293,7 @@ mlx5_os_set_nonblock_channel_fd(int fd)
*/
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
- struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config)
+ struct mlx5_dev_spawn_data *spawn)
{
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
@@ -317,14 +314,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
return NULL;
}
DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
- /* Process parameters. */
- err = mlx5_args(config, dpdk_dev->devargs);
- if (err) {
- err = rte_errno;
- DRV_LOG(ERR, "failed to process device arguments: %s",
- strerror(rte_errno));
- goto error;
- }
sh = mlx5_alloc_shared_dev_ctx(spawn);
if (!sh)
return NULL;
@@ -396,24 +385,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
own_domain_id = 1;
}
- if (config->hw_padding) {
- DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
- config->hw_padding = 0;
- }
- DRV_LOG(DEBUG, "%sMPS is %s.",
- config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
- config->mps == MLX5_MPW ? "legacy " : "",
- config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- if (config->cqe_comp) {
- DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
- config->cqe_comp = 0;
- }
- if (config->mprq.enabled) {
- DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
- config->mprq.enabled = 0;
+ /* Process parameters and store port configuration on priv structure. */
+ err = mlx5_port_args_config(priv, dpdk_dev->devargs, &priv->config);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "Failed to process port configure: %s",
+ strerror(rte_errno));
+ goto error;
}
- if (config->max_dump_files_num == 0)
- config->max_dump_files_num = 128;
eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -508,10 +487,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
* Verbs context returned by ibv_open_device().
*/
mlx5_link_update(eth_dev, 0);
- /* Detect minimal data bytes to inline. */
- mlx5_set_min_inline(spawn, config);
- /* Store device configuration on private structure. */
- priv->config = *config;
for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
@@ -817,18 +792,6 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
},
};
- struct mlx5_dev_config dev_config = {
- .rx_vec_en = 1,
- .txq_inline_max = MLX5_ARG_UNSET,
- .txq_inline_min = MLX5_ARG_UNSET,
- .txq_inline_mpw = MLX5_ARG_UNSET,
- .txqs_inline = MLX5_ARG_UNSET,
- .mprq = {
- .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
- .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
- },
- .log_hp_size = MLX5_ARG_UNSET,
- };
int ret;
uint32_t restore;
@@ -842,7 +805,7 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
strerror(rte_errno));
return -rte_errno;
}
- spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
+ spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn);
if (!spawn.eth_dev)
return -rte_errno;
restore = spawn.eth_dev->data->dev_flags;