@@ -354,6 +354,16 @@ mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
ret = mlx5_os_pd_create(cdev);
if (ret)
goto error;
+ /* All actions taken below are relevant only when DevX is supported. */
+ if (cdev->config.devx == 0)
+ return 0;
+ /* Query HCA attributes. */
+ ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
+ if (ret) {
+ DRV_LOG(ERR, "Unable to read HCA capabilities.");
+ rte_errno = ENOTSUP;
+ goto error;
+ }
return 0;
error:
mlx5_dev_hw_global_release(cdev);
@@ -332,6 +332,7 @@ void mlx5_common_init(void);
* - User device parameters disabled features.
*/
struct mlx5_common_dev_config {
+ struct mlx5_hca_attr hca_attr; /* HCA attributes. */
int dbnc; /* Skip doorbell register write barrier. */
unsigned int devx:1; /* Whether devx interface is available or not. */
unsigned int sys_mem_en:1; /* The default memory allocator. */
@@ -39,7 +39,6 @@ struct mlx5_compress_priv {
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
void *uar;
uint8_t min_block_size;
- uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
/* Minimum huffman block size supported by the device. */
struct rte_compressdev_config dev_config;
LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
@@ -243,7 +242,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
goto err;
}
qp_attr.cqn = qp->cq.cq->id;
- qp_attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+ qp_attr.ts_format =
+ mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
qp_attr.rq_size = 0;
qp_attr.sq_size = RTE_BIT32(log_ops_n);
qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
@@ -755,7 +755,7 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
{
struct rte_compressdev *compressdev;
struct mlx5_compress_priv *priv;
- struct mlx5_hca_attr att = { 0 };
+ struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
struct rte_compressdev_pmd_init_params init_params = {
.name = "",
.socket_id = cdev->dev->numa_node,
@@ -767,10 +767,9 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
rte_errno = ENOTSUP;
return -rte_errno;
}
- if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &att) != 0 ||
- ((att.mmo_compress_sq_en == 0 || att.mmo_decompress_sq_en == 0 ||
- att.mmo_dma_sq_en == 0) && (att.mmo_compress_qp_en == 0 ||
- att.mmo_decompress_qp_en == 0 || att.mmo_dma_qp_en == 0))) {
+ if ((attr->mmo_compress_sq_en == 0 || attr->mmo_decompress_sq_en == 0 ||
+ attr->mmo_dma_sq_en == 0) && (attr->mmo_compress_qp_en == 0 ||
+ attr->mmo_decompress_qp_en == 0 || attr->mmo_dma_qp_en == 0)) {
DRV_LOG(ERR, "Not enough capabilities to support compress "
"operations, maybe old FW/OFED version?");
rte_errno = ENOTSUP;
@@ -789,16 +788,15 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
priv = compressdev->data->dev_private;
- priv->mmo_decomp_sq = att.mmo_decompress_sq_en;
- priv->mmo_decomp_qp = att.mmo_decompress_qp_en;
- priv->mmo_comp_sq = att.mmo_compress_sq_en;
- priv->mmo_comp_qp = att.mmo_compress_qp_en;
- priv->mmo_dma_sq = att.mmo_dma_sq_en;
- priv->mmo_dma_qp = att.mmo_dma_qp_en;
+ priv->mmo_decomp_sq = attr->mmo_decompress_sq_en;
+ priv->mmo_decomp_qp = attr->mmo_decompress_qp_en;
+ priv->mmo_comp_sq = attr->mmo_compress_sq_en;
+ priv->mmo_comp_qp = attr->mmo_compress_qp_en;
+ priv->mmo_dma_sq = attr->mmo_dma_sq_en;
+ priv->mmo_dma_qp = attr->mmo_dma_qp_en;
priv->cdev = cdev;
priv->compressdev = compressdev;
- priv->min_block_size = att.compress_min_block_size;
- priv->qp_ts_format = att.qp_ts_format;
+ priv->min_block_size = attr->compress_min_block_size;
if (mlx5_compress_uar_prepare(priv) != 0) {
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
@@ -669,7 +669,8 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
attr.cqn = qp->cq_obj.cq->id;
attr.rq_size = 0;
attr.sq_size = RTE_BIT32(log_nb_desc);
- attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+ attr.ts_format =
+ mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj, log_nb_desc,
&attr, socket_id);
if (ret) {
@@ -920,7 +921,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
struct mlx5_devx_obj *login;
struct mlx5_crypto_priv *priv;
struct mlx5_crypto_devarg_params devarg_prms = { 0 };
- struct mlx5_hca_attr attr = { 0 };
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
.private_data_size = sizeof(struct mlx5_crypto_priv),
@@ -937,8 +937,7 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
rte_errno = ENOTSUP;
return -rte_errno;
}
- if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr) != 0 ||
- attr.crypto == 0 || attr.aes_xts == 0) {
+ if (!cdev->config.hca_attr.crypto || !cdev->config.hca_attr.aes_xts) {
DRV_LOG(ERR, "Not enough capabilities to support crypto "
"operations, maybe old FW/OFED version?");
rte_errno = ENOTSUP;
@@ -972,7 +971,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
priv->cdev = cdev;
priv->login_obj = login;
priv->crypto_dev = crypto_dev;
- priv->qp_ts_format = attr.qp_ts_format;
if (mlx5_crypto_uar_prepare(priv) != 0) {
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
@@ -24,7 +24,6 @@ struct mlx5_crypto_priv {
void *uar; /* User Access Region. */
volatile uint64_t *uar_addr;
uint32_t max_segs_num; /* Maximum supported data segs. */
- uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
struct mlx5_hlist *dek_hlist; /* Dek hash list. */
struct rte_cryptodev_config dev_config;
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
@@ -132,8 +132,8 @@ mlx5_os_set_nonblock_channel_fd(int fd)
* with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
* device attributes from the glue out parameter.
*
- * @param dev
- * Pointer to ibv context.
+ * @param cdev
+ * Pointer to mlx5 device.
*
* @param device_attr
* Pointer to mlx5 device attributes.
@@ -142,15 +142,17 @@ mlx5_os_set_nonblock_channel_fd(int fd)
* 0 on success, non zero error number otherwise
*/
int
-mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ struct mlx5_dev_attr *device_attr)
{
int err;
+ struct ibv_context *ctx = cdev->ctx;
struct ibv_device_attr_ex attr_ex;
+
memset(device_attr, 0, sizeof(*device_attr));
err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
if (err)
return err;
-
device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
device_attr->max_sge = attr_ex.orig_attr.max_sge;
@@ -1326,27 +1328,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->mps == MLX5_MPW ? "legacy " : "",
config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
if (sh->devx) {
- err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
- &config->hca_attr);
- if (err) {
- err = -err;
- goto error;
- }
- /* Check relax ordering support. */
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- } else {
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- }
- sh->rq_ts_format = config->hca_attr.rq_ts_format;
- sh->sq_ts_format = config->hca_attr.sq_ts_format;
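+ /* Use HCA attributes queried during common device probing. */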
+ config->hca_attr = sh->cdev->config.hca_attr;
sh->steering_format_version =
config->hca_attr.steering_format_version;
- sh->qp_ts_format = config->hca_attr.qp_ts_format;
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
@@ -520,6 +520,7 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
+ struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
int i;

memset(&sh->cmng, 0, sizeof(sh->cmng));
@@ -532,6 +533,11 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
TAILQ_INIT(&sh->cmng.counters[i]);
rte_spinlock_init(&sh->cmng.csl[i]);
}
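+ /* Check relaxed ordering support. */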
+ if (sh->devx && !haswell_broadwell_cpu) {
+ sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
+ sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
+ }
}

/**
@@ -1287,7 +1292,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
sh->devx = sh->cdev->config.devx;
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
- err = mlx5_os_get_dev_attr(sh->cdev->ctx, &sh->device_attr);
+ err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
if (err) {
DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
goto error;
@@ -1127,11 +1127,8 @@ struct mlx5_dev_ctx_shared {
uint32_t refcnt;
uint32_t devx:1; /* Opened with DV. */
uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
- uint32_t rq_ts_format:2; /* RQ timestamp formats supported. */
- uint32_t sq_ts_format:2; /* SQ timestamp formats supported. */
uint32_t steering_format_version:4;
/* Indicates the device steering logic format. */
- uint32_t qp_ts_format:2; /* QP timestamp formats supported. */
uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
@@ -1765,7 +1762,8 @@ void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
/* mlx5_os.c */

struct rte_pci_driver;
-int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
+int mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ struct mlx5_dev_attr *dev_attr);
void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
int mlx5_os_net_probe(struct mlx5_common_device *cdev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
@@ -236,6 +236,7 @@ static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_common_device *cdev = priv->sh->cdev;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -249,7 +250,8 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
- rq_attr.ts_format = mlx5_ts_format_conv(priv->sh->rq_ts_format);
+ rq_attr.ts_format =
+ mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
/* Fill WQ attributes for this RQ. */
if (mlx5_rxq_mprq_enabled(rxq_data)) {
rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
@@ -276,12 +278,11 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
MLX5_WQ_END_PAD_MODE_ALIGN :
MLX5_WQ_END_PAD_MODE_NONE;
- rq_attr.wq_attr.pd = priv->sh->cdev->pdn;
+ rq_attr.wq_attr.pd = cdev->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
/* Create RQ using DevX API. */
- return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
- wqe_size, log_desc_n, &rq_attr,
- rxq_ctrl->socket);
+ return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
+ log_desc_n, &rq_attr, rxq_ctrl->socket);
}

/**
@@ -981,6 +982,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
uint16_t log_desc_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_common_device *cdev = priv->sh->cdev;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -994,14 +996,15 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
.tis_lst_sz = 1,
.tis_num = priv->sh->tis->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = priv->sh->cdev->pdn,
+ .pd = cdev->pdn,
.uar_page =
mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
},
- .ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
+ .ts_format =
+ mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
};
/* Create Send Queue object with DevX. */
- return mlx5_devx_sq_create(priv->sh->cdev->ctx, &txq_obj->sq_obj,
+ return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
log_desc_n, &sq_attr, priv->sh->numa_node);
}
#endif
@@ -319,7 +319,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
sh->tx_uar, cdev->pdn,
MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
+ cdev->config.hca_attr.sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
return -1;
}
@@ -329,7 +329,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
sh->tx_uar, cdev->pdn,
MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format))
+ cdev->config.hca_attr.sq_ts_format))
return -1;
mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
break;
@@ -341,7 +341,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
sh->tx_uar, cdev->pdn,
MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
+ cdev->config.hca_attr.sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
return -1;
}
@@ -235,7 +235,8 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
.pd = sh->cdev->pdn,
.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
},
- .ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
+ .ts_format = mlx5_ts_format_conv
+ (sh->cdev->config.hca_attr.sq_ts_format),
};
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = {
@@ -445,7 +446,8 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
sq_attr.wq_attr.cd_slave = 1;
sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.pd = sh->cdev->pdn;
- sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
+ sq_attr.ts_format =
+ mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
log2above(wq->sq_size),
&sq_attr, sh->numa_node);
@@ -143,44 +143,39 @@ mlx5_init_once(void)
/**
* Get mlx5 device attributes.
*
- * @param ctx
- * Pointer to device context.
+ * @param cdev
+ * Pointer to mlx5 device.
*
* @param device_attr
* Pointer to mlx5 device attributes.
*
* @return
- * 0 on success, non zero error number otherwise
+ * 0 on success, non zero error number otherwise.
*/
int
-mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ struct mlx5_dev_attr *device_attr)
{
struct mlx5_context *mlx5_ctx;
- struct mlx5_hca_attr hca_attr;
void *pv_iseg = NULL;
u32 cb_iseg = 0;
int err = 0;

- if (!ctx)
+ if (!cdev || !cdev->ctx)
return -EINVAL;
- mlx5_ctx = (struct mlx5_context *)ctx;
+ mlx5_ctx = (struct mlx5_context *)cdev->ctx;
memset(device_attr, 0, sizeof(*device_attr));
- err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
- if (err) {
- DRV_LOG(ERR, "Failed to get device hca_cap");
- return err;
- }
- device_attr->max_cq = 1 << hca_attr.log_max_cq;
- device_attr->max_qp = 1 << hca_attr.log_max_qp;
- device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
- device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
- device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
- device_attr->max_pd = 1 << hca_attr.log_max_pd;
- device_attr->max_srq = 1 << hca_attr.log_max_srq;
- device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
- if (hca_attr.rss_ind_tbl_cap) {
+ device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
+ device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
+ device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
+ device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
+ device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
+ device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
+ device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
+ device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
+ if (cdev->config.hca_attr.rss_ind_tbl_cap) {
device_attr->max_rwq_indirection_table_size =
- 1 << hca_attr.rss_ind_tbl_cap;
+ 1 << cdev->config.hca_attr.rss_ind_tbl_cap;
}
pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
if (pv_iseg == NULL) {
@@ -359,7 +354,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
goto error;
}
DRV_LOG(DEBUG, "MPW isn't supported");
- mlx5_os_get_dev_attr(sh->cdev->ctx, &device_attr);
+ mlx5_os_get_dev_attr(sh->cdev, &device_attr);
config->swp = 0;
config->ind_table_max_size =
sh->device_attr.max_rwq_indirection_table_size;
@@ -452,21 +447,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config->cqe_comp = 0;
}
if (sh->devx) {
- err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
- &config->hca_attr);
- if (err) {
- err = -err;
- goto error;
- }
- /* Check relax ordering support. */
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- }
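+ /* Use HCA attributes queried during common device probing. */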
+ config->hca_attr = sh->cdev->config.hca_attr;
config->hw_csum = config->hca_attr.csum_cap;
DRV_LOG(DEBUG, "checksum offloading is %ssupported",
(config->hw_csum ? "" : "not "));
@@ -492,9 +473,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
(NS_PER_S / MS_PER_S))
config->rt_timestamp = 1;
}
- sh->rq_ts_format = config->hca_attr.rq_ts_format;
- sh->sq_ts_format = config->hca_attr.sq_ts_format;
- sh->qp_ts_format = config->hca_attr.qp_ts_format;
}
if (config->mprq.enabled) {
DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
@@ -125,18 +125,13 @@ static int
mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
{
struct mlx5_regex_priv *priv = NULL;
- struct mlx5_hca_attr attr;
+ struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
char name[RTE_REGEXDEV_NAME_MAX_LEN];
int ret;
uint32_t val;

- ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
- if (ret) {
- DRV_LOG(ERR, "Unable to read HCA capabilities.");
- rte_errno = ENOTSUP;
- return -rte_errno;
- } else if (((!attr.regex) && (!attr.mmo_regex_sq_en) &&
- (!attr.mmo_regex_qp_en)) || attr.regexp_num_of_engines == 0) {
+ if ((!attr->regex && !attr->mmo_regex_sq_en &&
+ !attr->mmo_regex_qp_en) || attr->regexp_num_of_engines == 0) {
DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
"old FW/OFED version?");
rte_errno = ENOTSUP;
@@ -154,9 +149,8 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->mmo_regex_qp_cap = attr.mmo_regex_qp_en;
- priv->mmo_regex_sq_cap = attr.mmo_regex_sq_en;
- priv->qp_ts_format = attr.qp_ts_format;
+ priv->mmo_regex_qp_cap = attr->mmo_regex_qp_en;
+ priv->mmo_regex_sq_cap = attr->mmo_regex_sq_en;
priv->cdev = cdev;
priv->nb_engines = 2; /* attr.regexp_num_of_engines */
ret = mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
@@ -190,8 +184,8 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
priv->regexdev->dev_ops = &mlx5_regexdev_ops;
priv->regexdev->enqueue = mlx5_regexdev_enqueue;
#ifdef HAVE_MLX5_UMR_IMKEY
- if (!attr.umr_indirect_mkey_disabled &&
- !attr.umr_modify_entity_size_disabled)
+ if (!attr->umr_indirect_mkey_disabled &&
+ !attr->umr_modify_entity_size_disabled)
priv->has_umr = 1;
if (priv->has_umr)
priv->regexdev->enqueue = mlx5_regexdev_enqueue_gga;
@@ -72,7 +72,6 @@ struct mlx5_regex_priv {
/**< Called by memory event callback. */
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
uint8_t is_bf2; /* The device is BF2 device. */
- uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
uint8_t has_umr; /* The device supports UMR. */
uint32_t mmo_regex_qp_cap:1;
uint32_t mmo_regex_sq_cap:1;
@@ -139,7 +139,8 @@ regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
.cqn = qp->cq.cq_obj.cq->id,
.uar_index = priv->uar->page_id,
.pd = priv->cdev->pdn,
- .ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
+ .ts_format = mlx5_ts_format_conv
+ (priv->cdev->config.hca_attr.qp_ts_format),
.user_index = q_ind,
};
struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
@@ -505,36 +505,29 @@ static int
mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev)
{
struct mlx5_vdpa_priv *priv = NULL;
- struct mlx5_hca_attr attr;
- int ret;
+ struct mlx5_hca_attr *attr = &cdev->config.hca_attr;

- ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
- if (ret) {
- DRV_LOG(ERR, "Unable to read HCA capabilities.");
- rte_errno = ENOTSUP;
- return -rte_errno;
- } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+ if (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {
DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
"old FW/OFED version?");
rte_errno = ENOTSUP;
return -rte_errno;
}
- if (!attr.vdpa.queue_counters_valid)
+ if (!attr->vdpa.queue_counters_valid)
DRV_LOG(DEBUG, "No capability to support virtq statistics.");
priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
sizeof(struct mlx5_vdpa_virtq) *
- attr.vdpa.max_num_virtio_queues * 2,
+ attr->vdpa.max_num_virtio_queues * 2,
RTE_CACHE_LINE_SIZE);
if (!priv) {
DRV_LOG(ERR, "Failed to allocate private memory.");
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->caps = attr.vdpa;
- priv->log_max_rqt_size = attr.log_max_rqt_size;
- priv->num_lag_ports = attr.num_lag_ports;
- priv->qp_ts_format = attr.qp_ts_format;
- if (attr.num_lag_ports == 0)
+ priv->caps = attr->vdpa;
+ priv->log_max_rqt_size = attr->log_max_rqt_size;
+ priv->num_lag_ports = attr->num_lag_ports;
+ if (attr->num_lag_ports == 0)
priv->num_lag_ports = 1;
priv->cdev = cdev;
priv->var = mlx5_glue->dv_alloc_var(priv->cdev->ctx, 0);
@@ -142,7 +142,6 @@ struct mlx5_vdpa_priv {
struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
uint16_t nr_virtqs;
uint8_t num_lag_ports;
- uint8_t qp_ts_format;
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
struct mlx5_vdpa_steer steer;
@@ -594,7 +594,8 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
return -1;
attr.pd = priv->cdev->pdn;
- attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+ attr.ts_format =
+ mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
if (!eqp->fw_qp) {
DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
@@ -605,7 +606,8 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
attr.rq_size = RTE_BIT32(log_desc_n);
attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
attr.sq_size = 0; /* No need SQ. */
- attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+ attr.ts_format =
+ mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
&attr, SOCKET_ID_ANY);
if (ret) {