@@ -1549,34 +1549,34 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
(config->hw_fcs_strip ? "" : "not "));
if (config->mprq.enabled && mprq) {
- if (config->mprq.stride_num_n &&
- (config->mprq.stride_num_n > mprq_max_stride_num_n ||
- config->mprq.stride_num_n < mprq_min_stride_num_n)) {
- config->mprq.stride_num_n =
- RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
- mprq_min_stride_num_n),
- mprq_max_stride_num_n);
+ if (config->mprq.log_stride_num &&
+ (config->mprq.log_stride_num > mprq_max_stride_num_n ||
+ config->mprq.log_stride_num < mprq_min_stride_num_n)) {
+ config->mprq.log_stride_num =
+ RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
+ mprq_min_stride_num_n),
+ mprq_max_stride_num_n);
DRV_LOG(WARNING,
"the number of strides"
" for Multi-Packet RQ is out of range,"
" setting default value (%u)",
- 1 << config->mprq.stride_num_n);
- }
- if (config->mprq.stride_size_n &&
- (config->mprq.stride_size_n > mprq_max_stride_size_n ||
- config->mprq.stride_size_n < mprq_min_stride_size_n)) {
- config->mprq.stride_size_n =
- RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
- mprq_min_stride_size_n),
- mprq_max_stride_size_n);
+ 1 << config->mprq.log_stride_num);
+ }
+ if (config->mprq.log_stride_size &&
+ (config->mprq.log_stride_size > mprq_max_stride_size_n ||
+ config->mprq.log_stride_size < mprq_min_stride_size_n)) {
+ config->mprq.log_stride_size =
+ RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
+ mprq_min_stride_size_n),
+ mprq_max_stride_size_n);
DRV_LOG(WARNING,
"the size of a stride"
" for Multi-Packet RQ is out of range,"
" setting default value (%u)",
- 1 << config->mprq.stride_size_n);
+ 1 << config->mprq.log_stride_size);
}
- config->mprq.min_stride_size_n = mprq_min_stride_size_n;
- config->mprq.max_stride_size_n = mprq_max_stride_size_n;
+ config->mprq.log_min_stride_size = mprq_min_stride_size_n;
+ config->mprq.log_max_stride_size = mprq_max_stride_size_n;
} else if (config->mprq.enabled && !mprq) {
DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
config->mprq.enabled = 0;
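
The adjustment above clamps an out-of-range devarg back to the compiled-in default, itself bounded by the range the device reports. A minimal stand-alone sketch of the same RTE_MIN/RTE_MAX composition (the function name and range values are illustrative, not from the driver):

```c
#include <stdio.h>
#include <rte_common.h> /* RTE_MIN, RTE_MAX */

/* Illustrative clamp: take the compiled-in default log value and
 * bound it by the [min, max] range the device reports. */
static unsigned int
clamp_log_value(unsigned int def, unsigned int min, unsigned int max)
{
	return RTE_MIN(RTE_MAX(def, min), max);
}

int
main(void)
{
	/* E.g. default log = 6 (64 strides), device range [3, 16]. */
	unsigned int log_num = clamp_log_value(6U, 3U, 16U);

	printf("log_stride_num=%u -> %u strides\n", log_num, 1U << log_num);
	return 0;
}
```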
@@ -272,8 +272,8 @@ mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
- .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
- .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+ .single_stride_log_num_of_bytes = rxq_data->log_strd_sz,
+ .single_wqe_log_num_of_strides = rxq_data->log_strd_num,
.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
};
}
@@ -1884,9 +1884,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
config->mprq.enabled = !!tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
- config->mprq.stride_num_n = tmp;
+ config->mprq.log_stride_num = tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
- config->mprq.stride_size_n = tmp;
+ config->mprq.log_stride_size = tmp;
} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
config->mprq.max_memcpy_len = tmp;
} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
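
Note that the devargs keys themselves (MLX5_RX_MPRQ_LOG_STRIDE_NUM, MLX5_RX_MPRQ_LOG_STRIDE_SIZE) already carried the "log" prefix; the rename aligns the config fields with them. The values are still log-2 exponents, for example (PCI address illustrative):

```c
/* dpdk-testpmd -a 0000:03:00.0,mprq_en=1,mprq_log_stride_num=6,mprq_log_stride_size=11
 * requests 2^6 = 64 strides of 2^11 = 2048 bytes per MPRQ WQE. */
```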
@@ -275,10 +275,10 @@ struct mlx5_dev_config {
unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
- unsigned int stride_num_n; /* Number of strides. */
- unsigned int stride_size_n; /* Size of a stride. */
- unsigned int min_stride_size_n; /* Min size of a stride. */
- unsigned int max_stride_size_n; /* Max size of a stride. */
+ unsigned int log_stride_num; /* Log number of strides. */
+ unsigned int log_stride_size; /* Log size of a stride. */
+ unsigned int log_min_stride_size; /* Log min size of a stride. */
+ unsigned int log_max_stride_size; /* Log max size of a stride. */
unsigned int max_memcpy_len;
/* Maximum packet size to memcpy Rx packets. */
unsigned int min_rxqs_num;
@@ -113,10 +113,10 @@
#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
-#define MLX5_MPRQ_STRIDE_NUM_N 6U
+#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM 6U
/* Log 2 of the default size of a stride per WQE for Multi-Packet RQ. */
-#define MLX5_MPRQ_STRIDE_SIZE_N 11U
+#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE 11U
/* Two-byte shift is disabled for Multi-Packet RQ. */
#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
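
For reference, the renamed defaults expand as follows under RTE_BIT32(), DPDK's `UINT32_C(1) << (n)` helper (a sketch of the arithmetic only):

```c
#include <rte_bitops.h> /* RTE_BIT32(n) == (UINT32_C(1) << (n)) */

/* With the defaults above, each Multi-Packet RQ WQE carries
 * 2^6 = 64 strides of 2^11 = 2048 bytes, i.e. a 128 KiB buffer. */
static const uint32_t mprq_def_strides = RTE_BIT32(6U);       /* 64 */
static const uint32_t mprq_def_stride_sz = RTE_BIT32(11U);    /* 2048 */
static const uint32_t mprq_def_buf_len = RTE_BIT32(6U + 11U); /* 131072 */
```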
@@ -257,11 +257,11 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
* 512*2^single_wqe_log_num_of_strides.
*/
rq_attr.wq_attr.single_wqe_log_num_of_strides =
- rxq_data->strd_num_n -
+ rxq_data->log_strd_num -
MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
rq_attr.wq_attr.single_stride_log_num_of_bytes =
- rxq_data->strd_sz_n -
+ rxq_data->log_strd_sz -
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
wqe_size = sizeof(struct mlx5_wqe_mprq);
} else {
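
The two comments retained above define the hardware encoding; here is a small sketch of the decode, assuming the minima they imply (MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES == 9 since 512 = 2^9, MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES == 6 since 64 = 2^6):

```c
#include <stdint.h>

/* Number of strides = 512 * 2^single_wqe_log_num_of_strides. */
static inline uint32_t
mprq_strides_from_attr(uint32_t log_num_of_strides)
{
	return UINT32_C(512) << log_num_of_strides;
}

/* Stride size = (2^single_stride_log_num_of_bytes) * 64B. */
static inline uint32_t
mprq_stride_bytes_from_attr(uint32_t log_num_of_bytes)
{
	return UINT32_C(64) << log_num_of_bytes;
}
```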
@@ -73,7 +73,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
const unsigned int cqe_n = (1 << rxq->cqe_n);
const unsigned int sges_n = (1 << rxq->sges_n);
const unsigned int elts_n = (1 << rxq->elts_n);
- const unsigned int strd_n = (1 << rxq->strd_num_n);
+ const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
const unsigned int cqe_cnt = cqe_n - 1;
unsigned int cq_ci, used;
@@ -167,8 +167,8 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
- (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
- (1 << rxq->elts_n);
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
}
/**
@@ -354,10 +354,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
scat = &((volatile struct mlx5_wqe_mprq *)
rxq->wqes)[i].dseg;
- addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
- 1 << rxq->strd_num_n);
- byte_count = (1 << rxq->strd_sz_n) *
- (1 << rxq->strd_num_n);
+ addr = (uintptr_t)mlx5_mprq_buf_addr
+ (buf, RTE_BIT32(rxq->log_strd_num));
+ byte_count = RTE_BIT32(rxq->log_strd_sz) *
+ RTE_BIT32(rxq->log_strd_num);
lkey = mlx5_rx_addr2mr(rxq, addr);
} else {
struct rte_mbuf *buf = (*rxq->elts)[i];
@@ -383,7 +383,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
.ai = 0,
};
rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
- (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
+ (wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
/* Update doorbell counter. */
rxq->rq_ci = wqe_n >> rxq->sges_n;
rte_io_wmb();
@@ -412,7 +412,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
const uint16_t cqe_n = 1 << rxq->cqe_n;
const uint16_t cqe_mask = cqe_n - 1;
const uint16_t wqe_n = 1 << rxq->elts_n;
- const uint16_t strd_n = 1 << rxq->strd_num_n;
+ const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
union {
@@ -1045,8 +1045,8 @@ uint16_t
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct mlx5_rxq_data *rxq = dpdk_rxq;
- const uint32_t strd_n = 1 << rxq->strd_num_n;
- const uint32_t strd_sz = 1 << rxq->strd_sz_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
@@ -88,8 +88,8 @@ struct mlx5_rxq_data {
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int strd_num_n:5; /* Log 2 of the number of stride. */
- unsigned int strd_sz_n:4; /* Log 2 of stride size. */
+ unsigned int log_strd_num:5; /* Log 2 of the number of strides. */
+ unsigned int log_strd_sz:4; /* Log 2 of stride size. */
unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
@@ -395,7 +395,7 @@ mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
- const uint32_t strd_n = 1 << rxq->strd_num_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
struct mlx5_mprq_buf *rep = rxq->mprq_repl;
volatile struct mlx5_wqe_data_seg *wqe =
&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
@@ -453,8 +453,8 @@ static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
- const uint32_t strd_n = 1 << rxq->strd_num_n;
- const uint16_t strd_sz = 1 << rxq->strd_sz_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
const uint16_t strd_shift =
MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
const int32_t hdrm_overlap =
@@ -599,7 +599,7 @@ mlx5_check_mprq_support(struct rte_eth_dev *dev)
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
- return rxq->strd_num_n > 0;
+ return rxq->log_strd_num > 0;
}
/**
@@ -67,7 +67,7 @@ mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
unsigned int wqe_n = 1 << rxq_data->elts_n;
if (mlx5_rxq_mprq_enabled(rxq_data))
- cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
else
cqe_n = wqe_n - 1;
return cqe_n;
@@ -137,8 +137,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
- (1 << rxq_ctrl->rxq.elts_n);
+ RTE_BIT32(rxq_ctrl->rxq.elts_n) *
+ RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
+ RTE_BIT32(rxq_ctrl->rxq.elts_n);
unsigned int i;
int err;
@@ -293,8 +294,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
- (1 << rxq->elts_n);
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
const uint16_t q_mask = q_n - 1;
uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
rxq->elts_ci : rxq->rq_ci;
@@ -1373,8 +1374,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
unsigned int buf_len;
unsigned int obj_num;
unsigned int obj_size;
- unsigned int strd_num_n = 0;
- unsigned int strd_sz_n = 0;
+ unsigned int log_strd_num = 0;
+ unsigned int log_strd_sz = 0;
unsigned int i;
unsigned int n_ibv = 0;
int ret;
@@ -1393,16 +1394,18 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
- if (strd_num_n < rxq->strd_num_n)
- strd_num_n = rxq->strd_num_n;
+ if (log_strd_num < rxq->log_strd_num)
+ log_strd_num = rxq->log_strd_num;
/* Get the max size of a stride. */
- if (strd_sz_n < rxq->strd_sz_n)
- strd_sz_n = rxq->strd_sz_n;
- }
- MLX5_ASSERT(strd_num_n && strd_sz_n);
- buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
- obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
- sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
+ if (log_strd_sz < rxq->log_strd_sz)
+ log_strd_sz = rxq->log_strd_sz;
+ }
+ MLX5_ASSERT(log_strd_num && log_strd_sz);
+ buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
+ obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
+ RTE_BIT32(log_strd_num) *
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
/*
* Received packets can be either memcpy'd or externally referenced. In
* case that the packet is attached to an mbuf as an external buffer, as
@@ -1448,7 +1451,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init,
- (void *)((uintptr_t)1 << strd_num_n),
+ (void *)((uintptr_t)1 << log_strd_num),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
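
The object size computed above is one receive buffer plus a per-stride shared-info record and headroom. A rough stand-alone version of the same layout arithmetic (struct mlx5_mprq_buf is driver-private, so a placeholder stands in for it here):

```c
#include <rte_bitops.h>
#include <rte_mbuf.h> /* struct rte_mbuf_ext_shared_info, RTE_PKTMBUF_HEADROOM */

/* Stand-in for the driver-private struct mlx5_mprq_buf header. */
struct mprq_buf_placeholder { void *opaque[4]; };

static inline size_t
mprq_obj_size(unsigned int log_strd_num, unsigned int log_strd_sz)
{
	size_t buf_len = (size_t)RTE_BIT32(log_strd_num) *
			 RTE_BIT32(log_strd_sz);

	/* Header + data strides + one shared info per stride + headroom. */
	return sizeof(struct mprq_buf_placeholder) + buf_len +
	       RTE_BIT32(log_strd_num) *
	       sizeof(struct rte_mbuf_ext_shared_info) +
	       RTE_PKTMBUF_HEADROOM;
}
```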
@@ -1564,15 +1567,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
!rx_seg[0].offset && !rx_seg[0].length;
- unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
- unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ?
- log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
- unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
- (config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
+ unsigned int log_mprq_stride_nums = config->mprq.log_stride_num ?
+ config->mprq.log_stride_num : MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ unsigned int log_mprq_stride_size = non_scatter_min_mbuf_size <=
+ RTE_BIT32(config->mprq.log_max_stride_size) ?
+ log2above(non_scatter_min_mbuf_size) :
+ MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE;
+ unsigned int mprq_stride_cap = (config->mprq.log_stride_num ?
+ RTE_BIT32(config->mprq.log_stride_num) :
+ RTE_BIT32(log_mprq_stride_nums)) *
+ (config->mprq.log_stride_size ?
+ RTE_BIT32(config->mprq.log_stride_size) :
+ RTE_BIT32(log_mprq_stride_size));
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
@@ -1584,7 +1590,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
(!!mprq_en) *
- (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
+ (desc >> log_mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
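
The stride-size selection above sizes a stride to hold one worst-case packet when the device's maximum stride allows it, and otherwise falls back to the default. A sketch using rte_log2_u32(), which rounds up like the driver's log2above() (names are illustrative):

```c
#include <rte_common.h> /* rte_log2_u32() */
#include <rte_bitops.h> /* RTE_BIT32() */

static unsigned int
pick_log_stride_size(uint32_t min_mbuf_size, unsigned int log_max_stride,
		     unsigned int log_default)
{
	/* Fit one packet per stride if the device's max stride allows it. */
	if (min_mbuf_size <= RTE_BIT32(log_max_stride))
		return rte_log2_u32(min_mbuf_size);
	return log_default;
}
```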
@@ -1689,37 +1695,37 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
* - MPRQ is enabled.
* - The number of descs is more than the number of strides.
* - max_rx_pktlen plus overhead is less than the max size
- * of a stride or mprq_stride_size is specified by a user.
+ * of a stride or log_mprq_stride_size is specified by a user.
* Need to make sure that there are enough strides to encap
- * the maximum packet size in case mprq_stride_size is set.
+ * the maximum packet size in case log_mprq_stride_size is set.
* Otherwise, enable Rx scatter if necessary.
*/
- if (mprq_en && desc > (1U << mprq_stride_nums) &&
+ if (mprq_en && desc > RTE_BIT32(log_mprq_stride_nums) &&
(non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ||
- (config->mprq.stride_size_n &&
+ RTE_BIT32(config->mprq.log_max_stride_size) ||
+ (config->mprq.log_stride_size &&
non_scatter_min_mbuf_size <= mprq_stride_cap))) {
/* TODO: Rx scatter isn't supported yet. */
tmpl->rxq.sges_n = 0;
/* Trim the number of descs needed. */
- desc >>= mprq_stride_nums;
- tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : mprq_stride_nums;
- tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
- config->mprq.stride_size_n : mprq_stride_size;
+ desc >>= log_mprq_stride_nums;
+ tmpl->rxq.log_strd_num = config->mprq.log_stride_num ?
+ config->mprq.log_stride_num : log_mprq_stride_nums;
+ tmpl->rxq.log_strd_sz = config->mprq.log_stride_size ?
+ config->mprq.log_stride_size : log_mprq_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_scatter_en =
!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pktlen,
- (1u << tmpl->rxq.strd_num_n) *
- (1u << tmpl->rxq.strd_sz_n));
+ RTE_BIT32(tmpl->rxq.log_strd_num) *
+ RTE_BIT32(tmpl->rxq.log_strd_sz));
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
- tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+ tmpl->rxq.log_strd_num, tmpl->rxq.log_strd_sz);
} else if (tmpl->rxq.rxseg_n == 1) {
MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
tmpl->rxq.sges_n = 0;
@@ -1762,15 +1768,15 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
" min_stride_sz = %u, max_stride_sz = %u).",
dev->data->port_id, non_scatter_min_mbuf_size,
desc, priv->rxqs_n,
- config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) :
- (1U << mprq_stride_size),
- config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) :
- (1U << mprq_stride_nums),
+ config->mprq.log_stride_size ?
+ RTE_BIT32(config->mprq.log_stride_size) :
+ RTE_BIT32(log_mprq_stride_size),
+ config->mprq.log_stride_num ?
+ RTE_BIT32(config->mprq.log_stride_num) :
+ RTE_BIT32(log_mprq_stride_nums),
config->mprq.min_rxqs_num,
- (1U << config->mprq.min_stride_size_n),
- (1U << config->mprq.max_stride_size_n));
+ RTE_BIT32(config->mprq.log_min_stride_size),
+ RTE_BIT32(config->mprq.log_max_stride_size));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
@@ -148,7 +148,7 @@ static inline void
mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
const uint16_t wqe_n = 1 << rxq->elts_n;
- const uint32_t strd_n = 1 << rxq->strd_num_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
const uint32_t elts_n = wqe_n * strd_n;
const uint32_t wqe_mask = elts_n - 1;
uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
@@ -197,8 +197,8 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
{
const uint16_t wqe_n = 1 << rxq->elts_n;
const uint16_t wqe_mask = wqe_n - 1;
- const uint16_t strd_sz = 1 << rxq->strd_sz_n;
- const uint32_t strd_n = 1 << rxq->strd_num_n;
+ const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
const uint32_t elts_n = wqe_n * strd_n;
const uint32_t elts_mask = elts_n - 1;
uint32_t elts_idx = rxq->rq_pi & elts_mask;
@@ -428,7 +428,7 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
const uint16_t wqe_n = 1 << rxq->elts_n;
- const uint32_t strd_n = 1 << rxq->strd_num_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
const uint32_t elts_n = wqe_n * strd_n;
const uint32_t elts_mask = elts_n - 1;
volatile struct mlx5_cqe *cq;