@@ -985,6 +985,7 @@ struct mlx5_dev_spawn_data {
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
+ .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
.rx_queue_release = mlx5_rx_queue_release,
.tx_queue_release = mlx5_tx_queue_release,
@@ -1051,6 +1052,7 @@ struct mlx5_dev_spawn_data {
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
+ .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
.rx_queue_release = mlx5_rx_queue_release,
.tx_queue_release = mlx5_tx_queue_release,
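For context, here is how an application reaches the new callback, going through the generic hairpin API added to ethdev in the same release. A minimal sketch, assuming rte_eth_rx_hairpin_queue_setup() and struct rte_eth_hairpin_conf as declared in rte_ethdev.h; port and queue numbers are illustrative:

    /* Hypothetical usage sketch, not part of this patch: pair Rx queue 1
     * with Tx queue 1 on the same port. The ethdev layer dispatches this
     * call to the .rx_hairpin_queue_setup handler registered above. */
    struct rte_eth_hairpin_conf conf = {
            .peer_count = 1,
            .peers[0] = { .port = port_id, .queue = 1 },
    };

    if (rte_eth_rx_hairpin_queue_setup(port_id, 1, 256, &conf) != 0)
            rte_exit(EXIT_FAILURE, "Rx hairpin queue setup failed\n");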
@@ -106,21 +106,25 @@
struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
uint16_t n = 0;
+ uint16_t n_ibv = 0; /* Number of standard (non-hairpin) queues. */
if (mlx5_check_mprq_support(dev) < 0)
return 0;
/* All the configured queues should be enabled. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+ (rxq, struct mlx5_rxq_ctrl, rxq);
- if (!rxq)
+ if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
+ n_ibv++;
if (mlx5_rxq_mprq_enabled(rxq))
++n;
}
/* Multi-Packet RQ can't be partially configured. */
- assert(n == 0 || n == priv->rxqs_n);
- return n == priv->rxqs_n;
+ assert(n == 0 || n == n_ibv);
+ return n == n_ibv;
}
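container_of() above is pure pointer arithmetic, so evaluating it before the NULL check is safe: rxq_ctrl->type is only dereferenced once the short-circuit || has established that rxq is non-NULL. A self-contained sketch of the idiom, using stand-in types rather than the mlx5 definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the address of the enclosing struct from a member pointer;
     * no memory access happens here, only arithmetic. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rxq_data { unsigned int elts_n; };

    struct rxq_ctrl {
            int type;            /* queue type tag */
            struct rxq_data rxq; /* embedded data-plane struct */
    };

    int main(void)
    {
            struct rxq_ctrl ctrl = { .type = 1, .rxq = { .elts_n = 9 } };
            struct rxq_data *rxq = &ctrl.rxq;
            struct rxq_ctrl *back = container_of(rxq, struct rxq_ctrl, rxq);

            printf("%d\n", back->type); /* prints 1: same object recovered */
            return 0;
    }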
/**
@@ -427,6 +431,7 @@
}
/**
+ * Rx queue presetup checks.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -434,25 +439,14 @@
* RX queue index.
* @param desc
* Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+static int
+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@@ -476,6 +470,41 @@
return -rte_errno;
}
mlx5_rxq_release(dev, idx);
+ return 0;
+}
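The helper keeps the pre-existing rounding behaviour visible at the top of the hunk: a desc that is not a power of two is enlarged to the next power of two via log2above() rather than rejected. A standalone illustration, assuming log2above() computes the round-up exponent as in mlx5_utils.h:

    #include <stdio.h>

    /* Round-up log2, as assumed for log2above() in mlx5_utils.h. */
    static unsigned int log2above(unsigned int v)
    {
            unsigned int l;
            unsigned int r;

            for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
                    r |= (v & 1);
            return l + r;
    }

    int main(void)
    {
            unsigned int desc = 300; /* not a power of two */

            desc = 1 << log2above(desc);
            printf("%u\n", desc); /* 512 */
            return 0;
    }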
+
+/**
+ * DPDK callback to configure a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int res;
+
+ res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+ if (res)
+ return res;
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
@@ -490,6 +519,56 @@
}
/**
+ * DPDK callback to configure a Rx hairpin queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param hairpin_conf
+ * Hairpin configuration parameters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc,
+ const struct rte_eth_hairpin_conf *hairpin_conf)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int res;
+
+ res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+ if (res)
+ return res;
+ if (hairpin_conf->peer_count != 1 ||
+ hairpin_conf->peers[0].port != dev->data->port_id ||
+ hairpin_conf->peers[0].queue >= priv->txqs_n) {
+ DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
+ " invalid hairpin configuration", dev->data->port_id,
+ idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+ if (!rxq_ctrl) {
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ dev->data->port_id, idx);
+ (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ return 0;
+}
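The three conditions above define what this first implementation accepts: exactly one peer, the peer on the same port, and the peer queue inside the Tx queue range. A standalone mirror of the predicate, with illustrative stand-in types, that may help when constructing a configuration that passes:

    #include <stdbool.h>
    #include <stdint.h>

    struct peer { uint16_t port; uint16_t queue; };

    /* Mirrors the hairpin validation above: single peer, same port,
     * peer queue below the number of Tx queues. Illustrative only. */
    static bool
    hairpin_conf_ok(uint16_t peer_count, const struct peer *peers,
                    uint16_t port_id, uint16_t txqs_n)
    {
            return peer_count == 1 &&
                   peers[0].port == port_id &&
                   peers[0].queue < txqs_n;
    }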
+
+/**
* DPDK callback to release a RX queue.
*
* @param dpdk_rxq
@@ -561,6 +640,24 @@
}
/**
+ * Release the resources allocated for a hairpin Rx queue object.
+ *
+ * @param rxq_obj
+ * Hairpin Rx queue object.
+ */
+static void
+rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
+
+ assert(rxq_obj);
+ rq_attr.state = MLX5_RQC_STATE_RST; /* Target state. */
+ rq_attr.rq_state = MLX5_RQC_STATE_RDY; /* Current state. */
+ mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
+ claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
+}
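In mlx5_devx_modify_rq_attr, rq_state holds the state the RQ is currently in and state the state to transition to, so the call above moves a ready RQ back to reset before it is destroyed. The opposite transition would be issued when the queue is brought up; a hedged sketch of that counterpart, assuming the same DevX helper (it is not part of this patch):

    /* Hypothetical start-side mirror of the teardown above:
     * move the hairpin RQ from RST to RDY. */
    struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

    rq_attr.rq_state = MLX5_RQC_STATE_RST; /* Current state. */
    rq_attr.state = MLX5_RQC_STATE_RDY; /* Target state. */
    if (mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr))
            DRV_LOG(ERR, "cannot move hairpin RQ to ready state");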
+
+/**
* Release an Rx verbs/DevX queue object.
*
* @param rxq_obj
@@ -577,14 +674,22 @@
- assert(rxq_obj->wq);
- assert(rxq_obj->cq);
+ if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)
+ assert(rxq_obj->wq);
+ if (rxq_obj->type != MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+ assert(rxq_obj->cq);
if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
- rxq_free_elts(rxq_obj->rxq_ctrl);
- if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+ switch (rxq_obj->type) {
+ case MLX5_RXQ_OBJ_TYPE_IBV:
+ rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
- } else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+ claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+ break;
+ case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
+ rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
rxq_release_rq_resources(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+ break;
+ case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
+ rxq_obj_hairpin_release(rxq_obj);
+ break;
}
- claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
if (rxq_obj->channel)
claim_zero(mlx5_glue->destroy_comp_channel
(rxq_obj->channel));
@@ -1132,6 +1237,70 @@
}
/**
+ * Create the Rx hairpin queue object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ *
+ * @return
+ * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_devx_create_rq_attr attr = { 0 };
+ struct mlx5_rxq_obj *tmpl = NULL;
+ int ret = 0;
+
+ assert(rxq_data);
+ assert(!rxq_ctrl->obj);
+ tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
+ tmpl->rxq_ctrl = rxq_ctrl;
+ attr.hairpin = 1;
+ /* Workaround for hairpin startup: fix the WQ size to 32 packets. */
+ attr.wq_attr.log_hairpin_num_packets = log2above(32);
+ /* Workaround for packets larger than 1KB: use the maximal WQ data
+ * size the HCA supports.
+ */
+ attr.wq_attr.log_hairpin_data_sz =
+ priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+ rxq_ctrl->socket);
+ if (!tmpl->rq) {
+ DRV_LOG(ERR,
+ "port %u Rx hairpin queue %u can't create rq object",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)tmpl);
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return tmpl;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl) {
+ if (tmpl->rq)
+ mlx5_devx_cmd_destroy(tmpl->rq);
+ rte_free(tmpl);
+ }
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+}
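Both wq_attr fields set above are log2-encoded, which is what the workaround comments rely on: log2above(32) = 5 requests room for 32 packets, and log_hairpin_data_sz bounds the per-packet data size at 2^N bytes. A tiny illustration; the capability value is an assumed example, not a guaranteed HCA maximum:

    unsigned int log_pkts = log2above(32); /* = 5, i.e. 32 packets */
    unsigned int log_data_sz = 11; /* assumed log_max_hairpin_wq_data_sz */
    unsigned int max_pkt_bytes = 1u << log_data_sz; /* 2048B > 1KB packets */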
+
+/**
* Create the Rx queue Verbs/DevX object.
*
* @param dev
@@ -1163,6 +1332,8 @@ struct mlx5_rxq_obj *
assert(rxq_data);
assert(!rxq_ctrl->obj);
+ if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+ return mlx5_rxq_obj_hairpin_new(dev, idx);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq_ctrl;
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
@@ -1433,15 +1604,19 @@ struct mlx5_rxq_obj *
unsigned int strd_num_n = 0;
unsigned int strd_sz_n = 0;
unsigned int i;
+ unsigned int n_ibv = 0; /* Number of standard (non-hairpin) queues. */
if (!mlx5_mprq_enabled(dev))
return 0;
/* Count the total number of descriptors configured. */
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+ (rxq, struct mlx5_rxq_ctrl, rxq);
- if (rxq == NULL)
+ if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
+ n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
if (strd_num_n < rxq->strd_num_n)
@@ -1466,7 +1641,7 @@ struct mlx5_rxq_obj *
* this Mempool gets available again.
*/
desc *= 4;
- obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+ obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
/*
* rte_mempool_create_empty() has sanity check to refuse large cache
* size compared to the number of elements.
@@ -1514,8 +1689,10 @@ struct mlx5_rxq_obj *
/* Set mempool for each Rx queue. */
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+ (rxq, struct mlx5_rxq_ctrl, rxq);
- if (rxq == NULL)
+ if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
rxq->mprq_mp = mp;
}
@@ -1620,6 +1797,7 @@ struct mlx5_rxq_ctrl *
rte_errno = ENOMEM;
return NULL;
}
+ tmpl->type = MLX5_RXQ_TYPE_STANDARD;
if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
MLX5_MR_BTREE_CACHE_N, socket)) {
/* rte_errno is already set. */
@@ -1788,6 +1966,49 @@ struct mlx5_rxq_ctrl *
}
/**
+ * Create a DPDK Rx hairpin queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param hairpin_conf
+ * The hairpin binding configuration.
+ *
+ * @return
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ const struct rte_eth_hairpin_conf *hairpin_conf)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
+ if (!tmpl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
+ tmpl->socket = SOCKET_ID_ANY;
+ tmpl->rxq.rss_hash = 0;
+ tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->priv = priv;
+ tmpl->rxq.mp = NULL;
+ tmpl->rxq.elts_n = log2above(desc);
+ tmpl->rxq.elts = NULL;
+ tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
+ tmpl->hairpin_conf = *hairpin_conf;
+ tmpl->rxq.idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
+ return tmpl;
+}
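A note on the allocation choices above, as far as the patch lets one infer them: hairpin traffic loops inside the device and never lands in host memory, which is presumably why tmpl->rxq.mp and tmpl->rxq.elts stay NULL, the MR b-tree is left zeroed, and SOCKET_ID_ANY suffices for the control structure. This also matches the release path below, where mlx5_mr_btree_free() is skipped for non-standard queues.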
+
+/**
* Get a Rx queue.
*
* @param dev
@@ -1841,7 +2062,8 @@ struct mlx5_rxq_ctrl *
if (rxq_ctrl->dbr_umem_id_valid)
claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
rxq_ctrl->dbr_offset));
- mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
@@ -159,6 +159,13 @@ struct mlx5_rxq_data {
enum mlx5_rxq_obj_type {
MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
+ MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN,
+ /* mlx5_rxq_obj with mlx5_devx_rq and hairpin support. */
+};
+
+enum mlx5_rxq_type {
+ MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
+ MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
};
/* Verbs/DevX Rx queue elements. */
@@ -183,6 +190,7 @@ struct mlx5_rxq_ctrl {
rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
struct mlx5_priv *priv; /* Back pointer to private data. */
+ enum mlx5_rxq_type type; /* Rxq type. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
unsigned int dbr_umem_id_valid:1; /* dbr_umem_id holds a valid value. */
@@ -193,6 +201,7 @@ struct mlx5_rxq_ctrl {
uint32_t dbr_umem_id; /* Storing door-bell information, */
uint64_t dbr_offset; /* needed when freeing door-bell. */
struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */
+ struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
};
enum mlx5_ind_tbl_type {
@@ -339,6 +348,9 @@ struct mlx5_txq_ctrl {
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
+int mlx5_rx_hairpin_queue_setup
+ (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
@@ -351,6 +363,9 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
+struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
+ (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
@@ -118,6 +118,13 @@
if (!rxq_ctrl)
continue;
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
+ rxq_ctrl->obj = mlx5_rxq_obj_new
+ (dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
+ if (!rxq_ctrl->obj)
+ goto error;
+ continue;
+ }
/* Pre-register Rx mempool. */
mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;