@@ -1158,6 +1158,22 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
+ /*
+ * When the user configures remote PD and CTX and the device creates RxQs
+ * by DevX, external RxQs are both supported and requested.
+ */
+ if (mlx5_imported_pd_and_ctx(sh->cdev) && mlx5_devx_obj_ops_en(sh)) {
+ priv->ext_rxqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+ sizeof(struct mlx5_external_rxq) *
+ MLX5_MAX_EXT_RX_QUEUES, 0,
+ SOCKET_ID_ANY);
+ if (priv->ext_rxqs == NULL) {
+ DRV_LOG(ERR, "Fail to allocate external RxQ array.");
+ err = ENOMEM;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "External RxQ is supported.");
+ }
priv->sh = sh;
priv->dev_port = spawn->phys_port;
priv->pci_dev = spawn->pci_dev;
@@ -1617,6 +1633,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_list_destroy(priv->hrxqs);
if (eth_dev && priv->flex_item_map)
mlx5_flex_item_port_cleanup(eth_dev);
+ mlx5_free(priv->ext_rxqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
@@ -1930,6 +1930,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
dev->data->port_id);
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
+ mlx5_free(priv->ext_rxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
@@ -1461,6 +1461,7 @@ struct mlx5_priv {
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
+ struct mlx5_external_rxq *ext_rxqs; /* External RX queues array. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
@@ -175,6 +175,9 @@
/* Maximum number of indirect actions supported by rte_flow */
#define MLX5_MAX_INDIRECT_ACTIONS 3
+/* Maximum number of external Rx queues supported by rte_flow */
+#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
+
/*
* Linux definition of static_assert is found in /usr/include/assert.h.
* Windows does not require a redefinition.
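
As a sanity sketch (not part of the patch), the range arithmetic behind these two
macros can be checked with compile-time assertions; it only assumes C11
static_assert and the public MLX5_EXTERNAL_RX_QUEUE_ID_MIN definition from
rte_pmd_mlx5.h:

    #include <assert.h>
    #include <stdint.h>
    #include <rte_pmd_mlx5.h>

    /* UINT16_MAX is 65535, so the external rte_flow index range is
     * [64536, 65535], i.e. exactly 1000 slots mapping to ext_rxqs[0..999].
     */
    static_assert(MLX5_EXTERNAL_RX_QUEUE_ID_MIN == 64536,
                  "first external rte_flow queue index");
    static_assert(UINT16_MAX - MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1 == 1000,
                  "number of external Rx queue slots");
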
@@ -27,6 +27,7 @@
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_devx.h"
+#include "rte_pmd_mlx5.h"
/**
* Get the interface index from device name.
@@ -81,9 +82,10 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
rte_errno = EINVAL;
return -rte_errno;
}
- priv->rss_conf.rss_key =
- mlx5_realloc(priv->rss_conf.rss_key, MLX5_MEM_RTE,
- MLX5_RSS_HASH_KEY_LEN, 0, SOCKET_ID_ANY);
+ priv->rss_conf.rss_key = mlx5_realloc(priv->rss_conf.rss_key,
+ MLX5_MEM_RTE,
+ MLX5_RSS_HASH_KEY_LEN, 0,
+ SOCKET_ID_ANY);
if (!priv->rss_conf.rss_key) {
DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
dev->data->port_id, rxqs_n);
@@ -127,6 +129,14 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
rte_errno = EINVAL;
return -rte_errno;
}
+ if (priv->ext_rxqs && rxqs_n >= MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u), "
+ "the maximal number of internal Rx queues is %u",
+ dev->data->port_id, rxqs_n,
+ MLX5_EXTERNAL_RX_QUEUE_ID_MIN - 1);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rxqs_n != priv->rxqs_n) {
DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
dev->data->port_id, priv->rxqs_n, rxqs_n);
@@ -175,6 +175,12 @@ struct mlx5_rxq_priv {
uint32_t hairpin_status; /* Hairpin binding status. */
};
+/* External RX queue descriptor. */
+struct mlx5_external_rxq {
+ uint32_t hw_id; /* Queue index in the hardware. */
+ uint32_t refcnt; /* Reference counter. */
+};
+
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
@@ -30,6 +30,7 @@
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_devx.h"
+#include "rte_pmd_mlx5.h"
/* Default RSS hash key also used for ConnectX-3. */
@@ -3008,3 +3009,119 @@ mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
data->rt_timestamp = sh->dev_cap.rt_timestamp;
}
}
+
+/**
+ * Validate the given external RxQ rte_flow index, and get a pointer to the
+ * corresponding external RxQ object to map/unmap.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ *
+ * @return
+ * Pointer to the corresponding external RxQ on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_external_rxq *
+mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+
+ if (dpdk_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
+ dpdk_idx, MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ DRV_LOG(ERR, "There is no Ethernet device for port %u.",
+ port_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
+ DRV_LOG(ERR, "Port %u "
+ "external RxQ isn't supported on local PD and CTX.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+ if (!mlx5_devx_obj_ops_en(priv->sh)) {
+ DRV_LOG(ERR,
+ "Port %u external RxQ isn't supported by Verbs API.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+ /*
+ * When the user configures remote PD and CTX and the device creates RxQs
+ * by DevX, the external RxQs array is allocated.
+ */
+ MLX5_ASSERT(priv->ext_rxqs != NULL);
+ return &priv->ext_rxqs[dpdk_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+}
+
+int
+rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx)
+{
+ struct mlx5_external_rxq *ext_rxq;
+ uint32_t unmapped = 0;
+
+ ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_rxq == NULL)
+ return -rte_errno;
+ if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (ext_rxq->hw_id != hw_idx) {
+ DRV_LOG(ERR, "Port %u external RxQ index %u "
+ "is already mapped to HW index (requesting is "
+ "%u, existing is %u).",
+ port_id, dpdk_idx, hw_idx, ext_rxq->hw_id);
+ rte_errno = EEXIST;
+ return -rte_errno;
+ }
+ DRV_LOG(WARNING, "Port %u external RxQ index %u "
+ "is already mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+
+ } else {
+ ext_rxq->hw_id = hw_idx;
+ DRV_LOG(DEBUG, "Port %u external RxQ index %u "
+ "is successfully mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+ }
+ return 0;
+}
+
+int
+rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct mlx5_external_rxq *ext_rxq;
+ uint32_t mapped = 1;
+
+ ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_rxq == NULL)
+ return -rte_errno;
+ if (ext_rxq->refcnt > 1) {
+ DRV_LOG(ERR, "Port %u external RxQ index %u still referenced.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG,
+ "Port %u external RxQ index %u is successfully unmapped.",
+ port_id, dpdk_idx);
+ return 0;
+}
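
The compare-and-swap on refcnt makes mapping idempotent for a matching HW index
and an error for a conflicting one. A hypothetical calling sequence illustrating
the intended semantics (the helper name, port_id, rqn and other_rqn are
placeholders, not part of the patch):

    #include <rte_errno.h>
    #include <rte_pmd_mlx5.h>

    /* Hypothetical helper: exercise one external slot. "rqn" and "other_rqn"
     * are two distinct RQ numbers the application created itself via DevX.
     */
    static int
    exercise_ext_rxq_map(uint16_t port_id, uint32_t rqn, uint32_t other_rqn)
    {
        uint16_t idx = MLX5_EXTERNAL_RX_QUEUE_ID_MIN;
        int ret;

        /* refcnt 0 -> 1, hw_id recorded: returns 0. */
        ret = rte_pmd_mlx5_external_rx_queue_id_map(port_id, idx, rqn);
        if (ret != 0)
            return ret;
        /* Same HW index again: returns 0, with a WARNING log. */
        (void)rte_pmd_mlx5_external_rx_queue_id_map(port_id, idx, rqn);
        /* Conflicting HW index: returns -EEXIST, existing mapping is kept. */
        (void)rte_pmd_mlx5_external_rx_queue_id_map(port_id, idx, other_rqn);
        /* refcnt 1 -> 0; a second unmap would return -EINVAL. */
        return rte_pmd_mlx5_external_rx_queue_id_unmap(port_id, idx);
    }
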
@@ -61,8 +61,56 @@ int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
__rte_experimental
int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
+/**
+ * Minimum value of the external Rx queue rte_flow index.
+ */
+#define MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+
+/**
+ * Update the mapping between the rte_flow queue index (16 bits) and the HW
+ * queue index (32 bits) for an RxQ created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ * @param[in] hw_idx
+ * Queue index in hardware.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EEXIST - a mapping with the same rte_flow index already exists.
+ * - EINVAL - invalid rte_flow index, out of range.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external RxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx);
+
+/**
+ * Remove the mapping between the rte_flow queue index (16 bits) and the HW
+ * queue index (32 bits) for an RxQ created outside the PMD.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EINVAL - invalid index, out of range, still referenced or doesn't exist.
+ * - ENODEV - there is no Ethernet device for this port id.
+ * - ENOTSUP - the port doesn't support external RxQ.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id,
+ uint16_t dpdk_idx);
+
#ifdef __cplusplus
}
#endif
-#endif
+#endif /* RTE_PMD_PRIVATE_MLX5_H_ */
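
A minimal usage sketch for the new API (not part of the patch). It assumes the
port was probed with an imported PD/CTX, that hw_rqn is the number of an RQ the
application created itself through DevX, and that queue flow actions accept
external indices (an assumption beyond this patch):

    #include <rte_errno.h>
    #include <rte_flow.h>
    #include <rte_pmd_mlx5.h>

    static int
    steer_to_external_rxq(uint16_t port_id, uint32_t hw_rqn)
    {
        uint16_t flow_q = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; /* first external slot */
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = flow_q };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        /* Bind the rte_flow index to the externally created RQ. */
        if (rte_pmd_mlx5_external_rx_queue_id_map(port_id, flow_q, hw_rqn) < 0)
            return -rte_errno;
        /* Steer all Ethernet traffic to the external queue. */
        if (rte_flow_create(port_id, &attr, pattern, actions, &error) == NULL) {
            rte_pmd_mlx5_external_rx_queue_id_unmap(port_id, flow_q);
            return -rte_errno;
        }
        return 0;
    }
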
@@ -9,4 +9,7 @@ EXPERIMENTAL {
rte_pmd_mlx5_get_dyn_flag_names;
# added in 20.11
rte_pmd_mlx5_sync_flow;
+ # added in 22.03
+ rte_pmd_mlx5_external_rx_queue_id_map;
+ rte_pmd_mlx5_external_rx_queue_id_unmap;
};