@@ -2495,6 +2495,7 @@
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
+ .rxseg_queue_setup = mlx5_rxseg_queue_setup,
.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
@@ -2578,6 +2579,7 @@
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
.rx_queue_setup = mlx5_rx_queue_setup,
+ .rxseg_queue_setup = mlx5_rxseg_queue_setup,
.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
.tx_queue_setup = mlx5_tx_queue_setup,
.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
@@ -162,6 +162,9 @@ struct mlx5_stats_ctrl {
/* Maximal size of aggregated LRO packet. */
#define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
/* LRO configurations structure. */
struct mlx5_lro_config {
uint32_t supported:1; /* Whether LRO is supported. */
@@ -390,6 +390,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH);
@@ -715,16 +716,20 @@
* NUMA socket on which memory must be allocated.
* @param[in] conf
* Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
+ * @param rx_seg
+ * Pointer to the array of segment descriptions; each element
+ * describes the memory pool, maximal data length, and initial
+ * data offset from the beginning of the data buffer in the mbuf.
+ * @param n_seg
+ * Number of elements in the segment descriptions array.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+mlx5_rxseg_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ const struct rte_eth_rxseg *rx_seg, uint16_t n_seg)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
@@ -732,10 +737,43 @@
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int res;
+ if (!n_seg || !rx_seg) {
+ DRV_LOG(ERR, "port %u queue index %u invalid "
+ "split description",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (n_seg > 1) {
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
+
+ if (!(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ DRV_LOG(ERR, "port %u queue index %u split "
+ "configuration requires scattering",
+ dev->data->port_id, idx);
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+ DRV_LOG(ERR, "port %u queue index %u split "
+ "offload not configured",
+ dev->data->port_id, idx);
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ if (n_seg > MLX5_MAX_RXQ_NSEG) {
+ DRV_LOG(ERR, "port %u queue index %u too many "
+ "segments %u to split",
+ dev->data->port_id, idx, n_seg);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ }
res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
- rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
if (!rxq_ctrl) {
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
dev->data->port_id, idx);
@@ -756,6 +794,39 @@
* RX queue index.
* @param desc
* Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct rte_eth_rxseg rx_seg = {
+ .mp = mp,
+ /*
+ * All other fields are zeroed, zero segment length
+ * means the pool buffer size should be used by PMD.
+ */
+ };
+ return mlx5_rxseg_queue_setup(dev, idx, desc, socket, conf, &rx_seg, 1);
+}
+
+/**
+ * DPDK callback to configure a Rx hairpin queue.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
* @param hairpin_conf
* Hairpin configuration parameters.
*
@@ -1328,11 +1399,11 @@
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+ const struct rte_eth_rxseg *rx_seg, uint16_t n_seg)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
- unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
unsigned int mprq_stride_nums;
unsigned int mprq_stride_size;
unsigned int mprq_stride_cap;
@@ -1346,7 +1417,7 @@ struct mlx5_rxq_ctrl *
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
- const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+ const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1;
unsigned int max_rx_pkt_len = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1531,7 +1602,7 @@ struct mlx5_rxq_ctrl *
(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
- tmpl->rxq.mp = mp;
+ tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.rq_repl_thresh =
MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
@@ -150,6 +150,9 @@ struct mlx5_rxq_data {
rte_spinlock_t *uar_lock_cq;
/* CQ (UAR) access lock required for 32bit implementations */
#endif
+ struct rte_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+ /* Buffer split segment descriptions - sizes, offsets, pools. */
+ uint32_t rxseg_n; /* Number of split segment descriptions. */
uint32_t tunnel; /* Tunnel information. */
uint64_t flow_meta_mask;
int32_t flow_meta_offset;
@@ -304,6 +307,10 @@ struct mlx5_txq_ctrl {
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
+int mlx5_rxseg_queue_setup
+ (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ const struct rte_eth_rxseg *rx_seg, uint16_t n_seg);
int mlx5_rx_hairpin_queue_setup
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
@@ -316,7 +323,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp);
+ const struct rte_eth_rxseg *rx_seg,
+ uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);