@@ -1780,78 +1780,104 @@ rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc
return (-ENOTSUP);
}
-int
-rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
+static inline int
+rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
- struct rte_eth_dev *dev;
- uint16_t max_rxq;
- uint8_t i,j;
+ uint16_t i;
+ /* Round up so a trailing partial group of entries is also covered */
+ uint16_t num = (reta_size + RTE_BIT_WIDTH_64 - 1) / RTE_BIT_WIDTH_64;
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
+ if (!reta_conf)
+ return -EINVAL;
- /* Invalid mask bit(s) setting */
- if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
- PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
- return (-EINVAL);
+ for (i = 0; i < num; i++) {
+ if (reta_conf[i].mask)
+ return 0;
}
- dev = &rte_eth_devices[port_id];
- max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
- dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
- if (reta_conf->mask_lo != 0) {
- for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
- if ((reta_conf->mask_lo & (1ULL << i)) &&
- (reta_conf->reta[i] >= max_rxq)) {
- PMD_DEBUG_TRACE("RETA hash index output"
- "configration for port=%d,invalid"
- "queue=%d\n",port_id,reta_conf->reta[i]);
+ return -EINVAL;
+}
- return (-EINVAL);
- }
+static inline int
+rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size,
+ uint8_t max_rxq)
+{
+ uint16_t i, idx, shift;
+
+ if (!reta_conf)
+ return -EINVAL;
+
+ if (max_rxq == 0) {
+ PMD_DEBUG_TRACE("No receive queue is available\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_BIT_WIDTH_64;
+ shift = i % RTE_BIT_WIDTH_64;
+ if ((reta_conf[idx].mask & (1ULL << shift)) &&
+ (reta_conf[idx].reta[shift] >= max_rxq)) {
+ PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u is out of "
+ "range (must be below the number of RX queues: %u)\n",
+ idx, shift, reta_conf[idx].reta[shift], max_rxq);
+ return -EINVAL;
}
}
- if (reta_conf->mask_hi != 0) {
- for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
- j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
+ return 0;
+}
- /* Check if the max entry >= 128 */
- if ((reta_conf->mask_hi & (1ULL << i)) &&
- (reta_conf->reta[j] >= max_rxq)) {
- PMD_DEBUG_TRACE("RETA hash index output"
- "configration for port=%d,invalid"
- "queue=%d\n",port_id,reta_conf->reta[j]);
+int
+rte_eth_dev_rss_reta_update(uint8_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct rte_eth_dev *dev;
+ int ret;
- return (-EINVAL);
- }
- }
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
}
+ /* Check mask bits */
+ ret = rte_eth_check_reta_mask(reta_conf, reta_size);
+ if (ret < 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+
+ /* Check that each selected entry refers to a valid RX queue */
+ ret = rte_eth_check_reta_entry(reta_conf, reta_size,
+ dev->data->nb_rx_queues);
+ if (ret < 0)
+ return ret;
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
- return (*dev->dev_ops->reta_update)(dev, reta_conf);
+ return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}
int
-rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
+rte_eth_dev_rss_reta_query(uint8_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
struct rte_eth_dev *dev;
+ int ret;
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
+ return -ENODEV;
}
- if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
- PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
- return (-EINVAL);
- }
+ /* Check mask bits */
+ ret = rte_eth_check_reta_mask(reta_conf, reta_size);
+ if (ret < 0)
+ return ret;
dev = &rte_eth_devices[port_id];
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
- return (*dev->dev_ops->reta_query)(dev, reta_conf);
+ return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
}
int
@@ -426,8 +426,11 @@ struct rte_eth_rss_conf {
ETH_RSS_L2_PAYLOAD)
/* Definitions used for redirection table entry size */
-#define ETH_RSS_RETA_NUM_ENTRIES 128
-#define ETH_RSS_RETA_MAX_QUEUE 16
+#define ETH_RSS_RETA_SIZE_64 64
+#define ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_512 512
+
+/**
+ * Number of bits in a uint64_t, which is also the number of redirection
+ * table entries covered by a single rte_eth_rss_reta_entry64 group.
+ * CHAR_BIT comes from <limits.h>.
+ */
+#define RTE_BIT_WIDTH_64 (CHAR_BIT * sizeof(uint64_t))
/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDQ vlan filters. */
@@ -491,15 +494,15 @@ struct rte_eth_vmdq_mirror_conf {
};
/**
- * A structure used to configure Redirection Table of the Receive Side
- * Scaling (RSS) feature of an Ethernet port.
+ * A structure used to configure 64 entries of the Redirection Table of the
+ * Receive Side Scaling (RSS) feature of an Ethernet port. When the hardware
+ * supports more than 64 entries, an array of this structure is used to
+ * describe the complete table.
*/
-struct rte_eth_rss_reta {
- /** First 64 mask bits indicate which entry(s) need to updated/queried. */
- uint64_t mask_lo;
- /** Second 64 mask bits indicate which entry(s) need to updated/queried. */
- uint64_t mask_hi;
- uint8_t reta[ETH_RSS_RETA_NUM_ENTRIES]; /**< 128 RETA entries*/
+struct rte_eth_rss_reta_entry64 {
+ uint64_t mask;
+ /**< Mask bits indicate which entries need to be updated/queried. */
+ uint8_t reta[RTE_BIT_WIDTH_64]; /**< 64 redirection table entries. */
};
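For illustration only (not part of the patch), a minimal sketch of how a caller could spread a redirection table over an array of these groups: entry i lands in group i / RTE_BIT_WIDTH_64, slot i % RTE_BIT_WIDTH_64, and the corresponding mask bit selects it for update. The helper name and the round-robin queue assignment are assumptions made for the example.

#include <string.h>
#include <rte_ethdev.h>

static void
fill_reta_round_robin(struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size, uint16_t nb_rx_queues)
{
	uint16_t i, idx, shift;

	/* Caller provides reta_size / RTE_BIT_WIDTH_64 groups. */
	memset(reta_conf, 0,
	       (reta_size / RTE_BIT_WIDTH_64) * sizeof(*reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_BIT_WIDTH_64;   /* which 64-entry group */
		shift = i % RTE_BIT_WIDTH_64; /* slot within that group */
		reta_conf[idx].mask |= 1ULL << shift;  /* select for update */
		/* round-robin the entries over the available RX queues */
		reta_conf[idx].reta[shift] = (uint8_t)(i % nb_rx_queues);
	}
}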
/**
@@ -906,6 +909,8 @@ struct rte_eth_dev_info {
uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
uint32_t rx_offload_capa; /**< Device RX offload capabilities. */
uint32_t tx_offload_capa; /**< Device TX offload capabilities. */
+ uint16_t reta_size;
+ /**< Device redirection table size, the total number of entries. */
};
struct rte_eth_dev;
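As a sketch of how the new reta_size field could be consumed (assuming the existing rte_eth_dev_info_get() call; the helper name is hypothetical), an application can size its reta_conf array from the reported table size:

#include <string.h>
#include <rte_ethdev.h>

/* Return the number of 64-entry groups needed to describe the whole RETA
 * of the given port; 0 if the PMD does not report a table size. */
static uint16_t
reta_num_groups(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.reta_size / RTE_BIT_WIDTH_64;
}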
@@ -1158,11 +1163,13 @@ typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
/**< @internal Setup priority flow control parameter on an Ethernet device */
typedef int (*reta_update_t)(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta *reta_conf);
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
/**< @internal Update RSS redirection table on an Ethernet device */
typedef int (*reta_query_t)(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta *reta_conf);
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
/**< @internal Query RSS redirection table on an Ethernet device */
typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
@@ -2839,14 +2846,17 @@ int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
* @param port
* The port identifier of the Ethernet device.
* @param reta_conf
- * RETA to update.
+ * RETA to update.
+ * @param reta_size
+ * Redirection table size, i.e. the total number of entries in the table.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
int rte_eth_dev_rss_reta_update(uint8_t port,
- struct rte_eth_rss_reta *reta_conf);
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
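A hedged usage sketch for the update path (not part of the patch; the 128-entry table and the four RX queues are assumptions made for the example):

#include <string.h>
#include <rte_ethdev.h>

static int
reta_update_example(uint8_t port_id)
{
	/* Two 64-entry groups describe a 128-entry table. */
	struct rte_eth_rss_reta_entry64
		reta_conf[ETH_RSS_RETA_SIZE_128 / RTE_BIT_WIDTH_64];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
		reta_conf[i / RTE_BIT_WIDTH_64].mask |=
			1ULL << (i % RTE_BIT_WIDTH_64);
		reta_conf[i / RTE_BIT_WIDTH_64].reta[i % RTE_BIT_WIDTH_64] =
			(uint8_t)(i % 4); /* spread entries over 4 RX queues */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   ETH_RSS_RETA_SIZE_128);
}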
/**
* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
@@ -2855,13 +2865,16 @@ int rte_eth_dev_rss_reta_update(uint8_t port,
* The port identifier of the Ethernet device.
* @param reta_conf
* RETA to query.
+ * @param reta_size
+ * Redirection table size, i.e. the total number of entries in the table.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
int rte_eth_dev_rss_reta_query(uint8_t port,
- struct rte_eth_rss_reta *reta_conf);
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
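And a sketch of reading the table back (again illustrative, not part of the patch). The mask selects which entries the driver fills in, so every bit is set here; the 512-entry bound is simply the largest size defined above:

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static int
reta_dump_example(uint8_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_BIT_WIDTH_64];
	uint16_t i;
	int ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size / RTE_BIT_WIDTH_64; i++)
		reta_conf[i].mask = ~0ULL; /* query every entry in each group */

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, reta_size);
	if (ret != 0)
		return ret;

	for (i = 0; i < reta_size; i++)
		printf("RETA[%u] -> RX queue %u\n", (unsigned)i,
		       (unsigned)reta_conf[i / RTE_BIT_WIDTH_64]
				.reta[i % RTE_BIT_WIDTH_64]);
	return 0;
}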
/**
* Updates unicast hash table for receiving packet with the given destination