@@ -113,6 +113,10 @@ Runtime Configuration
effect only if the device also supports large LLQ headers. Otherwise, the
default value will be used.
+ * **normal_llq_hdr** (default 0)
+
+   Enforce the normal LLQ header policy, which limits the LLQ entry size to
+   128B even if the device recommends large (256B) LLQ entries.
+
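+   For example, the policy can be requested via a devarg (the PCI address
+   below is illustrative)::
+
+      dpdk-testpmd -a 00:06.0,normal_llq_hdr=1 -- -i
+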
* **miss_txc_to** (default 5)
Number of seconds after which the Tx packet will be considered missing.
@@ -108,6 +108,7 @@ New Features
* Removed the reporting of `rx_overruns` errors from xstats and instead updated `imissed` stat with its value.
* Added support for sub-optimal configuration notifications from the device.
* Restructured fast release of mbufs when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE optimization is enabled.
+ * Added `normal_llq_hdr` devarg that enforces the normal LLQ header policy.
* **Updated Atomic Rules' Arkville driver.**
@@ -40,6 +40,8 @@
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define DECIMAL_BASE 10
+
/*
* We should try to keep ENA_CLEANUP_BUF_SIZE lower than
* RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache.
@@ -75,6 +77,7 @@ struct ena_stats {
/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
+#define ENA_DEVARG_NORMAL_LLQ_HDR "normal_llq_hdr"
/* Timeout in seconds after which a single uncompleted Tx packet should be
* considered as a missing.
*/
@@ -297,6 +300,8 @@ static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
static int ena_configure_aenq(struct ena_adapter *adapter);
static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
const void *peer);
+static ena_llq_policy ena_define_llq_hdr_policy(struct ena_adapter *adapter);
+static bool ena_use_large_llq_hdr(struct ena_adapter *adapter, uint8_t recommended_entry_size);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -1135,6 +1140,7 @@ ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
ctx->max_tx_queue_size = max_tx_queue_size;
ctx->max_rx_queue_size = max_rx_queue_size;
+ PMD_DRV_LOG(INFO, "Tx queue size %u\n", max_tx_queue_size);
return 0;
}
@@ -2034,7 +2040,7 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
int rc;
u32 llq_feature_mask;
- if (!adapter->enable_llq) {
+ if (adapter->llq_header_policy == ENA_LLQ_POLICY_DISABLED) {
PMD_DRV_LOG(WARNING,
"NOTE: LLQ has been disabled as per user's request. "
"This may lead to a huge performance degradation!\n");
@@ -2241,12 +2247,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
adapter->enable_llq = true;
adapter->use_large_llq_hdr = false;
+ adapter->use_normal_llq_hdr = false;
+ /* Parse user's devargs, which may override the defaults set above */
rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
if (rc != 0) {
PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
goto err;
}
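+ /* Combine the boolean devargs into a single LLQ header policy */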
+ adapter->llq_header_policy = ena_define_llq_hdr_policy(adapter);
+
rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
if (rc != 0) {
PMD_INIT_LOG(CRIT, "Failed to allocate customer metrics buffer\n");
@@ -2264,8 +2274,9 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
- adapter->use_large_llq_hdr);
+ bool use_large_llq_hdr = ena_use_large_llq_hdr(adapter,
+ get_feat_ctx.llq.entry_size_recommended);
+ set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, use_large_llq_hdr);
rc = ena_set_queues_placement_policy(adapter, ena_dev,
&get_feat_ctx.llq, &llq_config);
if (unlikely(rc)) {
@@ -2273,18 +2284,19 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
return rc;
}
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
queue_type_str = "Regular";
- else
+ } else {
queue_type_str = "Low latency";
+ PMD_DRV_LOG(INFO, "LLQ entry size %uB\n", llq_config.llq_ring_entry_size_value);
+ }
PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
calc_queue_ctx.ena_dev = ena_dev;
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
- rc = ena_calc_io_queue_size(&calc_queue_ctx,
- adapter->use_large_llq_hdr);
+ rc = ena_calc_io_queue_size(&calc_queue_ctx, use_large_llq_hdr);
if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
rc = -EFAULT;
goto err_device_destroy;
@@ -3632,7 +3644,7 @@ static int ena_process_uint_devarg(const char *key,
char *str_end;
uint64_t uint_value;
- uint_value = strtoull(value, &str_end, 10);
+ uint_value = strtoull(value, &str_end, DECIMAL_BASE);
if (value == str_end) {
PMD_INIT_LOG(ERR,
"Invalid value for key '%s'. Only uint values are accepted.\n",
@@ -3685,6 +3697,8 @@ static int ena_process_bool_devarg(const char *key,
/* Now, assign it to the proper adapter field. */
if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
adapter->use_large_llq_hdr = bool_value;
+ else if (strcmp(key, ENA_DEVARG_NORMAL_LLQ_HDR) == 0)
+ adapter->use_normal_llq_hdr = bool_value;
else if (strcmp(key, ENA_DEVARG_ENABLE_LLQ) == 0)
adapter->enable_llq = bool_value;
@@ -3696,6 +3710,7 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
{
static const char * const allowed_args[] = {
ENA_DEVARG_LARGE_LLQ_HDR,
+ ENA_DEVARG_NORMAL_LLQ_HDR,
ENA_DEVARG_MISS_TXC_TO,
ENA_DEVARG_ENABLE_LLQ,
NULL,
@@ -3717,6 +3732,10 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
ena_process_bool_devarg, adapter);
if (rc != 0)
goto exit;
+ rc = rte_kvargs_process(kvlist, ENA_DEVARG_NORMAL_LLQ_HDR,
+ ena_process_bool_devarg, adapter);
+ if (rc != 0)
+ goto exit;
rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO,
ena_process_uint_devarg, adapter);
if (rc != 0)
@@ -3943,6 +3962,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ena,
ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> "
+ ENA_DEVARG_NORMAL_LLQ_HDR "=<0|1> "
ENA_DEVARG_ENABLE_LLQ "=<0|1> "
ENA_DEVARG_MISS_TXC_TO "=<uint>");
RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
@@ -4129,3 +4149,27 @@ ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
/* Return just IPC processing status */
return rte_mp_reply(&mp_rsp, peer);
}
+
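+/*
+ * Resolve the final LLQ header policy from the boolean devargs.
+ * Precedence: disabling LLQ wins, then the large-header request, then the
+ * normal-header request; otherwise fall back to the device's recommendation.
+ */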
+static ena_llq_policy ena_define_llq_hdr_policy(struct ena_adapter *adapter)
+{
+ if (!adapter->enable_llq)
+ return ENA_LLQ_POLICY_DISABLED;
+ if (adapter->use_large_llq_hdr)
+ return ENA_LLQ_POLICY_LARGE;
+ if (adapter->use_normal_llq_hdr)
+ return ENA_LLQ_POLICY_NORMAL;
+ return ENA_LLQ_POLICY_RECOMMENDED;
+}
+
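+/*
+ * Large (256B) LLQ entries are used when the user enforced them, or when the
+ * policy is left to the device and the device recommends 256B entries.
+ */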
+static bool ena_use_large_llq_hdr(struct ena_adapter *adapter, uint8_t recommended_entry_size)
+{
+ if (adapter->llq_header_policy == ENA_LLQ_POLICY_LARGE) {
+ return true;
+ } else if (adapter->llq_header_policy == ENA_LLQ_POLICY_RECOMMENDED) {
+ PMD_DRV_LOG(INFO, "Device recommended LLQ entry size policy: %u\n",
+ recommended_entry_size);
+ if (recommended_entry_size == ENA_ADMIN_LIST_ENTRY_SIZE_256B)
+ return true;
+ }
+ return false;
+}
@@ -85,6 +85,14 @@ enum ena_ring_type {
ENA_RING_TYPE_TX = 2,
};
+typedef enum ena_llq_policy_t {
+ ENA_LLQ_POLICY_DISABLED = 0, /* Host queues */
+ ENA_LLQ_POLICY_RECOMMENDED = 1, /* Device recommendation */
+ ENA_LLQ_POLICY_NORMAL = 2, /* 128B long LLQ entry */
+ ENA_LLQ_POLICY_LARGE = 3, /* 256B long LLQ entry */
+ ENA_LLQ_POLICY_LAST,
+} ena_llq_policy;
+
struct ena_tx_buffer {
struct rte_mbuf *mbuf;
unsigned int tx_descs;
@@ -328,9 +336,10 @@ struct ena_adapter {
uint32_t active_aenq_groups;
bool trigger_reset;
-
bool enable_llq;
bool use_large_llq_hdr;
+ bool use_normal_llq_hdr;
+ ena_llq_policy llq_header_policy;
uint32_t last_tx_comp_qid;
uint64_t missing_tx_completion_to;