@@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offload can be enabled on one queue and disabled on another queue at the same time.
+A pure per-port offload cannot be enabled on one queue and disabled on another queue at the same time.
+A pure per-port offload must be enabled or disabled on all queues at the same time.
+A per-port offload can be enabled or disabled on all queues at the same time.
+Note that both per-queue and pure per-port offloads are of the per-port type.
The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offload capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offload capabilities.
Supported offloads can be either per-port or per-queue.
Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any offload requested by an application must be within the device capabilities.
+Any offload is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure()`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+If any offload is enabled in ``rte_eth_dev_configure()`` by an application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+If a per-queue offload hasn't been enabled in ``rte_eth_dev_configure()``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup()`` for an individual queue.
+A newly added offload in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()`` input by an application
+is one which hasn't been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup()``. It must be per-queue type, otherwise an error is returned.
For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
In such cases it is not required to set other flags in ``txq_flags``.
@@ -303,6 +303,14 @@ API Changes
* ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
* ``rte_flow_query()`` API parameter changed from action type to action structure.
+* **ethdev: changes to offload API**
+
+ A pure per-port offload doesn't need to be repeated in [rt]x_conf->offloads to
+ ``rte_eth_[rt]x_queue_setup()``. Now any offload enabled in ``rte_eth_dev_configure()``
+ can't be disabled by ``rte_eth_[rt]x_queue_setup()``. Any newly added offload which has
+ not been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled in
+ ``rte_eth_[rt]x_queue_setup()`` must be per-queue type, otherwise an error is returned.
+
ABI Changes
-----------
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
nb_desc > AVF_MAX_RING_DESC ||
nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->free_thresh = tx_free_thresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
/* Allocate software ring */
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
- uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
- if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
- PMD_DRV_LOG
- (ERR,
- "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
- tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
- return -ENOTSUP;
- }
-
- if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
- PMD_DRV_LOG
- (ERR,
- "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
- rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
- return -ENOTSUP;
- }
-
bp->rx_queues = (void *)eth_dev->data->rx_queues;
bp->tx_queues = (void *)eth_dev->data->tx_queues;
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
- uint64_t unsupported_offloads, configured_offloads;
+ uint64_t configured_offloads;
int err;
CXGBE_FUNC_TRACE();
configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
dev_info(adapter, "can't disable hw crc strip\n");
- configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
- unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
- if (unsupported_offloads) {
- dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Supported:0x%" PRIx64 "\n",
- unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
- return -ENOTSUP;
- }
-
- configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
- unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
- if (unsupported_offloads) {
- dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
- "Supported:0x%" PRIx64 "\n",
- unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
- return -ENOTSUP;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_CRC_STRIP;
}
if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
int err = 0;
unsigned int temp_nb_desc;
- uint64_t unsupported_offloads;
-
- unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
- if (unsupported_offloads) {
- dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
- "Supported:0x%" PRIx64 "\n",
- unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
- return -ENOTSUP;
- }
dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
__func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
unsigned int temp_nb_desc;
struct rte_eth_dev_info dev_info;
unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
- uint64_t unsupported_offloads, configured_offloads;
-
- configured_offloads = rx_conf->offloads;
- if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- dev_info(adapter, "can't disable hw crc strip\n");
- configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
- unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
- if (unsupported_offloads) {
- dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Supported:0x%" PRIx64 "\n",
- unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
- return -ENOTSUP;
- }
dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
/* Rx offloads validation */
- if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
- DPAA_PMD_ERR(
- "Rx offloads non supported - requested 0x%" PRIx64
- " supported 0x%" PRIx64,
- rx_offloads,
- dev_rx_offloads_sup | dev_rx_offloads_nodis);
- return -ENOTSUP;
- }
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA_PMD_WARN(
"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
}
/* Tx offloads validation */
- if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
- DPAA_PMD_ERR(
- "Tx offloads non supported - requested 0x%" PRIx64
- " supported 0x%" PRIx64,
- tx_offloads,
- dev_tx_offloads_sup | dev_tx_offloads_nodis);
- return -ENOTSUP;
- }
if (dev_tx_offloads_nodis & ~tx_offloads) {
DPAA_PMD_WARN(
"Tx offloads non configurable - requested 0x%" PRIx64
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
/* Rx offloads validation */
- if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
- DPAA2_PMD_ERR(
- "Rx offloads non supported - requested 0x%" PRIx64
- " supported 0x%" PRIx64,
- rx_offloads,
- dev_rx_offloads_sup | dev_rx_offloads_nodis);
- return -ENOTSUP;
- }
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA2_PMD_WARN(
"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
/* Tx offloads validation */
- if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
- DPAA2_PMD_ERR(
- "Tx offloads non supported - requested 0x%" PRIx64
- " supported 0x%" PRIx64,
- tx_offloads,
- dev_tx_offloads_sup | dev_tx_offloads_nodis);
- return -ENOTSUP;
- }
if (dev_tx_offloads_nodis & ~tx_offloads) {
DPAA2_PMD_WARN(
"Tx offloads non configurable - requested 0x%" PRIx64
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct rte_eth_dev_info dev_info;
- uint64_t rx_offloads;
- uint64_t tx_offloads;
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
- eth_em_infos_get(dev, &dev_info);
- rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
- PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, dev_info.rx_offload_capa);
- return -ENOTSUP;
- }
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
- PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, dev_info.tx_offload_capa);
- return -ENOTSUP;
- }
-
PMD_INIT_FUNC_TRACE();
return 0;
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
return tx_queue_offload_capa;
}
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
- uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
struct e1000_hw *hw;
uint32_t tsize;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev,
- tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- em_get_tx_port_offloads_capa(dev),
- em_get_tx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
/*
* Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
return 0;
}
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
return rx_queue_offload_capa;
}
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
- uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
struct em_rx_queue *rxq;
struct e1000_hw *hw;
uint32_t rsize;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev,
- rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- em_get_rx_port_offloads_capa(dev),
- em_get_rx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
/*
* Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
- rxq->offloads = rx_conf->offloads;
+ rxq->offloads = offloads;
return 0;
}
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
return rx_queue_offload_capa;
}
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
- uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
struct igb_tx_queue *txq;
struct e1000_hw *hw;
uint32_t size;
+ uint64_t offloads;
- if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev,
- tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- igb_get_tx_port_offloads_capa(dev),
- igb_get_tx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
dev->tx_pkt_burst = eth_igb_xmit_pkts;
dev->tx_pkt_prepare = ð_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
return 0;
}
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
return rx_queue_offload_capa;
}
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
- uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
struct igb_rx_queue *rxq;
struct e1000_hw *hw;
unsigned int size;
+ uint64_t offloads;
- if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev,
- rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- igb_get_rx_port_offloads_capa(dev),
- igb_get_rx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
return -ENOMEM;
- rxq->offloads = rx_conf->offloads;
+ rxq->offloads = offloads;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
- !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_TXQ_IDX(queue_idx);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
/* Store pointer to this queue in upper layer */
txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_RXQ_IDX(queue_idx);
ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
- if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
- RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, adapter->tx_supported_offloads);
- return -ENOTSUP;
- }
-
- if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- rx_offloads, adapter->rx_supported_offloads);
- return -ENOTSUP;
- }
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
break;
}
- adapter->tx_selected_offloads = tx_offloads;
- adapter->rx_selected_offloads = rx_offloads;
+ adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+ adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
return 0;
}
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
}
}
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->tx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->rx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
@@ -90,22 +90,10 @@ static int
fs_dev_configure(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
- uint64_t supp_tx_offloads;
- uint64_t tx_offloads;
uint8_t i;
int ret;
fs_lock(dev, 0);
- supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- ERROR("Some Tx offloads are not supported, "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, supp_tx_offloads);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
FOREACH_SUBDEV(sdev, i, dev) {
int rmv_interrupt = 0;
int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
}
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads;
- uint64_t queue_supp_offloads;
- uint64_t port_supp_offloads;
-
- port_offloads = dev->data->dev_conf.rxmode.offloads;
- queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
- port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
fs_rx_queue_release(void *queue)
{
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
fs_rx_queue_release(rxq);
dev->data->rx_queues[rx_queue_id] = NULL;
}
- /* Verify application offloads are valid for our port and queue. */
- if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
- rte_errno = ENOTSUP;
- ERROR("Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- PRIV(dev)->infos.rx_offload_capa |
- PRIV(dev)->infos.rx_queue_offload_capa);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
rxq = rte_zmalloc(NULL,
sizeof(*rxq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
return rc;
}
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads;
- uint64_t queue_supp_offloads;
- uint64_t port_supp_offloads;
-
- port_offloads = dev->data->dev_conf.txmode.offloads;
- queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
- port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
fs_tx_queue_release(void *queue)
{
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
fs_tx_queue_release(txq);
dev->data->tx_queues[tx_queue_id] = NULL;
}
- /*
- * Don't verify queue offloads for applications which
- * use the old API.
- */
- if (tx_conf != NULL &&
- (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
- rte_errno = ENOTSUP;
- ERROR("Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- PRIV(dev)->infos.tx_offload_capa |
- PRIV(dev)->infos.tx_queue_offload_capa);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
txq = rte_zmalloc("ethdev TX queue",
sizeof(*txq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -448,29 +448,13 @@ static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
int ret;
- struct rte_eth_dev_info dev_info;
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
PMD_INIT_FUNC_TRACE();
- if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) == 0)
PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
- fm10k_dev_infos_get(dev, &dev_info);
- if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
- PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, dev_info.rx_offload_capa);
- return -ENOTSUP;
- }
- if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
- PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, dev_info.tx_offload_capa);
- return -ENOTSUP;
- }
-
/* multipe queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
}
static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
- uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
-static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
- if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- fm10k_get_rx_port_offloads_capa(dev),
- fm10k_get_rx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
- q->offloads = conf->offloads;
+ q->offloads = offloads;
if (handle_rxconf(q, conf))
return -EINVAL;
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
}
static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
- uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
-static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
- if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- fm10k_get_tx_port_offloads_capa(dev),
- fm10k_get_tx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
q->txq_flags = conf->txq_flags;
- q->offloads = conf->offloads;
+ q->offloads = offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- struct rte_eth_dev_info dev_info;
- uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
- uint64_t supported; /* All per port offloads */
-
- dev->dev_ops->dev_infos_get(dev, &dev_info);
- supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
- if ((requested & dev_info.rx_offload_capa) != requested)
- return 0; /* requested range check */
- return !((mandatory ^ requested) & supported);
-}
-
-static int
i40e_dev_first_queue(uint16_t idx, void **queues, int num)
{
uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
- struct rte_eth_dev_info dev_info;
+ uint64_t offloads;
- if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
- dev->dev_ops->dev_infos_get(dev, &dev_info);
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- (void *)dev, rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- dev_info.rx_offload_capa);
- return -ENOTSUP;
- }
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- rxq->offloads = rx_conf->offloads;
+ rxq->offloads = offloads;
/* Allocate the maximun number of RX ring hardware descriptor. */
len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
}
static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- struct rte_eth_dev_info dev_info;
- uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
- uint64_t supported; /* All per port offloads */
-
- dev->dev_ops->dev_infos_get(dev, &dev_info);
- supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
- if ((requested & dev_info.tx_offload_capa) != requested)
- return 0; /* requested range check */
- return !((mandatory ^ requested) & supported);
-}
-
-static int
i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq)
{
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
- struct rte_eth_dev_info dev_info;
+ uint64_t offloads;
- if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
- dev->dev_ops->dev_infos_get(dev, &dev_info);
- PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- (void *)dev, tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- dev_info.tx_offload_capa);
- return -ENOTSUP;
- }
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->queue_id = queue_idx;
txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
- struct rte_eth_dev_info dev_info;
- uint64_t rx_offloads;
- uint64_t tx_offloads;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- ixgbe_dev_info_get(dev, &dev_info);
- rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
- PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, dev_info.rx_offload_capa);
- return -ENOTSUP;
- }
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
- PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, dev_info.tx_offload_capa);
- return -ENOTSUP;
- }
-
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
- struct rte_eth_dev_info dev_info;
- uint64_t rx_offloads;
- uint64_t tx_offloads;
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
- ixgbevf_dev_info_get(dev, &dev_info);
- rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
- PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, dev_info.rx_offload_capa);
- return -ENOTSUP;
- }
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
- PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, dev_info.tx_offload_capa);
- return -ENOTSUP;
- }
-
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
return tx_offload_capa;
}
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
- uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64,
- (void *)dev, tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- ixgbe_get_tx_queue_offloads(dev),
- ixgbe_get_tx_port_offloads(dev));
- return -ENOTSUP;
- }
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
/*
* Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
return offloads;
}
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
- uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev, rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- ixgbe_get_rx_port_offloads(dev),
- ixgbe_get_rx_queue_offloads(dev));
- return -ENOTSUP;
- }
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
/*
* Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- rxq->offloads = rx_conf->offloads;
+ rxq->offloads = offloads;
/*
* The packet type in RX descriptor is different for different NICs.
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param requested
- * Per-queue offloads configuration.
- *
- * @return
- * Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
- uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
- return !((mandatory ^ requested) & supported);
-}
-
-/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
};
int ret;
uint32_t crc_present;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
- if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
- rte_errno = ENOTSUP;
- ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (mlx4_get_rx_port_offloads(priv) |
- mlx4_get_rx_queue_offloads(priv)));
- return -rte_errno;
- }
+
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, idx, desc);
}
/* By default, FCS (CRC) is stripped by hardware. */
- if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
crc_present = 0;
} else if (priv->hw_fcs_strip) {
crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
.csum = priv->hw_csum &&
- (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
.csum_l2tun = priv->hw_csum_l2tun &&
- (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
.crc_present = crc_present,
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param requested
- * Per-queue offloads configuration.
- *
- * @return
- * Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
- uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
- uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
- return !((mandatory ^ requested) & supported);
-}
-
-/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
},
};
int ret;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
- rte_errno = ENOTSUP;
- ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- mlx4_get_tx_port_offloads(priv));
- return -rte_errno;
- }
+
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.csum = priv->hw_csum &&
- (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM)),
.csum_l2tun = priv->hw_csum_l2tun &&
- (conf->offloads &
+ (offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
- uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t supp_rx_offloads =
- (mlx5_get_rx_port_offloads() |
- mlx5_get_rx_queue_offloads(dev));
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
int ret = 0;
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- DRV_LOG(ERR,
- "port %u some Tx offloads are not supported requested"
- " 0x%" PRIx64 " supported 0x%" PRIx64,
- dev->data->port_id, tx_offloads, supp_tx_offloads);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
- if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
- DRV_LOG(ERR,
- "port %u some Rx offloads are not supported requested"
- " 0x%" PRIx64 " supported 0x%" PRIx64,
- dev->data->port_id, rx_offloads, supp_rx_offloads);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param offloads
- * Per-queue offloads configuration.
- *
- * @return
- * 1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
- uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return 0;
- if (((port_offloads ^ offloads) & port_supp_offloads))
- return 0;
- return 1;
-}
-
-/**
*
* @param dev
* Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = EOVERFLOW;
return -rte_errno;
}
- if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
- DRV_LOG(ERR,
- "port %u Rx queue offloads 0x%" PRIx64 " don't match"
- " port offloads 0x%" PRIx64 " or supported offloads 0x%"
- PRIx64,
- dev->data->port_id, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (mlx5_get_rx_port_offloads() |
- mlx5_get_rx_queue_offloads(dev)));
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
if (!mlx5_rxq_releasable(dev, idx)) {
DRV_LOG(ERR, "port %u unable to release queue index %u",
dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
*/
const uint16_t desc_n =
desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
- tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
tmpl->rxq.crc_present = 0;
} else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param offloads
- * Per-queue offloads configuration.
- *
- * @return
- * 1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
- /* There are no Tx offloads which are per queue. */
- if ((offloads & port_supp_offloads) != offloads)
- return 0;
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return 0;
- return 1;
-}
-
-/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
- rte_errno = ENOTSUP;
- DRV_LOG(ERR,
- "port %u Tx queue offloads 0x%" PRIx64 " don't match"
- " port offloads 0x%" PRIx64 " or supported offloads 0x%"
- PRIx64,
- dev->data->port_id, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- mlx5_get_tx_port_offloads(dev));
- return -rte_errno;
- }
if (desc <= MLX5_TX_COMP_THRESH) {
DRV_LOG(WARNING,
"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return NULL;
}
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.offloads = conf->offloads;
+ tmpl->txq.offloads = conf->offloads |
+ dev->data->dev_conf.txmode.offloads;
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
- RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
- return -EINVAL;
- }
-
if (dev->data->dev_conf.rxmode.split_hdr_size) {
RTE_LOG(INFO, PMD, "Split headers not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
- RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
- return -EINVAL;
- }
-
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
- RTE_LOG(INFO, PMD, "LRO not supported\n");
- return -EINVAL;
- }
-
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
}
/**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = MRVL_RX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
-/**
* DPDK callback to configure the receive queue.
*
* @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
uint32_t min_size,
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ uint64_t offloads;
- if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled =
- dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
}
/**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
- uint64_t supported = MRVL_TX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
-/**
* DPDK callback to configure the transmit queue.
*
* @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
- if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
-
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
}
/* Checking RX offloads */
- if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
- PMD_INIT_LOG(INFO, "rxmode does not support split header");
- return -EINVAL;
- }
-
- if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
- !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
- PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported");
- return -EINVAL;
- }
-
- if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
- !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
- PMD_INIT_LOG(INFO, "LRO not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
- PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
- PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
- PMD_INIT_LOG(INFO, "MACSEC strip not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
- PMD_INIT_LOG(INFO, "MACSEC strip not supported");
- return -EINVAL;
- }
-
if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
- if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
- !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
- PMD_INIT_LOG(INFO, "Scatter not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
- PMD_INIT_LOG(INFO, "timestamp offfload not supported");
- return -EINVAL;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
- PMD_INIT_LOG(INFO, "security offload not supported");
- return -EINVAL;
- }
-
- /* checking TX offloads */
- if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
- !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
- PMD_INIT_LOG(INFO, "vlan insert offload not supported");
- return -EINVAL;
- }
-
- if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
- !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
- PMD_INIT_LOG(INFO, "TX checksum offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
- PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
- return -EINVAL;
- }
-
- if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
- !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
- PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
- PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
- PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
- PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
- txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
- txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
- txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
- PMD_INIT_LOG(INFO, "tunneling offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
- PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
- PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
- return -EINVAL;
- }
-
- if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
- !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
- PMD_INIT_LOG(INFO, "TX multisegs not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
- PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
- return -EINVAL;
- }
-
- if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
- PMD_INIT_LOG(INFO, "TX security offload not supported");
- return -EINVAL;
- }
-
return 0;
}
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_memzone *tz;
struct nfp_net_rxq *rxq;
struct nfp_net_hw *hw;
- struct rte_eth_conf *dev_conf;
- struct rte_eth_rxmode *rxmode;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- dev_conf = &dev->data->dev_conf;
- rxmode = &dev_conf->rxmode;
-
- if (rx_conf->offloads != rxmode->offloads) {
- PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
- queue_idx);
- PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
- PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
- return -EINVAL;
- }
-
/*
* Free memory prior to re-allocation if needed. This is the case after
* calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
struct nfp_net_txq *txq;
uint16_t tx_free_thresh;
struct nfp_net_hw *hw;
- struct rte_eth_conf *dev_conf;
- struct rte_eth_txmode *txmode;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return -EINVAL;
}
- dev_conf = &dev->data->dev_conf;
- txmode = &dev_conf->txmode;
-
- if (tx_conf->offloads != txmode->offloads) {
- PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
- queue_idx);
- return -EINVAL;
- }
-
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh :
DEFAULT_TX_FREE_THRESH);
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &conf->rxmode;
struct rte_eth_txmode *txmode = &conf->txmode;
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
- uint64_t configured_offloads;
- uint64_t unsupported_offloads;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- configured_offloads = rxmode->offloads;
-
- if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
- configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
- unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- unsupported_offloads, configured_offloads,
- (uint64_t)OCTEONTX_RX_OFFLOADS);
- return -ENOTSUP;
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- configured_offloads = txmode->offloads;
-
- if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+ if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
- configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
- }
-
- unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- unsupported_offloads, configured_offloads,
- (uint64_t)OCTEONTX_TX_OFFLOADS);
- return -ENOTSUP;
+ txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
}
if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
static int
octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
struct octeontx_txq *txq = NULL;
uint16_t dq_num;
int res = 0;
- uint64_t configured_offloads;
- uint64_t unsupported_offloads;
RTE_SET_USED(nb_desc);
RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
dev->data->tx_queues[qidx] = NULL;
}
- configured_offloads = tx_conf->offloads;
-
- if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
- PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
- configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
- }
-
- unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- unsupported_offloads, configured_offloads,
- (uint64_t)OCTEONTX_TX_OFFLOADS);
- return -ENOTSUP;
- }
-
/* Allocating tx queue data structure */
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint8_t gaura;
unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
- uint64_t configured_offloads;
- uint64_t unsupported_offloads;
RTE_SET_USED(nb_desc);
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
port = nic->port_id;
- configured_offloads = rx_conf->offloads;
-
- if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
- configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
- unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- unsupported_offloads, configured_offloads,
- (uint64_t)OCTEONTX_RX_OFFLOADS);
- return -ENOTSUP;
- }
/* Rx deferred start is not supported */
if (rx_conf->rx_deferred_start) {
octeontx_log_err("rx deferred start not supported");
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
{
struct sfc_adapter *sa = dev->data->dev_private;
int rc;
+ uint64_t offloads;
sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
rx_queue_id, nb_rx_desc, socket_id);
sfc_adapter_lock(sa);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
- rx_conf, mb_pool);
+ rx_conf, mb_pool, offloads);
if (rc != 0)
goto fail_rx_qinit;
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
{
struct sfc_adapter *sa = dev->data->dev_private;
int rc;
+ uint64_t offloads;
sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
tx_queue_id, nb_tx_desc, socket_id);
sfc_adapter_lock(sa);
- rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+ tx_conf, offloads);
if (rc != 0)
goto fail_tx_qinit;
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
}
}
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_rx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
- const struct rte_eth_rxconf *rx_conf)
+ const struct rte_eth_rxconf *rx_conf,
+ uint64_t offloads)
{
uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
rc = EINVAL;
}
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
DEV_RX_OFFLOAD_CHECKSUM)
sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
- if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -998,7 +973,8 @@ int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool)
+ struct rte_mempool *mb_pool,
+ uint64_t offloads)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
}
if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
"object size is too small", sw_index);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
rxq_info->type_flags =
- (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ (offloads & DEV_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool);
+ struct rte_mempool *mb_pool,
+ uint64_t offloads);
void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
}
static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
- uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_tx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
-static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
{
int rc = 0;
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
}
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
- ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
- if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
uint16_t nb_tx_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
- rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
txq->flags = tx_conf->txq_flags;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
info.flags = tx_conf->txq_flags;
- info.offloads = tx_conf->offloads;
+ info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
info.txq_hw_ring = txq->mem.esm_base;
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
uint16_t nb_tx_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf);
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads);
void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
void sfc_tx_qflush_done(struct sfc_txq *txq);
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
DEV_RX_OFFLOAD_CRC_STRIP;
}
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
/* Callback to handle the rx burst of packets to the correct interface and
* file descriptor(s) in a multi-queue setup.
*/
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
DEV_TX_OFFLOAD_TCP_CKSUM;
}
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
- uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
- tap_tx_offload_get_queue_capa();
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- TAP_LOG(ERR,
- "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, supp_tx_offloads);
- return -rte_errno;
- }
if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
TAP_LOG(ERR,
"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
- /* Verify application offloads are valid for our port and queue. */
- if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
- rte_errno = ENOTSUP;
- TAP_LOG(ERR,
- "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- (void *)dev, rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (tap_rx_offload_get_port_capa() |
- tap_rx_offload_get_queue_capa()));
- return -rte_errno;
- }
rxq->mp = mp;
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
struct pmd_internals *internals = dev->data->dev_private;
struct tx_queue *txq;
int ret;
+ uint64_t offloads;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -1;
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
txq = dev->data->tx_queues[tx_queue_id];
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (tx_conf != NULL &&
- !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
- if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
- txq->csum = !!(tx_conf->offloads &
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM));
- } else {
- rte_errno = ENOTSUP;
- TAP_LOG(ERR,
- "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- (void *)dev, tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- (tap_tx_offload_get_port_capa() |
- tap_tx_offload_get_queue_capa()));
- return -rte_errno;
- }
- }
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->csum = !!(offloads &
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM));
+
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
return -1;
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
bool is_single_pool;
struct nicvf_txq *txq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
- conf_offloads = tx_conf->offloads;
- offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- unsupported_offloads = conf_offloads & ~offload_capa;
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Tx deferred start is not supported */
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
txq->tx_free_thresh = tx_free_thresh;
txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
- txq->offloads = conf_offloads;
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->offloads = offloads;
- is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t rx_free_thresh;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
-
- conf_offloads = rx_conf->offloads;
-
- if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- offload_capa = NICVF_RX_OFFLOAD_CAPA;
- unsupported_offloads = conf_offloads & ~offload_capa;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Mempool memory must be contiguous, so must be one memory segment*/
if (mp->nb_mem_chunks != 1) {
PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_rx_queue_reset(rxq);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
- rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+ rte_mempool_avail_count(mp), rxq->phys, offloads);
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_txmode *txmode = &conf->txmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint8_t cqcount;
- uint64_t conf_rx_offloads, rx_offload_capa;
- uint64_t conf_tx_offloads, tx_offload_capa;
PMD_INIT_FUNC_TRACE();
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
- tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
- PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_tx_offloads, tx_offload_capa);
- return -ENOTSUP;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- conf_rx_offloads = rxmode->offloads;
- rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
- if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
- PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_rx_offloads, rx_offload_capa);
- return -ENOTSUP;
- }
-
- if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
- const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
"Cannot allocate mbufs for rx virtqueue");
}
- if ((rx_conf->offloads ^ rxmode->offloads) &
- VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
- return -EINVAL;
-
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
/* cannot use simple rxtx funcs with multisegs or offloads */
- if (tx_conf->offloads)
+ if (dev->data->dev_conf.txmode.offloads)
hw->use_simple_tx = 0;
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
const struct rte_memzone *mz;
struct vmxnet3_hw *hw = dev->data->dev_private;
size_t size;
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
PMD_INIT_FUNC_TRACE();
- if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
- RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
- " do not match supported 0x%" PRIx64,
- rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
- return -ENOTSUP;
- }
-
- if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
- RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
- " do not match supported 0x%" PRIx64,
- tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
- return -ENOTSUP;
- }
-
if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
- ETH_TXQ_FLAGS_NOXSUMSCTP) {
- PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
- return -EINVAL;
- }
-
txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
RTE_CACHE_LINE_SIZE);
if (txq == NULL) {
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
ETHER_MAX_LEN;
}
+ /* Any requested offloading must be within its device capabilities */
+ if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+ local_conf.rxmode.offloads) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+ "0x%" PRIx64 " doesn't match Rx offloads "
+ "capabilities 0x%" PRIx64 "\n",
+ port_id,
+ local_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -EINVAL;
+ }
+ if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+ local_conf.txmode.offloads) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+ "0x%" PRIx64 " doesn't match Tx offloads "
+ "capabilities 0x%" PRIx64 "\n",
+ port_id,
+ local_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
+ return -EINVAL;
+ }
+
/* Check that device supports requested rss hash functions. */
if ((dev_info.flow_type_rss_offloads |
dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,39 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
&local_conf.offloads);
}
+ /*
+ * If an offloading has already been enabled in
+ * rte_eth_dev_configure(), it has been enabled on all queues,
+ * so there is no need to enable it in this queue again.
+ * The local_conf.offloads input to underlying PMD only carries
+ * those offloadings which are only enabled on this queue and
+ * not enabled on all queues.
+ * The underlying PMD must be aware of this point.
+ */
+ local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Newly added offloads for this queue are those not enabled in
+ * rte_eth_dev_configure() and they must be per-queue type.
+ * A pure per-port offload can't be enabled on a queue while
+ * disabled on another queue. A pure per-port offload can't
+ * be newly enabled on a queue if it hasn't been enabled in
+ * rte_eth_dev_configure().
+ */
+ if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+ local_conf.offloads) {
+ RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, new "
+ "added offloads 0x%" PRIx64 " must be "
+ "within per-queue offload capabilities 0x%"
+ PRIx64 " in %s\n",
+ port_id,
+ rx_queue_id,
+ local_conf.offloads,
+ dev_info.rx_queue_offload_capa,
+ __func__);
+ return -EINVAL;
+ }
+
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
socket_id, &local_conf, mp);
if (!ret) {
@@ -1612,6 +1667,39 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
&local_conf.offloads);
}
+ /*
+ * If an offloading has already been enabled in
+ * rte_eth_dev_configure(), it has been enabled on all queues,
+ * so there is no need to enable it in this queue again.
+ * The local_conf.offloads input to underlying PMD only carries
+ * those offloadings which are only enabled on this queue and
+ * not enabled on all queues.
+ * The underlying PMD must be aware of this point.
+ */
+ local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Newly added offloads for this queue are those not enabled in
+ * rte_eth_dev_configure() and they must be per-queue type.
+ * A pure per-port offload can't be enabled on a queue while
+ * disabled on another queue. A pure per-port offload can't
+ * be newly enabled on a queue if it hasn't been enabled in
+ * rte_eth_dev_configure().
+ */
+ if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+ local_conf.offloads) {
+ RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d, new "
+ "added offloads 0x%" PRIx64 " must be "
+ "within per-queue offload capabilities 0x%"
+ PRIx64 " in %s\n",
+ port_id,
+ tx_queue_id,
+ local_conf.offloads,
+ dev_info.tx_queue_offload_capa,
+ __func__);
+ return -EINVAL;
+ }
+
return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}