[v3,32/34] net/ice: support descriptor ops
Checks
Commit Message
Add below ops,
rx_descriptor_done
rx_descriptor_status
tx_descriptor_status
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
drivers/net/ice/ice_ethdev.c | 3 ++
drivers/net/ice/ice_lan_rxtx.c | 84 ++++++++++++++++++++++++++++++++++++++++++
drivers/net/ice/ice_rxtx.h | 3 ++
3 files changed, 90 insertions(+)
Comments
On 12/12/2018 7:00 AM, Wenzhuo Lu wrote:
> Add below ops,
> rx_descriptor_done
> rx_descriptor_status
> tx_descriptor_status
I guess this is our mistake to not clarify this, sorry about it, but
"rx_descriptor_status" replaces "rx_descriptor_done", it is an extended
version, cc'ed Olivier to correct me in case I am wrong.
So when "rx_descriptor_status" implemented, "rx_descriptor_done" can be dropped.
Please see commit log of:
Commit b1b700ce7d6f ("ethdev: add descriptor status API")
copy-paste related part:
"
The descriptor_done() API, and probably the rx_queue_count() API could
be replaced by this new API as soon as it is implemented on all PMDs.
"
Hi Ferruh,
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Friday, December 14, 2018 5:30 AM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Li, Xiaoyun
> <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Olivier MATZ
> <olivier.matz@6wind.com>
> Subject: Re: [dpdk-dev] [PATCH v3 32/34] net/ice: support descriptor ops
>
> On 12/12/2018 7:00 AM, Wenzhuo Lu wrote:
> > Add below ops,
> > rx_descriptor_done
> > rx_descriptor_status
> > tx_descriptor_status
>
> I guess this is our mistake to not clarify this, sorry about it, but
> "rx_descriptor_status" replaces "rx_descriptor_done", it is an extended
> version, cc'ed Olivier to correct me in case I am wrong.
>
> So when "rx_descriptor_status" implemented, "rx_descriptor_done" can be
> dropped.
>
> Please see commit log of:
> Commit b1b700ce7d6f ("ethdev: add descriptor status API")
>
> copy-paste related part:
> "
> The descriptor_done() API, and probably the rx_queue_count() API could
> be replaced by this new API as soon as it is implemented on all PMDs.
> "
Thanks for the reminder. Will remove it.
@@ -112,6 +112,9 @@ static int ice_xstats_get_names(struct rte_eth_dev *dev,
.get_eeprom_length = ice_get_eeprom_length,
.get_eeprom = ice_get_eeprom,
.rx_queue_count = ice_rx_queue_count,
+ .rx_descriptor_done = ice_rx_descriptor_done,
+ .rx_descriptor_status = ice_rx_descriptor_status,
+ .tx_descriptor_status = ice_tx_descriptor_status,
.stats_get = ice_stats_get,
.stats_reset = ice_stats_reset,
.xstats_get = ice_xstats_get,
@@ -1490,6 +1490,90 @@
return desc;
}
+/* Legacy rx_descriptor_done op: report whether the RX descriptor
+ * `offset` entries past the current software tail has been written
+ * back by hardware.
+ *
+ * @param rx_queue RX queue (struct ice_rx_queue *)
+ * @param offset   descriptor position relative to rxq->rx_tail
+ * @return 1 if the DD (descriptor done) bit is set, otherwise 0;
+ *         an out-of-range offset logs an error and also returns 0.
+ *
+ * NOTE(review): per the discussion above, this op is superseded by
+ * rx_descriptor_status and is slated for removal.
+ */
+int
+ice_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union ice_rx_desc *rxdp;
+ struct ice_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_rx_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
+ return 0;
+ }
+
+ /* Wrap around the ring if tail + offset runs past the end. */
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+
+ /* Extract the status field from qword1 and test the DD bit. */
+ ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
+ (1 << ICE_RX_DESC_STATUS_DD_S));
+
+ return ret;
+}
+
+/* rx_descriptor_status op: classify the RX descriptor `offset`
+ * entries past the current software tail.
+ *
+ * @param rx_queue RX queue (struct ice_rx_queue *)
+ * @param offset   descriptor position relative to rxq->rx_tail
+ * @return RTE_ETH_RX_DESC_DONE if hardware set the DD bit,
+ *         RTE_ETH_RX_DESC_UNAVAIL if the slot lies in the held
+ *         (not yet refilled) tail region, RTE_ETH_RX_DESC_AVAIL
+ *         otherwise, or -EINVAL for an out-of-range offset.
+ */
+int
+ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ice_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ /* Descriptors held back for refill are not usable by hardware. */
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ /* Wrap around the ring if tail + offset runs past the end. */
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ /* Test the DD bit within the status field of qword1 (LE layout). */
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
+ ICE_RXD_QW1_STATUS_S);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+/* tx_descriptor_status op: classify the TX descriptor `offset`
+ * entries past the current software tail.
+ *
+ * Completion is only reported on descriptors carrying the RS bit,
+ * so the lookup is rounded up to the next tx_rs_thresh boundary
+ * before checking.
+ *
+ * @param tx_queue TX queue (struct ice_tx_queue *)
+ * @param offset   descriptor position relative to txq->tx_tail
+ * @return RTE_ETH_TX_DESC_DONE if the descriptor's DTYPE field
+ *         reads back as DESC_DONE, RTE_ETH_TX_DESC_FULL otherwise,
+ *         or -EINVAL for an out-of-range offset.
+ */
+int
+ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ice_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ /* tail + offset can be almost 2 * nb_tx_desc before rounding up,
+ * so the wrap check may be needed twice.
+ */
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ /* Hardware rewrites DTYPE to DESC_DONE when the descriptor
+ * (and all before it) has been processed.
+ */
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
+ expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
+ ICE_TXD_QW1_DTYPE_S);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
ice_clear_queues(struct rte_eth_dev *dev)
{
@@ -143,6 +143,9 @@ uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
void ice_set_tx_function(struct rte_eth_dev *dev);
uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,