[2/2] net/cnxk: ethdev Rx/Tx queue status callbacks

Message ID 20211203163627.3254236-2-rbhansali@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series [1/2] common/cnxk: get head-tail of Rx and Tx queues

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS

Commit Message

Rahul Bhansali Dec. 3, 2021, 4:36 p.m. UTC
Provides ethdev callback support for rx_queue_count,
rx_descriptor_status and tx_descriptor_status.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |  3 ++
 drivers/net/cnxk/cnxk_ethdev.h     |  5 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 60 ++++++++++++++++++++++++++++++
 3 files changed, 68 insertions(+)
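
For context, these driver callbacks back the generic ethdev fast-path queries
rte_eth_rx_queue_count(), rte_eth_rx_descriptor_status() and
rte_eth_tx_descriptor_status(). A minimal usage sketch (not part of the patch;
it assumes EAL is initialized and port_id refers to a started cnxk port with at
least one Rx and one Tx queue, and uses arbitrary queue/offset values):

  #include <stdio.h>
  #include <rte_ethdev.h>

  /* Illustrative only: query the Rx fill level and per-descriptor state
   * through the generic ethdev API, which dispatches to the callbacks
   * registered by this patch.
   */
  static void
  poll_queue_state(uint16_t port_id)
  {
  	int used, st;

  	/* Number of used descriptors in Rx queue 0 (rx_queue_count). */
  	used = rte_eth_rx_queue_count(port_id, 0);
  	if (used >= 0)
  		printf("rxq0: %d descriptors in use\n", used);

  	/* Status of the descriptor 16 slots ahead of the next one the
  	 * driver will process (rx_descriptor_status). */
  	st = rte_eth_rx_descriptor_status(port_id, 0, 16);
  	if (st == RTE_ETH_RX_DESC_DONE)
  		printf("rxq0[+16]: packet received, not yet processed\n");

  	/* Same for Tx: FULL means the slot is still owned by hardware. */
  	st = rte_eth_tx_descriptor_status(port_id, 0, 16);
  	if (st == RTE_ETH_TX_DESC_FULL)
  		printf("txq0[+16]: descriptor still in flight\n");
  }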
  

Comments

Jerin Jacob Jan. 19, 2022, 9:01 a.m. UTC | #1
On Fri, Dec 3, 2021 at 10:06 PM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> Provides ethdev callback support of rx_queue_count,
> rx_descriptor_status and tx_descriptor_status.
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>

Missed updating doc/guides/nics/features/cnxk* for "Rx descriptor
status" and "Tx descriptor status".
The rest looks good to me. Please send the next version.
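
For reference, the missing update is to the NIC feature matrix: each relevant
cnxk* file under doc/guides/nics/features/ would gain the following entries in
its existing [Features] section (illustrative; the exact file list is not shown
here):

  Rx descriptor status = Y
  Tx descriptor status = Y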


> [...]
  

Patch

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..183fd241d8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1595,6 +1595,9 @@  cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	int rc, max_entries;
 
 	eth_dev->dev_ops = &cnxk_eth_dev_ops;
+	eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
+	eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
 
 	/* Alloc security context */
 	sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..43814a81fc 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -559,6 +559,11 @@  void cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
 void cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
 			   struct rte_eth_txq_info *qinfo);
 
+/* Queue status */
+int cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset);
+int cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset);
+uint32_t cnxk_nix_rx_queue_count(void *rxq);
+
 /* Lookup configuration */
 const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
 void *cnxk_nix_fastpath_lookup_mem_get(void);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..1255d6b40f 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -694,6 +694,66 @@  cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
 	memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
 }
 
+uint32_t
+cnxk_nix_rx_queue_count(void *rxq)
+{
+	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+	struct roc_nix *nix = &rxq_sp->dev->nix;
+	uint32_t head, tail;
+
+	roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
+	return (tail - head) % (rxq_sp->qconf.nb_desc);
+}
+
+static inline int
+nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
+{
+	/* Check given offset(queue index) has packet filled/xmit by HW
+	 * in case of Rx or Tx.
+	 * Also, checks for wrap around case.
+	 */
+	return ((tail > head && offset <= tail && offset >= head) ||
+		(head > tail && (offset >= head || offset <= tail))) ?
+		       is_rx :
+		       !is_rx;
+}
+
+int
+cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
+{
+	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+	struct roc_nix *nix = &rxq_sp->dev->nix;
+	uint32_t head, tail;
+
+	if (rxq_sp->qconf.nb_desc <= offset)
+		return -EINVAL;
+
+	roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
+
+	if (nix_offset_has_packet(head, tail, offset, 1))
+		return RTE_ETH_RX_DESC_DONE;
+	else
+		return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
+{
+	struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
+	struct roc_nix *nix = &txq_sp->dev->nix;
+	uint32_t head = 0, tail = 0;
+
+	if (txq_sp->qconf.nb_desc <= offset)
+		return -EINVAL;
+
+	roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);
+
+	if (nix_offset_has_packet(head, tail, offset, 0))
+		return RTE_ETH_TX_DESC_DONE;
+	else
+		return RTE_ETH_TX_DESC_FULL;
+}
+
 /* It is a NOP for cnxk as HW frees the buffer on xmit */
 int
 cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
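
A note on the check above: nix_offset_has_packet() tests whether the given
offset falls inside the circular window between head and tail (entries already
filled by hardware for Rx, or still pending transmit for Tx), including the
case where tail has wrapped past the end of the ring; the is_rx flip turns a
hit into DONE for Rx and FULL for Tx. A standalone sketch with hypothetical
values on an 8-entry ring, also showing the (tail - head) % nb_desc fill-level
formula from cnxk_nix_rx_queue_count():

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  /* Same predicate as nix_offset_has_packet() in the patch. */
  static inline int
  offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
  {
  	bool in_window = (tail > head && offset >= head && offset <= tail) ||
  			 (head > tail && (offset >= head || offset <= tail));

  	/* In-window: Rx -> DONE (true), Tx -> FULL (false); flipped outside. */
  	return in_window ? is_rx : !is_rx;
  }

  int
  main(void)
  {
  	/* Hypothetical 8-entry ring. No wrap: window is 2..5. */
  	assert(offset_has_packet(2, 5, 3, true));    /* Rx: DONE  */
  	assert(!offset_has_packet(2, 5, 7, true));   /* Rx: AVAIL */

  	/* Wrapped: head=6, tail=1, window is 6,7,0,1. */
  	assert(offset_has_packet(6, 1, 0, true));    /* Rx: DONE  */
  	assert(offset_has_packet(6, 1, 3, false));   /* Tx: DONE, already sent */

  	/* Fill level as in cnxk_nix_rx_queue_count(): unsigned wrap keeps
  	 * (tail - head) % nb_desc correct for a power-of-two ring,
  	 * here 3 used entries (6, 7 and 0). */
  	uint32_t head = 6, tail = 1, nb_desc = 8;
  	assert((tail - head) % nb_desc == 3);
  	return 0;
  }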