[v5,07/11] net/virtio: implement transmit path for packed queues

Message ID 20180906181947.20646-8-jfreimann@redhat.com
State Superseded, archived
Delegated to: Maxime Coquelin
Series
  • implement packed virtqueues

Checks

Context                 Check     Description
ci/Intel-compilation    success   Compilation OK

Commit Message

Jens Freimann Sept. 6, 2018, 6:19 p.m. UTC
This implements the transmit path for devices with
support for packed virtqueues.

Add the feature bit, and code to add buffers to the
vring and mark descriptors as available.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |   8 +-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
 3 files changed, 121 insertions(+), 2 deletions(-)

Comments

Gavin Hu Sept. 10, 2018, 7:13 a.m. UTC | #1
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Jens Freimann
> Sent: Friday, September 7, 2018 2:20 AM
> To: dev@dpdk.org
> Cc: tiwei.bie@intel.com; maxime.coquelin@redhat.com
> Subject: [dpdk-dev] [PATCH v5 07/11] net/virtio: implement transmit path
> for packed queues
>
> This implements the transmit path for devices with support for packed
> virtqueues.
>
> Add the feature bit, and code to add buffers to the vring and mark
> descriptors as available.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c |   8 +-
>  drivers/net/virtio/virtio_ethdev.h |   2 +
>  drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
>  3 files changed, 121 insertions(+), 2 deletions(-)
[...]
> +		do {
> +			idx = update_pq_avail_index(vq);
> +			desc[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
> +			desc[idx].len   = txm->data_len;
> +			desc[idx].flags = VRING_DESC_F_NEXT |
> +				VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> +				VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);

According to the spec, all the flags updates should be moved to after the memory barrier.

> +			descs_used++;
> +		} while ((txm = txm->next) != NULL);
> +
> +		desc[idx].flags &= ~VRING_DESC_F_NEXT;

Ditto.
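
Something like this, e.g. (rough, untested sketch; "head_flags" is a
local I am making up, the rest reuses names from this patch):

	uint16_t head_flags = VRING_DESC_F_NEXT |
		VRING_DESC_F_AVAIL(wrap_counter) |
		VRING_DESC_F_USED(!wrap_counter);

	/* ... set addr/len everywhere, and flags for all descriptors
	 * except the head ... */

	rte_smp_wmb();

	/* The store to the head descriptor's flags is what makes the
	 * whole chain visible to the device, so it has to be the last
	 * descriptor write, after the barrier. */
	desc[head_idx].flags = head_flags;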

[...]
Gavin Hu Sept. 10, 2018, 9:39 a.m. UTC | #2
One more comment:

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Jens Freimann
> Sent: Friday, September 7, 2018 2:20 AM
> To: dev@dpdk.org
> Cc: tiwei.bie@intel.com; maxime.coquelin@redhat.com
> Subject: [dpdk-dev] [PATCH v5 07/11] net/virtio: implement transmit path
> for packed queues
>
> This implements the transmit path for devices with support for packed
> virtqueues.
>
> Add the feature bit, and code to add buffers to the vring and mark
> descriptors as available.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c |   8 +-
>  drivers/net/virtio/virtio_ethdev.h |   2 +
>  drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
>  3 files changed, 121 insertions(+), 2 deletions(-)
[...]
> +/* Cleanup from completed transmits. */
> +static void
> +virtio_xmit_cleanup_packed(struct virtqueue *vq)
> +{
> +	uint16_t idx;
> +	uint16_t size = vq->vq_nentries;
> +	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
> +	struct vq_desc_extra *dxp;
> +
> +	idx = vq->vq_used_cons_idx;
> +	while (desc_is_used(&desc[idx], &vq->vq_ring) &&
> +	       vq->vq_free_cnt < size) {
> +		dxp = &vq->vq_descx[idx];
> +		vq->vq_free_cnt += dxp->ndescs;
> +		idx = dxp->ndescs;

Should be "+=" here?
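
That is, presumably:

	idx += dxp->ndescs;

so that, together with the wrap check on the next line, the index
steps over the chain that was just reclaimed instead of jumping to
slot dxp->ndescs.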

> +		idx = idx >= size ? idx - size : idx;
> +	}
> +}
[...]
Maxime Coquelin Sept. 12, 2018, 2:58 p.m. UTC | #3
On 09/06/2018 08:19 PM, Jens Freimann wrote:
> This implements the transmit path for devices with
> support for packed virtqueues.
> 
> Add the feature bit, and code to add buffers to the vring and mark
> descriptors as available.
> 
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>   drivers/net/virtio/virtio_ethdev.c |   8 +-
>   drivers/net/virtio/virtio_ethdev.h |   2 +
>   drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
>   3 files changed, 121 insertions(+), 2 deletions(-)
[...]
> +		rte_smp_wmb();
> +		prev = (idx > 0 ? idx : vq->vq_nentries) - 1;
> +		desc[prev].index = head_idx; //FIXME

//FIXIT! :)

[...]
> @@ -736,7 +842,12 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>   	if (hw->use_inorder_tx)
>   		vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
>   
> -	VIRTQUEUE_DUMP(vq);
> +	if (vtpci_packed_queue(hw)) {
> +		vq->vq_ring.avail_wrap_counter = 1;
> +	}
> +
> +	if (!vtpci_packed_queue(hw))
> +		VIRTQUEUE_DUMP(vq);

I guess the check isn't necessary anymore since support is added in
patch 5.
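
i.e., if I read patch 5 right, this could simply become:

	if (vtpci_packed_queue(hw))
		vq->vq_ring.avail_wrap_counter = 1;

	VIRTQUEUE_DUMP(vq);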

>   
>   	return 0;
>   }
>
Tiwei Bie Sept. 13, 2018, 9:15 a.m. UTC | #4
On Thu, Sep 06, 2018 at 07:19:43PM +0100, Jens Freimann wrote:
> This implements the transmit path for devices with
> support for packed virtqueues.
> 
> Add the feature bit, and code to add buffers to the vring and mark
> descriptors as available.
> 
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c |   8 +-
>  drivers/net/virtio/virtio_ethdev.h |   2 +
>  drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
>  3 files changed, 121 insertions(+), 2 deletions(-)
[...]
[...]
> +		txvq->stats.bytes += txm->pkt_len;
> +

We also need to update the stats by calling
virtio_update_packet_stats().

We also need to handle the offloads. See
virtqueue_xmit_offload().
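
Something like this (untested sketch; it reuses the tx_hdr region that
desc[head_idx] already points at in this patch, plus the two helpers
named above from the split-ring path):

	/* txm still points at the head mbuf here, before the
	 * do/while walks the segment chain */
	struct virtio_net_hdr *hdr = &txr[head_idx].tx_hdr;

	/* fill the virtio net header from the mbuf ol_flags */
	virtqueue_xmit_offload(hdr, txm, /* Tx offloads negotiated? */ true);

and, in place of the open-coded stats.bytes update:

	virtio_update_packet_stats(&txvq->stats, txm);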

[...]

Patch

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ad91f7f82..d2c5755bb 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -384,6 +384,8 @@  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
+	if (vtpci_packed_queue(hw))
+		vq->vq_ring.avail_wrap_counter = 1;
 
 	/*
 	 * Reserve a memzone for vring elements
@@ -1338,7 +1340,11 @@  set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
 	}
 
-	if (hw->use_inorder_tx) {
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+	} else if (hw->use_inorder_tx) {
 		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index b726ad108..04161b461 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -79,6 +79,8 @@  uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
 
 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433e..12787070e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,6 +38,112 @@ 
 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif
 
+
+/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq)
+{
+	uint16_t idx;
+	uint16_t size = vq->vq_nentries;
+	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+	struct vq_desc_extra *dxp;
+
+	idx = vq->vq_used_cons_idx;
+	while (desc_is_used(&desc[idx], &vq->vq_ring) &&
+	       vq->vq_free_cnt < size) {
+		dxp = &vq->vq_descx[idx];
+		vq->vq_free_cnt += dxp->ndescs;
+		idx = dxp->ndescs;
+		idx = idx >= size ? idx - size : idx;
+	}
+}
+
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct virtnet_tx *txvq = tx_queue;
+	struct virtqueue *vq = txvq->vq;
+	uint16_t i;
+	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+	uint16_t idx, prev;
+	struct vq_desc_extra *dxp;
+
+	if (unlikely(nb_pkts < 1))
+		return nb_pkts;
+
+	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+	if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
+		virtio_xmit_cleanup_packed(vq);
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *txm = tx_pkts[i];
+		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+		uint16_t head_idx;
+		int wrap_counter;
+		int descs_used;
+
+		if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+			virtio_xmit_cleanup_packed(vq);
+
+			if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+				PMD_TX_LOG(ERR,
+					   "No free tx descriptors to transmit");
+				break;
+			}
+		}
+
+		txvq->stats.bytes += txm->pkt_len;
+
+		vq->vq_free_cnt -= txm->nb_segs + 1;
+
+		wrap_counter = vq->vq_ring.avail_wrap_counter;
+		idx = vq->vq_avail_idx; 
+		head_idx = idx;
+
+		dxp = &vq->vq_descx[idx];
+		if (dxp->cookie != NULL)
+			rte_pktmbuf_free(dxp->cookie);
+		dxp->cookie = txm;
+
+		desc[idx].addr  = txvq->virtio_net_hdr_mem +
+				  RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+		desc[idx].len   = vq->hw->vtnet_hdr_size;
+		desc[idx].flags = VRING_DESC_F_NEXT |
+			VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
+			VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
+		descs_used = 1;
+
+		do {
+			idx = update_pq_avail_index(vq);
+			desc[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
+			desc[idx].len   = txm->data_len;
+			desc[idx].flags = VRING_DESC_F_NEXT |
+				VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
+				VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
+			descs_used++;
+		} while ((txm = txm->next) != NULL);
+
+		desc[idx].flags &= ~VRING_DESC_F_NEXT;
+
+		rte_smp_wmb();
+		prev = (idx > 0 ? idx : vq->vq_nentries) - 1;
+		desc[prev].index = head_idx; //FIXME
+		desc[head_idx].flags =
+			(VRING_DESC_F_AVAIL(wrap_counter) |
+			 VRING_DESC_F_USED(!wrap_counter));
+
+		vq->vq_descx[head_idx].ndescs = descs_used;
+		idx = update_pq_avail_index(vq);
+	}
+
+	txvq->stats.packets += i;
+	txvq->stats.errors  += nb_pkts - i;
+
+	return i;
+}
+
 int
 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
 {
@@ -736,7 +842,12 @@  virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 	if (hw->use_inorder_tx)
 		vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
 
-	VIRTQUEUE_DUMP(vq);
+	if (vtpci_packed_queue(hw)) {
+		vq->vq_ring.avail_wrap_counter = 1;
+	}
+
+	if (!vtpci_packed_queue(hw))
+		VIRTQUEUE_DUMP(vq);
 
 	return 0;
 }