[v13,07/10] net/virtio: add virtio send command packed queue support

Message ID 20181214155916.1142-8-jfreimann@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: implement packed virtqueues

Checks

Context              Check    Description
ci/Intel-compilation success  Compilation OK

Commit Message

Jens Freimann Dec. 14, 2018, 3:59 p.m. UTC
  Use packed virtqueue format when reading and writing descriptors
to/from the ring.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 90 ++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
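
The patch relies on the packed-ring availability convention from the VIRTIO 1.1 spec: the driver makes a descriptor available by setting its AVAIL flag bit to its wrap counter and the USED flag bit to the inverse, and the entry counts as used once the device has written both bits equal again. The sketch below is a minimal, self-contained illustration of that convention, mirroring what the driver's _set_desc_avail() and desc_is_used() helpers do; the simplified struct and the pkd_* names are hypothetical stand-ins for illustration, not the driver's actual definitions.

/*
 * Standalone illustration of the packed-ring AVAIL/USED convention
 * (VIRTIO 1.1). The struct and pkd_* names are simplified stand-ins
 * for vring_packed_desc, _set_desc_avail() and desc_is_used().
 */
#include <stdbool.h>
#include <stdint.h>

#define PKD_DESC_F_AVAIL (1 << 7)   /* VIRTQ_DESC_F_AVAIL */
#define PKD_DESC_F_USED  (1 << 15)  /* VIRTQ_DESC_F_USED */

struct pkd_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Driver side: AVAIL mirrors the wrap counter, USED gets the inverse,
 * so the device sees AVAIL != USED for a freshly available descriptor. */
static void
pkd_set_desc_avail(struct pkd_desc *d, int wrap_counter)
{
	uint16_t flags = d->flags & ~(PKD_DESC_F_AVAIL | PKD_DESC_F_USED);

	d->flags = flags | (wrap_counter ? PKD_DESC_F_AVAIL : PKD_DESC_F_USED);
}

/* Completion check: the device sets USED equal to AVAIL, so the entry is
 * used once both bits match the wrap counter the driver made it
 * available with. */
static bool
pkd_desc_is_used(const struct pkd_desc *d, int used_wrap_counter)
{
	bool avail = !!(d->flags & PKD_DESC_F_AVAIL);
	bool used  = !!(d->flags & PKD_DESC_F_USED);

	return avail == used && used == !!used_wrap_counter;
}

The busy-wait loop in virtio_pq_send_command() below relies on exactly this check, polling the head descriptor's flags until the device has consumed the command chain.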
  

Comments

Maxime Coquelin Dec. 17, 2018, 4:48 p.m. UTC | #1
On 12/14/18 4:59 PM, Jens Freimann wrote:
> Use packed virtqueue format when reading and writing descriptors
> to/from the ring.
> 
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>   drivers/net/virtio/virtio_ethdev.c | 90 ++++++++++++++++++++++++++++++
>   1 file changed, 90 insertions(+)

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  

Patch

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0394ac0af..5e7af51c9 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -141,6 +141,90 @@  static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
+static struct virtio_pmd_ctrl *
+virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		       int *dlen, int pkt_num)
+{
+	struct virtqueue *vq = cvq->vq;
+	int head;
+	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+	struct virtio_pmd_ctrl *result;
+	int wrap_counter;
+	int sum = 0;
+	int k;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	head = vq->vq_avail_idx;
+	wrap_counter = vq->avail_wrap_counter;
+	desc[head].flags = VRING_DESC_F_NEXT;
+	desc[head].addr = cvq->virtio_net_hdr_mem;
+	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	for (k = 0; k < pkt_num; k++) {
+		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		desc[vq->vq_avail_idx].len = dlen[k];
+		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT;
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		_set_desc_avail(&desc[vq->vq_avail_idx],
+				vq->avail_wrap_counter);
+		rte_smp_wmb();
+		if (++vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->avail_wrap_counter ^= 1;
+		}
+	}
+
+
+	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE;
+	_set_desc_avail(&desc[vq->vq_avail_idx],
+			vq->avail_wrap_counter);
+	_set_desc_avail(&desc[head], wrap_counter);
+	rte_smp_wmb();
+
+	vq->vq_free_cnt--;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	virtqueue_notify(vq);
+
+	/* wait for used descriptors in virtqueue */
+	do {
+		rte_rmb();
+		usleep(100);
+	} while (!desc_is_used(&desc[head], vq));
+
+	/* now get used descriptors */
+	while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
+		vq->vq_free_cnt++;
+		if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+			vq->vq_used_cons_idx -= vq->vq_nentries;
+			vq->used_wrap_counter ^= 1;
+		}
+	}
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
 static int
 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 		int *dlen, int pkt_num)
@@ -174,6 +258,11 @@  virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
 		sizeof(struct virtio_pmd_ctrl));
 
+	if (vtpci_packed_queue(vq->hw)) {
+		result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+		goto out_unlock;
+	}
+
 	/*
 	 * Format is enforced in qemu code:
 	 * One TX packet for header;
@@ -245,6 +334,7 @@  virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
 	result = cvq->virtio_net_hdr_mz->addr;
 
+out_unlock:
 	rte_spinlock_unlock(&cvq->lock);
 	return result->status;
 }
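
For context on how the new path is exercised: every control-queue request in the driver goes through virtio_send_command(), which with this patch branches into virtio_pq_send_command() whenever vtpci_packed_queue() reports a packed virtqueue. The caller below is a rough sketch loosely modeled on the driver's promiscuous-mode handler; the struct layout, field names and constants are assumptions taken from the driver headers of this era and are not verified against this tree.

/* Hypothetical caller, for illustration only. Assumes the driver's
 * internal headers (virtio_ethdev.h / virtqueue.h) are included and
 * that struct virtio_pmd_ctrl and the VIRTIO_NET_CTRL_RX* constants
 * look as they did around this release. */
static int
example_enable_promisc(struct virtio_hw *hw)
{
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;       /* RX mode control class */
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC; /* toggle promiscuous mode */
	ctrl.data[0] = 1;                          /* 1 = enable */
	dlen[0] = 1;

	/* Copies ctrl into the control-queue header memzone, builds the
	 * descriptor chain (packed or split) and blocks until the device
	 * returns a status byte. */
	return virtio_send_command(hw->cvq, &ctrl, dlen, 1);
}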