@@ -1547,7 +1547,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
}
} else {
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
@@ -2157,33 +2157,31 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -EBUSY;
}
- hw->use_simple_rx = 1;
-
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
hw->use_inorder_rx = 1;
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
}
if (vtpci_packed_queue(hw)) {
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
hw->use_inorder_rx = 0;
}
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
}
#endif
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
}
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_VLAN_STRIP))
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
return 0;
}
@@ -250,7 +250,8 @@ struct virtio_hw {
uint8_t vlan_strip;
uint8_t use_msix;
uint8_t modern;
- uint8_t use_simple_rx;
+ uint8_t use_vec_rx;
+ uint8_t use_vec_tx;
uint8_t use_inorder_rx;
uint8_t use_inorder_tx;
uint8_t weak_barriers;
@@ -995,7 +995,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -1013,7 +1013,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
&rxvq->fake_mbuf;
}
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
@@ -450,6 +450,8 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
VIRTIO_USER_ARG_PACKED_VQ,
+#define VIRTIO_USER_ARG_VECTORIZED "vectorized"
+ VIRTIO_USER_ARG_VECTORIZED,
NULL
};
@@ -518,7 +520,8 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
*/
hw->use_msix = 1;
hw->modern = 0;
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
hw->use_inorder_rx = 0;
hw->use_inorder_tx = 0;
hw->virtio_user_dev = dev;
@@ -552,6 +555,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t mrg_rxbuf = 1;
uint64_t in_order = 1;
uint64_t packed_vq = 0;
+ uint64_t vectorized = 0;
+
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
@@ -668,6 +673,17 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
}
+	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
+		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
+				       &get_integer_arg, &vectorized) < 0) {
+			PMD_INIT_LOG(ERR, "error to parse %s",
+				     VIRTIO_USER_ARG_VECTORIZED);
+			goto end;
+		}
+	}
+
if (queues > 1 && cq == 0) {
PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
goto end;
@@ -705,6 +721,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
hw = eth_dev->data->dev_private;
+
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
queue_size, mac_addr, &ifname, server_mode,
mrg_rxbuf, in_order, packed_vq) < 0) {
@@ -720,6 +737,20 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
goto end;
}
+ if (vectorized) {
+ if (packed_vq) {
+#if defined(CC_AVX512_SUPPORT)
+ hw->use_vec_rx = 1;
+ hw->use_vec_tx = 1;
+#else
+			PMD_INIT_LOG(INFO,
+				"build environment does not match packed ring vectorized requirement");
+#endif
+ } else {
+ hw->use_vec_rx = 1;
+ }
+ }
+
rte_eth_dev_probing_finish(eth_dev);
ret = 0;
@@ -777,4 +808,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"server=<0|1> "
"mrg_rxbuf=<0|1> "
"in_order=<0|1> "
- "packed_vq=<0|1>");
+	"packed_vq=<0|1> "
+	"vectorized=<0|1>");
@@ -32,7 +32,7 @@ virtqueue_detach_unused(struct virtqueue *vq)
end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
for (idx = 0; idx < vq->vq_nentries; idx++) {
- if (hw->use_simple_rx && type == VTNET_RQ) {
+ if (hw->use_vec_rx && type == VTNET_RQ) {
if (start <= end && idx >= start && idx < end)
continue;
if (start > end && (idx >= start || idx < end))
@@ -97,7 +97,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
for (i = 0; i < nb_used; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
uep = &vq->vq_split.ring.used->ring[used_idx];
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
vq->vq_free_cnt++;
@@ -121,7 +121,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
vq->vq_used_cons_idx++;
}
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxq);
if (virtqueue_kick_prepare(vq))