[dpdk-dev,6/7] net/virtio: add IN_ORDER Rx/Tx into selection

Message ID: 20180608090724.20855-7-yong.liu@intel.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Maxime Coquelin
Series: support VIRTIO_F_IN_ORDER feature

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Marvin Liu June 8, 2018, 9:07 a.m. UTC
Now that the IN_ORDER Rx/Tx paths have been added, the Rx/Tx path selection
logic needs to be updated. The Rx queue descriptor flush performed at device
start also needs to be handled separately.
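
For illustration, a minimal stand-alone sketch of that start-up flush handling
follows, based on the virtqueue_rxvq_flush() hunk later in this patch. The
types and helpers below (vq_sketch, mbuf_free(), flush_used_entry()) are
simplified placeholders, not the real DPDK driver structures or API.

#include <stdint.h>
#include <stdlib.h>

struct mbuf { int pad; };                           /* stand-in for rte_mbuf */
static void mbuf_free(struct mbuf *m) { free(m); }  /* stand-in for rte_pktmbuf_free() */

struct vq_desc_extra_sketch {
	struct mbuf *cookie;
};

struct vq_sketch {
	int use_simple_rx;
	int use_inorder_rx;
	struct mbuf **sw_ring;
	struct vq_desc_extra_sketch *vq_descx;
	uint16_t vq_free_cnt;
};

/* Release the mbuf behind one used-ring entry while flushing at start-up. */
void
flush_used_entry(struct vq_sketch *vq, uint16_t used_idx, uint16_t used_id)
{
	uint16_t desc_idx;

	if (vq->use_simple_rx) {
		/* Vectorized Rx keeps its mbufs in sw_ring, indexed by ring slot. */
		desc_idx = used_idx;
		mbuf_free(vq->sw_ring[desc_idx]);
		vq->vq_free_cnt++;
	} else if (vq->use_inorder_rx) {
		/* In-order Rx: free the cookie, then hand the descriptor back
		 * in order (vq_ring_free_inorder() in the real code). */
		desc_idx = used_id;
		if (vq->vq_descx[desc_idx].cookie != NULL) {
			mbuf_free(vq->vq_descx[desc_idx].cookie);
			vq->vq_descx[desc_idx].cookie = NULL;
		}
		vq->vq_free_cnt++;
	} else {
		/* Default split-ring handling, unchanged by this patch (omitted). */
		desc_idx = used_id;
		(void)desc_idx;
	}
}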

Rx path selection logic: if IN_ORDER is disabled, the normal Rx path is
selected. If IN_ORDER is enabled and both Rx offloads and mergeable buffers
are disabled, the simple Rx path is selected. Otherwise, the IN_ORDER Rx path
is selected.

Tx path selection logic: if IN_ORDER is disabled, the normal Tx path is
selected. If IN_ORDER is enabled and mergeable buffers are disabled, the
simple Tx path is selected. Otherwise, the IN_ORDER Tx path is selected.
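
To make these rules concrete, here is a minimal stand-alone sketch of the
selection decision. It is not driver code: the enum and function names are
illustrative only, and the boolean inputs stand for the negotiated
VIRTIO_F_IN_ORDER and VIRTIO_NET_F_MRG_RXBUF features and the configured Rx
checksum offloads.

#include <stdbool.h>
#include <stdio.h>

enum rx_path { RX_SIMPLE, RX_INORDER, RX_MERGEABLE, RX_STANDARD };
enum tx_path { TX_SIMPLE, TX_INORDER, TX_STANDARD };

/* Rx: normal (mergeable or standard) path unless IN_ORDER is negotiated. */
static enum rx_path
select_rx_path(bool in_order, bool mrg_rxbuf, bool rx_cksum_offload)
{
	if (!in_order)
		return mrg_rxbuf ? RX_MERGEABLE : RX_STANDARD;
	if (!mrg_rxbuf && !rx_cksum_offload)
		return RX_SIMPLE;
	return RX_INORDER;
}

/* Tx: standard path unless IN_ORDER is negotiated. */
static enum tx_path
select_tx_path(bool in_order, bool mrg_rxbuf)
{
	if (!in_order)
		return TX_STANDARD;
	return mrg_rxbuf ? TX_INORDER : TX_SIMPLE;
}

int
main(void)
{
	/* Example: IN_ORDER + mergeable buffers -> in-order Rx and Tx paths. */
	printf("rx=%d tx=%d\n",
	       select_rx_path(true, true, false),
	       select_tx_path(true, true));
	return 0;
}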

Signed-off-by: Marvin Liu <yong.liu@intel.com>
  

Comments

Tiwei Bie June 20, 2018, 7:44 a.m. UTC | #1
On Fri, Jun 08, 2018 at 05:07:23PM +0800, Marvin Liu wrote:
[...]
>  
> @@ -634,6 +634,24 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>  			virtio_rxq_rearm_vec(rxvq);
>  			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
>  		}
> +	} else if (hw->use_inorder_rx) {
> +		if ((!virtqueue_full(vq))) {
> +			uint16_t free_cnt = vq->vq_free_cnt;
> +			struct rte_mbuf *pkts[free_cnt];
> +
> +			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts, free_cnt)) {
> +				error = virtqueue_enqueue_inorder_refill(vq,
> +						pkts,
> +						free_cnt);
> +				if (unlikely(error)) {
> +					for (i = 0; i < free_cnt; i++)
> +						rte_pktmbuf_free(pkts[i]);
> +				}
> +			}
> +
> +			nbufs += free_cnt;
> +			vq_update_avail_idx(vq);

It looks a bit weird to introduce above code
in this patch.

> +		}
>  	} else {
>  		while (!virtqueue_full(vq)) {
>  			m = rte_mbuf_raw_alloc(rxvq->mpool);
> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
> index a7d0a9cbe..56a77cc71 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -74,6 +74,14 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
>  			desc_idx = used_idx;
>  			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
>  			vq->vq_free_cnt++;
> +		} else if (hw->use_inorder_rx) {
> +			desc_idx = (uint16_t)uep->id;
> +			dxp = &vq->vq_descx[desc_idx];
> +			if (dxp->cookie != NULL) {
> +				rte_pktmbuf_free(dxp->cookie);
> +				dxp->cookie = NULL;
> +			}

Ditto.

> +			vq_ring_free_inorder(vq, desc_idx, 1);
>  		} else {
>  			desc_idx = (uint16_t)uep->id;
>  			dxp = &vq->vq_descx[desc_idx];
> -- 
> 2.17.0
>
  
Marvin Liu June 20, 2018, 4:13 p.m. UTC | #2
On 06/20/2018 03:44 PM, Tiwei Bie wrote:
> On Fri, Jun 08, 2018 at 05:07:23PM +0800, Marvin Liu wrote:
> [...]
>>   
>> @@ -634,6 +634,24 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>>   			virtio_rxq_rearm_vec(rxvq);
>>   			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
>>   		}
>> +	} else if (hw->use_inorder_rx) {
>> +		if ((!virtqueue_full(vq))) {
>> +			uint16_t free_cnt = vq->vq_free_cnt;
>> +			struct rte_mbuf *pkts[free_cnt];
>> +
>> +			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts, free_cnt)) {
>> +				error = virtqueue_enqueue_inorder_refill(vq,
>> +						pkts,
>> +						free_cnt);
>> +				if (unlikely(error)) {
>> +					for (i = 0; i < free_cnt; i++)
>> +						rte_pktmbuf_free(pkts[i]);
>> +				}
>> +			}
>> +
>> +			nbufs += free_cnt;
>> +			vq_update_avail_idx(vq);
> It looks a bit weird to introduce above code
> in this patch.
Tiwei, the code is involved here only because the "use_inorder_rx" flag is
defined in this patch. I will move the changes in the rx_queue_setup_finish
and rxvq_flush functions to another patch.

Thanks,
Marvin
>
>> +		}
>>   	} else {
>>   		while (!virtqueue_full(vq)) {
>>   			m = rte_mbuf_raw_alloc(rxvq->mpool);
>> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
>> index a7d0a9cbe..56a77cc71 100644
>> --- a/drivers/net/virtio/virtqueue.c
>> +++ b/drivers/net/virtio/virtqueue.c
>> @@ -74,6 +74,14 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
>>   			desc_idx = used_idx;
>>   			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
>>   			vq->vq_free_cnt++;
>> +		} else if (hw->use_inorder_rx) {
>> +			desc_idx = (uint16_t)uep->id;
>> +			dxp = &vq->vq_descx[desc_idx];
>> +			if (dxp->cookie != NULL) {
>> +				rte_pktmbuf_free(dxp->cookie);
>> +				dxp->cookie = NULL;
>> +			}
> Ditto.
>
>> +			vq_ring_free_inorder(vq, desc_idx, 1);
>>   		} else {
>>   			desc_idx = (uint16_t)uep->id;
>>   			dxp = &vq->vq_descx[desc_idx];
>> -- 
>> 2.17.0
>>
  

Patch

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index df50a571a..af5eba655 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1320,6 +1320,11 @@  set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+	} else if (hw->use_inorder_rx) {
+		PMD_INIT_LOG(INFO,
+			"virtio: using inorder mergeable buffer Rx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->rx_pkt_burst = &virtio_recv_inorder_pkts;
 	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using mergeable buffer Rx path on port %u",
@@ -1335,6 +1340,10 @@  set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+	} else if (hw->use_inorder_tx) {
+		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_inorder_pkts;
 	} else {
 		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
 			eth_dev->data->port_id);
@@ -1871,23 +1880,25 @@  virtio_dev_configure(struct rte_eth_dev *dev)
 
 	rte_spinlock_init(&hw->state_lock);
 
-	hw->use_simple_rx = 1;
-	hw->use_simple_tx = 1;
-
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
 		hw->use_simple_rx = 0;
 		hw->use_simple_tx = 0;
 	}
 #endif
-	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		hw->use_simple_rx = 0;
-		hw->use_simple_tx = 0;
-	}
+	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			hw->use_inorder_rx = 1;
+			hw->use_inorder_tx = 1;
+		} else {
+			hw->use_simple_rx = 1;
+			hw->use_simple_tx = 1;
+		}
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
-		hw->use_simple_rx = 0;
+		if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+				   DEV_RX_OFFLOAD_TCP_CKSUM))
+			hw->use_inorder_rx = 1;
+	}
 
 	return 0;
 }
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 049344383..77f805df6 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -239,6 +239,8 @@  struct virtio_hw {
 	uint8_t     modern;
 	uint8_t     use_simple_rx;
 	uint8_t     use_simple_tx;
+	uint8_t     use_inorder_rx;
+	uint8_t     use_inorder_tx;
 	uint16_t    port_id;
 	uint8_t     mac_addr[ETHER_ADDR_LEN];
 	uint32_t    notify_off_multiplier;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index d0473d6b4..b0852b721 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -604,7 +604,7 @@  virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 	struct virtnet_rx *rxvq = &vq->rxq;
 	struct rte_mbuf *m;
 	uint16_t desc_idx;
-	int error, nbufs;
+	int error, nbufs, i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -634,6 +634,24 @@  virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			virtio_rxq_rearm_vec(rxvq);
 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
 		}
+	} else if (hw->use_inorder_rx) {
+		if ((!virtqueue_full(vq))) {
+			uint16_t free_cnt = vq->vq_free_cnt;
+			struct rte_mbuf *pkts[free_cnt];
+
+			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts, free_cnt)) {
+				error = virtqueue_enqueue_inorder_refill(vq,
+						pkts,
+						free_cnt);
+				if (unlikely(error)) {
+					for (i = 0; i < free_cnt; i++)
+						rte_pktmbuf_free(pkts[i]);
+				}
+			}
+
+			nbufs += free_cnt;
+			vq_update_avail_idx(vq);
+		}
 	} else {
 		while (!virtqueue_full(vq)) {
 			m = rte_mbuf_raw_alloc(rxvq->mpool);
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index a7d0a9cbe..56a77cc71 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -74,6 +74,14 @@  virtqueue_rxvq_flush(struct virtqueue *vq)
 			desc_idx = used_idx;
 			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
 			vq->vq_free_cnt++;
+		} else if (hw->use_inorder_rx) {
+			desc_idx = (uint16_t)uep->id;
+			dxp = &vq->vq_descx[desc_idx];
+			if (dxp->cookie != NULL) {
+				rte_pktmbuf_free(dxp->cookie);
+				dxp->cookie = NULL;
+			}
+			vq_ring_free_inorder(vq, desc_idx, 1);
 		} else {
 			desc_idx = (uint16_t)uep->id;
 			dxp = &vq->vq_descx[desc_idx];