@@ -699,7 +699,7 @@ virtio_dev_close(struct rte_eth_dev *dev)
 
if (!hw->opened)
return 0;
- hw->opened = false;
+ hw->opened = 0;
 
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
@@ -1864,7 +1864,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
}
}
 
- hw->opened = true;
+ hw->opened = 1;
 
return 0;
 
@@ -1973,7 +1973,7 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vect
return ret;
}
 
-static bool
+static uint8_t
rx_offload_enabled(struct virtio_hw *hw)
{
return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
@@ -1981,7 +1981,7 @@ rx_offload_enabled(struct virtio_hw *hw)
vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
}
 
-static bool
+static uint8_t
tx_offload_enabled(struct virtio_hw *hw)
{
return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
@@ -2267,7 +2267,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
}
 
set_rxtx_funcs(dev);
- hw->started = true;
+ hw->started = 1;
 
/* Initialize Link state */
virtio_dev_link_update(dev, 0);
@@ -2336,7 +2336,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
rte_spinlock_lock(&hw->state_lock);
if (!hw->started)
goto out_unlock;
- hw->started = false;
+ hw->started = 0;
 
if (intr_conf->lsc || intr_conf->rxq) {
virtio_intr_disable(dev);
@@ -247,26 +247,25 @@ struct virtio_pci_ops {
struct virtio_net_config;
 
struct virtio_hw {
- struct virtnet_ctl *cvq;
- uint64_t req_guest_features;
- uint64_t guest_features;
- uint32_t max_queue_pairs;
- bool started;
- uint16_t max_mtu;
- uint16_t vtnet_hdr_size;
- uint8_t vlan_strip;
- uint8_t use_msix;
- uint8_t use_vec_rx;
- uint8_t use_vec_tx;
- uint8_t use_inorder_rx;
- uint8_t use_inorder_tx;
- uint8_t weak_barriers;
- bool has_tx_offload;
- bool has_rx_offload;
- uint16_t port_id;
- uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
- uint32_t speed; /* link speed in MB */
- uint8_t duplex;
+ struct virtqueue **vqs;
+ uint64_t guest_features;
+ uint16_t vtnet_hdr_size;
+ uint8_t started;
+ uint8_t weak_barriers;
+ uint8_t vlan_strip;
+ uint8_t has_tx_offload;
+ uint8_t has_rx_offload;
+ uint8_t use_vec_rx;
+ uint8_t use_vec_tx;
+ uint8_t use_inorder_rx;
+ uint8_t use_inorder_tx;
+ uint8_t opened;
+ uint16_t port_id;
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+ uint32_t speed; /* link speed in MB */
+ uint8_t duplex;
+ uint8_t use_msix;
+ uint16_t max_mtu;
/*
* App management thread and virtio interrupt handler thread
* both can change device state, this lock is meant to avoid
@@ -274,9 +273,9 @@ struct virtio_hw {
*/
rte_spinlock_t state_lock;
struct rte_mbuf **inject_pkts;
- bool opened;
-
- struct virtqueue **vqs;
+ uint16_t max_queue_pairs;
+ uint64_t req_guest_features;
+ struct virtnet_ctl *cvq;
};
 
struct virtio_pci_dev {
@@ -615,7 +615,7 @@ virtqueue_notify(struct virtqueue *vq)
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
struct rte_mbuf *cookie,
- bool offload)
+ uint8_t offload)
{
if (offload) {
if (cookie->ol_flags & PKT_TX_TCP_SEG)