[dpdk-dev,v2] ethdev: Rename RX/TX enable queue field for queue start and stop
Commit Message
Update the comments for the start_rx_per_q field for better readability.
Rename the field to rx_enable_queue, also for readability.
Update its references in the vhost sample accordingly.
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
examples/vhost/main.c | 4 ++--
lib/librte_ether/rte_ethdev.h | 16 ++++++++++++++--
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 8 ++++----
lib/librte_pmd_ixgbe/ixgbe_rxtx.h | 4 ++--
4 files changed, 22 insertions(+), 10 deletions(-)
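For context, a minimal usage sketch (not part of this patch) of the deferred per-queue start described above, using the renamed field and the per-queue start API from rte_ethdev.h; the port/queue identifiers, descriptor count and mempool are assumed to be set up elsewhere, and error handling is omitted.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Sketch only: rte_eth_dev_start() leaves the flagged queue stopped, and a
 * later rte_eth_dev_rx_queue_start() allocates the mbufs and programs the
 * descriptor DMA addresses. */
static void
deferred_rx_start(uint8_t port_id, uint16_t queue_id,
		  uint16_t nb_rxd, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_rxconf rx_conf;

	memset(&rx_conf, 0, sizeof(rx_conf));
	rx_conf.rx_enable_queue = 1; /* do not start this queue in rte_eth_dev_start() */

	rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
			       rte_eth_dev_socket_id(port_id),
			       &rx_conf, mbuf_pool);
	rte_eth_dev_start(port_id);

	/* Later, once buffer addresses are known (e.g. after the guest has
	 * started in the vhost zero-copy case): */
	rte_eth_dev_rx_queue_start(port_id, queue_id);
}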
Comments
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ouyang Changchun
> Sent: Wednesday, July 23, 2014 12:48 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2] ethdev: Rename RX/TX enable queue field
> for queue start and stop
>
> Update comments for the field start_rx_per_q for better readability.
> Rename the field name to rx_enable_queue for better readability too.
> Accordingly Update its reference in sample vhost.
>
> Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
Acked-by: Chen Jing (Mark) <jing.d.chen@intel.com>
Hi Ouyang,
2014-07-23 12:48, Ouyang Changchun:
> Update comments for the field start_rx_per_q for better readability.
> Rename the field name to rx_enable_queue for better readability too.
> Accordingly Update its reference in sample vhost.
> - uint8_t start_rx_per_q; /**< start rx per queue. */
> + /**< If rx_enable_queue is true, rte_eth_dev_rx_queue_start should be
> + invocated to start RX for one queue after rte_eth_dev_start is
> + invocated, and rte_eth_dev_rx_queue_start instead of
> + rte_eth_dev_start is responsible for allocating mbuf from
> + mempool and setup the DMA physical address. It is useful in
> + such scenario: buffer address is not available at the point of
> + rte_eth_dev_start's invocating but available later, e.g. in
> + VHOST zero copy case, the buffer address used to setup DMA
> + address is available only after one VM(guest) startup. */
> + uint8_t rx_enable_queue;
> };
I have many comments here.
The doxygen comment /**< must be used only after the symbol you are commenting on:
http://doxygen.org/manual/docblocks.html#memberdoc
The comment is too long.
The use case would be in the manual, not in doxygen.
The comment about rte_eth_dev_rx_queue_start would be in the doxygen
comment of rte_eth_dev_rx_queue_start.
When this variable is set, it doesn't enable anything. It only disables
the queue when doing a global start. Its name should be rx_deferred_start.
All fields of this structure are about one queue, so the "queue" word is
not needed.
> - uint8_t start_tx_per_q; /**< start tx per queue. */
> + /**< If tx_enable_queue is true, rte_eth_dev_tx_queue_start must be
> + invocated to start TX for one queue after rte_eth_dev_start is
> + invocated. Refer to start_rx_per_q for the use case. */
> + uint8_t tx_enable_queue;
> };
You refer to the old name (start_rx_per_q).
By the way, a one line description (without referral) for both fields should
be enough. Something like "do not start with rte_eth_dev_start()".
> @@ -3652,13 +3652,13 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
>
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> txq = dev->data->tx_queues[i];
> - if (!txq->start_tx_per_q)
> + if (!txq->tx_enable_queue)
> ixgbe_dev_tx_queue_start(dev, i);
> }
Here it's clear that this field is about disabling start.
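With such a rename, the check quoted above would read, for example (hypothetical field name):

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq->tx_deferred_start)
			ixgbe_dev_tx_queue_start(dev, i);
	}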
Please rework this patch and update i40e accordingly.
Thanks
Hi Thomas,
Thanks very much for your comments!
I will rework this patch and update i40e as well.
Thanks and best regards,
Changchun
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 193aa25..2eea431 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -2984,9 +2984,9 @@ MAIN(int argc, char *argv[])
char pool_name[RTE_MEMPOOL_NAMESIZE];
char ring_name[RTE_MEMPOOL_NAMESIZE];
- rx_conf_default.start_rx_per_q = (uint8_t)zero_copy;
+ rx_conf_default.rx_enable_queue = (uint8_t)zero_copy;
rx_conf_default.rx_drop_en = 0;
- tx_conf_default.start_tx_per_q = (uint8_t)zero_copy;
+ tx_conf_default.tx_enable_queue = (uint8_t)zero_copy;
nb_mbuf = num_rx_descriptor
+ num_switching_cores * MBUF_CACHE_SIZE_ZCP
+ num_switching_cores * MAX_PKT_BURST;
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 50df654..ba439f6 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -604,7 +604,16 @@ struct rte_eth_rxconf {
struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
- uint8_t start_rx_per_q; /**< start rx per queue. */
+ /**< If rx_enable_queue is true, rte_eth_dev_rx_queue_start should be
+ invocated to start RX for one queue after rte_eth_dev_start is
+ invocated, and rte_eth_dev_rx_queue_start instead of
+ rte_eth_dev_start is responsible for allocating mbuf from
+ mempool and setup the DMA physical address. It is useful in
+ such scenario: buffer address is not available at the point of
+ rte_eth_dev_start's invocating but available later, e.g. in
+ VHOST zero copy case, the buffer address used to setup DMA
+ address is available only after one VM(guest) startup. */
+ uint8_t rx_enable_queue;
};
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
@@ -625,7 +634,10 @@ struct rte_eth_txconf {
uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
uint32_t txq_flags; /**< Set flags for the Tx queue */
- uint8_t start_tx_per_q; /**< start tx per queue. */
+ /**< If tx_enable_queue is true, rte_eth_dev_tx_queue_start must be
+ invocated to start TX for one queue after rte_eth_dev_start is
+ invocated. Refer to start_rx_per_q for the use case. */
+ uint8_t tx_enable_queue;
};
/**
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index dfc2076..2872fad 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1846,7 +1846,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->ops = &def_txq_ops;
- txq->start_tx_per_q = tx_conf->start_tx_per_q;
+ txq->tx_enable_queue = tx_conf->tx_enable_queue;
/*
* Modification to set VFTDT for virtual function if vf is detected
@@ -2091,7 +2091,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
- rxq->start_rx_per_q = rx_conf->start_rx_per_q;
+ rxq->rx_enable_queue = rx_conf->rx_enable_queue;
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
@@ -3652,13 +3652,13 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (!txq->start_tx_per_q)
+ if (!txq->tx_enable_queue)
ixgbe_dev_tx_queue_start(dev, i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->start_rx_per_q)
+ if (!rxq->rx_enable_queue)
ixgbe_dev_rx_queue_start(dev, i);
}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 64c0695..d6d856e 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -135,7 +135,7 @@ struct igb_rx_queue {
uint8_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
- uint8_t start_rx_per_q;
+ uint8_t rx_enable_queue;
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
@@ -200,7 +200,7 @@ struct igb_tx_queue {
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
struct ixgbe_txq_ops *ops; /**< txq ops */
- uint8_t start_tx_per_q;
+ uint8_t tx_enable_queue;
};
struct ixgbe_txq_ops {