[v5,5/8] eventdev: add Tx adapter event vector support
Checks
Commit Message
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add event vector support for the event eth Tx adapter. The implementation
receives events from a single linked queue and, based on
rte_event_vector::attr_valid, transmits the vector of mbufs to a given
port, queue pair.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
.../rte_event_eth_tx_adapter.c | 66 ++++++++++++++++---
lib/librte_eventdev/rte_eventdev.c | 5 +-
2 files changed, 60 insertions(+), 11 deletions(-)
Comments
> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Wednesday, March 24, 2021 10:35 AM
> To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>; Carrillo, Erik G <erik.g.carrillo@intel.com>; Gujjar,
> Abhinandan S <abhinandan.gujjar@intel.com>; McDaniel, Timothy <timothy.mcdaniel@intel.com>; hemant.agrawal@nxp.com; Van
> Haaren, Harry <harry.van.haaren@intel.com>; mattias.ronnblom <mattias.ronnblom@ericsson.com>; Ma, Liang J
> <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@marvell.com>
> Subject: [dpdk-dev] [PATCH v5 5/8] eventdev: add Tx adapter event vector support
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add event vector support for event eth Tx adapter, the implementation
> receives events from the single linked queue and based on
> rte_event_vector::union_valid transmits the vector of mbufs to a given
Typo: attr_valid instead of union_valid
> port, queue pair.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
> .../rte_event_eth_tx_adapter.c | 66 ++++++++++++++++---
> lib/librte_eventdev/rte_eventdev.c | 5 +-
> 2 files changed, 60 insertions(+), 11 deletions(-)
>
> diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
> index 5b4c42dcf..db260bfb6 100644
> --- a/lib/librte_eventdev/rte_event_eth_tx_adapter.c
> +++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
> @@ -510,6 +510,47 @@ txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
> stats->tx_dropped += unsent - sent;
> }
>
> +static uint16_t
> +txa_process_event_vector(struct txa_service_data *txa,
> + struct rte_event_vector *vec)
> +{
> + struct txa_service_queue_info *tqi;
> + uint16_t port, queue, nb_tx = 0;
> + struct rte_mbuf **mbufs;
> + int i;
> +
> + mbufs = (struct rte_mbuf **)vec->mbufs;
> + if (vec->attr_valid) {
> + port = vec->port;
> + queue = vec->queue;
> + tqi = txa_service_queue(txa, port, queue);
> + if (unlikely(tqi == NULL || !tqi->added)) {
> + rte_pktmbuf_free_bulk(mbufs, vec->nb_elem);
> + rte_mempool_put(rte_mempool_from_obj(vec), vec);
> + return 0;
> + }
> + for (i = 0; i < vec->nb_elem; i++) {
> + nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
> + mbufs[i]);
> + }
> + } else {
> + for (i = 0; i < vec->nb_elem; i++) {
> + port = mbufs[i]->port;
> + queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
> + tqi = txa_service_queue(txa, port, queue);
> + if (unlikely(tqi == NULL || !tqi->added)) {
> + rte_pktmbuf_free(mbufs[i]);
> + continue;
> + }
> + nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
> + mbufs[i]);
> + }
> + }
> + rte_mempool_put(rte_mempool_from_obj(vec), vec);
> +
> + return nb_tx;
> +}
> +
> static void
> txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
> uint32_t n)
> @@ -522,22 +563,27 @@ txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
>
> nb_tx = 0;
> for (i = 0; i < n; i++) {
> - struct rte_mbuf *m;
> uint16_t port;
> uint16_t queue;
> struct txa_service_queue_info *tqi;
>
> - m = ev[i].mbuf;
> - port = m->port;
> - queue = rte_event_eth_tx_adapter_txq_get(m);
> + if (!(ev[i].event_type & RTE_EVENT_TYPE_VECTOR)) {
> + struct rte_mbuf *m;
>
> - tqi = txa_service_queue(txa, port, queue);
> - if (unlikely(tqi == NULL || !tqi->added)) {
> - rte_pktmbuf_free(m);
> - continue;
> - }
> + m = ev[i].mbuf;
> + port = m->port;
> + queue = rte_event_eth_tx_adapter_txq_get(m);
>
> - nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
> + tqi = txa_service_queue(txa, port, queue);
> + if (unlikely(tqi == NULL || !tqi->added)) {
> + rte_pktmbuf_free(m);
> + continue;
> + }
> +
> + nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
> + } else {
> + nb_tx += txa_process_event_vector(txa, ev[i].vec);
> + }
> }
>
> stats->tx_packets += nb_tx;
> diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
> index 254a31b1f..ed6b5ba59 100644
> --- a/lib/librte_eventdev/rte_eventdev.c
> +++ b/lib/librte_eventdev/rte_eventdev.c
> @@ -196,7 +196,10 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
> if (caps == NULL)
> return -EINVAL;
>
> - *caps = 0;
> + if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
> + *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
> + else
> + *caps = 0;
>
> return dev->dev_ops->eth_tx_adapter_caps_get ?
> (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
> --
> 2.17.1
With changes above, looks good.
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
@@ -510,6 +510,47 @@ txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
stats->tx_dropped += unsent - sent;
}
+static uint16_t
+txa_process_event_vector(struct txa_service_data *txa,
+ struct rte_event_vector *vec)
+{
+ struct txa_service_queue_info *tqi;
+ uint16_t port, queue, nb_tx = 0;
+ struct rte_mbuf **mbufs;
+ int i;
+
+ mbufs = (struct rte_mbuf **)vec->mbufs;
+ if (vec->attr_valid) {
+ port = vec->port;
+ queue = vec->queue;
+ tqi = txa_service_queue(txa, port, queue);
+ if (unlikely(tqi == NULL || !tqi->added)) {
+ rte_pktmbuf_free_bulk(mbufs, vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(vec), vec);
+ return 0;
+ }
+ for (i = 0; i < vec->nb_elem; i++) {
+ nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
+ mbufs[i]);
+ }
+ } else {
+ for (i = 0; i < vec->nb_elem; i++) {
+ port = mbufs[i]->port;
+ queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
+ tqi = txa_service_queue(txa, port, queue);
+ if (unlikely(tqi == NULL || !tqi->added)) {
+ rte_pktmbuf_free(mbufs[i]);
+ continue;
+ }
+ nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
+ mbufs[i]);
+ }
+ }
+ rte_mempool_put(rte_mempool_from_obj(vec), vec);
+
+ return nb_tx;
+}
+
static void
txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
uint32_t n)
@@ -522,22 +563,27 @@ txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
nb_tx = 0;
for (i = 0; i < n; i++) {
- struct rte_mbuf *m;
uint16_t port;
uint16_t queue;
struct txa_service_queue_info *tqi;
- m = ev[i].mbuf;
- port = m->port;
- queue = rte_event_eth_tx_adapter_txq_get(m);
+ if (!(ev[i].event_type & RTE_EVENT_TYPE_VECTOR)) {
+ struct rte_mbuf *m;
- tqi = txa_service_queue(txa, port, queue);
- if (unlikely(tqi == NULL || !tqi->added)) {
- rte_pktmbuf_free(m);
- continue;
- }
+ m = ev[i].mbuf;
+ port = m->port;
+ queue = rte_event_eth_tx_adapter_txq_get(m);
- nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
+ tqi = txa_service_queue(txa, port, queue);
+ if (unlikely(tqi == NULL || !tqi->added)) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
+ } else {
+ nb_tx += txa_process_event_vector(txa, ev[i].vec);
+ }
}
stats->tx_packets += nb_tx;
@@ -196,7 +196,10 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
if (caps == NULL)
return -EINVAL;
- *caps = 0;
+ if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
+ else
+ *caps = 0;
return dev->dev_ops->eth_tx_adapter_caps_get ?
(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,