From patchwork Wed Sep 22 15:13:59 2021
X-Patchwork-Submitter: "Naga Harish K, S V"
X-Patchwork-Id: 99438
X-Patchwork-Delegate: jerinj@marvell.com
From: Naga Harish K S V
To: jerinj@marvell.com, jay.jayatheerthan@intel.com
Cc: dev@dpdk.org
Date: Wed, 22 Sep 2021 10:13:59 -0500
Message-Id: <20210922151400.3718855-4-s.v.naga.harish.k@intel.com>
X-Mailer: git-send-email 2.23.0
In-Reply-To: <20210922151400.3718855-1-s.v.naga.harish.k@intel.com>
References: <20210921094559.1788022-1-s.v.naga.harish.k@intel.com>
 <20210922151400.3718855-1-s.v.naga.harish.k@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/5] eventdev/rx_adapter: implement per queue event buffer

This patch implements the per-queue event buffer, with the required
validations.

Signed-off-by: Naga Harish K S V
---
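
Note (reviewer aid, not part of the commit): a minimal sketch of how an
application could opt in to the per-queue event buffer added by this
series. It only uses the public API introduced in the earlier patches of
the set (rte_event_eth_rx_adapter_params::use_queue_event_buf and
rte_event_eth_rx_adapter_queue_conf::event_buf_size); the adapter id,
device ids and buffer size below are illustrative and error handling is
trimmed.

#include <rte_event_eth_rx_adapter.h>

static int
rxa_setup_with_queue_bufs(uint8_t id, uint8_t dev_id, uint16_t eth_dev_id,
			  struct rte_event_port_conf *port_conf)
{
	/* Opt in to one enqueue buffer per Rx queue; the adapter-level
	 * event_buf_size is not used in this mode.
	 */
	struct rte_event_eth_rx_adapter_params rxa_params = {
		.event_buf_size = 0,
		.use_queue_event_buf = true,
	};
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		},
		.servicing_weight = 1,
		/* Must be non-zero when use_queue_event_buf is set. */
		.event_buf_size = 1024,
	};
	int ret;

	ret = rte_event_eth_rx_adapter_create_with_params(id, dev_id,
							  port_conf,
							  &rxa_params);
	if (ret)
		return ret;

	/* rx_queue_id = -1 would add all Rx queues with this config. */
	return rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, 0,
						  &queue_conf);
}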
 lib/eventdev/rte_event_eth_rx_adapter.c | 187 +++++++++++++++++-------
 1 file changed, 138 insertions(+), 49 deletions(-)

diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 7dec9a8734..f3d5efd916 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -99,10 +99,12 @@ struct rte_event_eth_rx_adapter {
 	uint8_t rss_key_be[RSS_KEY_SIZE];
 	/* Event device identifier */
 	uint8_t eventdev_id;
-	/* Per ethernet device structure */
-	struct eth_device_info *eth_devices;
 	/* Event port identifier */
 	uint8_t event_port_id;
+	/* Flag indicating per rxq event buffer */
+	bool use_queue_event_buf;
+	/* Per ethernet device structure */
+	struct eth_device_info *eth_devices;
 	/* Lock to serialize config updates with service function */
 	rte_spinlock_t rx_lock;
 	/* Max mbufs processed in any service function invocation */
@@ -238,6 +240,7 @@ struct eth_rx_queue_info {
 	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
 	uint64_t event;
 	struct eth_rx_vector_data vector_data;
+	struct rte_eth_event_enqueue_buffer *event_buf;
 };
 
 static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
@@ -753,10 +756,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
+		       struct rte_eth_event_enqueue_buffer *buf)
 {
-	struct rte_eth_event_enqueue_buffer *buf =
-	    &rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
 
@@ -874,15 +876,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 		uint16_t eth_dev_id,
 		uint16_t rx_queue_id,
 		struct rte_mbuf **mbufs,
-		uint16_t num)
+		uint16_t num,
+		struct rte_eth_event_enqueue_buffer *buf)
 {
 	uint32_t i;
 	struct eth_device_info *dev_info =
 					&rx_adapter->eth_devices[eth_dev_id];
 	struct eth_rx_queue_info *eth_rx_queue_info =
 					&dev_info->rx_queue[rx_queue_id];
-	struct rte_eth_event_enqueue_buffer *buf =
-					&rx_adapter->event_enqueue_buffer;
 	uint16_t new_tail = buf->tail;
 	uint64_t event = eth_rx_queue_info->event;
 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -968,11 +969,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 	uint16_t queue_id,
 	uint32_t rx_count,
 	uint32_t max_rx,
-	int *rxq_empty)
+	int *rxq_empty,
+	struct rte_eth_event_enqueue_buffer *buf)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
-	struct rte_eth_event_enqueue_buffer *buf =
-	    &rx_adapter->event_enqueue_buffer;
 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
 	uint16_t n;
 
@@ -985,7 +985,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter);
+			rxa_flush_event_buffer(rx_adapter, buf);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -994,14 +994,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 				*rxq_empty = 1;
 			break;
 		}
-		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter);
+		rxa_flush_event_buffer(rx_adapter, buf);
 
 	return nb_rx;
 }
@@ -1142,7 +1142,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
 	ring_lock = &rx_adapter->intr_ring_lock;
 
 	if (buf->count >= BATCH_SIZE)
-		rxa_flush_event_buffer(rx_adapter);
+		rxa_flush_event_buffer(rx_adapter, buf);
 
 	while (rxa_pkt_buf_available(buf)) {
 		struct eth_device_info *dev_info;
@@ -1194,7 +1194,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
 					continue;
 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
 					rx_adapter->max_nb_rx,
-					&rxq_empty);
+					&rxq_empty, buf);
 				nb_rx += n;
 
 				enq_buffer_full = !rxq_empty && n == 0;
@@ -1215,7 +1215,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
 		} else {
 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
 				rx_adapter->max_nb_rx,
-				&rxq_empty);
+				&rxq_empty, buf);
 			rx_adapter->qd_valid = !rxq_empty;
 			nb_rx += n;
 			if (nb_rx > rx_adapter->max_nb_rx)
@@ -1246,13 +1246,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 {
 	uint32_t num_queue;
 	uint32_t nb_rx = 0;
-	struct rte_eth_event_enqueue_buffer *buf;
+	struct rte_eth_event_enqueue_buffer *buf = NULL;
 	uint32_t wrr_pos;
 	uint32_t max_nb_rx;
 
 	wrr_pos = rx_adapter->wrr_pos;
 	max_nb_rx = rx_adapter->max_nb_rx;
-	buf = &rx_adapter->event_enqueue_buffer;
 
 	/* Iterate through a WRR sequence */
 	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
@@ -1260,24 +1259,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
+		if (rx_adapter->use_queue_event_buf) {
+			struct eth_device_info *dev_info =
+				&rx_adapter->eth_devices[d];
+			buf = dev_info->rx_queue[qid].event_buf;
+		} else
+			buf = &rx_adapter->event_enqueue_buffer;
+
 		/* Don't do a batch dequeue from the rx queue if there isn't
 		 * enough space in the enqueue buffer.
 		 */
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter);
+			rxa_flush_event_buffer(rx_adapter, buf);
 		if (!rxa_pkt_buf_available(buf)) {
-			rx_adapter->wrr_pos = wrr_pos;
-			return nb_rx;
+			if (rx_adapter->use_queue_event_buf)
+				goto poll_next_entry;
+			else {
+				rx_adapter->wrr_pos = wrr_pos;
+				return nb_rx;
+			}
 		}
 
 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-				NULL);
+				NULL, buf);
 		if (nb_rx > max_nb_rx) {
 			rx_adapter->wrr_pos =
 				    (wrr_pos + 1) % rx_adapter->wrr_len;
 			break;
 		}
 
+poll_next_entry:
 		if (++wrr_pos == rx_adapter->wrr_len)
 			wrr_pos = 0;
 	}
@@ -1288,12 +1299,18 @@ static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
 	struct rte_event_eth_rx_adapter *rx_adapter = arg;
-	struct rte_eth_event_enqueue_buffer *buf =
-		&rx_adapter->event_enqueue_buffer;
+	struct rte_eth_event_enqueue_buffer *buf = NULL;
 	struct rte_event *ev;
 
+	if (rx_adapter->use_queue_event_buf) {
+		struct eth_device_info *dev_info =
+			&rx_adapter->eth_devices[vec->port];
+		buf = dev_info->rx_queue[vec->queue].event_buf;
+	} else
+		buf = &rx_adapter->event_enqueue_buffer;
+
 	if (buf->count)
-		rxa_flush_event_buffer(rx_adapter);
+		rxa_flush_event_buffer(rx_adapter, buf);
 
 	if (vec->vector_ev->nb_elem == 0)
 		return;
@@ -1905,9 +1922,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
 	rx_adapter->num_rx_intr -= intrq;
 	dev_info->nb_rx_intr -= intrq;
 	dev_info->nb_shared_intr -= intrq && sintrq;
+	if (rx_adapter->use_queue_event_buf) {
+		struct rte_eth_event_enqueue_buffer *event_buf =
+			dev_info->rx_queue[rx_queue_id].event_buf;
+		rte_free(event_buf->events);
+		rte_free(event_buf);
+		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+	}
 }
 
-static void
+static int
 rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	struct eth_device_info *dev_info,
 	int32_t rx_queue_id,
@@ -1919,15 +1943,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	int intrq;
 	int sintrq;
 	struct rte_event *qi_ev;
+	struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL;
+	uint16_t eth_dev_id = dev_info->dev->data->port_id;
+	int ret;
 
 	if (rx_queue_id == -1) {
 		uint16_t nb_rx_queues;
 		uint16_t i;
 
 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
-		for (i = 0; i < nb_rx_queues; i++)
-			rxa_add_queue(rx_adapter, dev_info, i, conf);
-		return;
+		for (i = 0; i < nb_rx_queues; i++) {
+			ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
+			if (ret)
+				return ret;
+		}
+		return 0;
 	}
 
 	pollq = rxa_polled_queue(dev_info, rx_queue_id);
@@ -1990,6 +2020,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 			dev_info->next_q_idx = 0;
 		}
 	}
+
+	if (!rx_adapter->use_queue_event_buf)
+		return 0;
+
+	new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
+				sizeof(*new_rx_buf), 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (new_rx_buf == NULL) {
+		RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
+				 "dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
+	new_rx_buf->events_size += (2 * BATCH_SIZE);
+	new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
+				sizeof(struct rte_event) *
+				new_rx_buf->events_size, 0,
+				rte_eth_dev_socket_id(eth_dev_id));
+	if (new_rx_buf->events == NULL) {
+		rte_free(new_rx_buf);
+		RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
+				 "dev_id: %d queue_id: %d",
+				 eth_dev_id, rx_queue_id);
+		return -ENOMEM;
+	}
+
+	queue_info->event_buf = new_rx_buf;
+
+	return 0;
 }
 
 static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
@@ -2018,6 +2079,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 			temp_conf.servicing_weight = 1;
 		}
 		queue_conf = &temp_conf;
+
+		if (queue_conf->servicing_weight == 0 &&
+		    rx_adapter->use_queue_event_buf) {
+
+			RTE_EDEV_LOG_ERR("Use of queue level event buffer "
+					 "not supported for interrupt queues "
+					 "dev_id: %d queue_id: %d",
+					 eth_dev_id, rx_queue_id);
+			return -EINVAL;
+		}
 	}
 
 	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
@@ -2097,7 +2168,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
-	rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+	ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+	if (ret)
+		goto err_free_rxqueue;
 	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
 
 	rte_free(rx_adapter->eth_rx_poll);
@@ -2118,7 +2191,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 	rte_free(rx_poll);
 	rte_free(rx_wrr);
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -2244,20 +2317,26 @@ rxa_create(uint8_t id, uint8_t dev_id,
 		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
 
 	/* Rx adapter event buffer allocation */
-	buf = &rx_adapter->event_enqueue_buffer;
-	buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
-
-	events = rte_zmalloc_socket(rx_adapter->mem_name,
-				    buf->events_size * sizeof(*events),
-				    0, socket_id);
-	if (events == NULL) {
-		RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n");
-		rte_free(rx_adapter->eth_devices);
-		rte_free(rx_adapter);
-		return -ENOMEM;
-	}
+	rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
+
+	if (!rx_adapter->use_queue_event_buf) {
+		buf = &rx_adapter->event_enqueue_buffer;
+		buf->events_size = RTE_ALIGN(rxa_params->event_buf_size,
+					     BATCH_SIZE);
+
+		events = rte_zmalloc_socket(rx_adapter->mem_name,
+					    buf->events_size * sizeof(*events),
+					    0, socket_id);
+		if (events == NULL) {
+			RTE_EDEV_LOG_ERR("Failed to allocate memory "
+					 "for adapter event buffer");
+			rte_free(rx_adapter->eth_devices);
+			rte_free(rx_adapter);
+			return -ENOMEM;
+		}
 
-	rx_adapter->event_enqueue_buffer.events = events;
+		rx_adapter->event_enqueue_buffer.events = events;
+	}
 
 	event_eth_rx_adapter[id] = rx_adapter;
@@ -2277,6 +2356,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 
 	/* use default values for adapter params */
 	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
+	rxa_params.use_queue_event_buf = false;
 
 	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
 }
@@ -2297,9 +2377,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
 	if (rxa_params == NULL) {
 		rxa_params = &temp_params;
 		rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
-	}
-
-	if (rxa_params->event_buf_size == 0)
+		rxa_params->use_queue_event_buf = false;
+	} else if ((!rxa_params->use_queue_event_buf &&
+		   rxa_params->event_buf_size == 0))
 		return -EINVAL;
 
 	pc = rte_malloc(NULL, sizeof(*pc), 0);
@@ -2368,7 +2448,8 @@ rte_event_eth_rx_adapter_free(uint8_t id)
 	if (rx_adapter->default_cb_arg)
 		rte_free(rx_adapter->conf_arg);
 	rte_free(rx_adapter->eth_devices);
-	rte_free(rx_adapter->event_enqueue_buffer.events);
+	if (!rx_adapter->use_queue_event_buf)
+		rte_free(rx_adapter->event_enqueue_buffer.events);
 	rte_free(rx_adapter);
 	event_eth_rx_adapter[id] = NULL;
@@ -2472,6 +2553,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 		return -EINVAL;
 	}
 
+	if ((rx_adapter->use_queue_event_buf &&
+	     queue_conf->event_buf_size == 0) ||
+	    (!rx_adapter->use_queue_event_buf &&
+	     queue_conf->event_buf_size != 0)) {
+		RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
+		return -EINVAL;
+	}
+
 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
 
 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {