From patchwork Thu Aug 4 10:36:21 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Volodymyr Fialko X-Patchwork-Id: 114607 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0049EA00C4; Thu, 4 Aug 2022 12:36:43 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id C55CF42BD5; Thu, 4 Aug 2022 12:36:42 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 74C5942BD5 for ; Thu, 4 Aug 2022 12:36:41 +0200 (CEST) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.5/8.17.1.5) with ESMTP id 274782VK012693; Thu, 4 Aug 2022 03:36:40 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=JvIsMMgXKHYhhMlyryDsEr2ygLOVqXUdHYjEuibxdPs=; b=T8gejAgS9LhWzs9hfhVn1q77nxhLMCwk6mSIhnHt/kX4kXDYWGIDWrvWJND6NVMgkScd OxjdYxKNAnu/gh2PORPG0rO+wboVNeq2nEuYhYnddFcUEoLu5mwwPGOUWvRfXj6TZXfT vjhJ10CDpjmPhuE0NdndF75hntaECTA+hd9h5DwAltYALy9HxF69xzwYcBgRWUyA9bn8 VHUlIW2wYQjmBOlSyyYfVa381y9ESHb3nwyK3JaNv4LUSINIoFjSOep1tzNI38qFhY01 +WLOv9JeC1QBuC8L7xRUf125baRR01pVA3HnI4HNk/4WW7tu69BY382epIDif0zrpkKf gQ== Received: from dc5-exch02.marvell.com ([199.233.59.182]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3hqp04n5yh-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Thu, 04 Aug 2022 03:36:40 -0700 Received: from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Thu, 4 Aug 2022 03:36:39 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend Transport; Thu, 4 Aug 2022 03:36:39 -0700 Received: from localhost.localdomain (unknown [10.28.34.39]) by maili.marvell.com (Postfix) with ESMTP id 0D5263F7057; Thu, 4 Aug 2022 03:36:36 -0700 (PDT) From: Volodymyr Fialko To: , Radu Nicolau , Akhil Goyal CC: , , Volodymyr Fialko Subject: [PATCH 1/6] examples/ipsec-secgw: add event crypto adapter init Date: Thu, 4 Aug 2022 12:36:21 +0200 Message-ID: <20220804103626.102688-2-vfialko@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com> References: <20220804103626.102688-1-vfialko@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: XDSmZsIw6-_VxpIRhysRHVE22aV-xPVO X-Proofpoint-ORIG-GUID: XDSmZsIw6-_VxpIRhysRHVE22aV-xPVO X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.883,Hydra:6.0.517,FMLib:17.11.122.1 definitions=2022-08-04_03,2022-08-04_01,2022-06-22_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Create, configure and start an event crypto adapter. This adapter will be used in lookaside event mode processing. 
Signed-off-by: Volodymyr Fialko --- examples/ipsec-secgw/event_helper.c | 144 ++++++++++++++++++++++++++++ examples/ipsec-secgw/event_helper.h | 2 + examples/ipsec-secgw/ipsec-secgw.c | 44 ++++++--- 3 files changed, 175 insertions(+), 15 deletions(-) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index b36f20a3fd..6b00a21b6a 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -2,8 +2,10 @@ * Copyright (C) 2020 Marvell International Ltd. */ #include +#include #include #include +#include #include #include #include @@ -742,6 +744,126 @@ eh_start_eventdev(struct eventmode_conf *em_conf) return 0; } +static int +eh_initialize_crypto_adapter(struct eventmode_conf *em_conf) +{ + struct rte_event_dev_info evdev_default_conf = {0}; + struct rte_event_port_conf port_conf = {0}; + struct eventdev_params *eventdev_config; + uint8_t eventdev_id, cdev_id, n; + uint32_t cap; + int ret; + + if (!em_conf->enable_event_crypto_adapter) + return 0; + + /* + * More then one eventdev is not supported, + * all event crypto adapters will be assigned to one eventdev + */ + RTE_ASSERT(em_conf->nb_eventdev == 1); + + /* Get event device configuration */ + eventdev_config = &(em_conf->eventdev_config[0]); + eventdev_id = eventdev_config->eventdev_id; + + n = rte_cryptodev_count(); + + for (cdev_id = 0; cdev_id != n; cdev_id++) { + /* Check event's crypto capabilities */ + ret = rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap); + if (ret < 0) { + EH_LOG_ERR("Failed to get event device's crypto capabilities %d", ret); + return ret; + } + + if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) { + EH_LOG_ERR("Event crypto adapter does not support forward mode!"); + return -EINVAL; + } + + /* Create event crypto adapter */ + + /* Get default configuration of event dev */ + ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to get event dev info %d", ret); + return ret; + } + + /* Setup port conf */ + port_conf.new_event_threshold = + evdev_default_conf.max_num_events; + port_conf.dequeue_depth = + evdev_default_conf.max_event_port_dequeue_depth; + port_conf.enqueue_depth = + evdev_default_conf.max_event_port_enqueue_depth; + + /* Create adapter */ + ret = rte_event_crypto_adapter_create(cdev_id, eventdev_id, + &port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD); + if (ret < 0) { + EH_LOG_ERR("Failed to create event crypto adapter %d", ret); + return ret; + } + + /* Add crypto queue pairs to event crypto adapter */ + ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id, + -1, /* adds all the pre configured queue pairs to the instance */ + NULL); + if (ret < 0) { + EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret); + return ret; + } + } + + return 0; +} + +static int +eh_start_crypto_adapter(struct eventmode_conf *em_conf) +{ + uint8_t cdev_id, n; + int ret; + + if (!em_conf->enable_event_crypto_adapter) + return 0; + + n = rte_cryptodev_count(); + for (cdev_id = 0; cdev_id != n; cdev_id++) { + ret = rte_event_crypto_adapter_start(cdev_id); + if (ret < 0) { + EH_LOG_ERR("Failed to start event crypto device %d (%d)", + cdev_id, ret); + return ret; + } + } + + return 0; +} + +static int +eh_stop_crypto_adapter(struct eventmode_conf *em_conf) +{ + uint8_t cdev_id, n; + int ret; + + if (!em_conf->enable_event_crypto_adapter) + return 0; + + n = rte_cryptodev_count(); + for (cdev_id = 0; cdev_id != n; cdev_id++) { + ret = 
rte_event_crypto_adapter_stop(cdev_id); + if (ret < 0) { + EH_LOG_ERR("Failed to stop event crypto device %d (%d)", + cdev_id, ret); + return ret; + } + } + + return 0; +} + static int eh_event_vector_limits_validate(struct eventmode_conf *em_conf, uint8_t ev_dev_id, uint8_t ethdev_id) @@ -1695,6 +1817,13 @@ eh_devs_init(struct eh_conf *conf) return ret; } + /* Setup event crypto adapter */ + ret = eh_initialize_crypto_adapter(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to start event dev %d", ret); + return ret; + } + /* Setup Rx adapter */ ret = eh_initialize_rx_adapter(em_conf); if (ret < 0) { @@ -1716,6 +1845,14 @@ eh_devs_init(struct eh_conf *conf) return ret; } + /* Start event crypto adapter */ + ret = eh_start_crypto_adapter(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to start event crypto dev %d", ret); + return ret; + } + + /* Start eth devices after setting up adapter */ RTE_ETH_FOREACH_DEV(port_id) { @@ -1786,6 +1923,13 @@ eh_devs_uninit(struct eh_conf *conf) } } + /* Stop event crypto adapter */ + ret = eh_stop_crypto_adapter(em_conf); + if (ret < 0) { + EH_LOG_ERR("Failed to start event crypto dev %d", ret); + return ret; + } + /* Stop and release event devices */ for (i = 0; i < em_conf->nb_eventdev; i++) { diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h index f3cbe57cb3..4b26dc8fc2 100644 --- a/examples/ipsec-secgw/event_helper.h +++ b/examples/ipsec-secgw/event_helper.h @@ -185,6 +185,8 @@ struct eventmode_conf { /**< Max vector timeout in nanoseconds */ uint64_t vector_pool_sz; /**< Vector pool size */ + bool enable_event_crypto_adapter; + /**< Enables event crypto adapter related configuration */ }; /** diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 815b9254ae..4ca5936bdf 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -2094,7 +2095,7 @@ max_session_size(void) } static void -session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz) +session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz, size_t user_data_sz) { char mp_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *sess_mp; @@ -2107,8 +2108,8 @@ session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz) nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ * CDEV_MP_CACHE_MULTIPLIER); sess_mp = rte_cryptodev_sym_session_pool_create( - mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0, - socket_id); + mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, + user_data_sz, socket_id); ctx->session_pool = sess_mp; if (ctx->session_pool == NULL) @@ -2441,7 +2442,8 @@ signal_handler(int signum) } static void -ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa) +ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa, + struct eventmode_conf *em_conf) { struct rte_ipsec_session *ips; int32_t i; @@ -2451,9 +2453,11 @@ ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa) for (i = 0; i < nb_sa; i++) { ips = ipsec_get_primary_session(&sa[i]); - if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) - rte_exit(EXIT_FAILURE, "Event mode supports only " - "inline protocol sessions\n"); + if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) + em_conf->enable_event_crypto_adapter = true; + else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + rte_exit(EXIT_FAILURE, "Event mode supports inline " + "and lookaside protocol sessions\n"); } } @@ -2486,13 +2490,12 @@ 
check_event_mode_params(struct eh_conf *eh_conf) em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; /* - * Event mode currently supports only inline protocol sessions. - * If there are other types of sessions configured then exit with - * error. + * Event mode currently supports inline and lookaside protocol + * sessions. If there are other types of sessions configured then exit + * with error. */ - ev_mode_sess_verify(sa_in, nb_sa_in); - ev_mode_sess_verify(sa_out, nb_sa_out); - + ev_mode_sess_verify(sa_in, nb_sa_in, em_conf); + ev_mode_sess_verify(sa_out, nb_sa_out, em_conf); /* Option --config does not apply to event mode */ if (nb_lcore_params > 0) { @@ -2925,7 +2928,7 @@ main(int32_t argc, char **argv) uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; struct eh_conf *eh_conf = NULL; - size_t sess_sz; + size_t sess_sz, user_data_sz; nb_bufs_in_pool = 0; @@ -2991,6 +2994,16 @@ main(int32_t argc, char **argv) else nb_crypto_qp = 0; + /* + * In event lookaside mode request memory for crypto metadata. Should + * be removed once API will no longer require usage of user data in + * DPDK 22.11 + */ + if (((struct eventmode_conf *)(eh_conf->mode_params))->enable_event_crypto_adapter) + user_data_sz = sizeof(union rte_event_crypto_metadata); + else + user_data_sz = 0; + nb_crypto_qp = cryptodevs_init(nb_crypto_qp); if (nb_bufs_in_pool == 0) { @@ -3032,7 +3045,8 @@ main(int32_t argc, char **argv) if (socket_ctx[socket_id].session_pool) continue; - session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); + session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz, + user_data_sz); session_priv_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); } From patchwork Thu Aug 4 10:36:22 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Volodymyr Fialko X-Patchwork-Id: 114608 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E12DDA00C4; Thu, 4 Aug 2022 12:36:48 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 27CD242BE1; Thu, 4 Aug 2022 12:36:47 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id CD33942BDE for ; Thu, 4 Aug 2022 12:36:44 +0200 (CEST) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.5/8.17.1.5) with ESMTP id 274782VL012693; Thu, 4 Aug 2022 03:36:44 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=yr+hulEUR2NXx4UrnLd5HhxB/WlSTZ9g5wR9m1VzU58=; b=aPPlrAIVQHh9Haa/f2P2xgjL6mOpIcgCy6Lz+cFKOscfDoVyslvGa+Le+5qOdIT2dzWi 3cB8o8s2Cq73VQQGfBWQJx/TpC35Ztiuffeap/W1DEpQxOU6RTG97YJUCPV75hEizOVQ dhSqs/+uA8hhxSet5X06J5aNEBih9BWERbqKjegLiJt6Y4R0RWRkapWoPeRTESI1S9Z+ e+LhPuf6/iMvvdw84CY3pSA99aXSDQ2iTWhitY5St034y3YH3QqK/kkWpq8UIHHSdCj7 F5MBfPydpK5fQW/ZLlbLqGvcq7j41gig7BkHeskrL60rgzDyKRkqY2A6PRFfU8UVJBgJ Xg== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3hqp04n5yv-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Thu, 04 Aug 2022 03:36:43 -0700 
Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Thu, 4 Aug 2022 03:36:42 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend Transport; Thu, 4 Aug 2022 03:36:42 -0700 Received: from localhost.localdomain (unknown [10.28.34.39]) by maili.marvell.com (Postfix) with ESMTP id 81E343F705C; Thu, 4 Aug 2022 03:36:40 -0700 (PDT) From: Volodymyr Fialko To: , Radu Nicolau , Akhil Goyal CC: , , Volodymyr Fialko Subject: [PATCH 2/6] examples/ipsec-secgw: add queue for event crypto adapter Date: Thu, 4 Aug 2022 12:36:22 +0200 Message-ID: <20220804103626.102688-3-vfialko@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com> References: <20220804103626.102688-1-vfialko@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: O6WyNFid2ENbpeS9obaty68kbsLKFPtD X-Proofpoint-ORIG-GUID: O6WyNFid2ENbpeS9obaty68kbsLKFPtD X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.883,Hydra:6.0.517,FMLib:17.11.122.1 definitions=2022-08-04_03,2022-08-04_01,2022-06-22_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add separate event queue for event crypto adapter processing, to resolve queue contention between new and already processed events. Signed-off-by: Volodymyr Fialko --- examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++-------- examples/ipsec-secgw/event_helper.h | 2 + 2 files changed, 71 insertions(+), 26 deletions(-) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index 6b00a21b6a..9c20a05da8 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -17,6 +17,8 @@ #define DEFAULT_VECTOR_SIZE 16 #define DEFAULT_VECTOR_TMO 102400 +#define INVALID_EV_QUEUE_ID -1 + static volatile bool eth_core_running; static int @@ -151,11 +153,10 @@ eh_dev_has_burst_mode(uint8_t dev_id) } static int -eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) +eh_set_nb_eventdev(struct eventmode_conf *em_conf) { - int lcore_count, nb_eventdev, nb_eth_dev, ret; struct eventdev_params *eventdev_config; - struct rte_event_dev_info dev_info; + int nb_eventdev; /* Get the number of event devices */ nb_eventdev = rte_event_dev_count(); @@ -170,6 +171,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) return -EINVAL; } + /* Set event dev id*/ + eventdev_config = &(em_conf->eventdev_config[0]); + eventdev_config->eventdev_id = 0; + + /* Update the number of event devices */ + em_conf->nb_eventdev = 1; + + return 0; +} + +static int +eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) +{ + int lcore_count, nb_eth_dev, ret; + struct eventdev_params *eventdev_config; + struct rte_event_dev_info dev_info; + /* Get the number of eth devs */ nb_eth_dev = rte_eth_dev_count_avail(); if (nb_eth_dev == 0) { @@ -197,15 +215,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) eventdev_config = &(em_conf->eventdev_config[0]); /* Save number of queues & ports available */ - eventdev_config->eventdev_id = 0; - eventdev_config->nb_eventqueue = dev_info.max_event_queues; + eventdev_config->nb_eventqueue = nb_eth_dev; eventdev_config->nb_eventport = dev_info.max_event_ports; eventdev_config->ev_queue_mode 
= RTE_EVENT_QUEUE_CFG_ALL_TYPES; - /* Check if there are more queues than required */ - if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) { - /* One queue is reserved for Tx */ - eventdev_config->nb_eventqueue = nb_eth_dev + 1; + /* One queue is reserved for Tx */ + eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID; + if (eventdev_config->all_internal_ports) { + if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) { + EH_LOG_ERR("Not enough event queues available"); + return -EINVAL; + } + eventdev_config->tx_queue_id = + eventdev_config->nb_eventqueue++; + } + + /* One queue is reserved for event crypto adapter */ + eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID; + if (em_conf->enable_event_crypto_adapter) { + if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) { + EH_LOG_ERR("Not enough event queues available"); + return -EINVAL; + } + eventdev_config->ev_cpt_queue_id = + eventdev_config->nb_eventqueue++; } /* Check if there are more ports than required */ @@ -214,9 +247,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) eventdev_config->nb_eventport = lcore_count; } - /* Update the number of event devices */ - em_conf->nb_eventdev++; - return 0; } @@ -245,15 +275,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf) /* * If Rx & Tx internal ports are supported by all event devices then - * eth cores won't be required. Override the eth core mask requested - * and decrement number of event queues by one as it won't be needed - * for Tx. + * eth cores won't be required. Override the eth core mask requested. */ - if (all_internal_ports) { + if (all_internal_ports) rte_bitmap_reset(em_conf->eth_core_mask); - for (i = 0; i < em_conf->nb_eventdev; i++) - em_conf->eventdev_config[i].nb_eventqueue--; - } } static int @@ -370,6 +395,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf) eventdev_config->nb_eventqueue : eventdev_config->nb_eventqueue - 1; + /* Reserve one queue for event crypto adapter */ + if (em_conf->enable_event_crypto_adapter) + nb_eventqueue--; + /* * Map all queues of eth device (port) to an event queue. If there * are more event queues than eth ports then create 1:1 mapping. @@ -541,14 +570,18 @@ eh_validate_conf(struct eventmode_conf *em_conf) * and initialize the config with all ports & queues available */ if (em_conf->nb_eventdev == 0) { + ret = eh_set_nb_eventdev(em_conf); + if (ret != 0) + return ret; + eh_do_capability_check(em_conf); ret = eh_set_default_conf_eventdev(em_conf); if (ret != 0) return ret; + } else { + /* Perform capability check for the selected event devices */ + eh_do_capability_check(em_conf); } - /* Perform capability check for the selected event devices */ - eh_do_capability_check(em_conf); - /* * Check if links are specified. Else generate a default config for * the event ports used. @@ -594,8 +627,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf) uint8_t *queue = NULL; uint8_t eventdev_id; int nb_eventqueue; - uint8_t i, j; - int ret; + int ret, j; + uint8_t i; for (i = 0; i < nb_eventdev; i++) { @@ -657,14 +690,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf) * stage if event device does not have internal * ports. This will be an atomic queue. 
*/ - if (!eventdev_config->all_internal_ports && - j == nb_eventqueue-1) { + if (j == eventdev_config->tx_queue_id) { eventq_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC; } else { eventq_conf.schedule_type = em_conf->ext_params.sched_type; } + /* + * Give event crypto device's queue higher priority then Rx queues. This + * will allow crypto events to be processed with highest priority. + */ + if (j == eventdev_config->ev_cpt_queue_id) { + eventq_conf.priority = + RTE_EVENT_DEV_PRIORITY_HIGHEST; + } else { + eventq_conf.priority = + RTE_EVENT_DEV_PRIORITY_NORMAL; + } /* Set max atomic flows to 1024 */ eventq_conf.nb_atomic_flows = 1024; diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h index 4b26dc8fc2..af5cfcf794 100644 --- a/examples/ipsec-secgw/event_helper.h +++ b/examples/ipsec-secgw/event_helper.h @@ -88,6 +88,8 @@ struct eventdev_params { uint8_t nb_eventport; uint8_t ev_queue_mode; uint8_t all_internal_ports; + int tx_queue_id; + int ev_cpt_queue_id; }; /** From patchwork Thu Aug 4 10:36:23 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Volodymyr Fialko X-Patchwork-Id: 114609 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 13E9DA00C4; Thu, 4 Aug 2022 12:36:54 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4FAC442BE6; Thu, 4 Aug 2022 12:36:51 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 9CF3B4281B for ; Thu, 4 Aug 2022 12:36:49 +0200 (CEST) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.5/8.17.1.5) with ESMTP id 2746toYa012702; Thu, 4 Aug 2022 03:36:48 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=KKUOn867nMaB8+isYjjHjfHRROZ1gWac7mZ5ykuF0Yg=; b=K3QTGJlDv1xndRRN/otigVcHP1eI1f9BGzlBuZODR9k7SoKMC+w3f6JenR5dkB20HGU6 BZA+NzuEiO8FySS+iuyWB90veNFR4/XhyqdO9Slj1kcMHLY2ftEv41+QHKg6mRQYwuvu f7LtdyeB/go7ji7eJ/m8zrJ0cs7NOWwUoeYwyMn2c1FjtBO6gDt4BnBjZwKxW4QD+qtD Ty8MCSGNyMTkCz9DOLEIveQqeSP2eRi+4TjjgN7aP9uDQhiblCSfx1eW9x+6PUlVuKsX K6t2J7w46j/KoqeamrwXN5QteFQEfkuIEfY/d2R2an32CRluPMvpAZgTnnJlY4SxonQB Ag== Received: from dc5-exch02.marvell.com ([199.233.59.182]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3hqp04n608-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Thu, 04 Aug 2022 03:36:48 -0700 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Thu, 4 Aug 2022 03:36:47 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend Transport; Thu, 4 Aug 2022 03:36:47 -0700 Received: from localhost.localdomain (unknown [10.28.34.39]) by maili.marvell.com (Postfix) with ESMTP id 2FE403F705C; Thu, 4 Aug 2022 03:36:44 -0700 (PDT) From: Volodymyr Fialko To: , Radu Nicolau , Akhil Goyal CC: , , Volodymyr Fialko Subject: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode Date: Thu, 4 Aug 2022 12:36:23 +0200 Message-ID: 
<20220804103626.102688-4-vfialko@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com> References: <20220804103626.102688-1-vfialko@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: esik8_lflTvP-2jRrSWkmcnuIRbTrAcH X-Proofpoint-ORIG-GUID: esik8_lflTvP-2jRrSWkmcnuIRbTrAcH X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.883,Hydra:6.0.517,FMLib:17.11.122.1 definitions=2022-08-04_03,2022-08-04_01,2022-06-22_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add base support for lookaside event mode. Events that are coming from ethdev will be enqueued to the event crypto adapter, processed and enqueued back to ethdev for the transmission. Signed-off-by: Volodymyr Fialko --- doc/guides/sample_app_ug/ipsec_secgw.rst | 4 +- examples/ipsec-secgw/ipsec-secgw.c | 3 +- examples/ipsec-secgw/ipsec.c | 35 +++- examples/ipsec-secgw/ipsec.h | 8 +- examples/ipsec-secgw/ipsec_worker.c | 224 +++++++++++++++++++++-- examples/ipsec-secgw/sa.c | 23 ++- 6 files changed, 262 insertions(+), 35 deletions(-) diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst index 07686d2285..c7b87889f1 100644 --- a/doc/guides/sample_app_ug/ipsec_secgw.rst +++ b/doc/guides/sample_app_ug/ipsec_secgw.rst @@ -83,8 +83,8 @@ The application supports two modes of operation: poll mode and event mode. every type of event device without affecting existing paths/use cases. The worker to be used will be determined by the operating conditions and the underlying device capabilities. **Currently the application provides non-burst, internal port worker - threads and supports inline protocol only.** It also provides infrastructure for - non-internal port however does not define any worker threads. + threads.** It also provides infrastructure for non-internal port however does not + define any worker threads. Event mode also supports event vectorization. 
The event devices, ethernet device pairs which support the capability ``RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR`` can diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 4ca5936bdf..0bd1f15ae5 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -3121,7 +3121,8 @@ main(int32_t argc, char **argv) if ((socket_ctx[socket_id].session_pool != NULL) && (socket_ctx[socket_id].sa_in == NULL) && (socket_ctx[socket_id].sa_out == NULL)) { - sa_init(&socket_ctx[socket_id], socket_id, lcore_conf); + sa_init(&socket_ctx[socket_id], socket_id, lcore_conf, + eh_conf->mode_params); sp4_init(&socket_ctx[socket_id], socket_id); sp6_init(&socket_ctx[socket_id], socket_id); rt_init(&socket_ctx[socket_id], socket_id); diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index 7b7bfff696..030cfe7a82 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -56,14 +57,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec) int create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[], - struct socket_ctx *skt_ctx, struct ipsec_sa *sa, - struct rte_ipsec_session *ips) + struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf, + struct ipsec_sa *sa, struct rte_ipsec_session *ips) { uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS; + enum rte_crypto_op_sess_type sess_type; struct rte_cryptodev_info cdev_info; + enum rte_crypto_op_type op_type; unsigned long cdev_id_qp = 0; - struct cdev_key key = { 0 }; struct ipsec_ctx *ipsec_ctx; + struct cdev_key key = { 0 }; + void *sess = NULL; uint32_t lcore_id; int32_t ret = 0; @@ -159,6 +163,10 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[], return -1; } ips->security.ctx = ctx; + + sess = ips->security.ses; + op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + sess_type = RTE_CRYPTO_OP_SECURITY_SESSION; } else { RTE_LOG(ERR, IPSEC, "Inline not supported\n"); return -1; @@ -183,6 +191,27 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[], rte_cryptodev_info_get(cdev_id, &cdev_info); } + /* Setup meta data required by event crypto adapter */ + if (em_conf->enable_event_crypto_adapter && sess != NULL) { + union rte_event_crypto_metadata m_data = {0}; + const struct eventdev_params *eventdev_conf; + + eventdev_conf = &(em_conf->eventdev_config[0]); + + /* Fill in response information */ + m_data.response_info.sched_type = em_conf->ext_params.sched_type; + m_data.response_info.op = RTE_EVENT_OP_NEW; + m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id; + + /* Fill in request information */ + m_data.request_info.cdev_id = cdev_id; + m_data.request_info.queue_pair_id = 0; + + /* Attach meta info to session */ + rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type, + sess_type, &m_data, sizeof(m_data)); + } + return 0; } diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 2005ae8fec..5ef63e8fc4 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -14,6 +14,7 @@ #include #include +#include "event_helper.h" #include "ipsec-secgw.h" #define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2 @@ -424,7 +425,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound); void sa_init(struct socket_ctx *ctx, int32_t socket_id, - struct lcore_conf *lcore_conf); + struct lcore_conf *lcore_conf, + const struct eventmode_conf *em_conf); void rt_init(struct socket_ctx *ctx, int32_t 
socket_id); @@ -441,8 +443,8 @@ enqueue_cop_burst(struct cdev_qp *cqp); int create_lookaside_session(struct ipsec_ctx *ipsec_ctx[], - struct socket_ctx *skt_ctx, struct ipsec_sa *sa, - struct rte_ipsec_session *ips); + struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf, + struct ipsec_sa *sa, struct rte_ipsec_session *ips); int create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index 803157d8ee..2661f0275f 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -3,6 +3,7 @@ * Copyright (C) 2020 Marvell International Ltd. */ #include +#include #include #include #include @@ -11,6 +12,7 @@ #include "ipsec.h" #include "ipsec-secgw.h" #include "ipsec_worker.h" +#include "sad.h" #if defined(__ARM_NEON) #include "ipsec_lpm_neon.h" @@ -228,6 +230,43 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx, ip->num = j; } +static inline void +pkt_l3_len_set(struct rte_mbuf *pkt) +{ + struct rte_ipv4_hdr *ipv4; + struct rte_ipv6_hdr *ipv6; + size_t l3len, ext_len; + uint32_t l3_type; + int next_proto; + uint8_t *p; + + l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK; + if (l3_type == RTE_PTYPE_L3_IPV4) { + ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *); + pkt->l3_len = ipv4->ihl * 4; + } else if (l3_type & RTE_PTYPE_L3_IPV6) { + ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *); + l3len = sizeof(struct rte_ipv6_hdr); + if (l3_type == RTE_PTYPE_L3_IPV6_EXT || + l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) { + p = rte_pktmbuf_mtod(pkt, uint8_t *); + next_proto = ipv6->proto; + while (next_proto != IPPROTO_ESP && + l3len < pkt->data_len && + (next_proto = rte_ipv6_get_next_ext(p + l3len, + next_proto, &ext_len)) >= 0) + l3len += ext_len; + + /* Drop pkt when IPv6 header exceeds first seg size */ + if (unlikely(l3len > pkt->data_len)) { + free_pkts(&pkt, 1); + return; + } + } + pkt->l3_len = l3len; + } +} + static inline uint16_t route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) { @@ -287,9 +326,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type) return RTE_MAX_ETHPORTS; } +static inline void +crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], + struct rte_crypto_op *cop[], uint16_t num) +{ + struct rte_crypto_sym_op *sop; + uint32_t i; + + const struct rte_crypto_op unproc_cop = { + .type = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + .status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, + .sess_type = RTE_CRYPTO_OP_SECURITY_SESSION, + }; + + for (i = 0; i != num; i++) { + cop[i]->raw = unproc_cop.raw; + sop = cop[i]->sym; + sop->m_src = mb[i]; + sop->m_dst = NULL; + __rte_security_attach_session(sop, ss->security.ses); + } +} + +static inline int +event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt, + struct ipsec_sa *sa, const struct eh_event_link_info *ev_link) +{ + struct ipsec_mbuf_metadata *priv; + struct rte_ipsec_session *sess; + struct rte_crypto_op *cop; + struct rte_event cev; + int ret; + + /* Get IPsec session */ + sess = ipsec_get_primary_session(sa); + + /* Get pkt private data */ + priv = get_priv(pkt); + cop = &priv->cop; + + /* Reset crypto operation data */ + crypto_op_reset(sess, &pkt, &cop, 1); + + /* Update event_ptr with rte_crypto_op */ + cev.event = 0; + cev.event_ptr = cop; + + /* Enqueue event to crypto adapter */ + ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id, + ev_link->event_port_id, &cev, 1); + if (unlikely(ret <= 0)) { + /* pkt 
will be freed by the caller */ + RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno); + return rte_errno; + } + + return 0; +} + static inline int process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, - struct rte_event *ev) + const struct eh_event_link_info *ev_link, struct rte_event *ev) { struct ipsec_sa *sa = NULL; struct rte_mbuf *pkt; @@ -340,7 +437,22 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, goto drop_pkt_and_exit; } break; + case PKT_TYPE_IPSEC_IPV4: + case PKT_TYPE_IPSEC_IPV6: + rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN); + pkt_l3_len_set(pkt); + + sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1); + sa = ipsec_mask_saptr(sa); + if (unlikely(sa == NULL)) { + RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n"); + goto drop_pkt_and_exit; + } + if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link))) + goto drop_pkt_and_exit; + + return PKT_POSTED; default: RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n", type); @@ -389,7 +501,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, static inline int process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, - struct rte_event *ev) + const struct eh_event_link_info *ev_link, struct rte_event *ev) { struct rte_ipsec_session *sess; struct sa_ctx *sa_ctx; @@ -456,11 +568,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, /* Get IPsec session */ sess = ipsec_get_primary_session(sa); - /* Allow only inline protocol for now */ - if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) { - RTE_LOG(ERR, IPSEC, "SA type not supported\n"); - goto drop_pkt_and_exit; - } + /* Determine protocol type */ + if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) + goto lookaside; rte_security_set_pkt_metadata(sess->security.ctx, sess->security.ses, pkt, NULL); @@ -482,6 +592,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, ipsec_event_pre_forward(pkt, port_id); return PKT_FORWARDED; +lookaside: + /* prepare pkt - advance start to L3 */ + rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN); + + if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0)) + return PKT_POSTED; + drop_pkt_and_exit: RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n"); rte_pktmbuf_free(pkt); @@ -737,6 +854,67 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links, rte_mempool_put(rte_mempool_from_obj(vec), vec); } +static inline int +ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf, + struct rte_event *ev) +{ + struct rte_ether_hdr *ethhdr; + struct rte_crypto_op *cop; + struct rte_mbuf *pkt; + uint16_t port_id; + struct ip *ip; + + /* Get pkt data */ + cop = ev->event_ptr; + pkt = cop->sym->m_src; + + /* If operation was not successful, drop the packet */ + if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) { + RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n"); + free_pkts(&pkt, 1); + return PKT_DROPPED; + } + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + + /* Prepend Ether layer */ + ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + + /* Route pkt and update required fields */ + if (ip->ip_v == IPVERSION) { + pkt->ol_flags |= lconf->outbound.ipv4_offloads; + pkt->l3_len = sizeof(struct ip); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + + port_id = route4_pkt(pkt, lconf->rt.rt4_ctx); + } else { + pkt->ol_flags |= lconf->outbound.ipv6_offloads; + 
pkt->l3_len = sizeof(struct ip6_hdr); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + + port_id = route6_pkt(pkt, lconf->rt.rt6_ctx); + } + + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { + RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n"); + free_pkts(&pkt, 1); + return PKT_DROPPED; + } + + /* Update Ether with port's MAC addresses */ + memcpy(ðhdr->src_addr, ðaddr_tbl[port_id].src, sizeof(struct rte_ether_addr)); + memcpy(ðhdr->dst_addr, ðaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr)); + + /* Update event */ + ev->mbuf = pkt; + + return PKT_FORWARDED; +} + /* * Event mode exposes various operating modes depending on the * capabilities of the event device and the operating mode @@ -924,6 +1102,14 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, "Launching event mode worker (non-burst - Tx internal port - " "app mode) on lcore %d\n", lcore_id); + ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz); + if (ret != 0) { + RTE_LOG(ERR, IPSEC, + "SAD cache init on lcore %u, failed with code: %d\n", + lcore_id, ret); + return; + } + /* Check if it's single link */ if (nb_links != 1) { RTE_LOG(INFO, IPSEC, @@ -950,6 +1136,20 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, ipsec_ev_vector_process(&lconf, links, &ev); continue; case RTE_EVENT_TYPE_ETHDEV: + if (is_unprotected_port(ev.mbuf->port)) + ret = process_ipsec_ev_inbound(&lconf.inbound, + &lconf.rt, links, &ev); + else + ret = process_ipsec_ev_outbound(&lconf.outbound, + &lconf.rt, links, &ev); + if (ret != 1) + /* The pkt has been dropped or posted */ + continue; + break; + case RTE_EVENT_TYPE_CRYPTODEV: + ret = ipsec_ev_cryptodev_process(&lconf, &ev); + if (unlikely(ret != PKT_FORWARDED)) + continue; break; default: RTE_LOG(ERR, IPSEC, "Invalid event type %u", @@ -957,16 +1157,6 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, continue; } - if (is_unprotected_port(ev.mbuf->port)) - ret = process_ipsec_ev_inbound(&lconf.inbound, - &lconf.rt, &ev); - else - ret = process_ipsec_ev_outbound(&lconf.outbound, - &lconf.rt, &ev); - if (ret != 1) - /* The pkt has been dropped */ - continue; - /* * Since tx internal port is available, events can be * directly enqueued to the adapter and it would be diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index 5dca578790..7a0c528f75 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -1235,7 +1235,8 @@ static int sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], uint32_t nb_entries, uint32_t inbound, struct socket_ctx *skt_ctx, - struct ipsec_ctx *ips_ctx[]) + struct ipsec_ctx *ips_ctx[], + const struct eventmode_conf *em_conf) { struct ipsec_sa *sa; uint32_t i, idx; @@ -1408,7 +1409,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], return -EINVAL; } } else { - rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips); + rc = create_lookaside_session(ips_ctx, skt_ctx, + em_conf, sa, ips); if (rc != 0) { RTE_LOG(ERR, IPSEC_ESP, "create_lookaside_session() failed\n"); @@ -1431,17 +1433,19 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], static inline int sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], uint32_t nb_entries, struct socket_ctx *skt_ctx, - struct ipsec_ctx *ips_ctx[]) + struct ipsec_ctx *ips_ctx[], + const struct eventmode_conf *em_conf) { - return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx); + return 
sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf); } static inline int sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], uint32_t nb_entries, struct socket_ctx *skt_ctx, - struct ipsec_ctx *ips_ctx[]) + struct ipsec_ctx *ips_ctx[], + const struct eventmode_conf *em_conf) { - return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx); + return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf); } /* @@ -1673,7 +1677,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound) void sa_init(struct socket_ctx *ctx, int32_t socket_id, - struct lcore_conf *lcore_conf) + struct lcore_conf *lcore_conf, + const struct eventmode_conf *em_conf) { int32_t rc; const char *name; @@ -1705,7 +1710,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id, rte_exit(EXIT_FAILURE, "failed to init SAD\n"); RTE_LCORE_FOREACH(lcore_id) ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound; - sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx); + sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf); if (app_sa_prm.enable != 0) { rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in, @@ -1727,7 +1732,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id, RTE_LCORE_FOREACH(lcore_id) ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound; - sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx); + sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf); if (app_sa_prm.enable != 0) { rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out, From patchwork Thu Aug 4 10:36:24 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Volodymyr Fialko X-Patchwork-Id: 114610 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 34D1AA00C4; Thu, 4 Aug 2022 12:37:02 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id AC94542BD2; Thu, 4 Aug 2022 12:36:53 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 9812D42BF7 for ; Thu, 4 Aug 2022 12:36:52 +0200 (CEST) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.5/8.17.1.5) with ESMTP id 27476CmW012676; Thu, 4 Aug 2022 03:36:51 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=3ZTCimRBMbmppT9t/wk34qI8g5sozzTs3moOiGoQM6w=; b=i3uVhmfYkskVlbsmrYBOuHoHIj08SBTLQfBHtUUuI9UJEY4JCaftf3DhPqCP8gbswDig SucB3dTKyYeeCZGjL1asbSmONF+a3rw+LxFkL8txYXBe9VcSVbjX5eWuEaFaD/HoQFKQ YBKUW91z7R19u+p91Ad7E1lGPLw4UHi8bkTixkFpgtedgzf7ZYEKLXO6CxPE15awIlcg PtRRtzpvo1S4fAXag+0UAvt6L4vGQvPZma6rAYFPED9/CSrkaA5JvEw5GgZkW2b+HjrF U7QrFEQyERvtuH1qFvVoGNI/qRNyHDCVujx9tbx6RybyQDpywcDFdTduNT+yP+klP9UY wQ== Received: from dc5-exch02.marvell.com ([199.233.59.182]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3hqp04n60e-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Thu, 04 Aug 2022 03:36:51 -0700 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Thu, 4 Aug 2022 03:36:50 -0700 
Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend Transport; Thu, 4 Aug 2022 03:36:50 -0700 Received: from localhost.localdomain (unknown [10.28.34.39]) by maili.marvell.com (Postfix) with ESMTP id 6937F3F7057; Thu, 4 Aug 2022 03:36:48 -0700 (PDT) From: Volodymyr Fialko To: , Radu Nicolau , Akhil Goyal CC: , , Volodymyr Fialko Subject: [PATCH 4/6] examples/ipsec-secgw: add stats for event mode Date: Thu, 4 Aug 2022 12:36:24 +0200 Message-ID: <20220804103626.102688-5-vfialko@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com> References: <20220804103626.102688-1-vfialko@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: yPtLu6Wxwto5ZFkkhcLwJF0FSAy1L6fd X-Proofpoint-ORIG-GUID: yPtLu6Wxwto5ZFkkhcLwJF0FSAy1L6fd X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.883,Hydra:6.0.517,FMLib:17.11.122.1 definitions=2022-08-04_03,2022-08-04_01,2022-06-22_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add per core statistic(rx/tx) counters for event mode worker. Signed-off-by: Volodymyr Fialko --- examples/ipsec-secgw/ipsec_worker.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index 2661f0275f..f94ab10a5b 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -494,7 +494,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, drop_pkt_and_exit: RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); - rte_pktmbuf_free(pkt); + free_pkts(&pkt, 1); ev->mbuf = NULL; return PKT_DROPPED; } @@ -601,7 +601,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, drop_pkt_and_exit: RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n"); - rte_pktmbuf_free(pkt); + free_pkts(&pkt, 1); ev->mbuf = NULL; return PKT_DROPPED; } @@ -816,6 +816,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf, pkt = vec->mbufs[0]; ev_vector_attr_init(vec); + core_stats_update_rx(vec->nb_elem); if (is_unprotected_port(pkt->port)) ret = process_ipsec_ev_inbound_vector(&lconf->inbound, &lconf->rt, vec); @@ -824,6 +825,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf, &lconf->rt, vec); if (likely(ret > 0)) { + core_stats_update_tx(vec->nb_elem); vec->nb_elem = ret; rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id, links[0].event_port_id, @@ -1136,6 +1138,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, ipsec_ev_vector_process(&lconf, links, &ev); continue; case RTE_EVENT_TYPE_ETHDEV: + core_stats_update_rx(1); if (is_unprotected_port(ev.mbuf->port)) ret = process_ipsec_ev_inbound(&lconf.inbound, &lconf.rt, links, &ev); @@ -1157,6 +1160,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, continue; } + core_stats_update_tx(1); /* * Since tx internal port is available, events can be * directly enqueued to the adapter and it would be From patchwork Thu Aug 4 10:36:25 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Volodymyr Fialko X-Patchwork-Id: 114611 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: 
patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E7B9AA00C4; Thu, 4 Aug 2022 12:37:07 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id CFD4942BF1; Thu, 4 Aug 2022 12:36:57 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 2BD9542BF3 for ; Thu, 4 Aug 2022 12:36:56 +0200 (CEST) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.5/8.17.1.5) with ESMTP id 2746toYc012702; Thu, 4 Aug 2022 03:36:55 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=uSM6y9aC+SKHlSwlcEfBV7gHfP6gNKwa9ZcuBpHLViU=; b=By5PBkT/ZSYO+7tmY4W8ZkIefmYVNdkwTEwJytesUae7+kM0N0LmkMlxfn6lyjgeypfG 3E3aaqy7rxSVGLgVDHv6biVkhUzD4GUPM0zuhQZlNzmlqevIajQeNZw4kjrnIk7RSCGo cRcXAZmRUPtudAsTOb4J/6jSMYXsTa30fn2CBRQowKlcYptid8VG1zdiQNxuZwsN7R+r jcL0Ly65PWzj6HOeXKFdtGwmxbSWGXp6qp/Yw26PMdZYJHFPlZAKYVgYK9H06DeYgZDe nyMKOzA+YdQipfdp605fP+BpotDTi0cWWfwCoy1LdQOgpyakkHTqP30Tc11IN7WEt0Iv ag== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3hqp04n60t-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Thu, 04 Aug 2022 03:36:55 -0700 Received: from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Thu, 4 Aug 2022 03:36:53 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend Transport; Thu, 4 Aug 2022 03:36:53 -0700 Received: from localhost.localdomain (unknown [10.28.34.39]) by maili.marvell.com (Postfix) with ESMTP id D5EA33F7057; Thu, 4 Aug 2022 03:36:51 -0700 (PDT) From: Volodymyr Fialko To: , Radu Nicolau , Akhil Goyal CC: , , Volodymyr Fialko Subject: [PATCH 5/6] examples/ipsec-secgw: add event vector support for lookaside Date: Thu, 4 Aug 2022 12:36:25 +0200 Message-ID: <20220804103626.102688-6-vfialko@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com> References: <20220804103626.102688-1-vfialko@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: cQJMa4nMvk5t1wSTUP_rCatgFyDYk0dN X-Proofpoint-ORIG-GUID: cQJMa4nMvk5t1wSTUP_rCatgFyDYk0dN X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.883,Hydra:6.0.517,FMLib:17.11.122.1 definitions=2022-08-04_03,2022-08-04_01,2022-06-22_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add vector support for event crypto adapter in lookaside mode. Once --event-vector enabled, event crypto adapter will group processed crypto operation into rte_event_vector event with type RTE_EVENT_TYPE_CRYPTODEV_VECTOR. 
Signed-off-by: Volodymyr Fialko --- doc/guides/sample_app_ug/ipsec_secgw.rst | 3 + examples/ipsec-secgw/event_helper.c | 34 ++- examples/ipsec-secgw/ipsec-secgw.c | 2 +- examples/ipsec-secgw/ipsec-secgw.h | 1 + examples/ipsec-secgw/ipsec_worker.c | 281 ++++++++++++++++++----- 5 files changed, 265 insertions(+), 56 deletions(-) diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst index c7b87889f1..2a1aeae7c5 100644 --- a/doc/guides/sample_app_ug/ipsec_secgw.rst +++ b/doc/guides/sample_app_ug/ipsec_secgw.rst @@ -94,6 +94,9 @@ The application supports two modes of operation: poll mode and event mode. (default vector-size is 16) and vector-tmo (default vector-tmo is 102400ns). By default event vectorization is disabled and it can be enabled using event-vector option. + For the event devices, crypto device pairs which support the capability + ``RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR`` vector aggregation could also be enable + using event-vector option. Additionally the event mode introduces two submodes of processing packets: diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index 9c20a05da8..635e6f24bf 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -790,12 +790,15 @@ eh_start_eventdev(struct eventmode_conf *em_conf) static int eh_initialize_crypto_adapter(struct eventmode_conf *em_conf) { + struct rte_event_crypto_adapter_queue_conf queue_conf; struct rte_event_dev_info evdev_default_conf = {0}; struct rte_event_port_conf port_conf = {0}; struct eventdev_params *eventdev_config; + char mp_name[RTE_MEMPOOL_NAMESIZE]; + const uint8_t nb_qp_per_cdev = 1; uint8_t eventdev_id, cdev_id, n; - uint32_t cap; - int ret; + uint32_t cap, nb_elem; + int ret, socket_id; if (!em_conf->enable_event_crypto_adapter) return 0; @@ -850,10 +853,35 @@ eh_initialize_crypto_adapter(struct eventmode_conf *em_conf) return ret; } + memset(&queue_conf, 0, sizeof(queue_conf)); + if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) && + (em_conf->ext_params.event_vector)) { + queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR; + queue_conf.vector_sz = em_conf->ext_params.vector_size; + /* + * Currently all sessions configured with same response + * info fields, so packets will be aggregated to the + * same vector. This allows us to configure number of + * vectors only to hold all queue pair descriptors. 
+ */ + nb_elem = (qp_desc_nb / queue_conf.vector_sz) + 1; + nb_elem *= nb_qp_per_cdev; + socket_id = rte_cryptodev_socket_id(cdev_id); + snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, + "QP_VEC_%u_%u", socket_id, cdev_id); + queue_conf.vector_mp = rte_event_vector_pool_create( + mp_name, nb_elem, 0, + queue_conf.vector_sz, socket_id); + if (queue_conf.vector_mp == NULL) { + EH_LOG_ERR("failed to create event vector pool"); + return -ENOMEM; + } + } + /* Add crypto queue pairs to event crypto adapter */ ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id, -1, /* adds all the pre configured queue pairs to the instance */ - NULL); + &queue_conf); if (ret < 0) { EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret); return ret; diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 0bd1f15ae5..02b1fabaf5 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -85,7 +85,7 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; /* * Configurable number of descriptors per queue pair */ -static uint32_t qp_desc_nb = 2048; +uint32_t qp_desc_nb = 2048; #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h index f02736075b..c6d11f3aac 100644 --- a/examples/ipsec-secgw/ipsec-secgw.h +++ b/examples/ipsec-secgw/ipsec-secgw.h @@ -145,6 +145,7 @@ extern bool per_port_pool; extern uint32_t mtu_size; extern uint32_t frag_tbl_sz; +extern uint32_t qp_desc_nb; #define SS_F (1U << 0) /* Single SA mode */ #define INL_PR_F (1U << 1) /* Inline Protocol */ diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index f94ab10a5b..466bb03bde 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -267,6 +267,21 @@ pkt_l3_len_set(struct rte_mbuf *pkt) } } +static inline void +ipsec_sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[], void *sa[], uint16_t nb_pkts) +{ + uint16_t i; + + if (nb_pkts == 0) + return; + + for (i = 0; i < nb_pkts; i++) { + rte_pktmbuf_adj(pkts[i], RTE_ETHER_HDR_LEN); + pkt_l3_len_set(pkts[i]); + } + sad_lookup(sad, pkts, sa, nb_pkts); +} + static inline uint16_t route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) { @@ -348,18 +363,11 @@ crypto_op_reset(const struct rte_ipsec_session *ss, } } -static inline int -event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt, - struct ipsec_sa *sa, const struct eh_event_link_info *ev_link) +static inline void +crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev) { struct ipsec_mbuf_metadata *priv; - struct rte_ipsec_session *sess; struct rte_crypto_op *cop; - struct rte_event cev; - int ret; - - /* Get IPsec session */ - sess = ipsec_get_primary_session(sa); /* Get pkt private data */ priv = get_priv(pkt); @@ -369,13 +377,39 @@ event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt, crypto_op_reset(sess, &pkt, &cop, 1); /* Update event_ptr with rte_crypto_op */ - cev.event = 0; - cev.event_ptr = cop; + ev->event = 0; + ev->event_ptr = cop; +} + +static inline void +free_pkts_from_events(struct rte_event events[], uint16_t count) +{ + struct rte_crypto_op *cop; + int i; + + for (i = 0; i < count; i++) { + cop = events[i].event_ptr; + free_pkts(&cop->sym->m_src, 1); + } +} + +static inline int +event_crypto_enqueue(struct rte_mbuf *pkt, + struct ipsec_sa *sa, 
const struct eh_event_link_info *ev_link) +{ + struct rte_ipsec_session *sess; + struct rte_event ev; + int ret; + + /* Get IPsec session */ + sess = ipsec_get_primary_session(sa); + + crypto_prepare_event(pkt, sess, &ev); /* Enqueue event to crypto adapter */ ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id, - ev_link->event_port_id, &cev, 1); - if (unlikely(ret <= 0)) { + ev_link->event_port_id, &ev, 1); + if (unlikely(ret != 1)) { /* pkt will be freed by the caller */ RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno); return rte_errno; @@ -449,7 +483,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, goto drop_pkt_and_exit; } - if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link))) + if (unlikely(event_crypto_enqueue(pkt, sa, ev_link))) goto drop_pkt_and_exit; return PKT_POSTED; @@ -596,7 +630,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, /* prepare pkt - advance start to L3 */ rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN); - if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0)) + if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0)) return PKT_POSTED; drop_pkt_and_exit: @@ -607,14 +641,12 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, } static inline int -ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt, - struct ipsec_traffic *t, struct sa_ctx *sa_ctx) +ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt, + struct ipsec_traffic *t) { - struct rte_ipsec_session *sess; - uint32_t sa_idx, i, j = 0; - uint16_t port_id = 0; struct rte_mbuf *pkt; - struct ipsec_sa *sa; + uint16_t port_id = 0; + uint32_t i, j = 0; /* Route IPv4 packets */ for (i = 0; i < t->ip4.num; i++) { @@ -646,34 +678,111 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt, free_pkts(&pkt, 1); } + return j; +} + +static inline int +ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec, + struct route_table *rt, + struct ipsec_traffic *t, + const struct eh_event_link_info *ev_link) +{ + uint32_t ret, i, j, ev_len = 0; + struct rte_event events[MAX_PKTS]; + struct rte_ipsec_session *sess; + struct rte_mbuf *pkt; + struct ipsec_sa *sa; + + j = ipsec_ev_route_ip_pkts(vec, rt, t); + /* Route ESP packets */ + for (i = 0; i < t->ipsec.num; i++) { + pkt = t->ipsec.pkts[i]; + sa = ipsec_mask_saptr(t->ipsec.saptr[i]); + if (unlikely(sa == NULL)) { + free_pkts(&pkt, 1); + continue; + } + sess = ipsec_get_primary_session(sa); + crypto_prepare_event(pkt, sess, &events[ev_len]); + ev_len++; + } + + if (ev_len) { + ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id, + ev_link->event_port_id, events, ev_len); + if (ret < ev_len) { + RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n", + ev_len, rte_errno); + free_pkts_from_events(&events[ret], ev_len - ret); + return -rte_errno; + } + } + + return j; +} + +static inline int +ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt, + struct ipsec_traffic *t, struct sa_ctx *sa_ctx, + const struct eh_event_link_info *ev_link) +{ + uint32_t sa_idx, ret, i, j, ev_len = 0; + struct rte_event events[MAX_PKTS]; + struct rte_ipsec_session *sess; + uint16_t port_id = 0; + struct rte_mbuf *pkt; + struct ipsec_sa *sa; + + j = ipsec_ev_route_ip_pkts(vec, rt, t); + + /* Handle IPsec packets. + * For lookaside IPsec packets, submit to cryptodev queue. + * For inline IPsec packets, route the packet. 
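The enqueue-and-clean-up pattern used by ipsec_ev_inbound_route_pkts() above (and repeated on the outbound path below) can be shown in isolation. This is a minimal sketch, not part of the patch; crypto_adapter_enqueue_burst() is a hypothetical wrapper around the adapter API:

    #include <rte_crypto.h>
    #include <rte_errno.h>
    #include <rte_event_crypto_adapter.h>
    #include <rte_mbuf.h>

    /* Hypothetical wrapper: submit a burst of crypto-op events and free
     * the source mbuf of every event the adapter did not accept.
     * Returns 0 on full enqueue, -rte_errno on partial enqueue. */
    static int
    crypto_adapter_enqueue_burst(uint8_t eventdev_id, uint8_t port_id,
                                 struct rte_event events[], uint16_t ev_len)
    {
            uint16_t ret, i;

            if (ev_len == 0)
                    return 0;

            ret = rte_event_crypto_adapter_enqueue(eventdev_id, port_id,
                                                   events, ev_len);
            if (ret < ev_len) {
                    /* Events [ret..ev_len) were rejected; their packets
                     * must be freed here or they would leak. */
                    for (i = ret; i < ev_len; i++) {
                            struct rte_crypto_op *cop = events[i].event_ptr;

                            rte_pktmbuf_free(cop->sym->m_src);
                    }
                    return -rte_errno;
            }

            return 0;
    }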
+ */ for (i = 0; i < t->ipsec.num; i++) { /* Validate sa_idx */ sa_idx = t->ipsec.res[i]; pkt = t->ipsec.pkts[i]; - if (unlikely(sa_idx >= sa_ctx->nb_sa)) + if (unlikely(sa_idx >= sa_ctx->nb_sa)) { free_pkts(&pkt, 1); - else { - /* Else the packet has to be protected */ - sa = &(sa_ctx->sa[sa_idx]); - /* Get IPsec session */ - sess = ipsec_get_primary_session(sa); - /* Allow only inline protocol for now */ - if (unlikely(sess->type != - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) { - RTE_LOG(ERR, IPSEC, "SA type not supported\n"); - free_pkts(&pkt, 1); - continue; - } + continue; + } + /* Else the packet has to be protected */ + sa = &(sa_ctx->sa[sa_idx]); + /* Get IPsec session */ + sess = ipsec_get_primary_session(sa); + switch (sess->type) { + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN); + crypto_prepare_event(pkt, sess, &events[ev_len]); + ev_len++; + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: rte_security_set_pkt_metadata(sess->security.ctx, sess->security.ses, pkt, NULL); - pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD; port_id = sa->portid; update_mac_addrs(pkt, port_id); ipsec_event_pre_forward(pkt, port_id); ev_vector_attr_update(vec, pkt); vec->mbufs[j++] = pkt; + break; + default: + RTE_LOG(ERR, IPSEC, "SA type not supported\n"); + free_pkts(&pkt, 1); + break; + } + } + + if (ev_len) { + ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id, + ev_link->event_port_id, events, ev_len); + if (ret < ev_len) { + RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n", + ev_len, rte_errno); + free_pkts_from_events(&events[ret], ev_len - ret); + return -rte_errno; } } @@ -698,6 +807,10 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t) t->ip6.data[t->ip6.num] = nlp; t->ip6.pkts[(t->ip6.num)++] = pkt; break; + case PKT_TYPE_IPSEC_IPV4: + case PKT_TYPE_IPSEC_IPV6: + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + break; default: RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n", type); @@ -708,7 +821,8 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t) static inline int process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt, - struct rte_event_vector *vec) + struct rte_event_vector *vec, + const struct eh_event_link_info *ev_link) { struct ipsec_traffic t; struct rte_mbuf *pkt; @@ -738,12 +852,15 @@ process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt, check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4); check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6); - return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx); + ipsec_sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num); + + return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link); } static inline int process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt, - struct rte_event_vector *vec) + struct rte_event_vector *vec, + const struct eh_event_link_info *ev_link) { struct ipsec_traffic t; struct rte_mbuf *pkt; @@ -766,7 +883,7 @@ process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt, check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec); check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec); - return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx); + return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link); } static inline int @@ -817,12 +934,13 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf, ev_vector_attr_init(vec); core_stats_update_rx(vec->nb_elem); + if (is_unprotected_port(pkt->port)) ret = 
process_ipsec_ev_inbound_vector(&lconf->inbound, - &lconf->rt, vec); + &lconf->rt, vec, links); else ret = process_ipsec_ev_outbound_vector(&lconf->outbound, - &lconf->rt, vec); + &lconf->rt, vec, links); if (likely(ret > 0)) { core_stats_update_tx(vec->nb_elem); @@ -857,24 +975,19 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links, } static inline int -ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf, - struct rte_event *ev) +ipsec_ev_cryptodev_process_one_pkt( + const struct lcore_conf_ev_tx_int_port_wrkr *lconf, + const struct rte_crypto_op *cop, struct rte_mbuf *pkt) { struct rte_ether_hdr *ethhdr; - struct rte_crypto_op *cop; - struct rte_mbuf *pkt; uint16_t port_id; struct ip *ip; - /* Get pkt data */ - cop = ev->event_ptr; - pkt = cop->sym->m_src; - - /* If operation was not successful, drop the packet */ + /* If operation was not successful, free the packet */ if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) { RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n"); free_pkts(&pkt, 1); - return PKT_DROPPED; + return -1; } ip = rte_pktmbuf_mtod(pkt, struct ip *); @@ -904,13 +1017,74 @@ ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf, if (unlikely(port_id == RTE_MAX_ETHPORTS)) { RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n"); free_pkts(&pkt, 1); - return PKT_DROPPED; + return -1; } /* Update Ether with port's MAC addresses */ memcpy(ðhdr->src_addr, ðaddr_tbl[port_id].src, sizeof(struct rte_ether_addr)); memcpy(ðhdr->dst_addr, ðaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr)); + ipsec_event_pre_forward(pkt, port_id); + + return 0; +} + +static inline void +ipsec_ev_cryptodev_vector_process( + const struct lcore_conf_ev_tx_int_port_wrkr *lconf, + const struct eh_event_link_info *links, + struct rte_event *ev) +{ + struct rte_event_vector *vec = ev->vec; + const uint16_t nb_events = 1; + struct rte_crypto_op *cop; + struct rte_mbuf *pkt; + uint16_t enqueued; + int i, n = 0; + + ev_vector_attr_init(vec); + /* Transform cop vec into pkt vec */ + for (i = 0; i < vec->nb_elem; i++) { + /* Get pkt data */ + cop = vec->ptrs[i]; + pkt = cop->sym->m_src; + if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt)) + continue; + + vec->mbufs[n++] = pkt; + ev_vector_attr_update(vec, pkt); + } + + if (n == 0) { + rte_mempool_put(rte_mempool_from_obj(vec), vec); + return; + } + + vec->nb_elem = n; + enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id, + links[0].event_port_id, ev, nb_events, 0); + if (enqueued != nb_events) { + RTE_LOG_DP(INFO, IPSEC, "Failed to enqueue to tx, ret = %u," + " errno = %i\n", enqueued, rte_errno); + free_pkts(vec->mbufs, vec->nb_elem); + rte_mempool_put(rte_mempool_from_obj(vec), vec); + } +} + +static inline int +ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf, + struct rte_event *ev) +{ + struct rte_crypto_op *cop; + struct rte_mbuf *pkt; + + /* Get pkt data */ + cop = ev->event_ptr; + pkt = cop->sym->m_src; + + if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt)) + return PKT_DROPPED; + /* Update event */ ev->mbuf = pkt; @@ -1154,6 +1328,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links, if (unlikely(ret != PKT_FORWARDED)) continue; break; + case RTE_EVENT_TYPE_CRYPTODEV_VECTOR: + ipsec_ev_cryptodev_vector_process(&lconf, links, &ev); + continue; default: RTE_LOG(ERR, IPSEC, "Invalid event type %u", ev.event_type); From patchwork Thu Aug 4 10:36:26 2022 Content-Type: text/plain; charset="utf-8" 
MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Volodymyr Fialko X-Patchwork-Id: 114612 X-Patchwork-Delegate: gakhil@marvell.com From: Volodymyr Fialko To: , Radu Nicolau , Akhil Goyal CC: , , Volodymyr Fialko Subject: [PATCH 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Date: Thu, 4 Aug 2022 12:36:26 +0200 Message-ID: <20220804103626.102688-7-vfialko@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com> References: <20220804103626.102688-1-vfialko@marvell.com> Limit the number of queue pairs to one for event lookaside mode, since all cores use the same queue pair in this mode.
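The resulting allocation policy (map_cdev_to_cores_from_config() in the diff below) boils down to: event lookaside mode binds exactly one queue pair, shared by all workers, while poll mode spreads queue pairs over lcores round-robin up to the device limit. A condensed sketch of that policy, with try_map_qp_to_lcore() standing in for the application's add_cdev_mapping():

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_common.h>

    /* Stand-in for add_cdev_mapping(): returns true when the given
     * lcore takes ownership of queue pair 'qp'. */
    static bool try_map_qp_to_lcore(uint16_t qp, uint16_t lcore_idx);

    static uint16_t
    map_qps(bool event_lookaside, uint16_t nb_lcores,
            uint16_t max_dev_qps, uint16_t *next_lcore)
    {
            uint16_t nb_qp = 0, i = 0, max_nb_qps;

            /* Event lookaside: one qp, every worker shares it. */
            if (event_lookaside) {
                    (void)try_map_qp_to_lcore(0, 0);
                    return 1;
            }

            /* Poll mode: one qp per configured lcore, capped by what
             * the device can provide. */
            max_nb_qps = RTE_MIN(nb_lcores, max_dev_qps);
            while (nb_qp < max_nb_qps && i < nb_lcores) {
                    if (try_map_qp_to_lcore(nb_qp, *next_lcore))
                            nb_qp++;
                    *next_lcore = (*next_lcore + 1) % nb_lcores;
                    i++;
            }

            return nb_qp;
    }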
Signed-off-by: Volodymyr Fialko --- examples/ipsec-secgw/ipsec-secgw.c | 67 +++++++++++++++++------------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 02b1fabaf5..d6b5b73811 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -1541,7 +1541,7 @@ add_mapping(const char *str, uint16_t cdev_id, } static int32_t -add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id, +add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id, uint16_t qp, struct lcore_params *params) { int32_t ret = 0; @@ -1597,6 +1597,37 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id, return ret; } +static uint16_t +map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id, + const struct rte_cryptodev_info *cdev_info, + uint16_t *last_used_lcore_id) +{ + uint16_t nb_qp = 0, i = 0, max_nb_qps; + + /* For event lookaside mode all sessions are bound to a single qp. + * It's enough to bind one core, since all cores will share the same qp. + * Event inline mode does not use this functionality. + */ + if (mode == EH_PKT_TRANSFER_MODE_EVENT) { + add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]); + return 1; + } + + /* Check if there are enough queue pairs for all configured cores */ + max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs); + + while (nb_qp < max_nb_qps && i < nb_lcore_params) { + if (add_cdev_mapping(cdev_info, cdev_id, nb_qp, + &lcore_params[*last_used_lcore_id])) + nb_qp++; + (*last_used_lcore_id)++; + *last_used_lcore_id %= nb_lcore_params; + i++; + } + + return nb_qp; +} + /* Check if the device is enabled by cryptodev_mask */ static int check_cryptodev_mask(uint8_t cdev_id) @@ -1608,13 +1639,13 @@ check_cryptodev_mask(uint8_t cdev_id) } static uint16_t -cryptodevs_init(uint16_t req_queue_num) +cryptodevs_init(enum eh_pkt_transfer_mode mode) { + struct rte_hash_parameters params = { 0 }; struct rte_cryptodev_config dev_conf; struct rte_cryptodev_qp_conf qp_conf; - uint16_t idx, max_nb_qps, qp, total_nb_qps, i; + uint16_t idx, qp, total_nb_qps; int16_t cdev_id; - struct rte_hash_parameters params = { 0 }; const uint64_t mseg_flag = multi_seg_required() ? RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0; @@ -1655,23 +1686,8 @@ cryptodevs_init(uint16_t req_queue_num) cdev_id, rte_cryptodev_get_feature_name(mseg_flag)); - if (nb_lcore_params > cdev_info.max_nb_queue_pairs) - max_nb_qps = cdev_info.max_nb_queue_pairs; - else - max_nb_qps = nb_lcore_params; - - qp = 0; - i = 0; - while (qp < max_nb_qps && i < nb_lcore_params) { - if (add_cdev_mapping(&cdev_info, cdev_id, qp, - &lcore_params[idx])) - qp++; - idx++; - idx = idx % nb_lcore_params; - i++; - } - qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp)); + qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx); if (qp == 0) continue; @@ -2985,15 +3001,6 @@ main(int32_t argc, char **argv) sess_sz = max_session_size(); - /* - * In event mode request minimum number of crypto queues - * to be reserved equal to number of ports. - */ - if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT) - nb_crypto_qp = rte_eth_dev_count_avail(); - else - nb_crypto_qp = 0; - /* * In event lookaside mode request memory for crypto metadata.
Should * be removed once the API no longer requires usage of user data in @@ -3004,7 +3011,7 @@ main(int32_t argc, char **argv) else user_data_sz = 0; - nb_crypto_qp = cryptodevs_init(nb_crypto_qp); + nb_crypto_qp = cryptodevs_init(eh_conf->mode); if (nb_bufs_in_pool == 0) { RTE_ETH_FOREACH_DEV(portid) {