[1/6] examples/ipsec-secgw: add event crypto adapter init

Message ID 20220804103626.102688-2-vfialko@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series examples/ipsec-secgw: add lookaside event mode

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-testing warning apply patch failure

Commit Message

Volodymyr Fialko Aug. 4, 2022, 10:36 a.m. UTC
  Create, configure and start an event crypto adapter. This adapter will
be used in lookaside event mode processing.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
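For reference, a minimal standalone sketch of the setup sequence performed by the
new eh_initialize_crypto_adapter() and eh_start_crypto_adapter() helpers below,
assuming a single configured eventdev identified here as ev_id; the helper name
setup_event_crypto_adapters() and the one-adapter-per-cryptodev id scheme are
illustrative only.

#include <errno.h>
#include <stdint.h>

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
setup_event_crypto_adapters(uint8_t ev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_port_conf conf = {0};
	uint8_t cdev_id;
	uint32_t caps;
	int ret;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		/* Only forward (internal port) mode is supported here. */
		ret = rte_event_crypto_adapter_caps_get(ev_id, cdev_id, &caps);
		if (ret < 0)
			return ret;
		if (!(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
			return -EINVAL;

		/* Size the adapter's event port from the eventdev defaults. */
		ret = rte_event_dev_info_get(ev_id, &info);
		if (ret < 0)
			return ret;
		conf.new_event_threshold = info.max_num_events;
		conf.dequeue_depth = info.max_event_port_dequeue_depth;
		conf.enqueue_depth = info.max_event_port_enqueue_depth;

		/* One adapter per cryptodev, reusing cdev_id as adapter id. */
		ret = rte_event_crypto_adapter_create(cdev_id, ev_id, &conf,
				RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
		if (ret < 0)
			return ret;

		/* queue_pair_id of -1 adds all configured queue pairs. */
		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, cdev_id,
				-1, NULL);
		if (ret < 0)
			return ret;

		ret = rte_event_crypto_adapter_start(cdev_id);
		if (ret < 0)
			return ret;
	}

	return 0;
}
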
 examples/ipsec-secgw/event_helper.c | 144 ++++++++++++++++++++++++++++
 examples/ipsec-secgw/event_helper.h |   2 +
 examples/ipsec-secgw/ipsec-secgw.c  |  44 ++++++---
 3 files changed, 175 insertions(+), 15 deletions(-)
  

Patch

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index b36f20a3fd..6b00a21b6a 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -2,8 +2,10 @@ 
  * Copyright (C) 2020 Marvell International Ltd.
  */
 #include <rte_bitmap.h>
+#include <rte_cryptodev.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_malloc.h>
@@ -742,6 +744,126 @@  eh_start_eventdev(struct eventmode_conf *em_conf)
 	return 0;
 }
 
+static int
+eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_port_conf port_conf = {0};
+	struct eventdev_params *eventdev_config;
+	uint8_t eventdev_id, cdev_id, n;
+	uint32_t cap;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	/*
+	 * More than one eventdev is not supported,
+	 * all event crypto adapters will be assigned to one eventdev
+	 */
+	RTE_ASSERT(em_conf->nb_eventdev == 1);
+
+	/* Get event device configuration */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_id = eventdev_config->eventdev_id;
+
+	n = rte_cryptodev_count();
+
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		/* Check event's crypto capabilities */
+		ret = rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event device's crypto capabilities %d", ret);
+			return ret;
+		}
+
+		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) {
+			EH_LOG_ERR("Event crypto adapter does not support forward mode!");
+			return -EINVAL;
+		}
+
+		/* Create event crypto adapter */
+
+		/* Get default configuration of event dev */
+		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event dev info %d", ret);
+			return ret;
+		}
+
+		/* Setup port conf */
+		port_conf.new_event_threshold =
+				evdev_default_conf.max_num_events;
+		port_conf.dequeue_depth =
+				evdev_default_conf.max_event_port_dequeue_depth;
+		port_conf.enqueue_depth =
+				evdev_default_conf.max_event_port_enqueue_depth;
+
+		/* Create adapter */
+		ret = rte_event_crypto_adapter_create(cdev_id, eventdev_id,
+				&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to create event crypto adapter %d", ret);
+			return ret;
+		}
+
+		/* Add crypto queue pairs to event crypto adapter */
+		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
+				-1, /* adds all the pre configured queue pairs to the instance */
+				NULL);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_start_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_start(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to start event crypto adapter %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_stop_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_stop(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to stop event crypto adapter %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int
 eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
 				uint8_t ev_dev_id, uint8_t ethdev_id)
@@ -1695,6 +1817,13 @@  eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Setup event crypto adapter */
+	ret = eh_initialize_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to initialize event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Setup Rx adapter */
 	ret = eh_initialize_rx_adapter(em_conf);
 	if (ret < 0) {
@@ -1716,6 +1845,14 @@  eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Start event crypto adapter */
+	ret = eh_start_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to start event crypto adapter %d", ret);
+		return ret;
+	}
+
+
 	/* Start eth devices after setting up adapter */
 	RTE_ETH_FOREACH_DEV(port_id) {
 
@@ -1786,6 +1923,13 @@  eh_devs_uninit(struct eh_conf *conf)
 		}
 	}
 
+	/* Stop event crypto adapter */
+	ret = eh_stop_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to stop event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Stop and release event devices */
 	for (i = 0; i < em_conf->nb_eventdev; i++) {
 
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index f3cbe57cb3..4b26dc8fc2 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -185,6 +185,8 @@  struct eventmode_conf {
 		/**< Max vector timeout in nanoseconds */
 	uint64_t vector_pool_sz;
 		/**< Vector pool size */
+	bool enable_event_crypto_adapter;
+		/**< Enables event crypto adapter related configuration */
 };
 
 /**
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 815b9254ae..4ca5936bdf 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -44,6 +44,7 @@ 
 #include <rte_cryptodev.h>
 #include <rte_security.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_ip.h>
 #include <rte_ip_frag.h>
 #include <rte_alarm.h>
@@ -2094,7 +2095,7 @@  max_session_size(void)
 }
 
 static void
-session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
+session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz, size_t user_data_sz)
 {
 	char mp_name[RTE_MEMPOOL_NAMESIZE];
 	struct rte_mempool *sess_mp;
@@ -2107,8 +2108,8 @@  session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
 	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
 			CDEV_MP_CACHE_MULTIPLIER);
 	sess_mp = rte_cryptodev_sym_session_pool_create(
-			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
-			socket_id);
+			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
+			user_data_sz, socket_id);
 	ctx->session_pool = sess_mp;
 
 	if (ctx->session_pool == NULL)
@@ -2441,7 +2442,8 @@  signal_handler(int signum)
 }
 
 static void
-ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa,
+		struct eventmode_conf *em_conf)
 {
 	struct rte_ipsec_session *ips;
 	int32_t i;
@@ -2451,9 +2453,11 @@  ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
 
 	for (i = 0; i < nb_sa; i++) {
 		ips = ipsec_get_primary_session(&sa[i]);
-		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
-			rte_exit(EXIT_FAILURE, "Event mode supports only "
-				 "inline protocol sessions\n");
+		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+			em_conf->enable_event_crypto_adapter = true;
+		else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+			rte_exit(EXIT_FAILURE, "Event mode supports inline "
+				 "and lookaside protocol sessions\n");
 	}
 
 }
@@ -2486,13 +2490,12 @@  check_event_mode_params(struct eh_conf *eh_conf)
 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
 
 	/*
-	 * Event mode currently supports only inline protocol sessions.
-	 * If there are other types of sessions configured then exit with
-	 * error.
+	 * Event mode currently supports inline and lookaside protocol
+	 * sessions. If there are other types of sessions configured then exit
+	 * with error.
 	 */
-	ev_mode_sess_verify(sa_in, nb_sa_in);
-	ev_mode_sess_verify(sa_out, nb_sa_out);
-
+	ev_mode_sess_verify(sa_in, nb_sa_in, em_conf);
+	ev_mode_sess_verify(sa_out, nb_sa_out, em_conf);
 
 	/* Option --config does not apply to event mode */
 	if (nb_lcore_params > 0) {
@@ -2925,7 +2928,7 @@  main(int32_t argc, char **argv)
 	uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
 	uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
 	struct eh_conf *eh_conf = NULL;
-	size_t sess_sz;
+	size_t sess_sz, user_data_sz;
 
 	nb_bufs_in_pool = 0;
 
@@ -2991,6 +2994,16 @@  main(int32_t argc, char **argv)
 	else
 		nb_crypto_qp = 0;
 
+	/*
+	 * In event lookaside mode, request memory for crypto metadata. This
+	 * should be removed in DPDK 22.11, once the API no longer requires
+	 * user data.
+	 */
+	if (((struct eventmode_conf *)(eh_conf->mode_params))->enable_event_crypto_adapter)
+		user_data_sz = sizeof(union rte_event_crypto_metadata);
+	else
+		user_data_sz = 0;
+
 	nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
 
 	if (nb_bufs_in_pool == 0) {
@@ -3032,7 +3045,8 @@  main(int32_t argc, char **argv)
 		if (socket_ctx[socket_id].session_pool)
 			continue;
 
-		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
+		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz,
+				user_data_sz);
 		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
 			sess_sz);
 	}
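
For context on the session pool change above, a minimal sketch of the sizing
logic, assuming the crypto adapter keeps its per-op request/response metadata
in the session user data area (needed until the DPDK 22.11 API rework); the
wrapper name, pool name and cache size are illustrative only.

#include <stdbool.h>
#include <stdint.h>

#include <rte_cryptodev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_mempool.h>

static struct rte_mempool *
example_sess_pool_create(int socket_id, uint32_t nb_sess, uint32_t sess_sz,
		bool lookaside_event_mode)
{
	/* Reserve room for the adapter metadata only when it will be used. */
	uint16_t user_data_sz = lookaside_event_mode ?
			sizeof(union rte_event_crypto_metadata) : 0;

	return rte_cryptodev_sym_session_pool_create("example_sess_mp",
			nb_sess, sess_sz, 64 /* cache */, user_data_sz,
			socket_id);
}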