> Add driver inbound and outbound worker thread for ipsec-secgw. In driver
> mode application does as little as possible. It simply forwards packets
> back to port from which traffic was received instructing HW to apply
> inline security processing using first outbound SA configured for
> a given port. If a port does not have SA configured outbound traffic
> on that port will be silently dropped. The aim of this mode is to
> measure HW capabilities. Driver mode is selected with single-sa option.
> The single-sa option accepts SA index however in event mode the SA
> index is ignored.
>
> Example command to run ipsec-secgw in driver mode:
> ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128
> -w 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
> --log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 --config "(1,0,0),(0,0,0)"
> -f aes-gcm.cfg --transfer-mode event --schedule-type parallel
> --single-sa 0
>
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
> ---
> examples/ipsec-secgw/Makefile | 1 +
> examples/ipsec-secgw/ipsec-secgw.c | 36 +++---
> examples/ipsec-secgw/ipsec-secgw.h | 17 +++
> examples/ipsec-secgw/ipsec.h | 11 ++
> examples/ipsec-secgw/ipsec_worker.c | 240 ++++++++++++++++++++++++++++++++++++
> examples/ipsec-secgw/meson.build | 2 +-
> 6 files changed, 291 insertions(+), 16 deletions(-)
> create mode 100644 examples/ipsec-secgw/ipsec-secgw.h
> create mode 100644 examples/ipsec-secgw/ipsec_worker.c
>
> diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
> index 09e3c5a..f6fd94c 100644
> --- a/examples/ipsec-secgw/Makefile
> +++ b/examples/ipsec-secgw/Makefile
> @@ -15,6 +15,7 @@ SRCS-y += sa.c
> SRCS-y += rt.c
> SRCS-y += ipsec_process.c
> SRCS-y += ipsec-secgw.c
> +SRCS-y += ipsec_worker.c
> SRCS-y += event_helper.c
>
> CFLAGS += -gdwarf-2
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index f1cc3fb..86215fb 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -70,8 +70,6 @@ volatile bool force_quit;
>
> #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
>
> -#define NB_SOCKETS 4
> -
> /* Configure how many packets ahead to prefetch, when reading packets */
> #define PREFETCH_OFFSET 3
>
> @@ -79,8 +77,6 @@ volatile bool force_quit;
>
> #define MAX_LCORE_PARAMS 1024
>
> -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))
> -
> /*
> * Configurable number of RX/TX ring descriptors
> */
> @@ -190,12 +186,10 @@ static const struct option lgopts[] = {
> /* mask of enabled ports */
> static uint32_t enabled_port_mask;
> static uint64_t enabled_cryptodev_mask = UINT64_MAX;
> -static uint32_t unprotected_port_mask;
> static int32_t promiscuous_on = 1;
> static int32_t numa_on = 1; /**< NUMA is enabled by default. */
> static uint32_t nb_lcores;
> static uint32_t single_sa;
> -static uint32_t single_sa_idx;
> static uint32_t schedule_type;
>
> /*
> @@ -279,8 +273,6 @@ static struct rte_eth_conf port_conf = {
> },
> };
>
> -static struct socket_ctx socket_ctx[NB_SOCKETS];
> -
> /*
> * Determine is multi-segment support required:
> * - either frame buffer size is smaller then mtu
> @@ -1114,8 +1106,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
> }
>
> /* main processing loop */
> -static int32_t
> -main_loop(__attribute__((unused)) void *dummy)
> +void
> +ipsec_poll_mode_worker(void)
> {
> struct rte_mbuf *pkts[MAX_PKT_BURST];
> uint32_t lcore_id;
> @@ -1157,7 +1149,7 @@ main_loop(__attribute__((unused)) void *dummy)
> if (qconf->nb_rx_queue == 0) {
> RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
> lcore_id);
> - return 0;
> + return;
> }
>
> RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
> @@ -1170,7 +1162,7 @@ main_loop(__attribute__((unused)) void *dummy)
> lcore_id, portid, queueid);
> }
>
> - while (1) {
> + while (!force_quit) {
> cur_tsc = rte_rdtsc();
>
> /* TX queue buffer drain */
> @@ -1324,8 +1316,10 @@ print_usage(const char *prgname)
> " -a enables SA SQN atomic behaviour\n"
> " -f CONFIG_FILE: Configuration file\n"
> " --config (port,queue,lcore): Rx queue configuration\n"
> - " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
> - " bypassing the SP\n"
> + " --single-sa SAIDX: In poll mode use single SA index for\n"
> + " outbound traffic, bypassing the SP\n"
> + " In event mode selects driver mode,\n"
> + " SA index value is ignored\n"
> " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
> " devices to configure\n"
> " --transfer-mode MODE\n"
> @@ -1980,6 +1974,18 @@ cryptodevs_init(void)
> i++;
> }
>
> + /*
> + * Set the queue pair to at least the number of ethernet
> + * devices for inline outbound.
> + */
> + qp = RTE_MAX(rte_eth_dev_count_avail(), qp);
> +
> + /*
> + * The requested number of queues should never exceed
> + * the max available
> + */
> + qp = RTE_MIN(qp, max_nb_qps);
> +
Same comment as for v1:
I still don't understand why we have to do it unconditionally.
For poll mode it seems to bring nothing but a waste of resources.
Konstantin
> if (qp == 0)
> continue;
>
> @@ -2871,7 +2877,7 @@ main(int32_t argc, char **argv)
> check_all_ports_link_status(enabled_port_mask);
>
> /* launch per-lcore init on every lcore */
> - rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
> + rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
>
> RTE_LCORE_FOREACH_SLAVE(lcore_id) {
> if (rte_eal_wait_lcore(lcore_id) < 0)
> diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
> new file mode 100644
> index 0000000..5b19e29
> --- /dev/null
> +++ b/examples/ipsec-secgw/ipsec-secgw.h
> @@ -0,0 +1,17 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (C) 2020 Marvell International Ltd.
> + */
> +#ifndef _IPSEC_SECGW_H_
> +#define _IPSEC_SECGW_H_
> +
> +#define NB_SOCKETS 4
> +
> +#define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << portid))
> +
> +/* Port mask to identify the unprotected ports */
> +uint32_t unprotected_port_mask;
> +
> +/* Index of SA in single mode */
> +uint32_t single_sa_idx;
> +
> +#endif /* _IPSEC_SECGW_H_ */
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index 0539aec..65be2ac 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -13,6 +13,8 @@
> #include <rte_flow.h>
> #include <rte_ipsec.h>
>
> +#include "ipsec-secgw.h"
> +
> #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
> #define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
> #define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3
> @@ -258,6 +260,15 @@ struct ipsec_traffic {
> struct traffic_type ip6;
> };
>
> +/* Socket ctx */
> +struct socket_ctx socket_ctx[NB_SOCKETS];
> +
> +void
> +ipsec_poll_mode_worker(void);
> +
> +int
> +ipsec_launch_one_lcore(void *args);
> +
> uint16_t
> ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
> uint16_t nb_pkts, uint16_t len);
@@ -15,6 +15,7 @@ SRCS-y += sa.c
SRCS-y += rt.c
SRCS-y += ipsec_process.c
SRCS-y += ipsec-secgw.c
+SRCS-y += ipsec_worker.c
SRCS-y += event_helper.c
CFLAGS += -gdwarf-2
@@ -70,8 +70,6 @@ volatile bool force_quit;
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-#define NB_SOCKETS 4
-
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
@@ -79,8 +77,6 @@ volatile bool force_quit;
#define MAX_LCORE_PARAMS 1024
-#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))
-
/*
* Configurable number of RX/TX ring descriptors
*/
@@ -190,12 +186,10 @@ static const struct option lgopts[] = {
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
-static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
-static uint32_t single_sa_idx;
static uint32_t schedule_type;
/*
@@ -279,8 +273,6 @@ static struct rte_eth_conf port_conf = {
},
};
-static struct socket_ctx socket_ctx[NB_SOCKETS];
-
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
@@ -1114,8 +1106,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf,
}
/* main processing loop */
-static int32_t
-main_loop(__attribute__((unused)) void *dummy)
+void
+ipsec_poll_mode_worker(void)
{
struct rte_mbuf *pkts[MAX_PKT_BURST];
uint32_t lcore_id;
@@ -1157,7 +1149,7 @@ main_loop(__attribute__((unused)) void *dummy)
if (qconf->nb_rx_queue == 0) {
RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
lcore_id);
- return 0;
+ return;
}
RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
@@ -1170,7 +1162,7 @@ main_loop(__attribute__((unused)) void *dummy)
lcore_id, portid, queueid);
}
- while (1) {
+ while (!force_quit) {
cur_tsc = rte_rdtsc();
/* TX queue buffer drain */
@@ -1324,8 +1316,10 @@ print_usage(const char *prgname)
" -a enables SA SQN atomic behaviour\n"
" -f CONFIG_FILE: Configuration file\n"
" --config (port,queue,lcore): Rx queue configuration\n"
- " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
- " bypassing the SP\n"
+ " --single-sa SAIDX: In poll mode use single SA index for\n"
+ " outbound traffic, bypassing the SP\n"
+ " In event mode selects driver mode,\n"
+ " SA index value is ignored\n"
" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
" devices to configure\n"
" --transfer-mode MODE\n"
@@ -1980,6 +1974,18 @@ cryptodevs_init(void)
i++;
}
+ /*
+ * Set the queue pair to at least the number of ethernet
+ * devices for inline outbound.
+ */
+ qp = RTE_MAX(rte_eth_dev_count_avail(), qp);
+
+ /*
+ * The requested number of queues should never exceed
+ * the max available
+ */
+ qp = RTE_MIN(qp, max_nb_qps);
+
if (qp == 0)
continue;
@@ -2871,7 +2877,7 @@ main(int32_t argc, char **argv)
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
new file mode 100644
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _IPSEC_SECGW_H_
+#define _IPSEC_SECGW_H_
+
+#define NB_SOCKETS 4
+
+#define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << portid))
+
+/* Port mask to identify the unprotected ports */
+uint32_t unprotected_port_mask;
+
+/* Index of SA in single mode */
+uint32_t single_sa_idx;
+
+#endif /* _IPSEC_SECGW_H_ */
@@ -13,6 +13,8 @@
#include <rte_flow.h>
#include <rte_ipsec.h>
+#include "ipsec-secgw.h"
+
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
#define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3
@@ -258,6 +260,15 @@ struct ipsec_traffic {
struct traffic_type ip6;
};
+/* Socket ctx */
+struct socket_ctx socket_ctx[NB_SOCKETS];
+
+void
+ipsec_poll_mode_worker(void);
+
+int
+ipsec_launch_one_lcore(void *args);
+
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t len);
new file mode 100644
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memcpy.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "event_helper.h"
+#include "ipsec.h"
+#include "ipsec-secgw.h"
+
+extern volatile bool force_quit;
+
+static inline void
+ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
+{
+ /* Save the destination port in the mbuf */
+ m->port = port_id;
+
+ /* Save eth queue for Tx */
+ rte_event_eth_tx_adapter_txq_set(m, 0);
+}
+
+static inline void
+prepare_out_sessions_tbl(struct sa_ctx *sa_out,
+ struct rte_security_session **sess_tbl, uint16_t size)
+{
+ struct rte_ipsec_session *pri_sess;
+ struct ipsec_sa *sa;
+ int i;
+
+ for (i = 0; i < IPSEC_SA_MAX_ENTRIES; i++) {
+
+ sa = &sa_out->sa[i];
+ if (!sa->spi)
+ continue;
+
+ pri_sess = ipsec_get_primary_session(sa);
+ if (pri_sess->type !=
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+
+ RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
+ pri_sess->type);
+ continue;
+ }
+
+ if (sa->portid >= size) {
+ RTE_LOG(ERR, IPSEC,
+ "Port id >= than table size %d, %d\n",
+ sa->portid, size);
+ continue;
+ }
+
+ /* Use only first inline session found for a given port */
+ if (sess_tbl[sa->portid])
+ continue;
+ sess_tbl[sa->portid] = pri_sess->security.ses;
+ }
+}
+
+/*
+ * Event mode exposes various operating modes depending on the
+ * capabilities of the event device and the operating mode
+ * selected.
+ */
+
+/* Workers registered */
+#define IPSEC_EVENTMODE_WORKERS 1
+
+/*
+ * Event mode worker
+ * Operating parameters : non-burst - Tx internal port - driver mode
+ */
+static void
+ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
+ uint8_t nb_links)
+{
+ struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
+ unsigned int nb_rx = 0;
+ struct rte_mbuf *pkt;
+ struct rte_event ev;
+ uint32_t lcore_id;
+ int32_t socket_id;
+ int16_t port_id;
+
+ /* Check if we have links registered for this lcore */
+ if (nb_links == 0) {
+ /* No links registered - exit */
+ return;
+ }
+
+ /* Get core ID */
+ lcore_id = rte_lcore_id();
+
+ /* Get socket ID */
+ socket_id = rte_lcore_to_socket_id(lcore_id);
+
+ /*
+ * Prepare security sessions table. In outbound driver mode
+ * we always use first session configured for a given port
+ */
+ prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
+ RTE_MAX_ETHPORTS);
+
+ RTE_LOG(INFO, IPSEC,
+ "Launching event mode worker (non-burst - Tx internal port - "
+ "driver mode) on lcore %d\n", lcore_id);
+
+ /* We have valid links */
+
+ /* Check if it's single link */
+ if (nb_links != 1) {
+ RTE_LOG(INFO, IPSEC,
+ "Multiple links not supported. Using first link\n");
+ }
+
+ RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+ links[0].event_port_id);
+ while (!force_quit) {
+ /* Read packet from event queues */
+ nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+ links[0].event_port_id,
+ &ev, /* events */
+ 1, /* nb_events */
+ 0 /* timeout_ticks */);
+
+ if (nb_rx == 0)
+ continue;
+
+ pkt = ev.mbuf;
+ port_id = pkt->port;
+
+ rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
+
+ /* Process packet */
+ ipsec_event_pre_forward(pkt, port_id);
+
+ if (!UNPROTECTED_PORT(port_id)) {
+
+ if (unlikely(!sess_tbl[port_id])) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+
+ /* Save security session */
+ pkt->udata64 = (uint64_t) sess_tbl[port_id];
+
+ /* Mark the packet for Tx security offload */
+ pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ }
+
+ /*
+ * Since tx internal port is available, events can be
+ * directly enqueued to the adapter and it would be
+ * internally submitted to the eth device.
+ */
+ rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ &ev, /* events */
+ 1, /* nb_events */
+ 0 /* flags */);
+ }
+}
+
+static uint8_t
+ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
+{
+ struct eh_app_worker_params *wrkr;
+ uint8_t nb_wrkr_param = 0;
+
+ /* Save workers */
+ wrkr = wrkrs;
+
+ /* Non-burst - Tx internal port - driver mode */
+ wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
+ wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
+ wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
+ wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
+ wrkr++;
+
+ return nb_wrkr_param;
+}
+
+static void
+ipsec_eventmode_worker(struct eh_conf *conf)
+{
+ struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
+ {{{0} }, NULL } };
+ uint8_t nb_wrkr_param;
+
+ /* Populate l2fwd_wrkr params */
+ nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
+
+ /*
+ * Launch correct worker after checking
+ * the event device's capabilities.
+ */
+ eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
+}
+
+int ipsec_launch_one_lcore(void *args)
+{
+ struct eh_conf *conf;
+
+ conf = (struct eh_conf *)args;
+
+ if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
+ /* Run in poll mode */
+ ipsec_poll_mode_worker();
+ } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
+ /* Run in event mode */
+ ipsec_eventmode_worker(conf);
+ }
+ return 0;
+}
@@ -10,5 +10,5 @@ deps += ['security', 'lpm', 'acl', 'hash', 'ip_frag', 'ipsec', 'eventdev']
allow_experimental_apis = true
sources = files(
'esp.c', 'ipsec.c', 'ipsec_process.c', 'ipsec-secgw.c',
- 'parser.c', 'rt.c', 'sa.c', 'sp4.c', 'sp6.c', 'event_helper.c'
+ 'parser.c', 'rt.c', 'sa.c', 'sp4.c', 'sp6.c', 'event_helper.c', 'ipsec_worker.c'
)