Message ID | 1575808249-31135-12-git-send-email-anoobj@marvell.com (mailing list archive) |
---|---|
State | Superseded, archived |
Delegated to | Akhil Goyal |
Series | add eventmode to ipsec-secgw |
Context | Check | Description |
---|---|---|
ci/Intel-compilation | fail | Compilation issues |
ci/checkpatch | success | coding style OK |
> > Add IPsec application processing code for event mode. > > Signed-off-by: Anoob Joseph <anoobj@marvell.com> > Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com> > --- > examples/ipsec-secgw/ipsec-secgw.c | 124 ++++++------------ > examples/ipsec-secgw/ipsec-secgw.h | 81 ++++++++++++ > examples/ipsec-secgw/ipsec.h | 37 +++--- > examples/ipsec-secgw/ipsec_worker.c | 242 ++++++++++++++++++++++++++++++++++-- > examples/ipsec-secgw/ipsec_worker.h | 39 ++++++ > examples/ipsec-secgw/sa.c | 11 -- > 6 files changed, 409 insertions(+), 125 deletions(-) > create mode 100644 examples/ipsec-secgw/ipsec-secgw.h > create mode 100644 examples/ipsec-secgw/ipsec_worker.h > > diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c > index c5d95b9..2e7d4d8 100644 > --- a/examples/ipsec-secgw/ipsec-secgw.c > +++ b/examples/ipsec-secgw/ipsec-secgw.c > @@ -50,12 +50,11 @@ > > #include "event_helper.h" > #include "ipsec.h" > +#include "ipsec_worker.h" > #include "parser.h" > > volatile bool force_quit; > > -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 > - > #define MAX_JUMBO_PKT_LEN 9600 > > #define MEMPOOL_CACHE_SIZE 256 > @@ -70,8 +69,6 @@ volatile bool force_quit; > > #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ > > -#define NB_SOCKETS 4 > - > /* Configure how many packets ahead to prefetch, when reading packets */ > #define PREFETCH_OFFSET 3 > > @@ -79,8 +76,6 @@ volatile bool force_quit; > > #define MAX_LCORE_PARAMS 1024 > > -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) > - > /* > * Configurable number of RX/TX ring descriptors > */ > @@ -89,29 +84,6 @@ volatile bool force_quit; > static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; > static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; > > -#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN > -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ > - (((uint64_t)((a) & 0xff) << 56) | \ > - ((uint64_t)((b) & 0xff) << 48) | \ > - ((uint64_t)((c) & 0xff) << 40) | \ > - ((uint64_t)((d) & 0xff) << 32) | \ > - ((uint64_t)((e) & 0xff) << 24) | \ > - ((uint64_t)((f) & 0xff) << 16) | \ > - ((uint64_t)((g) & 0xff) << 8) | \ > - ((uint64_t)(h) & 0xff)) > -#else > -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ > - (((uint64_t)((h) & 0xff) << 56) | \ > - ((uint64_t)((g) & 0xff) << 48) | \ > - ((uint64_t)((f) & 0xff) << 40) | \ > - ((uint64_t)((e) & 0xff) << 32) | \ > - ((uint64_t)((d) & 0xff) << 24) | \ > - ((uint64_t)((c) & 0xff) << 16) | \ > - ((uint64_t)((b) & 0xff) << 8) | \ > - ((uint64_t)(a) & 0xff)) > -#endif > -#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) > - > #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ > (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ > (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ > @@ -123,18 +95,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; > > #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) > > -/* port/source ethernet addr and destination ethernet addr */ > -struct ethaddr_info { > - uint64_t src, dst; > -}; > - > -struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { > - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, > - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, > - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, > - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } > -}; > - > struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; > > #define CMD_LINE_OPT_CONFIG "config" > @@ -192,10 +152,16 @@ static const struct option lgopts[] = { > {NULL, 0, 0, 0} > }; > > +struct 
ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { > + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, > + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, > + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, > + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } > +}; > + > /* mask of enabled ports */ > static uint32_t enabled_port_mask; > static uint64_t enabled_cryptodev_mask = UINT64_MAX; > -static uint32_t unprotected_port_mask; > static int32_t promiscuous_on = 1; > static int32_t numa_on = 1; /**< NUMA is enabled by default. */ > static uint32_t nb_lcores; > @@ -283,8 +249,6 @@ static struct rte_eth_conf port_conf = { > }, > }; > > -static struct socket_ctx socket_ctx[NB_SOCKETS]; > - > /* > * Determine is multi-segment support required: > * - either frame buffer size is smaller then mtu > @@ -2828,47 +2792,10 @@ main(int32_t argc, char **argv) > > sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); > port_init(portid, req_rx_offloads, req_tx_offloads); > - /* Create default ipsec flow for the ethernet device */ > - ret = create_default_ipsec_flow(portid, req_rx_offloads); > - if (ret) > - printf("Cannot create default flow, err=%d, port=%d\n", > - ret, portid); > } > > cryptodevs_init(); > > - /* start ports */ > - RTE_ETH_FOREACH_DEV(portid) { > - if ((enabled_port_mask & (1 << portid)) == 0) > - continue; > - > - /* > - * Start device > - * note: device must be started before a flow rule > - * can be installed. > - */ > - ret = rte_eth_dev_start(portid); > - if (ret < 0) > - rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " > - "err=%d, port=%d\n", ret, portid); > - /* > - * If enabled, put device in promiscuous mode. > - * This allows IO forwarding mode to forward packets > - * to itself through 2 cross-connected ports of the > - * target machine. > - */ > - if (promiscuous_on) { > - ret = rte_eth_promiscuous_enable(portid); > - if (ret != 0) > - rte_exit(EXIT_FAILURE, > - "rte_eth_promiscuous_enable: err=%s, port=%d\n", > - rte_strerror(-ret), portid); > - } > - > - rte_eth_dev_callback_register(portid, > - RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); > - } > - > /* fragment reassemble is enabled */ > if (frag_tbl_sz != 0) { > ret = reassemble_init(); > @@ -2889,8 +2816,6 @@ main(int32_t argc, char **argv) > } > } > > - check_all_ports_link_status(enabled_port_mask); > - > /* > * Set the enabled port mask in helper config for use by helper > * sub-system. This will be used while intializing devices using > @@ -2903,6 +2828,39 @@ main(int32_t argc, char **argv) > if (ret < 0) > rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); > > + /* Create default ipsec flow for each port and start each port */ > + RTE_ETH_FOREACH_DEV(portid) { > + if ((enabled_port_mask & (1 << portid)) == 0) > + continue; > + > + ret = create_default_ipsec_flow(portid, req_rx_offloads); That doesn't look right. For more than one eth port in the system, req_rx_offloads will be overwritten by that moment. > + if (ret) > + printf("create_default_ipsec_flow failed, err=%d, " > + "port=%d\n", ret, portid); > + /* > + * Start device > + * note: device must be started before a flow rule > + * can be installed. > + */ > + ret = rte_eth_dev_start(portid); Moving that piece of code (dev_start) after sa_init() breaks ixgbe inline-crypto support. As I understand, because configured ipsec flows don't persist dev_start(). At least for ixgbe PMD. Any reason why to move that code at all? 
> + if (ret < 0) > + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " > + "err=%d, port=%d\n", ret, portid); > + /* > + * If enabled, put device in promiscuous mode. > + * This allows IO forwarding mode to forward packets > + * to itself through 2 cross-connected ports of the > + * target machine. > + */ > + if (promiscuous_on) > + rte_eth_promiscuous_enable(portid); > + > + rte_eth_dev_callback_register(portid, > + RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); > + } > + > + check_all_ports_link_status(enabled_port_mask); > + > /* launch per-lcore init on every lcore */ > rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER); >
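A minimal sketch of the per-port fix the review comment calls for (illustrative only; the port_rx_offloads/port_tx_offloads arrays are assumptions, not part of the patch): remember the offloads computed for each port in the first loop, then use the saved value when creating the default flow, instead of the req_rx_offloads local that only holds the last port's value.

static uint64_t port_rx_offloads[RTE_MAX_ETHPORTS];
static uint64_t port_tx_offloads[RTE_MAX_ETHPORTS];

	/* First loop: remember the offloads each port was configured with */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;
		sa_check_offloads(portid, &port_rx_offloads[portid],
				&port_tx_offloads[portid]);
		port_init(portid, port_rx_offloads[portid],
				port_tx_offloads[portid]);
	}

	/* Second loop: use the saved per-port value, not the last one seen */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;
		ret = create_default_ipsec_flow(portid,
				port_rx_offloads[portid]);
		if (ret)
			printf("create_default_ipsec_flow failed, err=%d, "
					"port=%d\n", ret, portid);
	}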
> --- a/examples/ipsec-secgw/ipsec_worker.c > +++ b/examples/ipsec-secgw/ipsec_worker.c > @@ -15,6 +15,7 @@ > #include <ctype.h> > #include <stdbool.h> > > +#include <rte_acl.h> > #include <rte_common.h> > #include <rte_log.h> > #include <rte_memcpy.h> > @@ -29,12 +30,51 @@ > #include <rte_eventdev.h> > #include <rte_malloc.h> > #include <rte_mbuf.h> > +#include <rte_lpm.h> > +#include <rte_lpm6.h> > > #include "ipsec.h" > +#include "ipsec_worker.h" > #include "event_helper.h" > > extern volatile bool force_quit; > > +static inline enum pkt_type > +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) > +{ > + struct rte_ether_hdr *eth; > + > + eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); > + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { > + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + > + offsetof(struct ip, ip_p)); > + if (**nlp == IPPROTO_ESP) > + return PKT_TYPE_IPSEC_IPV4; > + else > + return PKT_TYPE_PLAIN_IPV4; > + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { > + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + > + offsetof(struct ip6_hdr, ip6_nxt)); > + if (**nlp == IPPROTO_ESP) > + return PKT_TYPE_IPSEC_IPV6; > + else > + return PKT_TYPE_PLAIN_IPV6; > + } > + > + /* Unknown/Unsupported type */ > + return PKT_TYPE_INVALID; > +} Looking though that file, it seems like you choose to create your own set of helper functions, instead of trying to reuse existing ones: process_ipsec_get_pkt_type() VS prepare_one_packet() update_mac_addrs() VS prepare_tx_pkt() check_sp() VS inbound_sp_sa() Obviously there is nothing good in code (and possible bugs) duplication. Any reason why you can't reuse existing functions and need to reinvent your own? > + > +static inline void > +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) > +{ > + struct rte_ether_hdr *ethhdr; > + > + ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); > + memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN); > + memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN); > +} > + > static inline void > ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) > { > @@ -45,6 +85,177 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) > rte_event_eth_tx_adapter_txq_set(m, 0); > } > > +static inline int > +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) > +{ > + uint32_t res; > + > + if (unlikely(sp == NULL)) > + return 0; > + > + rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1, > + DEFAULT_MAX_CATEGORIES); > + > + if (unlikely(res == 0)) { > + /* No match */ > + return 0; > + } > + > + if (res == DISCARD) > + return 0; > + else if (res == BYPASS) { > + *sa_idx = 0; > + return 1; > + } > + > + *sa_idx = SPI2IDX(res); > + if (*sa_idx < IPSEC_SA_MAX_ENTRIES) > + return 1; > + > + /* Invalid SA IDX */ > + return 0; > +} > + > +static inline uint16_t > +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) > +{ > + uint32_t dst_ip; > + uint16_t offset; > + uint32_t hop; > + int ret; > + > + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst); > + dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset); > + dst_ip = rte_be_to_cpu_32(dst_ip); > + > + ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop); > + > + if (ret == 0) { > + /* We have a hit */ > + return hop; > + } > + > + /* else */ > + return RTE_MAX_ETHPORTS; > +} > + > +/* TODO: To be tested */ > +static inline uint16_t > +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) > +{ > + uint8_t dst_ip[16]; > + uint8_t 
*ip6_dst; > + uint16_t offset; > + uint32_t hop; > + int ret; > + > + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst); > + ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset); > + memcpy(&dst_ip[0], ip6_dst, 16); > + > + ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop); > + > + if (ret == 0) { > + /* We have a hit */ > + return hop; > + } > + > + /* else */ > + return RTE_MAX_ETHPORTS; > +} > + > +static inline uint16_t > +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type) > +{ > + if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4) > + return route4_pkt(pkt, rt->rt4_ctx); > + else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6) > + return route6_pkt(pkt, rt->rt6_ctx); > + > + return RTE_MAX_ETHPORTS; > +} > + > +static inline int > +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, > + struct rte_event *ev) > +{ > + struct ipsec_sa *sa = NULL; > + struct rte_mbuf *pkt; > + uint16_t port_id = 0; > + enum pkt_type type; > + uint32_t sa_idx; > + uint8_t *nlp; > + > + /* Get pkt from event */ > + pkt = ev->mbuf; > + > + /* Check the packet type */ > + type = process_ipsec_get_pkt_type(pkt, &nlp); > + > + switch (type) { > + case PKT_TYPE_PLAIN_IPV4: > + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) > + sa = (struct ipsec_sa *) pkt->udata64; > + > + /* Check if we have a match */ > + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { > + /* No valid match */ > + goto drop_pkt_and_exit; > + } > + break; > + > + case PKT_TYPE_PLAIN_IPV6: > + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) > + sa = (struct ipsec_sa *) pkt->udata64; > + > + /* Check if we have a match */ > + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { > + /* No valid match */ > + goto drop_pkt_and_exit; > + } > + break; > + > + default: > + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); > + goto drop_pkt_and_exit; > + } > + > + /* Check if the packet has to be bypassed */ > + if (sa_idx == 0) > + goto route_and_send_pkt; > + > + /* Else the packet has to be protected with SA */ > + > + /* If the packet was IPsec processed, then SA pointer should be set */ > + if (sa == NULL) > + goto drop_pkt_and_exit; > + > + /* SPI on the packet should match with the one in SA */ > + if (unlikely(sa->spi != sa_idx)) > + goto drop_pkt_and_exit; > + > +route_and_send_pkt: > + port_id = get_route(pkt, rt, type); > + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { > + /* no match */ > + goto drop_pkt_and_exit; > + } > + /* else, we have a matching route */ > + > + /* Update mac addresses */ > + update_mac_addrs(pkt, port_id); > + > + /* Update the event with the dest port */ > + ipsec_event_pre_forward(pkt, port_id); > + return 1; > + > +drop_pkt_and_exit: > + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); > + rte_pktmbuf_free(pkt); > + ev->mbuf = NULL; > + return 0; > +} > + > /* > * Event mode exposes various operating modes depending on the > * capabilities of the event device and the operating mode > @@ -134,11 +345,11 @@ static void > ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, > uint8_t nb_links) > { > + struct lcore_conf_ev_tx_int_port_wrkr lconf; > unsigned int nb_rx = 0; > - unsigned int port_id; > - struct rte_mbuf *pkt; > struct rte_event ev; > uint32_t lcore_id; > + int32_t socket_id; > > /* Check if we have links registered for this lcore */ > if (nb_links == 0) { > @@ -151,6 +362,21 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, > /* Get core ID */ > lcore_id = 
rte_lcore_id(); > > + /* Get socket ID */ > + socket_id = rte_lcore_to_socket_id(lcore_id); > + > + /* Save routing table */ > + lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4; > + lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6; > + lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; > + lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; > + lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in; > + lconf.inbound.session_pool = socket_ctx[socket_id].session_pool; > + lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; > + lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; > + lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out; > + lconf.outbound.session_pool = socket_ctx[socket_id].session_pool; > + > RTE_LOG(INFO, IPSEC, > "Launching event mode worker (non-burst - Tx internal port - " > "app mode - inbound) on lcore %d\n", lcore_id); > @@ -175,13 +401,11 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, > if (nb_rx == 0) > continue; > > - port_id = ev.queue_id; > - pkt = ev.mbuf; > - > - rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); > - > - /* Process packet */ > - ipsec_event_pre_forward(pkt, port_id); > + if (process_ipsec_ev_inbound(&lconf.inbound, > + &lconf.rt, &ev) != 1) { > + /* The pkt has been dropped */ > + continue; > + } > > /* > * Since tx internal port is available, events can be > diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h > new file mode 100644 > index 0000000..fd18a2e > --- /dev/null > +++ b/examples/ipsec-secgw/ipsec_worker.h > @@ -0,0 +1,39 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright(c) 2018 Cavium, Inc > + */ > +#ifndef _IPSEC_WORKER_H_ > +#define _IPSEC_WORKER_H_ > + > +#include "ipsec.h" > + > +enum pkt_type { > + PKT_TYPE_PLAIN_IPV4 = 1, > + PKT_TYPE_IPSEC_IPV4, > + PKT_TYPE_PLAIN_IPV6, > + PKT_TYPE_IPSEC_IPV6, > + PKT_TYPE_INVALID > +}; > + > +struct route_table { > + struct rt_ctx *rt4_ctx; > + struct rt_ctx *rt6_ctx; > +}; > + > +/* > + * Conf required by event mode worker with tx internal port > + */ > +struct lcore_conf_ev_tx_int_port_wrkr { > + struct ipsec_ctx inbound; > + struct ipsec_ctx outbound; > + struct route_table rt; > +} __rte_cache_aligned; > + > +/* TODO > + * > + * Move this function to ipsec_worker.c > + */ > +void ipsec_poll_mode_worker(void); > + > +int ipsec_launch_one_lcore(void *args); > + > +#endif /* _IPSEC_WORKER_H_ */ > diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c > index 7f046e3..9e17ba0 100644 > --- a/examples/ipsec-secgw/sa.c > +++ b/examples/ipsec-secgw/sa.c > @@ -772,17 +772,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound) > printf("\n"); > } > > -struct sa_ctx { > - void *satbl; /* pointer to array of rte_ipsec_sa objects*/ > - struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; > - union { > - struct { > - struct rte_crypto_sym_xform a; > - struct rte_crypto_sym_xform b; > - }; > - } xf[IPSEC_SA_MAX_ENTRIES]; > -}; > - > static struct sa_ctx * > sa_create(const char *name, int32_t socket_id) > { > -- > 2.7.4
> +static inline int > +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, > + struct rte_event *ev) > +{ > + struct ipsec_sa *sa = NULL; > + struct rte_mbuf *pkt; > + uint16_t port_id = 0; > + enum pkt_type type; > + uint32_t sa_idx; > + uint8_t *nlp; > + > + /* Get pkt from event */ > + pkt = ev->mbuf; > + > + /* Check the packet type */ > + type = process_ipsec_get_pkt_type(pkt, &nlp); > + > + switch (type) { > + case PKT_TYPE_PLAIN_IPV4: > + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) > + sa = (struct ipsec_sa *) pkt->udata64; Shouldn't packets with PKT_RX_SEC_OFFLOAD_FAIL be handled somehow? Another question - as I can see from the code, right now event mode supports only inline-proto, correct? If so, then probably an error should be reported at startup, if in config file some other types of sessions were requested. > + > + /* Check if we have a match */ > + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { > + /* No valid match */ > + goto drop_pkt_and_exit; > + } > + break; > + > + case PKT_TYPE_PLAIN_IPV6: > + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) > + sa = (struct ipsec_sa *) pkt->udata64; > + > + /* Check if we have a match */ > + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { > + /* No valid match */ > + goto drop_pkt_and_exit; > + } > + break; > + > + default: > + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); > + goto drop_pkt_and_exit; > + } > + > + /* Check if the packet has to be bypassed */ > + if (sa_idx == 0) > + goto route_and_send_pkt; > + > + /* Else the packet has to be protected with SA */ > + > + /* If the packet was IPsec processed, then SA pointer should be set */ > + if (sa == NULL) > + goto drop_pkt_and_exit; > + > + /* SPI on the packet should match with the one in SA */ > + if (unlikely(sa->spi != sa_idx)) > + goto drop_pkt_and_exit; > + > +route_and_send_pkt: > + port_id = get_route(pkt, rt, type); > + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { > + /* no match */ > + goto drop_pkt_and_exit; > + } > + /* else, we have a matching route */ > + > + /* Update mac addresses */ > + update_mac_addrs(pkt, port_id); > + > + /* Update the event with the dest port */ > + ipsec_event_pre_forward(pkt, port_id); > + return 1; > + > +drop_pkt_and_exit: > + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); > + rte_pktmbuf_free(pkt); > + ev->mbuf = NULL; > + return 0; > +} > +
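One possible shape of the check being asked for (a sketch, not the actual v2 change; note that the mbuf flag is spelled PKT_RX_SEC_OFFLOAD_FAILED in rte_mbuf.h): drop inbound packets whose inline IPsec processing failed before doing the SP lookup.

	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			/* Inline processing failed: do not trust udata64 */
			if (unlikely(pkt->ol_flags &
					PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = (struct ipsec_sa *) pkt->udata64;
		}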
Hi Konstantin, Please see inline. Thanks, Anoob > -----Original Message----- > From: Ananyev, Konstantin <konstantin.ananyev@intel.com> > Sent: Wednesday, December 25, 2019 8:49 PM > To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal <akhil.goyal@nxp.com>; > Nicolau, Radu <radu.nicolau@intel.com>; Thomas Monjalon > <thomas@monjalon.net> > Cc: Lukas Bartosik <lbartosik@marvell.com>; Jerin Jacob Kollanukkaran > <jerinj@marvell.com>; Narayana Prasad Raju Athreya > <pathreya@marvell.com>; Ankur Dwivedi <adwivedi@marvell.com>; Archana > Muniganti <marchana@marvell.com>; Tejasree Kondoj > <ktejasree@marvell.com>; Vamsi Krishna Attunuru <vattunuru@marvell.com>; > dev@dpdk.org > Subject: [EXT] RE: [PATCH 11/14] examples/ipsec-secgw: add app processing > code > > External Email > > ---------------------------------------------------------------------- > > > +static inline int > > +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, > > + struct rte_event *ev) > > +{ > > + struct ipsec_sa *sa = NULL; > > + struct rte_mbuf *pkt; > > + uint16_t port_id = 0; > > + enum pkt_type type; > > + uint32_t sa_idx; > > + uint8_t *nlp; > > + > > + /* Get pkt from event */ > > + pkt = ev->mbuf; > > + > > + /* Check the packet type */ > > + type = process_ipsec_get_pkt_type(pkt, &nlp); > > + > > + switch (type) { > > + case PKT_TYPE_PLAIN_IPV4: > > + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) > > + sa = (struct ipsec_sa *) pkt->udata64; > > > Shouldn't packets with PKT_RX_SEC_OFFLOAD_FAIL be handled somehow? [Anoob] Yes. Will fix this in v2. > Another question - as I can see from the code, right now event mode supports > only inline-proto, correct? > If so, then probably an error should be reported at startup, if in config file > some other types of sessions were requested. [Anoob] Okay. Will add this in v2. > > > + > > + /* Check if we have a match */ > > + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { > > + /* No valid match */ > > + goto drop_pkt_and_exit; > > + } > > + break; > > + > > + case PKT_TYPE_PLAIN_IPV6: > > + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) > > + sa = (struct ipsec_sa *) pkt->udata64; > > + > > + /* Check if we have a match */ > > + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { > > + /* No valid match */ > > + goto drop_pkt_and_exit; > > + } > > + break; > > + > > + default: > > + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); > > + goto drop_pkt_and_exit; > > + } > > + > > + /* Check if the packet has to be bypassed */ > > + if (sa_idx == 0) > > + goto route_and_send_pkt; > > + > > + /* Else the packet has to be protected with SA */ > > + > > + /* If the packet was IPsec processed, then SA pointer should be set */ > > + if (sa == NULL) > > + goto drop_pkt_and_exit; > > + > > + /* SPI on the packet should match with the one in SA */ > > + if (unlikely(sa->spi != sa_idx)) > > + goto drop_pkt_and_exit; > > + > > +route_and_send_pkt: > > + port_id = get_route(pkt, rt, type); > > + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { > > + /* no match */ > > + goto drop_pkt_and_exit; > > + } > > + /* else, we have a matching route */ > > + > > + /* Update mac addresses */ > > + update_mac_addrs(pkt, port_id); > > + > > + /* Update the event with the dest port */ > > + ipsec_event_pre_forward(pkt, port_id); > > + return 1; > > + > > +drop_pkt_and_exit: > > + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); > > + rte_pktmbuf_free(pkt); > > + ev->mbuf = NULL; > > + return 0; > > +} > > +
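A sketch of the startup check agreed to above (assumed shape only; the function name and the sa[i].type field are illustrative, not the actual v2 code): in event mode, abort at startup if the config file requested any session type other than inline-protocol.

static void
check_event_mode_sa_types(struct sa_ctx *sa_ctx, uint32_t nb_sa)
{
	uint32_t i;

	for (i = 0; i < nb_sa; i++) {
		/* sa[i].type is assumed to hold the configured
		 * rte_security action type for the SA */
		if (sa_ctx->sa[i].type !=
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
			rte_exit(EXIT_FAILURE,
				"Event mode supports inline-protocol "
				"sessions only\n");
	}
}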
Hi Konstantin, Please see inline. Thanks, Lukasz On 23.12.2019 17:49, Ananyev, Konstantin wrote: > External Email > > ---------------------------------------------------------------------- > > >> >> Add IPsec application processing code for event mode. >> >> Signed-off-by: Anoob Joseph <anoobj@marvell.com> >> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com> >> --- >> examples/ipsec-secgw/ipsec-secgw.c | 124 ++++++------------ >> examples/ipsec-secgw/ipsec-secgw.h | 81 ++++++++++++ >> examples/ipsec-secgw/ipsec.h | 37 +++--- >> examples/ipsec-secgw/ipsec_worker.c | 242 ++++++++++++++++++++++++++++++++++-- >> examples/ipsec-secgw/ipsec_worker.h | 39 ++++++ >> examples/ipsec-secgw/sa.c | 11 -- >> 6 files changed, 409 insertions(+), 125 deletions(-) >> create mode 100644 examples/ipsec-secgw/ipsec-secgw.h >> create mode 100644 examples/ipsec-secgw/ipsec_worker.h >> >> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c >> index c5d95b9..2e7d4d8 100644 >> --- a/examples/ipsec-secgw/ipsec-secgw.c >> +++ b/examples/ipsec-secgw/ipsec-secgw.c >> @@ -50,12 +50,11 @@ >> >> #include "event_helper.h" >> #include "ipsec.h" >> +#include "ipsec_worker.h" >> #include "parser.h" >> >> volatile bool force_quit; >> >> -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 >> - >> #define MAX_JUMBO_PKT_LEN 9600 >> >> #define MEMPOOL_CACHE_SIZE 256 >> @@ -70,8 +69,6 @@ volatile bool force_quit; >> >> #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ >> >> -#define NB_SOCKETS 4 >> - >> /* Configure how many packets ahead to prefetch, when reading packets */ >> #define PREFETCH_OFFSET 3 >> >> @@ -79,8 +76,6 @@ volatile bool force_quit; >> >> #define MAX_LCORE_PARAMS 1024 >> >> -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) >> - >> /* >> * Configurable number of RX/TX ring descriptors >> */ >> @@ -89,29 +84,6 @@ volatile bool force_quit; >> static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; >> static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; >> >> -#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN >> -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ >> - (((uint64_t)((a) & 0xff) << 56) | \ >> - ((uint64_t)((b) & 0xff) << 48) | \ >> - ((uint64_t)((c) & 0xff) << 40) | \ >> - ((uint64_t)((d) & 0xff) << 32) | \ >> - ((uint64_t)((e) & 0xff) << 24) | \ >> - ((uint64_t)((f) & 0xff) << 16) | \ >> - ((uint64_t)((g) & 0xff) << 8) | \ >> - ((uint64_t)(h) & 0xff)) >> -#else >> -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ >> - (((uint64_t)((h) & 0xff) << 56) | \ >> - ((uint64_t)((g) & 0xff) << 48) | \ >> - ((uint64_t)((f) & 0xff) << 40) | \ >> - ((uint64_t)((e) & 0xff) << 32) | \ >> - ((uint64_t)((d) & 0xff) << 24) | \ >> - ((uint64_t)((c) & 0xff) << 16) | \ >> - ((uint64_t)((b) & 0xff) << 8) | \ >> - ((uint64_t)(a) & 0xff)) >> -#endif >> -#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) >> - >> #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ >> (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ >> (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ >> @@ -123,18 +95,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; >> >> #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) >> >> -/* port/source ethernet addr and destination ethernet addr */ >> -struct ethaddr_info { >> - uint64_t src, dst; >> -}; >> - >> -struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { >> - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, >> - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, >> - { 0, ETHADDR(0x00, 0x16, 
0x3e, 0x08, 0x69, 0x26) }, >> - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } >> -}; >> - >> struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; >> >> #define CMD_LINE_OPT_CONFIG "config" >> @@ -192,10 +152,16 @@ static const struct option lgopts[] = { >> {NULL, 0, 0, 0} >> }; >> >> +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { >> + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, >> + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, >> + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, >> + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } >> +}; >> + >> /* mask of enabled ports */ >> static uint32_t enabled_port_mask; >> static uint64_t enabled_cryptodev_mask = UINT64_MAX; >> -static uint32_t unprotected_port_mask; >> static int32_t promiscuous_on = 1; >> static int32_t numa_on = 1; /**< NUMA is enabled by default. */ >> static uint32_t nb_lcores; >> @@ -283,8 +249,6 @@ static struct rte_eth_conf port_conf = { >> }, >> }; >> >> -static struct socket_ctx socket_ctx[NB_SOCKETS]; >> - >> /* >> * Determine is multi-segment support required: >> * - either frame buffer size is smaller then mtu >> @@ -2828,47 +2792,10 @@ main(int32_t argc, char **argv) >> >> sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); >> port_init(portid, req_rx_offloads, req_tx_offloads); >> - /* Create default ipsec flow for the ethernet device */ >> - ret = create_default_ipsec_flow(portid, req_rx_offloads); >> - if (ret) >> - printf("Cannot create default flow, err=%d, port=%d\n", >> - ret, portid); >> } >> >> cryptodevs_init(); >> >> - /* start ports */ >> - RTE_ETH_FOREACH_DEV(portid) { >> - if ((enabled_port_mask & (1 << portid)) == 0) >> - continue; >> - >> - /* >> - * Start device >> - * note: device must be started before a flow rule >> - * can be installed. >> - */ >> - ret = rte_eth_dev_start(portid); >> - if (ret < 0) >> - rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " >> - "err=%d, port=%d\n", ret, portid); >> - /* >> - * If enabled, put device in promiscuous mode. >> - * This allows IO forwarding mode to forward packets >> - * to itself through 2 cross-connected ports of the >> - * target machine. >> - */ >> - if (promiscuous_on) { >> - ret = rte_eth_promiscuous_enable(portid); >> - if (ret != 0) >> - rte_exit(EXIT_FAILURE, >> - "rte_eth_promiscuous_enable: err=%s, port=%d\n", >> - rte_strerror(-ret), portid); >> - } >> - >> - rte_eth_dev_callback_register(portid, >> - RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); >> - } >> - >> /* fragment reassemble is enabled */ >> if (frag_tbl_sz != 0) { >> ret = reassemble_init(); >> @@ -2889,8 +2816,6 @@ main(int32_t argc, char **argv) >> } >> } >> >> - check_all_ports_link_status(enabled_port_mask); >> - >> /* >> * Set the enabled port mask in helper config for use by helper >> * sub-system. This will be used while intializing devices using >> @@ -2903,6 +2828,39 @@ main(int32_t argc, char **argv) >> if (ret < 0) >> rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); >> >> + /* Create default ipsec flow for each port and start each port */ >> + RTE_ETH_FOREACH_DEV(portid) { >> + if ((enabled_port_mask & (1 << portid)) == 0) >> + continue; >> + >> + ret = create_default_ipsec_flow(portid, req_rx_offloads); > > That doesn't look right. > For more than one eth port in the system, req_rx_offloads will be overwritten by that moment. [Lukasz] You're right. I will fix it in v2. 
> >> + if (ret) >> + printf("create_default_ipsec_flow failed, err=%d, " >> + "port=%d\n", ret, portid); >> + /* >> + * Start device >> + * note: device must be started before a flow rule >> + * can be installed. >> + */ >> + ret = rte_eth_dev_start(portid); > > Moving that piece of code (dev_start) after sa_init() breaks ixgbe inline-crypto support. > As I understand, because configured ipsec flows don't persist dev_start(). > At least for ixgbe PMD. > Any reason why to move that code at all? [Lukasz] We moved starting the eth port until after creation of the default ipsec flow in order to stop packets from temporarily bypassing inline processing (after the eth port is started but before the flow is created). This happens if traffic is already flowing when the ipsec-secgw app is started. However, moving eth_dev_start after sa_init is not necessary. I will revert this change and start the eth ports before sa_init. >> + if (ret < 0) >> + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " >> + "err=%d, port=%d\n", ret, portid); >> + /* >> + * If enabled, put device in promiscuous mode. >> + * This allows IO forwarding mode to forward packets >> + * to itself through 2 cross-connected ports of the >> + * target machine. >> + */ >> + if (promiscuous_on) >> + rte_eth_promiscuous_enable(portid); >> + >> + rte_eth_dev_callback_register(portid, >> + RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); >> + } >> + >> + check_all_ports_link_status(enabled_port_mask); >> + >> /* launch per-lcore init on every lcore */ >> rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER); >>
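The ordering described in the reply, sketched for clarity (assumed v2 shape, not actual code; per-port offloads as in the earlier sketch): install the default flow immediately before starting each port so the first received packet already hits the rule, and run the whole loop ahead of sa_init() so ixgbe inline-crypto flows are not lost across dev_start().

	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;
		/* Install the default flow first, so the first packet
		 * received after start already hits the rule */
		ret = create_default_ipsec_flow(portid,
				port_rx_offloads[portid]);
		if (ret)
			printf("create_default_ipsec_flow failed, err=%d, "
					"port=%d\n", ret, portid);
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);
	}

	/* sa_init()/sp4_init()/sp6_init() run only after all ports are
	 * started, matching the pre-patch ordering */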
Hi Konstantin, Please see inline. Thanks, Lukasz On 24.12.2019 14:13, Ananyev, Konstantin wrote: > External Email > > ---------------------------------------------------------------------- > >> --- a/examples/ipsec-secgw/ipsec_worker.c >> +++ b/examples/ipsec-secgw/ipsec_worker.c >> @@ -15,6 +15,7 @@ >> #include <ctype.h> >> #include <stdbool.h> >> >> +#include <rte_acl.h> >> #include <rte_common.h> >> #include <rte_log.h> >> #include <rte_memcpy.h> >> @@ -29,12 +30,51 @@ >> #include <rte_eventdev.h> >> #include <rte_malloc.h> >> #include <rte_mbuf.h> >> +#include <rte_lpm.h> >> +#include <rte_lpm6.h> >> >> #include "ipsec.h" >> +#include "ipsec_worker.h" >> #include "event_helper.h" >> >> extern volatile bool force_quit; >> >> +static inline enum pkt_type >> +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) >> +{ >> + struct rte_ether_hdr *eth; >> + >> + eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); >> + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { >> + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + >> + offsetof(struct ip, ip_p)); >> + if (**nlp == IPPROTO_ESP) >> + return PKT_TYPE_IPSEC_IPV4; >> + else >> + return PKT_TYPE_PLAIN_IPV4; >> + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { >> + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + >> + offsetof(struct ip6_hdr, ip6_nxt)); >> + if (**nlp == IPPROTO_ESP) >> + return PKT_TYPE_IPSEC_IPV6; >> + else >> + return PKT_TYPE_PLAIN_IPV6; >> + } >> + >> + /* Unknown/Unsupported type */ >> + return PKT_TYPE_INVALID; >> +} > > Looking though that file, it seems like you choose to create your own set of > helper functions, instead of trying to reuse existing ones: > > process_ipsec_get_pkt_type() VS prepare_one_packet() > update_mac_addrs() VS prepare_tx_pkt() > check_sp() VS inbound_sp_sa() > > Obviously there is nothing good in code (and possible bugs) duplication. > Any reason why you can't reuse existing functions and need to reinvent your own? [Lukasz] The prepare_one_packet() and prepare_tx_pkt() functions do much more than we need, so for performance reasons we crafted new functions. For example, process_ipsec_get_pkt_type() returns nlp and whether the packet type is plain or IPsec. That's all. prepare_one_packet() processes packets in chunks and does much more - it adjusts mbuf and ipv4 lengths, then demultiplexes the packet into plain and IPsec flows, and finally does inline checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and check_sp() vs inbound_sp_sa(): prepare_tx_pkt() and inbound_sp_sa() do more than we need in event mode.
> >> + >> +static inline void >> +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) >> +{ >> + struct rte_ether_hdr *ethhdr; >> + >> + ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); >> + memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN); >> + memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN); >> +} >> + >> static inline void >> ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) >> { >> @@ -45,6 +85,177 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) >> rte_event_eth_tx_adapter_txq_set(m, 0); >> } >> >> +static inline int >> +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) >> +{ >> + uint32_t res; >> + >> + if (unlikely(sp == NULL)) >> + return 0; >> + >> + rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1, >> + DEFAULT_MAX_CATEGORIES); >> + >> + if (unlikely(res == 0)) { >> + /* No match */ >> + return 0; >> + } >> + >> + if (res == DISCARD) >> + return 0; >> + else if (res == BYPASS) { >> + *sa_idx = 0; >> + return 1; >> + } >> + >> + *sa_idx = SPI2IDX(res); >> + if (*sa_idx < IPSEC_SA_MAX_ENTRIES) >> + return 1; >> + >> + /* Invalid SA IDX */ >> + return 0; >> +} >> + >> +static inline uint16_t >> +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) >> +{ >> + uint32_t dst_ip; >> + uint16_t offset; >> + uint32_t hop; >> + int ret; >> + >> + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst); >> + dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset); >> + dst_ip = rte_be_to_cpu_32(dst_ip); >> + >> + ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop); >> + >> + if (ret == 0) { >> + /* We have a hit */ >> + return hop; >> + } >> + >> + /* else */ >> + return RTE_MAX_ETHPORTS; >> +} >> + >> +/* TODO: To be tested */ >> +static inline uint16_t >> +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) >> +{ >> + uint8_t dst_ip[16]; >> + uint8_t *ip6_dst; >> + uint16_t offset; >> + uint32_t hop; >> + int ret; >> + >> + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst); >> + ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset); >> + memcpy(&dst_ip[0], ip6_dst, 16); >> + >> + ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop); >> + >> + if (ret == 0) { >> + /* We have a hit */ >> + return hop; >> + } >> + >> + /* else */ >> + return RTE_MAX_ETHPORTS; >> +} >> + >> +static inline uint16_t >> +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type) >> +{ >> + if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4) >> + return route4_pkt(pkt, rt->rt4_ctx); >> + else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6) >> + return route6_pkt(pkt, rt->rt6_ctx); >> + >> + return RTE_MAX_ETHPORTS; >> +} >> + >> +static inline int >> +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, >> + struct rte_event *ev) >> +{ >> + struct ipsec_sa *sa = NULL; >> + struct rte_mbuf *pkt; >> + uint16_t port_id = 0; >> + enum pkt_type type; >> + uint32_t sa_idx; >> + uint8_t *nlp; >> + >> + /* Get pkt from event */ >> + pkt = ev->mbuf; >> + >> + /* Check the packet type */ >> + type = process_ipsec_get_pkt_type(pkt, &nlp); >> + >> + switch (type) { >> + case PKT_TYPE_PLAIN_IPV4: >> + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) >> + sa = (struct ipsec_sa *) pkt->udata64; >> + >> + /* Check if we have a match */ >> + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { >> + /* No valid match */ >> + goto drop_pkt_and_exit; >> + } >> + break; >> + >> + case PKT_TYPE_PLAIN_IPV6: >> + if (pkt->ol_flags & 
PKT_RX_SEC_OFFLOAD) >> + sa = (struct ipsec_sa *) pkt->udata64; >> + >> + /* Check if we have a match */ >> + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { >> + /* No valid match */ >> + goto drop_pkt_and_exit; >> + } >> + break; >> + >> + default: >> + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); >> + goto drop_pkt_and_exit; >> + } >> + >> + /* Check if the packet has to be bypassed */ >> + if (sa_idx == 0) >> + goto route_and_send_pkt; >> + >> + /* Else the packet has to be protected with SA */ >> + >> + /* If the packet was IPsec processed, then SA pointer should be set */ >> + if (sa == NULL) >> + goto drop_pkt_and_exit; >> + >> + /* SPI on the packet should match with the one in SA */ >> + if (unlikely(sa->spi != sa_idx)) >> + goto drop_pkt_and_exit; >> + >> +route_and_send_pkt: >> + port_id = get_route(pkt, rt, type); >> + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { >> + /* no match */ >> + goto drop_pkt_and_exit; >> + } >> + /* else, we have a matching route */ >> + >> + /* Update mac addresses */ >> + update_mac_addrs(pkt, port_id); >> + >> + /* Update the event with the dest port */ >> + ipsec_event_pre_forward(pkt, port_id); >> + return 1; >> + >> +drop_pkt_and_exit: >> + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); >> + rte_pktmbuf_free(pkt); >> + ev->mbuf = NULL; >> + return 0; >> +} >> + >> /* >> * Event mode exposes various operating modes depending on the >> * capabilities of the event device and the operating mode >> @@ -134,11 +345,11 @@ static void >> ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, >> uint8_t nb_links) >> { >> + struct lcore_conf_ev_tx_int_port_wrkr lconf; >> unsigned int nb_rx = 0; >> - unsigned int port_id; >> - struct rte_mbuf *pkt; >> struct rte_event ev; >> uint32_t lcore_id; >> + int32_t socket_id; >> >> /* Check if we have links registered for this lcore */ >> if (nb_links == 0) { >> @@ -151,6 +362,21 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, >> /* Get core ID */ >> lcore_id = rte_lcore_id(); >> >> + /* Get socket ID */ >> + socket_id = rte_lcore_to_socket_id(lcore_id); >> + >> + /* Save routing table */ >> + lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4; >> + lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6; >> + lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; >> + lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; >> + lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in; >> + lconf.inbound.session_pool = socket_ctx[socket_id].session_pool; >> + lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; >> + lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; >> + lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out; >> + lconf.outbound.session_pool = socket_ctx[socket_id].session_pool; >> + >> RTE_LOG(INFO, IPSEC, >> "Launching event mode worker (non-burst - Tx internal port - " >> "app mode - inbound) on lcore %d\n", lcore_id); >> @@ -175,13 +401,11 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, >> if (nb_rx == 0) >> continue; >> >> - port_id = ev.queue_id; >> - pkt = ev.mbuf; >> - >> - rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); >> - >> - /* Process packet */ >> - ipsec_event_pre_forward(pkt, port_id); >> + if (process_ipsec_ev_inbound(&lconf.inbound, >> + &lconf.rt, &ev) != 1) { >> + /* The pkt has been dropped */ >> + continue; >> + } >> >> /* >> * Since tx internal port is available, events can be >> diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h >> new 
file mode 100644 >> index 0000000..fd18a2e >> --- /dev/null >> +++ b/examples/ipsec-secgw/ipsec_worker.h >> @@ -0,0 +1,39 @@ >> +/* SPDX-License-Identifier: BSD-3-Clause >> + * Copyright(c) 2018 Cavium, Inc >> + */ >> +#ifndef _IPSEC_WORKER_H_ >> +#define _IPSEC_WORKER_H_ >> + >> +#include "ipsec.h" >> + >> +enum pkt_type { >> + PKT_TYPE_PLAIN_IPV4 = 1, >> + PKT_TYPE_IPSEC_IPV4, >> + PKT_TYPE_PLAIN_IPV6, >> + PKT_TYPE_IPSEC_IPV6, >> + PKT_TYPE_INVALID >> +}; >> + >> +struct route_table { >> + struct rt_ctx *rt4_ctx; >> + struct rt_ctx *rt6_ctx; >> +}; >> + >> +/* >> + * Conf required by event mode worker with tx internal port >> + */ >> +struct lcore_conf_ev_tx_int_port_wrkr { >> + struct ipsec_ctx inbound; >> + struct ipsec_ctx outbound; >> + struct route_table rt; >> +} __rte_cache_aligned; >> + >> +/* TODO >> + * >> + * Move this function to ipsec_worker.c >> + */ >> +void ipsec_poll_mode_worker(void); >> + >> +int ipsec_launch_one_lcore(void *args); >> + >> +#endif /* _IPSEC_WORKER_H_ */ >> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c >> index 7f046e3..9e17ba0 100644 >> --- a/examples/ipsec-secgw/sa.c >> +++ b/examples/ipsec-secgw/sa.c >> @@ -772,17 +772,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound) >> printf("\n"); >> } >> >> -struct sa_ctx { >> - void *satbl; /* pointer to array of rte_ipsec_sa objects*/ >> - struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; >> - union { >> - struct { >> - struct rte_crypto_sym_xform a; >> - struct rte_crypto_sym_xform b; >> - }; >> - } xf[IPSEC_SA_MAX_ENTRIES]; >> -}; >> - >> static struct sa_ctx * >> sa_create(const char *name, int32_t socket_id) >> { >> -- >> 2.7.4 >
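For reference, the single-packet fast path defended above boils down to this kind of usage (a sketch built from the helpers in the quoted patch):

	uint8_t *nlp;
	enum pkt_type type;

	/* One pass: classify the packet and fetch a pointer to its
	 * next-protocol byte, nothing else */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
	case PKT_TYPE_IPSEC_IPV4:
		/* *nlp feeds straight into check_sp(ctx->sp4_ctx, nlp, ...) */
		break;
	case PKT_TYPE_PLAIN_IPV6:
	case PKT_TYPE_IPSEC_IPV6:
		/* *nlp feeds straight into check_sp(ctx->sp6_ctx, nlp, ...) */
		break;
	default:
		/* Unknown/unsupported type: drop */
		rte_pktmbuf_free(pkt);
		break;
	}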
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index c5d95b9..2e7d4d8 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -50,12 +50,11 @@ #include "event_helper.h" #include "ipsec.h" +#include "ipsec_worker.h" #include "parser.h" volatile bool force_quit; -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 - #define MAX_JUMBO_PKT_LEN 9600 #define MEMPOOL_CACHE_SIZE 256 @@ -70,8 +69,6 @@ volatile bool force_quit; #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ -#define NB_SOCKETS 4 - /* Configure how many packets ahead to prefetch, when reading packets */ #define PREFETCH_OFFSET 3 @@ -79,8 +76,6 @@ volatile bool force_quit; #define MAX_LCORE_PARAMS 1024 -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) - /* * Configurable number of RX/TX ring descriptors */ @@ -89,29 +84,6 @@ volatile bool force_quit; static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; -#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((a) & 0xff) << 56) | \ - ((uint64_t)((b) & 0xff) << 48) | \ - ((uint64_t)((c) & 0xff) << 40) | \ - ((uint64_t)((d) & 0xff) << 32) | \ - ((uint64_t)((e) & 0xff) << 24) | \ - ((uint64_t)((f) & 0xff) << 16) | \ - ((uint64_t)((g) & 0xff) << 8) | \ - ((uint64_t)(h) & 0xff)) -#else -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((h) & 0xff) << 56) | \ - ((uint64_t)((g) & 0xff) << 48) | \ - ((uint64_t)((f) & 0xff) << 40) | \ - ((uint64_t)((e) & 0xff) << 32) | \ - ((uint64_t)((d) & 0xff) << 24) | \ - ((uint64_t)((c) & 0xff) << 16) | \ - ((uint64_t)((b) & 0xff) << 8) | \ - ((uint64_t)(a) & 0xff)) -#endif -#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) - #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ @@ -123,18 +95,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) -/* port/source ethernet addr and destination ethernet addr */ -struct ethaddr_info { - uint64_t src, dst; -}; - -struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } -}; - struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; #define CMD_LINE_OPT_CONFIG "config" @@ -192,10 +152,16 @@ static const struct option lgopts[] = { {NULL, 0, 0, 0} }; +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } +}; + /* mask of enabled ports */ static uint32_t enabled_port_mask; static uint64_t enabled_cryptodev_mask = UINT64_MAX; -static uint32_t unprotected_port_mask; static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default. 
*/ static uint32_t nb_lcores; @@ -283,8 +249,6 @@ static struct rte_eth_conf port_conf = { }, }; -static struct socket_ctx socket_ctx[NB_SOCKETS]; - /* * Determine is multi-segment support required: * - either frame buffer size is smaller then mtu @@ -2828,47 +2792,10 @@ main(int32_t argc, char **argv) sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); port_init(portid, req_rx_offloads, req_tx_offloads); - /* Create default ipsec flow for the ethernet device */ - ret = create_default_ipsec_flow(portid, req_rx_offloads); - if (ret) - printf("Cannot create default flow, err=%d, port=%d\n", - ret, portid); } cryptodevs_init(); - /* start ports */ - RTE_ETH_FOREACH_DEV(portid) { - if ((enabled_port_mask & (1 << portid)) == 0) - continue; - - /* - * Start device - * note: device must be started before a flow rule - * can be installed. - */ - ret = rte_eth_dev_start(portid); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " - "err=%d, port=%d\n", ret, portid); - /* - * If enabled, put device in promiscuous mode. - * This allows IO forwarding mode to forward packets - * to itself through 2 cross-connected ports of the - * target machine. - */ - if (promiscuous_on) { - ret = rte_eth_promiscuous_enable(portid); - if (ret != 0) - rte_exit(EXIT_FAILURE, - "rte_eth_promiscuous_enable: err=%s, port=%d\n", - rte_strerror(-ret), portid); - } - - rte_eth_dev_callback_register(portid, - RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); - } - /* fragment reassemble is enabled */ if (frag_tbl_sz != 0) { ret = reassemble_init(); @@ -2889,8 +2816,6 @@ main(int32_t argc, char **argv) } } - check_all_ports_link_status(enabled_port_mask); - /* * Set the enabled port mask in helper config for use by helper * sub-system. This will be used while intializing devices using @@ -2903,6 +2828,39 @@ main(int32_t argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); + /* Create default ipsec flow for each port and start each port */ + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + ret = create_default_ipsec_flow(portid, req_rx_offloads); + if (ret) + printf("create_default_ipsec_flow failed, err=%d, " + "port=%d\n", ret, portid); + /* + * Start device + * note: device must be started before a flow rule + * can be installed. + */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " + "err=%d, port=%d\n", ret, portid); + /* + * If enabled, put device in promiscuous mode. + * This allows IO forwarding mode to forward packets + * to itself through 2 cross-connected ports of the + * target machine. 
+ */ + if (promiscuous_on) + rte_eth_promiscuous_enable(portid); + + rte_eth_dev_callback_register(portid, + RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); + } + + check_all_ports_link_status(enabled_port_mask); + /* launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER); diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h new file mode 100644 index 0000000..67e1193 --- /dev/null +++ b/examples/ipsec-secgw/ipsec-secgw.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ +#ifndef _IPSEC_SECGW_H_ +#define _IPSEC_SECGW_H_ + +#include <rte_hash.h> + +#define MAX_PKT_BURST 32 + +#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 + +#define NB_SOCKETS 4 + +#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) + +#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((a) & 0xff) << 56) | \ + ((uint64_t)((b) & 0xff) << 48) | \ + ((uint64_t)((c) & 0xff) << 40) | \ + ((uint64_t)((d) & 0xff) << 32) | \ + ((uint64_t)((e) & 0xff) << 24) | \ + ((uint64_t)((f) & 0xff) << 16) | \ + ((uint64_t)((g) & 0xff) << 8) | \ + ((uint64_t)(h) & 0xff)) +#else +#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ + (((uint64_t)((h) & 0xff) << 56) | \ + ((uint64_t)((g) & 0xff) << 48) | \ + ((uint64_t)((f) & 0xff) << 40) | \ + ((uint64_t)((e) & 0xff) << 32) | \ + ((uint64_t)((d) & 0xff) << 24) | \ + ((uint64_t)((c) & 0xff) << 16) | \ + ((uint64_t)((b) & 0xff) << 8) | \ + ((uint64_t)(a) & 0xff)) +#endif + +#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) + +struct traffic_type { + const uint8_t *data[MAX_PKT_BURST * 2]; + struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; + void *saptr[MAX_PKT_BURST * 2]; + uint32_t res[MAX_PKT_BURST * 2]; + uint32_t num; +}; + +struct ipsec_traffic { + struct traffic_type ipsec; + struct traffic_type ip4; + struct traffic_type ip6; +}; + +/* Fields optimized for devices without burst */ +struct traffic_type_nb { + const uint8_t *data; + struct rte_mbuf *pkt; + uint32_t res; + uint32_t num; +}; + +struct ipsec_traffic_nb { + struct traffic_type_nb ipsec; + struct traffic_type_nb ip4; + struct traffic_type_nb ip6; +}; + +/* port/source ethernet addr and destination ethernet addr */ +struct ethaddr_info { + uint64_t src, dst; +}; + +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS]; + +/* TODO: All var definitions need to be part of a .c file */ + +/* Port mask to identify the unprotected ports */ +uint32_t unprotected_port_mask; + +#endif /* _IPSEC_SECGW_H_ */ diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 0b9fc04..0c5ee8a 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -13,11 +13,11 @@ #include <rte_flow.h> #include <rte_ipsec.h> -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 +#include "ipsec-secgw.h" + #define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2 #define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3 -#define MAX_PKT_BURST 32 #define MAX_INFLIGHT 128 #define MAX_QP_PER_LCORE 256 @@ -153,6 +153,17 @@ struct ipsec_sa { struct rte_security_session_conf sess_conf; } __rte_cache_aligned; +struct sa_ctx { + void *satbl; /* pointer to array of rte_ipsec_sa objects*/ + struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; + union { + struct { + struct rte_crypto_sym_xform a; + struct rte_crypto_sym_xform b; + }; + } xf[IPSEC_SA_MAX_ENTRIES]; +}; + struct ipsec_mbuf_metadata { struct ipsec_sa *sa; struct rte_crypto_op cop; @@ 
-233,26 +244,8 @@ struct cnt_blk { uint32_t cnt; } __attribute__((packed)); -struct traffic_type { - const uint8_t *data[MAX_PKT_BURST * 2]; - struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; - void *saptr[MAX_PKT_BURST * 2]; - uint32_t res[MAX_PKT_BURST * 2]; - uint32_t num; -}; - -struct ipsec_traffic { - struct traffic_type ipsec; - struct traffic_type ip4; - struct traffic_type ip6; -}; - - -void -ipsec_poll_mode_worker(void); - -int -ipsec_launch_one_lcore(void *args); +/* Socket ctx */ +struct socket_ctx socket_ctx[NB_SOCKETS]; uint16_t ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index fce274a..2af9475 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -15,6 +15,7 @@ #include <ctype.h> #include <stdbool.h> +#include <rte_acl.h> #include <rte_common.h> #include <rte_log.h> #include <rte_memcpy.h> @@ -29,12 +30,51 @@ #include <rte_eventdev.h> #include <rte_malloc.h> #include <rte_mbuf.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> #include "ipsec.h" +#include "ipsec_worker.h" #include "event_helper.h" extern volatile bool force_quit; +static inline enum pkt_type +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) +{ + struct rte_ether_hdr *eth; + + eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + + offsetof(struct ip, ip_p)); + if (**nlp == IPPROTO_ESP) + return PKT_TYPE_IPSEC_IPV4; + else + return PKT_TYPE_PLAIN_IPV4; + } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN + + offsetof(struct ip6_hdr, ip6_nxt)); + if (**nlp == IPPROTO_ESP) + return PKT_TYPE_IPSEC_IPV6; + else + return PKT_TYPE_PLAIN_IPV6; + } + + /* Unknown/Unsupported type */ + return PKT_TYPE_INVALID; +} + +static inline void +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) +{ + struct rte_ether_hdr *ethhdr; + + ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN); + memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN); +} + static inline void ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) { @@ -45,6 +85,177 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) rte_event_eth_tx_adapter_txq_set(m, 0); } +static inline int +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) +{ + uint32_t res; + + if (unlikely(sp == NULL)) + return 0; + + rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1, + DEFAULT_MAX_CATEGORIES); + + if (unlikely(res == 0)) { + /* No match */ + return 0; + } + + if (res == DISCARD) + return 0; + else if (res == BYPASS) { + *sa_idx = 0; + return 1; + } + + *sa_idx = SPI2IDX(res); + if (*sa_idx < IPSEC_SA_MAX_ENTRIES) + return 1; + + /* Invalid SA IDX */ + return 0; +} + +static inline uint16_t +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) +{ + uint32_t dst_ip; + uint16_t offset; + uint32_t hop; + int ret; + + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst); + dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset); + dst_ip = rte_be_to_cpu_32(dst_ip); + + ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop); + + if (ret == 0) { + /* We have a hit */ + return hop; + } + + /* else */ + return RTE_MAX_ETHPORTS; +} + +/* TODO: To be tested */ +static inline uint16_t +route6_pkt(struct rte_mbuf *pkt, struct 
rt_ctx *rt_ctx) +{ + uint8_t dst_ip[16]; + uint8_t *ip6_dst; + uint16_t offset; + uint32_t hop; + int ret; + + offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst); + ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset); + memcpy(&dst_ip[0], ip6_dst, 16); + + ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop); + + if (ret == 0) { + /* We have a hit */ + return hop; + } + + /* else */ + return RTE_MAX_ETHPORTS; +} + +static inline uint16_t +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type) +{ + if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4) + return route4_pkt(pkt, rt->rt4_ctx); + else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6) + return route6_pkt(pkt, rt->rt6_ctx); + + return RTE_MAX_ETHPORTS; +} + +static inline int +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, + struct rte_event *ev) +{ + struct ipsec_sa *sa = NULL; + struct rte_mbuf *pkt; + uint16_t port_id = 0; + enum pkt_type type; + uint32_t sa_idx; + uint8_t *nlp; + + /* Get pkt from event */ + pkt = ev->mbuf; + + /* Check the packet type */ + type = process_ipsec_get_pkt_type(pkt, &nlp); + + switch (type) { + case PKT_TYPE_PLAIN_IPV4: + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) + sa = (struct ipsec_sa *) pkt->udata64; + + /* Check if we have a match */ + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + + case PKT_TYPE_PLAIN_IPV6: + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) + sa = (struct ipsec_sa *) pkt->udata64; + + /* Check if we have a match */ + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + + default: + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); + goto drop_pkt_and_exit; + } + + /* Check if the packet has to be bypassed */ + if (sa_idx == 0) + goto route_and_send_pkt; + + /* Else the packet has to be protected with SA */ + + /* If the packet was IPsec processed, then SA pointer should be set */ + if (sa == NULL) + goto drop_pkt_and_exit; + + /* SPI on the packet should match with the one in SA */ + if (unlikely(sa->spi != sa_idx)) + goto drop_pkt_and_exit; + +route_and_send_pkt: + port_id = get_route(pkt, rt, type); + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { + /* no match */ + goto drop_pkt_and_exit; + } + /* else, we have a matching route */ + + /* Update mac addresses */ + update_mac_addrs(pkt, port_id); + + /* Update the event with the dest port */ + ipsec_event_pre_forward(pkt, port_id); + return 1; + +drop_pkt_and_exit: + RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n"); + rte_pktmbuf_free(pkt); + ev->mbuf = NULL; + return 0; +} + /* * Event mode exposes various operating modes depending on the * capabilities of the event device and the operating mode @@ -134,11 +345,11 @@ static void ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, uint8_t nb_links) { + struct lcore_conf_ev_tx_int_port_wrkr lconf; unsigned int nb_rx = 0; - unsigned int port_id; - struct rte_mbuf *pkt; struct rte_event ev; uint32_t lcore_id; + int32_t socket_id; /* Check if we have links registered for this lcore */ if (nb_links == 0) { @@ -151,6 +362,21 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, /* Get core ID */ lcore_id = rte_lcore_id(); + /* Get socket ID */ + socket_id = rte_lcore_to_socket_id(lcore_id); + + /* Save routing table */ + lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4; + lconf.rt.rt6_ctx = 
socket_ctx[socket_id].rt_ip6; + lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in; + lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in; + lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in; + lconf.inbound.session_pool = socket_ctx[socket_id].session_pool; + lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out; + lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out; + lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out; + lconf.outbound.session_pool = socket_ctx[socket_id].session_pool; + RTE_LOG(INFO, IPSEC, "Launching event mode worker (non-burst - Tx internal port - " "app mode - inbound) on lcore %d\n", lcore_id); @@ -175,13 +401,11 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, if (nb_rx == 0) continue; - port_id = ev.queue_id; - pkt = ev.mbuf; - - rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); - - /* Process packet */ - ipsec_event_pre_forward(pkt, port_id); + if (process_ipsec_ev_inbound(&lconf.inbound, + &lconf.rt, &ev) != 1) { + /* The pkt has been dropped */ + continue; + } /* * Since tx internal port is available, events can be diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h new file mode 100644 index 0000000..fd18a2e --- /dev/null +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium, Inc + */ +#ifndef _IPSEC_WORKER_H_ +#define _IPSEC_WORKER_H_ + +#include "ipsec.h" + +enum pkt_type { + PKT_TYPE_PLAIN_IPV4 = 1, + PKT_TYPE_IPSEC_IPV4, + PKT_TYPE_PLAIN_IPV6, + PKT_TYPE_IPSEC_IPV6, + PKT_TYPE_INVALID +}; + +struct route_table { + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; +}; + +/* + * Conf required by event mode worker with tx internal port + */ +struct lcore_conf_ev_tx_int_port_wrkr { + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct route_table rt; +} __rte_cache_aligned; + +/* TODO + * + * Move this function to ipsec_worker.c + */ +void ipsec_poll_mode_worker(void); + +int ipsec_launch_one_lcore(void *args); + +#endif /* _IPSEC_WORKER_H_ */ diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index 7f046e3..9e17ba0 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -772,17 +772,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound) printf("\n"); } -struct sa_ctx { - void *satbl; /* pointer to array of rte_ipsec_sa objects*/ - struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; - union { - struct { - struct rte_crypto_sym_xform a; - struct rte_crypto_sym_xform b; - }; - } xf[IPSEC_SA_MAX_ENTRIES]; -}; - static struct sa_ctx * sa_create(const char *name, int32_t socket_id) {