[7/7] examples/ipsec-secgw: add ethdev reset callback
Checks
Commit Message
Add event handler for ethdev reset callback
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 18 +++-
examples/ipsec-secgw/ipsec.h | 4 +-
examples/ipsec-secgw/sa.c | 130 +++++++++++++++++++++++++++--
3 files changed, 139 insertions(+), 13 deletions(-)
Comments
> Add event handler for ethdev reset callback
I believe the patch below needs to be split into 4-5 patches.
It contains a lot of irrelevant changes.
>
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> ---
> examples/ipsec-secgw/ipsec-secgw.c | 18 +++-
> examples/ipsec-secgw/ipsec.h | 4 +-
> examples/ipsec-secgw/sa.c | 130 +++++++++++++++++++++++++++--
> 3 files changed, 139 insertions(+), 13 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-
> secgw/ipsec-secgw.c
> index e725d84e7c..9ba9568978 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -2254,7 +2254,7 @@ port_init(uint16_t portid, uint64_t
> req_rx_offloads, uint64_t req_tx_offloads)
> local_port_conf.rxmode.offloads)
> rte_exit(EXIT_FAILURE,
> "Error: port %u required RX offloads: 0x%" PRIx64
> - ", avaialbe RX offloads: 0x%" PRIx64 "\n",
> + ", available RX offloads: 0x%" PRIx64 "\n",
> portid, local_port_conf.rxmode.offloads,
> dev_info.rx_offload_capa);
>
> @@ -2262,7 +2262,7 @@ port_init(uint16_t portid, uint64_t
> req_rx_offloads, uint64_t req_tx_offloads)
> local_port_conf.txmode.offloads)
> rte_exit(EXIT_FAILURE,
> "Error: port %u required TX offloads: 0x%" PRIx64
> - ", avaialbe TX offloads: 0x%" PRIx64 "\n",
> + ", available TX offloads: 0x%" PRIx64 "\n",
> portid, local_port_conf.txmode.offloads,
> dev_info.tx_offload_capa);
Please submit a separate patch for the above so that it can be backported.
>
> @@ -2543,6 +2543,17 @@ inline_ipsec_event_callback(uint16_t port_id,
> enum rte_eth_event_type type,
> return -1;
> }
>
> +static int
> +ethdev_reset_event_callback(uint16_t port_id,
> + enum rte_eth_event_type type __rte_unused,
> + void *param __rte_unused, void *ret_param __rte_unused)
> +{
> + printf("Reset Event on port id %d\n", port_id);
> + printf("Force quit application");
> + force_quit = true;
> + return 0;
> +}
> +
> static uint16_t
> rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
> struct rte_mbuf *pkt[], uint16_t nb_pkts,
> @@ -3317,6 +3328,9 @@ main(int32_t argc, char **argv)
> rte_strerror(-ret), portid);
> }
>
> + rte_eth_dev_callback_register(portid,
> RTE_ETH_EVENT_INTR_RESET,
> + ethdev_reset_event_callback, NULL);
> +
> rte_eth_dev_callback_register(portid,
> RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback,
> NULL);
> }
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index db7988604a..e8752e0bde 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -65,7 +65,7 @@ struct ip_addr {
> } ip;
> };
>
> -#define MAX_KEY_SIZE 36
> +#define MAX_KEY_SIZE 132
Reason?? This change does not match the patch description.
>
> /*
> * application wide SA parameters
> @@ -146,7 +146,7 @@ struct ipsec_sa {
> uint8_t udp_encap;
> uint16_t portid;
> uint16_t mss;
> - uint16_t esn;
> + uint32_t esn;
>
> uint8_t fdir_qid;
> uint8_t fdir_flag;
> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> index 3ee5ed7dcf..0be8bdef7a 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -46,6 +46,7 @@ struct supported_cipher_algo {
> struct supported_auth_algo {
> const char *keyword;
> enum rte_crypto_auth_algorithm algo;
> + uint16_t iv_len;
> uint16_t digest_len;
> uint16_t key_len;
> uint8_t key_not_req;
> @@ -98,6 +99,20 @@ const struct supported_cipher_algo cipher_algos[] = {
> .block_size = 4,
> .key_len = 20
> },
> + {
> + .keyword = "aes-192-ctr",
> + .algo = RTE_CRYPTO_CIPHER_AES_CTR,
> + .iv_len = 16,
> + .block_size = 16,
> + .key_len = 28
> + },
> + {
> + .keyword = "aes-256-ctr",
> + .algo = RTE_CRYPTO_CIPHER_AES_CTR,
> + .iv_len = 16,
> + .block_size = 16,
> + .key_len = 36
> + },
I think the above change needs a separate patch to add support for the new algorithms.
> {
> .keyword = "3des-cbc",
> .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
> @@ -126,6 +141,31 @@ const struct supported_auth_algo auth_algos[] = {
> .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
> .digest_len = 16,
> .key_len = 32
> + },
> + {
> + .keyword = "sha384-hmac",
> + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
> + .digest_len = 24,
> + .key_len = 48
> + },
> + {
> + .keyword = "sha512-hmac",
> + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
> + .digest_len = 32,
> + .key_len = 64
> + },
> + {
> + .keyword = "aes-gmac",
> + .algo = RTE_CRYPTO_AUTH_AES_GMAC,
> + .iv_len = 8,
> + .digest_len = 16,
> + .key_len = 20
> + },
> + {
> + .keyword = "aes-xcbc-mac-96",
> + .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
> + .digest_len = 12,
> + .key_len = 16
> }
> };
>
> @@ -156,6 +196,42 @@ const struct supported_aead_algo aead_algos[] = {
> .key_len = 36,
> .digest_len = 16,
> .aad_len = 8,
> + },
> + {
> + .keyword = "aes-128-ccm",
> + .algo = RTE_CRYPTO_AEAD_AES_CCM,
> + .iv_len = 8,
> + .block_size = 4,
> + .key_len = 20,
> + .digest_len = 16,
> + .aad_len = 8,
> + },
> + {
> + .keyword = "aes-192-ccm",
> + .algo = RTE_CRYPTO_AEAD_AES_CCM,
> + .iv_len = 8,
> + .block_size = 4,
> + .key_len = 28,
> + .digest_len = 16,
> + .aad_len = 8,
> + },
> + {
> + .keyword = "aes-256-ccm",
> + .algo = RTE_CRYPTO_AEAD_AES_CCM,
> + .iv_len = 8,
> + .block_size = 4,
> + .key_len = 36,
> + .digest_len = 16,
> + .aad_len = 8,
> + },
> + {
> + .keyword = "chacha20-poly1305",
> + .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> + .iv_len = 12,
> + .block_size = 64,
> + .key_len = 36,
> + .digest_len = 16,
> + .aad_len = 8,
The above changes are also not relevant to this patch.
Also, MAX_KEY_SIZE is updated to 132, but the maximum key size that I see here is 64.
> }
> };
>
> @@ -352,6 +428,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
> } else if (strcmp(tokens[ti], "ipv6-udp-tunnel") == 0) {
> sa_cnt->nb_v6++;
> rule->flags |= IP6_TUNNEL |
> NATT_UDP_TUNNEL;
> + rule->udp.sport = 0;
> + rule->udp.dport = 4500;
Again, this change is irrelevant as per the patch description.
> } else if (strcmp(tokens[ti], "transport") == 0) {
> sa_cnt->nb_v4++;
> sa_cnt->nb_v6++;
> @@ -499,6 +577,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
> if (status->status < 0)
> return;
>
> + if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
> + key_len -= 4;
> + rule->auth_key_len = key_len;
> + rule->iv_len = algo->iv_len;
> + memcpy(&rule->salt,
> + &rule->auth_key[key_len], 4);
> + }
> +
> +
Extra blank line, and an irrelevant change.
> auth_algo_p = 1;
> continue;
> }
> @@ -1209,10 +1296,15 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct
> ipsec_sa entries[],
> sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
> sa->aead_algo ==
> RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
>
> - if (ips->type ==
> RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
> + if (ips->type ==
> +
> RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
> iv_length = 8;
> - else
> - iv_length = 12;
> + } else {
> + if (sa->aead_algo ==
> RTE_CRYPTO_AEAD_AES_CCM)
> + iv_length = 11;
> + else
> + iv_length = 12;
> + }
>
> sa_ctx->xf[idx].a.type =
> RTE_CRYPTO_SYM_XFORM_AEAD;
> sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
> @@ -1236,10 +1328,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct
> ipsec_sa entries[],
> case RTE_CRYPTO_CIPHER_NULL:
> case RTE_CRYPTO_CIPHER_3DES_CBC:
> case RTE_CRYPTO_CIPHER_AES_CBC:
> - iv_length = sa->iv_len;
> - break;
> case RTE_CRYPTO_CIPHER_AES_CTR:
> - iv_length = 16;
> + iv_length = sa->iv_len;
> break;
> default:
> RTE_LOG(ERR, IPSEC_ESP,
> @@ -1248,6 +1338,15 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct
> ipsec_sa entries[],
> return -EINVAL;
> }
>
> + if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
> {
> + if (ips->type ==
> +
> RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
> + iv_length = 8;
> + } else {
> + iv_length = 12;
> + }
> + }
> +
> if (inbound) {
> sa_ctx->xf[idx].b.type =
> RTE_CRYPTO_SYM_XFORM_CIPHER;
> sa_ctx->xf[idx].b.cipher.algo = sa-
> >cipher_algo;
> @@ -1269,6 +1368,9 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct
> ipsec_sa entries[],
> sa->digest_len;
> sa_ctx->xf[idx].a.auth.op =
> RTE_CRYPTO_AUTH_OP_VERIFY;
> + sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
> + sa_ctx->xf[idx].a.auth.iv.length = iv_length;
> +
> } else { /* outbound */
> sa_ctx->xf[idx].a.type =
> RTE_CRYPTO_SYM_XFORM_CIPHER;
> sa_ctx->xf[idx].a.cipher.algo = sa-
> >cipher_algo;
> @@ -1290,11 +1392,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct
> ipsec_sa entries[],
> sa->digest_len;
> sa_ctx->xf[idx].b.auth.op =
> RTE_CRYPTO_AUTH_OP_GENERATE;
> + sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
> + sa_ctx->xf[idx].b.auth.iv.length = iv_length;
> +
> }
>
> - sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
> - sa_ctx->xf[idx].b.next = NULL;
> - sa->xforms = &sa_ctx->xf[idx].a;
> + if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
> {
> + sa->xforms = inbound ?
> + &sa_ctx->xf[idx].a : &sa_ctx-
> >xf[idx].b;
> + sa->xforms->next = NULL;
> +
> + } else {
> + sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
> + sa_ctx->xf[idx].b.next = NULL;
> + sa->xforms = &sa_ctx->xf[idx].a;
> + }
> }
>
> if (ips->type ==
> --
> 2.25.1
@@ -2254,7 +2254,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.rxmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required RX offloads: 0x%" PRIx64
- ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ ", available RX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
dev_info.rx_offload_capa);
@@ -2262,7 +2262,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
- ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ ", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
@@ -2543,6 +2543,17 @@ inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
return -1;
}
+static int
+ethdev_reset_event_callback(uint16_t port_id,
+ enum rte_eth_event_type type __rte_unused,
+ void *param __rte_unused, void *ret_param __rte_unused)
+{
+ printf("Reset Event on port id %d\n", port_id);
+ printf("Force quit application");
+ force_quit = true;
+ return 0;
+}
+
static uint16_t
rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
struct rte_mbuf *pkt[], uint16_t nb_pkts,
@@ -3317,6 +3328,9 @@ main(int32_t argc, char **argv)
rte_strerror(-ret), portid);
}
+ rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
+ ethdev_reset_event_callback, NULL);
+
rte_eth_dev_callback_register(portid,
RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
@@ -65,7 +65,7 @@ struct ip_addr {
} ip;
};
-#define MAX_KEY_SIZE 36
+#define MAX_KEY_SIZE 132
/*
* application wide SA parameters
@@ -146,7 +146,7 @@ struct ipsec_sa {
uint8_t udp_encap;
uint16_t portid;
uint16_t mss;
- uint16_t esn;
+ uint32_t esn;
uint8_t fdir_qid;
uint8_t fdir_flag;
@@ -46,6 +46,7 @@ struct supported_cipher_algo {
struct supported_auth_algo {
const char *keyword;
enum rte_crypto_auth_algorithm algo;
+ uint16_t iv_len;
uint16_t digest_len;
uint16_t key_len;
uint8_t key_not_req;
@@ -98,6 +99,20 @@ const struct supported_cipher_algo cipher_algos[] = {
.block_size = 4,
.key_len = 20
},
+ {
+ .keyword = "aes-192-ctr",
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .iv_len = 16,
+ .block_size = 16,
+ .key_len = 28
+ },
+ {
+ .keyword = "aes-256-ctr",
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .iv_len = 16,
+ .block_size = 16,
+ .key_len = 36
+ },
{
.keyword = "3des-cbc",
.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
@@ -126,6 +141,31 @@ const struct supported_auth_algo auth_algos[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.digest_len = 16,
.key_len = 32
+ },
+ {
+ .keyword = "sha384-hmac",
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .digest_len = 24,
+ .key_len = 48
+ },
+ {
+ .keyword = "sha512-hmac",
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .digest_len = 32,
+ .key_len = 64
+ },
+ {
+ .keyword = "aes-gmac",
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .iv_len = 8,
+ .digest_len = 16,
+ .key_len = 20
+ },
+ {
+ .keyword = "aes-xcbc-mac-96",
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .digest_len = 12,
+ .key_len = 16
}
};
@@ -156,6 +196,42 @@ const struct supported_aead_algo aead_algos[] = {
.key_len = 36,
.digest_len = 16,
.aad_len = 8,
+ },
+ {
+ .keyword = "aes-128-ccm",
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 20,
+ .digest_len = 16,
+ .aad_len = 8,
+ },
+ {
+ .keyword = "aes-192-ccm",
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 28,
+ .digest_len = 16,
+ .aad_len = 8,
+ },
+ {
+ .keyword = "aes-256-ccm",
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 36,
+ .digest_len = 16,
+ .aad_len = 8,
+ },
+ {
+ .keyword = "chacha20-poly1305",
+ .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+ .iv_len = 12,
+ .block_size = 64,
+ .key_len = 36,
+ .digest_len = 16,
+ .aad_len = 8,
}
};
@@ -352,6 +428,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
} else if (strcmp(tokens[ti], "ipv6-udp-tunnel") == 0) {
sa_cnt->nb_v6++;
rule->flags |= IP6_TUNNEL | NATT_UDP_TUNNEL;
+ rule->udp.sport = 0;
+ rule->udp.dport = 4500;
} else if (strcmp(tokens[ti], "transport") == 0) {
sa_cnt->nb_v4++;
sa_cnt->nb_v6++;
@@ -499,6 +577,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
+ if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ key_len -= 4;
+ rule->auth_key_len = key_len;
+ rule->iv_len = algo->iv_len;
+ memcpy(&rule->salt,
+ &rule->auth_key[key_len], 4);
+ }
+
+
auth_algo_p = 1;
continue;
}
@@ -1209,10 +1296,15 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ if (ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
iv_length = 8;
- else
- iv_length = 12;
+ } else {
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ iv_length = 11;
+ else
+ iv_length = 12;
+ }
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
@@ -1236,10 +1328,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
- iv_length = sa->iv_len;
- break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- iv_length = 16;
+ iv_length = sa->iv_len;
break;
default:
RTE_LOG(ERR, IPSEC_ESP,
@@ -1248,6 +1338,15 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
return -EINVAL;
}
+ if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ iv_length = 8;
+ } else {
+ iv_length = 12;
+ }
+ }
+
if (inbound) {
sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
@@ -1269,6 +1368,9 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->digest_len;
sa_ctx->xf[idx].a.auth.op =
RTE_CRYPTO_AUTH_OP_VERIFY;
+ sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.auth.iv.length = iv_length;
+
} else { /* outbound */
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
@@ -1290,11 +1392,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->digest_len;
sa_ctx->xf[idx].b.auth.op =
RTE_CRYPTO_AUTH_OP_GENERATE;
+ sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].b.auth.iv.length = iv_length;
+
}
- sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
- sa_ctx->xf[idx].b.next = NULL;
- sa->xforms = &sa_ctx->xf[idx].a;
+ if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ sa->xforms = inbound ?
+ &sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
+ sa->xforms->next = NULL;
+
+ } else {
+ sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
+ sa_ctx->xf[idx].b.next = NULL;
+ sa->xforms = &sa_ctx->xf[idx].a;
+ }
}
if (ips->type ==