@@ -38,6 +38,7 @@ Features of the OCTEON TX2 Ethdev PMD are:
- IEEE1588 timestamping
- HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection
- Support Rx interrupt
+- Inline IPsec processing support
Prerequisites
-------------
@@ -178,6 +179,17 @@ Runtime Config Options
traffic on this port should be higig2 traffic only. Supported switch header
types are "higig2" and "dsa".
+- ``Max SPI for inbound inline IPsec`` (default ``1``)
+
+  The maximum SPI supported for inbound inline IPsec processing can be
+  specified with the ``ipsec_in_max_spi`` ``devargs`` parameter.
+
+ For example::
+ -w 0002:02:00.0,ipsec_in_max_spi=128
+
+  With the above configuration, the application can enable inline IPsec
+  processing for 128 SAs (SPI 0-127).
+
.. note::
Above devarg parameters are configurable per device, user needs to pass the
@@ -211,6 +223,14 @@ SDP interface support
~~~~~~~~~~~~~~~~~~~~~
OCTEON TX2 SDP interface support is limited to PF device, No VF support.
+Inline Protocol Processing
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``net_octeontx2`` PMD doesn't support the following features on packets
+that undergo inline protocol processing:
+- TSO offload
+- VLAN/QinQ offload
+- Fragmentation
+
Debugging Options
-----------------
@@ -82,6 +82,15 @@ New Features
Added Chacha20-Poly1305 AEAD algorithm.
+* **Added inline IPsec support to Marvell OCTEONTX2 PMD.**
+
+ Added inline IPsec support to Marvell OCTEONTX2 PMD. With the feature,
+ applications would be able to offload entire IPsec processing to the hardware.
+ For the configured sessions, hardware will do the lookup and perform
+ decryption and IPsec transformation. For the outbound path, application
+ can submit a plain packet to the PMD, and it would be sent out on wire
+ after doing encryption and IPsec transformation of the packet.
+
Removed Items
-------------
@@ -79,10 +79,14 @@ int otx2_npa_lf_obj_ref(void);
typedef int (*otx2_sec_eth_ctx_create_t)(struct rte_eth_dev *eth_dev);
typedef void (*otx2_sec_eth_ctx_destroy_t)(struct rte_eth_dev *eth_dev);
+typedef int (*otx2_sec_eth_init_t)(struct rte_eth_dev *eth_dev);
+typedef void (*otx2_sec_eth_fini_t)(struct rte_eth_dev *eth_dev);
struct otx2_sec_eth_crypto_idev_ops {
otx2_sec_eth_ctx_create_t ctx_create;
otx2_sec_eth_ctx_destroy_t ctx_destroy;
+ otx2_sec_eth_init_t init;
+ otx2_sec_eth_fini_t fini;
};
extern struct otx2_sec_eth_crypto_idev_ops otx2_sec_idev_ops;
@@ -11,7 +11,7 @@ LIB = librte_pmd_octeontx2_crypto.a
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_cryptodev -lrte_security
+LDLIBS += -lrte_cryptodev -lrte_security -lrte_eventdev
LDLIBS += -lrte_pci -lrte_bus_pci
LDLIBS += -lrte_common_cpt -lrte_common_octeontx2
@@ -9,6 +9,7 @@ deps += ['bus_pci']
deps += ['common_cpt']
deps += ['common_octeontx2']
deps += ['ethdev']
+deps += ['eventdev']
deps += ['security']
name = 'octeontx2_crypto'
@@ -158,4 +158,6 @@ RTE_INIT(otx2_cpt_init_log)
otx2_sec_idev_ops.ctx_create = otx2_sec_eth_ctx_create;
otx2_sec_idev_ops.ctx_destroy = otx2_sec_eth_ctx_destroy;
+ otx2_sec_idev_ops.init = otx2_sec_eth_init;
+ otx2_sec_idev_ops.fini = otx2_sec_eth_fini;
}
new file mode 100644
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_IPSEC_FP_H__
+#define __OTX2_IPSEC_FP_H__
+
+struct otx2_ipsec_fp_sa_ctl {
+ rte_be32_t spi : 32;
+ uint64_t exp_proto_inter_frag : 8;
+ uint64_t rsvd_42_40 : 3;
+ uint64_t esn_en : 1;
+ uint64_t rsvd_45_44 : 2;
+ uint64_t encap_type : 2;
+ uint64_t enc_type : 3;
+ uint64_t rsvd_48 : 1;
+ uint64_t auth_type : 4;
+ uint64_t valid : 1;
+ uint64_t direction : 1;
+ uint64_t outer_ip_ver : 1;
+ uint64_t inner_ip_ver : 1;
+ uint64_t ipsec_mode : 1;
+ uint64_t ipsec_proto : 1;
+ uint64_t aes_key_len : 2;
+};
+
+struct otx2_ipsec_fp_in_sa {
+ /* w0 */
+ struct otx2_ipsec_fp_sa_ctl ctl;
+
+ /* w1 */
+ uint8_t nonce[4]; /* Only for AES-GCM */
+ uint32_t unused;
+
+ /* w2 */
+ uint32_t esn_low;
+ uint32_t esn_hi;
+
+ /* w3-w6 */
+ uint8_t cipher_key[32];
+
+ /* w7-w12 */
+ uint8_t hmac_key[48];
+
+ RTE_STD_C11
+ union {
+ void *userdata;
+ uint64_t udata64;
+ };
+
+ uint64_t reserved1;
+ uint64_t reserved2;
+};
+
+#endif /* __OTX2_IPSEC_FP_H__ */
@@ -3,11 +3,36 @@
*/
#include <rte_ethdev.h>
+#include <rte_eventdev.h>
#include <rte_malloc.h>
+#include <rte_memzone.h>
#include <rte_security.h>
+#include "otx2_ethdev.h"
+#include "otx2_ipsec_fp.h"
#include "otx2_security.h"
+#define SEC_ETH_MAX_PKT_LEN 1450
+
+struct sec_eth_tag_const {
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t rsvd_11_0 : 12;
+ uint32_t port : 8;
+ uint32_t event_type : 4;
+ uint32_t rsvd_31_24 : 8;
+ };
+ uint32_t u32;
+ };
+};
+
+static inline void
+in_sa_mz_name_get(char *name, int size, uint16_t port)
+{
+ snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
+}
+
int
otx2_sec_eth_ctx_create(struct rte_eth_dev *eth_dev)
{
@@ -33,3 +58,100 @@ otx2_sec_eth_ctx_destroy(struct rte_eth_dev *eth_dev)
{
rte_free(eth_dev->security_ctx);
}
+
+static int
+sec_eth_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ struct nix_inline_ipsec_lf_cfg *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct sec_eth_tag_const tag_const;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL)
+ return -EINVAL;
+
+ req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ req->enable = 1;
+ req->sa_base_addr = mz->iova;
+
+ req->ipsec_cfg0.tt = tt;
+
+ tag_const.u32 = 0;
+ tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
+ tag_const.port = port;
+ req->ipsec_cfg0.tag_const = tag_const.u32;
+
+ req->ipsec_cfg0.sa_pow2_size =
+ rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
+ req->ipsec_cfg0.lenm1_max = SEC_ETH_MAX_PKT_LEN - 1;
+
+ req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
+ req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_sec_eth_init(struct rte_eth_dev *eth_dev)
+{
+ const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ int mz_sz, ret;
+ uint16_t nb_sa;
+
+ RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
+ !RTE_IS_POWER_OF_2(sa_width));
+
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return 0;
+
+ nb_sa = dev->ipsec_in_max_spi;
+ mz_sz = nb_sa * sa_width;
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
+
+ if (mz == NULL) {
+ otx2_err("Could not allocate inbound SA DB");
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz_sz);
+
+ ret = sec_eth_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
+ if (ret < 0) {
+ otx2_err("Could not configure inline IPsec");
+ goto sec_fini;
+ }
+
+ return 0;
+
+sec_fini:
+ otx2_err("Could not configure device for security");
+ otx2_sec_eth_fini(eth_dev);
+ return ret;
+}
+
+void
+otx2_sec_eth_fini(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return;
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ rte_memzone_free(rte_memzone_lookup(name));
+}
@@ -11,4 +11,8 @@ int otx2_sec_eth_ctx_create(struct rte_eth_dev *eth_dev);
void otx2_sec_eth_ctx_destroy(struct rte_eth_dev *eth_dev);
+int otx2_sec_eth_init(struct rte_eth_dev *eth_dev);
+
+void otx2_sec_eth_fini(struct rte_eth_dev *eth_dev);
+
#endif /* __OTX2_SECURITY_H__ */
@@ -337,6 +337,10 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
aq->op = NIX_AQ_INSTOP_INIT;
aq->rq.sso_ena = 0;
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+ aq->rq.ipsech_ena = 1;
+
aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
aq->rq.spb_ena = 0;
aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
@@ -1610,6 +1614,8 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
/* Free the resources allocated from the previous configure */
if (dev->configured == 1) {
+ if (otx2_sec_idev_ops.fini != NULL)
+ otx2_sec_idev_ops.fini(eth_dev);
otx2_nix_rxchan_bpid_cfg(eth_dev, false);
otx2_nix_vlan_fini(eth_dev);
otx2_nix_mc_addr_list_uninstall(eth_dev);
@@ -1714,10 +1720,17 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
goto cq_fini;
}
+ /* Enable security */
+ if (otx2_sec_idev_ops.init != NULL) {
+ rc = otx2_sec_idev_ops.init(eth_dev);
+ if (rc)
+ goto cq_fini;
+ }
+
rc = otx2_nix_mc_addr_list_install(eth_dev);
if (rc < 0) {
otx2_err("Failed to install mc address list rc=%d", rc);
- goto cq_fini;
+ goto sec_fini;
}
/*
@@ -1753,6 +1766,9 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
uninstall_mc_list:
otx2_nix_mc_addr_list_uninstall(eth_dev);
+sec_fini:
+ if (otx2_sec_idev_ops.fini != NULL)
+ otx2_sec_idev_ops.fini(eth_dev);
cq_fini:
oxt2_nix_unregister_cq_irqs(eth_dev);
q_irq_fini:
@@ -2345,6 +2361,10 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
if (rc)
otx2_err("Failed to cleanup npa lf, rc=%d", rc);
+ /* Disable security */
+ if (otx2_sec_idev_ops.fini != NULL)
+ otx2_sec_idev_ops.fini(eth_dev);
+
/* Destroy security ctx */
if (otx2_sec_idev_ops.ctx_destroy != NULL)
otx2_sec_idev_ops.ctx_destroy(eth_dev);
@@ -324,6 +324,8 @@ struct otx2_eth_dev {
bool mc_tbl_set;
struct otx2_nix_mc_filter_tbl mc_fltr_tbl;
bool sdp_link; /* SDP flag */
+ /* Inline IPsec params */
+ uint16_t ipsec_in_max_spi;
} __rte_cache_aligned;
struct otx2_eth_txq {
@@ -64,6 +64,19 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
}
static int
+parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint32_t val;
+
+ val = atoi(value);
+
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
+static int
parse_flag(const char *key, const char *value, void *extra_args)
{
RTE_SET_USED(key);
@@ -104,6 +117,7 @@ parse_switch_header_type(const char *key, const char *value, void *extra_args)
}
#define OTX2_RSS_RETA_SIZE "reta_size"
+#define OTX2_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
#define OTX2_SCL_ENABLE "scalar_enable"
#define OTX2_MAX_SQB_COUNT "max_sqb_count"
#define OTX2_FLOW_PREALLOC_SIZE "flow_prealloc_size"
@@ -118,6 +132,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
uint16_t flow_prealloc_size = 8;
uint16_t switch_header_type = 0;
uint16_t flow_max_priority = 3;
+ uint16_t ipsec_in_max_spi = 1;
uint16_t scalar_enable = 0;
struct rte_kvargs *kvlist;
@@ -130,6 +145,8 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
rte_kvargs_process(kvlist, OTX2_RSS_RETA_SIZE,
&parse_reta_size, &rss_size);
+ rte_kvargs_process(kvlist, OTX2_IPSEC_IN_MAX_SPI,
+ &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
rte_kvargs_process(kvlist, OTX2_SCL_ENABLE,
&parse_flag, &scalar_enable);
rte_kvargs_process(kvlist, OTX2_MAX_SQB_COUNT,
@@ -143,6 +160,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
rte_kvargs_free(kvlist);
null_devargs:
+ dev->ipsec_in_max_spi = ipsec_in_max_spi;
dev->scalar_ena = scalar_enable;
dev->max_sqb_count = sqb_count;
dev->rss_info.rss_size = rss_size;
@@ -157,6 +175,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
OTX2_RSS_RETA_SIZE "=<64|128|256>"
+ OTX2_IPSEC_IN_MAX_SPI "=<1-65535>"
OTX2_SCL_ENABLE "=1"
OTX2_MAX_SQB_COUNT "=<8-512>"
OTX2_FLOW_PREALLOC_SIZE "=<1-32>"