From: Tejasree Kondoj <ktejasree@marvell.com>
To enable outbound inline IPsec, a CPT queue needs to be tied
to a NIX PF_FUNC. Distribute CPT queues fairly among all available
otx2 eth ports.
For inbound, one CPT LF will be assigned and initialized by the kernel.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
drivers/crypto/octeontx2/Makefile | 3 +-
drivers/crypto/octeontx2/meson.build | 2 +
drivers/crypto/octeontx2/otx2_cryptodev_mbox.c | 53 ++++++++++++++++++++++++++
drivers/crypto/octeontx2/otx2_cryptodev_mbox.h | 7 ++++
drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 38 ++++++++++++++++++
5 files changed, 102 insertions(+), 1 deletion(-)
@@ -10,7 +10,7 @@ LIB = librte_pmd_octeontx2_crypto.a
# build flags
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_pci -lrte_bus_pci
LDLIBS += -lrte_common_cpt -lrte_common_octeontx2
@@ -21,6 +21,7 @@ CFLAGS += -O3
CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2
CFLAGS += -DALLOW_EXPERIMENTAL_API
ifneq ($(CONFIG_RTE_ARCH_64),y)
@@ -8,6 +8,7 @@ endif
deps += ['bus_pci']
deps += ['common_cpt']
deps += ['common_octeontx2']
+deps += ['ethdev']
name = 'octeontx2_crypto'
allow_experimental_apis = true
@@ -32,3 +33,4 @@ endforeach
includes += include_directories('../../common/cpt')
includes += include_directories('../../common/octeontx2')
includes += include_directories('../../mempool/octeontx2')
+includes += include_directories('../../net/octeontx2')
@@ -2,10 +2,13 @@
* Copyright (C) 2019 Marvell International Ltd.
*/
#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_dev.h"
+#include "otx2_ethdev.h"
#include "otx2_mbox.h"
#include "cpt_pmd_logs.h"
@@ -173,3 +176,53 @@ otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
return otx2_cpt_send_mbox_msg(vf);
}
+
+int
+otx2_cpt_inline_init(const struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct cpt_rx_inline_lf_cfg_msg *msg;
+ int ret;
+
+ msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
+ msg->sso_pf_func = otx2_sso_pf_func_get();
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_process(mbox);
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
+
+int
+otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
+ uint16_t port_id)
+{
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct cpt_inline_ipsec_cfg_msg *msg;
+ struct otx2_eth_dev *otx2_eth_dev;
+ int ret;
+
+ if (!otx2_ethdev_is_sec_capable(&rte_eth_devices[port_id]))
+ return -EINVAL;
+
+ otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
+
+ msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
+ msg->dir = CPT_INLINE_OUTBOUND;
+ msg->enable = 1;
+ msg->slot = qp->id;
+
+ msg->nix_pf_func = otx2_eth_dev->pf_func;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_process(mbox);
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
@@ -7,6 +7,8 @@
#include <rte_cryptodev.h>
+#include "otx2_cryptodev_hw_access.h"
+
int otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
uint16_t *nb_queues);
@@ -22,4 +24,9 @@ int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
uint64_t val);
+int otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev,
+ struct otx2_cpt_qp *qp, uint16_t port_id);
+
+int otx2_cpt_inline_init(const struct rte_cryptodev *dev);
+
#endif /* _OTX2_CRYPTODEV_MBOX_H_ */
@@ -6,6 +6,7 @@
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
+#include <rte_ethdev.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
@@ -127,6 +128,29 @@ otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
meta_info->sg_mlen = 0;
}
+static int
+otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+ static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
+ uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
+ int i, ret;
+
+ for (i = 0; i < nb_ethport; i++) {
+ port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
+ if (otx2_ethdev_is_sec_capable(&rte_eth_devices[port_id]))
+ break;
+ }
+
+ if (i >= nb_ethport)
+ return 0;
+
+ ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
uint8_t group)
@@ -220,6 +244,12 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
otx2_cpt_iq_disable(qp);
+ ret = otx2_cpt_qp_inline_cfg(dev, qp);
+ if (ret) {
+ CPT_LOG_ERR("Could not configure queue for inline IPsec");
+ goto mempool_destroy;
+ }
+
ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
size_div40);
if (ret) {
@@ -913,12 +943,20 @@ otx2_cpt_dev_config(struct rte_cryptodev *dev,
goto queues_detach;
}
+ ret = otx2_cpt_inline_init(dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not enable inline IPsec");
+ goto intr_unregister;
+ }
+
dev->enqueue_burst = otx2_cpt_enqueue_burst;
dev->dequeue_burst = otx2_cpt_dequeue_burst;
rte_mb();
return 0;
+intr_unregister:
+ otx2_cpt_err_intr_unregister(dev);
queues_detach:
otx2_cpt_queues_detach(dev);
return ret;