Add the infrastructure to save the CPT queue pair as an opaque pointer
in the per-port security configuration and implement the
producer-consumer handling in the PMDs which use it: the crypto PMD
publishes its inline Tx queue pairs via otx2_sec_tx_cpt_qp_add() and
withdraws them via otx2_sec_tx_cpt_qp_remove().
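
For illustration, a minimal sketch of how a consumer could pick a
published queue pair from the table added here (sec_tx_cpt_qp_get() is
a hypothetical helper, not part of this patch; it assumes the
tx_cpt_idx field is intended for round-robin selection):

  static struct otx2_cpt_qp *
  sec_tx_cpt_qp_get(uint16_t port_id)	/* hypothetical helper */
  {
  	struct otx2_sec_eth_cfg *cfg = &sec_cfg[port_id];
  	struct otx2_cpt_qp *qp = NULL;
  	int i, idx;

  	rte_spinlock_lock(&cfg->tx_cpt_lock);

  	/* Round-robin over the slots filled by otx2_sec_tx_cpt_qp_add() */
  	for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
  		idx = (cfg->tx_cpt_idx + i) % OTX2_MAX_CPT_QP_PER_PORT;
  		if (cfg->tx_cpt[idx].qp == NULL)
  			continue;
  		qp = cfg->tx_cpt[idx].qp;
  		/* Hold a ref so otx2_sec_tx_cpt_qp_remove() sees -EBUSY */
  		rte_atomic16_inc(&cfg->tx_cpt[idx].ref_cnt);
  		cfg->tx_cpt_idx = (idx + 1) % OTX2_MAX_CPT_QP_PER_PORT;
  		break;
  	}

  	rte_spinlock_unlock(&cfg->tx_cpt_lock);

  	return qp;
  }
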
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
.../crypto/octeontx2/otx2_cryptodev_hw_access.h | 22 +----
drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 18 ++++
drivers/crypto/octeontx2/otx2_cryptodev_qp.h | 35 ++++++++
drivers/crypto/octeontx2/otx2_security.c | 98 ++++++++++++++++++++++
drivers/crypto/octeontx2/otx2_security.h | 20 +++++
5 files changed, 172 insertions(+), 21 deletions(-)
create mode 100644 drivers/crypto/octeontx2/otx2_cryptodev_qp.h

diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
--- a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
@@ -15,6 +15,7 @@
#include "cpt_mcode_defines.h"
#include "otx2_dev.h"
+#include "otx2_cryptodev_qp.h"
/* CPT instruction queue length */
#define OTX2_CPT_IQ_LEN 8200
@@ -135,27 +136,6 @@ enum cpt_9x_comp_e {
	CPT_9X_COMP_E_LAST_ENTRY = 0x06
};
-struct otx2_cpt_qp {
-	uint32_t id;
-	/**< Queue pair id */
-	uintptr_t base;
-	/**< Base address where BAR is mapped */
-	void *lmtline;
-	/**< Address of LMTLINE */
-	rte_iova_t lf_nq_reg;
-	/**< LF enqueue register address */
-	struct pending_queue pend_q;
-	/**< Pending queue */
-	struct rte_mempool *sess_mp;
-	/**< Session mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session private data mempool */
-	struct cpt_qp_meta_info meta_info;
-	/**< Metabuf info required to support operations on the queue pair */
-	rte_iova_t iq_dma_addr;
-	/**< Instruction queue address */
-};
-
void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev);
int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev);

diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -13,6 +13,7 @@
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
+#include "otx2_security.h"
#include "otx2_mbox.h"
#include "cpt_hw_types.h"
@@ -148,6 +149,11 @@ otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
	if (ret)
		return ret;

+	/* Publish inline Tx QP to eth dev security */
+	ret = otx2_sec_tx_cpt_qp_add(port_id, qp);
+	if (ret)
+		return ret;
+
	return 0;
}
@@ -242,6 +248,12 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
	qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

+	ret = otx2_sec_tx_cpt_qp_remove(qp);
+	if (ret && (ret != -ENOENT)) {
+		CPT_LOG_ERR("Could not delete inline configuration");
+		goto mempool_destroy;
+	}
+
	otx2_cpt_iq_disable(qp);

	ret = otx2_cpt_qp_inline_cfg(dev, qp);
@@ -275,6 +287,12 @@ otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

+	ret = otx2_sec_tx_cpt_qp_remove(qp);
+	if (ret && (ret != -ENOENT)) {
+		CPT_LOG_ERR("Could not delete inline configuration");
+		return ret;
+	}
+
	otx2_cpt_iq_disable(qp);

	otx2_cpt_metabuf_mempool_destroy(qp);

diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_qp.h b/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
new file mode 100644
--- /dev/null
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_QP_H_
+#define _OTX2_CRYPTODEV_QP_H_
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
+
+#include "cpt_common.h"
+
+struct otx2_cpt_qp {
+	uint32_t id;
+	/**< Queue pair id */
+	uintptr_t base;
+	/**< Base address where BAR is mapped */
+	void *lmtline;
+	/**< Address of LMTLINE */
+	rte_iova_t lf_nq_reg;
+	/**< LF enqueue register address */
+	struct pending_queue pend_q;
+	/**< Pending queue */
+	struct rte_mempool *sess_mp;
+	/**< Session mempool */
+	struct rte_mempool *sess_mp_priv;
+	/**< Session private data mempool */
+	struct cpt_qp_meta_info meta_info;
+	/**< Metabuf info required to support operations on the queue pair */
+	rte_iova_t iq_dma_addr;
+	/**< Instruction queue address */
+};
+
+#endif /* _OTX2_CRYPTODEV_QP_H_ */

diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c
--- a/drivers/crypto/octeontx2/otx2_security.c
+++ b/drivers/crypto/octeontx2/otx2_security.c
@@ -10,6 +10,7 @@
#include <rte_security.h>
#include <rte_security_driver.h>
+#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ipsec_fp.h"
#include "otx2_security.h"
@@ -29,6 +30,8 @@ struct sec_eth_tag_const {
	};
};
+static struct otx2_sec_eth_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];
+
static struct rte_cryptodev_capabilities otx2_sec_eth_crypto_caps[] = {
	{ /* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -116,16 +119,41 @@ static struct rte_security_ops otx2_sec_eth_ops = {
	.capabilities_get = otx2_sec_eth_capabilities_get
};
+static int
+otx2_sec_eth_cfg_init(int port_id)
+{
+	struct otx2_sec_eth_cfg *cfg;
+	int i;
+
+	cfg = &sec_cfg[port_id];
+	cfg->tx_cpt_idx = 0;
+	rte_spinlock_init(&cfg->tx_cpt_lock);
+
+	for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+		cfg->tx_cpt[i].qp = NULL;
+		rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
+	}
+
+	return 0;
+}
+
int
otx2_sec_eth_ctx_create(struct rte_eth_dev *eth_dev)
{
	struct rte_security_ctx *ctx;
+	int ret;

	ctx = rte_malloc("otx2_sec_eth_ctx",
			 sizeof(struct rte_security_ctx), 0);
	if (ctx == NULL)
		return -ENOMEM;

+	ret = otx2_sec_eth_cfg_init(eth_dev->data->port_id);
+	if (ret) {
+		rte_free(ctx);
+		return ret;
+	}
+
	/* Populate ctx */
	ctx->device = eth_dev;
@@ -239,3 +267,73 @@ otx2_sec_eth_fini(struct rte_eth_dev *eth_dev)
	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	rte_memzone_free(rte_memzone_lookup(name));
}
+
+int
+otx2_sec_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
+{
+	struct otx2_sec_eth_cfg *cfg;
+	int i, ret;
+
+	if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
+		return -EINVAL;
+
+	cfg = &sec_cfg[port_id];
+
+	/* Find a free slot to save CPT LF */
+
+	rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+	for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+		if (cfg->tx_cpt[i].qp == NULL) {
+			cfg->tx_cpt[i].qp = qp;
+			ret = 0;
+			goto unlock;
+		}
+	}
+
+	ret = -EINVAL;
+
+unlock:
+	rte_spinlock_unlock(&cfg->tx_cpt_lock);
+	return ret;
+}
+
+int
+otx2_sec_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
+{
+	struct otx2_sec_eth_cfg *cfg;
+	uint16_t port_id;
+	int i, ret;
+
+	if (qp == NULL)
+		return -EINVAL;
+
+	for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
+		cfg = &sec_cfg[port_id];
+
+		rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+		for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+			if (cfg->tx_cpt[i].qp != qp)
+				continue;
+
+			/* Don't free if the QP is in use by any sec session */
+			if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
+				ret = -EBUSY;
+			} else {
+				cfg->tx_cpt[i].qp = NULL;
+				ret = 0;
+			}
+
+			goto unlock;
+		}
+
+		rte_spinlock_unlock(&cfg->tx_cpt_lock);
+	}
+
+	return -ENOENT;
+
+unlock:
+	rte_spinlock_unlock(&cfg->tx_cpt_lock);
+	return ret;
+}

diff --git a/drivers/crypto/octeontx2/otx2_security.h b/drivers/crypto/octeontx2/otx2_security.h
--- a/drivers/crypto/octeontx2/otx2_security.h
+++ b/drivers/crypto/octeontx2/otx2_security.h
@@ -5,10 +5,27 @@
#ifndef __OTX2_SECURITY_H__
#define __OTX2_SECURITY_H__
+#include <rte_atomic.h>
#include <rte_ethdev.h>
+#include <rte_spinlock.h>
#include "otx2_ipsec_fp.h"
+#define OTX2_MAX_CPT_QP_PER_PORT 64
+#define OTX2_MAX_INLINE_PORTS 64
+
+struct otx2_cpt_qp;
+
+struct otx2_sec_eth_cfg {
+	struct {
+		struct otx2_cpt_qp *qp;
+		rte_atomic16_t ref_cnt;
+	} tx_cpt[OTX2_MAX_CPT_QP_PER_PORT];
+
+	uint16_t tx_cpt_idx;
+	rte_spinlock_t tx_cpt_lock;
+};
+
/*
 * Security session for inline IPsec protocol offload. This is private data of
 * inline capable PMD.
@@ -33,4 +50,7 @@ int otx2_sec_eth_init(struct rte_eth_dev *eth_dev);
void otx2_sec_eth_fini(struct rte_eth_dev *eth_dev);
+int otx2_sec_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp);
+
+int otx2_sec_tx_cpt_qp_remove(struct otx2_cpt_qp *qp);
#endif /* __OTX2_SECURITY_H__ */