[v2,1/2] event/octeontx2: fix crypto adapter queue pair ops
Checks
Commit Message
Parameter queue_pair_id of crypto adapter queue pair add/del operation
can be -1 to select all preconfigured crypto queue pairs. Added support
for the same in driver. Also added a member in cpt qp structure to
indicate binding state of a queue pair to an event queue.
Fixes: 29768f78d5a7 ("event/octeontx2: add crypto adapter framework")
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
drivers/crypto/octeontx2/otx2_cryptodev_qp.h | 4 +-
.../event/octeontx2/otx2_evdev_crypto_adptr.c | 102 ++++++++++++------
2 files changed, 75 insertions(+), 31 deletions(-)
Comments
>-----Original Message-----
>From: Shijith Thotton <sthotton@marvell.com>
>Sent: Monday, April 26, 2021 5:51 PM
>To: dev@dpdk.org
>Cc: Shijith Thotton <sthotton@marvell.com>; Jerin Jacob Kollanukkaran
><jerinj@marvell.com>; Anoob Joseph <anoobj@marvell.com>; Ankur Dwivedi
><adwivedi@marvell.com>; Pavan Nikhilesh Bhagavatula
><pbhagavatula@marvell.com>
>Subject: [PATCH v2 1/2] event/octeontx2: fix crypto adapter queue pair ops
>
>Parameter queue_pair_id of crypto adapter queue pair add/del operation can
>be -1 to select all preconfigured crypto queue pairs. Added support for the
>same in driver. Also added a member in cpt qp structure to indicate binding
>state of a queue pair to an event queue.
>
>Fixes: 29768f78d5a7 ("event/octeontx2: add crypto adapter framework")
>
>Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Ankur Dwivedi <adwivedi@marvell.com>
>---
> drivers/crypto/octeontx2/otx2_cryptodev_qp.h | 4 +-
> .../event/octeontx2/otx2_evdev_crypto_adptr.c | 102 ++++++++++++------
> 2 files changed, 75 insertions(+), 31 deletions(-)
>
>diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_qp.h b/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
>index 189fa3db4..95bce3621 100644
>--- a/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
>+++ b/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
>@@ -1,5 +1,5 @@
> /* SPDX-License-Identifier: BSD-3-Clause
>- * Copyright (C) 2020 Marvell International Ltd.
>+ * Copyright (C) 2020-2021 Marvell.
> */
>
> #ifndef _OTX2_CRYPTODEV_QP_H_
>@@ -39,6 +39,8 @@ struct otx2_cpt_qp {
> */
> uint8_t ca_enable;
> /**< Set when queue pair is added to crypto adapter */
>+ uint8_t qp_ev_bind;
>+ /**< Set when queue pair is bound to event queue */
> };
>
> #endif /* _OTX2_CRYPTODEV_QP_H_ */
>diff --git a/drivers/event/octeontx2/otx2_evdev_crypto_adptr.c b/drivers/event/octeontx2/otx2_evdev_crypto_adptr.c
>index 2c9b347f0..ed600a659 100644
>--- a/drivers/event/octeontx2/otx2_evdev_crypto_adptr.c
>+++ b/drivers/event/octeontx2/otx2_evdev_crypto_adptr.c
>@@ -1,10 +1,11 @@
> /* SPDX-License-Identifier: BSD-3-Clause
>- * Copyright (C) 2020 Marvell International Ltd.
>+ * Copyright (C) 2020-2021 Marvell.
> */
>
> #include <rte_cryptodev.h>
> #include <rte_eventdev.h>
>
>+#include "otx2_cryptodev.h"
> #include "otx2_cryptodev_hw_access.h"
> #include "otx2_cryptodev_qp.h"
> #include "otx2_cryptodev_mbox.h"
>@@ -24,30 +25,66 @@ otx2_ca_caps_get(const struct rte_eventdev *dev,
> return 0;
> }
>
>-int
>-otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
>-	       int32_t queue_pair_id, const struct rte_event *event)
>+static int
>+otx2_ca_qp_sso_link(const struct rte_cryptodev *cdev, struct otx2_cpt_qp *qp,
>+		    uint16_t sso_pf_func)
> {
>- struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
> union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
>- struct otx2_cpt_qp *qp;
> int ret;
>
>- qp = cdev->data->queue_pairs[queue_pair_id];
>-
>- qp->ca_enable = 1;
>- rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
>-
> ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
>- qp->blkaddr, &af_lf_ctl2.u);
>+ qp->blkaddr, &af_lf_ctl2.u);
> if (ret)
> return ret;
>
>- af_lf_ctl2.s.sso_pf_func = otx2_sso_pf_func_get();
>+ af_lf_ctl2.s.sso_pf_func = sso_pf_func;
> ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
>- qp->blkaddr, af_lf_ctl2.u);
>- if (ret)
>- return ret;
>+ qp->blkaddr, af_lf_ctl2.u);
>+ return ret;
>+}
>+
>+static void
>+otx2_ca_qp_init(struct otx2_cpt_qp *qp, const struct rte_event *event)
>+{
>+ if (event) {
>+ qp->qp_ev_bind = 1;
>+ rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
>+ } else {
>+ qp->qp_ev_bind = 0;
>+ }
>+ qp->ca_enable = 1;
>+}
>+
>+int
>+otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
>+	       int32_t queue_pair_id, const struct rte_event *event)
>+{
>+ struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
>+ struct otx2_cpt_vf *vf = cdev->data->dev_private;
>+ uint16_t sso_pf_func = otx2_sso_pf_func_get();
>+ struct otx2_cpt_qp *qp;
>+ uint8_t qp_id;
>+ int ret;
>+
>+ if (queue_pair_id == -1) {
>+ for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
>+ qp = cdev->data->queue_pairs[qp_id];
>+ ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
>+ if (ret) {
>+ uint8_t qp_tmp;
>+ for (qp_tmp = 0; qp_tmp < qp_id; qp_tmp++)
>+ otx2_ca_qp_del(dev, cdev, qp_tmp);
>+ return ret;
>+ }
>+ otx2_ca_qp_init(qp, event);
>+ }
>+ } else {
>+ qp = cdev->data->queue_pairs[queue_pair_id];
>+ ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
>+ if (ret)
>+ return ret;
>+ otx2_ca_qp_init(qp, event);
>+ }
>
> sso_evdev->rx_offloads |= NIX_RX_OFFLOAD_SECURITY_F;
> sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);
>@@ -59,24 +96,29 @@ int
> otx2_ca_qp_del(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
> 	int32_t queue_pair_id)
> {
>- union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
>+ struct otx2_cpt_vf *vf = cdev->data->dev_private;
> struct otx2_cpt_qp *qp;
>+ uint8_t qp_id;
> int ret;
>
> RTE_SET_USED(dev);
>
>- qp = cdev->data->queue_pairs[queue_pair_id];
>- qp->ca_enable = 0;
>- memset(&qp->ev, 0, sizeof(struct rte_event));
>+ ret = 0;
>+ if (queue_pair_id == -1) {
>+ for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
>+ qp = cdev->data->queue_pairs[qp_id];
>+ ret = otx2_ca_qp_sso_link(cdev, qp, 0);
>+ if (ret)
>+ return ret;
>+ qp->ca_enable = 0;
>+ }
>+ } else {
>+ qp = cdev->data->queue_pairs[queue_pair_id];
>+ ret = otx2_ca_qp_sso_link(cdev, qp, 0);
>+ if (ret)
>+ return ret;
>+ qp->ca_enable = 0;
>+ }
>
>- ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
>- qp->blkaddr, &af_lf_ctl2.u);
>- if (ret)
>- return ret;
>-
>- af_lf_ctl2.s.sso_pf_func = 0;
>- ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
>- qp->blkaddr, af_lf_ctl2.u);
>-
>- return ret;
>+ return 0;
> }
>--
>2.25.1
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2021 Marvell.
*/
#ifndef _OTX2_CRYPTODEV_QP_H_
@@ -39,6 +39,8 @@ struct otx2_cpt_qp {
*/
uint8_t ca_enable;
/**< Set when queue pair is added to crypto adapter */
+ uint8_t qp_ev_bind;
+ /**< Set when queue pair is bound to event queue */
};
#endif /* _OTX2_CRYPTODEV_QP_H_ */
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2021 Marvell.
*/
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
+#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_cryptodev_mbox.h"
@@ -24,30 +25,66 @@ otx2_ca_caps_get(const struct rte_eventdev *dev,
return 0;
}
-int
-otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
- int32_t queue_pair_id, const struct rte_event *event)
+static int
+otx2_ca_qp_sso_link(const struct rte_cryptodev *cdev, struct otx2_cpt_qp *qp,
+ uint16_t sso_pf_func)
{
- struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
- struct otx2_cpt_qp *qp;
int ret;
- qp = cdev->data->queue_pairs[queue_pair_id];
-
- qp->ca_enable = 1;
- rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
-
ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, &af_lf_ctl2.u);
+ qp->blkaddr, &af_lf_ctl2.u);
if (ret)
return ret;
- af_lf_ctl2.s.sso_pf_func = otx2_sso_pf_func_get();
+ af_lf_ctl2.s.sso_pf_func = sso_pf_func;
ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, af_lf_ctl2.u);
- if (ret)
- return ret;
+ qp->blkaddr, af_lf_ctl2.u);
+ return ret;
+}
+
+static void
+otx2_ca_qp_init(struct otx2_cpt_qp *qp, const struct rte_event *event)
+{
+ if (event) {
+ qp->qp_ev_bind = 1;
+ rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
+ } else {
+ qp->qp_ev_bind = 0;
+ }
+ qp->ca_enable = 1;
+}
+
+int
+otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id, const struct rte_event *event)
+{
+ struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
+ struct otx2_cpt_vf *vf = cdev->data->dev_private;
+ uint16_t sso_pf_func = otx2_sso_pf_func_get();
+ struct otx2_cpt_qp *qp;
+ uint8_t qp_id;
+ int ret;
+
+ if (queue_pair_id == -1) {
+ for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
+ qp = cdev->data->queue_pairs[qp_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
+ if (ret) {
+ uint8_t qp_tmp;
+ for (qp_tmp = 0; qp_tmp < qp_id; qp_tmp++)
+ otx2_ca_qp_del(dev, cdev, qp_tmp);
+ return ret;
+ }
+ otx2_ca_qp_init(qp, event);
+ }
+ } else {
+ qp = cdev->data->queue_pairs[queue_pair_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
+ if (ret)
+ return ret;
+ otx2_ca_qp_init(qp, event);
+ }
sso_evdev->rx_offloads |= NIX_RX_OFFLOAD_SECURITY_F;
sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);
@@ -59,24 +96,29 @@ int
otx2_ca_qp_del(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
int32_t queue_pair_id)
{
- union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
+ struct otx2_cpt_vf *vf = cdev->data->dev_private;
struct otx2_cpt_qp *qp;
+ uint8_t qp_id;
int ret;
RTE_SET_USED(dev);
- qp = cdev->data->queue_pairs[queue_pair_id];
- qp->ca_enable = 0;
- memset(&qp->ev, 0, sizeof(struct rte_event));
+ ret = 0;
+ if (queue_pair_id == -1) {
+ for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
+ qp = cdev->data->queue_pairs[qp_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, 0);
+ if (ret)
+ return ret;
+ qp->ca_enable = 0;
+ }
+ } else {
+ qp = cdev->data->queue_pairs[queue_pair_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, 0);
+ if (ret)
+ return ret;
+ qp->ca_enable = 0;
+ }
- ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, &af_lf_ctl2.u);
- if (ret)
- return ret;
-
- af_lf_ctl2.s.sso_pf_func = 0;
- ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, af_lf_ctl2.u);
-
- return ret;
+ return 0;
}