@@ -37,7 +37,7 @@ failover_worker_enqueue(struct scheduler_worker *worker,
}
static uint16_t
-schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_fo_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct fo_scheduler_qp_ctx *qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -60,14 +60,14 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
static uint16_t
-schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
nb_ops);
- uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ uint16_t nb_ops_enqd = schedule_fo_enqueue(qp, ops,
nb_ops_to_enq);
scheduler_order_insert(order_ring, ops, nb_ops_enqd);
@@ -76,7 +76,7 @@ schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
}
static uint16_t
-schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct fo_scheduler_qp_ctx *qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -108,13 +108,13 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
- schedule_dequeue(qp, ops, nb_ops);
+ schedule_fo_dequeue(qp, ops, nb_ops);
return scheduler_order_drain(order_ring, ops, nb_ops);
}
@@ -145,11 +145,11 @@ scheduler_start(struct rte_cryptodev *dev)
}
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = schedule_enqueue_ordering;
- dev->dequeue_burst = schedule_dequeue_ordering;
+ dev->enqueue_burst = schedule_fo_enqueue_ordering;
+ dev->dequeue_burst = schedule_fo_dequeue_ordering;
} else {
- dev->enqueue_burst = schedule_enqueue;
- dev->dequeue_burst = schedule_dequeue;
+ dev->enqueue_burst = schedule_fo_enqueue;
+ dev->dequeue_burst = schedule_fo_dequeue;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
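
For context: this hunk group and the three below apply the same mechanical rename in each scheduling mode's source file, prefixing the static burst handlers with the mode (fo_, mc_, dist_, rr_) so the four files no longer define identically named static functions. Linkage is unchanged; the gain is unambiguous symbol names in backtraces, perf output and traces. A hedged sketch of the failover policy behind schedule_fo_enqueue() (its body is not shown in the hunks above; primary_worker/secondary_worker are assumed field names consistent with the failover_worker_enqueue() context line):

static uint16_t
fo_enqueue_sketch(struct fo_scheduler_qp_ctx *qp_ctx,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* try the primary worker first ... */
	uint16_t enqd = failover_worker_enqueue(&qp_ctx->primary_worker,
			ops, nb_ops);

	/* ... then fail over whatever it could not take */
	if (enqd < nb_ops)
		enqd += failover_worker_enqueue(&qp_ctx->secondary_worker,
				&ops[enqd], nb_ops - enqd);

	return enqd;
}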
@@ -36,7 +36,7 @@ struct mc_scheduler_qp_ctx {
};
static uint16_t
-schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_mc_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct mc_scheduler_qp_ctx *mc_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -64,14 +64,14 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
nb_ops);
- uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ uint16_t nb_ops_enqd = schedule_mc_enqueue(qp, ops,
nb_ops_to_enq);
scheduler_order_insert(order_ring, ops, nb_ops_enqd);
@@ -81,7 +81,7 @@ schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
static uint16_t
-schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_mc_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct mc_scheduler_qp_ctx *mc_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -107,7 +107,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
@@ -253,11 +253,11 @@ scheduler_start(struct rte_cryptodev *dev)
sched_ctx->wc_pool[i]);
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = &schedule_enqueue_ordering;
- dev->dequeue_burst = &schedule_dequeue_ordering;
+ dev->enqueue_burst = &schedule_mc_enqueue_ordering;
+ dev->dequeue_burst = &schedule_mc_dequeue_ordering;
} else {
- dev->enqueue_burst = &schedule_enqueue;
- dev->dequeue_burst = &schedule_dequeue;
+ dev->enqueue_burst = &schedule_mc_enqueue;
+ dev->dequeue_burst = &schedule_mc_dequeue;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
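
For reference, the pointers assigned in scheduler_start() are what the library's inline burst API dispatches through, so after this patch an application-level enqueue lands in e.g. schedule_mc_enqueue_ordering. In the multicore mode the crypto work itself runs on dedicated worker lcores fed through rings; the handlers renamed here only move ops in and out of those rings. Roughly (simplified from the inline wrapper in rte_cryptodev.h of this API era, not part of this patch):

static inline uint16_t
enqueue_burst_sketch(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];

	/* dev->enqueue_burst is schedule_mc_enqueue[_ordering] here */
	return (*dev->enqueue_burst)(dev->data->queue_pairs[qp_id],
			ops, nb_ops);
}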
@@ -34,7 +34,7 @@ struct psd_schedule_op {
};
static uint16_t
-schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dist_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct scheduler_qp_ctx *qp_ctx = qp;
struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
@@ -171,14 +171,14 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
nb_ops);
- uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ uint16_t nb_ops_enqd = schedule_dist_enqueue(qp, ops,
nb_ops_to_enq);
scheduler_order_insert(order_ring, ops, nb_ops_enqd);
@@ -187,7 +187,7 @@ schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
}
static uint16_t
-schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct psd_scheduler_qp_ctx *qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -224,13 +224,13 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
- schedule_dequeue(qp, ops, nb_ops);
+ schedule_dist_dequeue(qp, ops, nb_ops);
return scheduler_order_drain(order_ring, ops, nb_ops);
}
@@ -281,11 +281,11 @@ scheduler_start(struct rte_cryptodev *dev)
}
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = &schedule_enqueue_ordering;
- dev->dequeue_burst = &schedule_dequeue_ordering;
+ dev->enqueue_burst = &schedule_dist_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dist_dequeue_ordering;
} else {
- dev->enqueue_burst = &schedule_enqueue;
- dev->dequeue_burst = &schedule_dequeue;
+ dev->enqueue_burst = &schedule_dist_enqueue;
+ dev->dequeue_burst = &schedule_dist_dequeue;
}
return 0;
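
The "dist" prefix refers to the packet-size-distribution mode, which splits each burst between two workers by crypto job length. Illustrative sketch only (the real schedule_dist_enqueue() batches ops via psd_schedule_op; the plain threshold comparison and all names below are assumptions, not the patch's code):

enum { PRIMARY_WORKER_IDX = 0, SECONDARY_WORKER_IDX = 1 };

static inline uint8_t
dist_classify_sketch(const struct rte_crypto_op *op, uint32_t threshold)
{
	/* job length: cipher length if present, else auth length */
	uint32_t job_len = op->sym->cipher.data.length ?
			op->sym->cipher.data.length :
			op->sym->auth.data.length;

	/* large jobs to the primary worker, small ones to the secondary */
	return job_len >= threshold ? PRIMARY_WORKER_IDX :
			SECONDARY_WORKER_IDX;
}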
@@ -17,7 +17,7 @@ struct rr_scheduler_qp_ctx {
};
static uint16_t
-schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_rr_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -43,14 +43,14 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
nb_ops);
- uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ uint16_t nb_ops_enqd = schedule_rr_enqueue(qp, ops,
nb_ops_to_enq);
scheduler_order_insert(order_ring, ops, nb_ops_enqd);
@@ -60,7 +60,7 @@ schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
static uint16_t
-schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_rr_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
@@ -98,13 +98,13 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
- schedule_dequeue(qp, ops, nb_ops);
+ schedule_rr_dequeue(qp, ops, nb_ops);
return scheduler_order_drain(order_ring, ops, nb_ops);
}
@@ -130,11 +130,11 @@ scheduler_start(struct rte_cryptodev *dev)
uint16_t i;
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = &schedule_enqueue_ordering;
- dev->dequeue_burst = &schedule_dequeue_ordering;
+ dev->enqueue_burst = &schedule_rr_enqueue_ordering;
+ dev->dequeue_burst = &schedule_rr_dequeue_ordering;
} else {
- dev->enqueue_burst = &schedule_enqueue;
- dev->dequeue_burst = &schedule_dequeue;
+ dev->enqueue_burst = &schedule_rr_enqueue;
+ dev->dequeue_burst = &schedule_rr_dequeue;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
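
Finally, a sketch of the selection behind schedule_rr_enqueue()/schedule_rr_dequeue(), whose bodies are not shown above; the two fields are assumptions consistent with the rr_scheduler_qp_ctx context line. Each burst simply goes to the next worker in turn:

struct rr_ctx_sketch {
	uint32_t last_worker_idx;
	uint32_t nb_workers;
};

static inline uint32_t
rr_next_worker_sketch(struct rr_ctx_sketch *ctx)
{
	uint32_t idx = ctx->last_worker_idx;

	ctx->last_worker_idx = (idx + 1) % ctx->nb_workers;
	return idx;
}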