get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full update).
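The update operations require an authenticated account with maintainer rights on the project; the GET shown below needs no authentication. The following is a minimal sketch of driving both operations from Python with the requests library — the API token placeholder and the example values written to "state" and "archived" are assumptions for illustration, not values taken from this page.

import requests

API = "http://patches.dpdk.org/api"
TOKEN = "REPLACE_WITH_API_TOKEN"  # hypothetical maintainer API token
HEADERS = {"Authorization": "Token " + TOKEN}

# get: show the patch.
patch = requests.get(API + "/patches/119460/", headers=HEADERS, timeout=30).json()
print(patch["name"], patch["state"])

# patch: partially update it (maintainer only). "state" and "archived" are
# assumed writable fields here; the values are examples, not from this page.
resp = requests.patch(
    API + "/patches/119460/",
    headers=HEADERS,
    json={"state": "accepted", "archived": False},
    timeout=30,
)
resp.raise_for_status()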

GET /api/patches/119460/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119460,
    "url": "http://patches.dpdk.org/api/patches/119460/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221103175347.651579-3-vfialko@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221103175347.651579-3-vfialko@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221103175347.651579-3-vfialko@marvell.com",
    "date": "2022-11-03T17:53:47",
    "name": "[2/2] app/testeventdev: resolve issues with crypto producer",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "aa72657019b5c60a582a848b84dc8f7ed2790e25",
    "submitter": {
        "id": 2390,
        "url": "http://patches.dpdk.org/api/people/2390/?format=api",
        "name": "Volodymyr Fialko",
        "email": "vfialko@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221103175347.651579-3-vfialko@marvell.com/mbox/",
    "series": [
        {
            "id": 25560,
            "url": "http://patches.dpdk.org/api/series/25560/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25560",
            "date": "2022-11-03T17:53:45",
            "name": "app/testseventdev: crypto producer fixes",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/25560/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/119460/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/119460/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8DB40A00C2;\n\tThu,  3 Nov 2022 18:54:08 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 30F1F42D17;\n\tThu,  3 Nov 2022 18:54:03 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 4473D42D17\n for <dev@dpdk.org>; Thu,  3 Nov 2022 18:54:02 +0100 (CET)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 2A3HYGGp013475 for <dev@dpdk.org>; Thu, 3 Nov 2022 10:54:01 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3kmj5m82y5-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 03 Nov 2022 10:54:01 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Thu, 3 Nov 2022 10:53:59 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Thu, 3 Nov 2022 10:53:59 -0700",
            "from localhost.localdomain (unknown [10.28.34.39])\n by maili.marvell.com (Postfix) with ESMTP id C43573F70CE;\n Thu,  3 Nov 2022 10:53:57 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=m9VjD4IgqJNd/UN6fpKlS3pNjPidzMCwTn607EpCcuc=;\n b=WB/Y1SoN/lLSqMvgoGaHnUZvnSWEBGErW4ogJW3JtqoXyqR7VhYCCO3JKmBG+3qFEHnp\n GHUevlJS6YfCbJqtJN7HTZvyi3mHNB7eG9c8cKeoEzi5KI//vuQJfhlDCfUrOZooI1Gm\n 7bmsouxa69Ba6dquJdF/yFrTIZ6mNMSj/VnUSuS+4i8jD5XAyDCUUDeyhy9+nwQdQsxP\n XQFXz4teprF8m1syDTkLzeNco5EtUSWq/51iMBux7fLq4OSImnftNsCYRhJXIbH5YnGK\n 0KZe+ABpS0B7pFqWMkL7SHYZ/k3aV44zOzXic35wvkVnA6H0JrPDPchj6g+He3HnLAGm gg==",
        "From": "Volodymyr Fialko <vfialko@marvell.com>",
        "To": "<dev@dpdk.org>, Jerin Jacob <jerinj@marvell.com>",
        "CC": "<anoobj@marvell.com>, Volodymyr Fialko <vfialko@marvell.com>",
        "Subject": "[PATCH 2/2] app/testeventdev: resolve issues with crypto producer",
        "Date": "Thu, 3 Nov 2022 18:53:47 +0100",
        "Message-ID": "<20221103175347.651579-3-vfialko@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20221103175347.651579-1-vfialko@marvell.com>",
        "References": "<20221103175347.651579-1-vfialko@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "1pWC4E_3v-AUIrt6SVcORjPSvNYK4P-F",
        "X-Proofpoint-GUID": "1pWC4E_3v-AUIrt6SVcORjPSvNYK4P-F",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.205,Aquarius:18.0.895,Hydra:6.0.545,FMLib:17.11.122.1\n definitions=2022-11-03_04,2022-11-03_01,2022-06-22_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Resolve issues with crypto producer in configuration with multiple stages.\n\n1) With symmetric crypto producer and enabled `--fwd_latency` we will\ntreat rte_mbuf as perf_elt which will lead to rte_mbuf header\ncorruption. Use rte_mbuf data to store time stamp information.\n\n2) For asymmetric crypto producer check for event type in\n`process_crypto_request` will not pass in case of multiple stages, due\nto overwrite of event type during event forward. Use producer type to\ndispatch.\n\nSigned-off-by: Volodymyr Fialko <vfialko@marvell.com>\n---\n app/test-eventdev/test_perf_atq.c    |  74 +++++------------\n app/test-eventdev/test_perf_common.c |  48 ++++++++---\n app/test-eventdev/test_perf_common.h | 116 +++++++++++++++++++++------\n app/test-eventdev/test_perf_queue.c  |  81 ++++++-------------\n 4 files changed, 173 insertions(+), 146 deletions(-)",
    "diff": "diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c\nindex 8326f54045..9d30081117 100644\n--- a/app/test-eventdev/test_perf_atq.c\n+++ b/app/test-eventdev/test_perf_atq.c\n@@ -14,16 +14,6 @@ atq_nb_event_queues(struct evt_options *opt)\n \t\trte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);\n }\n \n-static __rte_always_inline void\n-atq_mark_fwd_latency(struct rte_event *const ev)\n-{\n-\tif (unlikely(ev->sub_event_type == 0)) {\n-\t\tstruct perf_elt *const m = ev->event_ptr;\n-\n-\t\tm->timestamp = rte_get_timer_cycles();\n-\t}\n-}\n-\n static __rte_always_inline void\n atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,\n \t\tconst uint8_t nb_stages)\n@@ -37,9 +27,11 @@ atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,\n static int\n perf_atq_worker(void *arg, const int enable_fwd_latency)\n {\n+\tstruct perf_elt *pe = NULL;\n \tuint16_t enq = 0, deq = 0;\n \tstruct rte_event ev;\n \tPERF_WORKER_INIT;\n+\tuint8_t stage;\n \n \twhile (t->done == false) {\n \t\tdeq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);\n@@ -49,35 +41,23 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)\n \t\t\tcontinue;\n \t\t}\n \n-\t\tif (prod_crypto_type &&\n-\t\t    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n-\t\t\tstruct rte_crypto_op *op = ev.event_ptr;\n-\n-\t\t\tif (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {\n-\t\t\t\tif (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n-\t\t\t\t\tif (op->sym->m_dst == NULL)\n-\t\t\t\t\t\tev.event_ptr = op->sym->m_src;\n-\t\t\t\t\telse\n-\t\t\t\t\t\tev.event_ptr = op->sym->m_dst;\n-\t\t\t\t\trte_crypto_op_free(op);\n-\t\t\t\t}\n-\t\t\t} else {\n-\t\t\t\trte_crypto_op_free(op);\n+\t\tif (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n+\t\t\tif (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))\n \t\t\t\tcontinue;\n-\t\t\t}\n \t\t}\n \n-\t\tif (enable_fwd_latency && !prod_timer_type)\n+\t\tstage = ev.sub_event_type % nb_stages;\n+\t\tif (enable_fwd_latency && !prod_timer_type && stage == 0)\n \t\t/* first stage in pipeline, mark ts to compute fwd latency */\n-\t\t\tatq_mark_fwd_latency(&ev);\n+\t\t\tperf_mark_fwd_latency(ev.event_ptr);\n \n \t\t/* last stage in pipeline */\n-\t\tif (unlikely((ev.sub_event_type % nb_stages) == laststage)) {\n+\t\tif (unlikely(stage == laststage)) {\n \t\t\tif (enable_fwd_latency)\n-\t\t\t\tcnt = perf_process_last_stage_latency(pool,\n+\t\t\t\tcnt = perf_process_last_stage_latency(pool, prod_crypto_type,\n \t\t\t\t\t&ev, w, bufs, sz, cnt);\n \t\t\telse\n-\t\t\t\tcnt = perf_process_last_stage(pool, &ev, w,\n+\t\t\t\tcnt = perf_process_last_stage(pool, prod_crypto_type, &ev, w,\n \t\t\t\t\t bufs, sz, cnt);\n \t\t} else {\n \t\t\tatq_fwd_event(&ev, sched_type_list, nb_stages);\n@@ -99,7 +79,9 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)\n \t/* +1 to avoid prefetch out of array check */\n \tstruct rte_event ev[BURST_SIZE + 1];\n \tuint16_t enq = 0, nb_rx = 0;\n+\tstruct perf_elt *pe = NULL;\n \tPERF_WORKER_INIT;\n+\tuint8_t stage;\n \tuint16_t i;\n \n \twhile (t->done == false) {\n@@ -111,40 +93,26 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)\n \t\t}\n \n \t\tfor (i = 0; i < nb_rx; i++) {\n-\t\t\tif (prod_crypto_type &&\n-\t\t\t    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n-\t\t\t\tstruct rte_crypto_op *op = ev[i].event_ptr;\n-\n-\t\t\t\tif (op->status ==\n-\t\t\t\t    RTE_CRYPTO_OP_STATUS_SUCCESS) {\n-\t\t\t\t\tif (op->sym->m_dst == 
NULL)\n-\t\t\t\t\t\tev[i].event_ptr =\n-\t\t\t\t\t\t\top->sym->m_src;\n-\t\t\t\t\telse\n-\t\t\t\t\t\tev[i].event_ptr =\n-\t\t\t\t\t\t\top->sym->m_dst;\n-\t\t\t\t\trte_crypto_op_free(op);\n-\t\t\t\t} else {\n-\t\t\t\t\trte_crypto_op_free(op);\n+\t\t\tif (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n+\t\t\t\tif (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))\n \t\t\t\t\tcontinue;\n-\t\t\t\t}\n \t\t\t}\n \n-\t\t\tif (enable_fwd_latency && !prod_timer_type) {\n+\t\t\tstage = ev[i].sub_event_type % nb_stages;\n+\t\t\tif (enable_fwd_latency && !prod_timer_type && stage == 0) {\n \t\t\t\trte_prefetch0(ev[i+1].event_ptr);\n \t\t\t\t/* first stage in pipeline.\n \t\t\t\t * mark time stamp to compute fwd latency\n \t\t\t\t */\n-\t\t\t\tatq_mark_fwd_latency(&ev[i]);\n+\t\t\t\tperf_mark_fwd_latency(ev[i].event_ptr);\n \t\t\t}\n \t\t\t/* last stage in pipeline */\n-\t\t\tif (unlikely((ev[i].sub_event_type % nb_stages)\n-\t\t\t\t\t\t== laststage)) {\n+\t\t\tif (unlikely(stage == laststage)) {\n \t\t\t\tif (enable_fwd_latency)\n-\t\t\t\t\tcnt = perf_process_last_stage_latency(\n-\t\t\t\t\t\tpool, &ev[i], w, bufs, sz, cnt);\n+\t\t\t\t\tcnt = perf_process_last_stage_latency(pool,\n+\t\t\t\t\t\tprod_crypto_type, &ev[i], w, bufs, sz, cnt);\n \t\t\t\telse\n-\t\t\t\t\tcnt = perf_process_last_stage(pool,\n+\t\t\t\t\tcnt = perf_process_last_stage(pool, prod_crypto_type,\n \t\t\t\t\t\t&ev[i], w, bufs, sz, cnt);\n \n \t\t\t\tev[i].op = RTE_EVENT_OP_RELEASE;\ndiff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c\nindex 6aae18fddb..6d04a5265c 100644\n--- a/app/test-eventdev/test_perf_common.c\n+++ b/app/test-eventdev/test_perf_common.c\n@@ -370,16 +370,17 @@ crypto_adapter_enq_op_new(struct prod_data *p)\n \tuint64_t alloc_failures = 0;\n \tuint32_t flow_counter = 0;\n \tstruct rte_crypto_op *op;\n+\tuint16_t len, offset;\n \tstruct rte_mbuf *m;\n \tuint64_t count = 0;\n-\tuint16_t len;\n \n \tif (opt->verbose_level > 1)\n \t\tprintf(\"%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\\n\",\n \t\t       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,\n \t\t       p->ca.cdev_qp_id);\n \n-\tlen = opt->mbuf_sz ? 
opt->mbuf_sz : RTE_ETHER_MIN_LEN;\n+\toffset = sizeof(struct perf_elt);\n+\tlen = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);\n \n \twhile (count < nb_pkts && t->done == false) {\n \t\tif (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n@@ -402,19 +403,24 @@ crypto_adapter_enq_op_new(struct prod_data *p)\n \t\t\trte_pktmbuf_append(m, len);\n \t\t\tsym_op = op->sym;\n \t\t\tsym_op->m_src = m;\n-\t\t\tsym_op->cipher.data.offset = 0;\n-\t\t\tsym_op->cipher.data.length = len;\n+\t\t\tsym_op->cipher.data.offset = offset;\n+\t\t\tsym_op->cipher.data.length = len - offset;\n \t\t\trte_crypto_op_attach_sym_session(\n \t\t\t\top, p->ca.crypto_sess[flow_counter++ % nb_flows]);\n \t\t} else {\n \t\t\tstruct rte_crypto_asym_op *asym_op;\n-\t\t\tuint8_t *result = rte_zmalloc(NULL,\n-\t\t\t\t\tmodex_test_case.result_len, 0);\n+\t\t\tuint8_t *result;\n+\n+\t\t\tif (rte_mempool_get(pool, (void **)&result)) {\n+\t\t\t\talloc_failures++;\n+\t\t\t\tcontinue;\n+\t\t\t}\n \n \t\t\top = rte_crypto_op_alloc(t->ca_op_pool,\n \t\t\t\t\t RTE_CRYPTO_OP_TYPE_ASYMMETRIC);\n \t\t\tif (unlikely(op == NULL)) {\n \t\t\t\talloc_failures++;\n+\t\t\t\trte_mempool_put(pool, result);\n \t\t\t\tcontinue;\n \t\t\t}\n \n@@ -451,10 +457,10 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)\n \tuint64_t alloc_failures = 0;\n \tuint32_t flow_counter = 0;\n \tstruct rte_crypto_op *op;\n+\tuint16_t len, offset;\n \tstruct rte_event ev;\n \tstruct rte_mbuf *m;\n \tuint64_t count = 0;\n-\tuint16_t len;\n \n \tif (opt->verbose_level > 1)\n \t\tprintf(\"%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\\n\",\n@@ -466,7 +472,9 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)\n \tev.queue_id = p->queue_id;\n \tev.sched_type = RTE_SCHED_TYPE_ATOMIC;\n \tev.event_type = RTE_EVENT_TYPE_CPU;\n-\tlen = opt->mbuf_sz ? 
opt->mbuf_sz : RTE_ETHER_MIN_LEN;\n+\n+\toffset = sizeof(struct perf_elt);\n+\tlen = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);\n \n \twhile (count < nb_pkts && t->done == false) {\n \t\tif (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n@@ -489,19 +497,24 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)\n \t\t\trte_pktmbuf_append(m, len);\n \t\t\tsym_op = op->sym;\n \t\t\tsym_op->m_src = m;\n-\t\t\tsym_op->cipher.data.offset = 0;\n-\t\t\tsym_op->cipher.data.length = len;\n+\t\t\tsym_op->cipher.data.offset = offset;\n+\t\t\tsym_op->cipher.data.length = len - offset;\n \t\t\trte_crypto_op_attach_sym_session(\n \t\t\t\top, p->ca.crypto_sess[flow_counter++ % nb_flows]);\n \t\t} else {\n \t\t\tstruct rte_crypto_asym_op *asym_op;\n-\t\t\tuint8_t *result = rte_zmalloc(NULL,\n-\t\t\t\t\tmodex_test_case.result_len, 0);\n+\t\t\tuint8_t *result;\n+\n+\t\t\tif (rte_mempool_get(pool, (void **)&result)) {\n+\t\t\t\talloc_failures++;\n+\t\t\t\tcontinue;\n+\t\t\t}\n \n \t\t\top = rte_crypto_op_alloc(t->ca_op_pool,\n \t\t\t\t\t RTE_CRYPTO_OP_TYPE_ASYMMETRIC);\n \t\t\tif (unlikely(op == NULL)) {\n \t\t\t\talloc_failures++;\n+\t\t\t\trte_mempool_put(pool, result);\n \t\t\t\tcontinue;\n \t\t\t}\n \n@@ -1360,6 +1373,7 @@ perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)\n \t\treturn -ENODEV;\n \t}\n \n+\n \tt->ca_op_pool = rte_crypto_op_pool_create(\n \t\t\"crypto_op_pool\", opt->crypto_op_type, opt->pool_sz,\n \t\t128, sizeof(union rte_event_crypto_metadata),\n@@ -1510,6 +1524,16 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)\n \t\t\t\t0, NULL, NULL,\n \t\t\t\tperf_elt_init, /* obj constructor */\n \t\t\t\tNULL, opt->socket_id, 0); /* flags */\n+\t} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR &&\n+\t\t\topt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)  {\n+\t\tt->pool = rte_mempool_create(test->name, /* mempool name */\n+\t\t\t\topt->pool_sz, /* number of elements*/\n+\t\t\t\tsizeof(struct perf_elt) + modex_test_case.result_len,\n+\t\t\t\t/* element size*/\n+\t\t\t\t512, /* cache size*/\n+\t\t\t\t0, NULL, NULL,\n+\t\t\t\tNULL, /* obj constructor */\n+\t\t\t\tNULL, opt->socket_id, 0); /* flags */\n \t} else {\n \t\tt->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */\n \t\t\t\topt->pool_sz, /* number of elements*/\ndiff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h\nindex d06d52cdf8..503b6aa1db 100644\n--- a/app/test-eventdev/test_perf_common.h\n+++ b/app/test-eventdev/test_perf_common.h\n@@ -107,11 +107,50 @@ struct perf_elt {\n \t\tprintf(\"%s(): lcore %d dev_id %d port=%d\\n\", __func__,\\\n \t\t\t\trte_lcore_id(), dev, port)\n \n+static __rte_always_inline void\n+perf_mark_fwd_latency(struct perf_elt *const pe)\n+{\n+\tpe->timestamp = rte_get_timer_cycles();\n+}\n+\n+static __rte_always_inline int\n+perf_handle_crypto_ev(struct rte_event *ev, struct perf_elt **pe, int enable_fwd_latency)\n+{\n+\tstruct rte_crypto_op *op = ev->event_ptr;\n+\tstruct rte_mbuf *m;\n+\n+\n+\tif (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {\n+\t\trte_crypto_op_free(op);\n+\t\treturn op->status;\n+\t}\n+\n+\t/* Forward latency not enabled - perf data will not be accessed */\n+\tif (!enable_fwd_latency)\n+\t\treturn 0;\n+\n+\t/* Get pointer to perf data */\n+\tif (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n+\t\tif (op->sym->m_dst == NULL)\n+\t\t\tm = op->sym->m_src;\n+\t\telse\n+\t\t\tm = op->sym->m_dst;\n+\t\t*pe = rte_pktmbuf_mtod(m, struct perf_elt *);\n+\t} else {\n+\t\t*pe = 
RTE_PTR_ADD(op->asym->modex.result.data, op->asym->modex.result.length);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+\n static __rte_always_inline int\n-perf_process_last_stage(struct rte_mempool *const pool,\n+perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type,\n \t\tstruct rte_event *const ev, struct worker_data *const w,\n \t\tvoid *bufs[], int const buf_sz, uint8_t count)\n {\n+\tvoid *to_free_in_bulk;\n+\n \t/* release fence here ensures event_prt is\n \t * stored before updating the number of\n \t * processed packets for worker lcores\n@@ -119,30 +158,42 @@ perf_process_last_stage(struct rte_mempool *const pool,\n \trte_atomic_thread_fence(__ATOMIC_RELEASE);\n \tw->processed_pkts++;\n \n-\tif (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&\n-\t\t\t((struct rte_crypto_op *)ev->event_ptr)->type ==\n-\t\t\t\tRTE_CRYPTO_OP_TYPE_ASYMMETRIC) {\n+\tif (prod_crypto_type) {\n \t\tstruct rte_crypto_op *op = ev->event_ptr;\n+\t\tstruct rte_mbuf *m;\n+\n+\t\tif (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n+\t\t\tif (op->sym->m_dst == NULL)\n+\t\t\t\tm = op->sym->m_src;\n+\t\t\telse\n+\t\t\t\tm = op->sym->m_dst;\n \n-\t\trte_free(op->asym->modex.result.data);\n+\t\t\tto_free_in_bulk = m;\n+\t\t} else {\n+\t\t\tto_free_in_bulk = op->asym->modex.result.data;\n+\t\t}\n \t\trte_crypto_op_free(op);\n \t} else {\n-\t\tbufs[count++] = ev->event_ptr;\n-\t\tif (unlikely(count == buf_sz)) {\n-\t\t\tcount = 0;\n-\t\t\trte_mempool_put_bulk(pool, bufs, buf_sz);\n-\t\t}\n+\t\tto_free_in_bulk = ev->event_ptr;\n \t}\n+\n+\tbufs[count++] = to_free_in_bulk;\n+\tif (unlikely(count == buf_sz)) {\n+\t\tcount = 0;\n+\t\trte_mempool_put_bulk(pool, bufs, buf_sz);\n+\t}\n+\n \treturn count;\n }\n \n static __rte_always_inline uint8_t\n-perf_process_last_stage_latency(struct rte_mempool *const pool,\n+perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_crypto_type,\n \t\tstruct rte_event *const ev, struct worker_data *const w,\n \t\tvoid *bufs[], int const buf_sz, uint8_t count)\n {\n \tuint64_t latency;\n-\tstruct perf_elt *const m = ev->event_ptr;\n+\tstruct perf_elt *pe;\n+\tvoid *to_free_in_bulk;\n \n \t/* release fence here ensures event_prt is\n \t * stored before updating the number of\n@@ -151,23 +202,38 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,\n \trte_atomic_thread_fence(__ATOMIC_RELEASE);\n \tw->processed_pkts++;\n \n-\tif (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&\n-\t\t\t((struct rte_crypto_op *)m)->type ==\n-\t\t\t\tRTE_CRYPTO_OP_TYPE_ASYMMETRIC) {\n-\t\trte_free(((struct rte_crypto_op *)m)->asym->modex.result.data);\n-\t\trte_crypto_op_free((struct rte_crypto_op *)m);\n-\t} else {\n-\t\tbufs[count++] = ev->event_ptr;\n-\t\tif (unlikely(count == buf_sz)) {\n-\t\t\tcount = 0;\n-\t\t\tlatency = rte_get_timer_cycles() - m->timestamp;\n-\t\t\trte_mempool_put_bulk(pool, bufs, buf_sz);\n+\tif (prod_crypto_type) {\n+\t\tstruct rte_crypto_op *op = ev->event_ptr;\n+\t\tstruct rte_mbuf *m;\n+\n+\t\tif (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n+\t\t\tif (op->sym->m_dst == NULL)\n+\t\t\t\tm = op->sym->m_src;\n+\t\t\telse\n+\t\t\t\tm = op->sym->m_dst;\n+\n+\t\t\tto_free_in_bulk = m;\n+\t\t\tpe = rte_pktmbuf_mtod(m, struct perf_elt *);\n \t\t} else {\n-\t\t\tlatency = rte_get_timer_cycles() - m->timestamp;\n+\t\t\tpe = RTE_PTR_ADD(op->asym->modex.result.data,\n+\t\t\t\t\t op->asym->modex.result.length);\n+\t\t\tto_free_in_bulk = op->asym->modex.result.data;\n \t\t}\n+\t\trte_crypto_op_free(op);\n+\t} else {\n+\t\tpe = 
ev->event_ptr;\n+\t\tto_free_in_bulk = pe;\n+\t}\n \n-\t\tw->latency += latency;\n+\tlatency = rte_get_timer_cycles() - pe->timestamp;\n+\tw->latency += latency;\n+\n+\tbufs[count++] = to_free_in_bulk;\n+\tif (unlikely(count == buf_sz)) {\n+\t\tcount = 0;\n+\t\trte_mempool_put_bulk(pool, bufs, buf_sz);\n \t}\n+\n \treturn count;\n }\n \ndiff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c\nindex 814ab9f9bd..69ef0ebbac 100644\n--- a/app/test-eventdev/test_perf_queue.c\n+++ b/app/test-eventdev/test_perf_queue.c\n@@ -15,17 +15,6 @@ perf_queue_nb_event_queues(struct evt_options *opt)\n \treturn nb_prod * opt->nb_stages;\n }\n \n-static __rte_always_inline void\n-mark_fwd_latency(struct rte_event *const ev,\n-\t\tconst uint8_t nb_stages)\n-{\n-\tif (unlikely((ev->queue_id % nb_stages) == 0)) {\n-\t\tstruct perf_elt *const m = ev->event_ptr;\n-\n-\t\tm->timestamp = rte_get_timer_cycles();\n-\t}\n-}\n-\n static __rte_always_inline void\n fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,\n \t\tconst uint8_t nb_stages)\n@@ -39,9 +28,12 @@ fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,\n static int\n perf_queue_worker(void *arg, const int enable_fwd_latency)\n {\n+\tstruct perf_elt *pe = NULL;\n \tuint16_t enq = 0, deq = 0;\n \tstruct rte_event ev;\n \tPERF_WORKER_INIT;\n+\tuint8_t stage;\n+\n \n \twhile (t->done == false) {\n \t\tdeq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);\n@@ -51,41 +43,30 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)\n \t\t\tcontinue;\n \t\t}\n \n-\t\tif (prod_crypto_type &&\n-\t\t    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n-\t\t\tstruct rte_crypto_op *op = ev.event_ptr;\n-\n-\t\t\tif (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {\n-\t\t\t\tif (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {\n-\t\t\t\t\tif (op->sym->m_dst == NULL)\n-\t\t\t\t\t\tev.event_ptr = op->sym->m_src;\n-\t\t\t\t\telse\n-\t\t\t\t\t\tev.event_ptr = op->sym->m_dst;\n-\t\t\t\t\trte_crypto_op_free(op);\n-\t\t\t\t}\n-\t\t\t} else {\n-\t\t\t\trte_crypto_op_free(op);\n+\t\tif (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n+\t\t\tif (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))\n \t\t\t\tcontinue;\n-\t\t\t}\n+\t\t} else {\n+\t\t\tpe = ev.event_ptr;\n \t\t}\n \n-\t\tif (enable_fwd_latency && !prod_timer_type)\n+\t\tstage = ev.queue_id % nb_stages;\n+\t\tif (enable_fwd_latency && !prod_timer_type && stage == 0)\n \t\t/* first q in pipeline, mark timestamp to compute fwd latency */\n-\t\t\tmark_fwd_latency(&ev, nb_stages);\n+\t\t\tperf_mark_fwd_latency(pe);\n \n \t\t/* last stage in pipeline */\n-\t\tif (unlikely((ev.queue_id % nb_stages) == laststage)) {\n+\t\tif (unlikely(stage == laststage)) {\n \t\t\tif (enable_fwd_latency)\n-\t\t\t\tcnt = perf_process_last_stage_latency(pool,\n+\t\t\t\tcnt = perf_process_last_stage_latency(pool, prod_crypto_type,\n \t\t\t\t\t&ev, w, bufs, sz, cnt);\n \t\t\telse\n-\t\t\t\tcnt = perf_process_last_stage(pool,\n+\t\t\t\tcnt = perf_process_last_stage(pool, prod_crypto_type,\n \t\t\t\t\t&ev, w, bufs, sz, cnt);\n \t\t} else {\n \t\t\tfwd_event(&ev, sched_type_list, nb_stages);\n \t\t\tdo {\n-\t\t\t\tenq = rte_event_enqueue_burst(dev, port, &ev,\n-\t\t\t\t\t\t\t      1);\n+\t\t\t\tenq = rte_event_enqueue_burst(dev, port, &ev, 1);\n \t\t\t} while (!enq && !t->done);\n \t\t}\n \t}\n@@ -101,7 +82,9 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)\n \t/* +1 to avoid prefetch out of array check */\n \tstruct rte_event ev[BURST_SIZE + 1];\n 
\tuint16_t enq = 0, nb_rx = 0;\n+\tstruct perf_elt *pe = NULL;\n \tPERF_WORKER_INIT;\n+\tuint8_t stage;\n \tuint16_t i;\n \n \twhile (t->done == false) {\n@@ -113,40 +96,26 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)\n \t\t}\n \n \t\tfor (i = 0; i < nb_rx; i++) {\n-\t\t\tif (prod_crypto_type &&\n-\t\t\t    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n-\t\t\t\tstruct rte_crypto_op *op = ev[i].event_ptr;\n-\n-\t\t\t\tif (op->status ==\n-\t\t\t\t    RTE_CRYPTO_OP_STATUS_SUCCESS) {\n-\t\t\t\t\tif (op->sym->m_dst == NULL)\n-\t\t\t\t\t\tev[i].event_ptr =\n-\t\t\t\t\t\t\top->sym->m_src;\n-\t\t\t\t\telse\n-\t\t\t\t\t\tev[i].event_ptr =\n-\t\t\t\t\t\t\top->sym->m_dst;\n-\t\t\t\t\trte_crypto_op_free(op);\n-\t\t\t\t} else {\n-\t\t\t\t\trte_crypto_op_free(op);\n+\t\t\tif (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {\n+\t\t\t\tif (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))\n \t\t\t\t\tcontinue;\n-\t\t\t\t}\n \t\t\t}\n \n-\t\t\tif (enable_fwd_latency && !prod_timer_type) {\n+\t\t\tstage = ev[i].queue_id % nb_stages;\n+\t\t\tif (enable_fwd_latency && !prod_timer_type && stage == 0) {\n \t\t\t\trte_prefetch0(ev[i+1].event_ptr);\n \t\t\t\t/* first queue in pipeline.\n \t\t\t\t * mark time stamp to compute fwd latency\n \t\t\t\t */\n-\t\t\t\tmark_fwd_latency(&ev[i], nb_stages);\n+\t\t\t\tperf_mark_fwd_latency(ev[i].event_ptr);\n \t\t\t}\n \t\t\t/* last stage in pipeline */\n-\t\t\tif (unlikely((ev[i].queue_id % nb_stages) ==\n-\t\t\t\t\t\t laststage)) {\n+\t\t\tif (unlikely(stage == laststage)) {\n \t\t\t\tif (enable_fwd_latency)\n-\t\t\t\t\tcnt = perf_process_last_stage_latency(\n-\t\t\t\t\t\tpool, &ev[i], w, bufs, sz, cnt);\n+\t\t\t\t\tcnt = perf_process_last_stage_latency(pool,\n+\t\t\t\t\t\tprod_crypto_type, &ev[i], w, bufs, sz, cnt);\n \t\t\t\telse\n-\t\t\t\t\tcnt = perf_process_last_stage(pool,\n+\t\t\t\t\tcnt = perf_process_last_stage(pool, prod_crypto_type,\n \t\t\t\t\t\t&ev[i], w, bufs, sz, cnt);\n \n \t\t\t\tev[i].op = RTE_EVENT_OP_RELEASE;\n",
    "prefixes": [
        "2/2"
    ]
}
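A response like the one above can be consumed directly; the fields used in the sketch below ("id", "name", "state", "check", "mbox") all appear in the JSON shown. This is a minimal read-only example assuming the Python requests library; no authentication is needed for GET.

import requests

patch = requests.get(
    "http://patches.dpdk.org/api/patches/119460/", timeout=30
).json()

# Summarise the patch and its aggregate CI check result.
print(f'{patch["name"]}: state={patch["state"]}, checks={patch["check"]}')

# Download the patch in mbox form, ready for `git am`.
mbox = requests.get(patch["mbox"], timeout=30)
with open(f'{patch["id"]}.mbox', "wb") as f:
    f.write(mbox.content)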