get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/63660/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 63660,
    "url": "http://patches.dpdk.org/api/patches/63660/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1575819342-20008-5-git-send-email-mchalla@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1575819342-20008-5-git-send-email-mchalla@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1575819342-20008-5-git-send-email-mchalla@marvell.com",
    "date": "2019-12-08T15:35:40",
    "name": "[v1,4/6] raw/octeontx2_ep: add enqueue operation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "13ba2b3d4c020f3a4c355042e7a9379b09f4c376",
    "submitter": {
        "id": 1532,
        "url": "http://patches.dpdk.org/api/people/1532/?format=api",
        "name": "Mahipal Challa",
        "email": "mchalla@marvell.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1575819342-20008-5-git-send-email-mchalla@marvell.com/mbox/",
    "series": [
        {
            "id": 7751,
            "url": "http://patches.dpdk.org/api/series/7751/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7751",
            "date": "2019-12-08T15:35:36",
            "name": "OCTEON TX2 End Point Driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/7751/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/63660/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/63660/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 580BBA04F1;\n\tSun,  8 Dec 2019 16:36:57 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 0A3BE1BFA2;\n\tSun,  8 Dec 2019 16:36:19 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173]) by dpdk.org (Postfix) with ESMTP id DDAEA1BF85\n for <dev@dpdk.org>; Sun,  8 Dec 2019 16:36:16 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n xB8FUkZc028966 for <dev@dpdk.org>; Sun, 8 Dec 2019 07:36:16 -0800",
            "from sc-exch04.marvell.com ([199.233.58.184])\n by mx0b-0016f401.pphosted.com with ESMTP id 2wrcfpts1w-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sun, 08 Dec 2019 07:36:16 -0800",
            "from SC-EXCH03.marvell.com (10.93.176.83) by SC-EXCH04.marvell.com\n (10.93.176.84) with Microsoft SMTP Server (TLS) id 15.0.1367.3; Sun, 8 Dec\n 2019 07:36:14 -0800",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH03.marvell.com\n (10.93.176.83) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n Transport; Sun, 8 Dec 2019 07:36:14 -0800",
            "from hyd1244.marvell.com (hyd1244.marvell.com [10.29.20.28])\n by maili.marvell.com (Postfix) with ESMTP id CD0573F703F;\n Sun,  8 Dec 2019 07:36:12 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0818; bh=wU/0bdF8/+ODf8J4f8VC68cnHjjXm9JufvwU1aQfhjU=;\n b=k9RLjq/zf00B6J9zKNe+35Ol3raV16yRPs062/3P6V0m3T9OABfXgjfDFaq5E7ckrNJE\n EuhJ8NPOzpnlnMyWY0HHIdkCH6FFoDXMCIXHxitqxI4II5HmkA3wlSkRQU8gAyW7fg83\n 8AWZalAGtACPqdrkV8TTgx+Mor6x3ksMKcnbL4VcrHFLB2rS0ht9H4iouN6OiSLgSyTT\n xV0zoaEZUTCLLsIV+NsiTOecCHTJQst/KkY9gP3wenKE5N/Vn+XjldJtn7K6LWx0ZBiC\n ESQ7pxkRdy3P/aQHPn3N4egti+4vQzQ9lqNY0nb/7y0b/RO+RoviEGrGpKbkBjMe+wNE aQ==",
        "From": "Mahipal Challa <mchalla@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<jerinj@marvell.com>, <pathreya@marvell.com>, <snilla@marvell.com>,\n <venkatn@marvell.com>",
        "Date": "Sun, 8 Dec 2019 21:05:40 +0530",
        "Message-ID": "<1575819342-20008-5-git-send-email-mchalla@marvell.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1575819342-20008-1-git-send-email-mchalla@marvell.com>",
        "References": "<1575819342-20008-1-git-send-email-mchalla@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.95,18.0.572\n definitions=2019-12-08_04:2019-12-05,2019-12-08 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v1 4/6] raw/octeontx2_ep: add enqueue operation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add rawdev enqueue operation for SDP VF devices.\n\nSigned-off-by: Mahipal Challa <mchalla@marvell.com>\n---\n doc/guides/rawdevs/octeontx2_ep.rst       |   6 +\n drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c | 242 ++++++++++++++++++++++++++++++\n drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h |  39 +++++\n drivers/raw/octeontx2_ep/otx2_ep_rawdev.c |   1 +\n drivers/raw/octeontx2_ep/otx2_ep_rawdev.h |  20 +++\n drivers/raw/octeontx2_ep/otx2_ep_vf.c     |  24 +++\n 6 files changed, 332 insertions(+)",
    "diff": "diff --git a/doc/guides/rawdevs/octeontx2_ep.rst b/doc/guides/rawdevs/octeontx2_ep.rst\nindex 2507fcf..39a7c29 100644\n--- a/doc/guides/rawdevs/octeontx2_ep.rst\n+++ b/doc/guides/rawdevs/octeontx2_ep.rst\n@@ -68,3 +68,9 @@ The following code shows how the device is configured\n \n    rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&rdev_info);\n \n+Performing Data Transfer\n+------------------------\n+\n+To perform data transfer using SDP VF EP rawdev devices use standard\n+``rte_rawdev_enqueue_buffers()`` and ``rte_rawdev_dequeue_buffers()`` APIs.\n+\ndiff --git a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c\nindex 584b818..ebbacfb 100644\n--- a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c\n+++ b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c\n@@ -403,3 +403,245 @@\n \treturn -ENOMEM;\n }\n \n+static inline void\n+sdp_iqreq_delete(struct sdp_device *sdpvf,\n+\t\tstruct sdp_instr_queue *iq, uint32_t idx)\n+{\n+\tuint32_t reqtype;\n+\tvoid *buf;\n+\n+\tbuf     = iq->req_list[idx].buf;\n+\treqtype = iq->req_list[idx].reqtype;\n+\n+\tswitch (reqtype) {\n+\tcase SDP_REQTYPE_NORESP:\n+\t\trte_mempool_put(sdpvf->enqdeq_mpool, buf);\n+\t\totx2_sdp_dbg(\"IQ buffer freed at idx[%d]\", idx);\n+\t\tbreak;\n+\n+\tcase SDP_REQTYPE_NORESP_GATHER:\n+\tcase SDP_REQTYPE_NONE:\n+\tdefault:\n+\t\totx2_info(\"This iqreq mode is not supported:%d\", reqtype);\n+\n+\t}\n+\n+\t/* Reset the request list at this index */\n+\tiq->req_list[idx].buf = NULL;\n+\tiq->req_list[idx].reqtype = 0;\n+}\n+\n+static inline void\n+sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf,\n+\t\tuint32_t reqtype)\n+{\n+\tiq->req_list[iq->host_write_index].buf = buf;\n+\tiq->req_list[iq->host_write_index].reqtype = reqtype;\n+\n+\totx2_sdp_dbg(\"IQ buffer added at idx[%d]\", iq->host_write_index);\n+\n+}\n+\n+static void\n+sdp_flush_iq(struct sdp_device *sdpvf,\n+\t\tstruct sdp_instr_queue *iq,\n+\t\tuint32_t pending_thresh __rte_unused)\n+{\n+\tuint32_t 
instr_processed = 0;\n+\n+\trte_spinlock_lock(&iq->lock);\n+\n+\tiq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq);\n+\twhile (iq->flush_index != iq->otx_read_index) {\n+\t\t/* Free the IQ data buffer to the pool */\n+\t\tsdp_iqreq_delete(sdpvf, iq, iq->flush_index);\n+\t\tiq->flush_index =\n+\t\t\tsdp_incr_index(iq->flush_index, 1, iq->nb_desc);\n+\n+\t\tinstr_processed++;\n+\t}\n+\n+\tiq->stats.instr_processed = instr_processed;\n+\trte_atomic64_sub(&iq->instr_pending, instr_processed);\n+\n+\trte_spinlock_unlock(&iq->lock);\n+}\n+\n+static inline void\n+sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused,\n+\t\tstruct sdp_instr_queue *iq)\n+{\n+\totx2_write64(iq->fill_cnt, iq->doorbell_reg);\n+\n+\t/* Make sure doorbell write goes through */\n+\trte_wmb();\n+\tiq->fill_cnt = 0;\n+\n+}\n+\n+static inline int\n+post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd)\n+{\n+\tuint8_t *iqptr, cmdsize;\n+\n+\t/* This ensures that the read index does not wrap around to\n+\t * the same position if queue gets full before OCTEON TX2 could\n+\t * fetch any instr.\n+\t */\n+\tif (rte_atomic64_read(&iq->instr_pending) >=\n+\t\t\t      (int32_t)(iq->nb_desc - 1)) {\n+\t\totx2_err(\"IQ is full, pending:%ld\",\n+\t\t\t (long)rte_atomic64_read(&iq->instr_pending));\n+\n+\t\treturn SDP_IQ_SEND_FAILED;\n+\t}\n+\n+\t/* Copy cmd into iq */\n+\tcmdsize = ((iq->iqcmd_64B) ? 64 : 32);\n+\tiqptr   = iq->base_addr + (cmdsize * iq->host_write_index);\n+\n+\trte_memcpy(iqptr, iqcmd, cmdsize);\n+\n+\totx2_sdp_dbg(\"IQ cmd posted @ index:%d\", iq->host_write_index);\n+\n+\t/* Increment the host write index */\n+\tiq->host_write_index =\n+\t\tsdp_incr_index(iq->host_write_index, 1, iq->nb_desc);\n+\n+\tiq->fill_cnt++;\n+\n+\t/* Flush the command into memory. 
We need to be sure the data\n+\t * is in memory before indicating that the instruction is\n+\t * pending.\n+\t */\n+\trte_wmb();\n+\trte_atomic64_inc(&iq->instr_pending);\n+\n+\t/* SDP_IQ_SEND_SUCCESS */\n+\treturn 0;\n+}\n+\n+\n+static int\n+sdp_send_data(struct sdp_device *sdpvf,\n+\t      struct sdp_instr_queue *iq, void *cmd)\n+{\n+\tuint32_t ret;\n+\n+\t/* Lock this IQ command queue before posting instruction */\n+\trte_spinlock_lock(&iq->post_lock);\n+\n+\t/* Submit IQ command */\n+\tret = post_iqcmd(iq, cmd);\n+\n+\tif (ret == SDP_IQ_SEND_SUCCESS) {\n+\t\tsdp_ring_doorbell(sdpvf, iq);\n+\n+\t\tiq->stats.instr_posted++;\n+\t\totx2_sdp_dbg(\"Instr submit success posted: %ld\\n\",\n+\t\t\t     (long)iq->stats.instr_posted);\n+\n+\t} else {\n+\t\tiq->stats.instr_dropped++;\n+\t\totx2_err(\"Instr submit failled, dropped: %ld\\n\",\n+\t\t\t (long)iq->stats.instr_dropped);\n+\n+\t}\n+\n+\trte_spinlock_unlock(&iq->post_lock);\n+\n+\treturn ret;\n+}\n+\n+\n+/* Enqueue requests/packets to SDP IQ queue.\n+ * returns number of requests enqueued successfully\n+ */\n+int\n+sdp_rawdev_enqueue(struct rte_rawdev *rawdev,\n+\t\t   struct rte_rawdev_buf **buffers __rte_unused,\n+\t\t   unsigned int count, rte_rawdev_obj_t context)\n+{\n+\tstruct sdp_instr_64B *iqcmd;\n+\tstruct sdp_instr_queue *iq;\n+\tstruct sdp_soft_instr *si;\n+\tstruct sdp_device *sdpvf;\n+\n+\tstruct sdp_instr_ih ihx;\n+\n+\tsdpvf = (struct sdp_device *)rawdev->dev_private;\n+\tsi = (struct sdp_soft_instr *)context;\n+\n+\tiq = sdpvf->instr_queue[si->q_no];\n+\n+\tif ((count > 1) || (count < 1)) {\n+\t\totx2_err(\"This mode not supported: req[%d]\", count);\n+\t\tgoto enq_fail;\n+\t}\n+\n+\tmemset(&ihx, 0, sizeof(struct sdp_instr_ih));\n+\n+\tiqcmd = &si->command;\n+\tmemset(iqcmd, 0, sizeof(struct sdp_instr_64B));\n+\n+\tiqcmd->dptr = (uint64_t)si->dptr;\n+\n+\t/* Populate SDP IH */\n+\tihx.pkind  = sdpvf->pkind;\n+\tihx.fsz    = si->ih.fsz + 8; /* 8B for NIX IH */\n+\tihx.gather = 
si->ih.gather;\n+\n+\t/* Direct data instruction */\n+\tihx.tlen   = si->ih.tlen + ihx.fsz;\n+\n+\tswitch (ihx.gather) {\n+\tcase 0: /* Direct data instr */\n+\t\tihx.tlen = si->ih.tlen + ihx.fsz;\n+\t\tbreak;\n+\n+\tdefault: /* Gather */\n+\t\tswitch (si->ih.gsz) {\n+\t\tcase 0: /* Direct gather instr */\n+\t\t\totx2_err(\"Direct Gather instr : not supported\");\n+\t\t\tgoto enq_fail;\n+\n+\t\tdefault: /* Indirect gather instr */\n+\t\t\totx2_err(\"Indirect Gather instr : not supported\");\n+\t\t\tgoto enq_fail;\n+\t\t}\n+\t}\n+\n+\trte_memcpy(&iqcmd->ih, &ihx, sizeof(uint64_t));\n+\tiqcmd->rptr = (uint64_t)si->rptr;\n+\trte_memcpy(&iqcmd->irh, &si->irh, sizeof(uint64_t));\n+\n+\t/* Swap FSZ(front data) here, to avoid swapping on OCTEON TX2 side */\n+\tsdp_swap_8B_data(&iqcmd->rptr, 1);\n+\tsdp_swap_8B_data(&iqcmd->irh, 1);\n+\n+\totx2_sdp_dbg(\"After swapping\");\n+\totx2_sdp_dbg(\"Word0 [dptr]: 0x%016lx\", (unsigned long)iqcmd->dptr);\n+\totx2_sdp_dbg(\"Word1 [ihtx]: 0x%016lx\", (unsigned long)iqcmd->ih);\n+\totx2_sdp_dbg(\"Word2 [rptr]: 0x%016lx\", (unsigned long)iqcmd->rptr);\n+\totx2_sdp_dbg(\"Word3 [irh]: 0x%016lx\", (unsigned long)iqcmd->irh);\n+\totx2_sdp_dbg(\"Word4 [exhdr[0]]: 0x%016lx\",\n+\t\t\t(unsigned long)iqcmd->exhdr[0]);\n+\n+\tsdp_iqreq_add(iq, si->dptr, si->reqtype);\n+\n+\tif (sdp_send_data(sdpvf, iq, iqcmd)) {\n+\t\totx2_err(\"Data send failled :\");\n+\t\tsdp_iqreq_delete(sdpvf, iq, iq->host_write_index);\n+\t\tgoto enq_fail;\n+\t}\n+\n+\tif (rte_atomic64_read(&iq->instr_pending) >= 1)\n+\t\tsdp_flush_iq(sdpvf, iq, 1 /*(iq->nb_desc / 2)*/);\n+\n+\t/* Return no# of instructions posted successfully. 
*/\n+\treturn count;\n+\n+enq_fail:\n+\treturn SDP_IQ_SEND_FAILED;\n+}\n+\ndiff --git a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h\nindex 4c28283..b9b7c0b 100644\n--- a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h\n+++ b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h\n@@ -8,4 +8,43 @@\n #include <rte_byteorder.h>\n #include \"otx2_ep_rawdev.h\"\n \n+#define SDP_IQ_SEND_FAILED      (-1)\n+#define SDP_IQ_SEND_SUCCESS     (0)\n+\n+\n+static inline uint64_t\n+sdp_endian_swap_8B(uint64_t _d)\n+{\n+\treturn ((((((uint64_t)(_d)) >>  0) & (uint64_t)0xff) << 56) |\n+\t\t(((((uint64_t)(_d)) >>  8) & (uint64_t)0xff) << 48) |\n+\t\t(((((uint64_t)(_d)) >> 16) & (uint64_t)0xff) << 40) |\n+\t\t(((((uint64_t)(_d)) >> 24) & (uint64_t)0xff) << 32) |\n+\t\t(((((uint64_t)(_d)) >> 32) & (uint64_t)0xff) << 24) |\n+\t\t(((((uint64_t)(_d)) >> 40) & (uint64_t)0xff) << 16) |\n+\t\t(((((uint64_t)(_d)) >> 48) & (uint64_t)0xff) <<  8) |\n+\t\t(((((uint64_t)(_d)) >> 56) & (uint64_t)0xff) <<  0));\n+}\n+\n+static inline void\n+sdp_swap_8B_data(uint64_t *data, uint32_t blocks)\n+{\n+\t/* Swap 8B blocks */\n+\twhile (blocks) {\n+\t\t*data = sdp_endian_swap_8B(*data);\n+\t\tblocks--;\n+\t\tdata++;\n+\t}\n+}\n+\n+static inline uint32_t\n+sdp_incr_index(uint32_t index, uint32_t count, uint32_t max)\n+{\n+\tif ((index + count) >= max)\n+\t\tindex = index + count - max;\n+\telse\n+\t\tindex += count;\n+\n+\treturn index;\n+}\n+\n #endif /* _OTX2_EP_ENQDEQ_H_ */\ndiff --git a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c\nindex 2c43d3f..4ba8473 100644\n--- a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c\n+++ b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c\n@@ -251,6 +251,7 @@\n \t.dev_start      = sdp_rawdev_start,\n \t.dev_stop       = sdp_rawdev_stop,\n \t.dev_close      = sdp_rawdev_close,\n+\t.enqueue_bufs   = sdp_rawdev_enqueue,\n };\n \n static int\ndiff --git a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h 
b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h\nindex a01f48d..8fd06fb 100644\n--- a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h\n+++ b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h\n@@ -8,6 +8,10 @@\n #include <rte_byteorder.h>\n #include <rte_spinlock.h>\n \n+/* IQ instruction req types */\n+#define SDP_REQTYPE_NONE             (0)\n+#define SDP_REQTYPE_NORESP           (1)\n+#define SDP_REQTYPE_NORESP_GATHER    (2)\n \n /* Input Request Header format */\n struct sdp_instr_irh {\n@@ -128,6 +132,13 @@ struct sdp_instr_list {\n };\n #define SDP_IQREQ_LIST_SIZE\t(sizeof(struct sdp_instr_list))\n \n+/* Input Queue statistics. Each input queue has four stats fields. */\n+struct sdp_iq_stats {\n+\tuint64_t instr_posted; /* Instructions posted to this queue. */\n+\tuint64_t instr_processed; /* Instructions processed in this queue. */\n+\tuint64_t instr_dropped; /* Instructions that could not be processed */\n+};\n+\n /* Structure to define the configuration attributes for each Input queue. */\n struct sdp_iq_config {\n \t/* Max number of IQs available */\n@@ -195,6 +206,9 @@ struct sdp_instr_queue {\n \t/* Number of instructions pending to be posted to OCTEON TX2. */\n \tuint32_t fill_cnt;\n \n+\t/* Statistics for this input queue. */\n+\tstruct sdp_iq_stats stats;\n+\n \t/* DMA mapped base address of the input descriptor ring. 
*/\n \tuint64_t base_addr_dma;\n \n@@ -380,6 +394,8 @@ struct sdp_fn_list {\n \tvoid (*setup_oq_regs)(struct sdp_device *sdpvf, uint32_t q_no);\n \n \tint (*setup_device_regs)(struct sdp_device *sdpvf);\n+\tuint32_t (*update_iq_read_idx)(struct sdp_instr_queue *iq);\n+\n \tvoid (*enable_io_queues)(struct sdp_device *sdpvf);\n \tvoid (*disable_io_queues)(struct sdp_device *sdpvf);\n \n@@ -458,4 +474,8 @@ struct sdp_device {\n int sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no);\n int sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no);\n \n+int sdp_rawdev_enqueue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers,\n+\t\t       unsigned int count, rte_rawdev_obj_t context);\n+\n+\n #endif /* _OTX2_EP_RAWDEV_H_ */\ndiff --git a/drivers/raw/octeontx2_ep/otx2_ep_vf.c b/drivers/raw/octeontx2_ep/otx2_ep_vf.c\nindex 8e79fe8..c5c0bc3 100644\n--- a/drivers/raw/octeontx2_ep/otx2_ep_vf.c\n+++ b/drivers/raw/octeontx2_ep/otx2_ep_vf.c\n@@ -409,6 +409,28 @@\n \t\tsdp_vf_disable_oq(sdpvf, q_no);\n }\n \n+static uint32_t\n+sdp_vf_update_read_index(struct sdp_instr_queue *iq)\n+{\n+\tuint32_t new_idx = rte_read32(iq->inst_cnt_reg);\n+\n+\t/* The new instr cnt reg is a 32-bit counter that can roll over.\n+\t * We have noted the counter's initial value at init time into\n+\t * reset_instr_cnt\n+\t */\n+\tif (iq->reset_instr_cnt < new_idx)\n+\t\tnew_idx -= iq->reset_instr_cnt;\n+\telse\n+\t\tnew_idx += (0xffffffff - iq->reset_instr_cnt) + 1;\n+\n+\t/* Modulo of the new index with the IQ size will give us\n+\t * the new index.\n+\t */\n+\tnew_idx %= iq->nb_desc;\n+\n+\treturn new_idx;\n+}\n+\n int\n sdp_vf_setup_device(struct sdp_device *sdpvf)\n {\n@@ -436,6 +458,8 @@\n \tsdpvf->fn_list.setup_oq_regs       = sdp_vf_setup_oq_regs;\n \n \tsdpvf->fn_list.setup_device_regs   = sdp_vf_setup_device_regs;\n+\tsdpvf->fn_list.update_iq_read_idx  = sdp_vf_update_read_index;\n+\n \tsdpvf->fn_list.enable_io_queues    = sdp_vf_enable_io_queues;\n \tsdpvf->fn_list.disable_io_queues  
 = sdp_vf_disable_io_queues;\n \n",
    "prefixes": [
        "v1",
        "4/6"
    ]
}