get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
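
A minimal sketch of driving this endpoint with the Python `requests` package; it is illustrative only. The GET needs no authentication, while PATCH/PUT require a maintainer API token (the `PW_TOKEN` value and the `state` change below are placeholder examples, not taken from this record).

import requests

BASE = "http://patches.dpdk.org/api/patches/109824/"

# get: show the patch as JSON.
patch = requests.get(BASE).json()
print(patch["name"], patch["state"])

# patch: partially update selected fields (requires a maintainer token).
token = "PW_TOKEN"  # hypothetical token, generated from a Patchwork user profile
resp = requests.patch(
    BASE,
    headers={"Authorization": f"Token {token}"},
    json={"state": "accepted"},  # illustrative state change
)
resp.raise_for_status()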

GET /api/patches/109824/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 109824,
    "url": "http://patches.dpdk.org/api/patches/109824/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220419055921.10566-19-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220419055921.10566-19-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220419055921.10566-19-ndabilpuram@marvell.com",
    "date": "2022-04-19T05:59:16",
    "name": "[19/24] net/cnxk: optimize Rx fast path for security pkts",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5b1a3d1b63f0311e09cc6586d1ec0ada9c1a0c76",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220419055921.10566-19-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 22546,
            "url": "http://patches.dpdk.org/api/series/22546/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=22546",
            "date": "2022-04-19T05:58:58",
            "name": "[01/24] common/cnxk: add multi channel support for SDP send queues",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/22546/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/109824/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/109824/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D1397A00C3;\n\tTue, 19 Apr 2022 08:02:14 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 08E0A4281E;\n\tTue, 19 Apr 2022 08:00:52 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 6E413427EA\n for <dev@dpdk.org>; Tue, 19 Apr 2022 08:00:50 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id\n 23INJ4Zb009960\n for <dev@dpdk.org>; Mon, 18 Apr 2022 23:00:49 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3ffwap2600-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Mon, 18 Apr 2022 23:00:49 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Mon, 18 Apr 2022 23:00:47 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Mon, 18 Apr 2022 23:00:47 -0700",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id F38F85B6922;\n Mon, 18 Apr 2022 23:00:44 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=iTFAK2VYWWlUAYp5IufgxxpBBNTZ2WhZgONrqFJ4UPM=;\n b=T8tCIAFzRpngsD4WLcuu2t8RbOKCxEMnOD0hPuOp+VrI9L0ehaqionG0nzDpRXs8aQuq\n ozL3rS9iX8+6CfwTsZNeoykokOTnh70/lqIV0jbnY1gKbgt8OaOUKZPdJTWhDbOjqxUs\n wpUIyd5HRcunWqpOkoQM5cly3oghd+hUd2MvKFbbpf9naD9aggBbaaThdYijQDgN20q6\n WbF66EKJhIDMykGSMB3Y8ZUsIpU36jtEHXert86gsnWw9WilUAqR1iSS5mzLEkkSRGnQ\n kxo7fzpaj8GMYr5Jlvh04XwM9S9OmQEBTZXSSLdrRinRmV71pM3E6Tr+HTchytOU0ix1 Ew==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "<jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>, \"Kiran\n Kumar K\" <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>,\n Satha Rao <skoteshwar@marvell.com>",
        "CC": "<dev@dpdk.org>",
        "Subject": "[PATCH 19/24] net/cnxk: optimize Rx fast path for security pkts",
        "Date": "Tue, 19 Apr 2022 11:29:16 +0530",
        "Message-ID": "<20220419055921.10566-19-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20220419055921.10566-1-ndabilpuram@marvell.com>",
        "References": "<20220419055921.10566-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "Z-iK3mY5iH6yqQOuyi2MFi422VKFRDps",
        "X-Proofpoint-ORIG-GUID": "Z-iK3mY5iH6yqQOuyi2MFi422VKFRDps",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.205,Aquarius:18.0.858,Hydra:6.0.486,FMLib:17.11.64.514\n definitions=2022-04-19_02,2022-04-15_01,2022-02-23_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Optimize Rx fast path for security pkts by preprocessing\nmost of the operations such as sa pointer compute,\ninner wqe pointer fetch and ucode completion translation\nbefore the pkt is characterized as inbound inline pkt.\nPreprocessed info will be discarded if pkt is not\nfound to be security pkt. Also fix fetching of CQ word5\nfor vector mode. Get ucode completion code from CPT parse\nheader and RLEN from IP4v/IPv6 decrypted packet as it is\nin same 64B cacheline as CPT parse header in most of\nthe cases. By this method, we avoid accessing an extra\ncacheline\n\nFixes: c062f5726f61 (\"net/cnxk: support IP reassembly\")\n\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\n---\n drivers/net/cnxk/cn10k_rx.h | 487 +++++++++++++++++++++++++++-----------------\n 1 file changed, 305 insertions(+), 182 deletions(-)",
    "diff": "diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h\nindex 94c1f1e..db054c5 100644\n--- a/drivers/net/cnxk/cn10k_rx.h\n+++ b/drivers/net/cnxk/cn10k_rx.h\n@@ -341,6 +341,9 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, uint64_t cq_w1,\n \tmbuf->data_len = frag_size;\n \tfragx_sum += frag_size;\n \n+\t/* Mark frag as get */\n+\tRTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);\n+\n \t/* Frag-2: */\n \tif (hdr->w0.num_frags > 2) {\n \t\tfrag_ptr = (uint64_t *)(finfo + 1);\n@@ -354,6 +357,9 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, uint64_t cq_w1,\n \t\t*(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;\n \t\tmbuf->data_len = frag_size;\n \t\tfragx_sum += frag_size;\n+\n+\t\t/* Mark frag as get */\n+\t\tRTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);\n \t}\n \n \t/* Frag-3: */\n@@ -368,6 +374,9 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, uint64_t cq_w1,\n \t\t*(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;\n \t\tmbuf->data_len = frag_size;\n \t\tfragx_sum += frag_size;\n+\n+\t\t/* Mark frag as get */\n+\t\tRTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);\n \t}\n \n \tif (inner_rx->lctype == NPC_LT_LC_IP) {\n@@ -413,10 +422,10 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,\n \tconst struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;\n \tstruct cn10k_inb_priv_data *inb_priv;\n \tstruct rte_mbuf *inner = NULL;\n-\tuint64_t res_w1;\n \tuint32_t sa_idx;\n-\tuint16_t uc_cc;\n+\tuint16_t ucc;\n \tuint32_t len;\n+\tuintptr_t ip;\n \tvoid *inb_sa;\n \tuint64_t w0;\n \n@@ -438,20 +447,23 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,\n \t\t\t*rte_security_dynfield(inner) =\n \t\t\t\t(uint64_t)inb_priv->userdata;\n \n-\t\t\t/* CPT result(struct cpt_cn10k_res_s) is at\n-\t\t\t * after first IOVA in meta\n+\t\t\t/* Get ucc from cpt parse header */\n+\t\t\tucc = hdr->w3.hw_ccode;\n+\n+\t\t\t/* Calculate inner packet length as\n+\t\t\t * IP total len + l2 len\n \t\t\t */\n-\t\t\tres_w1 = *((uint64_t *)(&inner[1]) + 10);\n-\t\t\tuc_cc = res_w1 & 0xFF;\n+\t\t\tip = (uintptr_t)hdr + ((cq_w5 >> 16) & 0xFF);\n+\t\t\tip += ((cq_w1 >> 40) & 0x6);\n+\t\t\tlen = rte_be_to_cpu_16(*(uint16_t *)ip);\n+\t\t\tlen += ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);\n+\t\t\tlen += (cq_w1 & BIT(42)) ? 
40 : 0;\n \n-\t\t\t/* Calculate inner packet length */\n-\t\t\tlen = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -\n-\t\t\t\tsizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);\n \t\t\tinner->pkt_len = len;\n \t\t\tinner->data_len = len;\n \t\t\t*(uint64_t *)(&inner->rearm_data) = mbuf_init;\n \n-\t\t\tinner->ol_flags = ((uc_cc == CPT_COMP_WARN) ?\n+\t\t\tinner->ol_flags = ((ucc == CPT_COMP_WARN) ?\n \t\t\t\t\t   RTE_MBUF_F_RX_SEC_OFFLOAD :\n \t\t\t\t\t   (RTE_MBUF_F_RX_SEC_OFFLOAD |\n \t\t\t\t\t    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));\n@@ -477,6 +489,12 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,\n \t\t*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;\n \t\t*loff = *loff + 1;\n \n+\t\t/* Mark meta mbuf as put */\n+\t\tRTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);\n+\n+\t\t/* Mark inner mbuf as get */\n+\t\tRTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);\n+\n \t\treturn inner;\n \t} else if (cq_w1 & BIT(11)) {\n \t\tinner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -\n@@ -492,22 +510,21 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,\n \t\t/* Update dynamic field with userdata */\n \t\t*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;\n \n-\t\t/* Update l2 hdr length first */\n+\t\t/* Get ucc from cpt parse header */\n+\t\tucc = hdr->w3.hw_ccode;\n \n-\t\t/* CPT result(struct cpt_cn10k_res_s) is at\n-\t\t * after first IOVA in meta\n-\t\t */\n-\t\tres_w1 = *((uint64_t *)(&inner[1]) + 10);\n-\t\tuc_cc = res_w1 & 0xFF;\n+\t\t/* Calculate inner packet length as IP total len + l2 len */\n+\t\tip = (uintptr_t)hdr + ((cq_w5 >> 16) & 0xFF);\n+\t\tip += ((cq_w1 >> 40) & 0x6);\n+\t\tlen = rte_be_to_cpu_16(*(uint16_t *)ip);\n+\t\tlen += ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);\n+\t\tlen += (cq_w1 & BIT(42)) ? 
40 : 0;\n \n-\t\t/* Calculate inner packet length */\n-\t\tlen = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -\n-\t\t\tsizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);\n \t\tinner->pkt_len = len;\n \t\tinner->data_len = len;\n \t\t*(uint64_t *)(&inner->rearm_data) = mbuf_init;\n \n-\t\tinner->ol_flags = ((uc_cc == CPT_COMP_WARN) ?\n+\t\tinner->ol_flags = ((ucc == CPT_COMP_WARN) ?\n \t\t\t\t   RTE_MBUF_F_RX_SEC_OFFLOAD :\n \t\t\t\t   (RTE_MBUF_F_RX_SEC_OFFLOAD |\n \t\t\t\t    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));\n@@ -532,83 +549,34 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,\n \n #if defined(RTE_ARCH_ARM64)\n \n-static __rte_always_inline struct rte_mbuf *\n-nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t sa_base,\n-\t\t     uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf,\n-\t\t     uint16_t data_off, uint8x16_t *rx_desc_field1,\n-\t\t     uint64_t *ol_flags, const uint16_t flags,\n-\t\t     uint64x2_t *rearm)\n+static __rte_always_inline void\n+nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,\n+\t\t     uintptr_t cpth, struct rte_mbuf *inner,\n+\t\t     uint8x16_t *rx_desc_field1, uint64_t *ol_flags,\n+\t\t     const uint16_t flags, uint64x2_t *rearm)\n {\n-\tconst void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);\n-\tconst struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;\n+\tconst struct cpt_parse_hdr_s *hdr =\n+\t\t(const struct cpt_parse_hdr_s *)cpth;\n \tuint64_t mbuf_init = vgetq_lane_u64(*rearm, 0);\n \tstruct cn10k_inb_priv_data *inb_priv;\n-\tstruct rte_mbuf *inner;\n-\tuint64_t *sg, res_w1;\n-\tuint32_t sa_idx;\n-\tvoid *inb_sa;\n-\tuint16_t len;\n-\tuint64_t w0;\n \n-\tif ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11))) {\n-\t\tw0 = hdr->w0.u64;\n-\t\tsa_idx = w0 >> 32;\n+\t/* Clear checksum flags */\n+\t*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |\n+\t\t       RTE_MBUF_F_RX_IP_CKSUM_MASK);\n \n-\t\t/* Get SPI from CPT_PARSE_S's cookie(already swapped) */\n-\t\tw0 = hdr->w0.u64;\n-\t\tsa_idx = w0 >> 32;\n+\t/* Get SPI from CPT_PARSE_S's cookie(already swapped) */\n+\tinb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);\n \n-\t\tinb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);\n-\t\tinb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);\n+\t/* Update dynamic field with userdata */\n+\t*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;\n \n-\t\t/* Clear checksum flags */\n-\t\t*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |\n-\t\t\t       RTE_MBUF_F_RX_IP_CKSUM_MASK);\n+\t/* Mark inner mbuf as get */\n+\tRTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);\n \n-\t\tif (!hdr->w0.num_frags) {\n-\t\t\t/* No Reassembly or inbound error */\n-\t\t\tinner = (struct rte_mbuf *)\n-\t\t\t\t(rte_be_to_cpu_64(hdr->wqe_ptr) -\n-\t\t\t\t sizeof(struct rte_mbuf));\n-\t\t\t/* Update dynamic field with userdata */\n-\t\t\t*rte_security_dynfield(inner) =\n-\t\t\t\t(uint64_t)inb_priv->userdata;\n-\n-\t\t\t/* CPT result(struct cpt_cn10k_res_s) is at\n-\t\t\t * after first IOVA in meta\n-\t\t\t */\n-\t\t\tsg = (uint64_t *)(inner + 1);\n-\t\t\tres_w1 = sg[10];\n-\n-\t\t\t/* Clear checksum flags and update security flag */\n-\t\t\t*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |\n-\t\t\t\t       RTE_MBUF_F_RX_IP_CKSUM_MASK);\n-\t\t\t*ol_flags |=\n-\t\t\t\t(((res_w1 & 0xFF) == CPT_COMP_WARN) ?\n-\t\t\t\t RTE_MBUF_F_RX_SEC_OFFLOAD :\n-\t\t\t\t (RTE_MBUF_F_RX_SEC_OFFLOAD |\n-\t\t\t\t  RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));\n-\t\t\t/* Calculate inner packet length 
*/\n-\t\t\tlen = ((res_w1 >> 16) & 0xFFFF) +\n-\t\t\t\thdr->w2.il3_off -\n-\t\t\t\tsizeof(struct cpt_parse_hdr_s) -\n-\t\t\t\t(w0 & 0x7);\n-\t\t\t/* Update pkt_len and data_len */\n-\t\t\t*rx_desc_field1 =\n-\t\t\t\tvsetq_lane_u16(len, *rx_desc_field1, 2);\n-\t\t\t*rx_desc_field1 =\n-\t\t\t\tvsetq_lane_u16(len, *rx_desc_field1, 4);\n-\n-\t\t} else if (!(hdr->w0.err_sum) && !(hdr->w0.reas_sts)) {\n+\tif (flags & NIX_RX_REAS_F && hdr->w0.num_frags) {\n+\t\tif (!(hdr->w0.err_sum) && !(hdr->w0.reas_sts)) {\n \t\t\t/* Reassembly success */\n-\t\t\tinner = nix_sec_reassemble_frags(hdr, cq_w1, cq_w5,\n-\t\t\t\t\t\t\t mbuf_init);\n-\t\t\tsg = (uint64_t *)(inner + 1);\n-\t\t\tres_w1 = sg[10];\n-\n-\t\t\t/* Update dynamic field with userdata */\n-\t\t\t*rte_security_dynfield(inner) =\n-\t\t\t\t(uint64_t)inb_priv->userdata;\n+\t\t\tnix_sec_reassemble_frags(hdr, cq_w1, cq_w5, mbuf_init);\n \n \t\t\t/* Assume success */\n \t\t\t*ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;\n@@ -624,7 +592,7 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t sa_base,\n \t\t\t*rearm = vsetq_lane_u64(mbuf_init, *rearm, 0);\n \t\t} else {\n \t\t\t/* Reassembly failure */\n-\t\t\tinner = nix_sec_attach_frags(hdr, inb_priv, mbuf_init);\n+\t\t\tnix_sec_attach_frags(hdr, inb_priv, mbuf_init);\n \t\t\t*ol_flags |= inner->ol_flags;\n \n \t\t\t/* Update pkt_len and data_len */\n@@ -633,65 +601,7 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t sa_base,\n \t\t\t*rx_desc_field1 = vsetq_lane_u16(inner->data_len,\n \t\t\t\t\t\t\t *rx_desc_field1, 4);\n \t\t}\n-\n-\t\t/* Store meta in lmtline to free\n-\t\t * Assume all meta's from same aura.\n-\t\t */\n-\t\t*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;\n-\t\t*loff = *loff + 1;\n-\n-\t\t/* Return inner mbuf */\n-\t\treturn inner;\n-\n-\t} else if (cq_w1 & BIT(11)) {\n-\t\tinner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -\n-\t\t\t\t\t    sizeof(struct rte_mbuf));\n-\t\t/* Get SPI from CPT_PARSE_S's cookie(already swapped) */\n-\t\tw0 = hdr->w0.u64;\n-\t\tsa_idx = w0 >> 32;\n-\n-\t\tinb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);\n-\t\tinb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);\n-\n-\t\t/* Update dynamic field with userdata */\n-\t\t*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;\n-\n-\t\t/* CPT result(struct cpt_cn10k_res_s) is at\n-\t\t * after first IOVA in meta\n-\t\t */\n-\t\tsg = (uint64_t *)(inner + 1);\n-\t\tres_w1 = sg[10];\n-\n-\t\t/* Clear checksum flags and update security flag */\n-\t\t*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK | RTE_MBUF_F_RX_IP_CKSUM_MASK);\n-\t\t*ol_flags |= (((res_w1 & 0xFF) == CPT_COMP_WARN) ?\n-\t\t\t      RTE_MBUF_F_RX_SEC_OFFLOAD :\n-\t\t\t      (RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));\n-\t\t/* Calculate inner packet length */\n-\t\tlen = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -\n-\t\t\tsizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);\n-\t\t/* Update pkt_len and data_len */\n-\t\t*rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 2);\n-\t\t*rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 4);\n-\n-\t\t/* Store meta in lmtline to free\n-\t\t * Assume all meta's from same aura.\n-\t\t */\n-\t\t*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;\n-\t\t*loff = *loff + 1;\n-\n-\t\t/* Mark meta mbuf as put */\n-\t\tRTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);\n-\n-\t\t/* Mark inner mbuf as get */\n-\t\tRTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);\n-\n-\t\t/* Return inner mbuf */\n-\t\treturn inner;\n 
\t}\n-\n-\t/* Return same mbuf as it is not a decrypted pkt */\n-\treturn mbuf;\n }\n #endif\n \n@@ -1040,6 +950,14 @@ nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)\n \treturn ol_flags;\n }\n \n+#define NIX_PUSH_META_TO_FREE(_mbuf, _laddr, _loff_p)                          \\\n+\tdo {                                                                   \\\n+\t\t*(uint64_t *)((_laddr) + (*(_loff_p) << 3)) = (uint64_t)_mbuf; \\\n+\t\t*(_loff_p) = *(_loff_p) + 1;                                   \\\n+\t\t/* Mark meta mbuf as put */                                    \\\n+\t\tRTE_MEMPOOL_CHECK_COOKIES(_mbuf->pool, (void **)&_mbuf, 1, 0); \\\n+\t} while (0)\n+\n static __rte_always_inline uint16_t\n cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,\n \t\t\t   const uint16_t flags, void *lookup_mem,\n@@ -1083,6 +1001,12 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,\n \t\tpkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);\n \t\tif (flags & NIX_RX_OFFLOAD_TSTAMP_F)\n \t\t\ttstamp = rxq->tstamp;\n+\n+\t\tcq0 = desc + CQE_SZ(head);\n+\t\trte_prefetch0(CQE_PTR_OFF(cq0, 0, 64, flags));\n+\t\trte_prefetch0(CQE_PTR_OFF(cq0, 1, 64, flags));\n+\t\trte_prefetch0(CQE_PTR_OFF(cq0, 2, 64, flags));\n+\t\trte_prefetch0(CQE_PTR_OFF(cq0, 3, 64, flags));\n \t} else {\n \t\tRTE_SET_USED(head);\n \t}\n@@ -1188,11 +1112,34 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,\n \t\t\t\t}\n \t\t\t}\n \t\t} else {\n-\t\t\tif (pkts - packets > 4) {\n-\t\t\t\trte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 4, 64, flags));\n-\t\t\t\trte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 5, 64, flags));\n-\t\t\t\trte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 6, 64, flags));\n-\t\t\t\trte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 7, 64, flags));\n+\t\t\tif (flags & NIX_RX_OFFLOAD_SECURITY_F &&\n+\t\t\t    pkts - packets > 4) {\n+\t\t\t\t/* Fetch cpt parse header */\n+\t\t\t\tvoid *p0 =\n+\t\t\t\t\t(void *)*CQE_PTR_OFF(cq0, 4, 72, flags);\n+\t\t\t\tvoid *p1 =\n+\t\t\t\t\t(void *)*CQE_PTR_OFF(cq0, 5, 72, flags);\n+\t\t\t\tvoid *p2 =\n+\t\t\t\t\t(void *)*CQE_PTR_OFF(cq0, 6, 72, flags);\n+\t\t\t\tvoid *p3 =\n+\t\t\t\t\t(void *)*CQE_PTR_OFF(cq0, 7, 72, flags);\n+\t\t\t\trte_prefetch0(p0);\n+\t\t\t\trte_prefetch0(p1);\n+\t\t\t\trte_prefetch0(p2);\n+\t\t\t\trte_prefetch0(p3);\n+\t\t\t}\n+\n+\t\t\tif (pkts - packets > 8) {\n+\t\t\t\tif (flags) {\n+\t\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 8, 0, flags));\n+\t\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 9, 0, flags));\n+\t\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 10, 0, flags));\n+\t\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 11, 0, flags));\n+\t\t\t\t}\n+\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 8, 64, flags));\n+\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 9, 64, flags));\n+\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 10, 64, flags));\n+\t\t\t\trte_prefetch0(CQE_PTR_OFF(cq0, 11, 64, flags));\n \t\t\t}\n \t\t}\n \n@@ -1237,13 +1184,6 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,\n \t\t\tf2 = vqtbl1q_u8(cq2_w8, shuf_msk);\n \t\t\tf3 = vqtbl1q_u8(cq3_w8, shuf_msk);\n \t\t}\n-\t\tif (flags & NIX_RX_OFFLOAD_SECURITY_F) {\n-\t\t\t/* Prefetch probable CPT parse header area */\n-\t\t\trte_prefetch_non_temporal(RTE_PTR_ADD(mbuf0, d_off));\n-\t\t\trte_prefetch_non_temporal(RTE_PTR_ADD(mbuf1, d_off));\n-\t\t\trte_prefetch_non_temporal(RTE_PTR_ADD(mbuf2, d_off));\n-\t\t\trte_prefetch_non_temporal(RTE_PTR_ADD(mbuf3, d_off));\n-\t\t}\n \n \t\t/* Load CQE word0 and word 1 */\n \t\tconst 
uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);\n@@ -1329,10 +1269,125 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,\n \n \t\t/* Translate meta to mbuf */\n \t\tif (flags & NIX_RX_OFFLOAD_SECURITY_F) {\n-\t\t\tuint64_t cq0_w5 = *(uint64_t *)(cq0 + CQE_SZ(0) + 40);\n-\t\t\tuint64_t cq1_w5 = *(uint64_t *)(cq0 + CQE_SZ(1) + 40);\n-\t\t\tuint64_t cq2_w5 = *(uint64_t *)(cq0 + CQE_SZ(2) + 40);\n-\t\t\tuint64_t cq3_w5 = *(uint64_t *)(cq0 + CQE_SZ(3) + 40);\n+\t\t\tuint64_t cq0_w5 = *CQE_PTR_OFF(cq0, 0, 40, flags);\n+\t\t\tuint64_t cq1_w5 = *CQE_PTR_OFF(cq0, 1, 40, flags);\n+\t\t\tuint64_t cq2_w5 = *CQE_PTR_OFF(cq0, 2, 40, flags);\n+\t\t\tuint64_t cq3_w5 = *CQE_PTR_OFF(cq0, 3, 40, flags);\n+\t\t\tuintptr_t cpth0 = (uintptr_t)mbuf0 + d_off;\n+\t\t\tuintptr_t cpth1 = (uintptr_t)mbuf1 + d_off;\n+\t\t\tuintptr_t cpth2 = (uintptr_t)mbuf2 + d_off;\n+\t\t\tuintptr_t cpth3 = (uintptr_t)mbuf3 + d_off;\n+\n+\t\t\tuint64x2_t inner0, inner1, inner2, inner3;\n+\t\t\tuint64x2_t wqe01, wqe23, sa01, sa23;\n+\t\t\tuint16x4_t lens, l2lens, ltypes;\n+\t\t\tuint8x8_t ucc;\n+\n+\t\t\tinner0 = vld1q_u64((const uint64_t *)cpth0);\n+\t\t\tinner1 = vld1q_u64((const uint64_t *)cpth1);\n+\t\t\tinner2 = vld1q_u64((const uint64_t *)cpth2);\n+\t\t\tinner3 = vld1q_u64((const uint64_t *)cpth3);\n+\n+\t\t\t/* Extract and reverse wqe pointers */\n+\t\t\twqe01 = vzip2q_u64(inner0, inner1);\n+\t\t\twqe23 = vzip2q_u64(inner2, inner3);\n+\t\t\twqe01 = vrev64q_u8(wqe01);\n+\t\t\twqe23 = vrev64q_u8(wqe23);\n+\t\t\t/* Adjust wqe pointers to point to mbuf */\n+\t\t\twqe01 = vsubq_u64(wqe01,\n+\t\t\t\t\t  vdupq_n_u64(sizeof(struct rte_mbuf)));\n+\t\t\twqe23 = vsubq_u64(wqe23,\n+\t\t\t\t\t  vdupq_n_u64(sizeof(struct rte_mbuf)));\n+\n+\t\t\t/* Extract sa idx from cookie area and add to sa_base */\n+\t\t\tsa01 = vzip1q_u64(inner0, inner1);\n+\t\t\tsa23 = vzip1q_u64(inner2, inner3);\n+\n+\t\t\tsa01 = vshrq_n_u64(sa01, 32);\n+\t\t\tsa23 = vshrq_n_u64(sa23, 32);\n+\t\t\tsa01 = vshlq_n_u64(sa01,\n+\t\t\t\t\t   ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);\n+\t\t\tsa23 = vshlq_n_u64(sa23,\n+\t\t\t\t\t   ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);\n+\t\t\tsa01 = vaddq_u64(sa01, vdupq_n_u64(sa_base));\n+\t\t\tsa23 = vaddq_u64(sa23, vdupq_n_u64(sa_base));\n+\n+\t\t\tconst uint8x16_t tbl = {\n+\t\t\t\t0, 0, 0, 0, 0, 0, 0, 0,\n+\t\t\t\t/* HW_CCODE -> RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED */\n+\t\t\t\t1, 0, 1, 1, 1, 1, 0, 1,\n+\t\t\t};\n+\n+\t\t\tconst int8x8_t err_off = {\n+\t\t\t\t/* UCC of significance starts from 0xF0 */\n+\t\t\t\t0xF0,\n+\t\t\t\t/* Move HW_CCODE from 0:6 -> 8:14 */\n+\t\t\t\t-8,\n+\t\t\t\t0xF0,\n+\t\t\t\t-8,\n+\t\t\t\t0xF0,\n+\t\t\t\t-8,\n+\t\t\t\t0xF0,\n+\t\t\t\t-8,\n+\t\t\t};\n+\n+\t\t\tucc = vdup_n_u8(0);\n+\t\t\tucc = vset_lane_u16(*(uint16_t *)(cpth0 + 30), ucc, 0);\n+\t\t\tucc = vset_lane_u16(*(uint16_t *)(cpth1 + 30), ucc, 1);\n+\t\t\tucc = vset_lane_u16(*(uint16_t *)(cpth2 + 30), ucc, 2);\n+\t\t\tucc = vset_lane_u16(*(uint16_t *)(cpth3 + 30), ucc, 3);\n+\t\t\tucc = vsub_s8(ucc, err_off);\n+\t\t\tucc = vqtbl1_u8(tbl, ucc);\n+\n+\t\t\tRTE_BUILD_BUG_ON(NPC_LT_LC_IP != 2);\n+\t\t\tRTE_BUILD_BUG_ON(NPC_LT_LC_IP_OPT != 3);\n+\t\t\tRTE_BUILD_BUG_ON(NPC_LT_LC_IP6 != 4);\n+\t\t\tRTE_BUILD_BUG_ON(NPC_LT_LC_IP6_EXT != 5);\n+\n+\t\t\tltypes = vset_lane_u16((cq0_w1 >> 40) & 0x6, ltypes, 0);\n+\t\t\tltypes = vset_lane_u16((cq1_w1 >> 40) & 0x6, ltypes, 1);\n+\t\t\tltypes = vset_lane_u16((cq2_w1 >> 40) & 0x6, ltypes, 2);\n+\t\t\tltypes = vset_lane_u16((cq3_w1 >> 40) & 0x6, ltypes, 3);\n+\n+\t\t\t/* Extract and reverse l3 
length from IPv4/IPv6 hdr\n+\t\t\t * that is in same cacheline most probably as cpth.\n+\t\t\t */\n+\t\t\tcpth0 += ((cq0_w5 >> 16) & 0xFF) +\n+\t\t\t\t vget_lane_u16(ltypes, 0);\n+\t\t\tcpth1 += ((cq1_w5 >> 16) & 0xFF) +\n+\t\t\t\t vget_lane_u16(ltypes, 1);\n+\t\t\tcpth2 += ((cq2_w5 >> 16) & 0xFF) +\n+\t\t\t\t vget_lane_u16(ltypes, 2);\n+\t\t\tcpth3 += ((cq3_w5 >> 16) & 0xFF) +\n+\t\t\t\t vget_lane_u16(ltypes, 3);\n+\t\t\tlens = vdup_n_u16(0);\n+\t\t\tlens = vset_lane_u16(*(uint16_t *)cpth0, lens, 0);\n+\t\t\tlens = vset_lane_u16(*(uint16_t *)cpth1, lens, 1);\n+\t\t\tlens = vset_lane_u16(*(uint16_t *)cpth2, lens, 2);\n+\t\t\tlens = vset_lane_u16(*(uint16_t *)cpth3, lens, 3);\n+\t\t\tlens = vrev16_u8(lens);\n+\n+\t\t\t/* Add l2 length to l3 lengths */\n+\t\t\tl2lens = vdup_n_u16(0);\n+\t\t\tl2lens = vset_lane_u16(((cq0_w5 >> 16) & 0xFF) -\n+\t\t\t\t\t\t       (cq0_w5 & 0xFF),\n+\t\t\t\t\t       l2lens, 0);\n+\t\t\tl2lens = vset_lane_u16(((cq1_w5 >> 16) & 0xFF) -\n+\t\t\t\t\t\t       (cq1_w5 & 0xFF),\n+\t\t\t\t\t       l2lens, 1);\n+\t\t\tl2lens = vset_lane_u16(((cq2_w5 >> 16) & 0xFF) -\n+\t\t\t\t\t\t       (cq2_w5 & 0xFF),\n+\t\t\t\t\t       l2lens, 2);\n+\t\t\tl2lens = vset_lane_u16(((cq3_w5 >> 16) & 0xFF) -\n+\t\t\t\t\t\t       (cq3_w5 & 0xFF),\n+\t\t\t\t\t       l2lens, 3);\n+\t\t\tlens = vadd_u16(lens, l2lens);\n+\n+\t\t\t/* L3 header adjust */\n+\t\t\tconst int8x8_t l3adj = {\n+\t\t\t\t0, 0, 0, 0, 40, 0, 0, 0,\n+\t\t\t};\n+\t\t\tlens = vadd_u16(lens, vtbl1_u8(l3adj, ltypes));\n \n \t\t\t/* Initialize rearm data when reassembly is enabled as\n \t\t\t * data offset might change.\n@@ -1345,25 +1400,93 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,\n \t\t\t}\n \n \t\t\t/* Checksum ol_flags will be cleared if mbuf is meta */\n-\t\t\tmbuf0 = nix_sec_meta_to_mbuf(cq0_w1, cq0_w5, sa_base, laddr,\n-\t\t\t\t\t\t     &loff, mbuf0, d_off, &f0,\n-\t\t\t\t\t\t     &ol_flags0, flags, &rearm0);\n-\t\t\tmbuf01 = vsetq_lane_u64((uint64_t)mbuf0, mbuf01, 0);\n-\n-\t\t\tmbuf1 = nix_sec_meta_to_mbuf(cq1_w1, cq1_w5, sa_base, laddr,\n-\t\t\t\t\t\t     &loff, mbuf1, d_off, &f1,\n-\t\t\t\t\t\t     &ol_flags1, flags, &rearm1);\n-\t\t\tmbuf01 = vsetq_lane_u64((uint64_t)mbuf1, mbuf01, 1);\n-\n-\t\t\tmbuf2 = nix_sec_meta_to_mbuf(cq2_w1, cq2_w5, sa_base, laddr,\n-\t\t\t\t\t\t     &loff, mbuf2, d_off, &f2,\n-\t\t\t\t\t\t     &ol_flags2, flags, &rearm2);\n-\t\t\tmbuf23 = vsetq_lane_u64((uint64_t)mbuf2, mbuf23, 0);\n-\n-\t\t\tmbuf3 = nix_sec_meta_to_mbuf(cq3_w1, cq3_w5, sa_base, laddr,\n-\t\t\t\t\t\t     &loff, mbuf3, d_off, &f3,\n-\t\t\t\t\t\t     &ol_flags3, flags, &rearm3);\n-\t\t\tmbuf23 = vsetq_lane_u64((uint64_t)mbuf3, mbuf23, 1);\n+\t\t\tif (cq0_w1 & BIT(11)) {\n+\t\t\t\tuintptr_t wqe = vgetq_lane_u64(wqe01, 0);\n+\t\t\t\tuintptr_t sa = vgetq_lane_u64(sa01, 0);\n+\t\t\t\tuint16_t len = vget_lane_u16(lens, 0);\n+\n+\t\t\t\tcpth0 = (uintptr_t)mbuf0 + d_off;\n+\t\t\t\t/* Free meta to aura */\n+\t\t\t\tNIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff);\n+\t\t\t\tmbuf01 = vsetq_lane_u64(wqe, mbuf01, 0);\n+\t\t\t\tmbuf0 = (struct rte_mbuf *)wqe;\n+\n+\t\t\t\t/* Update pkt_len and data_len */\n+\t\t\t\tf0 = vsetq_lane_u16(len, f0, 2);\n+\t\t\t\tf0 = vsetq_lane_u16(len, f0, 4);\n+\n+\t\t\t\tnix_sec_meta_to_mbuf(cq0_w1, cq0_w5, sa, cpth0,\n+\t\t\t\t\t\t     mbuf0, &f0, &ol_flags0,\n+\t\t\t\t\t\t     flags, &rearm0);\n+\t\t\t\tol_flags0 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |\n+\t\t\t\t\t(uint64_t)vget_lane_u8(ucc, 1) << 19);\n+\t\t\t}\n+\n+\t\t\tif (cq1_w1 & BIT(11)) {\n+\t\t\t\tuintptr_t wqe = 
vgetq_lane_u64(wqe01, 1);\n+\t\t\t\tuintptr_t sa = vgetq_lane_u64(sa01, 1);\n+\t\t\t\tuint16_t len = vget_lane_u16(lens, 1);\n+\n+\t\t\t\tcpth1 = (uintptr_t)mbuf1 + d_off;\n+\t\t\t\t/* Free meta to aura */\n+\t\t\t\tNIX_PUSH_META_TO_FREE(mbuf1, laddr, &loff);\n+\t\t\t\tmbuf01 = vsetq_lane_u64(wqe, mbuf01, 1);\n+\t\t\t\tmbuf1 = (struct rte_mbuf *)wqe;\n+\n+\t\t\t\t/* Update pkt_len and data_len */\n+\t\t\t\tf1 = vsetq_lane_u16(len, f1, 2);\n+\t\t\t\tf1 = vsetq_lane_u16(len, f1, 4);\n+\n+\t\t\t\tnix_sec_meta_to_mbuf(cq1_w1, cq1_w5, sa, cpth1,\n+\t\t\t\t\t\t     mbuf1, &f1, &ol_flags1,\n+\t\t\t\t\t\t     flags, &rearm1);\n+\t\t\t\tol_flags1 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |\n+\t\t\t\t\t(uint64_t)vget_lane_u8(ucc, 3) << 19);\n+\t\t\t}\n+\n+\t\t\tif (cq2_w1 & BIT(11)) {\n+\t\t\t\tuintptr_t wqe = vgetq_lane_u64(wqe23, 0);\n+\t\t\t\tuintptr_t sa = vgetq_lane_u64(sa23, 0);\n+\t\t\t\tuint16_t len = vget_lane_u16(lens, 2);\n+\n+\t\t\t\tcpth2 = (uintptr_t)mbuf2 + d_off;\n+\t\t\t\t/* Free meta to aura */\n+\t\t\t\tNIX_PUSH_META_TO_FREE(mbuf2, laddr, &loff);\n+\t\t\t\tmbuf23 = vsetq_lane_u64(wqe, mbuf23, 0);\n+\t\t\t\tmbuf2 = (struct rte_mbuf *)wqe;\n+\n+\t\t\t\t/* Update pkt_len and data_len */\n+\t\t\t\tf2 = vsetq_lane_u16(len, f2, 2);\n+\t\t\t\tf2 = vsetq_lane_u16(len, f2, 4);\n+\n+\t\t\t\tnix_sec_meta_to_mbuf(cq2_w1, cq2_w5, sa, cpth2,\n+\t\t\t\t\t\t     mbuf2, &f2, &ol_flags2,\n+\t\t\t\t\t\t     flags, &rearm2);\n+\t\t\t\tol_flags2 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |\n+\t\t\t\t\t(uint64_t)vget_lane_u8(ucc, 5) << 19);\n+\t\t\t}\n+\n+\t\t\tif (cq3_w1 & BIT(11)) {\n+\t\t\t\tuintptr_t wqe = vgetq_lane_u64(wqe23, 1);\n+\t\t\t\tuintptr_t sa = vgetq_lane_u64(sa23, 1);\n+\t\t\t\tuint16_t len = vget_lane_u16(lens, 3);\n+\n+\t\t\t\tcpth3 = (uintptr_t)mbuf3 + d_off;\n+\t\t\t\t/* Free meta to aura */\n+\t\t\t\tNIX_PUSH_META_TO_FREE(mbuf3, laddr, &loff);\n+\t\t\t\tmbuf23 = vsetq_lane_u64(wqe, mbuf23, 1);\n+\t\t\t\tmbuf3 = (struct rte_mbuf *)wqe;\n+\n+\t\t\t\t/* Update pkt_len and data_len */\n+\t\t\t\tf3 = vsetq_lane_u16(len, f3, 2);\n+\t\t\t\tf3 = vsetq_lane_u16(len, f3, 4);\n+\n+\t\t\t\tnix_sec_meta_to_mbuf(cq3_w1, cq3_w5, sa, cpth3,\n+\t\t\t\t\t\t     mbuf3, &f3, &ol_flags3,\n+\t\t\t\t\t\t     flags, &rearm3);\n+\t\t\t\tol_flags3 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |\n+\t\t\t\t\t(uint64_t)vget_lane_u8(ucc, 7) << 19);\n+\t\t\t}\n \t\t}\n \n \t\tif (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {\n",
    "prefixes": [
        "19/24"
    ]
}
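
A minimal sketch (again assuming the Python `requests` package) of how the hyperlinked fields in the record above can be consumed; the output filename is an arbitrary example.

import requests

patch = requests.get(
    "http://patches.dpdk.org/api/patches/109824/", params={"format": "json"}
).json()

# "mbox" points at the raw patch email, suitable for feeding to `git am`.
mbox = requests.get(patch["mbox"]).text
with open("0019-net-cnxk-optimize-rx-fast-path.patch", "w") as f:  # example filename
    f.write(mbox)

# "checks" is a further API collection for this patch; the aggregate CI
# status is summarised in the "check" field ("warning" for this record).
checks = requests.get(patch["checks"], params={"format": "json"}).json()
for c in checks:
    print(c["context"], c["state"])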