get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
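
The three operations above can also be driven from a script. The snippet below is a minimal sketch, assuming the patches.dpdk.org instance shown on this page and the Python "requests" library; the PATCH call additionally assumes an API token (the value below is a placeholder) and maintainer rights on the project, and uses the token Authorization scheme of Django REST framework, on which the Patchwork API is built.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 73478

# GET: show a patch (returns the JSON document shown below).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# PATCH: partial update of selected fields, here the state. Needs an API
# token and maintainer rights; the token is a hypothetical placeholder.
token = "0123456789abcdef0123456789abcdef01234567"
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {token}"},
    json={"state": "accepted"},
)
resp.raise_for_status()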

GET /api/patches/73478/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 73478,
    "url": "https://patches.dpdk.org/api/patches/73478/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200707222225.33446-4-ajit.khaparde@broadcom.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200707222225.33446-4-ajit.khaparde@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200707222225.33446-4-ajit.khaparde@broadcom.com",
    "date": "2020-07-07T22:22:25",
    "name": "[v5,3/3] net/bnxt: add ARM64 vector support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "58147849e5cbb593ba75bc594ffd2dba52883104",
    "submitter": {
        "id": 501,
        "url": "https://patches.dpdk.org/api/people/501/?format=api",
        "name": "Ajit Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "delegate": {
        "id": 1766,
        "url": "https://patches.dpdk.org/api/users/1766/?format=api",
        "username": "ajitkhaparde",
        "first_name": "Ajit",
        "last_name": "Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200707222225.33446-4-ajit.khaparde@broadcom.com/mbox/",
    "series": [
        {
            "id": 10866,
            "url": "https://patches.dpdk.org/api/series/10866/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10866",
            "date": "2020-07-07T22:22:22",
            "name": "net/bnxt: vector mode enhancements",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/10866/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/73478/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/73478/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AC7A7A00BE;\n\tWed,  8 Jul 2020 00:22:57 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id EC4F01DEF2;\n\tWed,  8 Jul 2020 00:22:40 +0200 (CEST)",
            "from mail-ed1-f98.google.com (mail-ed1-f98.google.com\n [209.85.208.98]) by dpdk.org (Postfix) with ESMTP id 421331DEE0\n for <dev@dpdk.org>; Wed,  8 Jul 2020 00:22:36 +0200 (CEST)",
            "by mail-ed1-f98.google.com with SMTP id h28so40031761edz.0\n for <dev@dpdk.org>; Tue, 07 Jul 2020 15:22:36 -0700 (PDT)",
            "from C02VPB22HTD6.wifi.broadcom.net ([192.19.223.252])\n by smtp-relay.gmail.com with ESMTPS id gj18sm12603ejb.18.2020.07.07.15.22.34\n (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128);\n Tue, 07 Jul 2020 15:22:35 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=oEuUhZ1opkShWPxFkmV7NhfYmOzLVieGL1UemBMtbO0=;\n b=QQLqWTLtnXgeBtfkhl7/Ap0N28wWESMpOb9yUIIZ4ve3ON+aRwCfVCWrdF2CbzVGyQ\n puCpVuuzjYQX+0J6+vfuzwXVuoHhwg8tsvd8NJbaCq7gSe9pscaDV/cKnyy4oLICOPVZ\n rznaQ1nvvLDIqsm8SZHWv3TPQmQn8bJzy4L/E=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=oEuUhZ1opkShWPxFkmV7NhfYmOzLVieGL1UemBMtbO0=;\n b=uVLILebuycszvUMeYBOJTKbcpCefqDLwrtL3Npex8IVGMmHzsIxqzyIq8k5QSMFCv4\n AyxXMlFUByjqTkgXIe4UYzImNZzd66ctx1HQO/4UazteRHOcJg3MmlJs8eI1ZCPxVzi3\n IYFpZa5GTc6VjYQq5EO2KFwewqlCQkRTlxSyx5BjtzI8/GXLi880EOJ9LqFpjk+jdHIg\n oedW/z8fC9h3q0imy9WOAj86v9A7CT6TgOcgsbM7NIb9IVQlvV33Rs8Z0hKJWVRDioej\n 5JQJd6B7nZZU35Nwd7HB8IvFyC1GabdJ38ehgHWKDtJar1njurXSTV6X2JWQI73ffeGT\n /iTg==",
        "X-Gm-Message-State": "AOAM531SC1tAJI4nakR64TJAOBj4Kc86sMb1k5a3xILUXnLwWrD4cnll\n JryA2f1l2VWhIrO9NU++MUEZyFkWEEd+P1bH7U7fcp50R3UXSc9wIneElwCsEz/1tdrT8KDhVK+\n P/RQ9LtdL6ddK0UrIOfIXbzngkYDyy1vnIACMFyZa3BLGm8WUEcUXkV2xHBroOCZz6a505rsFAU\n Ai1A==",
        "X-Google-Smtp-Source": "\n ABdhPJyMPaBFeAC2LSOmSCm6ydt0m6Sh56udg05g9iS26np6JvdFnA+flmkHa3lKA4cSycur3iKhrCY6+90G",
        "X-Received": "by 2002:aa7:c1d8:: with SMTP id\n d24mr65118139edp.178.1594160555808;\n Tue, 07 Jul 2020 15:22:35 -0700 (PDT)",
        "X-Relaying-Domain": "broadcom.com",
        "From": "Ajit Khaparde <ajit.khaparde@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "Lance Richardson <lance.richardson@broadcom.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>",
        "Date": "Tue,  7 Jul 2020 15:22:25 -0700",
        "Message-Id": "<20200707222225.33446-4-ajit.khaparde@broadcom.com>",
        "X-Mailer": "git-send-email 2.21.1 (Apple Git-122.3)",
        "In-Reply-To": "<20200707222225.33446-1-ajit.khaparde@broadcom.com>",
        "References": "\n <CACZ4nhuiiPoX=h-9xUYk+STPhespH86MJz94pnWoUxe9aburZQ@mail.gmail.com>\n <20200707222225.33446-1-ajit.khaparde@broadcom.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v5 3/3] net/bnxt: add ARM64 vector support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Lance Richardson <lance.richardson@broadcom.com>\n\nAdd bnxt vector PMD support using NEON SIMD instructions.\nAlso update the 20.08 release notes with this information.\n\nSigned-off-by: Lance Richardson <lance.richardson@broadcom.com>\nReviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\n---\n doc/guides/nics/bnxt.rst                |   6 +-\n doc/guides/rel_notes/release_20_08.rst  |   1 +\n drivers/net/bnxt/Makefile               |   3 +\n drivers/net/bnxt/bnxt_ethdev.c          |  10 +-\n drivers/net/bnxt/bnxt_ring.c            |   4 +-\n drivers/net/bnxt/bnxt_rxq.h             |   2 +-\n drivers/net/bnxt/bnxt_rxr.c             |   2 +-\n drivers/net/bnxt/bnxt_rxr.h             |   2 +-\n drivers/net/bnxt/bnxt_rxtx_vec_common.h |  57 +++\n drivers/net/bnxt/bnxt_rxtx_vec_neon.c   | 469 ++++++++++++++++++++++++\n drivers/net/bnxt/bnxt_rxtx_vec_sse.c    |  46 +--\n drivers/net/bnxt/bnxt_txr.h             |   2 +-\n drivers/net/bnxt/meson.build            |   2 +\n 13 files changed, 548 insertions(+), 58 deletions(-)\n create mode 100644 drivers/net/bnxt/bnxt_rxtx_vec_common.h\n create mode 100644 drivers/net/bnxt/bnxt_rxtx_vec_neon.c",
    "diff": "diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst\nindex a53cdad21..6ff75d0a2 100644\n--- a/doc/guides/nics/bnxt.rst\n+++ b/doc/guides/nics/bnxt.rst\n@@ -774,9 +774,9 @@ Vector processing provides significantly improved performance over scalar\n processing (see Vector Processor, here).\n \n The BNXT PMD supports the vector processing using SSE (Streaming SIMD\n-Extensions) instructions on x86 platforms. The BNXT vPMD (vector mode PMD) is\n-currently limited to Intel/AMD CPU architecture. Support for ARM is *not*\n-currently implemented.\n+Extensions) instructions on x86 platforms. It also supports NEON intrinsics for\n+vector processing on ARM CPUs. The BNXT vPMD (vector mode PMD) is available for\n+Intel/AMD and ARM CPU architectures.\n \n This improved performance comes from several optimizations:\n \ndiff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst\nindex e84258a13..887ed5b40 100644\n--- a/doc/guides/rel_notes/release_20_08.rst\n+++ b/doc/guides/rel_notes/release_20_08.rst\n@@ -102,6 +102,7 @@ New Features\n   * Added support for VXLAN encap/decap.\n   * Added support for rte_flow_query for COUNT action.\n   * Added support for rx_burst_mode_get and tx_burst_mode_get.\n+  * Added vector mode support for ARM CPUs.\n \n * **Added support for BPF_ABS/BPF_IND load instructions.**\n \ndiff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile\nindex 6b9544b5d..c54fd108d 100644\n--- a/drivers/net/bnxt/Makefile\n+++ b/drivers/net/bnxt/Makefile\n@@ -44,6 +44,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c\n ifeq ($(CONFIG_RTE_ARCH_X86), y)\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxtx_vec_sse.c\n endif\n+ifeq ($(CONFIG_RTE_ARCH_ARM64), y)\n+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxtx_vec_neon.c\n+endif\n \n ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD), y)\n CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/tf_ulp -I$(SRCDIR)/tf_core -I$(SRCDIR)/hcapi\ndiff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c\nindex 1dc85e6df..3b9d3fbe7 100644\n--- a/drivers/net/bnxt/bnxt_ethdev.c\n+++ b/drivers/net/bnxt/bnxt_ethdev.c\n@@ -1076,7 +1076,7 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)\n {\n \tstruct bnxt *bp = eth_dev->data->dev_private;\n \n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n #ifndef RTE_LIBRTE_IEEE1588\n \t/*\n \t * Vector mode receive can be enabled only if scatter rx is not\n@@ -1116,7 +1116,7 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)\n static eth_tx_burst_t\n bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)\n {\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n #ifndef RTE_LIBRTE_IEEE1588\n \tstruct bnxt *bp = eth_dev->data->dev_private;\n \n@@ -2679,7 +2679,7 @@ bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,\n \t\t\t \"Scalar\");\n \t\treturn 0;\n \t}\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \tif (pkt_burst == bnxt_recv_pkts_vec) {\n \t\tsnprintf(mode->info, sizeof(mode->info), \"%s\",\n \t\t\t \"Vector SSE\");\n@@ -2701,7 +2701,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,\n \t\t\t \"Scalar\");\n \t\treturn 0;\n \t}\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \tif (pkt_burst == bnxt_xmit_pkts_vec) {\n \t\tsnprintf(mode->info, sizeof(mode->info), \"%s\",\n \t\t\t \"Vector SSE\");\n@@ -2730,7 +2730,7 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, 
uint16_t new_mtu)\n \tnew_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +\n \t\t       VLAN_TAG_SIZE * BNXT_NUM_VLANS;\n \n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \t/*\n \t * If vector-mode tx/rx is active, disallow any MTU change that would\n \t * require scattered receive support.\ndiff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c\nindex 24a947f27..54f654744 100644\n--- a/drivers/net/bnxt/bnxt_ring.c\n+++ b/drivers/net/bnxt/bnxt_ring.c\n@@ -608,7 +608,7 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)\n \t\tbnxt_db_write(&rxr->ag_db, rxr->ag_prod);\n \t}\n \trxq->index = queue_index;\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \tbnxt_rxq_vec_setup(rxq);\n #endif\n \n@@ -713,7 +713,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)\n \t\tbnxt_db_write(&rxr->rx_db, rxr->rx_prod);\n \t\tbnxt_db_write(&rxr->ag_db, rxr->ag_prod);\n \t\trxq->index = i;\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \t\tbnxt_rxq_vec_setup(rxq);\n #endif\n \t}\ndiff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h\nindex 4f5182d9e..d5ce3b6d5 100644\n--- a/drivers/net/bnxt/bnxt_rxq.h\n+++ b/drivers/net/bnxt/bnxt_rxq.h\n@@ -22,7 +22,7 @@ struct bnxt_rx_queue {\n \tuint16_t\t\tnb_rx_hold; /* num held free RX desc */\n \tuint16_t\t\trx_free_thresh; /* max free RX desc to hold */\n \tuint16_t\t\tqueue_id; /* RX queue index */\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \tuint16_t\t\trxrearm_nb; /* number of descs to reinit. */\n \tuint16_t\t\trxrearm_start; /* next desc index to reinit. */\n #endif\ndiff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c\nindex eb6f33c7b..baf73cb25 100644\n--- a/drivers/net/bnxt/bnxt_rxr.c\n+++ b/drivers/net/bnxt/bnxt_rxr.c\n@@ -782,7 +782,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t     !rte_spinlock_trylock(&rxq->lock)))\n \t\treturn 0;\n \n-#if defined(RTE_ARCH_X86)\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n \t/*\n \t * Replenish buffers if needed when a transition has been made from\n \t * vector- to non-vector- receive processing.\ndiff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h\nindex e60c97fa1..2bf46cd91 100644\n--- a/drivers/net/bnxt/bnxt_rxr.h\n+++ b/drivers/net/bnxt/bnxt_rxr.h\n@@ -221,7 +221,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);\n int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n \n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\t    uint16_t nb_pkts);\n int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);\ndiff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h\nnew file mode 100644\nindex 000000000..3da3c48f4\n--- /dev/null\n+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h\n@@ -0,0 +1,57 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _BNXT_RXTX_VEC_COMMON_H_\n+#define _BNXT_RXTX_VEC_COMMON_H_\n+\n+#define RTE_BNXT_MAX_RX_BURST\t\t32\n+#define RTE_BNXT_MAX_TX_BURST\t\t32\n+#define RTE_BNXT_RXQ_REARM_THRESH\t32\n+#define RTE_BNXT_DESCS_PER_LOOP\t\t4\n+\n+#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \\\n+\t\t\t  TX_BD_SHORT_FLAGS_COAL_NOW | 
\\\n+\t\t\t  TX_BD_SHORT_TYPE_TX_BD_SHORT | \\\n+\t\t\t  TX_BD_LONG_FLAGS_PACKET_END)\n+\n+#define TX_BD_FLAGS_NOCMPL (TX_BD_FLAGS_CMPL | TX_BD_LONG_FLAGS_NO_CMPL)\n+\n+static inline uint32_t\n+bnxt_xmit_flags_len(uint16_t len, uint16_t flags)\n+{\n+\tswitch (len >> 9) {\n+\tcase 0:\n+\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT512;\n+\tcase 1:\n+\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT1K;\n+\tcase 2:\n+\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT2K;\n+\tcase 3:\n+\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT2K;\n+\tdefault:\n+\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_GTE2K;\n+\t}\n+}\n+\n+static inline int\n+bnxt_rxq_vec_setup_common(struct bnxt_rx_queue *rxq)\n+{\n+\tuintptr_t p;\n+\tstruct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */\n+\n+\tmb_def.nb_segs = 1;\n+\tmb_def.data_off = RTE_PKTMBUF_HEADROOM;\n+\tmb_def.port = rxq->port_id;\n+\trte_mbuf_refcnt_set(&mb_def, 1);\n+\n+\t/* prevent compiler reordering: rearm_data covers previous fields */\n+\trte_compiler_barrier();\n+\tp = (uintptr_t)&mb_def.rearm_data;\n+\trxq->mbuf_initializer = *(uint64_t *)p;\n+\trxq->rxrearm_nb = 0;\n+\trxq->rxrearm_start = 0;\n+\treturn 0;\n+}\n+#endif /* _BNXT_RXTX_VEC_COMMON_H_ */\ndiff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c\nnew file mode 100644\nindex 000000000..488a0b466\n--- /dev/null\n+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c\n@@ -0,0 +1,469 @@\n+/* SPDX-License-Identifier: BSD-3-Clause */\n+/* Copyright(c) 2019-2020 Broadcom All rights reserved. */\n+\n+#include <inttypes.h>\n+#include <stdbool.h>\n+\n+#include <rte_bitmap.h>\n+#include <rte_byteorder.h>\n+#include <rte_malloc.h>\n+#include <rte_memory.h>\n+#include <rte_vect.h>\n+\n+#include \"bnxt.h\"\n+#include \"bnxt_cpr.h\"\n+#include \"bnxt_ring.h\"\n+#include \"bnxt_rxr.h\"\n+#include \"bnxt_rxq.h\"\n+#include \"hsi_struct_def_dpdk.h\"\n+#include \"bnxt_rxtx_vec_common.h\"\n+\n+#include \"bnxt_txq.h\"\n+#include \"bnxt_txr.h\"\n+\n+/*\n+ * RX Ring handling\n+ */\n+\n+static inline void\n+bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)\n+{\n+\tstruct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];\n+\tstruct bnxt_sw_rx_bd *rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];\n+\tstruct rte_mbuf *mb0, *mb1;\n+\tint i;\n+\n+\tconst uint64x2_t hdr_room = {0, RTE_PKTMBUF_HEADROOM};\n+\tconst uint64x2_t addrmask = {0, UINT64_MAX};\n+\n+\t/* Pull RTE_BNXT_RXQ_REARM_THRESH more mbufs into the software ring */\n+\tif (rte_mempool_get_bulk(rxq->mb_pool,\n+\t\t\t\t (void *)rx_bufs,\n+\t\t\t\t RTE_BNXT_RXQ_REARM_THRESH) < 0) {\n+\t\trte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=\n+\t\t\tRTE_BNXT_RXQ_REARM_THRESH;\n+\n+\t\treturn;\n+\t}\n+\n+\t/* Initialize the mbufs in vector, process 2 mbufs in one loop */\n+\tfor (i = 0; i < RTE_BNXT_RXQ_REARM_THRESH; i += 2, rx_bufs += 2) {\n+\t\tuint64x2_t buf_addr0, buf_addr1;\n+\t\tuint64x2_t rxbd0, rxbd1;\n+\n+\t\tmb0 = rx_bufs[0].mbuf;\n+\t\tmb1 = rx_bufs[1].mbuf;\n+\n+\t\t/* Load address fields from both mbufs */\n+\t\tbuf_addr0 = vld1q_u64((uint64_t *)&mb0->buf_addr);\n+\t\tbuf_addr1 = vld1q_u64((uint64_t *)&mb1->buf_addr);\n+\n+\t\t/* Load both rx descriptors (preserving some existing fields) */\n+\t\trxbd0 = vld1q_u64((uint64_t *)(rxbds + 0));\n+\t\trxbd1 = vld1q_u64((uint64_t *)(rxbds + 1));\n+\n+\t\t/* Add default offset to buffer address. */\n+\t\tbuf_addr0 = vaddq_u64(buf_addr0, hdr_room);\n+\t\tbuf_addr1 = vaddq_u64(buf_addr1, hdr_room);\n+\n+\t\t/* Clear all fields except address. 
*/\n+\t\tbuf_addr0 =  vandq_u64(buf_addr0, addrmask);\n+\t\tbuf_addr1 =  vandq_u64(buf_addr1, addrmask);\n+\n+\t\t/* Clear address field in descriptor. */\n+\t\trxbd0 = vbicq_u64(rxbd0, addrmask);\n+\t\trxbd1 = vbicq_u64(rxbd1, addrmask);\n+\n+\t\t/* Set address field in descriptor. */\n+\t\trxbd0 = vaddq_u64(rxbd0, buf_addr0);\n+\t\trxbd1 = vaddq_u64(rxbd1, buf_addr1);\n+\n+\t\t/* Store descriptors to memory. */\n+\t\tvst1q_u64((uint64_t *)(rxbds++), rxbd0);\n+\t\tvst1q_u64((uint64_t *)(rxbds++), rxbd1);\n+\t}\n+\n+\trxq->rxrearm_start += RTE_BNXT_RXQ_REARM_THRESH;\n+\tbnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);\n+\tif (rxq->rxrearm_start >= rxq->nb_rx_desc)\n+\t\trxq->rxrearm_start = 0;\n+\n+\trxq->rxrearm_nb -= RTE_BNXT_RXQ_REARM_THRESH;\n+}\n+\n+static uint32_t\n+bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)\n+{\n+\tuint32_t l3, pkt_type = 0;\n+\tuint32_t t_ipcs = 0, ip6 = 0, vlan = 0;\n+\tuint32_t flags_type;\n+\n+\tvlan = !!(rxcmp1->flags2 &\n+\t\trte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));\n+\tpkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;\n+\n+\tt_ipcs = !!(rxcmp1->flags2 &\n+\t\trte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));\n+\tip6 = !!(rxcmp1->flags2 &\n+\t\t rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));\n+\n+\tflags_type = rxcmp->flags_type &\n+\t\trte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);\n+\n+\tif (!t_ipcs && !ip6)\n+\t\tl3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;\n+\telse if (!t_ipcs && ip6)\n+\t\tl3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;\n+\telse if (t_ipcs && !ip6)\n+\t\tl3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;\n+\telse\n+\t\tl3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;\n+\n+\tswitch (flags_type) {\n+\tcase RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):\n+\t\tif (!t_ipcs)\n+\t\t\tpkt_type |= l3 | RTE_PTYPE_L4_ICMP;\n+\t\telse\n+\t\t\tpkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;\n+\t\tbreak;\n+\n+\tcase RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):\n+\t\tif (!t_ipcs)\n+\t\t\tpkt_type |= l3 | RTE_PTYPE_L4_TCP;\n+\t\telse\n+\t\t\tpkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;\n+\t\tbreak;\n+\n+\tcase RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):\n+\t\tif (!t_ipcs)\n+\t\t\tpkt_type |= l3 | RTE_PTYPE_L4_UDP;\n+\t\telse\n+\t\t\tpkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;\n+\t\tbreak;\n+\n+\tcase RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):\n+\t\tpkt_type |= l3;\n+\t\tbreak;\n+\t}\n+\n+\treturn pkt_type;\n+}\n+\n+static void\n+bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)\n+{\n+\tuint32_t flags;\n+\n+\tflags = flags2_0xf(rxcmp1);\n+\t/* IP Checksum */\n+\tif (likely(IS_IP_NONTUNNEL_PKT(flags))) {\n+\t\tif (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))\n+\t\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t} else if (IS_IP_TUNNEL_PKT(flags)) {\n+\t\tif (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||\n+\t\t\t     RX_CMP_IP_CS_ERROR(rxcmp1)))\n+\t\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t} else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {\n+\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;\n+\t}\n+\n+\t/* L4 Checksum */\n+\tif (likely(IS_L4_NONTUNNEL_PKT(flags))) {\n+\t\tif (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))\n+\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t} else if (IS_L4_TUNNEL_PKT(flags)) {\n+\t\tif (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))\n+\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t\tif 
(unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {\n+\t\t\tmbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;\n+\t\t} else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS\n+\t\t\t\t    (flags))) {\n+\t\t\tmbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;\n+\t\t} else {\n+\t\t\tmbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;\n+\t\t}\n+\t} else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {\n+\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;\n+\t}\n+}\n+\n+uint16_t\n+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t   uint16_t nb_pkts)\n+{\n+\tstruct bnxt_rx_queue *rxq = rx_queue;\n+\tstruct bnxt_cp_ring_info *cpr = rxq->cp_ring;\n+\tstruct bnxt_rx_ring_info *rxr = rxq->rx_ring;\n+\tuint32_t raw_cons = cpr->cp_raw_cons;\n+\tuint32_t cons;\n+\tint nb_rx_pkts = 0;\n+\tstruct rx_pkt_cmpl *rxcmp;\n+\tbool evt = false;\n+\tconst uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};\n+\tconst uint8x16_t shuf_msk = {\n+\t\t0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */\n+\t\t2, 3, 0xFF, 0xFF,          /* pkt_len */\n+\t\t2, 3,                      /* data_len */\n+\t\t0xFF, 0xFF,                /* vlan_tci (zeroes) */\n+\t\t12, 13, 14, 15             /* rss hash */\n+\t};\n+\n+\t/* If Rx Q was stopped return */\n+\tif (unlikely(!rxq->rx_started))\n+\t\treturn 0;\n+\n+\tif (rxq->rxrearm_nb >= RTE_BNXT_RXQ_REARM_THRESH)\n+\t\tbnxt_rxq_rearm(rxq, rxr);\n+\n+\t/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */\n+\tnb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);\n+\n+\t/* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP */\n+\tnb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);\n+\tif (!nb_pkts)\n+\t\treturn 0;\n+\n+\t/* Handle RX burst request */\n+\twhile (1) {\n+\t\tcons = RING_CMP(cpr->cp_ring_struct, raw_cons);\n+\n+\t\trxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];\n+\n+\t\tif (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))\n+\t\t\tbreak;\n+\n+\t\tif (likely(CMP_TYPE(rxcmp) == RX_PKT_CMPL_TYPE_RX_L2)) {\n+\t\t\tstruct rx_pkt_cmpl_hi *rxcmp1;\n+\t\t\tuint32_t tmp_raw_cons;\n+\t\t\tuint16_t cp_cons;\n+\t\t\tstruct rte_mbuf *mbuf;\n+\t\t\tuint64x2_t mm_rxcmp;\n+\t\t\tuint8x16_t pkt_mb;\n+\n+\t\t\ttmp_raw_cons = NEXT_RAW_CMP(raw_cons);\n+\t\t\tcp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);\n+\t\t\trxcmp1 = (struct rx_pkt_cmpl_hi *)\n+\t\t\t\t\t\t&cpr->cp_desc_ring[cp_cons];\n+\n+\t\t\tif (!CMP_VALID(rxcmp1, tmp_raw_cons,\n+\t\t\t\t       cpr->cp_ring_struct))\n+\t\t\t\tbreak;\n+\n+\t\t\traw_cons = tmp_raw_cons;\n+\t\t\tcons = rxcmp->opaque;\n+\n+\t\t\tmbuf = rxr->rx_buf_ring[cons].mbuf;\n+\t\t\trte_prefetch0(mbuf);\n+\t\t\trxr->rx_buf_ring[cons].mbuf = NULL;\n+\n+\t\t\t/* Set constant fields from mbuf initializer. */\n+\t\t\tvst1q_u64((uint64_t *)&mbuf->rearm_data, mbuf_init);\n+\n+\t\t\t/* Set mbuf pkt_len, data_len, and rss_hash fields. 
*/\n+\t\t\tmm_rxcmp = vld1q_u64((uint64_t *)rxcmp);\n+\t\t\tpkt_mb = vqtbl1q_u8(vreinterpretq_u8_u64(mm_rxcmp),\n+\t\t\t\t\t    shuf_msk);\n+\t\t\tvst1q_u64((uint64_t *)&mbuf->rx_descriptor_fields1,\n+\t\t\t\t  vreinterpretq_u64_u8(pkt_mb));\n+\n+\t\t\trte_compiler_barrier();\n+\n+\t\t\tif (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)\n+\t\t\t\tmbuf->ol_flags |= PKT_RX_RSS_HASH;\n+\n+\t\t\tif (rxcmp1->flags2 &\n+\t\t\t    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {\n+\t\t\t\tmbuf->vlan_tci = rxcmp1->metadata &\n+\t\t\t\t\t(RX_PKT_CMPL_METADATA_VID_MASK |\n+\t\t\t\t\tRX_PKT_CMPL_METADATA_DE |\n+\t\t\t\t\tRX_PKT_CMPL_METADATA_PRI_MASK);\n+\t\t\t\tmbuf->ol_flags |=\n+\t\t\t\t\tPKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n+\t\t\t}\n+\n+\t\t\tbnxt_parse_csum(mbuf, rxcmp1);\n+\t\t\tmbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);\n+\n+\t\t\trx_pkts[nb_rx_pkts++] = mbuf;\n+\t\t} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {\n+\t\t\tevt =\n+\t\t\tbnxt_event_hwrm_resp_handler(rxq->bp,\n+\t\t\t\t\t\t     (struct cmpl_base *)rxcmp);\n+\t\t}\n+\n+\t\traw_cons = NEXT_RAW_CMP(raw_cons);\n+\t\tif (nb_rx_pkts == nb_pkts || evt)\n+\t\t\tbreak;\n+\t}\n+\trxr->rx_prod = RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);\n+\n+\trxq->rxrearm_nb += nb_rx_pkts;\n+\tcpr->cp_raw_cons = raw_cons;\n+\tcpr->valid = !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);\n+\tif (nb_rx_pkts || evt)\n+\t\tbnxt_db_cq(cpr);\n+\n+\treturn nb_rx_pkts;\n+}\n+\n+static void\n+bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)\n+{\n+\tstruct bnxt_tx_ring_info *txr = txq->tx_ring;\n+\tstruct rte_mbuf **free = txq->free;\n+\tuint16_t cons = txr->tx_cons;\n+\tunsigned int blk = 0;\n+\n+\twhile (nr_pkts--) {\n+\t\tstruct bnxt_sw_tx_bd *tx_buf;\n+\t\tstruct rte_mbuf *mbuf;\n+\n+\t\ttx_buf = &txr->tx_buf_ring[cons];\n+\t\tcons = RING_NEXT(txr->tx_ring_struct, cons);\n+\t\tmbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);\n+\t\ttx_buf->mbuf = NULL;\n+\n+\t\tif (blk && mbuf->pool != free[0]->pool) {\n+\t\t\trte_mempool_put_bulk(free[0]->pool, (void **)free, blk);\n+\t\t\tblk = 0;\n+\t\t}\n+\t\tfree[blk++] = mbuf;\n+\t}\n+\tif (blk)\n+\t\trte_mempool_put_bulk(free[0]->pool, (void **)free, blk);\n+\n+\ttxr->tx_cons = cons;\n+}\n+\n+static void\n+bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)\n+{\n+\tstruct bnxt_cp_ring_info *cpr = txq->cp_ring;\n+\tuint32_t raw_cons = cpr->cp_raw_cons;\n+\tuint32_t cons;\n+\tuint32_t nb_tx_pkts = 0;\n+\tstruct tx_cmpl *txcmp;\n+\tstruct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;\n+\tstruct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;\n+\tuint32_t ring_mask = cp_ring_struct->ring_mask;\n+\n+\tdo {\n+\t\tcons = RING_CMPL(ring_mask, raw_cons);\n+\t\ttxcmp = (struct tx_cmpl *)&cp_desc_ring[cons];\n+\n+\t\tif (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))\n+\t\t\tbreak;\n+\n+\t\tif (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))\n+\t\t\tnb_tx_pkts += txcmp->opaque;\n+\t\telse\n+\t\t\tRTE_LOG_DP(ERR, PMD,\n+\t\t\t\t   \"Unhandled CMP type %02x\\n\",\n+\t\t\t\t   CMP_TYPE(txcmp));\n+\t\traw_cons = NEXT_RAW_CMP(raw_cons);\n+\t} while (nb_tx_pkts < ring_mask);\n+\n+\tcpr->valid = !!(raw_cons & cp_ring_struct->ring_size);\n+\tif (nb_tx_pkts) {\n+\t\tbnxt_tx_cmp_vec(txq, nb_tx_pkts);\n+\t\tcpr->cp_raw_cons = raw_cons;\n+\t\tbnxt_db_cq(cpr);\n+\t}\n+}\n+\n+static uint16_t\n+bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t  uint16_t nb_pkts)\n+{\n+\tstruct bnxt_tx_queue *txq = tx_queue;\n+\tstruct bnxt_tx_ring_info *txr = txq->tx_ring;\n+\tuint16_t prod = txr->tx_prod;\n+\tstruct 
rte_mbuf *tx_mbuf;\n+\tstruct tx_bd_long *txbd = NULL;\n+\tstruct bnxt_sw_tx_bd *tx_buf;\n+\tuint16_t to_send;\n+\n+\tnb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));\n+\n+\tif (unlikely(nb_pkts == 0))\n+\t\treturn 0;\n+\n+\t/* Handle TX burst request */\n+\tto_send = nb_pkts;\n+\twhile (to_send) {\n+\t\ttx_mbuf = *tx_pkts++;\n+\t\trte_prefetch0(tx_mbuf);\n+\n+\t\ttx_buf = &txr->tx_buf_ring[prod];\n+\t\ttx_buf->mbuf = tx_mbuf;\n+\t\ttx_buf->nr_bds = 1;\n+\n+\t\ttxbd = &txr->tx_desc_ring[prod];\n+\t\ttxbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;\n+\t\ttxbd->len = tx_mbuf->data_len;\n+\t\ttxbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,\n+\t\t\t\t\t\t       TX_BD_FLAGS_NOCMPL);\n+\t\tprod = RING_NEXT(txr->tx_ring_struct, prod);\n+\t\tto_send--;\n+\t}\n+\n+\t/* Request a completion for last packet in burst */\n+\tif (txbd) {\n+\t\ttxbd->opaque = nb_pkts;\n+\t\ttxbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;\n+\t}\n+\n+\trte_compiler_barrier();\n+\tbnxt_db_write(&txr->tx_db, prod);\n+\n+\ttxr->tx_prod = prod;\n+\n+\treturn nb_pkts;\n+}\n+\n+uint16_t\n+bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t   uint16_t nb_pkts)\n+{\n+\tint nb_sent = 0;\n+\tstruct bnxt_tx_queue *txq = tx_queue;\n+\n+\t/* Tx queue was stopped; wait for it to be restarted */\n+\tif (unlikely(!txq->tx_started)) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Tx q stopped;return\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\t/* Handle TX completions */\n+\tif (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)\n+\t\tbnxt_handle_tx_cp_vec(txq);\n+\n+\twhile (nb_pkts) {\n+\t\tuint16_t ret, num;\n+\n+\t\tnum = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);\n+\t\tret = bnxt_xmit_fixed_burst_vec(tx_queue,\n+\t\t\t\t\t\t&tx_pkts[nb_sent],\n+\t\t\t\t\t\tnum);\n+\t\tnb_sent += ret;\n+\t\tnb_pkts -= ret;\n+\t\tif (ret < num)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn nb_sent;\n+}\n+\n+int __rte_cold\n+bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)\n+{\n+\treturn bnxt_rxq_vec_setup_common(rxq);\n+}\ndiff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c\nindex 8f73add9b..c4ca5cf2d 100644\n--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c\n+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c\n@@ -20,6 +20,7 @@\n #include \"bnxt_rxr.h\"\n #include \"bnxt_rxq.h\"\n #include \"hsi_struct_def_dpdk.h\"\n+#include \"bnxt_rxtx_vec_common.h\"\n \n #include \"bnxt_txq.h\"\n #include \"bnxt_txr.h\"\n@@ -28,11 +29,6 @@\n  * RX Ring handling\n  */\n \n-#define RTE_BNXT_MAX_RX_BURST\t\t32\n-#define RTE_BNXT_MAX_TX_BURST\t\t32\n-#define RTE_BNXT_RXQ_REARM_THRESH\t32\n-#define RTE_BNXT_DESCS_PER_LOOP\t\t4\n-\n static inline void\n bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)\n {\n@@ -388,30 +384,6 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)\n \t}\n }\n \n-#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \\\n-\t\t\t  TX_BD_SHORT_FLAGS_COAL_NOW | \\\n-\t\t\t  TX_BD_SHORT_TYPE_TX_BD_SHORT | \\\n-\t\t\t  TX_BD_LONG_FLAGS_PACKET_END)\n-\n-#define TX_BD_FLAGS_NOCMPL (TX_BD_FLAGS_CMPL | TX_BD_LONG_FLAGS_NO_CMPL)\n-\n-static inline uint32_t\n-bnxt_xmit_flags_len(uint16_t len, uint16_t flags)\n-{\n-\tswitch (len >> 9) {\n-\tcase 0:\n-\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT512;\n-\tcase 1:\n-\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT1K;\n-\tcase 2:\n-\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT2K;\n-\tcase 3:\n-\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_LT2K;\n-\tdefault:\n-\t\treturn flags | TX_BD_LONG_FLAGS_LHINT_GTE2K;\n-\t}\n-}\n-\n static uint16_t\n bnxt_xmit_fixed_burst_vec(void *tx_queue, 
struct rte_mbuf **tx_pkts,\n \t\t\t  uint16_t nb_pkts)\n@@ -498,19 +470,5 @@ bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n int __rte_cold\n bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)\n {\n-\tuintptr_t p;\n-\tstruct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */\n-\n-\tmb_def.nb_segs = 1;\n-\tmb_def.data_off = RTE_PKTMBUF_HEADROOM;\n-\tmb_def.port = rxq->port_id;\n-\trte_mbuf_refcnt_set(&mb_def, 1);\n-\n-\t/* prevent compiler reordering: rearm_data covers previous fields */\n-\trte_compiler_barrier();\n-\tp = (uintptr_t)&mb_def.rearm_data;\n-\trxq->mbuf_initializer = *(uint64_t *)p;\n-\trxq->rxrearm_nb = 0;\n-\trxq->rxrearm_start = 0;\n-\treturn 0;\n+\treturn bnxt_rxq_vec_setup_common(rxq);\n }\ndiff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h\nindex e7f43f9d1..7715c11b8 100644\n--- a/drivers/net/bnxt/bnxt_txr.h\n+++ b/drivers/net/bnxt/bnxt_txr.h\n@@ -59,7 +59,7 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t       uint16_t nb_pkts);\n uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t      uint16_t nb_pkts);\n-#ifdef RTE_ARCH_X86\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)\n uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t    uint16_t nb_pkts);\n #endif\ndiff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build\nindex 5fb0ed380..0b93c3171 100644\n--- a/drivers/net/bnxt/meson.build\n+++ b/drivers/net/bnxt/meson.build\n@@ -68,4 +68,6 @@ sources = files('bnxt_cpr.c',\n \n if arch_subdir == 'x86'\n \tsources += files('bnxt_rxtx_vec_sse.c')\n+elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64')\n+\tsources += files('bnxt_rxtx_vec_neon.c')\n endif\n",
    "prefixes": [
        "v5",
        "3/3"
    ]
}
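
The URL fields in the document above are themselves API entry points. As a further sketch (same assumptions: patches.dpdk.org and the Python "requests" library; the per-check fields "context", "state" and "target_url" are the usual Patchwork check attributes, not values shown on this page), the aggregate "check": "fail" and the series mbox can be followed up like this:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/73478/").json()

# "checks" lists the individual CI results behind the aggregate "check" field.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"], check["target_url"])

# The series mbox bundles all patches of the v5 series in apply order.
mbox = requests.get(patch["series"][0]["mbox"])
with open("series-10866.mbox", "wb") as f:
    f.write(mbox.content)
# The saved file can then be applied with: git am series-10866.mbox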