get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
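
To retrieve a patch programmatically, issue a GET against the endpoint shown below. A minimal sketch in Python (assumes the third-party requests package; the patch ID and field names are taken from the example response that follows):

import requests

resp = requests.get("https://patches.dpdk.org/api/patches/75449/")
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the response body below
print(patch["name"])   # "[v1,4/8] crypto/bcmfs: add hw queue pair operations"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # URL of the raw patch in mbox format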

GET /api/patches/75449/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 75449,
    "url": "http://patches.dpdk.org/api/patches/75449/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200812063127.8687-5-vikas.gupta@broadcom.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200812063127.8687-5-vikas.gupta@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200812063127.8687-5-vikas.gupta@broadcom.com",
    "date": "2020-08-12T06:31:23",
    "name": "[v1,4/8] crypto/bcmfs: add hw queue pair operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "28f7570509107bf1ce2faf8bf5b797889bb13ed9",
    "submitter": {
        "id": 1907,
        "url": "http://patches.dpdk.org/api/people/1907/?format=api",
        "name": "Vikas Gupta",
        "email": "vikas.gupta@broadcom.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200812063127.8687-5-vikas.gupta@broadcom.com/mbox/",
    "series": [
        {
            "id": 11611,
            "url": "http://patches.dpdk.org/api/series/11611/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11611",
            "date": "2020-08-12T06:31:19",
            "name": "Add Crypto PMD for Broadcom`s FlexSparc devices",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11611/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/75449/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/75449/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EADCFA04C2;\n\tWed, 12 Aug 2020 08:32:35 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 0B2C31C0D1;\n\tWed, 12 Aug 2020 08:31:58 +0200 (CEST)",
            "from mail-qt1-f194.google.com (mail-qt1-f194.google.com\n [209.85.160.194]) by dpdk.org (Postfix) with ESMTP id 2B9071C0D6\n for <dev@dpdk.org>; Wed, 12 Aug 2020 08:31:56 +0200 (CEST)",
            "by mail-qt1-f194.google.com with SMTP id x12so777639qtp.1\n for <dev@dpdk.org>; Tue, 11 Aug 2020 23:31:56 -0700 (PDT)",
            "from rahul_yocto_ubuntu18.ibn.broadcom.net ([192.19.234.250])\n by smtp.gmail.com with ESMTPSA id x3sm1301552qkx.3.2020.08.11.23.31.50\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Tue, 11 Aug 2020 23:31:53 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references;\n bh=Q7kxD0vqZXvds5QIKpoHf8inxxeFxPTLvuhsB8fWulM=;\n b=NM7hF4e80GhV1/H5uez0PFbGz2DAeNuWKerpTtgfZqjYj4b79oc3c3CcLujVoNcrh9\n U4bpdrDLCY8Lnrgss/tQSFPTJdxjrXhXJ1XisLW2nrcGzRxCVoYxyUe8BpS9b1PO9Zfq\n SWFqElRYSD4D7LYZKIyF7jAKOYnlO+c1lMwA0=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references;\n bh=Q7kxD0vqZXvds5QIKpoHf8inxxeFxPTLvuhsB8fWulM=;\n b=KUOY279UqZ9xI0Z25Du7krktiB+1QjI2VS4gnjkdnRGmr7iHpiwiVLEX6MUWkjq7R6\n LdZUv8SkfSfOy41TeInBd3pgchIyJ+N31E3pYTWbMuaXcKY1KgoCiFPSdeGlEDcizX1N\n ZZgGs+p5YJ7O+RdlJsbeWo0pXQJhCQZxpa37mqmPTZVKi4lm6OogDc6/4W6In3NqrVi5\n sHE9htXuph5IotK4sTGMF41pTbgfMM5imlGQmMxD5TRuGaRxeg0UscLZ7d7MV0IqPop0\n pgpy/vRGMAOsZoptDL6D2aVaYLu663tuVfynxZRHuPrR1BlskqSs9KL6LZJIhZOqDPR8\n Waqg==",
        "X-Gm-Message-State": "AOAM533g/NoNdb2Q97at0j0XGgo+TuXQHpvr0RKVLRnuysW83pTPQ6gA\n A9eQVAik22AjYtfmOG2wnBKDoDWOGpTRe3GYE/Q6JSRnW/MEdLnAcEtQAGpKt+Q4/GL3dlcHAZC\n B5Y4S9POnqZcosIXKsBTFgzStykDmcmiNTD7+KSbwWgCoAtLMHoWtLoeApZr0",
        "X-Google-Smtp-Source": "\n ABdhPJwvRxiEtYUGJeQBAg8etmDYX8+nhLv77H5ZZX/QtJ2x43wCk2SC5UYT+CLObM6T0bcydYqRfQ==",
        "X-Received": "by 2002:ac8:774f:: with SMTP id g15mr4912344qtu.145.1597213913553;\n Tue, 11 Aug 2020 23:31:53 -0700 (PDT)",
        "From": "Vikas Gupta <vikas.gupta@broadcom.com>",
        "To": "dev@dpdk.org,\n\takhil.goyal@nxp.com",
        "Cc": "ajit.khaparde@broadcom.com, vikram.prakash@broadcom.com,\n Vikas Gupta <vikas.gupta@broadcom.com>,\n Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>",
        "Date": "Wed, 12 Aug 2020 12:01:23 +0530",
        "Message-Id": "<20200812063127.8687-5-vikas.gupta@broadcom.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200812063127.8687-1-vikas.gupta@broadcom.com>",
        "References": "<20200811145813.44754-1-vikas.gupta@broadcom.com>\n <20200812063127.8687-1-vikas.gupta@broadcom.com>",
        "Subject": "[dpdk-dev] [PATCH v1 4/8] crypto/bcmfs: add hw queue pair operations",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add queue pair operations exported by supported devices.\n\nSigned-off-by: Vikas Gupta <vikas.gupta@broadcom.com>\nSigned-off-by: Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>\nReviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>\n---\n drivers/crypto/bcmfs/bcmfs_dev_msg.h      |  29 +\n drivers/crypto/bcmfs/bcmfs_device.c       |  51 ++\n drivers/crypto/bcmfs/bcmfs_device.h       |  16 +\n drivers/crypto/bcmfs/bcmfs_qp.c           |   1 +\n drivers/crypto/bcmfs/bcmfs_qp.h           |   4 +\n drivers/crypto/bcmfs/hw/bcmfs4_rm.c       | 742 ++++++++++++++++++++++\n drivers/crypto/bcmfs/hw/bcmfs5_rm.c       | 677 ++++++++++++++++++++\n drivers/crypto/bcmfs/hw/bcmfs_rm_common.c |  82 +++\n drivers/crypto/bcmfs/hw/bcmfs_rm_common.h |  46 ++\n drivers/crypto/bcmfs/meson.build          |   5 +-\n 10 files changed, 1652 insertions(+), 1 deletion(-)\n create mode 100644 drivers/crypto/bcmfs/bcmfs_dev_msg.h\n create mode 100644 drivers/crypto/bcmfs/hw/bcmfs4_rm.c\n create mode 100644 drivers/crypto/bcmfs/hw/bcmfs5_rm.c\n create mode 100644 drivers/crypto/bcmfs/hw/bcmfs_rm_common.c\n create mode 100644 drivers/crypto/bcmfs/hw/bcmfs_rm_common.h",
    "diff": "diff --git a/drivers/crypto/bcmfs/bcmfs_dev_msg.h b/drivers/crypto/bcmfs/bcmfs_dev_msg.h\nnew file mode 100644\nindex 000000000..5b50bde35\n--- /dev/null\n+++ b/drivers/crypto/bcmfs/bcmfs_dev_msg.h\n@@ -0,0 +1,29 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _BCMFS_DEV_MSG_H_\n+#define _BCMFS_DEV_MSG_H_\n+\n+#define MAX_SRC_ADDR_BUFFERS    8\n+#define MAX_DST_ADDR_BUFFERS    3\n+\n+struct bcmfs_qp_message {\n+\t/** Physical address of each source */\n+\tuint64_t srcs_addr[MAX_SRC_ADDR_BUFFERS];\n+\t/** Length of each sources */\n+\tuint32_t srcs_len[MAX_SRC_ADDR_BUFFERS];\n+\t/** Total number of sources */\n+\tunsigned int srcs_count;\n+\t/** Physical address of each destination */\n+\tuint64_t dsts_addr[MAX_DST_ADDR_BUFFERS];\n+\t/** Length of each destination */\n+\tuint32_t dsts_len[MAX_DST_ADDR_BUFFERS];\n+\t/** Total number of destinations */\n+\tunsigned int dsts_count;\n+\n+\tvoid *ctx;\n+};\n+\n+#endif /* _BCMFS_DEV_MSG_H_ */\ndiff --git a/drivers/crypto/bcmfs/bcmfs_device.c b/drivers/crypto/bcmfs/bcmfs_device.c\nindex b475c2933..bd2d64acf 100644\n--- a/drivers/crypto/bcmfs/bcmfs_device.c\n+++ b/drivers/crypto/bcmfs/bcmfs_device.c\n@@ -43,6 +43,47 @@ static struct bcmfs_device_attr dev_table[] = {\n \t}\n };\n \n+struct bcmfs_hw_queue_pair_ops_table bcmfs_hw_queue_pair_ops_table = {\n+\t.tl =  RTE_SPINLOCK_INITIALIZER,\n+\t.num_ops = 0\n+};\n+\n+int bcmfs_hw_queue_pair_register_ops(const struct bcmfs_hw_queue_pair_ops *h)\n+{\n+\tstruct bcmfs_hw_queue_pair_ops *ops;\n+\tint16_t ops_index;\n+\n+\trte_spinlock_lock(&bcmfs_hw_queue_pair_ops_table.tl);\n+\n+\tif (h->enq_one_req == NULL || h->dequeue == NULL ||\n+\t    h->ring_db == NULL || h->startq == NULL || h->stopq == NULL) {\n+\t\trte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);\n+\t\tBCMFS_LOG(ERR,\n+\t\t\t  \"Missing callback while registering device ops\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (strlen(h->name) >= sizeof(ops->name) - 1) {\n+\t\trte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);\n+\t\tBCMFS_LOG(ERR, \"%s(): fs device_ops <%s>: name too long\",\n+\t\t\t\t__func__, h->name);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tops_index = bcmfs_hw_queue_pair_ops_table.num_ops++;\n+\tops = &bcmfs_hw_queue_pair_ops_table.qp_ops[ops_index];\n+\tstrlcpy(ops->name, h->name, sizeof(ops->name));\n+\tops->enq_one_req = h->enq_one_req;\n+\tops->dequeue = h->dequeue;\n+\tops->ring_db = h->ring_db;\n+\tops->startq = h->startq;\n+\tops->stopq = h->stopq;\n+\n+\trte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);\n+\n+\treturn ops_index;\n+}\n+\n TAILQ_HEAD(fsdev_list, bcmfs_device);\n static struct fsdev_list fsdev_list = TAILQ_HEAD_INITIALIZER(fsdev_list);\n \n@@ -53,6 +94,7 @@ fsdev_allocate_one_dev(struct rte_vdev_device *vdev,\n \t\t       enum bcmfs_device_type dev_type __rte_unused)\n {\n \tstruct bcmfs_device *fsdev;\n+\tuint32_t i;\n \n \tfsdev = calloc(1, sizeof(*fsdev));\n \tif (!fsdev)\n@@ -68,6 +110,15 @@ fsdev_allocate_one_dev(struct rte_vdev_device *vdev,\n \t\tgoto cleanup;\n \t}\n \n+\t/* check if registered ops name is present in directory path */\n+\tfor (i = 0; i < bcmfs_hw_queue_pair_ops_table.num_ops; i++)\n+\t\tif (strstr(dirpath,\n+\t\t\t   bcmfs_hw_queue_pair_ops_table.qp_ops[i].name))\n+\t\t\tfsdev->sym_hw_qp_ops =\n+\t\t\t\t&bcmfs_hw_queue_pair_ops_table.qp_ops[i];\n+\tif (!fsdev->sym_hw_qp_ops)\n+\t\tgoto cleanup;\n+\n \tstrcpy(fsdev->dirname, dirpath);\n \tstrcpy(fsdev->name, devname);\n \ndiff --git 
a/drivers/crypto/bcmfs/bcmfs_device.h b/drivers/crypto/bcmfs/bcmfs_device.h\nindex e03ce5b5b..96beb10fa 100644\n--- a/drivers/crypto/bcmfs/bcmfs_device.h\n+++ b/drivers/crypto/bcmfs/bcmfs_device.h\n@@ -8,6 +8,7 @@\n \n #include <sys/queue.h>\n \n+#include <rte_spinlock.h>\n #include <rte_bus_vdev.h>\n \n #include \"bcmfs_logs.h\"\n@@ -28,6 +29,19 @@ enum bcmfs_device_type {\n \tBCMFS_UNKNOWN\n };\n \n+/* A table to store registered queue pair opertations */\n+struct bcmfs_hw_queue_pair_ops_table {\n+\trte_spinlock_t tl;\n+\t/* Number of used ops structs in the table. */\n+\tuint32_t num_ops;\n+\t /*  Storage for all possible ops structs. */\n+\tstruct bcmfs_hw_queue_pair_ops qp_ops[BCMFS_MAX_NODES];\n+};\n+\n+/* HW queue pair ops register function */\n+int bcmfs_hw_queue_pair_register_ops(const struct bcmfs_hw_queue_pair_ops\n+\t\t\t\t     *qp_ops);\n+\n struct bcmfs_device {\n \tTAILQ_ENTRY(bcmfs_device) next;\n \t/* Directoy path for vfio */\n@@ -46,6 +60,8 @@ struct bcmfs_device {\n \tuint16_t max_hw_qps;\n \t/* current qpairs in use */\n \tstruct bcmfs_qp *qps_in_use[BCMFS_MAX_HW_QUEUES];\n+\t/* queue pair ops exported by symmetric crypto hw */\n+\tstruct bcmfs_hw_queue_pair_ops *sym_hw_qp_ops;\n };\n \n #endif /* _BCMFS_DEV_H_ */\ndiff --git a/drivers/crypto/bcmfs/bcmfs_qp.c b/drivers/crypto/bcmfs/bcmfs_qp.c\nindex 864e7bb74..ec1327b78 100644\n--- a/drivers/crypto/bcmfs/bcmfs_qp.c\n+++ b/drivers/crypto/bcmfs/bcmfs_qp.c\n@@ -227,6 +227,7 @@ bcmfs_qp_setup(struct bcmfs_qp **qp_addr,\n \tqp->qpair_id = queue_pair_id;\n \tqp->ioreg = qp_conf->iobase;\n \tqp->nb_descriptors = nb_descriptors;\n+\tqp->ops = qp_conf->ops;\n \n \tqp->stats.enqueued_count = 0;\n \tqp->stats.dequeued_count = 0;\ndiff --git a/drivers/crypto/bcmfs/bcmfs_qp.h b/drivers/crypto/bcmfs/bcmfs_qp.h\nindex 027d7a50c..e4b0c3f2f 100644\n--- a/drivers/crypto/bcmfs/bcmfs_qp.h\n+++ b/drivers/crypto/bcmfs/bcmfs_qp.h\n@@ -44,6 +44,8 @@ struct bcmfs_qp_config {\n \tuint16_t nb_descriptors;\n \t/* Maximum number of h/w descriptors needed by a request */\n \tuint16_t max_descs_req;\n+\t/* h/w ops associated with qp */\n+\tstruct bcmfs_hw_queue_pair_ops *ops;\n };\n \n struct bcmfs_queue {\n@@ -61,6 +63,8 @@ struct bcmfs_queue {\n \t\t/* s/w pointer for completion h/w queue*/\n \t\tuint32_t cmpl_read_ptr;\n \t};\n+\t/* number of inflight descriptor accumulated  before next db ring */\n+\tuint16_t descs_inflight;\n \t/* Memzone name */\n \tchar memz_name[RTE_MEMZONE_NAMESIZE];\n };\ndiff --git a/drivers/crypto/bcmfs/hw/bcmfs4_rm.c b/drivers/crypto/bcmfs/hw/bcmfs4_rm.c\nnew file mode 100644\nindex 000000000..c1cd1b813\n--- /dev/null\n+++ b/drivers/crypto/bcmfs/hw/bcmfs4_rm.c\n@@ -0,0 +1,742 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#include <unistd.h>\n+\n+#include <rte_bitmap.h>\n+\n+#include \"bcmfs_device.h\"\n+#include \"bcmfs_dev_msg.h\"\n+#include \"bcmfs_hw_defs.h\"\n+#include \"bcmfs_logs.h\"\n+#include \"bcmfs_qp.h\"\n+#include \"bcmfs_rm_common.h\"\n+\n+/* FS4 configuration */\n+#define RING_BD_TOGGLE_INVALID(offset)\t\t\t\\\n+\t\t\t(((offset) >> FS_RING_BD_ALIGN_ORDER) & 0x1)\n+#define RING_BD_TOGGLE_VALID(offset)\t\t\t\\\n+\t\t\t(!RING_BD_TOGGLE_INVALID(offset))\n+\n+#define RING_VER_MAGIC\t\t\t\t\t0x76303031\n+\n+/* Per-Ring register offsets */\n+#define RING_VER\t\t\t\t\t0x000\n+#define RING_BD_START_ADDR\t\t\t\t0x004\n+#define RING_BD_READ_PTR\t\t\t\t0x008\n+#define RING_BD_WRITE_PTR\t\t\t\t0x00c\n+#define 
RING_BD_READ_PTR_DDR_LS\t\t\t\t0x010\n+#define RING_BD_READ_PTR_DDR_MS\t\t\t\t0x014\n+#define RING_CMPL_START_ADDR\t\t\t\t0x018\n+#define RING_CMPL_WRITE_PTR\t\t\t\t0x01c\n+#define RING_NUM_REQ_RECV_LS\t\t\t\t0x020\n+#define RING_NUM_REQ_RECV_MS\t\t\t\t0x024\n+#define RING_NUM_REQ_TRANS_LS\t\t\t\t0x028\n+#define RING_NUM_REQ_TRANS_MS\t\t\t\t0x02c\n+#define RING_NUM_REQ_OUTSTAND\t\t\t\t0x030\n+#define RING_CONTROL\t\t\t\t\t0x034\n+#define RING_FLUSH_DONE\t\t\t\t\t0x038\n+#define RING_MSI_ADDR_LS\t\t\t\t0x03c\n+#define RING_MSI_ADDR_MS\t\t\t\t0x040\n+#define RING_MSI_CONTROL\t\t\t\t0x048\n+#define RING_BD_READ_PTR_DDR_CONTROL\t\t\t0x04c\n+#define RING_MSI_DATA_VALUE\t\t\t\t0x064\n+\n+/* Register RING_BD_START_ADDR fields */\n+#define BD_LAST_UPDATE_HW_SHIFT\t\t\t\t28\n+#define BD_LAST_UPDATE_HW_MASK\t\t\t\t0x1\n+#define BD_START_ADDR_VALUE(pa)\t\t\t\t\\\n+\t((uint32_t)((((uint64_t)(pa)) >> FS_RING_BD_ALIGN_ORDER) & 0x0fffffff))\n+#define BD_START_ADDR_DECODE(val)\t\t\t\\\n+\t((uint64_t)((val) & 0x0fffffff) << FS_RING_BD_ALIGN_ORDER)\n+\n+/* Register RING_CMPL_START_ADDR fields */\n+#define CMPL_START_ADDR_VALUE(pa)\t\t\t\\\n+\t((uint32_t)((((uint64_t)(pa)) >> FS_RING_CMPL_ALIGN_ORDER) & 0x7ffffff))\n+\n+/* Register RING_CONTROL fields */\n+#define CONTROL_MASK_DISABLE_CONTROL\t\t\t12\n+#define CONTROL_FLUSH_SHIFT\t\t\t\t5\n+#define CONTROL_ACTIVE_SHIFT\t\t\t\t4\n+#define CONTROL_RATE_ADAPT_MASK\t\t\t\t0xf\n+#define CONTROL_RATE_DYNAMIC\t\t\t\t0x0\n+#define CONTROL_RATE_FAST\t\t\t\t0x8\n+#define CONTROL_RATE_MEDIUM\t\t\t\t0x9\n+#define CONTROL_RATE_SLOW\t\t\t\t0xa\n+#define CONTROL_RATE_IDLE\t\t\t\t0xb\n+\n+/* Register RING_FLUSH_DONE fields */\n+#define FLUSH_DONE_MASK\t\t\t\t\t0x1\n+\n+/* Register RING_MSI_CONTROL fields */\n+#define MSI_TIMER_VAL_SHIFT\t\t\t\t16\n+#define MSI_TIMER_VAL_MASK\t\t\t\t0xffff\n+#define MSI_ENABLE_SHIFT\t\t\t\t15\n+#define MSI_ENABLE_MASK\t\t\t\t\t0x1\n+#define MSI_COUNT_SHIFT\t\t\t\t\t0\n+#define MSI_COUNT_MASK\t\t\t\t\t0x3ff\n+\n+/* Register RING_BD_READ_PTR_DDR_CONTROL fields */\n+#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT\t\t\t16\n+#define BD_READ_PTR_DDR_TIMER_VAL_MASK\t\t\t0xffff\n+#define BD_READ_PTR_DDR_ENABLE_SHIFT\t\t\t15\n+#define BD_READ_PTR_DDR_ENABLE_MASK\t\t\t0x1\n+\n+/* ====== Broadcom FS4-RM ring descriptor defines ===== */\n+\n+\n+/* General descriptor format */\n+#define DESC_TYPE_SHIFT\t\t\t\t60\n+#define DESC_TYPE_MASK\t\t\t\t0xf\n+#define DESC_PAYLOAD_SHIFT\t\t\t0\n+#define DESC_PAYLOAD_MASK\t\t\t0x0fffffffffffffff\n+\n+/* Null descriptor format  */\n+#define NULL_TYPE\t\t\t\t0\n+#define NULL_TOGGLE_SHIFT\t\t\t58\n+#define NULL_TOGGLE_MASK\t\t\t0x1\n+\n+/* Header descriptor format */\n+#define HEADER_TYPE\t\t\t\t1\n+#define HEADER_TOGGLE_SHIFT\t\t\t58\n+#define HEADER_TOGGLE_MASK\t\t\t0x1\n+#define HEADER_ENDPKT_SHIFT\t\t\t57\n+#define HEADER_ENDPKT_MASK\t\t\t0x1\n+#define HEADER_STARTPKT_SHIFT\t\t\t56\n+#define HEADER_STARTPKT_MASK\t\t\t0x1\n+#define HEADER_BDCOUNT_SHIFT\t\t\t36\n+#define HEADER_BDCOUNT_MASK\t\t\t0x1f\n+#define HEADER_BDCOUNT_MAX\t\t\tHEADER_BDCOUNT_MASK\n+#define HEADER_FLAGS_SHIFT\t\t\t16\n+#define HEADER_FLAGS_MASK\t\t\t0xffff\n+#define HEADER_OPAQUE_SHIFT\t\t\t0\n+#define HEADER_OPAQUE_MASK\t\t\t0xffff\n+\n+/* Source (SRC) descriptor format */\n+#define SRC_TYPE\t\t\t\t2\n+#define SRC_LENGTH_SHIFT\t\t\t44\n+#define SRC_LENGTH_MASK\t\t\t\t0xffff\n+#define SRC_ADDR_SHIFT\t\t\t\t0\n+#define SRC_ADDR_MASK\t\t\t\t0x00000fffffffffff\n+\n+/* Destination (DST) descriptor format */\n+#define DST_TYPE\t\t\t\t3\n+#define 
DST_LENGTH_SHIFT\t\t\t44\n+#define DST_LENGTH_MASK\t\t\t\t0xffff\n+#define DST_ADDR_SHIFT\t\t\t\t0\n+#define DST_ADDR_MASK\t\t\t\t0x00000fffffffffff\n+\n+/* Next pointer (NPTR) descriptor format */\n+#define NPTR_TYPE\t\t\t\t5\n+#define NPTR_TOGGLE_SHIFT\t\t\t58\n+#define NPTR_TOGGLE_MASK\t\t\t0x1\n+#define NPTR_ADDR_SHIFT\t\t\t\t0\n+#define NPTR_ADDR_MASK\t\t\t\t0x00000fffffffffff\n+\n+/* Mega source (MSRC) descriptor format */\n+#define MSRC_TYPE\t\t\t\t6\n+#define MSRC_LENGTH_SHIFT\t\t\t44\n+#define MSRC_LENGTH_MASK\t\t\t0xffff\n+#define MSRC_ADDR_SHIFT\t\t\t\t0\n+#define MSRC_ADDR_MASK\t\t\t\t0x00000fffffffffff\n+\n+/* Mega destination (MDST) descriptor format */\n+#define MDST_TYPE\t\t\t\t7\n+#define MDST_LENGTH_SHIFT\t\t\t44\n+#define MDST_LENGTH_MASK\t\t\t0xffff\n+#define MDST_ADDR_SHIFT\t\t\t\t0\n+#define MDST_ADDR_MASK\t\t\t\t0x00000fffffffffff\n+\n+static uint8_t\n+bcmfs4_is_next_table_desc(void *desc_ptr)\n+{\n+\tuint64_t desc = rm_read_desc(desc_ptr);\n+\tuint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);\n+\n+\treturn (type == NPTR_TYPE) ? true : false;\n+}\n+\n+static uint64_t\n+bcmfs4_next_table_desc(uint32_t toggle, uint64_t next_addr)\n+{\n+\treturn (rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK) |\n+\t\trm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs4_null_desc(uint32_t toggle)\n+{\n+\treturn (rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK));\n+}\n+\n+static void\n+bcmfs4_flip_header_toggle(void *desc_ptr)\n+{\n+\tuint64_t desc = rm_read_desc(desc_ptr);\n+\n+\tif (desc & ((uint64_t)0x1 << HEADER_TOGGLE_SHIFT))\n+\t\tdesc &= ~((uint64_t)0x1 << HEADER_TOGGLE_SHIFT);\n+\telse\n+\t\tdesc |= ((uint64_t)0x1 << HEADER_TOGGLE_SHIFT);\n+\n+\trm_write_desc(desc_ptr, desc);\n+}\n+\n+static uint64_t\n+bcmfs4_header_desc(uint32_t toggle, uint32_t startpkt,\n+\t\t   uint32_t endpkt, uint32_t bdcount,\n+\t\t   uint32_t flags, uint32_t opaque)\n+{\n+\treturn (rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK) |\n+\t\trm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,\n+\t\t\t      HEADER_STARTPKT_MASK) |\n+\t\trm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK) |\n+\t\trm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT,\n+\t\t\t      HEADER_BDCOUNT_MASK) |\n+\t\trm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK) |\n+\t\trm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK));\n+}\n+\n+static void\n+bcmfs4_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,\n+\t\t    uint32_t reqid, uint64_t desc,\n+\t\t    void **desc_ptr, uint32_t *toggle,\n+\t\t    void *start_desc, void *end_desc)\n+{\n+\tuint64_t d;\n+\tuint32_t nhavail, _toggle, _startpkt, _endpkt, _bdcount;\n+\n+\t/*\n+\t * Each request or packet start with a HEADER descriptor followed\n+\t * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,\n+\t * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors\n+\t * following a HEADER descriptor is represented by BDCOUNT field\n+\t * of HEADER descriptor. The max value of BDCOUNT field is 31 which\n+\t * means we can only have 31 non-HEADER descriptors following one\n+\t * HEADER descriptor.\n+\t *\n+\t * In general use, number of non-HEADER descriptors can easily go\n+\t * beyond 31. 
To tackle this situation, we have packet (or request)\n+\t * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.\n+\t *\n+\t * To use packet extension, the first HEADER descriptor of request\n+\t * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate\n+\t * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last\n+\t * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the\n+\t * TOGGLE bit of the first HEADER will be set to invalid state to\n+\t * ensure that FlexDMA engine does not start fetching descriptors\n+\t * till all descriptors are enqueued. The user of this function\n+\t * will flip the TOGGLE bit of first HEADER after all descriptors\n+\t * are enqueued.\n+\t */\n+\n+\tif ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {\n+\t\t/* Prepare the header descriptor */\n+\t\tnhavail = (nhcnt - nhpos);\n+\t\t_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);\n+\t\t_startpkt = (nhpos == 0) ? 0x1 : 0x0;\n+\t\t_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;\n+\t\t_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?\n+\t\t\t\tnhavail : HEADER_BDCOUNT_MAX;\n+\t\tif (nhavail <= HEADER_BDCOUNT_MAX)\n+\t\t\t_bdcount = nhavail;\n+\t\telse\n+\t\t\t_bdcount = HEADER_BDCOUNT_MAX;\n+\t\td = bcmfs4_header_desc(_toggle, _startpkt, _endpkt,\n+\t\t\t\t\t_bdcount, 0x0, reqid);\n+\n+\t\t/* Write header descriptor */\n+\t\trm_write_desc(*desc_ptr, d);\n+\n+\t\t/* Point to next descriptor */\n+\t\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\t\tif (*desc_ptr == end_desc)\n+\t\t\t*desc_ptr = start_desc;\n+\n+\t\t/* Skip next pointer descriptors */\n+\t\twhile (bcmfs4_is_next_table_desc(*desc_ptr)) {\n+\t\t\t*toggle = (*toggle) ? 0 : 1;\n+\t\t\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\t\t\tif (*desc_ptr == end_desc)\n+\t\t\t\t*desc_ptr = start_desc;\n+\t\t}\n+\t}\n+\n+\t/* Write desired descriptor */\n+\trm_write_desc(*desc_ptr, desc);\n+\n+\t/* Point to next descriptor */\n+\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\tif (*desc_ptr == end_desc)\n+\t\t*desc_ptr = start_desc;\n+\n+\t/* Skip next pointer descriptors */\n+\twhile (bcmfs4_is_next_table_desc(*desc_ptr)) {\n+\t\t*toggle = (*toggle) ? 
0 : 1;\n+\t\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\t\tif (*desc_ptr == end_desc)\n+\t\t\t*desc_ptr = start_desc;\n+\t}\n+}\n+\n+static uint64_t\n+bcmfs4_src_desc(uint64_t addr, unsigned int length)\n+{\n+\treturn (rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK) |\n+\t\trm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs4_msrc_desc(uint64_t addr, unsigned int length_div_16)\n+{\n+\treturn (rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK) |\n+\t\trm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs4_dst_desc(uint64_t addr, unsigned int length)\n+{\n+\treturn (rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(length, DST_LENGTH_SHIFT, DST_LENGTH_MASK) |\n+\t\trm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs4_mdst_desc(uint64_t addr, unsigned int length_div_16)\n+{\n+\treturn (rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK) |\n+\t\trm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK));\n+}\n+\n+static bool\n+bcmfs4_sanity_check(struct bcmfs_qp_message *msg)\n+{\n+\tunsigned int i = 0;\n+\n+\tif (msg == NULL)\n+\t\treturn false;\n+\n+\tfor (i = 0; i <  msg->srcs_count; i++) {\n+\t\tif (msg->srcs_len[i] & 0xf) {\n+\t\t\tif (msg->srcs_len[i] > SRC_LENGTH_MASK)\n+\t\t\t\treturn false;\n+\t\t} else {\n+\t\t\tif (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16))\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\tfor (i = 0; i <  msg->dsts_count; i++) {\n+\t\tif (msg->dsts_len[i] & 0xf) {\n+\t\t\tif (msg->dsts_len[i] > DST_LENGTH_MASK)\n+\t\t\t\treturn false;\n+\t\t} else {\n+\t\t\tif (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16))\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\n+\treturn true;\n+}\n+\n+static uint32_t\n+estimate_nonheader_desc_count(struct bcmfs_qp_message *msg)\n+{\n+\tuint32_t cnt = 0;\n+\tunsigned int src = 0;\n+\tunsigned int dst = 0;\n+\tunsigned int dst_target = 0;\n+\n+\twhile (src < msg->srcs_count ||\n+\t       dst < msg->dsts_count) {\n+\t\tif (src < msg->srcs_count) {\n+\t\t\tcnt++;\n+\t\t\tdst_target = msg->srcs_len[src];\n+\t\t\tsrc++;\n+\t\t} else {\n+\t\t\tdst_target = UINT_MAX;\n+\t\t}\n+\t\twhile (dst_target && dst < msg->dsts_count) {\n+\t\t\tcnt++;\n+\t\t\tif (msg->dsts_len[dst] < dst_target)\n+\t\t\t\tdst_target -= msg->dsts_len[dst];\n+\t\t\telse\n+\t\t\t\tdst_target = 0;\n+\t\t\tdst++;\n+\t\t}\n+\t}\n+\n+\treturn cnt;\n+}\n+\n+static void *\n+bcmfs4_enqueue_msg(struct bcmfs_qp_message *msg,\n+\t\t   uint32_t nhcnt, uint32_t reqid,\n+\t\t   void *desc_ptr, uint32_t toggle,\n+\t\t   void *start_desc, void *end_desc)\n+{\n+\tuint64_t d;\n+\tuint32_t nhpos = 0;\n+\tunsigned int src = 0;\n+\tunsigned int dst = 0;\n+\tunsigned int dst_target = 0;\n+\tvoid *orig_desc_ptr = desc_ptr;\n+\n+\tif (!desc_ptr || !start_desc || !end_desc)\n+\t\treturn NULL;\n+\n+\tif (desc_ptr < start_desc || end_desc <= desc_ptr)\n+\t\treturn NULL;\n+\n+\twhile (src < msg->srcs_count ||\tdst < msg->dsts_count) {\n+\t\tif (src < msg->srcs_count) {\n+\t\t\tif (msg->srcs_len[src] & 0xf) {\n+\t\t\t\td = bcmfs4_src_desc(msg->srcs_addr[src],\n+\t\t\t\t\t\t    msg->srcs_len[src]);\n+\t\t\t} else {\n+\t\t\t\td = bcmfs4_msrc_desc(msg->srcs_addr[src],\n+\t\t\t\t\t\t     msg->srcs_len[src] / 16);\n+\t\t\t}\n+\t\t\tbcmfs4_enqueue_desc(nhpos, 
nhcnt, reqid,\n+\t\t\t\t\t    d, &desc_ptr, &toggle,\n+\t\t\t\t\t    start_desc, end_desc);\n+\t\t\tnhpos++;\n+\t\t\tdst_target = msg->srcs_len[src];\n+\t\t\tsrc++;\n+\t\t} else {\n+\t\t\tdst_target = UINT_MAX;\n+\t\t}\n+\n+\t\twhile (dst_target && (dst < msg->dsts_count)) {\n+\t\t\tif (msg->dsts_len[dst] & 0xf) {\n+\t\t\t\td = bcmfs4_dst_desc(msg->dsts_addr[dst],\n+\t\t\t\t\t\t    msg->dsts_len[dst]);\n+\t\t\t} else {\n+\t\t\t\td = bcmfs4_mdst_desc(msg->dsts_addr[dst],\n+\t\t\t\t\t\t     msg->dsts_len[dst] / 16);\n+\t\t\t}\n+\t\t\tbcmfs4_enqueue_desc(nhpos, nhcnt, reqid,\n+\t\t\t\t\t    d, &desc_ptr, &toggle,\n+\t\t\t\t\t    start_desc, end_desc);\n+\t\t\tnhpos++;\n+\t\t\tif (msg->dsts_len[dst] < dst_target)\n+\t\t\t\tdst_target -= msg->dsts_len[dst];\n+\t\t\telse\n+\t\t\t\tdst_target = 0;\n+\t\t\tdst++; /* for next buffer */\n+\t\t}\n+\t}\n+\n+\t/* Null descriptor with invalid toggle bit */\n+\trm_write_desc(desc_ptr, bcmfs4_null_desc(!toggle));\n+\n+\t/* Ensure that descriptors have been written to memory */\n+\trte_smp_wmb();\n+\n+\tbcmfs4_flip_header_toggle(orig_desc_ptr);\n+\n+\treturn desc_ptr;\n+}\n+\n+static int\n+bcmfs4_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)\n+{\n+\tint reqid;\n+\tvoid *next;\n+\tuint32_t nhcnt;\n+\tint ret = 0;\n+\tuint32_t pos = 0;\n+\tuint64_t slab = 0;\n+\tuint8_t exit_cleanup = false;\n+\tstruct bcmfs_queue *txq = &qp->tx_q;\n+\tstruct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;\n+\n+\t/* Do sanity check on message */\n+\tif (!bcmfs4_sanity_check(msg)) {\n+\t\tBCMFS_DP_LOG(ERR, \"Invalid msg on queue %d\", qp->qpair_id);\n+\t\treturn -EIO;\n+\t}\n+\n+\t/* Scan from the beginning */\n+\t__rte_bitmap_scan_init(qp->ctx_bmp);\n+\t/* Scan bitmap to get the free pool */\n+\tret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);\n+\tif (ret == 0) {\n+\t\tBCMFS_DP_LOG(ERR, \"BD memory exhausted\");\n+\t\treturn -ERANGE;\n+\t}\n+\n+\treqid = pos + __builtin_ctzll(slab);\n+\trte_bitmap_clear(qp->ctx_bmp, reqid);\n+\tqp->ctx_pool[reqid] = (unsigned long)msg;\n+\n+\t/*\n+\t * Number required descriptors = number of non-header descriptors +\n+\t *\t\t\t\t number of header descriptors +\n+\t *\t\t\t\t 1x null descriptor\n+\t */\n+\tnhcnt = estimate_nonheader_desc_count(msg);\n+\n+\t/* Write descriptors to ring */\n+\tnext = bcmfs4_enqueue_msg(msg, nhcnt, reqid,\n+\t\t\t\t  (uint8_t *)txq->base_addr + txq->tx_write_ptr,\n+\t\t\t\t  RING_BD_TOGGLE_VALID(txq->tx_write_ptr),\n+\t\t\t\t  txq->base_addr,\n+\t\t\t\t  (uint8_t *)txq->base_addr + txq->queue_size);\n+\tif (next == NULL) {\n+\t\tBCMFS_DP_LOG(ERR, \"Enqueue for desc failed on queue %d\",\n+\t\t\t     qp->qpair_id);\n+\t\tret = -EINVAL;\n+\t\texit_cleanup = true;\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Save ring BD write offset */\n+\ttxq->tx_write_ptr = (uint32_t)((uint8_t *)next -\n+\t\t\t\t       (uint8_t *)txq->base_addr);\n+\n+\tqp->nb_pending_requests++;\n+\n+\treturn 0;\n+\n+exit:\n+\t/* Cleanup if we failed */\n+\tif (exit_cleanup)\n+\t\trte_bitmap_set(qp->ctx_bmp, reqid);\n+\n+\treturn ret;\n+}\n+\n+static void\n+bcmfs4_ring_doorbell_qp(struct bcmfs_qp *qp __rte_unused)\n+{\n+\t/* no door bell method supported */\n+}\n+\n+static uint16_t\n+bcmfs4_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)\n+{\n+\tint err;\n+\tuint16_t reqid;\n+\tuint64_t desc;\n+\tuint16_t count = 0;\n+\tunsigned long context = 0;\n+\tstruct bcmfs_queue *hwq = &qp->cmpl_q;\n+\tuint32_t cmpl_read_offset, cmpl_write_offset;\n+\n+\t/*\n+\t * Check whether budget is valid, else set the budget to maximum\n+\t * so 
that all the available completions will be processed.\n+\t */\n+\tif (budget > qp->nb_pending_requests)\n+\t\tbudget =  qp->nb_pending_requests;\n+\n+\t/*\n+\t * Get current completion read and write offset\n+\t * Note: We should read completion write pointer atleast once\n+\t * after we get a MSI interrupt because HW maintains internal\n+\t * MSI status which will allow next MSI interrupt only after\n+\t * completion write pointer is read.\n+\t */\n+\tcmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg +\n+\t\t\t\t\t   RING_CMPL_WRITE_PTR);\n+\tcmpl_write_offset *= FS_RING_DESC_SIZE;\n+\tcmpl_read_offset = hwq->cmpl_read_ptr;\n+\n+\trte_smp_rmb();\n+\n+\t/* For each completed request notify mailbox clients */\n+\treqid = 0;\n+\twhile ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {\n+\t\t/* Dequeue next completion descriptor */\n+\t\tdesc = *((uint64_t *)((uint8_t *)hwq->base_addr +\n+\t\t\t\t       cmpl_read_offset));\n+\n+\t\t/* Next read offset */\n+\t\tcmpl_read_offset += FS_RING_DESC_SIZE;\n+\t\tif (cmpl_read_offset == FS_RING_CMPL_SIZE)\n+\t\t\tcmpl_read_offset = 0;\n+\n+\t\t/* Decode error from completion descriptor */\n+\t\terr = rm_cmpl_desc_to_error(desc);\n+\t\tif (err < 0)\n+\t\t\tBCMFS_DP_LOG(ERR, \"error desc rcvd\");\n+\n+\t\t/* Determine request id from completion descriptor */\n+\t\treqid = rm_cmpl_desc_to_reqid(desc);\n+\n+\t\t/* Determine message pointer based on reqid */\n+\t\tcontext = qp->ctx_pool[reqid];\n+\t\tif (context == 0)\n+\t\t\tBCMFS_DP_LOG(ERR, \"HW error detected\");\n+\n+\t\t/* Release reqid for recycling */\n+\t\tqp->ctx_pool[reqid] = 0;\n+\t\trte_bitmap_set(qp->ctx_bmp, reqid);\n+\n+\t\t*ops = (void *)context;\n+\n+\t\t/* Increment number of completions processed */\n+\t\tcount++;\n+\t\tbudget--;\n+\t\tops++;\n+\t}\n+\n+\thwq->cmpl_read_ptr = cmpl_read_offset;\n+\n+\tqp->nb_pending_requests -= count;\n+\n+\treturn count;\n+}\n+\n+static int\n+bcmfs4_start_qp(struct bcmfs_qp *qp)\n+{\n+\tint timeout;\n+\tuint32_t val, off;\n+\tuint64_t d, next_addr, msi;\n+\tstruct bcmfs_queue *tx_queue = &qp->tx_q;\n+\tstruct bcmfs_queue *cmpl_queue = &qp->cmpl_q;\n+\n+\t/* Disable/inactivate ring */\n+\tFS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);\n+\n+\t/* Configure next table pointer entries in BD memory */\n+\tfor (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {\n+\t\tnext_addr = off + FS_RING_DESC_SIZE;\n+\t\tif (next_addr == tx_queue->queue_size)\n+\t\t\tnext_addr = 0;\n+\t\tnext_addr += (uint64_t)tx_queue->base_phys_addr;\n+\t\tif (FS_RING_BD_ALIGN_CHECK(next_addr))\n+\t\t\td = bcmfs4_next_table_desc(RING_BD_TOGGLE_VALID(off),\n+\t\t\t\t\t\t    next_addr);\n+\t\telse\n+\t\t\td = bcmfs4_null_desc(RING_BD_TOGGLE_INVALID(off));\n+\t\trm_write_desc((uint8_t *)tx_queue->base_addr + off, d);\n+\t}\n+\n+\t/*\n+\t * If user interrupt the test in between the run(Ctrl+C), then all\n+\t * subsequent test run will fail because sw cmpl_read_offset and hw\n+\t * cmpl_write_offset will be pointing at different completion BD. 
To\n+\t * handle this we should flush all the rings in the startup instead\n+\t * of shutdown function.\n+\t * Ring flush will reset hw cmpl_write_offset.\n+\t */\n+\n+\t/* Set ring flush state */\n+\ttimeout = 1000;\n+\tFS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),\n+\t\t\t(uint8_t *)qp->ioreg + RING_CONTROL);\n+\tdo {\n+\t\t/*\n+\t\t * If previous test is stopped in between the run, then\n+\t\t * sw has to read cmpl_write_offset else DME/AE will be not\n+\t\t * come out of flush state.\n+\t\t */\n+\t\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);\n+\n+\t\tif (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &\n+\t\t\t\tFLUSH_DONE_MASK)\n+\t\t\tbreak;\n+\t\tusleep(1000);\n+\t} while (--timeout);\n+\tif (!timeout) {\n+\t\tBCMFS_DP_LOG(ERR, \"Ring flush timeout hw-queue %d\",\n+\t\t\t     qp->qpair_id);\n+\t}\n+\n+\t/* Clear ring flush state */\n+\ttimeout = 1000;\n+\tFS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);\n+\tdo {\n+\t\tif (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &\n+\t\t\t\t  FLUSH_DONE_MASK))\n+\t\t\tbreak;\n+\t\tusleep(1000);\n+\t} while (--timeout);\n+\tif (!timeout) {\n+\t\tBCMFS_DP_LOG(ERR, \"Ring clear flush timeout hw-queue %d\",\n+\t\t\t     qp->qpair_id);\n+\t}\n+\n+\t/* Program BD start address */\n+\tval = BD_START_ADDR_VALUE(tx_queue->base_phys_addr);\n+\tFS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_BD_START_ADDR);\n+\n+\t/* BD write pointer will be same as HW write pointer */\n+\ttx_queue->tx_write_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +\n+\t\t\t\t\t\tRING_BD_WRITE_PTR);\n+\ttx_queue->tx_write_ptr *= FS_RING_DESC_SIZE;\n+\n+\n+\tfor (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)\n+\t\trm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);\n+\n+\t/* Program completion start address */\n+\tval = CMPL_START_ADDR_VALUE(cmpl_queue->base_phys_addr);\n+\tFS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CMPL_START_ADDR);\n+\n+\t/* Completion read pointer will be same as HW write pointer */\n+\tcmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +\n+\t\t\t\t\t\t   RING_CMPL_WRITE_PTR);\n+\tcmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;\n+\n+\t/* Read ring Tx, Rx, and Outstanding counts to clear */\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);\n+\n+\t/* Configure per-Ring MSI registers with dummy location */\n+\t/* We leave 1k * FS_RING_DESC_SIZE size from base phys for MSI */\n+\tmsi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);\n+\tFS_MMIO_WRITE32((msi & 0xFFFFFFFF),\n+\t\t\t(uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);\n+\tFS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),\n+\t\t\t(uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);\n+\tFS_MMIO_WRITE32(qp->qpair_id,\n+\t\t\t(uint8_t *)qp->ioreg + RING_MSI_DATA_VALUE);\n+\n+\t/* Configure RING_MSI_CONTROL */\n+\tval = 0;\n+\tval |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);\n+\tval |= BIT(MSI_ENABLE_SHIFT);\n+\tval |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;\n+\tFS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);\n+\n+\t/* Enable/activate ring */\n+\tval = BIT(CONTROL_ACTIVE_SHIFT);\n+\tFS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);\n+\n+\treturn 0;\n+}\n+\n+static void\n+bcmfs4_shutdown_qp(struct bcmfs_qp *qp)\n+{\n+\t/* Disable/inactivate ring 
*/\n+\tFS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);\n+}\n+\n+struct bcmfs_hw_queue_pair_ops bcmfs4_qp_ops = {\n+\t.name = \"fs4\",\n+\t.enq_one_req = bcmfs4_enqueue_single_request_qp,\n+\t.ring_db = bcmfs4_ring_doorbell_qp,\n+\t.dequeue = bcmfs4_dequeue_qp,\n+\t.startq = bcmfs4_start_qp,\n+\t.stopq = bcmfs4_shutdown_qp,\n+};\n+\n+RTE_INIT(bcmfs4_register_qp_ops)\n+{\n+\t bcmfs_hw_queue_pair_register_ops(&bcmfs4_qp_ops);\n+}\ndiff --git a/drivers/crypto/bcmfs/hw/bcmfs5_rm.c b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c\nnew file mode 100644\nindex 000000000..fd92121da\n--- /dev/null\n+++ b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c\n@@ -0,0 +1,677 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#include <unistd.h>\n+\n+#include <rte_bitmap.h>\n+\n+#include \"bcmfs_qp.h\"\n+#include \"bcmfs_logs.h\"\n+#include \"bcmfs_dev_msg.h\"\n+#include \"bcmfs_device.h\"\n+#include \"bcmfs_hw_defs.h\"\n+#include \"bcmfs_rm_common.h\"\n+\n+/* Ring version */\n+#define RING_VER_MAGIC\t\t\t\t\t0x76303032\n+\n+/* Per-Ring register offsets */\n+#define RING_VER\t\t\t\t\t0x000\n+#define RING_BD_START_ADDRESS_LSB\t\t\t0x004\n+#define RING_BD_READ_PTR\t\t\t\t0x008\n+#define RING_BD_WRITE_PTR\t\t\t\t0x00c\n+#define RING_BD_READ_PTR_DDR_LS\t\t\t\t0x010\n+#define RING_BD_READ_PTR_DDR_MS\t\t\t\t0x014\n+#define RING_CMPL_START_ADDR_LSB\t\t\t0x018\n+#define RING_CMPL_WRITE_PTR\t\t\t\t0x01c\n+#define RING_NUM_REQ_RECV_LS\t\t\t\t0x020\n+#define RING_NUM_REQ_RECV_MS\t\t\t\t0x024\n+#define RING_NUM_REQ_TRANS_LS\t\t\t\t0x028\n+#define RING_NUM_REQ_TRANS_MS\t\t\t\t0x02c\n+#define RING_NUM_REQ_OUTSTAND\t\t\t\t0x030\n+#define RING_CONTROL\t\t\t\t\t0x034\n+#define RING_FLUSH_DONE\t\t\t\t\t0x038\n+#define RING_MSI_ADDR_LS\t\t\t\t0x03c\n+#define RING_MSI_ADDR_MS\t\t\t\t0x040\n+#define RING_MSI_CONTROL\t\t\t\t0x048\n+#define RING_BD_READ_PTR_DDR_CONTROL\t\t\t0x04c\n+#define RING_MSI_DATA_VALUE\t\t\t\t0x064\n+#define RING_BD_START_ADDRESS_MSB\t\t\t0x078\n+#define RING_CMPL_START_ADDR_MSB\t\t\t0x07c\n+#define RING_DOORBELL_BD_WRITE_COUNT\t\t\t0x074\n+\n+/* Register RING_BD_START_ADDR fields */\n+#define BD_LAST_UPDATE_HW_SHIFT\t\t\t\t28\n+#define BD_LAST_UPDATE_HW_MASK\t\t\t\t0x1\n+#define BD_START_ADDR_VALUE(pa)\t\t\t\t\\\n+\t((uint32_t)((((uint64_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))\n+#define BD_START_ADDR_DECODE(val)\t\t\t\\\n+\t((uint64_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)\n+\n+/* Register RING_CMPL_START_ADDR fields */\n+#define CMPL_START_ADDR_VALUE(pa)\t\t\t\\\n+\t((uint32_t)((((uint64_t)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))\n+\n+/* Register RING_CONTROL fields */\n+#define CONTROL_MASK_DISABLE_CONTROL\t\t\t12\n+#define CONTROL_FLUSH_SHIFT\t\t\t\t5\n+#define CONTROL_ACTIVE_SHIFT\t\t\t\t4\n+#define CONTROL_RATE_ADAPT_MASK\t\t\t\t0xf\n+#define CONTROL_RATE_DYNAMIC\t\t\t\t0x0\n+#define CONTROL_RATE_FAST\t\t\t\t0x8\n+#define CONTROL_RATE_MEDIUM\t\t\t\t0x9\n+#define CONTROL_RATE_SLOW\t\t\t\t0xa\n+#define CONTROL_RATE_IDLE\t\t\t\t0xb\n+\n+/* Register RING_FLUSH_DONE fields */\n+#define FLUSH_DONE_MASK\t\t\t\t\t0x1\n+\n+/* Register RING_MSI_CONTROL fields */\n+#define MSI_TIMER_VAL_SHIFT\t\t\t\t16\n+#define MSI_TIMER_VAL_MASK\t\t\t\t0xffff\n+#define MSI_ENABLE_SHIFT\t\t\t\t15\n+#define MSI_ENABLE_MASK\t\t\t\t\t0x1\n+#define MSI_COUNT_SHIFT\t\t\t\t\t0\n+#define MSI_COUNT_MASK\t\t\t\t\t0x3ff\n+\n+/* Register RING_BD_READ_PTR_DDR_CONTROL fields */\n+#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT\t\t\t16\n+#define 
BD_READ_PTR_DDR_TIMER_VAL_MASK\t\t\t0xffff\n+#define BD_READ_PTR_DDR_ENABLE_SHIFT\t\t\t15\n+#define BD_READ_PTR_DDR_ENABLE_MASK\t\t\t0x1\n+\n+/* General descriptor format */\n+#define DESC_TYPE_SHIFT\t\t\t\t\t60\n+#define DESC_TYPE_MASK\t\t\t\t\t0xf\n+#define DESC_PAYLOAD_SHIFT\t\t\t\t0\n+#define DESC_PAYLOAD_MASK\t\t\t\t0x0fffffffffffffff\n+\n+/* Null descriptor format  */\n+#define NULL_TYPE\t\t\t\t\t0\n+#define NULL_TOGGLE_SHIFT\t\t\t\t59\n+#define NULL_TOGGLE_MASK\t\t\t\t0x1\n+\n+/* Header descriptor format */\n+#define HEADER_TYPE\t\t\t\t\t1\n+#define HEADER_TOGGLE_SHIFT\t\t\t\t59\n+#define HEADER_TOGGLE_MASK\t\t\t\t0x1\n+#define HEADER_ENDPKT_SHIFT\t\t\t\t57\n+#define HEADER_ENDPKT_MASK\t\t\t\t0x1\n+#define HEADER_STARTPKT_SHIFT\t\t\t\t56\n+#define HEADER_STARTPKT_MASK\t\t\t\t0x1\n+#define HEADER_BDCOUNT_SHIFT\t\t\t\t36\n+#define HEADER_BDCOUNT_MASK\t\t\t\t0x1f\n+#define HEADER_BDCOUNT_MAX\t\t\t\tHEADER_BDCOUNT_MASK\n+#define HEADER_FLAGS_SHIFT\t\t\t\t16\n+#define HEADER_FLAGS_MASK\t\t\t\t0xffff\n+#define HEADER_OPAQUE_SHIFT\t\t\t\t0\n+#define HEADER_OPAQUE_MASK\t\t\t\t0xffff\n+\n+/* Source (SRC) descriptor format */\n+\n+#define SRC_TYPE\t\t\t\t\t2\n+#define SRC_LENGTH_SHIFT\t\t\t\t44\n+#define SRC_LENGTH_MASK\t\t\t\t\t0xffff\n+#define SRC_ADDR_SHIFT\t\t\t\t\t0\n+#define SRC_ADDR_MASK\t\t\t\t\t0x00000fffffffffff\n+\n+/* Destination (DST) descriptor format */\n+#define DST_TYPE\t\t\t\t\t3\n+#define DST_LENGTH_SHIFT\t\t\t\t44\n+#define DST_LENGTH_MASK\t\t\t\t\t0xffff\n+#define DST_ADDR_SHIFT\t\t\t\t\t0\n+#define DST_ADDR_MASK\t\t\t\t\t0x00000fffffffffff\n+\n+/* Next pointer (NPTR) descriptor format */\n+#define NPTR_TYPE\t\t\t\t\t5\n+#define NPTR_TOGGLE_SHIFT\t\t\t\t59\n+#define NPTR_TOGGLE_MASK\t\t\t\t0x1\n+#define NPTR_ADDR_SHIFT\t\t\t\t\t0\n+#define NPTR_ADDR_MASK\t\t\t\t\t0x00000fffffffffff\n+\n+/* Mega source (MSRC) descriptor format */\n+#define MSRC_TYPE\t\t\t\t\t6\n+#define MSRC_LENGTH_SHIFT\t\t\t\t44\n+#define MSRC_LENGTH_MASK\t\t\t\t0xffff\n+#define MSRC_ADDR_SHIFT\t\t\t\t\t0\n+#define MSRC_ADDR_MASK\t\t\t\t\t0x00000fffffffffff\n+\n+/* Mega destination (MDST) descriptor format */\n+#define MDST_TYPE\t\t\t\t\t7\n+#define MDST_LENGTH_SHIFT\t\t\t\t44\n+#define MDST_LENGTH_MASK\t\t\t\t0xffff\n+#define MDST_ADDR_SHIFT\t\t\t\t\t0\n+#define MDST_ADDR_MASK\t\t\t\t\t0x00000fffffffffff\n+\n+static uint8_t\n+bcmfs5_is_next_table_desc(void *desc_ptr)\n+{\n+\tuint64_t desc = rm_read_desc(desc_ptr);\n+\tuint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);\n+\n+\treturn (type == NPTR_TYPE) ? 
true : false;\n+}\n+\n+static uint64_t\n+bcmfs5_next_table_desc(uint64_t next_addr)\n+{\n+\treturn (rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs5_null_desc(void)\n+{\n+\treturn rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);\n+}\n+\n+static uint64_t\n+bcmfs5_header_desc(uint32_t startpkt, uint32_t endpkt,\n+\t\t\t\t       uint32_t bdcount, uint32_t flags,\n+\t\t\t\t       uint32_t opaque)\n+{\n+\treturn (rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,\n+\t\t\t      HEADER_STARTPKT_MASK) |\n+\t\trm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK) |\n+\t\trm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK) |\n+\t\trm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK) |\n+\t\trm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK));\n+}\n+\n+static int\n+bcmfs5_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,\n+\t\t    uint32_t reqid, uint64_t desc,\n+\t\t    void **desc_ptr, void *start_desc,\n+\t\t    void *end_desc)\n+{\n+\tuint64_t d;\n+\tuint32_t nhavail, _startpkt, _endpkt, _bdcount;\n+\tint is_nxt_page = 0;\n+\n+\t/*\n+\t * Each request or packet start with a HEADER descriptor followed\n+\t * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,\n+\t * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors\n+\t * following a HEADER descriptor is represented by BDCOUNT field\n+\t * of HEADER descriptor. The max value of BDCOUNT field is 31 which\n+\t * means we can only have 31 non-HEADER descriptors following one\n+\t * HEADER descriptor.\n+\t *\n+\t * In general use, number of non-HEADER descriptors can easily go\n+\t * beyond 31. To tackle this situation, we have packet (or request)\n+\t * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.\n+\t *\n+\t * To use packet extension, the first HEADER descriptor of request\n+\t * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate\n+\t * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last\n+\t * HEADER descriptor will have STARTPKT=0 and ENDPKT=1.\n+\t */\n+\n+\tif ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {\n+\t\t/* Prepare the header descriptor */\n+\t\tnhavail = (nhcnt - nhpos);\n+\t\t_startpkt = (nhpos == 0) ? 0x1 : 0x0;\n+\t\t_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 
0x1 : 0x0;\n+\t\t_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?\n+\t\t\t\tnhavail : HEADER_BDCOUNT_MAX;\n+\t\tif (nhavail <= HEADER_BDCOUNT_MAX)\n+\t\t\t_bdcount = nhavail;\n+\t\telse\n+\t\t\t_bdcount = HEADER_BDCOUNT_MAX;\n+\t\td = bcmfs5_header_desc(_startpkt, _endpkt,\n+\t\t\t\t       _bdcount, 0x0, reqid);\n+\n+\t\t/* Write header descriptor */\n+\t\trm_write_desc(*desc_ptr, d);\n+\n+\t\t/* Point to next descriptor */\n+\t\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\t\tif (*desc_ptr == end_desc)\n+\t\t\t*desc_ptr = start_desc;\n+\n+\t\t/* Skip next pointer descriptors */\n+\t\twhile (bcmfs5_is_next_table_desc(*desc_ptr)) {\n+\t\t\tis_nxt_page = 1;\n+\t\t\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\t\t\tif (*desc_ptr == end_desc)\n+\t\t\t\t*desc_ptr = start_desc;\n+\t\t}\n+\t}\n+\n+\t/* Write desired descriptor */\n+\trm_write_desc(*desc_ptr, desc);\n+\n+\t/* Point to next descriptor */\n+\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\tif (*desc_ptr == end_desc)\n+\t\t*desc_ptr = start_desc;\n+\n+\t/* Skip next pointer descriptors */\n+\twhile (bcmfs5_is_next_table_desc(*desc_ptr)) {\n+\t\tis_nxt_page = 1;\n+\t\t*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);\n+\t\tif (*desc_ptr == end_desc)\n+\t\t\t*desc_ptr = start_desc;\n+\t}\n+\n+\treturn is_nxt_page;\n+}\n+\n+static uint64_t\n+bcmfs5_src_desc(uint64_t addr, unsigned int len)\n+{\n+\treturn (rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(len, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK) |\n+\t\trm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs5_msrc_desc(uint64_t addr, unsigned int len_div_16)\n+{\n+\treturn (rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(len_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK) |\n+\t\trm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs5_dst_desc(uint64_t addr, unsigned int len)\n+{\n+\treturn (rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(len, DST_LENGTH_SHIFT, DST_LENGTH_MASK) |\n+\t\trm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK));\n+}\n+\n+static uint64_t\n+bcmfs5_mdst_desc(uint64_t addr, unsigned int len_div_16)\n+{\n+\treturn (rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |\n+\t\trm_build_desc(len_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK) |\n+\t\trm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK));\n+}\n+\n+static bool\n+bcmfs5_sanity_check(struct bcmfs_qp_message *msg)\n+{\n+\tunsigned int i = 0;\n+\n+\tif (msg == NULL)\n+\t\treturn false;\n+\n+\tfor (i = 0; i <  msg->srcs_count; i++) {\n+\t\tif (msg->srcs_len[i] & 0xf) {\n+\t\t\tif (msg->srcs_len[i] > SRC_LENGTH_MASK)\n+\t\t\t\treturn false;\n+\t\t} else {\n+\t\t\tif (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16))\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\tfor (i = 0; i <  msg->dsts_count; i++) {\n+\t\tif (msg->dsts_len[i] & 0xf) {\n+\t\t\tif (msg->dsts_len[i] > DST_LENGTH_MASK)\n+\t\t\t\treturn false;\n+\t\t} else {\n+\t\t\tif (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16))\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\n+\treturn true;\n+}\n+\n+static void *\n+bcmfs5_enqueue_msg(struct bcmfs_queue *txq,\n+\t\t   struct bcmfs_qp_message *msg,\n+\t\t   uint32_t reqid, void *desc_ptr,\n+\t\t   void *start_desc, void *end_desc)\n+{\n+\tuint64_t d;\n+\tunsigned int src, dst;\n+\tuint32_t nhpos = 0;\n+\tint nxt_page = 0;\n+\tuint32_t nhcnt = msg->srcs_count + msg->dsts_count;\n+\n+\tif (desc_ptr == NULL || start_desc == NULL || end_desc == NULL)\n+\t\treturn 
NULL;\n+\n+\tif (desc_ptr < start_desc || end_desc <= desc_ptr)\n+\t\treturn NULL;\n+\n+\tfor (src = 0; src < msg->srcs_count; src++) {\n+\t\tif (msg->srcs_len[src] & 0xf)\n+\t\t\td = bcmfs5_src_desc(msg->srcs_addr[src],\n+\t\t\t\t\t    msg->srcs_len[src]);\n+\t\telse\n+\t\t\td = bcmfs5_msrc_desc(msg->srcs_addr[src],\n+\t\t\t\t\t     msg->srcs_len[src] / 16);\n+\n+\t\tnxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,\n+\t\t\t\t\t       d, &desc_ptr, start_desc,\n+\t\t\t\t\t       end_desc);\n+\t\tif (nxt_page)\n+\t\t\ttxq->descs_inflight++;\n+\t\tnhpos++;\n+\t}\n+\n+\tfor (dst = 0; dst < msg->dsts_count; dst++) {\n+\t\tif (msg->dsts_len[dst] & 0xf)\n+\t\t\td = bcmfs5_dst_desc(msg->dsts_addr[dst],\n+\t\t\t\t\t    msg->dsts_len[dst]);\n+\t\telse\n+\t\t\td = bcmfs5_mdst_desc(msg->dsts_addr[dst],\n+\t\t\t\t\t     msg->dsts_len[dst] / 16);\n+\n+\t\tnxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,\n+\t\t\t\t\t       d, &desc_ptr, start_desc,\n+\t\t\t\t\t       end_desc);\n+\t\tif (nxt_page)\n+\t\t\ttxq->descs_inflight++;\n+\t\tnhpos++;\n+\t}\n+\n+\ttxq->descs_inflight += nhcnt + 1;\n+\n+\treturn desc_ptr;\n+}\n+\n+static int\n+bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)\n+{\n+\tvoid *next;\n+\tint reqid;\n+\tint ret = 0;\n+\tuint64_t slab = 0;\n+\tuint32_t pos = 0;\n+\tuint8_t exit_cleanup = false;\n+\tstruct bcmfs_queue *txq = &qp->tx_q;\n+\tstruct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;\n+\n+\t/* Do sanity check on message */\n+\tif (!bcmfs5_sanity_check(msg)) {\n+\t\tBCMFS_DP_LOG(ERR, \"Invalid msg on queue %d\", qp->qpair_id);\n+\t\treturn -EIO;\n+\t}\n+\n+\t/* Scan from the beginning */\n+\t__rte_bitmap_scan_init(qp->ctx_bmp);\n+\t/* Scan bitmap to get the free pool */\n+\tret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);\n+\tif (ret == 0) {\n+\t\tBCMFS_DP_LOG(ERR, \"BD memory exhausted\");\n+\t\treturn -ERANGE;\n+\t}\n+\n+\treqid = pos + __builtin_ctzll(slab);\n+\trte_bitmap_clear(qp->ctx_bmp, reqid);\n+\tqp->ctx_pool[reqid] = (unsigned long)msg;\n+\n+\t/* Write descriptors to ring */\n+\tnext = bcmfs5_enqueue_msg(txq, msg, reqid,\n+\t\t\t\t  (uint8_t *)txq->base_addr + txq->tx_write_ptr,\n+\t\t\t\t  txq->base_addr,\n+\t\t\t\t  (uint8_t *)txq->base_addr + txq->queue_size);\n+\tif (next == NULL) {\n+\t\tBCMFS_DP_LOG(ERR, \"Enqueue for desc failed on queue %d\",\n+\t\t\t     qp->qpair_id);\n+\t\tret = -EINVAL;\n+\t\texit_cleanup = true;\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Save ring BD write offset */\n+\ttxq->tx_write_ptr = (uint32_t)((uint8_t *)next -\n+\t\t\t\t       (uint8_t *)txq->base_addr);\n+\n+\tqp->nb_pending_requests++;\n+\n+\treturn 0;\n+\n+exit:\n+\t/* Cleanup if we failed */\n+\tif (exit_cleanup)\n+\t\trte_bitmap_set(qp->ctx_bmp, reqid);\n+\n+\treturn ret;\n+}\n+\n+static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)\n+{\n+\tstruct bcmfs_queue *txq = &qp->tx_q;\n+\n+\t/* sync in bfeore ringing the door-bell */\n+\trte_wmb();\n+\n+\tFS_MMIO_WRITE32(txq->descs_inflight,\n+\t\t\t(uint8_t *)qp->ioreg + RING_DOORBELL_BD_WRITE_COUNT);\n+\n+\t/* reset the count */\n+\ttxq->descs_inflight = 0;\n+}\n+\n+static uint16_t\n+bcmfs5_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)\n+{\n+\tint err;\n+\tuint16_t reqid;\n+\tuint64_t desc;\n+\tuint16_t count = 0;\n+\tunsigned long context = 0;\n+\tstruct bcmfs_queue *hwq = &qp->cmpl_q;\n+\tuint32_t cmpl_read_offset, cmpl_write_offset;\n+\n+\t/*\n+\t * Check whether budget is valid, else set the budget to maximum\n+\t * so that all the available completions will be processed.\n+\t */\n+\tif (budget 
> qp->nb_pending_requests)\n+\t\tbudget =  qp->nb_pending_requests;\n+\n+\t/*\n+\t * Get current completion read and write offset\n+\t *\n+\t * Note: We should read completion write pointer atleast once\n+\t * after we get a MSI interrupt because HW maintains internal\n+\t * MSI status which will allow next MSI interrupt only after\n+\t * completion write pointer is read.\n+\t */\n+\tcmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);\n+\tcmpl_write_offset *= FS_RING_DESC_SIZE;\n+\tcmpl_read_offset = hwq->cmpl_read_ptr;\n+\n+\t/* read the ring cmpl write ptr before cmpl read offset */\n+\trte_smp_rmb();\n+\n+\t/* For each completed request notify mailbox clients */\n+\treqid = 0;\n+\twhile ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {\n+\t\t/* Dequeue next completion descriptor */\n+\t\tdesc = *((uint64_t *)((uint8_t *)hwq->base_addr +\n+\t\t\t\t      cmpl_read_offset));\n+\n+\t\t/* Next read offset */\n+\t\tcmpl_read_offset += FS_RING_DESC_SIZE;\n+\t\tif (cmpl_read_offset == FS_RING_CMPL_SIZE)\n+\t\t\tcmpl_read_offset = 0;\n+\n+\t\t/* Decode error from completion descriptor */\n+\t\terr = rm_cmpl_desc_to_error(desc);\n+\t\tif (err < 0)\n+\t\t\tBCMFS_DP_LOG(ERR, \"error desc rcvd\");\n+\n+\t\t/* Determine request id from completion descriptor */\n+\t\treqid = rm_cmpl_desc_to_reqid(desc);\n+\n+\t\t/* Retrieve context */\n+\t\tcontext = qp->ctx_pool[reqid];\n+\t\tif (context == 0)\n+\t\t\tBCMFS_DP_LOG(ERR, \"HW error detected\");\n+\n+\t\t/* Release reqid for recycling */\n+\t\tqp->ctx_pool[reqid] = 0;\n+\t\trte_bitmap_set(qp->ctx_bmp, reqid);\n+\n+\t\t*ops = (void *)context;\n+\n+\t\t/* Increment number of completions processed */\n+\t\tcount++;\n+\t\tbudget--;\n+\t\tops++;\n+\t}\n+\n+\thwq->cmpl_read_ptr = cmpl_read_offset;\n+\n+\tqp->nb_pending_requests -= count;\n+\n+\treturn count;\n+}\n+\n+static int\n+bcmfs5_start_qp(struct bcmfs_qp *qp)\n+{\n+\tuint32_t val, off;\n+\tuint64_t d, next_addr, msi;\n+\tint timeout;\n+\tuint32_t bd_high, bd_low, cmpl_high, cmpl_low;\n+\tstruct bcmfs_queue *tx_queue = &qp->tx_q;\n+\tstruct bcmfs_queue *cmpl_queue = &qp->cmpl_q;\n+\n+\t/* Disable/inactivate ring */\n+\tFS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);\n+\n+\t/* Configure next table pointer entries in BD memory */\n+\tfor (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {\n+\t\tnext_addr = off + FS_RING_DESC_SIZE;\n+\t\tif (next_addr == tx_queue->queue_size)\n+\t\t\tnext_addr = 0;\n+\t\tnext_addr += (uint64_t)tx_queue->base_phys_addr;\n+\t\tif (FS_RING_BD_ALIGN_CHECK(next_addr))\n+\t\t\td = bcmfs5_next_table_desc(next_addr);\n+\t\telse\n+\t\t\td = bcmfs5_null_desc();\n+\t\trm_write_desc((uint8_t *)tx_queue->base_addr + off, d);\n+\t}\n+\n+\t/*\n+\t * If user interrupt the test in between the run(Ctrl+C), then all\n+\t * subsequent test run will fail because sw cmpl_read_offset and hw\n+\t * cmpl_write_offset will be pointing at different completion BD. 
To\n+\t * handle this we should flush all the rings in the startup instead\n+\t * of shutdown function.\n+\t * Ring flush will reset hw cmpl_write_offset.\n+\t */\n+\n+\t/* Set ring flush state */\n+\ttimeout = 1000;\n+\tFS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),\n+\t\t\t(uint8_t *)qp->ioreg + RING_CONTROL);\n+\tdo {\n+\t\t/*\n+\t\t * If previous test is stopped in between the run, then\n+\t\t * sw has to read cmpl_write_offset else DME/AE will be not\n+\t\t * come out of flush state.\n+\t\t */\n+\t\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);\n+\n+\t\tif (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &\n+\t\t\t\t   FLUSH_DONE_MASK)\n+\t\t\tbreak;\n+\t\tusleep(1000);\n+\t} while (--timeout);\n+\tif (!timeout) {\n+\t\tBCMFS_DP_LOG(ERR, \"Ring flush timeout hw-queue %d\",\n+\t\t\t     qp->qpair_id);\n+\t}\n+\n+\t/* Clear ring flush state */\n+\ttimeout = 1000;\n+\tFS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);\n+\tdo {\n+\t\tif (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &\n+\t\t\t\t     FLUSH_DONE_MASK))\n+\t\t\tbreak;\n+\t\tusleep(1000);\n+\t} while (--timeout);\n+\tif (!timeout) {\n+\t\tBCMFS_DP_LOG(ERR, \"Ring clear flush timeout hw-queue %d\",\n+\t\t\t     qp->qpair_id);\n+\t}\n+\n+\t/* Program BD start address */\n+\tbd_low = lower_32_bits(tx_queue->base_phys_addr);\n+\tbd_high = upper_32_bits(tx_queue->base_phys_addr);\n+\tFS_MMIO_WRITE32(bd_low, (uint8_t *)qp->ioreg +\n+\t\t\t\tRING_BD_START_ADDRESS_LSB);\n+\tFS_MMIO_WRITE32(bd_high, (uint8_t *)qp->ioreg +\n+\t\t\t\t RING_BD_START_ADDRESS_MSB);\n+\n+\ttx_queue->tx_write_ptr = 0;\n+\n+\tfor (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)\n+\t\trm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);\n+\n+\t/* Completion read pointer will be same as HW write pointer */\n+\tcmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +\n+\t\t\t\t\t\t   RING_CMPL_WRITE_PTR);\n+\t/* Program completion start address */\n+\tcmpl_low = lower_32_bits(cmpl_queue->base_phys_addr);\n+\tcmpl_high = upper_32_bits(cmpl_queue->base_phys_addr);\n+\tFS_MMIO_WRITE32(cmpl_low, (uint8_t *)qp->ioreg +\n+\t\t\t\tRING_CMPL_START_ADDR_LSB);\n+\tFS_MMIO_WRITE32(cmpl_high, (uint8_t *)qp->ioreg +\n+\t\t\t\tRING_CMPL_START_ADDR_MSB);\n+\n+\tcmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;\n+\n+\t/* Read ring Tx, Rx, and Outstanding counts to clear */\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);\n+\tFS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);\n+\n+\t/* Configure per-Ring MSI registers with dummy location */\n+\tmsi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);\n+\tFS_MMIO_WRITE32((msi & 0xFFFFFFFF),\n+\t\t\t(uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);\n+\tFS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),\n+\t\t\t(uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);\n+\tFS_MMIO_WRITE32(qp->qpair_id, (uint8_t *)qp->ioreg +\n+\t\t\t\t      RING_MSI_DATA_VALUE);\n+\n+\t/* Configure RING_MSI_CONTROL */\n+\tval = 0;\n+\tval |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);\n+\tval |= BIT(MSI_ENABLE_SHIFT);\n+\tval |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;\n+\tFS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);\n+\n+\t/* Enable/activate ring */\n+\tval = BIT(CONTROL_ACTIVE_SHIFT);\n+\tFS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);\n+\n+\treturn 
0;\n+}\n+\n+static void\n+bcmfs5_shutdown_qp(struct bcmfs_qp *qp)\n+{\n+\t/* Disable/inactivate ring */\n+\tFS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);\n+}\n+\n+struct bcmfs_hw_queue_pair_ops bcmfs5_qp_ops = {\n+\t.name = \"fs5\",\n+\t.enq_one_req = bcmfs5_enqueue_single_request_qp,\n+\t.ring_db = bcmfs5_write_doorbell,\n+\t.dequeue = bcmfs5_dequeue_qp,\n+\t.startq = bcmfs5_start_qp,\n+\t.stopq = bcmfs5_shutdown_qp,\n+};\n+\n+RTE_INIT(bcmfs5_register_qp_ops)\n+{\n+\tbcmfs_hw_queue_pair_register_ops(&bcmfs5_qp_ops);\n+}\ndiff --git a/drivers/crypto/bcmfs/hw/bcmfs_rm_common.c b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.c\nnew file mode 100644\nindex 000000000..9445d28f9\n--- /dev/null\n+++ b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.c\n@@ -0,0 +1,82 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2020 Broadcom.\n+ * All rights reserved.\n+ */\n+\n+#include \"bcmfs_hw_defs.h\"\n+#include \"bcmfs_rm_common.h\"\n+\n+/* Completion descriptor format */\n+#define FS_CMPL_OPAQUE_SHIFT\t\t\t0\n+#define FS_CMPL_OPAQUE_MASK\t\t\t0xffff\n+#define FS_CMPL_ENGINE_STATUS_SHIFT\t\t16\n+#define FS_CMPL_ENGINE_STATUS_MASK\t\t0xffff\n+#define FS_CMPL_DME_STATUS_SHIFT\t\t32\n+#define FS_CMPL_DME_STATUS_MASK\t\t\t0xffff\n+#define FS_CMPL_RM_STATUS_SHIFT\t\t\t48\n+#define FS_CMPL_RM_STATUS_MASK\t\t\t0xffff\n+/* Completion RM status code */\n+#define FS_RM_STATUS_CODE_SHIFT\t\t\t0\n+#define FS_RM_STATUS_CODE_MASK\t\t\t0x3ff\n+#define FS_RM_STATUS_CODE_GOOD\t\t\t0x0\n+#define FS_RM_STATUS_CODE_AE_TIMEOUT\t\t0x3ff\n+\n+\n+/* Completion DME status code */\n+#define FS_DME_STATUS_MEM_COR_ERR\t\tBIT(0)\n+#define FS_DME_STATUS_MEM_UCOR_ERR\t\tBIT(1)\n+#define FS_DME_STATUS_FIFO_UNDRFLOW\t\tBIT(2)\n+#define FS_DME_STATUS_FIFO_OVERFLOW\t\tBIT(3)\n+#define FS_DME_STATUS_RRESP_ERR\t\t\tBIT(4)\n+#define FS_DME_STATUS_BRESP_ERR\t\t\tBIT(5)\n+#define FS_DME_STATUS_ERROR_MASK\t\t(FS_DME_STATUS_MEM_COR_ERR | \\\n+\t\t\t\t\t\t FS_DME_STATUS_MEM_UCOR_ERR | \\\n+\t\t\t\t\t\t FS_DME_STATUS_FIFO_UNDRFLOW | \\\n+\t\t\t\t\t\t FS_DME_STATUS_FIFO_OVERFLOW | \\\n+\t\t\t\t\t\t FS_DME_STATUS_RRESP_ERR | \\\n+\t\t\t\t\t\t FS_DME_STATUS_BRESP_ERR)\n+\n+/* APIs related to ring manager descriptors */\n+uint64_t\n+rm_build_desc(uint64_t val, uint32_t shift,\n+\t   uint64_t mask)\n+{\n+\treturn ((val & mask) << shift);\n+}\n+\n+uint64_t\n+rm_read_desc(void *desc_ptr)\n+{\n+\treturn le64_to_cpu(*((uint64_t *)desc_ptr));\n+}\n+\n+void\n+rm_write_desc(void *desc_ptr, uint64_t desc)\n+{\n+\t*((uint64_t *)desc_ptr) = cpu_to_le64(desc);\n+}\n+\n+uint32_t\n+rm_cmpl_desc_to_reqid(uint64_t cmpl_desc)\n+{\n+\treturn (uint32_t)(cmpl_desc & FS_CMPL_OPAQUE_MASK);\n+}\n+\n+int\n+rm_cmpl_desc_to_error(uint64_t cmpl_desc)\n+{\n+\tuint32_t status;\n+\n+\tstatus = FS_DESC_DEC(cmpl_desc, FS_CMPL_DME_STATUS_SHIFT,\n+\t\t\t     FS_CMPL_DME_STATUS_MASK);\n+\tif (status & FS_DME_STATUS_ERROR_MASK)\n+\t\treturn -EIO;\n+\n+\tstatus = FS_DESC_DEC(cmpl_desc, FS_CMPL_RM_STATUS_SHIFT,\n+\t\t\t     FS_CMPL_RM_STATUS_MASK);\n+\tstatus &= FS_RM_STATUS_CODE_MASK;\n+\tif (status == FS_RM_STATUS_CODE_AE_TIMEOUT)\n+\t\treturn -ETIMEDOUT;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/crypto/bcmfs/hw/bcmfs_rm_common.h b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.h\nnew file mode 100644\nindex 000000000..5cbafa0da\n--- /dev/null\n+++ b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.h\n@@ -0,0 +1,46 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _BCMFS_RM_COMMON_H_\n+#define 
_BCMFS_RM_COMMON_H_\n+\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_io.h>\n+\n+/* Descriptor helper macros */\n+#define FS_DESC_DEC(d, s, m)\t\t\t(((d) >> (s)) & (m))\n+\n+#define FS_RING_BD_ALIGN_CHECK(addr)\t\t\t\\\n+\t\t\t(!((addr) & ((0x1 << FS_RING_BD_ALIGN_ORDER) - 1)))\n+\n+#define cpu_to_le64     rte_cpu_to_le_64\n+#define cpu_to_le32     rte_cpu_to_le_32\n+#define cpu_to_le16     rte_cpu_to_le_16\n+\n+#define le64_to_cpu     rte_le_to_cpu_64\n+#define le32_to_cpu     rte_le_to_cpu_32\n+#define le16_to_cpu     rte_le_to_cpu_16\n+\n+#define lower_32_bits(x) ((uint32_t)(x))\n+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))\n+\n+uint64_t\n+rm_build_desc(uint64_t val, uint32_t shift,\n+\t   uint64_t mask);\n+uint64_t\n+rm_read_desc(void *desc_ptr);\n+\n+void\n+rm_write_desc(void *desc_ptr, uint64_t desc);\n+\n+uint32_t\n+rm_cmpl_desc_to_reqid(uint64_t cmpl_desc);\n+\n+int\n+rm_cmpl_desc_to_error(uint64_t cmpl_desc);\n+\n+#endif /* _BCMFS_RM_COMMON_H_ */\n+\ndiff --git a/drivers/crypto/bcmfs/meson.build b/drivers/crypto/bcmfs/meson.build\nindex 7e2bcbf14..cd58bd5e2 100644\n--- a/drivers/crypto/bcmfs/meson.build\n+++ b/drivers/crypto/bcmfs/meson.build\n@@ -8,5 +8,8 @@ sources = files(\n \t\t'bcmfs_logs.c',\n \t\t'bcmfs_device.c',\n \t\t'bcmfs_vfio.c',\n-\t\t'bcmfs_qp.c'\n+\t\t'bcmfs_qp.c',\n+\t\t'hw/bcmfs4_rm.c',\n+\t\t'hw/bcmfs5_rm.c',\n+\t\t'hw/bcmfs_rm_common.c'\n \t\t)\n",
    "prefixes": [
        "v1",
        "4/8"
    ]
}