get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
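
As an illustration of driving this endpoint (not part of the API response shown below), here is a minimal sketch using Python's requests library. The read path needs no credentials; the update path assumes token authentication in the usual Django REST Framework form, and both the token value and the chosen "state" string are placeholders whose accepted values depend on the Patchwork instance.

# Sketch: read a patch, then update one of its writable fields.
# Assumes the `requests` package; TOKEN is a hypothetical placeholder.
import requests

BASE = "http://patches.dpdk.org/api"
PATCH_ID = 39527
TOKEN = "<your-api-token>"  # placeholder; issued by your Patchwork account

# GET: show a patch (read-only, no authentication required).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/",
                    headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partially update a patch (requires maintainer credentials).
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                      headers={"Authorization": f"Token {TOKEN}"},
                      json={"state": "accepted"})
resp.raise_for_status()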

GET /api/patches/39527/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 39527,
    "url": "http://patches.dpdk.org/api/patches/39527/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1525853881-140647-4-git-send-email-abhinandan.gujjar@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1525853881-140647-4-git-send-email-abhinandan.gujjar@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1525853881-140647-4-git-send-email-abhinandan.gujjar@intel.com",
    "date": "2018-05-09T08:17:59",
    "name": "[dpdk-dev,v5,3/5] eventdev: add crypto adapter implementation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "abb8cc42795cfb78bc5f34c841dcf057a8c40bbf",
    "submitter": {
        "id": 883,
        "url": "http://patches.dpdk.org/api/people/883/?format=api",
        "name": "Gujjar, Abhinandan S",
        "email": "abhinandan.gujjar@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1525853881-140647-4-git-send-email-abhinandan.gujjar@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/39527/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/39527/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 6A31BAAB7;\n\tWed,  9 May 2018 10:17:46 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id BA234AAC1\n\tfor <dev@dpdk.org>; Wed,  9 May 2018 10:17:43 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t09 May 2018 01:17:43 -0700",
            "from unknown (HELO localhost.localdomain) ([10.224.122.195])\n\tby fmsmga001.fm.intel.com with ESMTP; 09 May 2018 01:17:40 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.49,381,1520924400\"; d=\"scan'208\";a=\"53647833\"",
        "From": "Abhinandan Gujjar <abhinandan.gujjar@intel.com>",
        "To": "jerin.jacob@caviumnetworks.com, hemant.agrawal@nxp.com,\n\takhil.goyal@nxp.com, dev@dpdk.org",
        "Cc": "narender.vangati@intel.com, abhinandan.gujjar@intel.com,\n\tnikhil.rao@intel.com, gage.eads@intel.com",
        "Date": "Wed,  9 May 2018 13:47:59 +0530",
        "Message-Id": "<1525853881-140647-4-git-send-email-abhinandan.gujjar@intel.com>",
        "X-Mailer": "git-send-email 1.9.1",
        "In-Reply-To": "<1525853881-140647-1-git-send-email-abhinandan.gujjar@intel.com>",
        "References": "<1525853881-140647-1-git-send-email-abhinandan.gujjar@intel.com>",
        "Subject": "[dpdk-dev] [v5,3/5] eventdev: add crypto adapter implementation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds common code for the crypto adapter to support\nSW and HW based transfer mechanisms. The adapter uses an EAL\nservice core function for SW based packet transfer and uses\nthe eventdev PMD functions to configure HW based packet\ntransfer between the crypto device and the event device.\nThis patch also adds adapter to the meson build system &\nupdates the necessary makefile & map file.\n\nSigned-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>\nSigned-off-by: Nikhil Rao <nikhil.rao@intel.com>\nSigned-off-by: Gage Eads <gage.eads@intel.com>\nAcked-by: Akhil Goyal <akhil.goyal@nxp.com>\n---\n config/common_base                             |    1 +\n config/rte_config.h                            |    1 +\n lib/Makefile                                   |    3 +-\n lib/librte_eventdev/Makefile                   |    3 +\n lib/librte_eventdev/meson.build                |    8 +-\n lib/librte_eventdev/rte_event_crypto_adapter.c | 1128 ++++++++++++++++++++++++\n lib/librte_eventdev/rte_eventdev_version.map   |   13 +-\n 7 files changed, 1152 insertions(+), 5 deletions(-)\n create mode 100644 lib/librte_eventdev/rte_event_crypto_adapter.c",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 03a8688..8420d29 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -577,6 +577,7 @@ CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n\n CONFIG_RTE_EVENT_MAX_DEVS=16\n CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64\n CONFIG_RTE_EVENT_TIMER_ADAPTER_NUM_MAX=32\n+CONFIG_RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE=32\n \n #\n # Compile PMD for skeleton event device\ndiff --git a/config/rte_config.h b/config/rte_config.h\nindex 6d2fa79..04f6377 100644\n--- a/config/rte_config.h\n+++ b/config/rte_config.h\n@@ -61,6 +61,7 @@\n #define RTE_EVENT_MAX_DEVS 16\n #define RTE_EVENT_MAX_QUEUES_PER_DEV 64\n #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32\n+#define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32\n \n /* rawdev defines */\n #define RTE_RAWDEV_MAX_DEVS 10\ndiff --git a/lib/Makefile b/lib/Makefile\nindex 057bf78..02db127 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -32,7 +32,8 @@ DEPDIRS-librte_security := librte_eal librte_mempool librte_ring librte_mbuf\n DEPDIRS-librte_security += librte_ethdev\n DEPDIRS-librte_security += librte_cryptodev\n DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev\n-DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ethdev librte_hash librte_mempool librte_timer\n+DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ethdev librte_hash \\\n+                           librte_mempool librte_timer librte_cryptodev\n DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += librte_rawdev\n DEPDIRS-librte_rawdev := librte_eal librte_ethdev\n DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost\ndiff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile\nindex 297df4a..804b036 100644\n--- a/lib/librte_eventdev/Makefile\n+++ b/lib/librte_eventdev/Makefile\n@@ -15,12 +15,14 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API\n CFLAGS += -O3\n CFLAGS += $(WERROR_FLAGS)\n LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash -lrte_mempool -lrte_timer\n+LDLIBS += -lrte_cryptodev -lrte_mempool\n \n # library source files\n SRCS-y += rte_eventdev.c\n SRCS-y += rte_event_ring.c\n SRCS-y += rte_event_eth_rx_adapter.c\n SRCS-y += rte_event_timer_adapter.c\n+SRCS-y += rte_event_crypto_adapter.c\n \n # export include files\n SYMLINK-y-include += rte_eventdev.h\n@@ -31,6 +33,7 @@ SYMLINK-y-include += rte_event_ring.h\n SYMLINK-y-include += rte_event_eth_rx_adapter.h\n SYMLINK-y-include += rte_event_timer_adapter.h\n SYMLINK-y-include += rte_event_timer_adapter_pmd.h\n+SYMLINK-y-include += rte_event_crypto_adapter.h\n \n # versioning export map\n EXPORT_MAP := rte_eventdev_version.map\ndiff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build\nindex 232b870..df9be41 100644\n--- a/lib/librte_eventdev/meson.build\n+++ b/lib/librte_eventdev/meson.build\n@@ -6,7 +6,8 @@ allow_experimental_apis = true\n sources = files('rte_eventdev.c',\n \t\t'rte_event_ring.c',\n \t\t'rte_event_eth_rx_adapter.c',\n-\t\t'rte_event_timer_adapter.c')\n+\t\t'rte_event_timer_adapter.c',\n+\t\t'rte_event_crypto_adapter.c')\n headers = files('rte_eventdev.h',\n \t\t'rte_eventdev_pmd.h',\n \t\t'rte_eventdev_pmd_pci.h',\n@@ -14,5 +15,6 @@ headers = files('rte_eventdev.h',\n \t\t'rte_event_ring.h',\n \t\t'rte_event_eth_rx_adapter.h',\n \t\t'rte_event_timer_adapter.h',\n-\t\t'rte_event_timer_adapter_pmd.h')\n-deps += ['ring', 'ethdev', 'hash', 'mempool', 'timer']\n+\t\t'rte_event_timer_adapter_pmd.h',\n+\t\t'rte_event_crypto_adapter.h')\n+deps += ['ring', 'ethdev', 'hash', 'mempool', 'timer', 'cryptodev']\ndiff --git 
a/lib/librte_eventdev/rte_event_crypto_adapter.c b/lib/librte_eventdev/rte_event_crypto_adapter.c\nnew file mode 100644\nindex 0000000..831f842\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_event_crypto_adapter.c\n@@ -0,0 +1,1128 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation.\n+ * All rights reserved.\n+ */\n+\n+#include <string.h>\n+#include <stdbool.h>\n+#include <rte_common.h>\n+#include <rte_dev.h>\n+#include <rte_errno.h>\n+#include <rte_cryptodev.h>\n+#include <rte_cryptodev_pmd.h>\n+#include <rte_log.h>\n+#include <rte_malloc.h>\n+#include <rte_service_component.h>\n+\n+#include \"rte_eventdev.h\"\n+#include \"rte_eventdev_pmd.h\"\n+#include \"rte_event_crypto_adapter.h\"\n+\n+#define BATCH_SIZE 32\n+#define DEFAULT_MAX_NB 128\n+#define CRYPTO_ADAPTER_NAME_LEN 32\n+#define CRYPTO_ADAPTER_MEM_NAME_LEN 32\n+#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100\n+\n+/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD\n+ * iterations of eca_crypto_adapter_enq_run()\n+ */\n+#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024\n+\n+struct rte_event_crypto_adapter {\n+\t/* Event device identifier */\n+\tuint8_t eventdev_id;\n+\t/* Event port identifier */\n+\tuint8_t event_port_id;\n+\t/* Store event device's implicit release capability */\n+\tuint8_t implicit_release_disabled;\n+\t/* Max crypto ops processed in any service function invocation */\n+\tuint32_t max_nb;\n+\t/* Lock to serialize config updates with service function */\n+\trte_spinlock_t lock;\n+\t/* Next crypto device to be processed */\n+\tuint16_t next_cdev_id;\n+\t/* Per crypto device structure */\n+\tstruct crypto_device_info *cdevs;\n+\t/* Loop counter to flush crypto ops */\n+\tuint16_t transmit_loop_count;\n+\t/* Per instance stats structure */\n+\tstruct rte_event_crypto_adapter_stats crypto_stats;\n+\t/* Configuration callback for rte_service configuration */\n+\trte_event_crypto_adapter_conf_cb conf_cb;\n+\t/* Configuration callback argument */\n+\tvoid *conf_arg;\n+\t/* Set if  default_cb is being used */\n+\tint default_cb_arg;\n+\t/* Service initialization state */\n+\tuint8_t service_inited;\n+\t/* Memory allocation name */\n+\tchar mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];\n+\t/* Socket identifier cached from eventdev */\n+\tint socket_id;\n+\t/* Per adapter EAL service */\n+\tuint32_t service_id;\n+\t/* No. 
of queue pairs configured */\n+\tuint16_t nb_qps;\n+\t/* Adapter mode */\n+\tenum rte_event_crypto_adapter_mode mode;\n+} __rte_cache_aligned;\n+\n+/* Per crypto device information */\n+struct crypto_device_info {\n+\t/* Pointer to cryptodev */\n+\tstruct rte_cryptodev *dev;\n+\t/* Pointer to queue pair info */\n+\tstruct crypto_queue_pair_info *qpairs;\n+\t/* Next queue pair to be processed */\n+\tuint16_t next_queue_pair_id;\n+\t/* Set to indicate cryptodev->eventdev packet\n+\t * transfer uses a hardware mechanism\n+\t */\n+\tuint8_t internal_event_port;\n+\t/* Set to indicate processing has been started */\n+\tuint8_t dev_started;\n+\t/* If num_qpairs > 0, the start callback will\n+\t * be invoked if not already invoked\n+\t */\n+\tuint16_t num_qpairs;\n+} __rte_cache_aligned;\n+\n+/* Per queue pair information */\n+struct crypto_queue_pair_info {\n+\t/* Set to indicate queue pair is enabled */\n+\tbool qp_enabled;\n+\t/* Pointer to hold rte_crypto_ops for batching */\n+\tstruct rte_crypto_op **op_buffer;\n+\t/* No of crypto ops accumulated */\n+\tuint8_t len;\n+} __rte_cache_aligned;\n+\n+static struct rte_event_crypto_adapter **event_crypto_adapter;\n+\n+/* Macros to check for valid adapter */\n+#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \\\n+\tif (!eca_valid_id(id)) { \\\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid crypto adapter id = %d\\n\", id); \\\n+\t\treturn retval; \\\n+\t} \\\n+} while (0)\n+\n+static inline int\n+eca_valid_id(uint8_t id)\n+{\n+\treturn id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;\n+}\n+\n+static int\n+eca_init(void)\n+{\n+\tconst char *name = \"crypto_adapter_array\";\n+\tconst struct rte_memzone *mz;\n+\tunsigned int sz;\n+\n+\tsz = sizeof(*event_crypto_adapter) *\n+\t    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;\n+\tsz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);\n+\n+\tmz = rte_memzone_lookup(name);\n+\tif (mz == NULL) {\n+\t\tmz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,\n+\t\t\t\t\t\t RTE_CACHE_LINE_SIZE);\n+\t\tif (mz == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to reserve memzone err = %\"\n+\t\t\t\t\tPRId32, rte_errno);\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\tevent_crypto_adapter = mz->addr;\n+\treturn 0;\n+}\n+\n+static inline struct rte_event_crypto_adapter *\n+eca_id_to_adapter(uint8_t id)\n+{\n+\treturn event_crypto_adapter ?\n+\t\tevent_crypto_adapter[id] : NULL;\n+}\n+\n+static int\n+eca_default_config_cb(uint8_t id, uint8_t dev_id,\n+\t\t\tstruct rte_event_crypto_adapter_conf *conf, void *arg)\n+{\n+\tstruct rte_event_dev_config dev_conf;\n+\tstruct rte_eventdev *dev;\n+\tuint8_t port_id;\n+\tint started;\n+\tint ret;\n+\tstruct rte_event_port_conf *port_conf = arg;\n+\tstruct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);\n+\n+\tdev = &rte_eventdevs[adapter->eventdev_id];\n+\tdev_conf = dev->data->dev_conf;\n+\n+\tstarted = dev->data->dev_started;\n+\tif (started)\n+\t\trte_event_dev_stop(dev_id);\n+\tport_id = dev_conf.nb_event_ports;\n+\tdev_conf.nb_event_ports += 1;\n+\tret = rte_event_dev_configure(dev_id, &dev_conf);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to configure event dev %u\\n\", dev_id);\n+\t\tif (started) {\n+\t\t\tif (rte_event_dev_start(dev_id))\n+\t\t\t\treturn -EIO;\n+\t\t}\n+\t\treturn ret;\n+\t}\n+\n+\tret = rte_event_port_setup(dev_id, port_id, port_conf);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to setup event port %u\\n\", port_id);\n+\t\treturn ret;\n+\t}\n+\n+\tconf->event_port_id = port_id;\n+\tconf->max_nb = DEFAULT_MAX_NB;\n+\tif (started)\n+\t\tret = 
rte_event_dev_start(dev_id);\n+\n+\tadapter->default_cb_arg = 1;\n+\treturn ret;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,\n+\t\t\t\trte_event_crypto_adapter_conf_cb conf_cb,\n+\t\t\t\tenum rte_event_crypto_adapter_mode mode,\n+\t\t\t\tvoid *conf_arg)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\tchar mem_name[CRYPTO_ADAPTER_NAME_LEN];\n+\tstruct rte_event_dev_info dev_info;\n+\tint socket_id;\n+\tuint8_t i;\n+\tint ret;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (conf_cb == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (event_crypto_adapter == NULL) {\n+\t\tret = eca_init();\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter != NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"Crypto adapter id %u already exists!\", id);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tsocket_id = rte_event_dev_socket_id(dev_id);\n+\tsnprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,\n+\t\t \"rte_event_crypto_adapter_%d\", id);\n+\n+\tadapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),\n+\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (adapter == NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get mem for event crypto adapter!\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tret = rte_event_dev_info_get(dev_id, &dev_info);\n+\tif (ret < 0) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get info for eventdev %d: %s!\",\n+\t\t\t\t dev_id, dev_info.driver_name);\n+\t\treturn ret;\n+\t}\n+\n+\tadapter->implicit_release_disabled = (dev_info.event_dev_cap &\n+\t\t\tRTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);\n+\tadapter->eventdev_id = dev_id;\n+\tadapter->socket_id = socket_id;\n+\tadapter->conf_cb = conf_cb;\n+\tadapter->conf_arg = conf_arg;\n+\tadapter->mode = mode;\n+\tstrcpy(adapter->mem_name, mem_name);\n+\tadapter->cdevs = rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\t\trte_cryptodev_count() *\n+\t\t\t\t\tsizeof(struct crypto_device_info), 0,\n+\t\t\t\t\tsocket_id);\n+\tif (adapter->cdevs == NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get mem for crypto devices\\n\");\n+\t\trte_free(adapter);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trte_spinlock_init(&adapter->lock);\n+\tfor (i = 0; i < rte_cryptodev_count(); i++)\n+\t\tadapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);\n+\n+\tevent_crypto_adapter[id] = adapter;\n+\n+\treturn 0;\n+}\n+\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,\n+\t\t\t\tstruct rte_event_port_conf *port_config,\n+\t\t\t\tenum rte_event_crypto_adapter_mode mode)\n+{\n+\tstruct rte_event_port_conf *pc;\n+\tint ret;\n+\n+\tif (port_config == NULL)\n+\t\treturn -EINVAL;\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tpc = rte_malloc(NULL, sizeof(*pc), 0);\n+\tif (pc == NULL)\n+\t\treturn -ENOMEM;\n+\t*pc = *port_config;\n+\tret = rte_event_crypto_adapter_create_ext(id, dev_id,\n+\t\t\t\t\t\t  eca_default_config_cb,\n+\t\t\t\t\t\t  mode,\n+\t\t\t\t\t\t  pc);\n+\tif (ret)\n+\t\trte_free(pc);\n+\n+\treturn ret;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_free(uint8_t id)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->nb_qps) {\n+\t\tRTE_EDEV_LOG_ERR(\"%\" PRIu16 \"Queue pairs not deleted\",\n+\t\t\t\tadapter->nb_qps);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tif 
(adapter->default_cb_arg)\n+\t\trte_free(adapter->conf_arg);\n+\trte_free(adapter->cdevs);\n+\trte_free(adapter);\n+\tevent_crypto_adapter[id] = NULL;\n+\n+\treturn 0;\n+}\n+\n+static inline unsigned int\n+eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,\n+\t\t struct rte_event *ev, unsigned int cnt)\n+{\n+\tstruct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;\n+\tunion rte_event_crypto_metadata *m_data = NULL;\n+\tstruct crypto_queue_pair_info *qp_info = NULL;\n+\tstruct rte_crypto_op *crypto_op;\n+\tunsigned int i, n;\n+\tuint16_t qp_id, len, ret;\n+\tuint8_t cdev_id;\n+\n+\tlen = 0;\n+\tret = 0;\n+\tn = 0;\n+\tstats->event_deq_count += cnt;\n+\n+\tfor (i = 0; i < cnt; i++) {\n+\t\tcrypto_op = ev[i].event_ptr;\n+\t\tif (crypto_op == NULL)\n+\t\t\tcontinue;\n+\t\tif (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\t\tm_data = rte_cryptodev_sym_session_get_private_data(\n+\t\t\t\t\tcrypto_op->sym->session);\n+\t\t\tif (m_data == NULL) {\n+\t\t\t\trte_pktmbuf_free(crypto_op->sym->m_src);\n+\t\t\t\trte_crypto_op_free(crypto_op);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tcdev_id = m_data->request_info.cdev_id;\n+\t\t\tqp_id = m_data->request_info.queue_pair_id;\n+\t\t\tqp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];\n+\t\t\tif (qp_info == NULL) {\n+\t\t\t\trte_pktmbuf_free(crypto_op->sym->m_src);\n+\t\t\t\trte_crypto_op_free(crypto_op);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tlen = qp_info->len;\n+\t\t\tqp_info->op_buffer[len] = crypto_op;\n+\t\t\tlen++;\n+\t\t} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&\n+\t\t\t\tcrypto_op->private_data_offset) {\n+\t\t\tm_data = (union rte_event_crypto_metadata *)\n+\t\t\t\t ((uint8_t *)crypto_op +\n+\t\t\t\t\tcrypto_op->private_data_offset);\n+\t\t\tcdev_id = m_data->request_info.cdev_id;\n+\t\t\tqp_id = m_data->request_info.queue_pair_id;\n+\t\t\tqp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];\n+\t\t\tif (qp_info == NULL) {\n+\t\t\t\trte_pktmbuf_free(crypto_op->sym->m_src);\n+\t\t\t\trte_crypto_op_free(crypto_op);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tlen = qp_info->len;\n+\t\t\tqp_info->op_buffer[len] = crypto_op;\n+\t\t\tlen++;\n+\t\t} else {\n+\t\t\trte_pktmbuf_free(crypto_op->sym->m_src);\n+\t\t\trte_crypto_op_free(crypto_op);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tif (len == BATCH_SIZE) {\n+\t\t\tstruct rte_crypto_op **op_buffer = qp_info->op_buffer;\n+\t\t\tret = rte_cryptodev_enqueue_burst(cdev_id,\n+\t\t\t\t\t\t\t  qp_id,\n+\t\t\t\t\t\t\t  op_buffer,\n+\t\t\t\t\t\t\t  BATCH_SIZE);\n+\n+\t\t\tstats->crypto_enq_count += ret;\n+\n+\t\t\twhile (ret < len) {\n+\t\t\t\tstruct rte_crypto_op *op;\n+\t\t\t\top = op_buffer[ret++];\n+\t\t\t\tstats->crypto_enq_fail++;\n+\t\t\t\trte_pktmbuf_free(op->sym->m_src);\n+\t\t\t\trte_crypto_op_free(op);\n+\t\t\t}\n+\n+\t\t\tlen = 0;\n+\t\t}\n+\n+\t\tif (qp_info)\n+\t\t\tqp_info->len = len;\n+\t\tn += ret;\n+\t}\n+\n+\treturn n;\n+}\n+\n+static unsigned int\n+eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)\n+{\n+\tstruct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;\n+\tstruct crypto_device_info *curr_dev;\n+\tstruct crypto_queue_pair_info *curr_queue;\n+\tstruct rte_crypto_op **op_buffer;\n+\tstruct rte_cryptodev *dev;\n+\tuint8_t cdev_id;\n+\tuint16_t qp;\n+\tuint16_t ret;\n+\tuint16_t num_cdev = rte_cryptodev_count();\n+\n+\tret = 0;\n+\tfor (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {\n+\t\tcurr_dev = &adapter->cdevs[cdev_id];\n+\t\tif (curr_dev == NULL)\n+\t\t\tcontinue;\n+\t\tdev = curr_dev->dev;\n+\n+\t\tfor (qp = 0; qp 
< dev->data->nb_queue_pairs; qp++) {\n+\n+\t\t\tcurr_queue = &curr_dev->qpairs[qp];\n+\t\t\tif (!curr_queue->qp_enabled)\n+\t\t\t\tcontinue;\n+\n+\t\t\top_buffer = curr_queue->op_buffer;\n+\t\t\tret = rte_cryptodev_enqueue_burst(cdev_id,\n+\t\t\t\t\t\t\t  qp,\n+\t\t\t\t\t\t\t  op_buffer,\n+\t\t\t\t\t\t\t  curr_queue->len);\n+\t\t\tstats->crypto_enq_count += ret;\n+\n+\t\t\twhile (ret < curr_queue->len) {\n+\t\t\t\tstruct rte_crypto_op *op;\n+\t\t\t\top = op_buffer[ret++];\n+\t\t\t\tstats->crypto_enq_fail++;\n+\t\t\t\trte_pktmbuf_free(op->sym->m_src);\n+\t\t\t\trte_crypto_op_free(op);\n+\t\t\t}\n+\t\t\tcurr_queue->len = 0;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,\n+\t\t\tunsigned int max_enq)\n+{\n+\tstruct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;\n+\tstruct rte_event ev[BATCH_SIZE];\n+\tunsigned int nb_enq, nb_enqueued;\n+\tuint16_t n;\n+\tuint8_t event_dev_id = adapter->eventdev_id;\n+\tuint8_t event_port_id = adapter->event_port_id;\n+\n+\tnb_enqueued = 0;\n+\tif (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)\n+\t\treturn 0;\n+\n+\tfor (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {\n+\t\tstats->event_poll_count++;\n+\t\tn = rte_event_dequeue_burst(event_dev_id,\n+\t\t\t\t\t    event_port_id, ev, BATCH_SIZE, 0);\n+\n+\t\tif (!n)\n+\t\t\tbreak;\n+\n+\t\tnb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);\n+\t}\n+\n+\tif ((++adapter->transmit_loop_count &\n+\t\t(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {\n+\t\tnb_enqueued += eca_crypto_enq_flush(adapter);\n+\t}\n+\n+\treturn nb_enqueued;\n+}\n+\n+static inline void\n+eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,\n+\t\t  struct rte_crypto_op **ops, uint16_t num)\n+{\n+\tstruct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;\n+\tunion rte_event_crypto_metadata *m_data = NULL;\n+\tuint8_t event_dev_id = adapter->eventdev_id;\n+\tuint8_t event_port_id = adapter->event_port_id;\n+\tstruct rte_event events[BATCH_SIZE];\n+\tuint16_t nb_enqueued, nb_ev;\n+\tuint8_t retry;\n+\tuint8_t i;\n+\n+\tnb_ev = 0;\n+\tretry = 0;\n+\tnb_enqueued = 0;\n+\tnum = RTE_MIN(num, BATCH_SIZE);\n+\tfor (i = 0; i < num; i++) {\n+\t\tstruct rte_event *ev = &events[nb_ev++];\n+\t\tif (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\t\tm_data = rte_cryptodev_sym_session_get_private_data(\n+\t\t\t\t\tops[i]->sym->session);\n+\t\t} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&\n+\t\t\t\tops[i]->private_data_offset) {\n+\t\t\tm_data = (union rte_event_crypto_metadata *)\n+\t\t\t\t ((uint8_t *)ops[i] +\n+\t\t\t\t  ops[i]->private_data_offset);\n+\t\t}\n+\n+\t\tif (unlikely(m_data == NULL)) {\n+\t\t\trte_pktmbuf_free(ops[i]->sym->m_src);\n+\t\t\trte_crypto_op_free(ops[i]);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\trte_memcpy(ev, &m_data->response_info, sizeof(ev));\n+\t\tev->event_ptr = ops[i];\n+\t\tev->event_type = RTE_EVENT_TYPE_CRYPTODEV;\n+\t\tif (adapter->implicit_release_disabled)\n+\t\t\tev->op = RTE_EVENT_OP_FORWARD;\n+\t\telse\n+\t\t\tev->op = RTE_EVENT_OP_NEW;\n+\t}\n+\n+\tdo {\n+\t\tnb_enqueued += rte_event_enqueue_burst(event_dev_id,\n+\t\t\t\t\t\t  event_port_id,\n+\t\t\t\t\t\t  &events[nb_enqueued],\n+\t\t\t\t\t\t  nb_ev - nb_enqueued);\n+\t} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&\n+\t\t nb_enqueued < nb_ev);\n+\n+\t/* Free mbufs and rte_crypto_ops for failed events */\n+\tfor (i = nb_enqueued; i < nb_ev; i++) {\n+\t\tstruct rte_crypto_op *op = 
events[i].event_ptr;\n+\t\trte_pktmbuf_free(op->sym->m_src);\n+\t\trte_crypto_op_free(op);\n+\t}\n+\n+\tstats->event_enq_fail_count += nb_ev - nb_enqueued;\n+\tstats->event_enq_count += nb_enqueued;\n+\tstats->event_enq_retry_count += retry - 1;\n+}\n+\n+static inline unsigned int\n+eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,\n+\t\t\tunsigned int max_deq)\n+{\n+\tstruct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;\n+\tstruct crypto_device_info *curr_dev;\n+\tstruct crypto_queue_pair_info *curr_queue;\n+\tstruct rte_crypto_op *ops[BATCH_SIZE];\n+\tuint16_t n, nb_deq;\n+\tstruct rte_cryptodev *dev;\n+\tuint8_t cdev_id;\n+\tuint16_t qp, dev_qps;\n+\tbool done;\n+\tuint16_t num_cdev = rte_cryptodev_count();\n+\n+\tnb_deq = 0;\n+\tdo {\n+\t\tuint16_t queues = 0;\n+\t\tdone = true;\n+\n+\t\tfor (cdev_id = adapter->next_cdev_id;\n+\t\t\tcdev_id < num_cdev; cdev_id++) {\n+\t\t\tcurr_dev = &adapter->cdevs[cdev_id];\n+\t\t\tif (curr_dev == NULL)\n+\t\t\t\tcontinue;\n+\t\t\tdev = curr_dev->dev;\n+\t\t\tdev_qps = dev->data->nb_queue_pairs;\n+\n+\t\t\tfor (qp = curr_dev->next_queue_pair_id;\n+\t\t\t\tqueues < dev_qps; qp = (qp + 1) % dev_qps,\n+\t\t\t\tqueues++) {\n+\n+\t\t\t\tcurr_queue = &curr_dev->qpairs[qp];\n+\t\t\t\tif (!curr_queue->qp_enabled)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tn = rte_cryptodev_dequeue_burst(cdev_id, qp,\n+\t\t\t\t\tops, BATCH_SIZE);\n+\t\t\t\tif (!n)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tdone = false;\n+\t\t\t\tstats->crypto_deq_count += n;\n+\t\t\t\teca_ops_enqueue_burst(adapter, ops, n);\n+\t\t\t\tnb_deq += n;\n+\n+\t\t\t\tif (nb_deq > max_deq) {\n+\t\t\t\t\tif ((qp + 1) == dev_qps) {\n+\t\t\t\t\t\tadapter->next_cdev_id =\n+\t\t\t\t\t\t\t(cdev_id + 1)\n+\t\t\t\t\t\t\t% num_cdev;\n+\t\t\t\t\t}\n+\t\t\t\t\tcurr_dev->next_queue_pair_id = (qp + 1)\n+\t\t\t\t\t\t% dev->data->nb_queue_pairs;\n+\n+\t\t\t\t\treturn nb_deq;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t} while (done == false);\n+\treturn nb_deq;\n+}\n+\n+static void\n+eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,\n+\t\t\tunsigned int max_ops)\n+{\n+\twhile (max_ops) {\n+\t\tunsigned int e_cnt, d_cnt;\n+\n+\t\te_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);\n+\t\tmax_ops -= RTE_MIN(max_ops, e_cnt);\n+\n+\t\td_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);\n+\t\tmax_ops -= RTE_MIN(max_ops, d_cnt);\n+\n+\t\tif (e_cnt == 0 && d_cnt == 0)\n+\t\t\tbreak;\n+\n+\t}\n+}\n+\n+static int\n+eca_service_func(void *args)\n+{\n+\tstruct rte_event_crypto_adapter *adapter = args;\n+\n+\tif (rte_spinlock_trylock(&adapter->lock) == 0)\n+\t\treturn 0;\n+\teca_crypto_adapter_run(adapter, adapter->max_nb);\n+\trte_spinlock_unlock(&adapter->lock);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)\n+{\n+\tstruct rte_event_crypto_adapter_conf adapter_conf;\n+\tstruct rte_service_spec service;\n+\tint ret;\n+\n+\tif (adapter->service_inited)\n+\t\treturn 0;\n+\n+\tmemset(&service, 0, sizeof(service));\n+\tsnprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,\n+\t\t\"rte_event_crypto_adapter_%d\", id);\n+\tservice.socket_id = adapter->socket_id;\n+\tservice.callback = eca_service_func;\n+\tservice.callback_userdata = adapter;\n+\t/* Service function handles locking for queue add/del updates */\n+\tservice.capabilities = RTE_SERVICE_CAP_MT_SAFE;\n+\tret = rte_service_component_register(&service, &adapter->service_id);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to register service %s err = %\" PRId32,\n+\t\t\tservice.name, 
ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = adapter->conf_cb(id, adapter->eventdev_id,\n+\t\t&adapter_conf, adapter->conf_arg);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"configuration callback failed err = %\" PRId32,\n+\t\t\tret);\n+\t\treturn ret;\n+\t}\n+\n+\tadapter->max_nb = adapter_conf.max_nb;\n+\tadapter->event_port_id = adapter_conf.event_port_id;\n+\tadapter->service_inited = 1;\n+\n+\treturn ret;\n+}\n+\n+static void\n+eca_update_qp_info(struct rte_event_crypto_adapter *adapter,\n+\t\t\tstruct crypto_device_info *dev_info,\n+\t\t\tint32_t queue_pair_id,\n+\t\t\tuint8_t add)\n+{\n+\tstruct crypto_queue_pair_info *qp_info;\n+\tint enabled;\n+\tuint16_t i;\n+\n+\tif (dev_info->qpairs == NULL)\n+\t\treturn;\n+\n+\tif (queue_pair_id == -1) {\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)\n+\t\t\teca_update_qp_info(adapter, dev_info, i, add);\n+\t} else {\n+\t\tqp_info = &dev_info->qpairs[queue_pair_id];\n+\t\tenabled = qp_info->qp_enabled;\n+\t\tif (add) {\n+\t\t\tadapter->nb_qps += !enabled;\n+\t\t\tdev_info->num_qpairs += !enabled;\n+\t\t} else {\n+\t\t\tadapter->nb_qps -= enabled;\n+\t\t\tdev_info->num_qpairs -= enabled;\n+\t\t}\n+\t\tqp_info->qp_enabled = !!add;\n+\t}\n+}\n+\n+static int\n+eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,\n+\t\tuint8_t cdev_id,\n+\t\tint queue_pair_id)\n+{\n+\tstruct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];\n+\tstruct crypto_queue_pair_info *qpairs;\n+\tuint32_t i;\n+\n+\tif (dev_info->qpairs == NULL) {\n+\t\tdev_info->qpairs =\n+\t\t    rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\t\tdev_info->dev->data->nb_queue_pairs *\n+\t\t\t\t\tsizeof(struct crypto_queue_pair_info),\n+\t\t\t\t\t0, adapter->socket_id);\n+\t\tif (dev_info->qpairs == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tqpairs = dev_info->qpairs;\n+\t\tqpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\t\tBATCH_SIZE *\n+\t\t\t\t\tsizeof(struct rte_crypto_op *),\n+\t\t\t\t\t0, adapter->socket_id);\n+\t\tif (!qpairs->op_buffer) {\n+\t\t\trte_free(qpairs);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tif (queue_pair_id == -1) {\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)\n+\t\t\teca_update_qp_info(adapter, dev_info, i, 1);\n+\t} else\n+\t\teca_update_qp_info(adapter, dev_info,\n+\t\t\t\t\t(uint16_t)queue_pair_id, 1);\n+\n+\treturn 0;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_queue_pair_add(uint8_t id,\n+\t\t\tuint8_t cdev_id,\n+\t\t\tint32_t queue_pair_id,\n+\t\t\tconst struct rte_event *event)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\tstruct rte_eventdev *dev;\n+\tstruct crypto_device_info *dev_info;\n+\tuint32_t cap;\n+\tint ret;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tif (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid dev_id=%\" PRIu8, cdev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[adapter->eventdev_id];\n+\tret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,\n+\t\t\t\t\t\tcdev_id,\n+\t\t\t\t\t\t&cap);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get adapter caps dev %\" PRIu8\n+\t\t\t\" cdev %\" PRIu8, id, cdev_id);\n+\t\treturn ret;\n+\t}\n+\n+\tif ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&\n+\t    (event == NULL)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Conf value can not be NULL for dev_id=%u\",\n+\t\t\t\t  cdev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev_info = 
&adapter->cdevs[cdev_id];\n+\n+\tif (queue_pair_id != -1 &&\n+\t    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid queue_pair_id %\" PRIu16,\n+\t\t\t\t (uint16_t)queue_pair_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,\n+\t * no need of service core as HW supports event forward capability.\n+\t */\n+\tif ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||\n+\t    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&\n+\t     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||\n+\t    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&\n+\t     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(\n+\t\t\t*dev->dev_ops->crypto_adapter_queue_pair_add,\n+\t\t\t-ENOTSUP);\n+\t\tif (dev_info->qpairs == NULL) {\n+\t\t\tdev_info->qpairs =\n+\t\t\t    rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\t\tdev_info->dev->data->nb_queue_pairs *\n+\t\t\t\t\tsizeof(struct crypto_queue_pair_info),\n+\t\t\t\t\t0, adapter->socket_id);\n+\t\t\tif (dev_info->qpairs == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,\n+\t\t\t\tdev_info->dev,\n+\t\t\t\tqueue_pair_id,\n+\t\t\t\tevent);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\telse\n+\t\t\teca_update_qp_info(adapter, &adapter->cdevs[cdev_id],\n+\t\t\t\t\t   queue_pair_id, 1);\n+\t}\n+\n+\t/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,\n+\t * or SW adapter, initiate services so the application can choose\n+\t * which ever way it wants to use the adapter.\n+\t * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW\n+\t *         Application may wants to use one of below two mode\n+\t *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue\n+\t *          b. OP_NEW mode -> HW Dequeue\n+\t * Case 2: No HW caps, use SW adapter\n+\t *          a. OP_FORWARD mode -> SW enqueue & dequeue\n+\t *          b. 
OP_NEW mode -> SW Dequeue\n+\t */\n+\tif ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&\n+\t     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||\n+\t     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&\n+\t      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&\n+\t       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {\n+\t\trte_spinlock_lock(&adapter->lock);\n+\t\tret = eca_init_service(adapter, id);\n+\t\tif (ret == 0)\n+\t\t\tret = eca_add_queue_pair(adapter, cdev_id,\n+\t\t\t\t\t\t queue_pair_id);\n+\t\trte_spinlock_unlock(&adapter->lock);\n+\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\trte_service_component_runstate_set(adapter->service_id, 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,\n+\t\t\t\t\tint32_t queue_pair_id)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\tstruct crypto_device_info *dev_info;\n+\tstruct rte_eventdev *dev;\n+\tint ret;\n+\tuint32_t cap;\n+\tuint16_t i;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tif (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid dev_id=%\" PRIu8, cdev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[adapter->eventdev_id];\n+\tret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,\n+\t\t\t\t\t\tcdev_id,\n+\t\t\t\t\t\t&cap);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tdev_info = &adapter->cdevs[cdev_id];\n+\n+\tif (queue_pair_id != -1 &&\n+\t    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid queue_pair_id %\" PRIu16,\n+\t\t\t\t (uint16_t)queue_pair_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||\n+\t    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&\n+\t     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(\n+\t\t\t*dev->dev_ops->crypto_adapter_queue_pair_del,\n+\t\t\t-ENOTSUP);\n+\t\tret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,\n+\t\t\t\t\t\tdev_info->dev,\n+\t\t\t\t\t\tqueue_pair_id);\n+\t\tif (ret == 0) {\n+\t\t\teca_update_qp_info(adapter,\n+\t\t\t\t\t&adapter->cdevs[cdev_id],\n+\t\t\t\t\tqueue_pair_id,\n+\t\t\t\t\t0);\n+\t\t\tif (dev_info->num_qpairs == 0) {\n+\t\t\t\trte_free(dev_info->qpairs);\n+\t\t\t\tdev_info->qpairs = NULL;\n+\t\t\t}\n+\t\t}\n+\t} else {\n+\t\tif (adapter->nb_qps == 0)\n+\t\t\treturn 0;\n+\n+\t\trte_spinlock_lock(&adapter->lock);\n+\t\tif (queue_pair_id == -1) {\n+\t\t\tfor (i = 0; i < dev_info->dev->data->nb_queue_pairs;\n+\t\t\t\ti++)\n+\t\t\t\teca_update_qp_info(adapter, dev_info,\n+\t\t\t\t\t\t\tqueue_pair_id, 0);\n+\t\t} else {\n+\t\t\teca_update_qp_info(adapter, dev_info,\n+\t\t\t\t\t\t(uint16_t)queue_pair_id, 0);\n+\t\t}\n+\n+\t\tif (dev_info->num_qpairs == 0) {\n+\t\t\trte_free(dev_info->qpairs);\n+\t\t\tdev_info->qpairs = NULL;\n+\t\t}\n+\n+\t\trte_spinlock_unlock(&adapter->lock);\n+\t\trte_service_component_runstate_set(adapter->service_id,\n+\t\t\t\tadapter->nb_qps);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+eca_adapter_ctrl(uint8_t id, int start)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\tstruct crypto_device_info *dev_info;\n+\tstruct rte_eventdev *dev;\n+\tuint32_t i;\n+\tint use_service;\n+\tint stop = !start;\n+\n+\tuse_service = 
0;\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[adapter->eventdev_id];\n+\n+\tfor (i = 0; i < rte_cryptodev_count(); i++) {\n+\t\tdev_info = &adapter->cdevs[i];\n+\t\t/* if start  check for num queue pairs */\n+\t\tif (start && !dev_info->num_qpairs)\n+\t\t\tcontinue;\n+\t\t/* if stop check if dev has been started */\n+\t\tif (stop && !dev_info->dev_started)\n+\t\t\tcontinue;\n+\t\tuse_service |= !dev_info->internal_event_port;\n+\t\tdev_info->dev_started = start;\n+\t\tif (dev_info->internal_event_port == 0)\n+\t\t\tcontinue;\n+\t\tstart ? (*dev->dev_ops->crypto_adapter_start)(dev,\n+\t\t\t\t\t\t&dev_info->dev[i]) :\n+\t\t\t(*dev->dev_ops->crypto_adapter_stop)(dev,\n+\t\t\t\t\t\t&dev_info->dev[i]);\n+\t}\n+\n+\tif (use_service)\n+\t\trte_service_runstate_set(adapter->service_id, start);\n+\n+\treturn 0;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_start(uint8_t id)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\treturn eca_adapter_ctrl(id, 1);\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_stop(uint8_t id)\n+{\n+\treturn eca_adapter_ctrl(id, 0);\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_stats_get(uint8_t id,\n+\t\t\t\tstruct rte_event_crypto_adapter_stats *stats)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\tstruct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };\n+\tstruct rte_event_crypto_adapter_stats dev_stats;\n+\tstruct rte_eventdev *dev;\n+\tstruct crypto_device_info *dev_info;\n+\tuint32_t i;\n+\tint ret;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL || stats == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[adapter->eventdev_id];\n+\tmemset(stats, 0, sizeof(*stats));\n+\tfor (i = 0; i < rte_cryptodev_count(); i++) {\n+\t\tdev_info = &adapter->cdevs[i];\n+\t\tif (dev_info->internal_event_port == 0 ||\n+\t\t\tdev->dev_ops->crypto_adapter_stats_get == NULL)\n+\t\t\tcontinue;\n+\t\tret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,\n+\t\t\t\t\t\tdev_info->dev,\n+\t\t\t\t\t\t&dev_stats);\n+\t\tif (ret)\n+\t\t\tcontinue;\n+\n+\t\tdev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;\n+\t\tdev_stats_sum.event_enq_count +=\n+\t\t\tdev_stats.event_enq_count;\n+\t}\n+\n+\tif (adapter->service_inited)\n+\t\t*stats = adapter->crypto_stats;\n+\n+\tstats->crypto_deq_count += dev_stats_sum.crypto_deq_count;\n+\tstats->event_enq_count += dev_stats_sum.event_enq_count;\n+\n+\treturn 0;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_stats_reset(uint8_t id)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\tstruct crypto_device_info *dev_info;\n+\tstruct rte_eventdev *dev;\n+\tuint32_t i;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[adapter->eventdev_id];\n+\tfor (i = 0; i < rte_cryptodev_count(); i++) {\n+\t\tdev_info = &adapter->cdevs[i];\n+\t\tif (dev_info->internal_event_port == 0 ||\n+\t\t\tdev->dev_ops->crypto_adapter_stats_reset == NULL)\n+\t\t\tcontinue;\n+\t\t(*dev->dev_ops->crypto_adapter_stats_reset)(dev,\n+\t\t\t\t\t\tdev_info->dev);\n+\t}\n+\n+\tmemset(&adapter->crypto_stats, 0, 
sizeof(adapter->crypto_stats));\n+\treturn 0;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL || service_id == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->service_inited)\n+\t\t*service_id = adapter->service_id;\n+\n+\treturn adapter->service_inited ? 0 : -ESRCH;\n+}\n+\n+int __rte_experimental\n+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)\n+{\n+\tstruct rte_event_crypto_adapter *adapter;\n+\n+\tEVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tadapter = eca_id_to_adapter(id);\n+\tif (adapter == NULL || event_port_id == NULL)\n+\t\treturn -EINVAL;\n+\n+\t*event_port_id = adapter->event_port_id;\n+\n+\treturn 0;\n+}\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nindex b7b04b1..35ea9d2 100644\n--- a/lib/librte_eventdev/rte_eventdev_version.map\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -97,6 +97,17 @@ EXPERIMENTAL {\n \trte_event_timer_arm_burst;\n \trte_event_timer_arm_tmo_tick_burst;\n \trte_event_timer_cancel_burst;\n-\trte_event_crypto_adapter_caps_get;\n \n+\trte_event_crypto_adapter_caps_get;\n+\trte_event_crypto_adapter_create;\n+\trte_event_crypto_adapter_create_ext;\n+\trte_event_crypto_adapter_event_port_get;\n+\trte_event_crypto_adapter_free;\n+\trte_event_crypto_adapter_queue_pair_add;\n+\trte_event_crypto_adapter_queue_pair_del;\n+\trte_event_crypto_adapter_service_id_get;\n+\trte_event_crypto_adapter_start;\n+\trte_event_crypto_adapter_stats_get;\n+\trte_event_crypto_adapter_stats_reset;\n+\trte_event_crypto_adapter_stop;\n } DPDK_18.05;\n",
    "prefixes": [
        "dpdk-dev",
        "v5",
        "3/5"
    ]
}
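
For reference, a small sketch of consuming the JSON shown above: it reads a few fields that appear in the response ("name", "state", "check", "submitter", "delegate", "mbox"), then downloads the raw mbox so the patch can be applied locally with git am. This is an illustrative helper, not part of the Patchwork API itself.

# Sketch: extract useful fields from the patch JSON above and save the mbox.
import requests

patch = requests.get(
    "http://patches.dpdk.org/api/patches/39527/",
    headers={"Accept": "application/json"},
).json()

print(f'{patch["name"]} [{patch["state"]}, check: {patch["check"]}]')
print(f'submitted by {patch["submitter"]["name"]} <{patch["submitter"]["email"]}>')
if patch["delegate"]:
    print(f'delegated to {patch["delegate"]["username"]}')

# The "mbox" URL serves the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"])
with open("39527.mbox", "wb") as f:
    f.write(mbox.content)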