get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
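
These are the operations the Patchwork REST API exposes for this patch resource. As a minimal sketch of reproducing the read-only GET shown below from code (libcurl is assumed and is not part of Patchwork itself; updating a patch via PUT or PATCH additionally requires authentication, which is omitted here):

    /* Fetch this patch resource and print the JSON body to stdout.
     * Build with: cc fetch_patch.c -lcurl
     */
    #include <stdio.h>
    #include <curl/curl.h>

    int main(void)
    {
        CURL *curl;
        CURLcode res;

        curl_global_init(CURL_GLOBAL_DEFAULT);
        curl = curl_easy_init();
        if (curl == NULL)
            return 1;

        curl_easy_setopt(curl, CURLOPT_URL,
                         "https://patches.dpdk.org/api/patches/28649/");
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

        /* libcurl's default write callback prints the response body */
        res = curl_easy_perform(curl);
        if (res != CURLE_OK)
            fprintf(stderr, "request failed: %s\n", curl_easy_strerror(res));

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return res == CURLE_OK ? 0 : 1;
    }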

GET /api/patches/28649/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 28649,
    "url": "https://patches.dpdk.org/api/patches/28649/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1505328781-23456-1-git-send-email-nikhil.rao@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1505328781-23456-1-git-send-email-nikhil.rao@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1505328781-23456-1-git-send-email-nikhil.rao@intel.com",
    "date": "2017-09-13T18:53:01",
    "name": "[dpdk-dev,3/4] eventdev: Add eventdev ethernet Rx adapter",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "add1e6b7afdcb4a9f98d668a06a9fedb4be562f8",
    "submitter": {
        "id": 528,
        "url": "https://patches.dpdk.org/api/people/528/?format=api",
        "name": "Rao, Nikhil",
        "email": "nikhil.rao@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1505328781-23456-1-git-send-email-nikhil.rao@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/28649/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/28649/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A4FBD7D4E;\n\tWed, 13 Sep 2017 12:15:24 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id DCFAB374E\n\tfor <dev@dpdk.org>; Wed, 13 Sep 2017 12:15:22 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t13 Sep 2017 03:15:21 -0700",
            "from unknown (HELO localhost.iind.intel.com) ([10.224.122.216])\n\tby FMSMGA003.fm.intel.com with ESMTP; 13 Sep 2017 03:15:18 -0700"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.42,387,1500966000\"; d=\"scan'208\";a=\"899807526\"",
        "From": "Nikhil Rao <nikhil.rao@intel.com>",
        "To": "jerin.jacob@caviumnetworks.com,\n\tbruce.richardson@intel.com",
        "Cc": "gage.eads@intel.com, dev@dpdk.org, thomas@monjalon.net,\n\tharry.van.haaren@intel.com, hemant.agrawal@nxp.com, nipun.gupta@nxp.com, \n\tnarender.vangati@intel.com, erik.g.carrillo@intel.com,\n\tabhinandan.gujjar@intel.com",
        "Date": "Thu, 14 Sep 2017 00:23:01 +0530",
        "Message-Id": "<1505328781-23456-1-git-send-email-nikhil.rao@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<04bcb240-51fb-50dc-833c-60c33a420d6f@intel.com>",
        "References": "<04bcb240-51fb-50dc-833c-60c33a420d6f@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 3/4] eventdev: Add eventdev ethernet Rx adapter",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add common APIs for configuring packet transfer from ethernet Rx\nqueues to event devices across HW & SW packet transfer mechanisms.\nA detailed description of the adapter is contained in the header's\ncomments.\n\nThe adapter implementation uses eventdev PMDs to configure the packet\ntransfer if HW support is available and if not, it uses an EAL service\nfunction that reads packets from ethernet Rx queues and injects these\nas events into the event device.\n\nSigned-off-by: Nikhil Rao <nikhil.rao@intel.com>\nSigned-off-by: Gage Eads <gage.eads@intel.com>\nSigned-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>\n---\n lib/librte_eventdev/rte_event_eth_rx_adapter.h |  386 ++++++++\n lib/librte_eventdev/rte_event_eth_rx_adapter.c | 1239 ++++++++++++++++++++++++\n lib/Makefile                                   |    2 +-\n lib/librte_eventdev/Makefile                   |    2 +\n lib/librte_eventdev/rte_eventdev_version.map   |   11 +-\n 5 files changed, 1638 insertions(+), 2 deletions(-)\n create mode 100644 lib/librte_eventdev/rte_event_eth_rx_adapter.h\n create mode 100644 lib/librte_eventdev/rte_event_eth_rx_adapter.c",
    "diff": "diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h\nnew file mode 100644\nindex 0000000..678c0ae\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h\n@@ -0,0 +1,386 @@\n+/*\n+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef _RTE_EVENT_ETH_RX_ADAPTER_\n+#define _RTE_EVENT_ETH_RX_ADAPTER_\n+\n+/**\n+ * @file\n+ *\n+ * RTE Event Ethernet Rx Adapter\n+ *\n+ * An eventdev-based packet processing application enqueues/dequeues mbufs\n+ * to/from the event device. The application uses the adapter APIs to configure\n+ * the packet flow between the ethernet devices and event devices. Depending on\n+ * on the capabilties of the eventdev PMD, the adapter may use a EAL service\n+ * core function for packet transfer or use internal PMD functions to configure\n+ * the packet transfer between the ethernet device and the event device.\n+ *\n+ * The ethernet Rx event adapter's functions are:\n+ *  - rte_event_eth_rx_adapter_create_ext()\n+ *  - rte_event_eth_rx_adapter_create()/free()\n+ *  - rte_event_eth_rx_adapter_queue_add()/del()\n+ *  - rte_event_eth_rx_adapter_start()/stop()\n+ *  - rte_event_eth_rx_adapter_stats_get()/reset()\n+ *\n+ * The applicaton creates an event to ethernet adapter using\n+ * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()\n+ * functions.\n+ * The adapter needs to know which ethernet rx queues to poll for mbufs as well\n+ * as event device parameters such as the event queue identifier, event\n+ * priority and scheduling type that the adapter should use when constructing\n+ * events. 
The rte_event_eth_rx_adapter_queue_add() function is provided for\n+ * this purpose.\n+ * The servicing weight parameter in the rte_event_eth_rx_adapter_queue_conf\n+ * is applicable when the Rx adapter uses a service core function and is\n+ * intended to provide application control of the polling frequency of ethernet\n+ * device receive queues, for example, the application may want to poll higher\n+ * priority queues with a higher frequency but at the same time not starve\n+ * lower priority queues completely. If this parameter is zero and the receive\n+ * interrupt is enabled when configuring the device, the receive queue is\n+ * interrupt driven; else, the queue is assigned a servicing weight of one.\n+ *\n+ * If the adapter uses a rte_service function, then the application is also\n+ * required to assign a core to the service function and control the service\n+ * core using the rte_service APIs. The rte_event_eth_rx_adapter_service_id_get\n+ * function can be used to retrieve the service function ID of the adapter in\n+ * this case.\n+ *\n+ * Note: Interrupt driven receive queues are currentely unimplemented.\n+ */\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <stdint.h>\n+#include <rte_service.h>\n+\n+#include \"rte_eventdev.h\"\n+\n+#define RTE_EVENT_ETH_RX_ADAPTER_NAME_FORMAT\t\"rte_event_eth_rx_adapter_%d\"\n+\n+#define RTE_MAX_EVENT_ETH_RX_ADAPTER_INSTANCE 32\n+\n+/* struct rte_event_eth_rx_adapter_queue_conf flags definitions */\n+#define RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID\t0x1\n+/**< This flag indicates the flow identifier is valid\n+ * @see rte_event_eth_rx_adapter_queue_conf\n+ */\n+\n+struct rte_event_eth_rx_adapter_conf {\n+\tuint8_t event_port_id;\n+\t/**< Event port identifier, the adapter enqueues mbuf events to this\n+\t * port\n+\t */\n+\tuint32_t max_nb_rx;\n+\t/**< The adapter can return early if it has processed at least\n+\t * max_nb_rx mbufs. This isn't treated as a requirement; batching may\n+\t * cause the adapter to process more than max_nb_rx mbufs\n+\t */\n+};\n+\n+/**\n+ * Function type used for adapter configuration callback. The callback is\n+ * used to fill in members of the struct rte_event_eth_rx_adapter_conf, this\n+ * callback is invoked when creating a SW service for packet transfer from\n+ * ethdev queues to the event device. The SW service is created within the\n+ * rte_event_eth_rx_adapter_queue_add() function if packet required.\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @param dev_id\n+ *  Event device identifier.\n+ *\n+ * @conf\n+ *  Structure that needs to be populated by this callback.\n+ *\n+ * @arg\n+ *  Argument to the callback. 
This is the same as the conf_arg passed to the\n+ *  rte_event_eth_rx_adapter_create_ext()\n+ */\n+typedef int (*rx_adapter_conf_cb) (uint8_t id, uint8_t dev_id,\n+\t\t\tstruct rte_event_eth_rx_adapter_conf *conf,\n+\t\t\tvoid *arg);\n+\n+/** Rx queue configuration structure */\n+struct rte_event_eth_rx_adapter_queue_conf {\n+\tuint32_t rx_queue_flags;\n+\t /**< Flags for handling received packets\n+\t  * @see RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID\n+\t  */\n+\tuint16_t servicing_weight;\n+\t/**< Relative polling frequency of ethernet receive queue, if this\n+\t * is set to zero, the Rx queue is interrupt driven (unless rx queue\n+\t * interrupts are not enabled for the ethernet device)\n+\t */\n+\tstruct rte_event ev;\n+\t/**<\n+\t *  The values from the following event fields will be used when\n+\t *  enqueuing mbuf events:\n+\t *   - event_queue_id: Targeted event queue ID for received packets.\n+\t *   - event_priority: Event priority of packets from this Rx queue in\n+\t *                     the event queue relative to other events.\n+\t *   - sched_type: Scheduling type for packets from this Rx queue.\n+\t *   - flow_id: If the RTE_ETH_RX_EVENT_ADAPTER_QUEUE_FLOW_ID_VALID bit\n+\t *\t\tis set in rx_queue_flags, this flow_id is used for all\n+\t *\t\tpackets received from this queue. Otherwise the flow ID\n+\t *\t\tis set to the RSS hash of the src and dst IPv4/6\n+\t *\t\taddress.\n+\t *\n+\t * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the\n+\t * enqueued event\n+\t */\n+};\n+\n+struct rte_event_eth_rx_adapter_stats {\n+\tuint64_t rx_poll_count;\n+\t/**< Receive queue poll count */\n+\tuint64_t rx_packets;\n+\t/**< Received packet count */\n+\tuint64_t rx_enq_count;\n+\t/**< Eventdev enqueue count */\n+\tuint64_t rx_enq_retry;\n+\t/**< Eventdev enqueue retry count */\n+\tuint64_t rx_enq_start_ts;\n+\t/**< Rx enqueue start timestamp */\n+\tuint64_t rx_enq_block_cycles;\n+\t/**< Cycles for which the service is blocked by the event device,\n+\t * i.e, the service fails to enqueue to the event device.\n+\t */\n+\tuint64_t rx_enq_end_ts;\n+\t/**< Latest timestamp at which the service is unblocked\n+\t * by the event device. The start, end timestamps and\n+\t * block cycles can be used to compute the percentage of\n+\t * cycles the service is blocked by the event device.\n+\t */\n+};\n+\n+/**\n+ * Create a new ethernet Rx event adapter with the specified identifier.\n+ *\n+ * @param id\n+ *  The identifier of the ethernet Rx event adapter.\n+ *\n+ * @dev_id\n+ *  The identifier of the device to configure.\n+ *\n+ * @eth_port_id\n+ *  The identifier of the ethernet device.\n+ *\n+ * @param conf_cb\n+ *  Callback function that fills in members of a\n+ *  struct rte_event_eth_rx_adapter_conf struct passed into\n+\t *    it.\n+ *\n+ * @param conf_arg\n+ *  Argument that is passed to the conf_cb function.\n+ *\n+ * @return\n+ *   - 0: Success\n+ *   - <0: Error code on failure\n+ */\n+int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,\n+\t\t\t\t\trx_adapter_conf_cb conf_cb,\n+\t\t\t\t\tvoid *conf_arg);\n+\n+/**\n+ * Create a new ethernet Rx event adapter with the specified identifier.\n+ * This function uses an internal configuration function that creates an event\n+ * port. This default function reconfigures the event device with an\n+ * additional event port and setups up the event port using the port_config\n+ * parameter passed into this function. 
In case the application needs more\n+ * control in configuration of the service, it should use the\n+ * rte_event_eth_rx_adapter_create_ext() version.\n+ *\n+ * @param id\n+ *  The identifier of the ethernet Rx event adapter.\n+ *\n+ * @dev_id\n+ *  The identifier of the device to configure.\n+ *\n+ * @eth_port_id\n+ *  The identifier of the ethernet device.\n+ *\n+ * @param conf_cb\n+ *  Callback function that fills in members of a\n+ *  struct rte_event_eth_rx_adapter_conf struct passed into\n+ *  it.\n+ *\n+ * @param conf_arg\n+ *  Argument of type *rte_event_port_conf* that is passed to the conf_cb\n+ *  function.\n+ *\n+ * @return\n+ *   - 0: Success\n+ *   - <0: Error code on failure\n+ */\n+int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,\n+\t\t\t\tstruct rte_event_port_conf *port_config);\n+\n+/**\n+ * Free an event adapter\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @return\n+ *   - 0: Success\n+ *   - <0: Error code on failure, If the adapter still has Rx queues\n+ *      added to it, the function returns -EBUSY.\n+ */\n+int rte_event_eth_rx_adapter_free(uint8_t id);\n+\n+/**\n+ * Add receive queue to an event adapter. After a queue has been\n+ * added to the event adapter, the result of the application calling\n+ * rte_eth_rx_burst(eth_dev_id, rx_queue_id, ..) is undefined.\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @param eth_dev_id\n+ *  Port identifier of Ethernet device.\n+ *\n+ * @param rx_queue_id\n+ *  Ethernet device receive queue index.\n+ *  If rx_queue_id is -1, then all Rx queues configured for\n+ *  the device are added. If the ethdev Rx queues can only be\n+ *  connected to a single event queue then rx_queue_id is\n+ *  required to be -1.\n+ *\n+ * @param conf\n+ *  Additonal configuration structure of type *rte_event_eth_rx_adapte_conf*\n+ *\n+ * @see\n+ * @return\n+ *  - 0: Success, Receive queue added correctly.\n+ *  - <0: Error code on failure.\n+ */\n+int rte_event_eth_rx_adapter_queue_add(uint8_t id,\n+\t\t\tuint8_t eth_dev_id,\n+\t\t\tint32_t rx_queue_id,\n+\t\t\tconst struct rte_event_eth_rx_adapter_queue_conf *conf);\n+\n+/**\n+ * Delete receive queue from an event adapter.\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @param eth_dev_id\n+ *  Port identifier of Ethernet device.\n+ *\n+ * @param rx_queue_id\n+ *  Ethernet device receive queue index.\n+ *  If rx_queue_id is -1, then all Rx queues configured for\n+ *  the device are deleted. 
If the ethdev Rx queues can only be\n+ *  connected to a single event queue then rx_queue_id is\n+ *  required to be -1.\n+ *\n+ * @return\n+ *  - 0: Success, Receive queue deleted correctly.\n+ *  - <0: Error code on failure.\n+ */\n+int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,\n+\t\t\t\t       int32_t rx_queue_id);\n+\n+/**\n+ * Start  ethernet Rx event adapter\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @return\n+ *  - 0: Success, Adapter started correctly.\n+ *  - <0: Error code on failure.\n+ */\n+int rte_event_eth_rx_adapter_start(uint8_t id);\n+\n+/**\n+ * Stop  ethernet Rx event adapter\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @return\n+ *  - 0: Success, Adapter started correctly.\n+ *  - <0: Error code on failure.\n+ */\n+int rte_event_eth_rx_adapter_stop(uint8_t id);\n+\n+/**\n+ * Retrieve statistics for an adapter\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @param stats\n+ *  A pointer to structure used to retrieve statistics for an adapter.\n+ *\n+ * @return\n+ *  - 0: Success, retrieved successfully.\n+ *  - <0: Error code on failure.\n+ */\n+int rte_event_eth_rx_adapter_stats_get(uint8_t id,\n+\t\t\t\tstruct rte_event_eth_rx_adapter_stats *stats);\n+\n+/**\n+ * Reset statistics for an adapter\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @return\n+ *  - 0: Success, statistics reset successfully.\n+ *  - <0: Error code on failure.\n+ */\n+int rte_event_eth_rx_adapter_stats_reset(uint8_t id);\n+\n+/**\n+ * Retrieve the service ID of an adapter. If the adapter doesn't use\n+ * a rte_service function, this function returns -ESRCH\n+ *\n+ * @param id\n+ *  Adapter identifier.\n+ *\n+ * @return\n+ *  - 0: Success, statistics reset successfully.\n+ *  - <0: Error code on failure, if the adapter doesn't use a rte_service\n+ * function, this function returns -ESRCH.\n+ */\n+int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+#endif\t/* _RTE_EVENT_ETH_RX_ADAPTER_ */\ndiff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\nnew file mode 100644\nindex 0000000..cd19e7c\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n@@ -0,0 +1,1239 @@\n+#include <rte_cycles.h>\n+#include <rte_common.h>\n+#include <rte_dev.h>\n+#include <rte_errno.h>\n+#include <rte_ethdev.h>\n+#include <rte_log.h>\n+#include <rte_malloc.h>\n+#include <rte_service_component.h>\n+#include <rte_thash.h>\n+\n+#include \"rte_eventdev.h\"\n+#include \"rte_eventdev_pmd.h\"\n+#include \"rte_event_eth_rx_adapter.h\"\n+\n+#define BATCH_SIZE\t\t32\n+#define BLOCK_CNT_THRESHOLD\t10\n+#define ETH_EVENT_BUFFER_SIZE\t(4*BATCH_SIZE)\n+\n+static const char adapter_mem_name[] = \"rx_adapter_mem_\";\n+/*\n+ * There is an instance of this struct per polled Rx queue added to the\n+ * adapter\n+ */\n+struct eth_rx_poll_entry {\n+\t/* eth port to poll */\n+\tuint8_t eth_dev_id;\n+\t/* eth rx queue to poll */\n+\tuint16_t eth_rx_qid;\n+};\n+\n+/* Instance per adapter */\n+struct rte_eth_event_enqueue_buffer {\n+\t/* Count of events in this buffer */\n+\tuint16_t count;\n+\t/* Array of events in this buffer */\n+\tstruct rte_event events[ETH_EVENT_BUFFER_SIZE];\n+};\n+\n+struct rte_event_eth_rx_adapter {\n+\t/* event device identifier */\n+\tuint8_t eventdev_id;\n+\t/* per ethernet device structure */\n+\tstruct eth_device_info *eth_devices;\n+\t/* malloc name */\n+\tchar mem_name[sizeof(adapter_mem_name) + 4];\n+\t/* socket 
identifier cached from eventdev */\n+\tint socket_id;\n+\n+\t/* elements below are used by SW service */\n+\n+\t/* event port identifier */\n+\tuint8_t event_port_id;\n+\t/* per adapter EAL service */\n+\tuint32_t service_id;\n+\t/* lock to serialize config updates with service function */\n+\trte_spinlock_t rx_lock;\n+\t/* max mbufs processed in any service function invocation */\n+\tuint32_t max_nb_rx;\n+\t/* Receive queues that need to be polled */\n+\tstruct eth_rx_poll_entry *eth_rx_poll;\n+\t/* size of the eth_rx_poll array */\n+\tuint16_t num_rx_polled;\n+\t/* Weighted round robin schedule */\n+\tuint32_t *wrr_sched;\n+\t/* wrr_sched[] size */\n+\tuint32_t wrr_len;\n+\t/* Next entry in wrr[] to begin polling */\n+\tuint32_t wrr_pos;\n+\t/* Event burst buffer */\n+\tstruct rte_eth_event_enqueue_buffer event_enqueue_buffer;\n+\t/* per adapter stats */\n+\tstruct rte_event_eth_rx_adapter_stats stats;\n+\t/* Block count, counts upto BLOCK_CNT_THRESHOLD */\n+\tuint16_t enq_block_count;\n+\t/* Block start ts */\n+\tuint64_t rx_enq_block_start_ts;\n+\t/* Configuration callback for rte_service configuration */\n+\trx_adapter_conf_cb conf_cb;\n+\t/* Configuration callback argument */\n+\tvoid *conf_arg;\n+\t/* Service initialization state */\n+\tuint8_t service_inited;\n+\t/* Total count of Rx queues in adapter */\n+\tuint32_t nb_queues;\n+} __rte_cache_aligned;\n+\n+/* Per eth device */\n+struct eth_device_info {\n+\tstruct rte_eth_dev *dev;\n+\tstruct eth_rx_queue_info *rx_queue;\n+\t/* Set if ethdev->eventdev packet transfer uses a\n+\t * hardware mechanism\n+\t */\n+\tuint8_t internal_event_port;\n+\t/* set if the adapter is processing rx queues for\n+\t * this eth device and packet processing has been\n+\t * started, allows for the code to know if the PMD\n+\t * rx_adapter_stop callback needs to be invoked\n+\t */\n+\tuint8_t dev_rx_started;\n+\t/* if nb_dev_queues > 0, the start callback will\n+\t * be invoked if not already invoked\n+\t */\n+\tuint16_t nb_dev_queues;\n+};\n+\n+/* Per Rx queue */\n+struct eth_rx_queue_info {\n+\tint queue_enabled;\t/* true if added */\n+\tuint16_t wt;\t\t/* polling weight */\n+\tuint8_t event_queue_id;\t/* Event queue to enqueue packets to */\n+\tuint8_t sched_type;\t/* sched type for events */\n+\tuint8_t priority;\t/* event priority */\n+\tuint32_t flow_id;\t/* app provided flow identifier */\n+\tuint32_t flow_id_mask;\t/* Set to ~0 if app provides flow id else 0 */\n+};\n+\n+static struct rte_event_eth_rx_adapter **rte_event_eth_rx_adapter;\n+static struct rte_event_port_conf\n+\t\tcreate_port_conf[RTE_MAX_EVENT_ETH_RX_ADAPTER_INSTANCE];\n+\n+static uint8_t default_rss_key[] = {\n+\t0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,\n+\t0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,\n+\t0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,\n+\t0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,\n+\t0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,\n+};\n+static uint8_t *rss_key_be;\n+\n+static inline int\n+valid_id(uint8_t id)\n+{\n+\treturn id < RTE_MAX_EVENT_ETH_RX_ADAPTER_INSTANCE;\n+}\n+\n+#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \\\n+\tif (!valid_id(id)) { \\\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid eth Rx adapter id = %d\\n\", id); \\\n+\t\treturn retval; \\\n+\t} \\\n+} while (0)\n+\n+static inline int\n+sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\treturn rx_adapter->num_rx_polled;\n+}\n+\n+/* Greatest common divisor */\n+static uint16_t gcd_u16(uint16_t a, uint16_t b)\n+{\n+\tuint16_t r = a % 
b;\n+\n+\treturn r ? gcd_u16(b, r) : b;\n+}\n+\n+/* Returns the next queue in the polling sequence\n+ *\n+ * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling\n+ */\n+static int\n+wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t unsigned int n, int *cw,\n+\t struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,\n+\t uint16_t gcd, int prev)\n+{\n+\tint i = prev;\n+\tuint16_t w;\n+\n+\twhile (1) {\n+\t\tuint16_t q;\n+\t\tuint8_t d;\n+\n+\t\ti = (i + 1) % n;\n+\t\tif (i == 0) {\n+\t\t\t*cw = *cw - gcd;\n+\t\t\tif (*cw <= 0)\n+\t\t\t\t*cw = max_wt;\n+\t\t}\n+\n+\t\tq = eth_rx_poll[i].eth_rx_qid;\n+\t\td = eth_rx_poll[i].eth_dev_id;\n+\t\tw = rx_adapter->eth_devices[d].rx_queue[q].wt;\n+\n+\t\tif ((int)w >= *cw)\n+\t\t\treturn i;\n+\t}\n+}\n+\n+/* Precalculate WRR polling sequence for all queues in rx_adapter */\n+static int\n+eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tuint8_t d;\n+\tuint16_t q;\n+\tunsigned int i;\n+\n+\t/* Initialize variables for calculaton of wrr schedule */\n+\tuint16_t max_wrr_pos = 0;\n+\tunsigned int poll_q = 0;\n+\tuint16_t max_wt = 0;\n+\tuint16_t gcd = 0;\n+\n+\tstruct eth_rx_poll_entry *rx_poll = NULL;\n+\tuint32_t *rx_wrr = NULL;\n+\n+\tif (rx_adapter->num_rx_polled) {\n+\t\tsize_t len = RTE_ALIGN(rx_adapter->num_rx_polled *\n+\t\t\t\tsizeof(*rx_adapter->eth_rx_poll),\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\trx_poll = rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\t     len,\n+\t\t\t\t\t     RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t     rx_adapter->socket_id);\n+\t\tif (!rx_poll)\n+\t\t\treturn -ENOMEM;\n+\n+\t\t/* Generate array of all queues to poll, the size of this\n+\t\t * array is poll_q\n+\t\t */\n+\t\tfor (d = 0; d < rte_eth_dev_count(); d++) {\n+\t\t\tuint16_t nb_rx_queues;\n+\t\t\tstruct eth_device_info *dev_info =\n+\t\t\t\t\t&rx_adapter->eth_devices[d];\n+\t\t\tnb_rx_queues = dev_info->dev->data->nb_rx_queues;\n+\t\t\tif (!dev_info->rx_queue)\n+\t\t\t\tcontinue;\n+\t\t\tfor (q = 0; q < nb_rx_queues; q++) {\n+\t\t\t\tstruct eth_rx_queue_info *queue_info =\n+\t\t\t\t\t&dev_info->rx_queue[q];\n+\t\t\t\tif (!queue_info->queue_enabled)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tuint16_t wt = queue_info->wt;\n+\t\t\t\trx_poll[poll_q].eth_dev_id = d;\n+\t\t\t\trx_poll[poll_q].eth_rx_qid = q;\n+\t\t\t\tmax_wrr_pos += wt;\n+\t\t\t\tmax_wt = RTE_MAX(max_wt, wt);\n+\t\t\t\tgcd = (gcd) ? 
gcd_u16(gcd, wt) : wt;\n+\t\t\t\tpoll_q++;\n+\t\t\t}\n+\t\t}\n+\n+\t\tlen = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\trx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\t    len,\n+\t\t\t\t\t    RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t    rx_adapter->socket_id);\n+\t\tif (!rx_wrr) {\n+\t\t\trte_free(rx_poll);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\t/* Generate polling sequence based on weights */\n+\t\tint prev = -1;\n+\t\tint cw = -1;\n+\t\tfor (i = 0; i < max_wrr_pos; i++) {\n+\t\t\trx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,\n+\t\t\t\t\t     rx_poll, max_wt, gcd, prev);\n+\t\t\tprev = rx_wrr[i];\n+\t\t}\n+\t}\n+\n+\trte_free(rx_adapter->eth_rx_poll);\n+\trte_free(rx_adapter->wrr_sched);\n+\n+\trx_adapter->eth_rx_poll = rx_poll;\n+\trx_adapter->wrr_sched = rx_wrr;\n+\trx_adapter->wrr_len = max_wrr_pos;\n+\n+\treturn 0;\n+}\n+\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+#define BE_16(x)\t(uint16_t)((x) >> 8 | (x) << 8)\n+#else\n+#define BE_16(x)\t(x)\n+#endif\n+\n+#define NETWORK_ORDER(x) BE_16(x)\n+\n+static inline void\n+mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,\n+\tstruct ipv6_hdr **ipv6_hdr)\n+{\n+\tstruct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);\n+\tstruct vlan_hdr *vlan_hdr;\n+\n+\t*ipv4_hdr = NULL;\n+\t*ipv6_hdr = NULL;\n+\n+\tswitch (eth_hdr->ether_type) {\n+\tcase NETWORK_ORDER(ETHER_TYPE_IPv4):\n+\t\t*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);\n+\t\tbreak;\n+\n+\tcase NETWORK_ORDER(ETHER_TYPE_IPv6):\n+\t\t*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);\n+\t\tbreak;\n+\n+\tcase NETWORK_ORDER(ETHER_TYPE_VLAN):\n+\t\tvlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);\n+\t\tswitch (vlan_hdr->eth_proto) {\n+\t\tcase NETWORK_ORDER(ETHER_TYPE_IPv4):\n+\t\t\t*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);\n+\t\t\tbreak;\n+\t\tcase NETWORK_ORDER(ETHER_TYPE_IPv6):\n+\t\t\t*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+/* Calculate RSS hash for IPv4/6 */\n+static inline uint32_t\n+do_softrss(struct rte_mbuf *m)\n+{\n+\tuint32_t input_len;\n+\tvoid *tuple;\n+\tstruct rte_ipv4_tuple ipv4_tuple;\n+\tstruct rte_ipv6_tuple ipv6_tuple;\n+\tstruct ipv4_hdr *ipv4_hdr;\n+\tstruct ipv6_hdr *ipv6_hdr;\n+\n+\tmtoip(m, &ipv4_hdr, &ipv6_hdr);\n+\n+\tif (ipv4_hdr) {\n+\t\tipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);\n+\t\tipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);\n+\t\ttuple = &ipv4_tuple;\n+\t\tinput_len = RTE_THASH_V4_L3_LEN;\n+\t} else if (ipv6_hdr) {\n+\t\trte_thash_load_v6_addrs(ipv6_hdr,\n+\t\t\t\t\t(union rte_thash_tuple *)&ipv6_tuple);\n+\t\ttuple = &ipv6_tuple;\n+\t\tinput_len = RTE_THASH_V6_L3_LEN;\n+\t} else\n+\t\treturn 0;\n+\n+\treturn rte_softrss_be(tuple, input_len, rss_key_be);\n+}\n+\n+static inline int\n+rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\treturn !!rx_adapter->enq_block_count;\n+}\n+\n+static inline void\n+rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tif (rx_adapter->rx_enq_block_start_ts)\n+\t\treturn;\n+\n+\trx_adapter->enq_block_count++;\n+\tif (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)\n+\t\treturn;\n+\n+\trx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();\n+}\n+\n+static inline void\n+rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t    struct rte_event_eth_rx_adapter_stats *stats)\n+{\n+\tif (unlikely(!stats->rx_enq_start_ts))\n+\t\tstats->rx_enq_start_ts = 
rte_get_tsc_cycles();\n+\n+\tif (likely(!rx_enq_blocked(rx_adapter)))\n+\t\treturn;\n+\n+\trx_adapter->enq_block_count = 0;\n+\tif (rx_adapter->rx_enq_block_start_ts) {\n+\t\tstats->rx_enq_end_ts = rte_get_tsc_cycles();\n+\t\tstats->rx_enq_block_cycles += stats->rx_enq_end_ts -\n+\t\t    rx_adapter->rx_enq_block_start_ts;\n+\t\trx_adapter->rx_enq_block_start_ts = 0;\n+\t}\n+}\n+\n+/* Add event to buffer, free space check is done prior to calling\n+ * this function\n+ */\n+static inline void\n+buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t  struct rte_event *ev)\n+{\n+\tstruct rte_eth_event_enqueue_buffer *buf =\n+\t    &rx_adapter->event_enqueue_buffer;\n+\trte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));\n+}\n+\n+/* Enqueue buffered events to event device */\n+static inline uint16_t\n+flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tstruct rte_eth_event_enqueue_buffer *buf =\n+\t    &rx_adapter->event_enqueue_buffer;\n+\tstruct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;\n+\n+\tuint16_t n = rte_event_enqueue_burst(rx_adapter->eventdev_id,\n+\t\t\t\t\trx_adapter->event_port_id,\n+\t\t\t\t\tbuf->events,\n+\t\t\t\t\tbuf->count);\n+\tif (n != buf->count) {\n+\t\tmemmove(buf->events,\n+\t\t\t&buf->events[n],\n+\t\t\t(buf->count - n) * sizeof(struct rte_event));\n+\t\tstats->rx_enq_retry++;\n+\t}\n+\n+\tn ? rx_enq_block_end_ts(rx_adapter, stats) :\n+\t\trx_enq_block_start_ts(rx_adapter);\n+\n+\tbuf->count -= n;\n+\tstats->rx_enq_count += n;\n+\n+\treturn n;\n+}\n+\n+static inline void\n+fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,\n+\tuint8_t dev_id,\n+\tuint16_t rx_queue_id,\n+\tstruct rte_mbuf **mbufs,\n+\tuint16_t num)\n+{\n+\tuint32_t i;\n+\tstruct eth_device_info *eth_device_info =\n+\t\t\t\t\t&rx_adapter->eth_devices[dev_id];\n+\tstruct eth_rx_queue_info *eth_rx_queue_info =\n+\t\t\t\t\t&eth_device_info->rx_queue[rx_queue_id];\n+\n+\tint32_t qid = eth_rx_queue_info->event_queue_id;\n+\tuint8_t sched_type = eth_rx_queue_info->sched_type;\n+\tuint8_t priority = eth_rx_queue_info->priority;\n+\tuint32_t flow_id;\n+\tstruct rte_event events[BATCH_SIZE];\n+\tstruct rte_mbuf *m = mbufs[0];\n+\tuint32_t rss_mask;\n+\tuint32_t rss;\n+\tint do_rss;\n+\n+\t/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */\n+\trss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);\n+\tdo_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tm = mbufs[i];\n+\t\tstruct rte_event *ev = &events[i];\n+\n+\t\trss = do_rss ? 
do_softrss(m) : m->hash.rss;\n+\t\tflow_id =\n+\t\t    eth_rx_queue_info->flow_id &\n+\t\t\t\teth_rx_queue_info->flow_id_mask;\n+\t\tflow_id |= rss & ~eth_rx_queue_info->flow_id_mask;\n+\n+\t\tev->flow_id = flow_id;\n+\t\tev->op = RTE_EVENT_OP_NEW;\n+\t\tev->sched_type = sched_type;\n+\t\tev->queue_id = qid;\n+\t\tev->event_type = RTE_EVENT_TYPE_ETHDEV;\n+\t\tev->sub_event_type = 0;\n+\t\tev->priority = priority;\n+\t\tev->mbuf = m;\n+\n+\t\tbuf_event_enqueue(rx_adapter, ev);\n+\t}\n+}\n+\n+/*\n+ * Polls receive queues added to the event adapter and enqueues received\n+ * packets to the event device.\n+ *\n+ * The receive code enqueues initially to a temporary buffer, the\n+ * temporary buffer is drained anytime it holds >= BATCH_SIZE packets\n+ *\n+ * If there isn't space available in the temporary buffer, packets from the\n+ * Rx queue arent dequeued from the eth device, this backpressures the\n+ * eth device, in virtual device enviroments this backpressure is relayed to the\n+ * hypervisor's switching layer where adjustments can be made to deal with\n+ * it.\n+ */\n+static inline uint32_t\n+eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tuint32_t num_queue;\n+\tuint16_t n;\n+\tuint32_t nb_rx = 0;\n+\tstruct rte_mbuf *mbufs[BATCH_SIZE];\n+\tstruct rte_eth_event_enqueue_buffer *buf;\n+\tuint32_t wrr_pos;\n+\tuint32_t max_nb_rx;\n+\n+\twrr_pos = rx_adapter->wrr_pos;\n+\tmax_nb_rx = rx_adapter->max_nb_rx;\n+\tbuf = &rx_adapter->event_enqueue_buffer;\n+\tstruct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;\n+\n+\t/* Iterate through a WRR sequence */\n+\tfor (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {\n+\t\tunsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];\n+\t\tuint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;\n+\t\tuint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;\n+\n+\t\t/* Don't do a batch dequeue from the rx queue if there isn't\n+\t\t * enough space in the enqueue buffer.\n+\t\t */\n+\t\tif (buf->count >= BATCH_SIZE)\n+\t\t\tflush_event_buffer(rx_adapter);\n+\t\tif (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))\n+\t\t\tbreak;\n+\n+\t\tstats->rx_poll_count++;\n+\t\tn = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);\n+\n+\t\tif (n) {\n+\t\t\tstats->rx_packets += n;\n+\t\t\t/* The check before rte_eth_rx_burst() ensures that\n+\t\t\t * all n mbufs can be buffered\n+\t\t\t */\n+\t\t\tfill_event_buffer(rx_adapter, d, qid, mbufs, n);\n+\t\t\tnb_rx += n;\n+\t\t\tif (nb_rx > max_nb_rx) {\n+\t\t\t\trx_adapter->wrr_pos =\n+\t\t\t\t    (wrr_pos + 1) % rx_adapter->wrr_len;\n+\t\t\t\treturn nb_rx;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (++wrr_pos == rx_adapter->wrr_len)\n+\t\t\twrr_pos = 0;\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n+static int\n+event_eth_rx_adapter_service_func(void *args)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter = args;\n+\tstruct rte_eth_event_enqueue_buffer *buf;\n+\n+\tbuf = &rx_adapter->event_enqueue_buffer;\n+\tif (!rte_spinlock_trylock(&rx_adapter->rx_lock))\n+\t\treturn 0;\n+\tif (eth_rx_poll(rx_adapter) == 0 && buf->count)\n+\t\tflush_event_buffer(rx_adapter);\n+\trte_spinlock_unlock(&rx_adapter->rx_lock);\n+\treturn 0;\n+}\n+\n+static int\n+rte_event_eth_rx_adapter_init(void)\n+{\n+\tconst char *name = \"rte_event_eth_rx_adapter_array\";\n+\tconst struct rte_memzone *mz;\n+\tunsigned int sz;\n+\tunsigned int rss_key_off;\n+\n+\tsz = sizeof(*rte_event_eth_rx_adapter) *\n+\t    RTE_MAX_EVENT_ETH_RX_ADAPTER_INSTANCE;\n+\tsz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);\n+\trss_key_off = sz;\n+\tsz = 
RTE_ALIGN(sz + sizeof(default_rss_key), RTE_CACHE_LINE_SIZE);\n+\n+\tmz = rte_memzone_lookup(name);\n+\tif (!mz) {\n+\t\tmz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,\n+\t\t\t\t\t\t RTE_CACHE_LINE_SIZE);\n+\t\tif (mz) {\n+\t\t\trte_convert_rss_key((uint32_t *)default_rss_key,\n+\t\t\t    (uint32_t *)(uintptr_t)(mz->addr_64 + rss_key_off),\n+\t\t\t    RTE_DIM(default_rss_key));\n+\t\t} else {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to reserve memzone err = %\"\n+\t\t\t\t\tPRId32, rte_errno);\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\trte_event_eth_rx_adapter = mz->addr;\n+\trss_key_be = (uint8_t *)(mz->addr_64 + rss_key_off);\n+\treturn 0;\n+}\n+\n+static inline struct rte_event_eth_rx_adapter *\n+id_to_rx_adapter(uint8_t id)\n+{\n+\treturn rte_event_eth_rx_adapter ?\n+\t\trte_event_eth_rx_adapter[id] : NULL;\n+}\n+\n+static int\n+default_conf_cb(uint8_t id, uint8_t dev_id,\n+\t\tstruct rte_event_eth_rx_adapter_conf *conf, void *arg)\n+{\n+\tint ret;\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_dev_config dev_conf;\n+\tint started;\n+\tuint8_t port_id;\n+\tstruct rte_event_port_conf *port_conf = arg;\n+\tstruct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tdev_conf = dev->data->dev_conf;\n+\n+\tstarted = dev->data->dev_started;\n+\tif (started)\n+\t\trte_event_dev_stop(dev_id);\n+\tport_id = dev_conf.nb_event_ports;\n+\tdev_conf.nb_event_ports += 1;\n+\tret = rte_event_dev_configure(dev_id, &dev_conf);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to configure event dev %u\\n\",\n+\t\t\t\t\t\tdev_id);\n+\t\t/* Conf. failed, OK to start ? */\n+\t\tif (started)\n+\t\t\trte_event_dev_start(dev_id);\n+\t\treturn ret;\n+\t}\n+\n+\tret = rte_event_port_setup(dev_id, port_id, port_conf);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to setup event port %u\\n\",\n+\t\t\t\t\tport_id);\n+\t} else {\n+\t\tconf->event_port_id = port_id;\n+\t\tconf->max_nb_rx = 128;\n+\t}\n+\n+\tif (started)\n+\t\trte_event_dev_start(dev_id);\n+\treturn ret;\n+}\n+\n+static int\n+init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)\n+{\n+\tint ret;\n+\tstruct rte_service_spec service;\n+\tstruct rte_event_eth_rx_adapter_conf rx_adapter_conf;\n+\n+\tif (rx_adapter->service_inited)\n+\t\treturn 0;\n+\n+\tmemset(&service, 0, sizeof(service));\n+\tsprintf(service.name, RTE_EVENT_ETH_RX_ADAPTER_NAME_FORMAT, id);\n+\tservice.socket_id = rx_adapter->socket_id;\n+\tservice.callback = event_eth_rx_adapter_service_func;\n+\tservice.callback_userdata = rx_adapter;\n+\t/* Service function handles locking for queue add/del updates */\n+\tservice.capabilities = RTE_SERVICE_CAP_MT_SAFE;\n+\tret = rte_service_component_register(&service, &rx_adapter->service_id);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to register service %s err = %\" PRId32,\n+\t\t\tservice.name, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,\n+\t\t&rx_adapter_conf, rx_adapter->conf_arg);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"confguration callback failed err = %\" PRId32,\n+\t\t\tret);\n+\t\tgoto err_done;\n+\t}\n+\trx_adapter->event_port_id = rx_adapter_conf.event_port_id;\n+\trx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;\n+\trx_adapter->service_inited = 1;\n+\treturn 0;\n+\n+err_done:\n+\trte_service_component_unregister(rx_adapter->service_id);\n+\treturn ret;\n+}\n+\n+static void\n+update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tstruct eth_device_info 
*dev_info,\n+\t\tint32_t rx_queue_id,\n+\t\tuint8_t add)\n+{\n+\tstruct eth_rx_queue_info *queue_info;\n+\tint enabled;\n+\tuint16_t i;\n+\n+\tif (!dev_info->rx_queue)\n+\t\treturn;\n+\n+\tif (rx_queue_id == -1) {\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {\n+\t\t\tqueue_info = &dev_info->rx_queue[i];\n+\t\t\tenabled = queue_info->queue_enabled;\n+\t\t\tif (add) {\n+\t\t\t\trx_adapter->nb_queues += !enabled;\n+\t\t\t\tdev_info->nb_dev_queues += !enabled;\n+\t\t\t} else {\n+\t\t\t\trx_adapter->nb_queues -= enabled;\n+\t\t\t\tdev_info->nb_dev_queues -= enabled;\n+\t\t\t}\n+\t\t\tqueue_info->queue_enabled = !!add;\n+\t\t}\n+\t} else {\n+\t\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\t\tenabled = queue_info->queue_enabled;\n+\t\tif (add) {\n+\t\t\trx_adapter->nb_queues += !enabled;\n+\t\t\tdev_info->nb_dev_queues += !enabled;\n+\t\t} else {\n+\t\t\trx_adapter->nb_queues -= enabled;\n+\t\t\tdev_info->nb_dev_queues -= enabled;\n+\t\t}\n+\t\tqueue_info->queue_enabled = !!add;\n+\t}\n+}\n+\n+static int\n+_rte_event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t\t\t    struct eth_device_info *dev_info,\n+\t\t\t\t    uint16_t rx_queue_id)\n+{\n+\tstruct eth_rx_queue_info *queue_info;\n+\n+\tif (!rx_adapter->nb_queues)\n+\t\treturn 0;\n+\n+\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\trx_adapter->num_rx_polled -= queue_info->queue_enabled;\n+\tupdate_queue_info(rx_adapter, dev_info, rx_queue_id, 0);\n+\treturn 0;\n+}\n+\n+static void\n+_rte_event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tstruct eth_device_info *dev_info,\n+\t\tuint16_t rx_queue_id,\n+\t\tconst struct rte_event_eth_rx_adapter_queue_conf *conf)\n+\n+{\n+\tstruct eth_rx_queue_info *queue_info;\n+\tconst struct rte_event *ev = &conf->ev;\n+\n+\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\tqueue_info->event_queue_id = ev->queue_id;\n+\tqueue_info->sched_type = ev->sched_type;\n+\tqueue_info->priority = ev->priority;\n+\tqueue_info->wt = conf->servicing_weight;\n+\n+\tif (conf->\n+\t    rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {\n+\t\tqueue_info->flow_id = ev->flow_id;\n+\t\tqueue_info->flow_id_mask = ~0;\n+\t}\n+\n+\t/* The same queue can be added more than once */\n+\trx_adapter->num_rx_polled += !queue_info->queue_enabled;\n+\tupdate_queue_info(rx_adapter, dev_info, rx_queue_id, 1);\n+}\n+\n+static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tuint8_t eth_dev_id,\n+\t\tint rx_queue_id,\n+\t\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tstruct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\tuint32_t i;\n+\tint ret;\n+\n+\tif (queue_conf->servicing_weight == 0) {\n+\t\tstruct rte_event_eth_rx_adapter_queue_conf temp_conf;\n+\n+\t\tstruct rte_eth_dev_data *data = dev_info->dev->data;\n+\t\tif (data->dev_conf.intr_conf.rxq) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"Interrupt driven queues\"\n+\t\t\t\t\t\" not supported\");\n+\t\t\treturn -ENOTSUP;\n+\t\t}\n+\t\ttemp_conf = *queue_conf;\n+\t\ttemp_conf.servicing_weight = 1;\n+\t\t/* If Rx interrupts are disabled set wt = 1 */\n+\t\tqueue_conf = &temp_conf;\n+\t}\n+\n+\tif (!dev_info->rx_queue) {\n+\t\tdev_info->rx_queue =\n+\t\t    rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t       dev_info->dev->data->nb_rx_queues *\n+\t\t\t\t       sizeof(struct eth_rx_queue_info), 0,\n+\t\t\t\t       rx_adapter->socket_id);\n+\t\tif (!dev_info->rx_queue)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (rx_queue_id == -1) 
{\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n+\t\t\t_rte_event_eth_rx_adapter_queue_add(rx_adapter,\n+\t\t\t\t\t\t\tdev_info, i,\n+\t\t\t\t\t\t\tqueue_conf);\n+\t} else {\n+\t\t_rte_event_eth_rx_adapter_queue_add(rx_adapter, dev_info,\n+\t\t\t\t\t\t  (uint16_t)rx_queue_id,\n+\t\t\t\t\t\t  queue_conf);\n+\t}\n+\n+\tret = eth_poll_wrr_calc(rx_adapter);\n+\tif (ret) {\n+\t\t_rte_event_eth_rx_adapter_queue_del(rx_adapter,\n+\t\t\t\t\tdev_info, rx_queue_id);\n+\t\treturn ret;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+_rx_adapter_ctrl(struct rte_event_eth_rx_adapter *rx_adapter, int start)\n+{\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t i;\n+\tint use_service = 0;\n+\tint stop = !start;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\n+\tfor (i = 0; i < rte_eth_dev_count(); i++) {\n+\t\tdev_info = &rx_adapter->eth_devices[i];\n+\t\t/* if start  check for num dev queues */\n+\t\tif (start && !dev_info->nb_dev_queues)\n+\t\t\tcontinue;\n+\t\t/* if stop check if dev has been started */\n+\t\tif (stop && !dev_info->dev_rx_started)\n+\t\t\tcontinue;\n+\t\tuse_service |= !dev_info->internal_event_port;\n+\t\tdev_info->dev_rx_started = start;\n+\t\tif (!dev_info->internal_event_port)\n+\t\t\tcontinue;\n+\t\tstart ? (*dev->dev_ops->eth_rx_adapter_start)(dev, i) :\n+\t\t\t(*dev->dev_ops->eth_rx_adapter_stop)(dev, i);\n+\t}\n+\n+\tif (use_service)\n+\t\trte_service_runstate_set(rx_adapter->service_id, start);\n+\n+\treturn 0;\n+}\n+\n+static int\n+rx_adapter_ctrl(uint8_t id, int start)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter)\n+\t\treturn -EINVAL;\n+\treturn _rx_adapter_ctrl(rx_adapter, start);\n+}\n+\n+int\n+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,\n+\t\t\t\trx_adapter_conf_cb conf_cb, void *conf_arg)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tint ret;\n+\tint socket_id;\n+\tuint8_t i;\n+\tchar mem_name[sizeof(adapter_mem_name) + 4];\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (!conf_cb)\n+\t\treturn -EINVAL;\n+\n+\tif (rte_event_eth_rx_adapter == NULL) {\n+\t\tret = rte_event_eth_rx_adapter_init();\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter != NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"Eth Rx adapter exists id = %\" PRIu8, id);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tsocket_id = rte_event_dev_socket_id(dev_id);\n+\tsnprintf(mem_name, sizeof(adapter_mem_name) + 4, \"%s%d\",\n+\t\tadapter_mem_name, id);\n+\n+\trx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),\n+\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rx_adapter == NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for rx adapter\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trx_adapter->eventdev_id = dev_id;\n+\trx_adapter->socket_id = socket_id;\n+\trx_adapter->conf_cb = conf_cb;\n+\trx_adapter->conf_arg = conf_arg;\n+\tstrcpy(rx_adapter->mem_name, mem_name);\n+\trx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\trte_eth_dev_count() *\n+\t\t\t\t\tsizeof(struct eth_device_info), 0,\n+\t\t\t\t\tsocket_id);\n+\tif (rx_adapter->eth_devices == NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for eth devices\\n\");\n+\t\trte_free(rx_adapter);\n+\t\treturn -ENOMEM;\n+\t}\n+\trte_spinlock_init(&rx_adapter->rx_lock);\n+\tfor (i = 0; i < 
rte_eth_dev_count(); i++)\n+\t\trx_adapter->eth_devices[i].dev = &rte_eth_devices[i];\n+\n+\trte_event_eth_rx_adapter[id] = rx_adapter;\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,\n+\t\tstruct rte_event_port_conf *port_config)\n+{\n+\tif (!port_config)\n+\t\treturn -EINVAL;\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tcreate_port_conf[id] = *port_config;\n+\treturn rte_event_eth_rx_adapter_create_ext(id, dev_id,\n+\t\t\t\t\tdefault_conf_cb,\n+\t\t\t\t\t&create_port_conf[id]);\n+}\n+\n+int\n+rte_event_eth_rx_adapter_free(uint8_t id)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter)\n+\t\treturn -EINVAL;\n+\n+\tif (rx_adapter->nb_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"%\" PRIu16 \" Rx queues not deleted\",\n+\t\t\t\trx_adapter->nb_queues);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\trte_free(rx_adapter->eth_devices);\n+\trte_free(rx_adapter);\n+\trte_event_eth_rx_adapter[id] = NULL;\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_queue_add(uint8_t id,\n+\t\tuint8_t eth_dev_id,\n+\t\tint32_t rx_queue_id,\n+\t\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tint ret;\n+\tuint32_t rx_adapter_cap;\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tint start_service = 0;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter || !queue_conf)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tret = (*dev->dev_ops->eth_rx_adapter_caps_get)(dev, eth_dev_id,\n+\t\t\t\t\t\t&rx_adapter_cap);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get adapter caps edev %\" PRIu8\n+\t\t\t\"eth port %\" PRIu8, id, eth_dev_id);\n+\t\treturn ret;\n+\t}\n+\n+\tif (!(rx_adapter_cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_FLOW_ID) &&\n+\t\t!(queue_conf->rx_queue_flags &\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Flow ID required for configuration,\"\n+\t\t\t\t\" eth port: %\" PRIu8 \" adapter id: %\" PRIu8,\n+\t\t\t\teth_dev_id, id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif ((rx_adapter_cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_SINGLE_EVENTQ) &&\n+\t\t(rx_queue_id != -1)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Rx queues can only be connected to single \"\n+\t\t\t\"event queue id %u eth port %u\", id, eth_dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (rx_queue_id != -1 && (uint16_t)rx_queue_id >=\n+\t\t\trte_eth_devices[eth_dev_id].data->nb_rx_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid rx queue_id %\" PRIu16,\n+\t\t\t (uint16_t)rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tstart_service = 0;\n+\tdev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\n+\tif (rx_adapter_cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,\n+\t\t\t\t\t-ENOTSUP);\n+\t\tif (!dev_info->rx_queue) {\n+\t\t\tdev_info->rx_queue =\n+\t\t\t    rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\tdev_info->dev->data->nb_rx_queues *\n+\t\t\t\t\tsizeof(struct eth_rx_queue_info), 0,\n+\t\t\t\t\trx_adapter->socket_id);\n+\t\t\tif (!dev_info->rx_queue)\n+\t\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev, eth_dev_id,\n+\t\t\t\trx_queue_id, queue_conf);\n+\t\tif (!ret) 
{\n+\t\t\tupdate_queue_info(rx_adapter,\n+\t\t\t\t\t&rx_adapter->eth_devices[eth_dev_id],\n+\t\t\t\t\trx_queue_id,\n+\t\t\t\t\t1);\n+\t\t}\n+\t} else {\n+\t\trte_spinlock_lock(&rx_adapter->rx_lock);\n+\t\tret = init_service(rx_adapter, id);\n+\t\tif (!ret)\n+\t\t\tret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,\n+\t\t\t\t\tqueue_conf);\n+\t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n+\t\tif (!ret)\n+\t\t\tstart_service = !!sw_rx_adapter_queue_count(rx_adapter);\n+\t}\n+\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (start_service)\n+\t\trte_service_component_runstate_set(rx_adapter->service_id, 1);\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,\n+\t\t\t\tint32_t rx_queue_id)\n+{\n+\tint ret = 0;\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t rx_adapter_cap;\n+\tuint16_t i;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tret = dev->dev_ops->eth_rx_adapter_caps_get(dev, eth_dev_id,\n+\t\t\t\t\t\t&rx_adapter_cap);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (rx_queue_id != -1 && (uint16_t)rx_queue_id >=\n+\t\trte_eth_devices[eth_dev_id].data->nb_rx_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid rx queue_id %\" PRIu16,\n+\t\t\t (uint16_t)rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\n+\tif (rx_adapter_cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,\n+\t\t\t\t -ENOTSUP);\n+\t\tret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev, eth_dev_id,\n+\t\t\t\t\t\t\trx_queue_id);\n+\t\tif (!ret) {\n+\t\t\tupdate_queue_info(rx_adapter,\n+\t\t\t\t\t&rx_adapter->eth_devices[eth_dev_id],\n+\t\t\t\t\trx_queue_id,\n+\t\t\t\t\t0);\n+\t\t\tif (!dev_info->nb_dev_queues) {\n+\t\t\t\trte_free(dev_info->rx_queue);\n+\t\t\t\tdev_info->rx_queue = NULL;\n+\t\t\t}\n+\t\t}\n+\t} else {\n+\t\tint rc;\n+\t\trte_spinlock_lock(&rx_adapter->rx_lock);\n+\t\tif (rx_queue_id == -1) {\n+\t\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n+\t\t\t\t_rte_event_eth_rx_adapter_queue_del(rx_adapter,\n+\t\t\t\t\t\t\t\tdev_info,\n+\t\t\t\t\t\t\t\ti);\n+\t\t} else {\n+\t\t\t_rte_event_eth_rx_adapter_queue_del(rx_adapter,\n+\t\t\t\t\t\t\tdev_info,\n+\t\t\t\t\t\t\t(uint16_t)rx_queue_id);\n+\t\t}\n+\n+\t\trc = eth_poll_wrr_calc(rx_adapter);\n+\t\tif (rc)\n+\t\t\tRTE_EDEV_LOG_ERR(\"WRR recalculation failed %\" PRId32,\n+\t\t\t\t\trc);\n+\n+\t\tif (!dev_info->nb_dev_queues) {\n+\t\t\trte_free(dev_info->rx_queue);\n+\t\t\tdev_info->rx_queue = NULL;\n+\t\t}\n+\n+\t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n+\t\trte_service_component_runstate_set(rx_adapter->service_id,\n+\t\t\t\tsw_rx_adapter_queue_count(rx_adapter));\n+\t}\n+\n+\treturn ret;\n+}\n+\n+\n+int\n+rte_event_eth_rx_adapter_start(uint8_t id)\n+{\n+\treturn rx_adapter_ctrl(id, 1);\n+}\n+\n+int\n+rte_event_eth_rx_adapter_stop(uint8_t id)\n+{\n+\treturn rx_adapter_ctrl(id, 0);\n+}\n+\n+int\n+rte_event_eth_rx_adapter_stats_get(uint8_t id,\n+\t\t\t       struct rte_event_eth_rx_adapter_stats *stats)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };\n+\tstruct rte_event_eth_rx_adapter_stats dev_stats;\n+\tstruct rte_eventdev 
*dev;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t i;\n+\tint ret;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter || !stats)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tmemset(stats, 0, sizeof(*stats));\n+\tfor (i = 0; i < rte_eth_dev_count(); i++) {\n+\t\tdev_info = &rx_adapter->eth_devices[i];\n+\t\tif (!dev_info->internal_event_port)\n+\t\t\tcontinue;\n+\t\tret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev, i,\n+\t\t\t\t\t\t&dev_stats);\n+\t\tif (ret)\n+\t\t\tcontinue;\n+\t\tdev_stats_sum.rx_packets += dev_stats.rx_packets;\n+\t\tdev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;\n+\t}\n+\n+\tif (rx_adapter->service_inited)\n+\t\t*stats = rx_adapter->stats;\n+\n+\tstats->rx_packets += dev_stats_sum.rx_packets;\n+\tstats->rx_enq_count += dev_stats_sum.rx_enq_count;\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_stats_reset(uint8_t id)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t i;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tfor (i = 0; i < rte_eth_dev_count(); i++) {\n+\t\tdev_info = &rx_adapter->eth_devices[i];\n+\t\tif (!dev_info->internal_event_port)\n+\t\t\tcontinue;\n+\t\t(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev, i);\n+\t}\n+\n+\tmemset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (!rx_adapter || !service_id)\n+\t\treturn -EINVAL;\n+\n+\tif (rx_adapter->service_inited)\n+\t\t*service_id = rx_adapter->service_id;\n+\n+\treturn rx_adapter->service_inited ? 
0 : -ESRCH;\n+}\ndiff --git a/lib/Makefile b/lib/Makefile\nindex 86caba1..dbe9b3d 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -52,7 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev\n DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf\n DEPDIRS-librte_cryptodev += librte_kvargs\n DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev\n-DEPDIRS-librte_eventdev := librte_eal librte_ring\n+DEPDIRS-librte_eventdev := librte_eal librte_ring librte_hash librte_ether\n DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost\n DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether\n DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash\ndiff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile\nindex 410578a..c404d67 100644\n--- a/lib/librte_eventdev/Makefile\n+++ b/lib/librte_eventdev/Makefile\n@@ -43,6 +43,7 @@ CFLAGS += $(WERROR_FLAGS)\n # library source files\n SRCS-y += rte_eventdev.c\n SRCS-y += rte_event_ring.c\n+SRCS-y += rte_event_eth_rx_adapter.c\n \n # export include files\n SYMLINK-y-include += rte_eventdev.h\n@@ -50,6 +51,7 @@ SYMLINK-y-include += rte_eventdev_pmd.h\n SYMLINK-y-include += rte_eventdev_pmd_pci.h\n SYMLINK-y-include += rte_eventdev_pmd_vdev.h\n SYMLINK-y-include += rte_event_ring.h\n+SYMLINK-y-include += rte_event_eth_rx_adapter.h\n \n # versioning export map\n EXPORT_MAP := rte_eventdev_version.map\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nindex 996b361..e10546f 100644\n--- a/lib/librte_eventdev/rte_eventdev_version.map\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -56,6 +56,15 @@ DPDK_17.08 {\n DPDK_17.11 {\n \tglobal:\n \n+\trte_event_eth_rx_adapter_create_ext;\n+\trte_event_eth_rx_adapter_create;\n+\trte_event_eth_rx_adapter_free;\n+\trte_event_eth_rx_adapter_queue_add;\n+\trte_event_eth_rx_adapter_queue_del;\n+\trte_event_eth_rx_adapter_start;\n+\trte_event_eth_rx_adapter_stop;\n+\trte_event_eth_rx_adapter_stats_get;\n+\trte_event_eth_rx_adapter_stats_reset;\n \trte_event_eth_rx_adapter_caps_get;\n-\n+\trte_event_eth_rx_adapter_service_id_get;\n } DPDK_17.08;\n",
    "prefixes": [
        "dpdk-dev",
        "3/4"
    ]
}
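
The header added by this patch describes the adapter workflow: create the adapter, add ethdev Rx queues with an event configuration, map the adapter's service to a core when a SW service is used, and start the adapter. The following is a minimal, hypothetical usage sketch based only on the declarations in rte_event_eth_rx_adapter.h above; the adapter/eventdev/ethdev identifiers, the target event queue, the scheduling type, and the service-lcore mapping via rte_service_map_lcore_set() are illustrative assumptions, not part of the patch.

    /* Hypothetical Rx adapter setup; identifiers and the service lcore are
     * assumptions for illustration only.
     */
    #include <stdint.h>
    #include <rte_eventdev.h>
    #include <rte_service.h>
    #include <rte_event_eth_rx_adapter.h>

    static int
    setup_rx_adapter(uint8_t adapter_id, uint8_t eventdev_id, uint8_t eth_port_id,
                     struct rte_event_port_conf *port_conf)
    {
        struct rte_event_eth_rx_adapter_queue_conf qconf = {
            .rx_queue_flags = 0,   /* flow_id taken from the RSS hash */
            .servicing_weight = 1, /* equal polling weight for each queue */
            .ev = {
                .queue_id = 0,     /* target event queue (assumed) */
                .sched_type = RTE_SCHED_TYPE_ATOMIC,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
            },
        };
        uint32_t service_id;
        int ret;

        /* Create the adapter; the default conf_cb adds an extra event port */
        ret = rte_event_eth_rx_adapter_create(adapter_id, eventdev_id, port_conf);
        if (ret)
            return ret;

        /* rx_queue_id of -1 adds every Rx queue configured on the ethdev */
        ret = rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1, &qconf);
        if (ret)
            return ret;

        /* If a SW service is used, map it to a service lcore (lcore 1 assumed) */
        if (rte_event_eth_rx_adapter_service_id_get(adapter_id, &service_id) == 0)
            rte_service_map_lcore_set(service_id, 1, 1);

        return rte_event_eth_rx_adapter_start(adapter_id);
    }

Per the queue_add path in the diff, a servicing_weight of zero would either be rejected with -ENOTSUP when Rx interrupts are configured (interrupt-driven queues are unimplemented in this version) or silently promoted to a weight of one.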