get:
Show a patch.

patch:
Partially update a patch; only the supplied fields are changed.

put:
Update a patch.
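
The read-only exchange captured below can be reproduced with any HTTP
client. As a minimal Python sketch using the requests library (the patch
ID is taken from the example below; GET needs no authentication):

    import requests

    # Fetch the patch shown in the capture below; read access is anonymous.
    resp = requests.get("http://patches.dpdk.org/api/patches/29811/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])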

GET /api/patches/29811/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 29811,
    "url": "http://patches.dpdk.org/api/patches/29811/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1507324201-3517-7-git-send-email-nikhil.rao@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1507324201-3517-7-git-send-email-nikhil.rao@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1507324201-3517-7-git-send-email-nikhil.rao@intel.com",
    "date": "2017-10-06T21:10:00",
    "name": "[dpdk-dev,v5,6/7] eventdev: add eth Rx adapter implementation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "6abbbef6ac28aea7a798088f7f8551c57f0bc8ba",
    "submitter": {
        "id": 528,
        "url": "http://patches.dpdk.org/api/people/528/?format=api",
        "name": "Rao, Nikhil",
        "email": "nikhil.rao@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1507324201-3517-7-git-send-email-nikhil.rao@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/29811/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/29811/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 293F81B215;\n\tFri,  6 Oct 2017 14:32:10 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby dpdk.org (Postfix) with ESMTP id AAECD1B1BC\n\tfor <dev@dpdk.org>; Fri,  6 Oct 2017 14:32:06 +0200 (CEST)",
            "from fmsmga005.fm.intel.com ([10.253.24.32])\n\tby fmsmga105.fm.intel.com with ESMTP; 06 Oct 2017 05:32:06 -0700",
            "from unknown (HELO localhost.iind.intel.com) ([10.224.122.216])\n\tby fmsmga005.fm.intel.com with ESMTP; 06 Oct 2017 05:32:04 -0700"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.42,483,1500966000\"; d=\"scan'208\";a=\"159611008\"",
        "From": "Nikhil Rao <nikhil.rao@intel.com>",
        "To": "jerin.jacob@caviumnetworks.com,\n\tbruce.richardson@intel.com",
        "Cc": "dev@dpdk.org",
        "Date": "Sat,  7 Oct 2017 02:40:00 +0530",
        "Message-Id": "<1507324201-3517-7-git-send-email-nikhil.rao@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1507324201-3517-1-git-send-email-nikhil.rao@intel.com>",
        "References": "<1507324201-3517-1-git-send-email-nikhil.rao@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 6/7] eventdev: add eth Rx adapter\n\timplementation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The adapter implementation uses eventdev PMDs to configure the packet\ntransfer if HW support is available and if not, it uses an EAL service\nfunction that reads packets from ethernet Rx queues and injects these\nas events into the event device.\n\nSigned-off-by: Gage Eads <gage.eads@intel.com>\nSigned-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>\nSigned-off-by: Nikhil Rao <nikhil.rao@intel.com>\n---\n lib/librte_eventdev/rte_event_eth_rx_adapter.c | 1237 ++++++++++++++++++++++++\n lib/Makefile                                   |    2 +-\n lib/librte_eventdev/Makefile                   |    1 +\n lib/librte_eventdev/rte_eventdev_version.map   |    9 +\n 4 files changed, 1248 insertions(+), 1 deletion(-)\n create mode 100644 lib/librte_eventdev/rte_event_eth_rx_adapter.c",
    "diff": "diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\nnew file mode 100644\nindex 000000000..0823aee16\n--- /dev/null\n+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n@@ -0,0 +1,1237 @@\n+#include <rte_cycles.h>\n+#include <rte_common.h>\n+#include <rte_dev.h>\n+#include <rte_errno.h>\n+#include <rte_ethdev.h>\n+#include <rte_log.h>\n+#include <rte_malloc.h>\n+#include <rte_service_component.h>\n+#include <rte_thash.h>\n+\n+#include \"rte_eventdev.h\"\n+#include \"rte_eventdev_pmd.h\"\n+#include \"rte_event_eth_rx_adapter.h\"\n+\n+#define BATCH_SIZE\t\t32\n+#define BLOCK_CNT_THRESHOLD\t10\n+#define ETH_EVENT_BUFFER_SIZE\t(4*BATCH_SIZE)\n+\n+#define ETH_RX_ADAPTER_SERVICE_NAME_LEN\t32\n+#define ETH_RX_ADAPTER_MEM_NAME_LEN\t32\n+\n+/*\n+ * There is an instance of this struct per polled Rx queue added to the\n+ * adapter\n+ */\n+struct eth_rx_poll_entry {\n+\t/* Eth port to poll */\n+\tuint8_t eth_dev_id;\n+\t/* Eth rx queue to poll */\n+\tuint16_t eth_rx_qid;\n+};\n+\n+/* Instance per adapter */\n+struct rte_eth_event_enqueue_buffer {\n+\t/* Count of events in this buffer */\n+\tuint16_t count;\n+\t/* Array of events in this buffer */\n+\tstruct rte_event events[ETH_EVENT_BUFFER_SIZE];\n+};\n+\n+struct rte_event_eth_rx_adapter {\n+\t/* RSS key */\n+\tuint8_t rss_key_be[40];\n+\t/* Event device identifier */\n+\tuint8_t eventdev_id;\n+\t/* Per ethernet device structure */\n+\tstruct eth_device_info *eth_devices;\n+\t/* Event port identifier */\n+\tuint8_t event_port_id;\n+\t/* Lock to serialize config updates with service function */\n+\trte_spinlock_t rx_lock;\n+\t/* Max mbufs processed in any service function invocation */\n+\tuint32_t max_nb_rx;\n+\t/* Receive queues that need to be polled */\n+\tstruct eth_rx_poll_entry *eth_rx_poll;\n+\t/* Size of the eth_rx_poll array */\n+\tuint16_t num_rx_polled;\n+\t/* Weighted round robin schedule */\n+\tuint32_t *wrr_sched;\n+\t/* wrr_sched[] size */\n+\tuint32_t wrr_len;\n+\t/* Next entry in wrr[] to begin polling */\n+\tuint32_t wrr_pos;\n+\t/* Event burst buffer */\n+\tstruct rte_eth_event_enqueue_buffer event_enqueue_buffer;\n+\t/* Per adapter stats */\n+\tstruct rte_event_eth_rx_adapter_stats stats;\n+\t/* Block count, counts upto BLOCK_CNT_THRESHOLD */\n+\tuint16_t enq_block_count;\n+\t/* Block start ts */\n+\tuint64_t rx_enq_block_start_ts;\n+\t/* Configuration callback for rte_service configuration */\n+\trte_event_eth_rx_adapter_conf_cb conf_cb;\n+\t/* Configuration callback argument */\n+\tvoid *conf_arg;\n+\t/* Set if  default_cb is being used */\n+\tint default_cb_arg;\n+\t/* Service initialization state */\n+\tuint8_t service_inited;\n+\t/* Total count of Rx queues in adapter */\n+\tuint32_t nb_queues;\n+\t/* Memory allocation name */\n+\tchar mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];\n+\t/* Socket identifier cached from eventdev */\n+\tint socket_id;\n+\t/* Per adapter EAL service */\n+\tuint32_t service_id;\n+} __rte_cache_aligned;\n+\n+/* Per eth device */\n+struct eth_device_info {\n+\tstruct rte_eth_dev *dev;\n+\tstruct eth_rx_queue_info *rx_queue;\n+\t/* Set if ethdev->eventdev packet transfer uses a\n+\t * hardware mechanism\n+\t */\n+\tuint8_t internal_event_port;\n+\t/* Set if the adapter is processing rx queues for\n+\t * this eth device and packet processing has been\n+\t * started, allows for the code to know if the PMD\n+\t * rx_adapter_stop callback needs to be invoked\n+\t */\n+\tuint8_t dev_rx_started;\n+\t/* If nb_dev_queues > 0, the start callback 
will\n+\t * be invoked if not already invoked\n+\t */\n+\tuint16_t nb_dev_queues;\n+};\n+\n+/* Per Rx queue */\n+struct eth_rx_queue_info {\n+\tint queue_enabled;\t/* True if added */\n+\tuint16_t wt;\t\t/* Polling weight */\n+\tuint8_t event_queue_id;\t/* Event queue to enqueue packets to */\n+\tuint8_t sched_type;\t/* Sched type for events */\n+\tuint8_t priority;\t/* Event priority */\n+\tuint32_t flow_id;\t/* App provided flow identifier */\n+\tuint32_t flow_id_mask;\t/* Set to ~0 if app provides flow id else 0 */\n+};\n+\n+static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;\n+\n+static inline int\n+valid_id(uint8_t id)\n+{\n+\treturn id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;\n+}\n+\n+#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \\\n+\tif (!valid_id(id)) { \\\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid eth Rx adapter id = %d\\n\", id); \\\n+\t\treturn retval; \\\n+\t} \\\n+} while (0)\n+\n+static inline int\n+sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\treturn rx_adapter->num_rx_polled;\n+}\n+\n+/* Greatest common divisor */\n+static uint16_t gcd_u16(uint16_t a, uint16_t b)\n+{\n+\tuint16_t r = a % b;\n+\n+\treturn r ? gcd_u16(b, r) : b;\n+}\n+\n+/* Returns the next queue in the polling sequence\n+ *\n+ * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling\n+ */\n+static int\n+wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t unsigned int n, int *cw,\n+\t struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,\n+\t uint16_t gcd, int prev)\n+{\n+\tint i = prev;\n+\tuint16_t w;\n+\n+\twhile (1) {\n+\t\tuint16_t q;\n+\t\tuint8_t d;\n+\n+\t\ti = (i + 1) % n;\n+\t\tif (i == 0) {\n+\t\t\t*cw = *cw - gcd;\n+\t\t\tif (*cw <= 0)\n+\t\t\t\t*cw = max_wt;\n+\t\t}\n+\n+\t\tq = eth_rx_poll[i].eth_rx_qid;\n+\t\td = eth_rx_poll[i].eth_dev_id;\n+\t\tw = rx_adapter->eth_devices[d].rx_queue[q].wt;\n+\n+\t\tif ((int)w >= *cw)\n+\t\t\treturn i;\n+\t}\n+}\n+\n+/* Precalculate WRR polling sequence for all queues in rx_adapter */\n+static int\n+eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tuint8_t d;\n+\tuint16_t q;\n+\tunsigned int i;\n+\n+\t/* Initialize variables for calculation of wrr schedule */\n+\tuint16_t max_wrr_pos = 0;\n+\tunsigned int poll_q = 0;\n+\tuint16_t max_wt = 0;\n+\tuint16_t gcd = 0;\n+\n+\tstruct eth_rx_poll_entry *rx_poll = NULL;\n+\tuint32_t *rx_wrr = NULL;\n+\n+\tif (rx_adapter->num_rx_polled) {\n+\t\tsize_t len = RTE_ALIGN(rx_adapter->num_rx_polled *\n+\t\t\t\tsizeof(*rx_adapter->eth_rx_poll),\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\trx_poll = rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\t     len,\n+\t\t\t\t\t     RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t     rx_adapter->socket_id);\n+\t\tif (rx_poll == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\t/* Generate array of all queues to poll, the size of this\n+\t\t * array is poll_q\n+\t\t */\n+\t\tfor (d = 0; d < rte_eth_dev_count(); d++) {\n+\t\t\tuint16_t nb_rx_queues;\n+\t\t\tstruct eth_device_info *dev_info =\n+\t\t\t\t\t&rx_adapter->eth_devices[d];\n+\t\t\tnb_rx_queues = dev_info->dev->data->nb_rx_queues;\n+\t\t\tif (dev_info->rx_queue == NULL)\n+\t\t\t\tcontinue;\n+\t\t\tfor (q = 0; q < nb_rx_queues; q++) {\n+\t\t\t\tstruct eth_rx_queue_info *queue_info =\n+\t\t\t\t\t&dev_info->rx_queue[q];\n+\t\t\t\tif (queue_info->queue_enabled == 0)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tuint16_t wt = queue_info->wt;\n+\t\t\t\trx_poll[poll_q].eth_dev_id = d;\n+\t\t\t\trx_poll[poll_q].eth_rx_qid = q;\n+\t\t\t\tmax_wrr_pos += wt;\n+\t\t\t\tmax_wt = 
RTE_MAX(max_wt, wt);\n+\t\t\t\tgcd = (gcd) ? gcd_u16(gcd, wt) : wt;\n+\t\t\t\tpoll_q++;\n+\t\t\t}\n+\t\t}\n+\n+\t\tlen = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\trx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\t    len,\n+\t\t\t\t\t    RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t    rx_adapter->socket_id);\n+\t\tif (rx_wrr == NULL) {\n+\t\t\trte_free(rx_poll);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\t/* Generate polling sequence based on weights */\n+\t\tint prev = -1;\n+\t\tint cw = -1;\n+\t\tfor (i = 0; i < max_wrr_pos; i++) {\n+\t\t\trx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,\n+\t\t\t\t\t     rx_poll, max_wt, gcd, prev);\n+\t\t\tprev = rx_wrr[i];\n+\t\t}\n+\t}\n+\n+\trte_free(rx_adapter->eth_rx_poll);\n+\trte_free(rx_adapter->wrr_sched);\n+\n+\trx_adapter->eth_rx_poll = rx_poll;\n+\trx_adapter->wrr_sched = rx_wrr;\n+\trx_adapter->wrr_len = max_wrr_pos;\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,\n+\tstruct ipv6_hdr **ipv6_hdr)\n+{\n+\tstruct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);\n+\tstruct vlan_hdr *vlan_hdr;\n+\n+\t*ipv4_hdr = NULL;\n+\t*ipv6_hdr = NULL;\n+\n+\tswitch (eth_hdr->ether_type) {\n+\tcase RTE_BE16(ETHER_TYPE_IPv4):\n+\t\t*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);\n+\t\tbreak;\n+\n+\tcase RTE_BE16(ETHER_TYPE_IPv6):\n+\t\t*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);\n+\t\tbreak;\n+\n+\tcase RTE_BE16(ETHER_TYPE_VLAN):\n+\t\tvlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);\n+\t\tswitch (vlan_hdr->eth_proto) {\n+\t\tcase RTE_BE16(ETHER_TYPE_IPv4):\n+\t\t\t*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);\n+\t\t\tbreak;\n+\t\tcase RTE_BE16(ETHER_TYPE_IPv6):\n+\t\t\t*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+/* Calculate RSS hash for IPv4/6 */\n+static inline uint32_t\n+do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)\n+{\n+\tuint32_t input_len;\n+\tvoid *tuple;\n+\tstruct rte_ipv4_tuple ipv4_tuple;\n+\tstruct rte_ipv6_tuple ipv6_tuple;\n+\tstruct ipv4_hdr *ipv4_hdr;\n+\tstruct ipv6_hdr *ipv6_hdr;\n+\n+\tmtoip(m, &ipv4_hdr, &ipv6_hdr);\n+\n+\tif (ipv4_hdr) {\n+\t\tipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);\n+\t\tipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);\n+\t\ttuple = &ipv4_tuple;\n+\t\tinput_len = RTE_THASH_V4_L3_LEN;\n+\t} else if (ipv6_hdr) {\n+\t\trte_thash_load_v6_addrs(ipv6_hdr,\n+\t\t\t\t\t(union rte_thash_tuple *)&ipv6_tuple);\n+\t\ttuple = &ipv6_tuple;\n+\t\tinput_len = RTE_THASH_V6_L3_LEN;\n+\t} else\n+\t\treturn 0;\n+\n+\treturn rte_softrss_be(tuple, input_len, rss_key_be);\n+}\n+\n+static inline int\n+rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\treturn !!rx_adapter->enq_block_count;\n+}\n+\n+static inline void\n+rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tif (rx_adapter->rx_enq_block_start_ts)\n+\t\treturn;\n+\n+\trx_adapter->enq_block_count++;\n+\tif (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)\n+\t\treturn;\n+\n+\trx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();\n+}\n+\n+static inline void\n+rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t    struct rte_event_eth_rx_adapter_stats *stats)\n+{\n+\tif (unlikely(!stats->rx_enq_start_ts))\n+\t\tstats->rx_enq_start_ts = rte_get_tsc_cycles();\n+\n+\tif (likely(!rx_enq_blocked(rx_adapter)))\n+\t\treturn;\n+\n+\trx_adapter->enq_block_count = 0;\n+\tif 
(rx_adapter->rx_enq_block_start_ts) {\n+\t\tstats->rx_enq_end_ts = rte_get_tsc_cycles();\n+\t\tstats->rx_enq_block_cycles += stats->rx_enq_end_ts -\n+\t\t    rx_adapter->rx_enq_block_start_ts;\n+\t\trx_adapter->rx_enq_block_start_ts = 0;\n+\t}\n+}\n+\n+/* Add event to buffer, free space check is done prior to calling\n+ * this function\n+ */\n+static inline void\n+buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t  struct rte_event *ev)\n+{\n+\tstruct rte_eth_event_enqueue_buffer *buf =\n+\t    &rx_adapter->event_enqueue_buffer;\n+\trte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));\n+}\n+\n+/* Enqueue buffered events to event device */\n+static inline uint16_t\n+flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tstruct rte_eth_event_enqueue_buffer *buf =\n+\t    &rx_adapter->event_enqueue_buffer;\n+\tstruct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;\n+\n+\tuint16_t n = rte_event_enqueue_burst(rx_adapter->eventdev_id,\n+\t\t\t\t\trx_adapter->event_port_id,\n+\t\t\t\t\tbuf->events,\n+\t\t\t\t\tbuf->count);\n+\tif (n != buf->count) {\n+\t\tmemmove(buf->events,\n+\t\t\t&buf->events[n],\n+\t\t\t(buf->count - n) * sizeof(struct rte_event));\n+\t\tstats->rx_enq_retry++;\n+\t}\n+\n+\tn ? rx_enq_block_end_ts(rx_adapter, stats) :\n+\t\trx_enq_block_start_ts(rx_adapter);\n+\n+\tbuf->count -= n;\n+\tstats->rx_enq_count += n;\n+\n+\treturn n;\n+}\n+\n+static inline void\n+fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,\n+\tuint8_t dev_id,\n+\tuint16_t rx_queue_id,\n+\tstruct rte_mbuf **mbufs,\n+\tuint16_t num)\n+{\n+\tuint32_t i;\n+\tstruct eth_device_info *eth_device_info =\n+\t\t\t\t\t&rx_adapter->eth_devices[dev_id];\n+\tstruct eth_rx_queue_info *eth_rx_queue_info =\n+\t\t\t\t\t&eth_device_info->rx_queue[rx_queue_id];\n+\n+\tint32_t qid = eth_rx_queue_info->event_queue_id;\n+\tuint8_t sched_type = eth_rx_queue_info->sched_type;\n+\tuint8_t priority = eth_rx_queue_info->priority;\n+\tuint32_t flow_id;\n+\tstruct rte_event events[BATCH_SIZE];\n+\tstruct rte_mbuf *m = mbufs[0];\n+\tuint32_t rss_mask;\n+\tuint32_t rss;\n+\tint do_rss;\n+\n+\t/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */\n+\trss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);\n+\tdo_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tm = mbufs[i];\n+\t\tstruct rte_event *ev = &events[i];\n+\n+\t\trss = do_rss ?\n+\t\t\tdo_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;\n+\t\tflow_id =\n+\t\t    eth_rx_queue_info->flow_id &\n+\t\t\t\teth_rx_queue_info->flow_id_mask;\n+\t\tflow_id |= rss & ~eth_rx_queue_info->flow_id_mask;\n+\n+\t\tev->flow_id = flow_id;\n+\t\tev->op = RTE_EVENT_OP_NEW;\n+\t\tev->sched_type = sched_type;\n+\t\tev->queue_id = qid;\n+\t\tev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;\n+\t\tev->sub_event_type = 0;\n+\t\tev->priority = priority;\n+\t\tev->mbuf = m;\n+\n+\t\tbuf_event_enqueue(rx_adapter, ev);\n+\t}\n+}\n+\n+/*\n+ * Polls receive queues added to the event adapter and enqueues received\n+ * packets to the event device.\n+ *\n+ * The receive code enqueues initially to a temporary buffer, the\n+ * temporary buffer is drained anytime it holds >= BATCH_SIZE packets\n+ *\n+ * If there isn't space available in the temporary buffer, packets from the\n+ * Rx queue aren't dequeued from the eth device, this back pressures the\n+ * eth device, in virtual device environments this back pressure is relayed to\n+ * the hypervisor's switching layer where adjustments can be made 
to deal with\n+ * it.\n+ */\n+static inline uint32_t\n+eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)\n+{\n+\tuint32_t num_queue;\n+\tuint16_t n;\n+\tuint32_t nb_rx = 0;\n+\tstruct rte_mbuf *mbufs[BATCH_SIZE];\n+\tstruct rte_eth_event_enqueue_buffer *buf;\n+\tuint32_t wrr_pos;\n+\tuint32_t max_nb_rx;\n+\n+\twrr_pos = rx_adapter->wrr_pos;\n+\tmax_nb_rx = rx_adapter->max_nb_rx;\n+\tbuf = &rx_adapter->event_enqueue_buffer;\n+\tstruct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;\n+\n+\t/* Iterate through a WRR sequence */\n+\tfor (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {\n+\t\tunsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];\n+\t\tuint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;\n+\t\tuint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;\n+\n+\t\t/* Don't do a batch dequeue from the rx queue if there isn't\n+\t\t * enough space in the enqueue buffer.\n+\t\t */\n+\t\tif (buf->count >= BATCH_SIZE)\n+\t\t\tflush_event_buffer(rx_adapter);\n+\t\tif (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))\n+\t\t\tbreak;\n+\n+\t\tstats->rx_poll_count++;\n+\t\tn = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);\n+\n+\t\tif (n) {\n+\t\t\tstats->rx_packets += n;\n+\t\t\t/* The check before rte_eth_rx_burst() ensures that\n+\t\t\t * all n mbufs can be buffered\n+\t\t\t */\n+\t\t\tfill_event_buffer(rx_adapter, d, qid, mbufs, n);\n+\t\t\tnb_rx += n;\n+\t\t\tif (nb_rx > max_nb_rx) {\n+\t\t\t\trx_adapter->wrr_pos =\n+\t\t\t\t    (wrr_pos + 1) % rx_adapter->wrr_len;\n+\t\t\t\treturn nb_rx;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (++wrr_pos == rx_adapter->wrr_len)\n+\t\t\twrr_pos = 0;\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n+static int\n+event_eth_rx_adapter_service_func(void *args)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter = args;\n+\tstruct rte_eth_event_enqueue_buffer *buf;\n+\n+\tbuf = &rx_adapter->event_enqueue_buffer;\n+\tif (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)\n+\t\treturn 0;\n+\tif (eth_rx_poll(rx_adapter) == 0 && buf->count)\n+\t\tflush_event_buffer(rx_adapter);\n+\trte_spinlock_unlock(&rx_adapter->rx_lock);\n+\treturn 0;\n+}\n+\n+static int\n+rte_event_eth_rx_adapter_init(void)\n+{\n+\tconst char *name = \"rte_event_eth_rx_adapter_array\";\n+\tconst struct rte_memzone *mz;\n+\tunsigned int sz;\n+\n+\tsz = sizeof(*event_eth_rx_adapter) *\n+\t    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;\n+\tsz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);\n+\n+\tmz = rte_memzone_lookup(name);\n+\tif (mz == NULL) {\n+\t\tmz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,\n+\t\t\t\t\t\t RTE_CACHE_LINE_SIZE);\n+\t\tif (mz == NULL) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"failed to reserve memzone err = %\"\n+\t\t\t\t\tPRId32, rte_errno);\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\tevent_eth_rx_adapter = mz->addr;\n+\treturn 0;\n+}\n+\n+static inline struct rte_event_eth_rx_adapter *\n+id_to_rx_adapter(uint8_t id)\n+{\n+\treturn event_eth_rx_adapter ?\n+\t\tevent_eth_rx_adapter[id] : NULL;\n+}\n+\n+static int\n+default_conf_cb(uint8_t id, uint8_t dev_id,\n+\t\tstruct rte_event_eth_rx_adapter_conf *conf, void *arg)\n+{\n+\tint ret;\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_dev_config dev_conf;\n+\tint started;\n+\tuint8_t port_id;\n+\tstruct rte_event_port_conf *port_conf = arg;\n+\tstruct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tdev_conf = dev->data->dev_conf;\n+\n+\tstarted = dev->data->dev_started;\n+\tif (started)\n+\t\trte_event_dev_stop(dev_id);\n+\tport_id = 
dev_conf.nb_event_ports;\n+\tdev_conf.nb_event_ports += 1;\n+\tret = rte_event_dev_configure(dev_id, &dev_conf);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to configure event dev %u\\n\",\n+\t\t\t\t\t\tdev_id);\n+\t\tif (started)\n+\t\t\trte_event_dev_start(dev_id);\n+\t\treturn ret;\n+\t}\n+\n+\tret = rte_event_port_setup(dev_id, port_id, port_conf);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to setup event port %u\\n\",\n+\t\t\t\t\tport_id);\n+\t\treturn ret;\n+\t}\n+\n+\tconf->event_port_id = port_id;\n+\tconf->max_nb_rx = 128;\n+\tif (started)\n+\t\trte_event_dev_start(dev_id);\n+\trx_adapter->default_cb_arg = 1;\n+\treturn ret;\n+}\n+\n+static int\n+init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)\n+{\n+\tint ret;\n+\tstruct rte_service_spec service;\n+\tstruct rte_event_eth_rx_adapter_conf rx_adapter_conf;\n+\n+\tif (rx_adapter->service_inited)\n+\t\treturn 0;\n+\n+\tmemset(&service, 0, sizeof(service));\n+\tsnprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,\n+\t\t\"rte_event_eth_rx_adapter_%d\", id);\n+\tservice.socket_id = rx_adapter->socket_id;\n+\tservice.callback = event_eth_rx_adapter_service_func;\n+\tservice.callback_userdata = rx_adapter;\n+\t/* Service function handles locking for queue add/del updates */\n+\tservice.capabilities = RTE_SERVICE_CAP_MT_SAFE;\n+\tret = rte_service_component_register(&service, &rx_adapter->service_id);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to register service %s err = %\" PRId32,\n+\t\t\tservice.name, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,\n+\t\t&rx_adapter_conf, rx_adapter->conf_arg);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"configuration callback failed err = %\" PRId32,\n+\t\t\tret);\n+\t\tgoto err_done;\n+\t}\n+\trx_adapter->event_port_id = rx_adapter_conf.event_port_id;\n+\trx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;\n+\trx_adapter->service_inited = 1;\n+\treturn 0;\n+\n+err_done:\n+\trte_service_component_unregister(rx_adapter->service_id);\n+\treturn ret;\n+}\n+\n+\n+static void\n+update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tstruct eth_device_info *dev_info,\n+\t\tint32_t rx_queue_id,\n+\t\tuint8_t add)\n+{\n+\tstruct eth_rx_queue_info *queue_info;\n+\tint enabled;\n+\tuint16_t i;\n+\n+\tif (dev_info->rx_queue == NULL)\n+\t\treturn;\n+\n+\tif (rx_queue_id == -1) {\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n+\t\t\tupdate_queue_info(rx_adapter, dev_info, i, add);\n+\t} else {\n+\t\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\t\tenabled = queue_info->queue_enabled;\n+\t\tif (add) {\n+\t\t\trx_adapter->nb_queues += !enabled;\n+\t\t\tdev_info->nb_dev_queues += !enabled;\n+\t\t} else {\n+\t\t\trx_adapter->nb_queues -= enabled;\n+\t\t\tdev_info->nb_dev_queues -= enabled;\n+\t\t}\n+\t\tqueue_info->queue_enabled = !!add;\n+\t}\n+}\n+\n+static int\n+event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t\t    struct eth_device_info *dev_info,\n+\t\t\t    uint16_t rx_queue_id)\n+{\n+\tstruct eth_rx_queue_info *queue_info;\n+\n+\tif (rx_adapter->nb_queues == 0)\n+\t\treturn 0;\n+\n+\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\trx_adapter->num_rx_polled -= queue_info->queue_enabled;\n+\tupdate_queue_info(rx_adapter, dev_info, rx_queue_id, 0);\n+\treturn 0;\n+}\n+\n+static void\n+event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tstruct eth_device_info *dev_info,\n+\t\tuint16_t rx_queue_id,\n+\t\tconst struct 
rte_event_eth_rx_adapter_queue_conf *conf)\n+\n+{\n+\tstruct eth_rx_queue_info *queue_info;\n+\tconst struct rte_event *ev = &conf->ev;\n+\n+\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\tqueue_info->event_queue_id = ev->queue_id;\n+\tqueue_info->sched_type = ev->sched_type;\n+\tqueue_info->priority = ev->priority;\n+\tqueue_info->wt = conf->servicing_weight;\n+\n+\tif (conf->rx_queue_flags &\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {\n+\t\tqueue_info->flow_id = ev->flow_id;\n+\t\tqueue_info->flow_id_mask = ~0;\n+\t}\n+\n+\t/* The same queue can be added more than once */\n+\trx_adapter->num_rx_polled += !queue_info->queue_enabled;\n+\tupdate_queue_info(rx_adapter, dev_info, rx_queue_id, 1);\n+}\n+\n+static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tuint8_t eth_dev_id,\n+\t\tint rx_queue_id,\n+\t\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tstruct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\tuint32_t i;\n+\tint ret;\n+\n+\tif (queue_conf->servicing_weight == 0) {\n+\t\tstruct rte_event_eth_rx_adapter_queue_conf temp_conf;\n+\n+\t\tstruct rte_eth_dev_data *data = dev_info->dev->data;\n+\t\tif (data->dev_conf.intr_conf.rxq) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"Interrupt driven queues\"\n+\t\t\t\t\t\" not supported\");\n+\t\t\treturn -ENOTSUP;\n+\t\t}\n+\t\ttemp_conf = *queue_conf;\n+\t\ttemp_conf.servicing_weight = 1;\n+\t\t/* If Rx interrupts are disabled set wt = 1 */\n+\t\tqueue_conf = &temp_conf;\n+\t}\n+\n+\tif (dev_info->rx_queue == NULL) {\n+\t\tdev_info->rx_queue =\n+\t\t    rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t       dev_info->dev->data->nb_rx_queues *\n+\t\t\t\t       sizeof(struct eth_rx_queue_info), 0,\n+\t\t\t\t       rx_adapter->socket_id);\n+\t\tif (dev_info->rx_queue == NULL)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (rx_queue_id == -1) {\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n+\t\t\tevent_eth_rx_adapter_queue_add(rx_adapter,\n+\t\t\t\t\t\tdev_info, i,\n+\t\t\t\t\t\tqueue_conf);\n+\t} else {\n+\t\tevent_eth_rx_adapter_queue_add(rx_adapter, dev_info,\n+\t\t\t\t\t  (uint16_t)rx_queue_id,\n+\t\t\t\t\t  queue_conf);\n+\t}\n+\n+\tret = eth_poll_wrr_calc(rx_adapter);\n+\tif (ret) {\n+\t\tevent_eth_rx_adapter_queue_del(rx_adapter,\n+\t\t\t\t\tdev_info, rx_queue_id);\n+\t\treturn ret;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+rx_adapter_ctrl(uint8_t id, int start)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t i;\n+\tint use_service = 0;\n+\tint stop = !start;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\n+\tfor (i = 0; i < rte_eth_dev_count(); i++) {\n+\t\tdev_info = &rx_adapter->eth_devices[i];\n+\t\t/* if start  check for num dev queues */\n+\t\tif (start && !dev_info->nb_dev_queues)\n+\t\t\tcontinue;\n+\t\t/* if stop check if dev has been started */\n+\t\tif (stop && !dev_info->dev_rx_started)\n+\t\t\tcontinue;\n+\t\tuse_service |= !dev_info->internal_event_port;\n+\t\tdev_info->dev_rx_started = start;\n+\t\tif (dev_info->internal_event_port == 0)\n+\t\t\tcontinue;\n+\t\tstart ? 
(*dev->dev_ops->eth_rx_adapter_start)(dev,\n+\t\t\t\t\t\t&rte_eth_devices[i]) :\n+\t\t\t(*dev->dev_ops->eth_rx_adapter_stop)(dev,\n+\t\t\t\t\t\t&rte_eth_devices[i]);\n+\t}\n+\n+\tif (use_service)\n+\t\trte_service_runstate_set(rx_adapter->service_id, start);\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,\n+\t\t\t\trte_event_eth_rx_adapter_conf_cb conf_cb,\n+\t\t\t\tvoid *conf_arg)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tint ret;\n+\tint socket_id;\n+\tuint8_t i;\n+\tchar mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];\n+\tconst uint8_t default_rss_key[] = {\n+\t\t0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,\n+\t\t0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,\n+\t\t0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,\n+\t\t0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,\n+\t\t0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,\n+\t};\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (conf_cb == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (event_eth_rx_adapter == NULL) {\n+\t\tret = rte_event_eth_rx_adapter_init();\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter != NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"Eth Rx adapter exists id = %\" PRIu8, id);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tsocket_id = rte_event_dev_socket_id(dev_id);\n+\tsnprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,\n+\t\t\"rte_event_eth_rx_adapter_%d\",\n+\t\tid);\n+\n+\trx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),\n+\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rx_adapter == NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for rx adapter\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trx_adapter->eventdev_id = dev_id;\n+\trx_adapter->socket_id = socket_id;\n+\trx_adapter->conf_cb = conf_cb;\n+\trx_adapter->conf_arg = conf_arg;\n+\tstrcpy(rx_adapter->mem_name, mem_name);\n+\trx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\trte_eth_dev_count() *\n+\t\t\t\t\tsizeof(struct eth_device_info), 0,\n+\t\t\t\t\tsocket_id);\n+\trte_convert_rss_key((const uint32_t *)default_rss_key,\n+\t\t\t(uint32_t *)rx_adapter->rss_key_be,\n+\t\t\t    RTE_DIM(default_rss_key));\n+\n+\tif (rx_adapter->eth_devices == NULL) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to get mem for eth devices\\n\");\n+\t\trte_free(rx_adapter);\n+\t\treturn -ENOMEM;\n+\t}\n+\trte_spinlock_init(&rx_adapter->rx_lock);\n+\tfor (i = 0; i < rte_eth_dev_count(); i++)\n+\t\trx_adapter->eth_devices[i].dev = &rte_eth_devices[i];\n+\n+\tevent_eth_rx_adapter[id] = rx_adapter;\n+\tif (conf_cb == default_conf_cb)\n+\t\trx_adapter->default_cb_arg = 1;\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,\n+\t\tstruct rte_event_port_conf *port_config)\n+{\n+\tstruct rte_event_port_conf *pc;\n+\tint ret;\n+\n+\tif (port_config == NULL)\n+\t\treturn -EINVAL;\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\tpc = rte_malloc(NULL, sizeof(*pc), 0);\n+\tif (pc == NULL)\n+\t\treturn -ENOMEM;\n+\t*pc = *port_config;\n+\tret = rte_event_eth_rx_adapter_create_ext(id, dev_id,\n+\t\t\t\t\tdefault_conf_cb,\n+\t\t\t\t\tpc);\n+\tif (ret)\n+\t\trte_free(pc);\n+\treturn ret;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_free(uint8_t id)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter == NULL)\n+\t\treturn 
-EINVAL;\n+\n+\tif (rx_adapter->nb_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"%\" PRIu16 \" Rx queues not deleted\",\n+\t\t\t\trx_adapter->nb_queues);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tif (rx_adapter->default_cb_arg)\n+\t\trte_free(rx_adapter->conf_arg);\n+\trte_free(rx_adapter->eth_devices);\n+\trte_free(rx_adapter);\n+\tevent_eth_rx_adapter[id] = NULL;\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_queue_add(uint8_t id,\n+\t\tuint8_t eth_dev_id,\n+\t\tint32_t rx_queue_id,\n+\t\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n+{\n+\tint ret;\n+\tuint32_t cap;\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tint start_service;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif ((rx_adapter == NULL) || (queue_conf == NULL))\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,\n+\t\t\t\t\t\teth_dev_id,\n+\t\t\t\t\t\t&cap);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get adapter caps edev %\" PRIu8\n+\t\t\t\"eth port %\" PRIu8, id, eth_dev_id);\n+\t\treturn ret;\n+\t}\n+\n+\tif ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0\n+\t\t&& (queue_conf->rx_queue_flags &\n+\t\t\tRTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Flow ID override is not supported,\"\n+\t\t\t\t\" eth port: %\" PRIu8 \" adapter id: %\" PRIu8,\n+\t\t\t\teth_dev_id, id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&\n+\t\t(rx_queue_id != -1)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Rx queues can only be connected to single \"\n+\t\t\t\"event queue id %u eth port %u\", id, eth_dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (rx_queue_id != -1 && (uint16_t)rx_queue_id >=\n+\t\t\trte_eth_devices[eth_dev_id].data->nb_rx_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid rx queue_id %\" PRIu16,\n+\t\t\t (uint16_t)rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tstart_service = 0;\n+\tdev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\n+\tif (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,\n+\t\t\t\t\t-ENOTSUP);\n+\t\tif (dev_info->rx_queue == NULL) {\n+\t\t\tdev_info->rx_queue =\n+\t\t\t    rte_zmalloc_socket(rx_adapter->mem_name,\n+\t\t\t\t\tdev_info->dev->data->nb_rx_queues *\n+\t\t\t\t\tsizeof(struct eth_rx_queue_info), 0,\n+\t\t\t\t\trx_adapter->socket_id);\n+\t\t\tif (dev_info->rx_queue == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,\n+\t\t\t\t&rte_eth_devices[eth_dev_id],\n+\t\t\t\trx_queue_id, queue_conf);\n+\t\tif (ret == 0) {\n+\t\t\tupdate_queue_info(rx_adapter,\n+\t\t\t\t\t&rx_adapter->eth_devices[eth_dev_id],\n+\t\t\t\t\trx_queue_id,\n+\t\t\t\t\t1);\n+\t\t}\n+\t} else {\n+\t\trte_spinlock_lock(&rx_adapter->rx_lock);\n+\t\tret = init_service(rx_adapter, id);\n+\t\tif (ret == 0)\n+\t\t\tret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,\n+\t\t\t\t\tqueue_conf);\n+\t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n+\t\tif (ret == 0)\n+\t\t\tstart_service = !!sw_rx_adapter_queue_count(rx_adapter);\n+\t}\n+\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (start_service)\n+\t\trte_service_component_runstate_set(rx_adapter->service_id, 1);\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t 
eth_dev_id,\n+\t\t\t\tint32_t rx_queue_id)\n+{\n+\tint ret = 0;\n+\tstruct rte_eventdev *dev;\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t cap;\n+\tuint16_t i;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\tRTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,\n+\t\t\t\t\t\teth_dev_id,\n+\t\t\t\t\t\t&cap);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (rx_queue_id != -1 && (uint16_t)rx_queue_id >=\n+\t\trte_eth_devices[eth_dev_id].data->nb_rx_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"Invalid rx queue_id %\" PRIu16,\n+\t\t\t (uint16_t)rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\n+\tif (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {\n+\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,\n+\t\t\t\t -ENOTSUP);\n+\t\tret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,\n+\t\t\t\t\t\t&rte_eth_devices[eth_dev_id],\n+\t\t\t\t\t\trx_queue_id);\n+\t\tif (ret == 0) {\n+\t\t\tupdate_queue_info(rx_adapter,\n+\t\t\t\t\t&rx_adapter->eth_devices[eth_dev_id],\n+\t\t\t\t\trx_queue_id,\n+\t\t\t\t\t0);\n+\t\t\tif (dev_info->nb_dev_queues == 0) {\n+\t\t\t\trte_free(dev_info->rx_queue);\n+\t\t\t\tdev_info->rx_queue = NULL;\n+\t\t\t}\n+\t\t}\n+\t} else {\n+\t\tint rc;\n+\t\trte_spinlock_lock(&rx_adapter->rx_lock);\n+\t\tif (rx_queue_id == -1) {\n+\t\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n+\t\t\t\tevent_eth_rx_adapter_queue_del(rx_adapter,\n+\t\t\t\t\t\t\tdev_info,\n+\t\t\t\t\t\t\ti);\n+\t\t} else {\n+\t\t\tevent_eth_rx_adapter_queue_del(rx_adapter,\n+\t\t\t\t\t\tdev_info,\n+\t\t\t\t\t\t(uint16_t)rx_queue_id);\n+\t\t}\n+\n+\t\trc = eth_poll_wrr_calc(rx_adapter);\n+\t\tif (rc)\n+\t\t\tRTE_EDEV_LOG_ERR(\"WRR recalculation failed %\" PRId32,\n+\t\t\t\t\trc);\n+\n+\t\tif (dev_info->nb_dev_queues == 0) {\n+\t\t\trte_free(dev_info->rx_queue);\n+\t\t\tdev_info->rx_queue = NULL;\n+\t\t}\n+\n+\t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n+\t\trte_service_component_runstate_set(rx_adapter->service_id,\n+\t\t\t\tsw_rx_adapter_queue_count(rx_adapter));\n+\t}\n+\n+\treturn ret;\n+}\n+\n+\n+int\n+rte_event_eth_rx_adapter_start(uint8_t id)\n+{\n+\treturn rx_adapter_ctrl(id, 1);\n+}\n+\n+int\n+rte_event_eth_rx_adapter_stop(uint8_t id)\n+{\n+\treturn rx_adapter_ctrl(id, 0);\n+}\n+\n+int\n+rte_event_eth_rx_adapter_stats_get(uint8_t id,\n+\t\t\t       struct rte_event_eth_rx_adapter_stats *stats)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };\n+\tstruct rte_event_eth_rx_adapter_stats dev_stats;\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t i;\n+\tint ret;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter  == NULL || stats == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tmemset(stats, 0, sizeof(*stats));\n+\tfor (i = 0; i < rte_eth_dev_count(); i++) {\n+\t\tdev_info = &rx_adapter->eth_devices[i];\n+\t\tif (dev_info->internal_event_port == 0 ||\n+\t\t\tdev->dev_ops->eth_rx_adapter_stats_get == NULL)\n+\t\t\tcontinue;\n+\t\tret = 
(*dev->dev_ops->eth_rx_adapter_stats_get)(dev,\n+\t\t\t\t\t\t&rte_eth_devices[i],\n+\t\t\t\t\t\t&dev_stats);\n+\t\tif (ret)\n+\t\t\tcontinue;\n+\t\tdev_stats_sum.rx_packets += dev_stats.rx_packets;\n+\t\tdev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;\n+\t}\n+\n+\tif (rx_adapter->service_inited)\n+\t\t*stats = rx_adapter->stats;\n+\n+\tstats->rx_packets += dev_stats_sum.rx_packets;\n+\tstats->rx_enq_count += dev_stats_sum.rx_enq_count;\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_stats_reset(uint8_t id)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\tstruct rte_eventdev *dev;\n+\tstruct eth_device_info *dev_info;\n+\tuint32_t i;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n+\tfor (i = 0; i < rte_eth_dev_count(); i++) {\n+\t\tdev_info = &rx_adapter->eth_devices[i];\n+\t\tif (dev_info->internal_event_port == 0 ||\n+\t\t\tdev->dev_ops->eth_rx_adapter_stats_reset == NULL)\n+\t\t\tcontinue;\n+\t\t(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,\n+\t\t\t\t\t\t\t&rte_eth_devices[i]);\n+\t}\n+\n+\tmemset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));\n+\treturn 0;\n+}\n+\n+int\n+rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter;\n+\n+\tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n+\n+\trx_adapter = id_to_rx_adapter(id);\n+\tif (rx_adapter == NULL || service_id == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (rx_adapter->service_inited)\n+\t\t*service_id = rx_adapter->service_id;\n+\n+\treturn rx_adapter->service_inited ? 0 : -ESRCH;\n+}\ndiff --git a/lib/Makefile b/lib/Makefile\nindex ccff22c39..7b2173cf5 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -52,7 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev\n DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf\n DEPDIRS-librte_cryptodev += librte_kvargs\n DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev\n-DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ether\n+DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ether librte_hash\n DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost\n DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether\n DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash\ndiff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile\nindex eb1467d56..c404d673f 100644\n--- a/lib/librte_eventdev/Makefile\n+++ b/lib/librte_eventdev/Makefile\n@@ -43,6 +43,7 @@ CFLAGS += $(WERROR_FLAGS)\n # library source files\n SRCS-y += rte_eventdev.c\n SRCS-y += rte_event_ring.c\n+SRCS-y += rte_event_eth_rx_adapter.c\n \n # export include files\n SYMLINK-y-include += rte_eventdev.h\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nindex c181fab95..11a5f21bd 100644\n--- a/lib/librte_eventdev/rte_eventdev_version.map\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -55,5 +55,14 @@ DPDK_17.08 {\n DPDK_17.11 {\n \tglobal:\n \n+\trte_event_eth_rx_adapter_create_ext;\n+\trte_event_eth_rx_adapter_create;\n+\trte_event_eth_rx_adapter_free;\n+\trte_event_eth_rx_adapter_queue_add;\n+\trte_event_eth_rx_adapter_queue_del;\n+\trte_event_eth_rx_adapter_start;\n+\trte_event_eth_rx_adapter_stop;\n+\trte_event_eth_rx_adapter_stats_get;\n+\trte_event_eth_rx_adapter_stats_reset;\n \trte_event_eth_rx_adapter_caps_get;\n } DPDK_17.08;\n",
    "prefixes": [
        "dpdk-dev",
        "v5",
        "6/7"
    ]
}
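
Updating a patch requires an authenticated request from a user with
maintainer rights on the project. A hedged sketch of a partial update via
PATCH (the token value is a placeholder, and the writable fields used
here, "state" and "archived", are assumptions inferred from the Allow
header above rather than confirmed by this capture):

    import requests

    # Partial update; only the supplied fields change. The token value is
    # a placeholder and the writable fields are assumed, not confirmed.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/29811/",
        headers={"Authorization": "Token <api-token>"},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()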