get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
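
For reference, the endpoint can be driven by any HTTP client. Below is a minimal sketch using Python's requests library; the token value is a placeholder, it assumes token authentication is enabled (Patchwork also accepts HTTP basic auth), and write access additionally requires maintainer rights on the project.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "..."  # placeholder; tokens are issued per Patchwork account

# get: show a patch. JSON is returned by default; ?format=api selects the
# browsable rendering reproduced below.
patch = requests.get(f"{BASE}/patches/42028/").json()
print(patch["name"], patch["state"])

# patch: partial update, changing only the supplied fields. State slugs are
# instance-defined; "changes-requested" is the value visible in the sample.
resp = requests.patch(
    f"{BASE}/patches/42028/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "changes-requested", "archived": True},
)
resp.raise_for_status()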

GET /api/patches/42028/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42028,
    "url": "http://patches.dpdk.org/api/patches/42028/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1530496530-112764-2-git-send-email-nikhil.rao@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1530496530-112764-2-git-send-email-nikhil.rao@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1530496530-112764-2-git-send-email-nikhil.rao@intel.com",
    "date": "2018-07-02T01:55:26",
    "name": "[v4,1/5] eventdev: standardize Rx adapter internal function names",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "f68eb8ca3a834f181bc8704c22e1f7967dbfbf4c",
    "submitter": {
        "id": 528,
        "url": "http://patches.dpdk.org/api/people/528/?format=api",
        "name": "Rao, Nikhil",
        "email": "nikhil.rao@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1530496530-112764-2-git-send-email-nikhil.rao@intel.com/mbox/",
    "series": [
        {
            "id": 339,
            "url": "http://patches.dpdk.org/api/series/339/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=339",
            "date": "2018-07-02T01:55:25",
            "name": "eventdev: add interrupt driven queues to Rx adapter",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/339/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42028/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/42028/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 392BA374E;\n\tMon,  2 Jul 2018 03:56:06 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n\tby dpdk.org (Postfix) with ESMTP id D40E82BA5\n\tfor <dev@dpdk.org>; Mon,  2 Jul 2018 03:56:00 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t01 Jul 2018 18:55:58 -0700",
            "from unknown (HELO localhost.localdomain.localdomain)\n\t([10.224.122.193])\n\tby orsmga004.jf.intel.com with ESMTP; 01 Jul 2018 18:55:38 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,297,1526367600\"; d=\"scan'208\";a=\"212679613\"",
        "From": "Nikhil Rao <nikhil.rao@intel.com>",
        "To": "jerin.jacob@caviumnetworks.com",
        "Cc": "nikhil.rao@intel.com,\n\tdev@dpdk.org",
        "Date": "Mon,  2 Jul 2018 07:25:26 +0530",
        "Message-Id": "<1530496530-112764-2-git-send-email-nikhil.rao@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1530496530-112764-1-git-send-email-nikhil.rao@intel.com>",
        "References": "<1530496530-112764-1-git-send-email-nikhil.rao@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v4 1/5] eventdev: standardize Rx adapter internal\n\tfunction names",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add a common prefix to function names and rename\nfew to better match functionality\n\nSigned-off-by: Nikhil Rao <nikhil.rao@intel.com>\nAcked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>\n---\n lib/librte_eventdev/rte_event_eth_rx_adapter.c | 167 ++++++++++++-------------\n 1 file changed, 80 insertions(+), 87 deletions(-)",
    "diff": "diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\nindex ce1f62d..9361d48 100644\n--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n@@ -129,30 +129,30 @@ struct eth_rx_queue_info {\n static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;\n \n static inline int\n-valid_id(uint8_t id)\n+rxa_validate_id(uint8_t id)\n {\n \treturn id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;\n }\n \n #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \\\n-\tif (!valid_id(id)) { \\\n+\tif (!rxa_validate_id(id)) { \\\n \t\tRTE_EDEV_LOG_ERR(\"Invalid eth Rx adapter id = %d\\n\", id); \\\n \t\treturn retval; \\\n \t} \\\n } while (0)\n \n static inline int\n-sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)\n+rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)\n {\n \treturn rx_adapter->num_rx_polled;\n }\n \n /* Greatest common divisor */\n-static uint16_t gcd_u16(uint16_t a, uint16_t b)\n+static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)\n {\n \tuint16_t r = a % b;\n \n-\treturn r ? gcd_u16(b, r) : b;\n+\treturn r ? rxa_gcd_u16(b, r) : b;\n }\n \n /* Returns the next queue in the polling sequence\n@@ -160,7 +160,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling\n  */\n static int\n-wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,\n+rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,\n \t unsigned int n, int *cw,\n \t struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,\n \t uint16_t gcd, int prev)\n@@ -190,7 +190,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \n /* Precalculate WRR polling sequence for all queues in rx_adapter */\n static int\n-eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)\n+rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter)\n {\n \tuint16_t d;\n \tuint16_t q;\n@@ -239,7 +239,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\t\t\trx_poll[poll_q].eth_rx_qid = q;\n \t\t\t\tmax_wrr_pos += wt;\n \t\t\t\tmax_wt = RTE_MAX(max_wt, wt);\n-\t\t\t\tgcd = (gcd) ? gcd_u16(gcd, wt) : wt;\n+\t\t\t\tgcd = (gcd) ? 
rxa_gcd_u16(gcd, wt) : wt;\n \t\t\t\tpoll_q++;\n \t\t\t}\n \t\t}\n@@ -259,7 +259,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\tint prev = -1;\n \t\tint cw = -1;\n \t\tfor (i = 0; i < max_wrr_pos; i++) {\n-\t\t\trx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,\n+\t\t\trx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,\n \t\t\t\t\t     rx_poll, max_wt, gcd, prev);\n \t\t\tprev = rx_wrr[i];\n \t\t}\n@@ -276,7 +276,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static inline void\n-mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,\n+rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,\n \tstruct ipv6_hdr **ipv6_hdr)\n {\n \tstruct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);\n@@ -315,7 +315,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \n /* Calculate RSS hash for IPv4/6 */\n static inline uint32_t\n-do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)\n+rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)\n {\n \tuint32_t input_len;\n \tvoid *tuple;\n@@ -324,7 +324,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \tstruct ipv4_hdr *ipv4_hdr;\n \tstruct ipv6_hdr *ipv6_hdr;\n \n-\tmtoip(m, &ipv4_hdr, &ipv6_hdr);\n+\trxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);\n \n \tif (ipv4_hdr) {\n \t\tipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);\n@@ -343,13 +343,13 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static inline int\n-rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)\n+rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)\n {\n \treturn !!rx_adapter->enq_block_count;\n }\n \n static inline void\n-rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)\n+rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)\n {\n \tif (rx_adapter->rx_enq_block_start_ts)\n \t\treturn;\n@@ -362,13 +362,13 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static inline void\n-rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,\n+rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\t    struct rte_event_eth_rx_adapter_stats *stats)\n {\n \tif (unlikely(!stats->rx_enq_start_ts))\n \t\tstats->rx_enq_start_ts = rte_get_tsc_cycles();\n \n-\tif (likely(!rx_enq_blocked(rx_adapter)))\n+\tif (likely(!rxa_enq_blocked(rx_adapter)))\n \t\treturn;\n \n \trx_adapter->enq_block_count = 0;\n@@ -384,8 +384,8 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n  * this function\n  */\n static inline void\n-buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,\n-\t\t  struct rte_event *ev)\n+rxa_buffer_event(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tstruct rte_event *ev)\n {\n \tstruct rte_eth_event_enqueue_buffer *buf =\n \t    &rx_adapter->event_enqueue_buffer;\n@@ -394,7 +394,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \n /* Enqueue buffered events to event device */\n static inline uint16_t\n-flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)\n+rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)\n {\n \tstruct rte_eth_event_enqueue_buffer *buf =\n \t    &rx_adapter->event_enqueue_buffer;\n@@ -411,8 +411,8 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\tstats->rx_enq_retry++;\n \t}\n \n-\tn ? rx_enq_block_end_ts(rx_adapter, stats) :\n-\t\trx_enq_block_start_ts(rx_adapter);\n+\tn ? 
rxa_enq_block_end_ts(rx_adapter, stats) :\n+\t\trxa_enq_block_start_ts(rx_adapter);\n \n \tbuf->count -= n;\n \tstats->rx_enq_count += n;\n@@ -421,11 +421,11 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static inline void\n-fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,\n-\tuint16_t eth_dev_id,\n-\tuint16_t rx_queue_id,\n-\tstruct rte_mbuf **mbufs,\n-\tuint16_t num)\n+rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tuint16_t eth_dev_id,\n+\t\tuint16_t rx_queue_id,\n+\t\tstruct rte_mbuf **mbufs,\n+\t\tuint16_t num)\n {\n \tuint32_t i;\n \tstruct eth_device_info *eth_device_info =\n@@ -463,7 +463,8 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\tstruct rte_event *ev = &events[i];\n \n \t\trss = do_rss ?\n-\t\t\tdo_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;\n+\t\t\trxa_do_softrss(m, rx_adapter->rss_key_be) :\n+\t\t\tm->hash.rss;\n \t\tflow_id =\n \t\t    eth_rx_queue_info->flow_id &\n \t\t\t\teth_rx_queue_info->flow_id_mask;\n@@ -477,7 +478,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\tev->priority = priority;\n \t\tev->mbuf = m;\n \n-\t\tbuf_event_enqueue(rx_adapter, ev);\n+\t\trxa_buffer_event(rx_adapter, ev);\n \t}\n }\n \n@@ -495,7 +496,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n  * it.\n  */\n static inline void\n-eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)\n+rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)\n {\n \tuint32_t num_queue;\n \tuint16_t n;\n@@ -520,7 +521,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\t * enough space in the enqueue buffer.\n \t\t */\n \t\tif (buf->count >= BATCH_SIZE)\n-\t\t\tflush_event_buffer(rx_adapter);\n+\t\t\trxa_flush_event_buffer(rx_adapter);\n \t\tif (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {\n \t\t\trx_adapter->wrr_pos = wrr_pos;\n \t\t\treturn;\n@@ -534,7 +535,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\t\t/* The check before rte_eth_rx_burst() ensures that\n \t\t\t * all n mbufs can be buffered\n \t\t\t */\n-\t\t\tfill_event_buffer(rx_adapter, d, qid, mbufs, n);\n+\t\t\trxa_buffer_mbufs(rx_adapter, d, qid, mbufs, n);\n \t\t\tnb_rx += n;\n \t\t\tif (nb_rx > max_nb_rx) {\n \t\t\t\trx_adapter->wrr_pos =\n@@ -548,11 +549,11 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t}\n \n \tif (buf->count >= BATCH_SIZE)\n-\t\tflush_event_buffer(rx_adapter);\n+\t\trxa_flush_event_buffer(rx_adapter);\n }\n \n static int\n-event_eth_rx_adapter_service_func(void *args)\n+rxa_service_func(void *args)\n {\n \tstruct rte_event_eth_rx_adapter *rx_adapter = args;\n \n@@ -562,7 +563,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \t\treturn 0;\n \t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n \t}\n-\teth_rx_poll(rx_adapter);\n+\trxa_poll(rx_adapter);\n \trte_spinlock_unlock(&rx_adapter->rx_lock);\n \treturn 0;\n }\n@@ -594,14 +595,14 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static inline struct rte_event_eth_rx_adapter *\n-id_to_rx_adapter(uint8_t id)\n+rxa_id_to_adapter(uint8_t id)\n {\n \treturn event_eth_rx_adapter ?\n \t\tevent_eth_rx_adapter[id] : NULL;\n }\n \n static int\n-default_conf_cb(uint8_t id, uint8_t dev_id,\n+rxa_default_conf_cb(uint8_t id, uint8_t dev_id,\n \t\tstruct rte_event_eth_rx_adapter_conf *conf, void *arg)\n {\n \tint ret;\n@@ -610,7 +611,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \tint started;\n \tuint8_t port_id;\n \tstruct rte_event_port_conf *port_conf = arg;\n-\tstruct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);\n+\tstruct 
rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);\n \n \tdev = &rte_eventdevs[rx_adapter->eventdev_id];\n \tdev_conf = dev->data->dev_conf;\n@@ -647,7 +648,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static int\n-init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)\n+rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)\n {\n \tint ret;\n \tstruct rte_service_spec service;\n@@ -660,7 +661,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \tsnprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,\n \t\t\"rte_event_eth_rx_adapter_%d\", id);\n \tservice.socket_id = rx_adapter->socket_id;\n-\tservice.callback = event_eth_rx_adapter_service_func;\n+\tservice.callback = rxa_service_func;\n \tservice.callback_userdata = rx_adapter;\n \t/* Service function handles locking for queue add/del updates */\n \tservice.capabilities = RTE_SERVICE_CAP_MT_SAFE;\n@@ -688,9 +689,8 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \treturn ret;\n }\n \n-\n static void\n-update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,\n+rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\tstruct eth_device_info *dev_info,\n \t\tint32_t rx_queue_id,\n \t\tuint8_t add)\n@@ -704,7 +704,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \n \tif (rx_queue_id == -1) {\n \t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n-\t\t\tupdate_queue_info(rx_adapter, dev_info, i, add);\n+\t\t\trxa_update_queue(rx_adapter, dev_info, i, add);\n \t} else {\n \t\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n \t\tenabled = queue_info->queue_enabled;\n@@ -720,9 +720,9 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n }\n \n static int\n-event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,\n-\t\t\t    struct eth_device_info *dev_info,\n-\t\t\t    uint16_t rx_queue_id)\n+rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,\n+\tstruct eth_device_info *dev_info,\n+\tuint16_t rx_queue_id)\n {\n \tstruct eth_rx_queue_info *queue_info;\n \n@@ -731,15 +731,15 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \n \tqueue_info = &dev_info->rx_queue[rx_queue_id];\n \trx_adapter->num_rx_polled -= queue_info->queue_enabled;\n-\tupdate_queue_info(rx_adapter, dev_info, rx_queue_id, 0);\n+\trxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);\n \treturn 0;\n }\n \n static void\n-event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,\n-\t\tstruct eth_device_info *dev_info,\n-\t\tuint16_t rx_queue_id,\n-\t\tconst struct rte_event_eth_rx_adapter_queue_conf *conf)\n+rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n+\tstruct eth_device_info *dev_info,\n+\tuint16_t rx_queue_id,\n+\tconst struct rte_event_eth_rx_adapter_queue_conf *conf)\n \n {\n \tstruct eth_rx_queue_info *queue_info;\n@@ -759,10 +759,10 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)\n \n \t/* The same queue can be added more than once */\n \trx_adapter->num_rx_polled += !queue_info->queue_enabled;\n-\tupdate_queue_info(rx_adapter, dev_info, rx_queue_id, 1);\n+\trxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);\n }\n \n-static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n+static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\tuint16_t eth_dev_id,\n \t\tint rx_queue_id,\n \t\tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n@@ -799,19 +799,15 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \n \tif (rx_queue_id == -1) {\n \t\tfor (i = 0; i 
< dev_info->dev->data->nb_rx_queues; i++)\n-\t\t\tevent_eth_rx_adapter_queue_add(rx_adapter,\n-\t\t\t\t\t\tdev_info, i,\n-\t\t\t\t\t\tqueue_conf);\n+\t\t\trxa_add_queue(rx_adapter, dev_info, i, queue_conf);\n \t} else {\n-\t\tevent_eth_rx_adapter_queue_add(rx_adapter, dev_info,\n-\t\t\t\t\t  (uint16_t)rx_queue_id,\n-\t\t\t\t\t  queue_conf);\n+\t\trxa_add_queue(rx_adapter, dev_info, (uint16_t)rx_queue_id,\n+\t\t\tqueue_conf);\n \t}\n \n-\tret = eth_poll_wrr_calc(rx_adapter);\n+\tret = rxa_calc_wrr_sequence(rx_adapter);\n \tif (ret) {\n-\t\tevent_eth_rx_adapter_queue_del(rx_adapter,\n-\t\t\t\t\tdev_info, rx_queue_id);\n+\t\trxa_sw_del(rx_adapter, dev_info, rx_queue_id);\n \t\treturn ret;\n \t}\n \n@@ -819,7 +815,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n }\n \n static int\n-rx_adapter_ctrl(uint8_t id, int start)\n+rxa_ctrl(uint8_t id, int start)\n {\n \tstruct rte_event_eth_rx_adapter *rx_adapter;\n \tstruct rte_eventdev *dev;\n@@ -829,7 +825,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \tint stop = !start;\n \n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter == NULL)\n \t\treturn -EINVAL;\n \n@@ -892,7 +888,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\t\treturn ret;\n \t}\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter != NULL) {\n \t\tRTE_EDEV_LOG_ERR(\"Eth Rx adapter exists id = %\" PRIu8, id);\n \t\treturn -EEXIST;\n@@ -934,7 +930,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\trx_adapter->eth_devices[i].dev = &rte_eth_devices[i];\n \n \tevent_eth_rx_adapter[id] = rx_adapter;\n-\tif (conf_cb == default_conf_cb)\n+\tif (conf_cb == rxa_default_conf_cb)\n \t\trx_adapter->default_cb_arg = 1;\n \treturn 0;\n }\n@@ -955,7 +951,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\treturn -ENOMEM;\n \t*pc = *port_config;\n \tret = rte_event_eth_rx_adapter_create_ext(id, dev_id,\n-\t\t\t\t\tdefault_conf_cb,\n+\t\t\t\t\trxa_default_conf_cb,\n \t\t\t\t\tpc);\n \tif (ret)\n \t\trte_free(pc);\n@@ -969,7 +965,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter == NULL)\n \t\treturn -EINVAL;\n \n@@ -1004,7 +1000,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n \tRTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif ((rx_adapter == NULL) || (queue_conf == NULL))\n \t\treturn -EINVAL;\n \n@@ -1063,7 +1059,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\t\t\trx_queue_id, queue_conf);\n \t\tif (ret == 0) {\n \t\t\tdev_info->internal_event_port = 1;\n-\t\t\tupdate_queue_info(rx_adapter,\n+\t\t\trxa_update_queue(rx_adapter,\n \t\t\t\t\t&rx_adapter->eth_devices[eth_dev_id],\n \t\t\t\t\trx_queue_id,\n \t\t\t\t\t1);\n@@ -1071,13 +1067,14 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t} else {\n \t\trte_spinlock_lock(&rx_adapter->rx_lock);\n \t\tdev_info->internal_event_port = 0;\n-\t\tret = init_service(rx_adapter, id);\n+\t\tret = rxa_init_service(rx_adapter, id);\n \t\tif 
(ret == 0)\n-\t\t\tret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,\n+\t\t\tret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,\n \t\t\t\t\tqueue_conf);\n \t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n \t\tif (ret == 0)\n-\t\t\tstart_service = !!sw_rx_adapter_queue_count(rx_adapter);\n+\t\t\tstart_service =\n+\t\t\t\t!!rxa_sw_adapter_queue_count(rx_adapter);\n \t}\n \n \tif (ret)\n@@ -1103,7 +1100,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n \tRTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter == NULL)\n \t\treturn -EINVAL;\n \n@@ -1130,7 +1127,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\t\t\t\t\t&rte_eth_devices[eth_dev_id],\n \t\t\t\t\t\trx_queue_id);\n \t\tif (ret == 0) {\n-\t\t\tupdate_queue_info(rx_adapter,\n+\t\t\trxa_update_queue(rx_adapter,\n \t\t\t\t\t&rx_adapter->eth_devices[eth_dev_id],\n \t\t\t\t\trx_queue_id,\n \t\t\t\t\t0);\n@@ -1144,16 +1141,12 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\trte_spinlock_lock(&rx_adapter->rx_lock);\n \t\tif (rx_queue_id == -1) {\n \t\t\tfor (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)\n-\t\t\t\tevent_eth_rx_adapter_queue_del(rx_adapter,\n-\t\t\t\t\t\t\tdev_info,\n-\t\t\t\t\t\t\ti);\n+\t\t\t\trxa_sw_del(rx_adapter, dev_info, i);\n \t\t} else {\n-\t\t\tevent_eth_rx_adapter_queue_del(rx_adapter,\n-\t\t\t\t\t\tdev_info,\n-\t\t\t\t\t\t(uint16_t)rx_queue_id);\n+\t\t\trxa_sw_del(rx_adapter, dev_info, (uint16_t)rx_queue_id);\n \t\t}\n \n-\t\trc = eth_poll_wrr_calc(rx_adapter);\n+\t\trc = rxa_calc_wrr_sequence(rx_adapter);\n \t\tif (rc)\n \t\t\tRTE_EDEV_LOG_ERR(\"WRR recalculation failed %\" PRId32,\n \t\t\t\t\trc);\n@@ -1165,7 +1158,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \n \t\trte_spinlock_unlock(&rx_adapter->rx_lock);\n \t\trte_service_component_runstate_set(rx_adapter->service_id,\n-\t\t\t\tsw_rx_adapter_queue_count(rx_adapter));\n+\t\t\t\trxa_sw_adapter_queue_count(rx_adapter));\n \t}\n \n \treturn ret;\n@@ -1175,13 +1168,13 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n int\n rte_event_eth_rx_adapter_start(uint8_t id)\n {\n-\treturn rx_adapter_ctrl(id, 1);\n+\treturn rxa_ctrl(id, 1);\n }\n \n int\n rte_event_eth_rx_adapter_stop(uint8_t id)\n {\n-\treturn rx_adapter_ctrl(id, 0);\n+\treturn rxa_ctrl(id, 0);\n }\n \n int\n@@ -1198,7 +1191,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter  == NULL || stats == NULL)\n \t\treturn -EINVAL;\n \n@@ -1236,7 +1229,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter == NULL)\n \t\treturn -EINVAL;\n \n@@ -1261,7 +1254,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \n \tRTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);\n \n-\trx_adapter = id_to_rx_adapter(id);\n+\trx_adapter = rxa_id_to_adapter(id);\n \tif (rx_adapter == NULL || service_id == NULL)\n \t\treturn -EINVAL;\n \n",
    "prefixes": [
        "v4",
        "1/5"
    ]
}
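
The URLs embedded in the response can be fetched in the same way. A minimal follow-on sketch, again assuming the requests library; the output filename is arbitrary, and the per-check fields used (context, state) are the standard Patchwork check fields.

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/42028/").json()

# "mbox" serves the raw patch email, suitable for `git am`.
with open("rx-adapter-rename.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# "check" is the aggregate CI result ("success" above); the "checks" URL
# lists the individual results behind it.
for check in requests.get(patch["checks"]).json():
    print(check.get("context"), check.get("state"))

# Each series a patch belongs to carries its own mbox covering the whole series.
for series in patch["series"]:
    print(series["version"], series["mbox"])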