get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/53700/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53700,
    "url": "http://patches.dpdk.org/api/patches/53700/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190525184346.27932-3-nhorman@tuxdriver.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190525184346.27932-3-nhorman@tuxdriver.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190525184346.27932-3-nhorman@tuxdriver.com",
    "date": "2019-05-25T18:43:46",
    "name": "[RFC,2/2] Convert dpaa driver to tag internal-only symbols appropriately",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "648e80d55362435197e6b5f2208b8fe48ebd6511",
    "submitter": {
        "id": 32,
        "url": "http://patches.dpdk.org/api/people/32/?format=api",
        "name": "Neil Horman",
        "email": "nhorman@tuxdriver.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190525184346.27932-3-nhorman@tuxdriver.com/mbox/",
    "series": [
        {
            "id": 4775,
            "url": "http://patches.dpdk.org/api/series/4775/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4775",
            "date": "2019-05-25T18:43:44",
            "name": "introduce __rte_internal tag",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4775/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53700/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/53700/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id F2DD01B946;\n\tSat, 25 May 2019 20:44:44 +0200 (CEST)",
            "from smtp.tuxdriver.com (charlotte.tuxdriver.com [70.61.120.58])\n\tby dpdk.org (Postfix) with ESMTP id 41A9F4F91\n\tfor <dev@dpdk.org>; Sat, 25 May 2019 20:44:43 +0200 (CEST)",
            "from cpe-2606-a000-111b-405a-0-0-0-162e.dyn6.twc.com\n\t([2606:a000:111b:405a::162e] helo=hmswarspite.think-freely.org)\n\tby smtp.tuxdriver.com with esmtpsa (TLSv1:AES256-SHA:256) (Exim 4.63)\n\t(envelope-from <nhorman@tuxdriver.com>)\n\tid 1hUbeY-00069b-J3; Sat, 25 May 2019 14:44:40 -0400",
            "from hmswarspite.think-freely.org (localhost [127.0.0.1])\n\tby hmswarspite.think-freely.org (8.15.2/8.15.2) with ESMTP id\n\tx4PIhtWj028304; Sat, 25 May 2019 14:43:55 -0400",
            "(from nhorman@localhost)\n\tby hmswarspite.think-freely.org (8.15.2/8.15.2/Submit) id\n\tx4PIhtrS028303; Sat, 25 May 2019 14:43:55 -0400"
        ],
        "From": "Neil Horman <nhorman@tuxdriver.com>",
        "To": "dev@dpdk.org",
        "Cc": "Neil Horman <nhorman@tuxdriver.com>,\n\tJerin Jacob Kollanukkaran <jerinj@marvell.com>,\n\tBruce Richardson <bruce.richardson@intel.com>,\n\tThomas Monjalon <thomas@monjalon.net>",
        "Date": "Sat, 25 May 2019 14:43:46 -0400",
        "Message-Id": "<20190525184346.27932-3-nhorman@tuxdriver.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20190525184346.27932-1-nhorman@tuxdriver.com>",
        "References": "<20190525184346.27932-1-nhorman@tuxdriver.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-Spam-Score": "-2.9 (--)",
        "X-Spam-Status": "No",
        "Subject": "[dpdk-dev] [RFC PATCH 2/2] Convert dpaa driver to tag internal-only\n\tsymbols appropriately",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "make use of the new __rte_internal tag to specify symbols that should\nonly be used by dpdk provided libraries (as specified by the\nBUILDING_RTE_SDK cflag\n\nSigned-off-by: Neil Horman <nhorman@tuxdriver.com>\nCC: Jerin Jacob Kollanukkaran <jerinj@marvell.com>\nCC: Bruce Richardson <bruce.richardson@intel.com>\nCC: Thomas Monjalon <thomas@monjalon.net>\n---\n ...rimental-syms.sh => check-special-syms.sh} |  0\n drivers/bus/dpaa/include/fsl_bman.h           | 12 ++--\n drivers/bus/dpaa/include/fsl_fman.h           | 50 ++++++++--------\n drivers/bus/dpaa/include/fsl_qman.h           | 60 +++++++++----------\n drivers/bus/dpaa/include/fsl_usd.h            | 12 ++--\n drivers/bus/dpaa/include/netcfg.h             |  4 +-\n drivers/bus/dpaa/include/of.h                 |  6 +-\n drivers/bus/dpaa/rte_bus_dpaa_version.map     | 47 ++++++---------\n 8 files changed, 90 insertions(+), 101 deletions(-)\n rename buildtools/{check-experimental-syms.sh => check-special-syms.sh} (100%)",
    "diff": "diff --git a/buildtools/check-experimental-syms.sh b/buildtools/check-special-syms.sh\nsimilarity index 100%\nrename from buildtools/check-experimental-syms.sh\nrename to buildtools/check-special-syms.sh\ndiff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h\nindex 0c74aba44..1835acf16 100644\n--- a/drivers/bus/dpaa/include/fsl_bman.h\n+++ b/drivers/bus/dpaa/include/fsl_bman.h\n@@ -264,13 +264,13 @@ int bman_shutdown_pool(u32 bpid);\n  * the structure provided by the caller can be released or reused after the\n  * function returns.\n  */\n-struct bman_pool *bman_new_pool(const struct bman_pool_params *params);\n+struct bman_pool __rte_internal *bman_new_pool(const struct bman_pool_params *params);\n \n /**\n  * bman_free_pool - Deallocates a Buffer Pool object\n  * @pool: the pool object to release\n  */\n-void bman_free_pool(struct bman_pool *pool);\n+void __rte_internal bman_free_pool(struct bman_pool *pool);\n \n /**\n  * bman_get_params - Returns a pool object's parameters.\n@@ -279,7 +279,7 @@ void bman_free_pool(struct bman_pool *pool);\n  * The returned pointer refers to state within the pool object so must not be\n  * modified and can no longer be read once the pool object is destroyed.\n  */\n-const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);\n+const struct bman_pool_params __rte_internal *bman_get_params(const struct bman_pool *pool);\n \n /**\n  * bman_release - Release buffer(s) to the buffer pool\n@@ -289,7 +289,7 @@ const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);\n  * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options\n  *\n  */\n-int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,\n+int __rte_internal bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,\n \t\t u32 flags);\n \n /**\n@@ -302,7 +302,7 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,\n  * The return value 
will be the number of buffers obtained from the pool, or a\n  * negative error code if a h/w error or pool starvation was encountered.\n  */\n-int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,\n+int __rte_internal bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,\n \t\t u32 flags);\n \n /**\n@@ -317,7 +317,7 @@ int bman_query_pools(struct bm_pool_state *state);\n  *\n  * Return the number of the free buffers\n  */\n-u32 bman_query_free_buffers(struct bman_pool *pool);\n+u32 __rte_internal bman_query_free_buffers(struct bman_pool *pool);\n \n /**\n  * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds\ndiff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h\nindex 1d1ce8671..bd8218b3d 100644\n--- a/drivers/bus/dpaa/include/fsl_fman.h\n+++ b/drivers/bus/dpaa/include/fsl_fman.h\n@@ -43,19 +43,19 @@ struct fm_status_t {\n } __attribute__ ((__packed__));\n \n /* Set MAC address for a particular interface */\n-int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);\n+int __rte_internal fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);\n \n /* Remove a MAC address for a particular interface */\n-void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);\n+void __rte_internal fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);\n \n /* Get the FMAN statistics */\n-void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);\n+void __rte_internal fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);\n \n /* Reset the FMAN statistics */\n-void fman_if_stats_reset(struct fman_if *p);\n+void __rte_internal fman_if_stats_reset(struct fman_if *p);\n \n /* Get all of the FMAN statistics */\n-void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);\n+void __rte_internal fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);\n \n /* Set ignore pause option for a specific 
interface */\n void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);\n@@ -64,33 +64,33 @@ void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);\n void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);\n \n /* Enable/disable Rx promiscuous mode on specified interface */\n-void fman_if_promiscuous_enable(struct fman_if *p);\n-void fman_if_promiscuous_disable(struct fman_if *p);\n+void __rte_internal fman_if_promiscuous_enable(struct fman_if *p);\n+void __rte_internal fman_if_promiscuous_disable(struct fman_if *p);\n \n /* Enable/disable Rx on specific interfaces */\n-void fman_if_enable_rx(struct fman_if *p);\n-void fman_if_disable_rx(struct fman_if *p);\n+void __rte_internal fman_if_enable_rx(struct fman_if *p);\n+void __rte_internal fman_if_disable_rx(struct fman_if *p);\n \n /* Enable/disable loopback on specific interfaces */\n-void fman_if_loopback_enable(struct fman_if *p);\n-void fman_if_loopback_disable(struct fman_if *p);\n+void __rte_internal fman_if_loopback_enable(struct fman_if *p);\n+void __rte_internal fman_if_loopback_disable(struct fman_if *p);\n \n /* Set buffer pool on specific interface */\n-void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,\n+void __rte_internal fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,\n \t\t    size_t bufsize);\n \n /* Get Flow Control threshold parameters on specific interface */\n-int fman_if_get_fc_threshold(struct fman_if *fm_if);\n+int __rte_internal fman_if_get_fc_threshold(struct fman_if *fm_if);\n \n /* Enable and Set Flow Control threshold parameters on specific interface */\n-int fman_if_set_fc_threshold(struct fman_if *fm_if,\n+int __rte_internal fman_if_set_fc_threshold(struct fman_if *fm_if,\n \t\t\tu32 high_water, u32 low_water, u32 bpid);\n \n /* Get Flow Control pause quanta on specific interface */\n-int fman_if_get_fc_quanta(struct fman_if *fm_if);\n+int __rte_internal fman_if_get_fc_quanta(struct fman_if 
*fm_if);\n \n /* Set Flow Control pause quanta on specific interface */\n-int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);\n+int __rte_internal fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);\n \n /* Set default error fqid on specific interface */\n void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);\n@@ -99,36 +99,36 @@ void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);\n int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);\n \n /* Set IC transfer params */\n-int fman_if_set_ic_params(struct fman_if *fm_if,\n+int __rte_internal fman_if_set_ic_params(struct fman_if *fm_if,\n \t\t\t  const struct fman_if_ic_params *icp);\n \n /* Get interface fd->offset value */\n-int fman_if_get_fdoff(struct fman_if *fm_if);\n+int __rte_internal fman_if_get_fdoff(struct fman_if *fm_if);\n \n /* Set interface fd->offset value */\n-void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);\n+void __rte_internal fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);\n \n /* Get interface SG enable status value */\n-int fman_if_get_sg_enable(struct fman_if *fm_if);\n+int __rte_internal fman_if_get_sg_enable(struct fman_if *fm_if);\n \n /* Set interface SG support mode */\n-void fman_if_set_sg(struct fman_if *fm_if, int enable);\n+void __rte_internal fman_if_set_sg(struct fman_if *fm_if, int enable);\n \n /* Get interface Max Frame length (MTU) */\n uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);\n \n /* Set interface  Max Frame length (MTU) */\n-void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);\n+void __rte_internal fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);\n \n /* Set interface next invoked action for dequeue operation */\n void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);\n \n /* discard error packets on rx */\n-void fman_if_discard_rx_errors(struct fman_if *fm_if);\n+void __rte_internal fman_if_discard_rx_errors(struct 
fman_if *fm_if);\n \n-void fman_if_set_mcast_filter_table(struct fman_if *p);\n+void __rte_internal fman_if_set_mcast_filter_table(struct fman_if *p);\n \n-void fman_if_reset_mcast_filter_table(struct fman_if *p);\n+void __rte_internal fman_if_reset_mcast_filter_table(struct fman_if *p);\n \n int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);\n \ndiff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h\nindex e5cccbbea..85c0b9e25 100644\n--- a/drivers/bus/dpaa/include/fsl_qman.h\n+++ b/drivers/bus/dpaa/include/fsl_qman.h\n@@ -1311,7 +1311,7 @@ struct qman_cgr {\n #define QMAN_CGR_MODE_FRAME          0x00000001\n \n #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP\n-void qman_set_fq_lookup_table(void **table);\n+void __rte_internal qman_set_fq_lookup_table(void **table);\n #endif\n \n /**\n@@ -1319,7 +1319,7 @@ void qman_set_fq_lookup_table(void **table);\n  */\n int qman_get_portal_index(void);\n \n-u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,\n+u32 __rte_internal qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,\n \t\t\tvoid **bufs);\n \n /**\n@@ -1330,7 +1330,7 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,\n  * processed via qman_poll_***() functions). Returns zero for success, or\n  * -EINVAL if the current CPU is sharing a portal hosted on another CPU.\n  */\n-int qman_irqsource_add(u32 bits);\n+int __rte_internal qman_irqsource_add(u32 bits);\n \n /**\n  * qman_irqsource_remove - remove processing sources from being interrupt-driven\n@@ -1340,7 +1340,7 @@ int qman_irqsource_add(u32 bits);\n  * instead be processed via qman_poll_***() functions. 
Returns zero for success,\n  * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.\n  */\n-int qman_irqsource_remove(u32 bits);\n+int __rte_internal qman_irqsource_remove(u32 bits);\n \n /**\n  * qman_affine_channel - return the channel ID of an portal\n@@ -1352,7 +1352,7 @@ int qman_irqsource_remove(u32 bits);\n  */\n u16 qman_affine_channel(int cpu);\n \n-unsigned int qman_portal_poll_rx(unsigned int poll_limit,\n+unsigned int __rte_internal qman_portal_poll_rx(unsigned int poll_limit,\n \t\t\t\t void **bufs, struct qman_portal *q);\n \n /**\n@@ -1363,7 +1363,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,\n  *\n  * This function will issue a volatile dequeue command to the QMAN.\n  */\n-int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);\n+int __rte_internal qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);\n \n /**\n  * qman_dequeue - Get the DQRR entry after volatile dequeue command\n@@ -1373,7 +1373,7 @@ int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);\n  * is issued. 
It will keep returning NULL until there is no packet available on\n  * the DQRR.\n  */\n-struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);\n+struct qm_dqrr_entry __rte_internal *qman_dequeue(struct qman_fq *fq);\n \n /**\n  * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue\n@@ -1384,7 +1384,7 @@ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);\n  * This will consume the DQRR enrey and make it available for next volatile\n  * dequeue.\n  */\n-void qman_dqrr_consume(struct qman_fq *fq,\n+void __rte_internal qman_dqrr_consume(struct qman_fq *fq,\n \t\t       struct qm_dqrr_entry *dq);\n \n /**\n@@ -1397,7 +1397,7 @@ void qman_dqrr_consume(struct qman_fq *fq,\n  * this function will return -EINVAL, otherwise the return value is >=0 and\n  * represents the number of DQRR entries processed.\n  */\n-int qman_poll_dqrr(unsigned int limit);\n+int __rte_internal qman_poll_dqrr(unsigned int limit);\n \n /**\n  * qman_poll\n@@ -1443,7 +1443,7 @@ void qman_start_dequeues(void);\n  * (SDQCR). 
The requested pools are limited to those the portal has dequeue\n  * access to.\n  */\n-void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);\n+void __rte_internal qman_static_dequeue_add(u32 pools, struct qman_portal *qm);\n \n /**\n  * qman_static_dequeue_del - Remove pool channels from the portal SDQCR\n@@ -1490,7 +1490,7 @@ void qman_dca(const struct qm_dqrr_entry *dq, int park_request);\n  * function must be called from the same CPU as that which processed the DQRR\n  * entry in the first place.\n  */\n-void qman_dca_index(u8 index, int park_request);\n+void __rte_internal qman_dca_index(u8 index, int park_request);\n \n /**\n  * qman_eqcr_is_empty - Determine if portal's EQCR is empty\n@@ -1547,7 +1547,7 @@ void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);\n  * a frame queue object based on that, rather than assuming/requiring that it be\n  * Out of Service.\n  */\n-int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);\n+int __rte_internal qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);\n \n /**\n  * qman_destroy_fq - Deallocates a FQ\n@@ -1565,7 +1565,7 @@ void qman_destroy_fq(struct qman_fq *fq, u32 flags);\n  * qman_fq_fqid - Queries the frame queue ID of a FQ object\n  * @fq: the frame queue object to query\n  */\n-u32 qman_fq_fqid(struct qman_fq *fq);\n+u32 __rte_internal qman_fq_fqid(struct qman_fq *fq);\n \n /**\n  * qman_fq_state - Queries the state of a FQ object\n@@ -1577,7 +1577,7 @@ u32 qman_fq_fqid(struct qman_fq *fq);\n  * This captures the state, as seen by the driver, at the time the function\n  * executes.\n  */\n-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);\n+void __rte_internal qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);\n \n /**\n  * qman_init_fq - Initialises FQ fields, leaves the FQ \"parked\" or \"scheduled\"\n@@ -1613,7 +1613,7 @@ void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);\n  * context_a.address fields 
and will leave the stashing fields provided by the\n  * user alone, otherwise it will zero out the context_a.stashing fields.\n  */\n-int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);\n+int __rte_internal qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);\n \n /**\n  * qman_schedule_fq - Schedules a FQ\n@@ -1642,7 +1642,7 @@ int qman_schedule_fq(struct qman_fq *fq);\n  * caller should be prepared to accept the callback as the function is called,\n  * not only once it has returned.\n  */\n-int qman_retire_fq(struct qman_fq *fq, u32 *flags);\n+int __rte_internal qman_retire_fq(struct qman_fq *fq, u32 *flags);\n \n /**\n  * qman_oos_fq - Puts a FQ \"out of service\"\n@@ -1651,7 +1651,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);\n  * The frame queue must be retired and empty, and if any order restoration list\n  * was released as ERNs at the time of retirement, they must all be consumed.\n  */\n-int qman_oos_fq(struct qman_fq *fq);\n+int __rte_internal qman_oos_fq(struct qman_fq *fq);\n \n /**\n  * qman_fq_flow_control - Set the XON/XOFF state of a FQ\n@@ -1684,14 +1684,14 @@ int qman_query_fq_has_pkts(struct qman_fq *fq);\n  * @fq: the frame queue object to be queried\n  * @np: storage for the queried FQD fields\n  */\n-int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);\n+int __rte_internal qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);\n \n /**\n  * qman_query_fq_frmcnt - Queries fq frame count\n  * @fq: the frame queue object to be queried\n  * @frm_cnt: number of frames in the queue\n  */\n-int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);\n+int __rte_internal qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);\n \n /**\n  * qman_query_wq - Queries work queue lengths\n@@ -1721,7 +1721,7 @@ int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);\n  * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the\n  * 
\"flags\" retrieved from qman_fq_state().\n  */\n-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);\n+int __rte_internal qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);\n \n /**\n  * qman_enqueue - Enqueue a frame to a frame queue\n@@ -1756,9 +1756,9 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);\n  * of an already busy hardware resource by throttling many of the to-be-dropped\n  * enqueues \"at the source\".\n  */\n-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);\n+int __rte_internal qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);\n \n-int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,\n+int __rte_internal qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,\n \t\t       int frames_to_send);\n \n /**\n@@ -1772,7 +1772,7 @@ int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,\n  * to be processed by different frame queues.\n  */\n int\n-qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,\n+__rte_internal qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,\n \t\t      int frames_to_send);\n \n typedef int (*qman_cb_precommit) (void *arg);\n@@ -1859,7 +1859,7 @@ int qman_shutdown_fq(u32 fqid);\n  * @fqid: the base FQID of the range to deallocate\n  * @count: the number of FQIDs in the range\n  */\n-int qman_reserve_fqid_range(u32 fqid, unsigned int count);\n+int __rte_internal qman_reserve_fqid_range(u32 fqid, unsigned int count);\n static inline int qman_reserve_fqid(u32 fqid)\n {\n \treturn qman_reserve_fqid_range(fqid, 1);\n@@ -1878,7 +1878,7 @@ static inline int qman_reserve_fqid(u32 fqid)\n  * than requested (though alignment will be as requested). 
If @partial is zero,\n  * the return value will either be 'count' or negative.\n  */\n-int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);\n+int __rte_internal qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);\n static inline int qman_alloc_pool(u32 *result)\n {\n \tint ret = qman_alloc_pool_range(result, 1, 0, 0);\n@@ -1925,7 +1925,7 @@ void qman_seed_pool_range(u32 id, unsigned int count);\n  * any unspecified parameters) will be used rather than a modify hw hardware\n  * (which only modifies the specified parameters).\n  */\n-int qman_create_cgr(struct qman_cgr *cgr, u32 flags,\n+int __rte_internal qman_create_cgr(struct qman_cgr *cgr, u32 flags,\n \t\t    struct qm_mcc_initcgr *opts);\n \n /**\n@@ -1947,7 +1947,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,\n  * is executed. This must be excuted on the same affine portal on which it was\n  * created.\n  */\n-int qman_delete_cgr(struct qman_cgr *cgr);\n+int __rte_internal qman_delete_cgr(struct qman_cgr *cgr);\n \n /**\n  * qman_modify_cgr - Modify CGR fields\n@@ -1963,7 +1963,7 @@ int qman_delete_cgr(struct qman_cgr *cgr);\n  * unspecified parameters) will be used rather than a modify hw hardware (which\n  * only modifies the specified parameters).\n  */\n-int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,\n+int __rte_internal qman_modify_cgr(struct qman_cgr *cgr, u32 flags,\n \t\t    struct qm_mcc_initcgr *opts);\n \n /**\n@@ -1991,7 +1991,7 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion);\n  * than requested (though alignment will be as requested). 
If @partial is zero,\n  * the return value will either be 'count' or negative.\n  */\n-int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);\n+int __rte_internal qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);\n static inline int qman_alloc_cgrid(u32 *result)\n {\n \tint ret = qman_alloc_cgrid_range(result, 1, 0, 0);\n@@ -2004,7 +2004,7 @@ static inline int qman_alloc_cgrid(u32 *result)\n  * @id: the base CGR ID of the range to deallocate\n  * @count: the number of CGR IDs in the range\n  */\n-void qman_release_cgrid_range(u32 id, unsigned int count);\n+void __rte_internal qman_release_cgrid_range(u32 id, unsigned int count);\n static inline void qman_release_cgrid(u32 id)\n {\n \tqman_release_cgrid_range(id, 1);\ndiff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h\nindex ec1ab7cee..062c0ce73 100644\n--- a/drivers/bus/dpaa/include/fsl_usd.h\n+++ b/drivers/bus/dpaa/include/fsl_usd.h\n@@ -56,7 +56,7 @@ int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);\n int bman_free_raw_portal(struct dpaa_raw_portal *portal);\n \n /* Obtain thread-local UIO file-descriptors */\n-int qman_thread_fd(void);\n+int __rte_internal qman_thread_fd(void);\n int bman_thread_fd(void);\n \n /* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt\n@@ -64,14 +64,14 @@ int bman_thread_fd(void);\n  * processing is complete. 
As such, it is essential to call this before going\n  * into another blocking read/select/poll.\n  */\n-void qman_thread_irq(void);\n-void bman_thread_irq(void);\n+void __rte_internal qman_thread_irq(void);\n+void __rte_internal bman_thread_irq(void);\n \n-void qman_clear_irq(void);\n+void __rte_internal qman_clear_irq(void);\n \n /* Global setup */\n-int qman_global_init(void);\n-int bman_global_init(void);\n+int __rte_internal qman_global_init(void);\n+int __rte_internal bman_global_init(void);\n \n /* Direct portal create and destroy */\n struct qman_portal *fsl_qman_portal_create(void);\ndiff --git a/drivers/bus/dpaa/include/netcfg.h b/drivers/bus/dpaa/include/netcfg.h\nindex 7818de68b..b9da869ae 100644\n--- a/drivers/bus/dpaa/include/netcfg.h\n+++ b/drivers/bus/dpaa/include/netcfg.h\n@@ -46,12 +46,12 @@ struct netcfg_interface {\n  * cfg_file: FMC config XML file\n  * Returns the configuration information in newly allocated memory.\n  */\n-struct netcfg_info *netcfg_acquire(void);\n+struct netcfg_info __rte_internal *netcfg_acquire(void);\n \n /* cfg_ptr: configuration information pointer.\n  * Frees the resources allocated by the configuration layer.\n  */\n-void netcfg_release(struct netcfg_info *cfg_ptr);\n+void __rte_internal netcfg_release(struct netcfg_info *cfg_ptr);\n \n #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER\n /* cfg_ptr: configuration information pointer.\ndiff --git a/drivers/bus/dpaa/include/of.h b/drivers/bus/dpaa/include/of.h\nindex 7ea7608fc..d1cb2f38f 100644\n--- a/drivers/bus/dpaa/include/of.h\n+++ b/drivers/bus/dpaa/include/of.h\n@@ -87,7 +87,7 @@ struct dt_file {\n \tuint64_t buf[OF_FILE_BUF_MAX >> 3];\n };\n \n-const struct device_node *of_find_compatible_node(\n+const __rte_internal struct device_node *of_find_compatible_node(\n \t\t\t\t\tconst struct device_node *from,\n \t\t\t\t\tconst char *type __always_unused,\n \t\t\t\t\tconst char *compatible)\n@@ -98,7 +98,7 @@ const struct device_node *of_find_compatible_node(\n \t\tdev_node != NULL; 
\\\n \t\tdev_node = of_find_compatible_node(dev_node, type, compatible))\n \n-const void *of_get_property(const struct device_node *from, const char *name,\n+const __rte_internal void *of_get_property(const struct device_node *from, const char *name,\n \t\t\t    size_t *lenp) __attribute__((nonnull(2)));\n bool of_device_is_available(const struct device_node *dev_node);\n \n@@ -109,7 +109,7 @@ const struct device_node *of_get_parent(const struct device_node *dev_node);\n const struct device_node *of_get_next_child(const struct device_node *dev_node,\n \t\t\t\t\t    const struct device_node *prev);\n \n-const void *of_get_mac_address(const struct device_node *np);\n+const void __rte_internal *of_get_mac_address(const struct device_node *np);\n \n #define for_each_child_node(parent, child) \\\n \tfor (child = of_get_next_child(parent, NULL); child != NULL; \\\ndiff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map\nindex c88deaf7f..ac5f0493a 100644\n--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map\n+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map\n@@ -1,4 +1,4 @@\n-DPDK_17.11 {\n+DPDK_INTERNAL {\n \tglobal:\n \n \tbman_acquire;\n@@ -57,17 +57,6 @@ DPDK_17.11 {\n \tqman_set_vdq;\n \tqman_reserve_fqid_range;\n \tqman_volatile_dequeue;\n-\trte_dpaa_driver_register;\n-\trte_dpaa_driver_unregister;\n-\trte_dpaa_mem_ptov;\n-\trte_dpaa_portal_init;\n-\n-\tlocal: *;\n-};\n-\n-DPDK_18.02 {\n-\tglobal:\n-\n \tdpaa_logtype_eventdev;\n \tdpaa_svr_family;\n \tper_lcore_dpaa_io;\n@@ -87,23 +76,10 @@ DPDK_18.02 {\n \tqman_release_cgrid_range;\n \tqman_retire_fq;\n \tqman_static_dequeue_add;\n-\trte_dpaa_portal_fq_close;\n-\trte_dpaa_portal_fq_init;\n-\n-\tlocal: *;\n-} DPDK_17.11;\n-\n-DPDK_18.08 {\n-\tglobal:\n \tfman_if_get_sg_enable;\n \tfman_if_set_sg;\n \tof_get_mac_address;\n \n-\tlocal: *;\n-} DPDK_18.02;\n-\n-DPDK_18.11 {\n-\tglobal:\n \tbman_thread_irq;\n \tfman_if_get_sg_enable;\n \tfman_if_set_sg;\n@@ -113,13 +89,26 @@ DPDK_18.11 
{\n \tqman_irqsource_remove;\n \tqman_thread_fd;\n \tqman_thread_irq;\n+\tqman_set_fq_lookup_table;\n+};\n+\n+DPDK_17.11 {\n+\tglobal:\n+\n+\trte_dpaa_driver_register;\n+\trte_dpaa_driver_unregister;\n+\trte_dpaa_mem_ptov;\n+\trte_dpaa_portal_init;\n \n \tlocal: *;\n-} DPDK_18.08;\n+};\n \n-DPDK_19.05 {\n+DPDK_18.02 {\n \tglobal:\n-\tqman_set_fq_lookup_table;\n+\n+\trte_dpaa_portal_fq_close;\n+\trte_dpaa_portal_fq_init;\n \n \tlocal: *;\n-} DPDK_18.11;\n+} DPDK_17.11;\n+\n",
    "prefixes": [
        "RFC",
        "2/2"
    ]
}