get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed).

put:
Update a patch.

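As a rough illustration of the two operations above, here is a minimal Python sketch using the requests library against the /api/patches/{id}/ endpoint shown below. The API token value, the DRF-style "Token" Authorization scheme, and write permission on the patch are assumptions, not something this page documents; the exact set of writable fields depends on the server's Patchwork version and your role on the project.

# Minimal sketch; TOKEN and the writable-field set are assumptions.
import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical token from your Patchwork profile

# GET: show a patch (read-only; no authentication needed on a public instance)
resp = requests.get(f"{BASE}/patches/3919/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partial update -- send only the fields you want to change.
# Requires maintainer/delegate rights; "state" is one commonly writable field.
resp = requests.patch(
    f"{BASE}/patches/3919/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()
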
GET /api/patches/3919/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 3919,
    "url": "http://patches.dpdk.org/api/patches/3919/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1425695004-29605-6-git-send-email-stephen@networkplumber.org/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1425695004-29605-6-git-send-email-stephen@networkplumber.org>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1425695004-29605-6-git-send-email-stephen@networkplumber.org",
    "date": "2015-03-07T02:23:24",
    "name": "[dpdk-dev,5/5] ixgbe: rename igb_* to ixgbe_*",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a221ea6d0bfa4ba257148fe6b806b28bcb70936f",
    "submitter": {
        "id": 27,
        "url": "http://patches.dpdk.org/api/people/27/?format=api",
        "name": "Stephen Hemminger",
        "email": "stephen@networkplumber.org"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1425695004-29605-6-git-send-email-stephen@networkplumber.org/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/3919/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/3919/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 758779655;\n\tSat,  7 Mar 2015 03:23:39 +0100 (CET)",
            "from mail-pa0-f41.google.com (mail-pa0-f41.google.com\n\t[209.85.220.41]) by dpdk.org (Postfix) with ESMTP id C4B2A6A80\n\tfor <dev@dpdk.org>; Sat,  7 Mar 2015 03:23:36 +0100 (CET)",
            "by pabrd3 with SMTP id rd3so32182229pab.5\n\tfor <dev@dpdk.org>; Fri, 06 Mar 2015 18:23:36 -0800 (PST)",
            "from urahara.brocade.com\n\t(static-50-53-82-155.bvtn.or.frontiernet.net. [50.53.82.155])\n\tby mx.google.com with ESMTPSA id\n\tx4sm10797098pas.40.2015.03.06.18.23.35\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tFri, 06 Mar 2015 18:23:35 -0800 (PST)"
        ],
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=MhWGxW5r+xAXykZVHp/ztJEs0yK8BqReu6O/ZPPPt3c=;\n\tb=K0p3VVI1HA/gbeyx+9M5qAmLvvMnubOUy47HQZE/b4GwkVUbORo/xVFG66kFg9n9Ov\n\tKaF9rvTdhTpOVB0eBQML5yiiqUDUadsnkqBle9UM2PWvxK4gFU5Nlz8Pt5rMG9owwueB\n\tRU6oWkphM6DK8HLsxXtC306T4+lYLivhydar5yGrO9aPEwmDCGttN1JUTharNmLv9ZdF\n\twkFbVAU+/rsOukTsFEDFusy9D3Rx0h0MIY5/DjPXcfWmUF5G5CxT8QNk/kRW7eg9kIPN\n\twZHSk9ivboUhHekQ7XGYFma3gTNn5WpyUP7mZaeXCZQfEpRqcXe9FcML3j1f67b6sRzE\n\tGg7g==",
        "X-Gm-Message-State": "ALoCoQkG0uvKHw6c/TX1n7nDGTqooHqmW1LEPMBVgjKPnBkJcT1Q6UXKxpj4+H49eYSlmP8st/Tv",
        "X-Received": "by 10.66.146.6 with SMTP id sy6mr30098040pab.150.1425695016189; \n\tFri, 06 Mar 2015 18:23:36 -0800 (PST)",
        "From": "Stephen Hemminger <stephen@networkplumber.org>",
        "To": "dev@dpdk.org",
        "Date": "Fri,  6 Mar 2015 18:23:24 -0800",
        "Message-Id": "<1425695004-29605-6-git-send-email-stephen@networkplumber.org>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": "<1425695004-29605-1-git-send-email-stephen@networkplumber.org>",
        "References": "<1425695004-29605-1-git-send-email-stephen@networkplumber.org>",
        "Subject": "[dpdk-dev] [PATCH 5/5] ixgbe: rename igb_* to ixgbe_*",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "To avoid any possible confusion or breakage, rename all the structures\nof ixgbe driver to use ixgbe_ rather than igb_ because igb is a\ndifferent driver.\n\nSigned-off-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n lib/librte_pmd_ixgbe/ixgbe_ethdev.c   |   2 +-\n lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 124 +++++++++++++++++-----------------\n lib/librte_pmd_ixgbe/ixgbe_rxtx.h     |  26 +++----\n lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c |  52 +++++++-------\n 4 files changed, 102 insertions(+), 102 deletions(-)",
    "diff": "diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\nindex e1504f4..5473858 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n@@ -748,7 +748,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,\n \t * RX and TX function.\n \t */\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY){\n-\t\tstruct igb_tx_queue *txq;\n+\t\tstruct ixgbe_tx_queue *txq;\n \t\t/* TX queue function in primary, set by last queue initialized\n \t\t * Tx queue may not initialized by primary process */\n \t\tif (eth_dev->data->tx_queues) {\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\nindex c5ba687..1848a13 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n@@ -122,9 +122,9 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)\n  * Return the total number of buffers freed.\n  */\n static inline int __attribute__((always_inline))\n-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)\n+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)\n {\n-\tstruct igb_tx_entry *txep;\n+\tstruct ixgbe_tx_entry *txep;\n \tuint32_t status;\n \tint i;\n \n@@ -208,11 +208,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)\n  * Copy mbuf pointers to the S/W ring.\n  */\n static inline void\n-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,\n+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,\n \t\t      uint16_t nb_pkts)\n {\n \tvolatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);\n-\tstruct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);\n+\tstruct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);\n \tconst int N_PER_LOOP = 4;\n \tconst int N_PER_LOOP_MASK = N_PER_LOOP-1;\n \tint mainpart, leftover;\n@@ -244,7 +244,7 @@ static inline uint16_t\n tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t     uint16_t nb_pkts)\n {\n-\tstruct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;\n+\tstruct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;\n \tvolatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;\n \tuint16_t n = 0;\n \n@@ -352,7 +352,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n }\n \n static inline void\n-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,\n+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue* txq,\n \t\tvolatile struct ixgbe_adv_tx_context_desc *ctx_txd,\n \t\tuint64_t ol_flags, union ixgbe_tx_offload tx_offload)\n {\n@@ -442,7 +442,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,\n  * or create a new context descriptor.\n  */\n static inline uint32_t\n-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,\n+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,\n \t\tunion ixgbe_tx_offload tx_offload)\n {\n \t/* If match with the current used context */\n@@ -498,9 +498,9 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)\n \n /* Reset transmit descriptors after they have been used */\n static inline int\n-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)\n+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)\n {\n-\tstruct igb_tx_entry *sw_ring = txq->sw_ring;\n+\tstruct ixgbe_tx_entry *sw_ring = txq->sw_ring;\n \tvolatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;\n \tuint16_t last_desc_cleaned = txq->last_desc_cleaned;\n \tuint16_t nb_tx_desc = txq->nb_tx_desc;\n@@ -559,9 +559,9 @@ uint16_t\n ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts)\n {\n-\tstruct igb_tx_queue *txq;\n-\tstruct 
igb_tx_entry *sw_ring;\n-\tstruct igb_tx_entry *txe, *txn;\n+\tstruct ixgbe_tx_queue *txq;\n+\tstruct ixgbe_tx_entry *sw_ring;\n+\tstruct ixgbe_tx_entry *txe, *txn;\n \tvolatile union ixgbe_adv_tx_desc *txr;\n \tvolatile union ixgbe_adv_tx_desc *txd;\n \tstruct rte_mbuf     *tx_pkt;\n@@ -938,10 +938,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)\n #error \"PMD IXGBE: LOOK_AHEAD must be 8\\n\"\n #endif\n static inline int\n-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)\n+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)\n {\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_entry *rxep;\n+\tstruct ixgbe_rx_entry *rxep;\n \tstruct rte_mbuf *mb;\n \tuint16_t pkt_len;\n \tuint64_t pkt_flags;\n@@ -1022,10 +1022,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)\n }\n \n static inline int\n-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)\n+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)\n {\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_entry *rxep;\n+\tstruct ixgbe_rx_entry *rxep;\n \tstruct rte_mbuf *mb;\n \tuint16_t alloc_idx;\n \tuint64_t dma_addr;\n@@ -1071,7 +1071,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)\n }\n \n static inline uint16_t\n-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \t\t\t uint16_t nb_pkts)\n {\n \tstruct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];\n@@ -1095,7 +1095,7 @@ static inline uint16_t\n rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t     uint16_t nb_pkts)\n {\n-\tstruct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;\n+\tstruct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;\n \tuint16_t nb_rx = 0;\n \n \t/* Any previously recv'd pkts will be returned from the Rx stage */\n@@ -1177,11 +1177,11 @@ uint16_t\n ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts)\n {\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tvolatile union ixgbe_adv_rx_desc *rx_ring;\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_entry *sw_ring;\n-\tstruct igb_rx_entry *rxe;\n+\tstruct ixgbe_rx_entry *sw_ring;\n+\tstruct ixgbe_rx_entry *rxe;\n \tstruct rte_mbuf *rxm;\n \tstruct rte_mbuf *nmb;\n \tunion ixgbe_adv_rx_desc rxd;\n@@ -1359,11 +1359,11 @@ uint16_t\n ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\t  uint16_t nb_pkts)\n {\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tvolatile union ixgbe_adv_rx_desc *rx_ring;\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_entry *sw_ring;\n-\tstruct igb_rx_entry *rxe;\n+\tstruct ixgbe_rx_entry *sw_ring;\n+\tstruct ixgbe_rx_entry *rxe;\n \tstruct rte_mbuf *first_seg;\n \tstruct rte_mbuf *last_seg;\n \tstruct rte_mbuf *rxm;\n@@ -1675,7 +1675,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,\n }\n \n static void\n-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)\n+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)\n {\n \tunsigned i;\n \n@@ -1690,7 +1690,7 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)\n }\n \n static void\n-ixgbe_tx_free_swring(struct igb_tx_queue *txq)\n+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)\n {\n \tif (txq != NULL &&\n \t    txq->sw_ring != NULL)\n@@ -1698,7 +1698,7 @@ ixgbe_tx_free_swring(struct igb_tx_queue *txq)\n }\n \n static void\n-ixgbe_tx_queue_release(struct igb_tx_queue *txq)\n+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)\n {\n \tif (txq != 
NULL && txq->ops != NULL) {\n \t\ttxq->ops->release_mbufs(txq);\n@@ -1713,13 +1713,13 @@ ixgbe_dev_tx_queue_release(void *txq)\n \tixgbe_tx_queue_release(txq);\n }\n \n-/* (Re)set dynamic igb_tx_queue fields to defaults */\n+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */\n static void\n-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)\n+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)\n {\n \tstatic const union ixgbe_adv_tx_desc zeroed_desc = { .read = {\n \t\t\t.buffer_addr = 0}};\n-\tstruct igb_tx_entry *txe = txq->sw_ring;\n+\tstruct ixgbe_tx_entry *txe = txq->sw_ring;\n \tuint16_t prev, i;\n \n \t/* Zero out HW ring memory */\n@@ -1765,7 +1765,7 @@ static const struct ixgbe_txq_ops def_txq_ops = {\n  * in dev_init by secondary process when attaching to an existing ethdev.\n  */\n void\n-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)\n+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)\n {\n \t/* Use a simple Tx queue (no offloads, no multi segs) if possible */\n \tif (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)\n@@ -1802,7 +1802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t const struct rte_eth_txconf *tx_conf)\n {\n \tconst struct rte_memzone *tz;\n-\tstruct igb_tx_queue *txq;\n+\tstruct ixgbe_tx_queue *txq;\n \tstruct ixgbe_hw     *hw;\n \tuint16_t tx_rs_thresh, tx_free_thresh;\n \n@@ -1899,7 +1899,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \t/* First allocate the tx queue data structure */\n-\ttxq = rte_zmalloc_socket(\"ethdev TX queue\", sizeof(struct igb_tx_queue),\n+\ttxq = rte_zmalloc_socket(\"ethdev TX queue\", sizeof(struct ixgbe_tx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n \tif (txq == NULL)\n \t\treturn (-ENOMEM);\n@@ -1948,7 +1948,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/* Allocate software ring */\n \ttxq->sw_ring = rte_zmalloc_socket(\"txq->sw_ring\",\n-\t\t\t\tsizeof(struct igb_tx_entry) * nb_desc,\n+\t\t\t\tsizeof(struct ixgbe_tx_entry) * nb_desc,\n \t\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n \tif (txq->sw_ring == NULL) {\n \t\tixgbe_tx_queue_release(txq);\n@@ -1958,7 +1958,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);\n \n \t/* set up vector or scalar TX function as appropriate */\n-\tset_tx_function(dev, txq);\n+\tixgbe_set_tx_function(dev, txq);\n \n \ttxq->ops->reset(txq);\n \n@@ -1969,7 +1969,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n }\n \n static void\n-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)\n+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)\n {\n \tunsigned i;\n \n@@ -1994,7 +1994,7 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)\n }\n \n static void\n-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)\n+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)\n {\n \tif (rxq != NULL) {\n \t\tixgbe_rx_queue_release_mbufs(rxq);\n@@ -2019,9 +2019,9 @@ ixgbe_dev_rx_queue_release(void *rxq)\n  */\n static inline int\n #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC\n-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)\n+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)\n #else\n-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)\n+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)\n #endif\n {\n \tint ret = 0;\n@@ -2071,9 +2071,9 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)\n \treturn 
ret;\n }\n \n-/* Reset dynamic igb_rx_queue fields back to defaults */\n+/* Reset dynamic ixgbe_rx_queue fields back to defaults */\n static void\n-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)\n+ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)\n {\n \tstatic const union ixgbe_adv_rx_desc zeroed_desc = { .read = {\n \t\t\t.pkt_addr = 0}};\n@@ -2137,7 +2137,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t struct rte_mempool *mp)\n {\n \tconst struct rte_memzone *rz;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tstruct ixgbe_hw     *hw;\n \tint use_def_burst_func = 1;\n \tuint16_t len;\n@@ -2163,7 +2163,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \t/* First allocate the rx queue data structure */\n-\trxq = rte_zmalloc_socket(\"ethdev RX queue\", sizeof(struct igb_rx_queue),\n+\trxq = rte_zmalloc_socket(\"ethdev RX queue\", sizeof(struct ixgbe_rx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n \tif (rxq == NULL)\n \t\treturn (-ENOMEM);\n@@ -2230,7 +2230,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \tlen = nb_desc;\n #endif\n \trxq->sw_ring = rte_zmalloc_socket(\"rxq->sw_ring\",\n-\t\t\t\t\t  sizeof(struct igb_rx_entry) * len,\n+\t\t\t\t\t  sizeof(struct ixgbe_rx_entry) * len,\n \t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n \tif (rxq->sw_ring == NULL) {\n \t\tixgbe_rx_queue_release(rxq);\n@@ -2284,7 +2284,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n #define IXGBE_RXQ_SCAN_INTERVAL 4\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tuint32_t desc = 0;\n \n \tif (rx_queue_id >= dev->data->nb_rx_queues) {\n@@ -2311,7 +2311,7 @@ int\n ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)\n {\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_queue *rxq = rx_queue;\n+\tstruct ixgbe_rx_queue *rxq = rx_queue;\n \tuint32_t desc;\n \n \tif (unlikely(offset >= rxq->nb_rx_desc))\n@@ -2332,7 +2332,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tstruct igb_tx_queue *txq = dev->data->tx_queues[i];\n+\t\tstruct ixgbe_tx_queue *txq = dev->data->tx_queues[i];\n \t\tif (txq != NULL) {\n \t\t\ttxq->ops->release_mbufs(txq);\n \t\t\ttxq->ops->reset(txq);\n@@ -2340,7 +2340,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)\n \t}\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tstruct igb_rx_queue *rxq = dev->data->rx_queues[i];\n+\t\tstruct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];\n \t\tif (rxq != NULL) {\n \t\t\tixgbe_rx_queue_release_mbufs(rxq);\n \t\t\tixgbe_reset_rx_queue(rxq);\n@@ -3296,9 +3296,9 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)\n }\n \n static int\n-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)\n+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)\n {\n-\tstruct igb_rx_entry *rxe = rxq->sw_ring;\n+\tstruct ixgbe_rx_entry *rxe = rxq->sw_ring;\n \tuint64_t dma_addr;\n \tunsigned i;\n \n@@ -3512,7 +3512,7 @@ int\n ixgbe_dev_rx_init(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tstruct rte_pktmbuf_pool_private *mbp_priv;\n \tuint64_t bus_addr;\n \tuint32_t rxctrl;\n@@ -3696,7 +3696,7 @@ void\n ixgbe_dev_tx_init(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_tx_queue *txq;\n+\tstruct ixgbe_tx_queue *txq;\n \tuint64_t bus_addr;\n \tuint32_t hlreg0;\n \tuint32_t txctrl;\n@@ -3792,8 
+3792,8 @@ int\n ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_tx_queue *txq;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_tx_queue *txq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tuint32_t txdctl;\n \tuint32_t dmatxctl;\n \tuint32_t rxctrl;\n@@ -3859,7 +3859,7 @@ int\n ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tuint32_t rxdctl;\n \tint poll_ms;\n \n@@ -3904,7 +3904,7 @@ int\n ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tuint32_t rxdctl;\n \tint poll_ms;\n \n@@ -3946,7 +3946,7 @@ int\n ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_tx_queue *txq;\n+\tstruct ixgbe_tx_queue *txq;\n \tuint32_t txdctl;\n \tint poll_ms;\n \n@@ -3987,7 +3987,7 @@ int\n ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_tx_queue *txq;\n+\tstruct ixgbe_tx_queue *txq;\n \tuint32_t txdctl;\n \tuint32_t txtdh, txtdt;\n \tint poll_ms;\n@@ -4047,7 +4047,7 @@ int\n ixgbevf_dev_rx_init(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tstruct rte_pktmbuf_pool_private *mbp_priv;\n \tuint64_t bus_addr;\n \tuint32_t srrctl, psrtype = 0;\n@@ -4190,7 +4190,7 @@ void\n ixgbevf_dev_tx_init(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_tx_queue *txq;\n+\tstruct ixgbe_tx_queue *txq;\n \tuint64_t bus_addr;\n \tuint32_t txctrl;\n \tuint16_t i;\n@@ -4231,8 +4231,8 @@ void\n ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)\n {\n \tstruct ixgbe_hw     *hw;\n-\tstruct igb_tx_queue *txq;\n-\tstruct igb_rx_queue *rxq;\n+\tstruct ixgbe_tx_queue *txq;\n+\tstruct ixgbe_rx_queue *rxq;\n \tuint32_t txdctl;\n \tuint32_t rxdctl;\n \tuint16_t i;\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h\nindex 42d59f9..4cc1d6c 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h\n@@ -75,14 +75,14 @@\n /**\n  * Structure associated with each descriptor of the RX ring of a RX queue.\n  */\n-struct igb_rx_entry {\n+struct ixgbe_rx_entry {\n \tstruct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */\n };\n \n /**\n  * Structure associated with each descriptor of the TX ring of a TX queue.\n  */\n-struct igb_tx_entry {\n+struct ixgbe_tx_entry {\n \tstruct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */\n \tuint16_t next_id; /**< Index of next descriptor in ring. */\n \tuint16_t last_id; /**< Index of last scattered descriptor. */\n@@ -91,20 +91,20 @@ struct igb_tx_entry {\n /**\n  * Structure associated with each descriptor of the TX ring of a TX queue.\n  */\n-struct igb_tx_entry_v {\n+struct ixgbe_tx_entry_v {\n \tstruct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */\n };\n \n /**\n  * Structure associated with each RX queue.\n  */\n-struct igb_rx_queue {\n+struct ixgbe_rx_queue {\n \tstruct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */\n \tvolatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */\n \tuint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */\n \tvolatile uint32_t   *rdt_reg_addr; /**< RDT register address. 
*/\n \tvolatile uint32_t   *rdh_reg_addr; /**< RDH register address. */\n-\tstruct igb_rx_entry *sw_ring; /**< address of RX software ring. */\n+\tstruct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */\n \tstruct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */\n \tstruct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */\n \tuint64_t            mbuf_initializer; /**< value to init mbufs */\n@@ -182,11 +182,11 @@ struct ixgbe_advctx_info {\n /**\n  * Structure associated with each TX queue.\n  */\n-struct igb_tx_queue {\n+struct ixgbe_tx_queue {\n \t/** TX ring virtual address. */\n \tvolatile union ixgbe_adv_tx_desc *tx_ring;\n \tuint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */\n-\tstruct igb_tx_entry *sw_ring;      /**< virtual address of SW ring. */\n+\tstruct ixgbe_tx_entry *sw_ring;      /**< virtual address of SW ring. */\n \tvolatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */\n \tuint16_t            nb_tx_desc;    /**< number of TX descriptors. */\n \tuint16_t            tx_tail;       /**< current value of TDT reg. */\n@@ -216,9 +216,9 @@ struct igb_tx_queue {\n };\n \n struct ixgbe_txq_ops {\n-\tvoid (*release_mbufs)(struct igb_tx_queue *txq);\n-\tvoid (*free_swring)(struct igb_tx_queue *txq);\n-\tvoid (*reset)(struct igb_tx_queue *txq);\n+\tvoid (*release_mbufs)(struct ixgbe_tx_queue *txq);\n+\tvoid (*free_swring)(struct ixgbe_tx_queue *txq);\n+\tvoid (*reset)(struct ixgbe_tx_queue *txq);\n };\n \n /*\n@@ -253,7 +253,7 @@ struct ixgbe_txq_ops {\n  * the queue parameters. Used in tx_queue_setup by primary process and then\n  * in dev_init by secondary process when attaching to an existing ethdev.\n  */\n-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);\n+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);\n \n #ifdef RTE_IXGBE_INC_VECTOR\n uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n@@ -262,8 +262,8 @@ uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,\n \t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts);\n-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);\n-int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);\n+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);\n+int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);\n int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);\n #endif\n \ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c\nindex 11e9f12..9d8fa8d 100644\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c\n@@ -45,12 +45,12 @@\n #endif\n \n static inline void\n-ixgbe_rxq_rearm(struct igb_rx_queue *rxq)\n+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)\n {\n \tint i;\n \tuint16_t rx_id;\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];\n+\tstruct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];\n \tstruct rte_mbuf *mb0, *mb1;\n \t__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,\n \t\t\tRTE_PKTMBUF_HEADROOM);\n@@ -187,11 +187,11 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)\n  * - don't support ol_flags for rss and csum err\n  */\n static inline uint16_t\n-_recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts, uint8_t 
*split_packet)\n {\n \tvolatile union ixgbe_adv_rx_desc *rxdp;\n-\tstruct igb_rx_entry *sw_ring;\n+\tstruct ixgbe_rx_entry *sw_ring;\n \tuint16_t nb_pkts_recd;\n \tint pos;\n \tuint64_t var;\n@@ -396,7 +396,7 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n }\n \n static inline uint16_t\n-reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,\n+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,\n \t\tuint16_t nb_bufs, uint8_t *split_flags)\n {\n \tstruct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/\n@@ -468,7 +468,7 @@ uint16_t\n ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts)\n {\n-\tstruct igb_rx_queue *rxq = rx_queue;\n+\tstruct ixgbe_rx_queue *rxq = rx_queue;\n \tuint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};\n \n \t/* get some new buffers */\n@@ -517,9 +517,9 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,\n }\n \n static inline int __attribute__((always_inline))\n-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)\n+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)\n {\n-\tstruct igb_tx_entry_v *txep;\n+\tstruct ixgbe_tx_entry_v *txep;\n \tuint32_t status;\n \tuint32_t n;\n \tuint32_t i;\n@@ -537,7 +537,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)\n \t * first buffer to free from S/W ring is at index\n \t * tx_next_dd - (tx_rs_thresh-1)\n \t */\n-\ttxep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -\n+\ttxep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -\n \t\t\t(n - 1)];\n \tm = __rte_pktmbuf_prefree_seg(txep[0].mbuf);\n \tif (likely(m != NULL)) {\n@@ -575,7 +575,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)\n }\n \n static inline void __attribute__((always_inline))\n-tx_backlog_entry(struct igb_tx_entry_v *txep,\n+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,\n \t\t struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n {\n \tint i;\n@@ -587,9 +587,9 @@ uint16_t\n ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t       uint16_t nb_pkts)\n {\n-\tstruct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;\n+\tstruct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;\n \tvolatile union ixgbe_adv_tx_desc *txdp;\n-\tstruct igb_tx_entry_v *txep;\n+\tstruct ixgbe_tx_entry_v *txep;\n \tuint16_t n, nb_commit, tx_id;\n \tuint64_t flags = DCMD_DTYP_FLAGS;\n \tuint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;\n@@ -607,7 +607,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \ttx_id = txq->tx_tail;\n \ttxdp = &txq->tx_ring[tx_id];\n-\ttxep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];\n+\ttxep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];\n \n \ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);\n \n@@ -628,7 +628,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \t\t/* avoid reach the end of ring */\n \t\ttxdp = &(txq->tx_ring[tx_id]);\n-\t\ttxep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);\n+\t\ttxep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);\n \t}\n \n \ttx_backlog_entry(txep, tx_pkts, nb_commit);\n@@ -651,10 +651,10 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n }\n \n static void\n-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)\n+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)\n {\n \tunsigned i;\n-\tstruct igb_tx_entry_v *txe;\n+\tstruct ixgbe_tx_entry_v *txe;\n \tuint16_t nb_free, max_desc;\n \n \tif (txq->sw_ring != NULL) {\n@@ -664,36 +664,36 @@ ixgbe_tx_queue_release_mbufs(struct 
igb_tx_queue *txq)\n \t\tfor (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);\n \t\t     nb_free < max_desc && i != txq->tx_tail;\n \t\t     i = (i + 1) & max_desc) {\n-\t\t\ttxe = (struct igb_tx_entry_v *)&txq->sw_ring[i];\n+\t\t\ttxe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];\n \t\t\tif (txe->mbuf != NULL)\n \t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n \t\t}\n \t\t/* reset tx_entry */\n \t\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n-\t\t\ttxe = (struct igb_tx_entry_v *)&txq->sw_ring[i];\n+\t\t\ttxe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];\n \t\t\ttxe->mbuf = NULL;\n \t\t}\n \t}\n }\n \n static void\n-ixgbe_tx_free_swring(struct igb_tx_queue *txq)\n+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)\n {\n \tif (txq == NULL)\n \t\treturn;\n \n \tif (txq->sw_ring != NULL) {\n-\t\trte_free((struct igb_rx_entry *)txq->sw_ring - 1);\n+\t\trte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);\n \t\ttxq->sw_ring = NULL;\n \t}\n }\n \n static void\n-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)\n+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)\n {\n \tstatic const union ixgbe_adv_tx_desc zeroed_desc = { .read = {\n \t\t\t.buffer_addr = 0} };\n-\tstruct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;\n+\tstruct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;\n \tuint16_t i;\n \n \t/* Zero out HW ring memory */\n@@ -730,7 +730,7 @@ static const struct ixgbe_txq_ops vec_txq_ops = {\n };\n \n int\n-ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)\n+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)\n {\n \tuintptr_t p;\n \tstruct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */\n@@ -747,14 +747,14 @@ ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)\n \treturn 0;\n }\n \n-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)\n+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)\n {\n \tif (txq->sw_ring == NULL)\n \t\treturn -1;\n \n \t/* leave the first one for overflow */\n-\ttxq->sw_ring = (struct igb_tx_entry *)\n-\t\t((struct igb_tx_entry_v *)txq->sw_ring + 1);\n+\ttxq->sw_ring = (struct ixgbe_tx_entry *)\n+\t\t((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);\n \ttxq->ops = &vec_txq_ops;\n \n \treturn 0;\n",
    "prefixes": [
        "dpdk-dev",
        "5/5"
    ]
}
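
The response above also carries ready-made links for consuming the patch outside the API, such as the per-patch "mbox" URL. The short sketch below pulls a few of those fields out of the JSON and saves the mbox so it can be applied with git am; only the output filename is invented, every field accessed is present in the response shown above.

# Minimal sketch: read fields from the JSON above and download the mbox.
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/3919/").json()

mbox_url = patch["mbox"]                 # ".../mbox/" link from the response
submitter = patch["submitter"]["name"]
print(f"{patch['name']} by {submitter} -- state: {patch['state']}")

mbox = requests.get(mbox_url)
mbox.raise_for_status()
with open("3919.mbox", "wb") as f:       # hypothetical filename
    f.write(mbox.content)
# afterwards: git am 3919.mbox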