get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).

GET /api/patches/134/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 134,
    "url": "http://patches.dpdk.org/api/patches/134/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1407789890-17355-14-git-send-email-bruce.richardson@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1407789890-17355-14-git-send-email-bruce.richardson@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1407789890-17355-14-git-send-email-bruce.richardson@intel.com",
    "date": "2014-08-11T20:44:49",
    "name": "[dpdk-dev,RFC,13/14] mbuf: cleanup + added in additional mbuf fields.",
    "commit_ref": "",
    "pull_url": "",
    "state": "rfc",
    "archived": true,
    "hash": "5af2a9fcde7da8dff35ada6de26df435982c75ff",
    "submitter": {
        "id": 20,
        "url": "http://patches.dpdk.org/api/people/20/?format=api",
        "name": "Bruce Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1407789890-17355-14-git-send-email-bruce.richardson@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/134/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/134/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<bricha3@ecsmtp.ir.intel.com>",
        "Received": [
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\r\n\tby dpdk.org (Postfix) with ESMTP id A0F2EB377\r\n\tfor <dev@dpdk.org>; Mon, 11 Aug 2014 22:42:08 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\r\n\tby fmsmga103.fm.intel.com with ESMTP; 11 Aug 2014 13:37:32 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\r\n\tby FMSMGA003.fm.intel.com with ESMTP; 11 Aug 2014 13:41:39 -0700",
            "from sivswdev02.ir.intel.com (sivswdev02.ir.intel.com\r\n\t[10.237.217.46])\r\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\r\n\ts7BKiqg8020970; Mon, 11 Aug 2014 21:44:52 +0100",
            "from sivswdev02.ir.intel.com (localhost [127.0.0.1])\r\n\tby sivswdev02.ir.intel.com with ESMTP id s7BKiq00017731;\r\n\tMon, 11 Aug 2014 21:44:52 +0100",
            "(from bricha3@localhost)\r\n\tby sivswdev02.ir.intel.com with  id s7BKiqfr017726;\r\n\tMon, 11 Aug 2014 21:44:52 +0100"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"4.97,862,1389772800\"; d=\"scan'208\";a=\"371049209\"",
        "From": "Bruce Richardson <bruce.richardson@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Mon, 11 Aug 2014 21:44:49 +0100",
        "Message-Id": "<1407789890-17355-14-git-send-email-bruce.richardson@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": "<1407789890-17355-1-git-send-email-bruce.richardson@intel.com>",
        "References": "<1407789890-17355-1-git-send-email-bruce.richardson@intel.com>",
        "Subject": "[dpdk-dev] [RFC PATCH 13/14] mbuf: cleanup + added in additional\r\n\tmbuf fields.",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\r\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\r\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "X-List-Received-Date": "Mon, 11 Aug 2014 20:42:10 -0000"
    },
    "content": "Cleanups:\n* use typedefs for markers within mbuf struct\n* split up vlan_macip field as the l2/l3 lengths are for TX so go on the\n  second cache line.\n* created a tx_ol field in second cache line for data used for tx\n  offloads\n* rename the hash field to the filter field as it contains more than\n  just a hash value.\n\nAdded in the extra mbuf fields needed:\n* fdir flex bytes for i40e driver, i.e. extra 32-bits for filters\n* field to be used for a sequence number, extra 32-bit field\n* field for a second vlan tag, extra 16-bits, using space freed by\n  moving out the l2 l3 lengths.\n* userdata field for general application use.\n* added inner_l3 and l4 length fields to allow tunneling.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\n---\n app/test-pmd/csumonly.c                     |  4 +-\n app/test-pmd/flowgen.c                      |  6 +-\n app/test-pmd/macfwd.c                       |  6 +-\n app/test-pmd/macswap.c                      |  6 +-\n app/test-pmd/rxonly.c                       |  7 +--\n app/test-pmd/testpmd.c                      |  5 +-\n app/test-pmd/txonly.c                       |  6 +-\n app/test/packet_burst_generator.c           | 10 ++--\n app/test/test_distributor.c                 | 18 +++---\n app/test/test_distributor_perf.c            |  4 +-\n lib/librte_distributor/rte_distributor.c    |  2 +-\n lib/librte_ip_frag/ip_frag_common.h         |  3 +-\n lib/librte_ip_frag/rte_ipv4_fragmentation.c |  2 +-\n lib/librte_ip_frag/rte_ipv4_reassembly.c    |  6 +-\n lib/librte_ip_frag/rte_ipv6_reassembly.c    |  5 +-\n lib/librte_mbuf/rte_mbuf.h                  | 88 ++++++++++++++++-------------\n lib/librte_pmd_e1000/e1000_ethdev.h         | 21 +++++++\n lib/librte_pmd_e1000/em_rxtx.c              | 13 +++--\n lib/librte_pmd_e1000/igb_rxtx.c             | 20 +++----\n lib/librte_pmd_i40e/i40e_rxtx.c             | 22 ++++----\n lib/librte_pmd_ixgbe/ixgbe_ethdev.h         | 21 +++++++\n lib/librte_pmd_ixgbe/ixgbe_rxtx.c           | 28 ++++-----\n lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c       |  8 +--\n lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c       |  4 +-\n lib/librte_sched/rte_sched.h                | 19 ++++---\n 25 files changed, 198 insertions(+), 136 deletions(-)",
    "diff": "diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c\r\nindex 6a3aaf0..e7bf794 100644\r\n--- a/app/test-pmd/csumonly.c\r\n+++ b/app/test-pmd/csumonly.c\r\n@@ -432,8 +432,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)\r\n \t\t}\r\n \r\n \t\t/* Combine the packet header write. VLAN is not consider here */\r\n-\t\tmb->vlan_macip.f.l2_len = l2_len;\r\n-\t\tmb->vlan_macip.f.l3_len = l3_len;\r\n+\t\tmb->tx_ol.l2_len = l2_len;\r\n+\t\tmb->tx_ol.l3_len = l3_len;\r\n \t\tmb->ol_flags = ol_flags;\r\n \t}\r\n \tnb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);\r\ndiff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c\r\nindex d69b2b8..a4e758a 100644\r\n--- a/app/test-pmd/flowgen.c\r\n+++ b/app/test-pmd/flowgen.c\r\n@@ -208,9 +208,9 @@ pkt_burst_flow_gen(struct fwd_stream *fs)\r\n \t\tpkt->nb_segs\t\t\t= 1;\r\n \t\tpkt->pkt_len\t\t\t= pkt_size;\r\n \t\tpkt->ol_flags\t\t\t= ol_flags;\r\n-\t\tpkt->vlan_macip.f.vlan_tci\t= vlan_tci;\r\n-\t\tpkt->vlan_macip.f.l2_len\t= sizeof(struct ether_hdr);\r\n-\t\tpkt->vlan_macip.f.l3_len\t= sizeof(struct ipv4_hdr);\r\n+\t\tpkt->vlan_tci0\t\t\t= vlan_tci;\r\n+\t\tpkt->tx_ol.l2_len\t\t= sizeof(struct ether_hdr);\r\n+\t\tpkt->tx_ol.l3_len\t\t= sizeof(struct ipv4_hdr);\r\n \t\tpkts_burst[nb_pkt]\t\t= pkt;\r\n \r\n \t\tnext_flow = (next_flow + 1) % cfg_n_flows;\r\ndiff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c\r\nindex 674c189..c0791dd 100644\r\n--- a/app/test-pmd/macfwd.c\r\n+++ b/app/test-pmd/macfwd.c\r\n@@ -116,9 +116,9 @@ pkt_burst_mac_forward(struct fwd_stream *fs)\r\n \t\tether_addr_copy(&ports[fs->tx_port].eth_addr,\r\n \t\t\t\t&eth_hdr->s_addr);\r\n \t\tmb->ol_flags = txp->tx_ol_flags;\r\n-\t\tmb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);\r\n-\t\tmb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);\r\n-\t\tmb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;\r\n+\t\tmb->tx_ol.l2_len = sizeof(struct ether_hdr);\r\n+\t\tmb->tx_ol.l3_len = sizeof(struct ipv4_hdr);\r\n+\t\tmb->vlan_tci0 = txp->tx_vlan_id;\r\n \t}\r\n \tnb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);\r\n \tfs->tx_packets += nb_tx;\r\ndiff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c\r\nindex d274b36..517bb73 100644\r\n--- a/app/test-pmd/macswap.c\r\n+++ b/app/test-pmd/macswap.c\r\n@@ -118,9 +118,9 @@ pkt_burst_mac_swap(struct fwd_stream *fs)\r\n \t\tether_addr_copy(&addr, &eth_hdr->s_addr);\r\n \r\n \t\tmb->ol_flags = txp->tx_ol_flags;\r\n-\t\tmb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);\r\n-\t\tmb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);\r\n-\t\tmb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;\r\n+\t\tmb->tx_ol.l2_len = sizeof(struct ether_hdr);\r\n+\t\tmb->tx_ol.l3_len = sizeof(struct ipv4_hdr);\r\n+\t\tmb->vlan_tci0 = txp->tx_vlan_id;\r\n \t}\r\n \tnb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);\r\n \tfs->tx_packets += nb_tx;\r\ndiff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c\r\nindex b477076..95a77a4 100644\r\n--- a/app/test-pmd/rxonly.c\r\n+++ b/app/test-pmd/rxonly.c\r\n@@ -158,15 +158,14 @@ pkt_burst_receive(struct fwd_stream *fs)\r\n \t\t       eth_type, (unsigned) mb->pkt_len,\r\n \t\t       (int)mb->nb_segs);\r\n \t\tif (ol_flags & PKT_RX_RSS_HASH) {\r\n-\t\t\tprintf(\" - RSS hash=0x%x\", (unsigned) mb->hash.rss);\r\n+\t\t\tprintf(\" - RSS hash=0x%x\", (unsigned) mb->filters.rss);\r\n \t\t\tprintf(\" - RSS queue=0x%x\",(unsigned) fs->rx_queue);\r\n \t\t}\r\n \t\telse if (ol_flags & PKT_RX_FDIR)\r\n \t\t\tprintf(\" - FDIR hash=0x%x - FDIR id=0x%x 
\",\r\n-\t\t\t       mb->hash.fdir.hash, mb->hash.fdir.id);\r\n+\t\t\t       mb->filters.fdir.hash, mb->filters.fdir.id);\r\n \t\tif (ol_flags & PKT_RX_VLAN_PKT)\r\n-\t\t\tprintf(\" - VLAN tci=0x%x\",\r\n-\t\t\t\tmb->vlan_macip.f.vlan_tci);\r\n+\t\t\tprintf(\" - VLAN tci=0x%x\", mb->vlan_tci0);\r\n \t\tprintf(\"\\n\");\r\n \t\tif (ol_flags != 0) {\r\n \t\t\tint rxf;\r\ndiff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c\r\nindex 16de06c..532d139 100644\r\n--- a/app/test-pmd/testpmd.c\r\n+++ b/app/test-pmd/testpmd.c\r\n@@ -406,8 +406,9 @@ testpmd_mbuf_ctor(struct rte_mempool *mp,\r\n \tmb->ol_flags     = 0;\r\n \tmb->data_off     = RTE_PKTMBUF_HEADROOM;\r\n \tmb->nb_segs      = 1;\r\n-\tmb->vlan_macip.data = 0;\r\n-\tmb->hash.rss     = 0;\r\n+\tmb->vlan_tci0    = 0;\r\n+\tmb->tx_ol.u64    = 0;\r\n+\tmb->filters.rss  = 0;\r\n }\r\n \r\n static void\r\ndiff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c\r\nindex 95e46a2..57d6c60 100644\r\n--- a/app/test-pmd/txonly.c\r\n+++ b/app/test-pmd/txonly.c\r\n@@ -264,9 +264,9 @@ pkt_burst_transmit(struct fwd_stream *fs)\r\n \t\tpkt->nb_segs = tx_pkt_nb_segs;\r\n \t\tpkt->pkt_len = tx_pkt_length;\r\n \t\tpkt->ol_flags = ol_flags;\r\n-\t\tpkt->vlan_macip.f.vlan_tci  = vlan_tci;\r\n-\t\tpkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);\r\n-\t\tpkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);\r\n+\t\tpkt->vlan_tci0  = vlan_tci;\r\n+\t\tpkt->tx_ol.l2_len = sizeof(struct ether_hdr);\r\n+\t\tpkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);\r\n \t\tpkts_burst[nb_pkt] = pkt;\r\n \t}\r\n \tnb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);\r\ndiff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c\r\nindex c9e6c8b..bb14f83 100644\r\n--- a/app/test/packet_burst_generator.c\r\n+++ b/app/test/packet_burst_generator.c\r\n@@ -261,19 +261,19 @@ nomore_mbuf:\r\n \t\t */\r\n \t\tpkt->nb_segs = tx_pkt_nb_segs;\r\n \t\tpkt->pkt_len = tx_pkt_length;\r\n-\t\tpkt->vlan_macip.f.l2_len = eth_hdr_size;\r\n+\t\tpkt->tx_ol.l2_len = eth_hdr_size;\r\n \r\n \t\tif (ipv4) {\r\n-\t\t\tpkt->vlan_macip.f.vlan_tci  = ETHER_TYPE_IPv4;\r\n-\t\t\tpkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);\r\n+\t\t\tpkt->vlan_tci0  = ETHER_TYPE_IPv4;\r\n+\t\t\tpkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);\r\n \r\n \t\t\tif (vlan_enabled)\r\n \t\t\t\tpkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;\r\n \t\t\telse\r\n \t\t\t\tpkt->ol_flags = PKT_RX_IPV4_HDR;\r\n \t\t} else {\r\n-\t\t\tpkt->vlan_macip.f.vlan_tci  = ETHER_TYPE_IPv6;\r\n-\t\t\tpkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);\r\n+\t\t\tpkt->vlan_tci0  = ETHER_TYPE_IPv6;\r\n+\t\t\tpkt->tx_ol.l3_len = sizeof(struct ipv6_hdr);\r\n \r\n \t\t\tif (vlan_enabled)\r\n \t\t\t\tpkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;\r\ndiff --git a/app/test/test_distributor.c b/app/test/test_distributor.c\r\nindex 0e96d42..3d922e9 100644\r\n--- a/app/test/test_distributor.c\r\n+++ b/app/test/test_distributor.c\r\n@@ -121,7 +121,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)\r\n \t/* now set all hash values in all buffers to zero, so all pkts go to the\r\n \t * one worker thread */\r\n \tfor (i = 0; i < BURST; i++)\r\n-\t\tbufs[i]->hash.rss = 0;\r\n+\t\tbufs[i]->filters.rss = 0;\r\n \r\n \trte_distributor_process(d, bufs, BURST);\r\n \trte_distributor_flush(d);\r\n@@ -143,7 +143,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)\r\n \tif (rte_lcore_count() >= 3) {\r\n \t\tclear_packet_count();\r\n \t\tfor (i = 0; i < BURST; 
i++)\r\n-\t\t\tbufs[i]->hash.rss = (i & 1) << 8;\r\n+\t\t\tbufs[i]->filters.rss = (i & 1) << 8;\r\n \r\n \t\trte_distributor_process(d, bufs, BURST);\r\n \t\trte_distributor_flush(d);\r\n@@ -168,7 +168,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)\r\n \t * so load gets distributed */\r\n \tclear_packet_count();\r\n \tfor (i = 0; i < BURST; i++)\r\n-\t\tbufs[i]->hash.rss = i;\r\n+\t\tbufs[i]->filters.rss = i;\r\n \r\n \trte_distributor_process(d, bufs, BURST);\r\n \trte_distributor_flush(d);\r\n@@ -200,7 +200,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)\r\n \t\treturn -1;\r\n \t}\r\n \tfor (i = 0; i < BIG_BATCH; i++)\r\n-\t\tmany_bufs[i]->hash.rss = i << 2;\r\n+\t\tmany_bufs[i]->filters.rss = i << 2;\r\n \r\n \tfor (i = 0; i < BIG_BATCH/BURST; i++) {\r\n \t\trte_distributor_process(d, &many_bufs[i*BURST], BURST);\r\n@@ -281,7 +281,7 @@ sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)\r\n \t\twhile (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)\r\n \t\t\trte_distributor_process(d, NULL, 0);\r\n \t\tfor (j = 0; j < BURST; j++) {\r\n-\t\t\tbufs[j]->hash.rss = (i+j) << 1;\r\n+\t\t\tbufs[j]->filters.rss = (i+j) << 1;\r\n \t\t\tbufs[j]->refcnt = 1;\r\n \t\t}\r\n \r\n@@ -360,7 +360,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,\r\n \t/* now set all hash values in all buffers to zero, so all pkts go to the\r\n \t * one worker thread */\r\n \tfor (i = 0; i < BURST; i++)\r\n-\t\tbufs[i]->hash.rss = 0;\r\n+\t\tbufs[i]->filters.rss = 0;\r\n \r\n \trte_distributor_process(d, bufs, BURST);\r\n \t/* at this point, we will have processed some packets and have a full\r\n@@ -373,7 +373,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,\r\n \t\treturn -1;\r\n \t}\r\n \tfor (i = 0; i < BURST; i++)\r\n-\t\tbufs[i]->hash.rss = 0;\r\n+\t\tbufs[i]->filters.rss = 0;\r\n \r\n \t/* get worker zero to quit */\r\n \tzero_quit = 1;\r\n@@ -417,7 +417,7 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,\r\n \t/* now set all hash values in all buffers to zero, so all pkts go to the\r\n \t * one worker thread */\r\n \tfor (i = 0; i < BURST; i++)\r\n-\t\tbufs[i]->hash.rss = 0;\r\n+\t\tbufs[i]->filters.rss = 0;\r\n \r\n \trte_distributor_process(d, bufs, BURST);\r\n \t/* at this point, we will have processed some packets and have a full\r\n@@ -489,7 +489,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)\r\n \tzero_quit = 0;\r\n \tquit = 1;\r\n \tfor (i = 0; i < num_workers; i++)\r\n-\t\tbufs[i]->hash.rss = i << 1;\r\n+\t\tbufs[i]->filters.rss = i << 1;\r\n \trte_distributor_process(d, bufs, num_workers);\r\n \r\n \trte_mempool_put_bulk(p, (void *)bufs, num_workers);\r\ndiff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c\r\nindex 7ecbc6b..f9e0d57 100644\r\n--- a/app/test/test_distributor_perf.c\r\n+++ b/app/test/test_distributor_perf.c\r\n@@ -160,7 +160,7 @@ perf_test(struct rte_distributor *d, struct rte_mempool *p)\r\n \t}\r\n \t/* ensure we have different hash value for each pkt */\r\n \tfor (i = 0; i < BURST; i++)\r\n-\t\tbufs[i]->hash.rss = i;\r\n+\t\tbufs[i]->filters.rss = i;\r\n \r\n \tstart = rte_rdtsc();\r\n \tfor (i = 0; i < (1<<ITER_POWER); i++)\r\n@@ -199,7 +199,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)\r\n \r\n \tquit = 1;\r\n \tfor (i = 0; i < num_workers; i++)\r\n-\t\tbufs[i]->hash.rss = i << 1;\r\n+\t\tbufs[i]->filters.rss = i << 1;\r\n \trte_distributor_process(d, bufs, num_workers);\r\n \r\n \trte_mempool_put_bulk(p, 
(void *)bufs, num_workers);\r\ndiff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c\r\nindex 585ff88..7aed7f1 100644\r\n--- a/lib/librte_distributor/rte_distributor.c\r\n+++ b/lib/librte_distributor/rte_distributor.c\r\n@@ -282,7 +282,7 @@ rte_distributor_process(struct rte_distributor *d,\r\n \t\t\tnext_mb = mbufs[next_idx++];\r\n \t\t\tnext_value = (((int64_t)(uintptr_t)next_mb)\r\n \t\t\t\t\t<< RTE_DISTRIB_FLAG_BITS);\r\n-\t\t\tnew_tag = (next_mb->hash.rss | 1);\r\n+\t\t\tnew_tag = (next_mb->filters.rss | 1);\r\n \r\n \t\t\tuint32_t match = 0;\r\n \t\t\tunsigned i;\r\ndiff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h\r\nindex 81ca23a..0a48dec 100644\r\n--- a/lib/librte_ip_frag/ip_frag_common.h\r\n+++ b/lib/librte_ip_frag/ip_frag_common.h\r\n@@ -173,8 +173,7 @@ ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)\r\n \tstruct rte_mbuf *ms;\r\n \r\n \t/* adjust start of the last fragment data. */\r\n-\trte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +\r\n-\t\tmp->vlan_macip.f.l3_len));\r\n+\trte_pktmbuf_adj(mp, (uint16_t)(mp->tx_ol.l2_len + mp->tx_ol.l3_len));\r\n \r\n \t/* chain two fragments. */\r\n \tms = rte_pktmbuf_lastseg(mn);\r\ndiff --git a/lib/librte_ip_frag/rte_ipv4_fragmentation.c b/lib/librte_ip_frag/rte_ipv4_fragmentation.c\r\nindex cb09d92..5165b25 100644\r\n--- a/lib/librte_ip_frag/rte_ipv4_fragmentation.c\r\n+++ b/lib/librte_ip_frag/rte_ipv4_fragmentation.c\r\n@@ -198,7 +198,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,\r\n \t\t    out_pkt->pkt_len - sizeof(struct ipv4_hdr));\r\n \r\n \t\tout_pkt->ol_flags |= PKT_TX_IP_CKSUM;\r\n-\t\tout_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);\r\n+\t\tout_pkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);\r\n \r\n \t\t/* Write the fragment to the output list */\r\n \t\tpkts_out[out_pkt_pos] = out_pkt;\r\ndiff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c\r\nindex 06c37af..4c0104a 100644\r\n--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c\r\n+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c\r\n@@ -87,10 +87,10 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)\r\n \r\n \t/* update ipv4 header for the reassmebled packet */\r\n \tip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +\r\n-\t\tm->vlan_macip.f.l2_len);\r\n+\t\tm->tx_ol.l2_len);\r\n \r\n \tip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +\r\n-\t\tm->vlan_macip.f.l3_len));\r\n+\t\tm->tx_ol.l3_len));\r\n \tip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &\r\n \t\trte_cpu_to_be_16(IPV4_HDR_DF_FLAG));\r\n \tip_hdr->hdr_checksum = 0;\r\n@@ -137,7 +137,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,\r\n \r\n \tip_ofs *= IPV4_HDR_OFFSET_UNITS;\r\n \tip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -\r\n-\t\tmb->vlan_macip.f.l3_len);\r\n+\t\tmb->tx_ol.l3_len);\r\n \r\n \tIP_FRAG_LOG(DEBUG, \"%s:%d:\\n\"\r\n \t\t\"mbuf: %p, tms: %\" PRIu64\r\ndiff --git a/lib/librte_ip_frag/rte_ipv6_reassembly.c b/lib/librte_ip_frag/rte_ipv6_reassembly.c\r\nindex dee3425..8bfe07c 100644\r\n--- a/lib/librte_ip_frag/rte_ipv6_reassembly.c\r\n+++ b/lib/librte_ip_frag/rte_ipv6_reassembly.c\r\n@@ -109,7 +109,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)\r\n \r\n \t/* update ipv6 header for the reassembled datagram */\r\n \tip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +\r\n-\t\t\t\t\t\t\t\t  m->vlan_macip.f.l2_len);\r\n+\t\t\tm->tx_ol.l2_len);\r\n \r\n \tip_hdr->payload_len = 
rte_cpu_to_be_16(payload_len);\r\n \r\n@@ -120,8 +120,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)\r\n \t * other headers, so we assume there are no other headers and thus update\r\n \t * the main IPv6 header instead.\r\n \t */\r\n-\tmove_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -\r\n-\t\t\tsizeof(*frag_hdr);\r\n+\tmove_len = m->tx_ol.l2_len + m->tx_ol.l3_len - sizeof(*frag_hdr);\r\n \tfrag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);\r\n \tip_hdr->proto = frag_hdr->next_header;\r\n \r\ndiff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h\r\nindex 01d4f6e..b4a7aea 100644\r\n--- a/lib/librte_mbuf/rte_mbuf.h\r\n+++ b/lib/librte_mbuf/rte_mbuf.h\r\n@@ -112,41 +112,38 @@ extern \"C\" {\r\n #define PKT_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)\r\n \r\n /** Offload features */\r\n-union rte_vlan_macip {\r\n-\tuint32_t data;\r\n+union rte_tx_offloads {\r\n+\tuint64_t u64;\r\n \tstruct {\r\n+\t\tuint8_t  l4_len;   /**< L4 Header Length */\r\n+\t\tuint8_t  inner_l3_len; /**< L3 tunnelling header header */\r\n \t\tuint16_t l3_len:9; /**< L3 (IP) Header Length. */\r\n \t\tuint16_t l2_len:7; /**< L2 (MAC) Header Length. */\r\n-\t\tuint16_t vlan_tci;\r\n-\t\t/**< VLAN Tag Control Identifier (CPU order). */\r\n-\t} f;\r\n-};\r\n \r\n-/*\r\n- * Compare mask for vlan_macip_len.data,\r\n- * should be in sync with rte_vlan_macip.f layout.\r\n- * */\r\n-#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */\r\n-#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */\r\n-#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */\r\n-/**< MAC+IP  length. */\r\n-#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)\r\n+\t\tuint16_t mss;      /**< Maximum segment size */\r\n+\t\tuint16_t reserved;\r\n+\t};\r\n+};\r\n \r\n+/* define a set of marker types that can be used to refer to set points in the\r\n+ * mbuf */\r\n+typedef void    *MARKER[0];   /**< generic marker for a point in a structure */\r\n+typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes\r\n+                               * with a single assignment */\r\n \r\n /**\r\n  * The generic rte_mbuf, containing a packet mbuf.\r\n  */\r\n struct rte_mbuf {\r\n \t/** dummy field marking start of first cache line */\r\n-\tvoid *cache_line0[0];\r\n+\tMARKER cache_line0;\r\n \r\n \tvoid *buf_addr;           /**< Virtual address of segment buffer. */\r\n \tphys_addr_t buf_physaddr; /**< Physical address of segment buffer. */\r\n \r\n \t/* next 8 bytes are initialised on RX descriptor rearm */\r\n-\tuint64_t rearm_data[0]; /**< dummy element so we can get uin64_t ptrs\r\n-\t                         * to this part of the mbuf without alias error\r\n-\t                         */\r\n+\tMARKER64 rearm_data;      /**< dummy element for data set of rearm */\r\n+\r\n \tuint16_t buf_len;       /**< Length of segment buffer. */\r\n \tuint16_t data_off;\r\n \t/**\r\n@@ -170,27 +167,42 @@ struct rte_mbuf {\r\n \t/* remaining bytes are set on RX when pulling packet from descriptor */\r\n \tuint64_t ol_flags;        /**< Offload features. */\r\n \r\n-\t__m128i rx_descriptor_fields1[0]; /**< dummy field used as marker for\r\n-\t                                   * writes in a vector driver */\r\n+\t/** dummy field used as marker for writes in a vector driver. */\r\n+\tMARKER rx_descriptor_fields1;\r\n+\r\n \tuint16_t packet_type;     /**< Type of packet, e.g. 
protocols used */\r\n \tuint16_t data_len;        /**< Amount of data in segment buffer. */\r\n \tuint32_t pkt_len;         /**< Total pkt len: sum of all segments. */\r\n-\tunion rte_vlan_macip vlan_macip;\r\n+\tuint16_t vlan_tci0;       /**< first vlan tag control identifier */\r\n+\tuint16_t vlan_tci1;       /**< second vlan tci */\r\n \tunion {\r\n \t\tuint32_t rss;       /**< RSS hash result if RSS enabled */\r\n \t\tstruct {\r\n \t\t\tuint16_t hash;\r\n \t\t\tuint16_t id;\r\n \t\t} fdir;             /**< Filter identifier if FDIR enabled */\r\n-\t\tuint32_t sched;     /**< Hierarchical scheduler */\r\n-\t} hash;                 /**< hash information */\r\n+\t\tstruct {\r\n+\t\t\tuint32_t lo;\r\n+\t\t\tunion {\r\n+\t\t\t\tuint32_t hi;\r\n+\t\t\t\tuint32_t filter_id;\r\n+\t\t\t};\r\n+\t\t} fdir_i40e;    /**< i40e enhanced flow director */\r\n+\t\tuint32_t sched; /**< Hierarchical scheduler */\r\n+\t} filters;              /**< NIC filter information information */\r\n+\tuint32_t sequence;      /**< Packet sequence number */\r\n \r\n \t/* second cache line, fields only used in slow path or on TX */\r\n \t/** dummy field marking start of second cache line */\r\n-\tvoid *cache_line1[0] __rte_cache_aligned;\r\n+\tMARKER cache_line1 __rte_cache_aligned;\r\n+\r\n \tstruct rte_mempool *pool; /**< Pool from which mbuf was allocated. */\r\n \tstruct rte_mbuf *next;    /**< Next segment of scattered packet. */\r\n \r\n+\tvoid *userdata;           /**< Pointer available for application use */\r\n+\r\n+\tunion rte_tx_offloads tx_ol; /**< Fields to enable TX offloads */\r\n+\r\n \tunion {\r\n \t\tuint8_t metadata[0];\r\n \t\tuint16_t metadata16[0];\r\n@@ -238,7 +250,6 @@ struct rte_mbuf {\r\n  */\r\n #define RTE_MBUF_DIRECT(mb)     (RTE_MBUF_FROM_BADDR((mb)->buf_addr) == (mb))\r\n \r\n-\r\n /**\r\n  * Private data in case of pktmbuf pool.\r\n  *\r\n@@ -475,17 +486,20 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);\r\n  * The given mbuf must have only one segment.\r\n  *\r\n  * @param m\r\n- *   The packet mbuf to be resetted.\r\n+ *   The packet mbuf to be reset.\r\n  */\r\n static inline void rte_pktmbuf_reset(struct rte_mbuf *m)\r\n {\r\n \tm->next = NULL;\r\n \tm->pkt_len = 0;\r\n-\tm->vlan_macip.data = 0;\r\n+\tm->vlan_tci0 = m->vlan_tci1 = 0;\r\n+\tm->sequence = 0;\r\n \tm->nb_segs = 1;\r\n \tm->port = 0xff;\r\n+\tm->packet_type = 0;\r\n \r\n \tm->ol_flags = 0;\r\n+\tm->tx_ol.u64 = 0;\r\n \tm->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?\r\n \t\t\tRTE_PKTMBUF_HEADROOM : m->buf_len;\r\n \r\n@@ -538,22 +552,20 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)\r\n \t    RTE_MBUF_DIRECT(mi) &&\r\n \t    rte_mbuf_refcnt_read(mi) == 1);\r\n \r\n+\t/* save any needed values */\r\n+\tstruct rte_mempool *pool = mi->pool;\r\n+\r\n+\t/* increment reference count of direct mbuf */\r\n \trte_mbuf_refcnt_update(md, 1);\r\n-\tmi->buf_physaddr = md->buf_physaddr;\r\n-\tmi->buf_addr = md->buf_addr;\r\n-\tmi->buf_len = md->buf_len;\r\n \r\n-\tmi->next = md->next;\r\n-\tmi->data_off = md->data_off;\r\n-\tmi->data_len = md->data_len;\r\n-\tmi->port = md->port;\r\n-\tmi->vlan_macip = md->vlan_macip;\r\n-\tmi->hash = md->hash;\r\n+\t/* assign all fields over, then reset some values */\r\n+\t*mi = *md;\r\n \r\n+\tmi->pool = pool;\r\n \tmi->next = NULL;\r\n \tmi->pkt_len = mi->data_len;\r\n \tmi->nb_segs = 1;\r\n-\tmi->ol_flags = md->ol_flags;\r\n+\trte_mbuf_refcnt_set(mi, 1);\r\n \r\n \t__rte_mbuf_sanity_check(mi, 1);\r\n \t__rte_mbuf_sanity_check(md, 
0);\r\ndiff --git a/lib/librte_pmd_e1000/e1000_ethdev.h b/lib/librte_pmd_e1000/e1000_ethdev.h\r\nindex 71eb5fb..6131a79 100644\r\n--- a/lib/librte_pmd_e1000/e1000_ethdev.h\r\n+++ b/lib/librte_pmd_e1000/e1000_ethdev.h\r\n@@ -116,6 +116,27 @@ struct e1000_vfta {\r\n \tuint32_t vfta[IGB_VFTA_SIZE];\r\n };\r\n \r\n+/** Offload features for context descriptors - previously in mbuf */\r\n+union rte_vlan_macip {\r\n+\tuint32_t data;\r\n+\tstruct {\r\n+\t\tuint16_t l3_len:9; /**< L3 (IP) Header Length. */\r\n+\t\tuint16_t l2_len:7; /**< L2 (MAC) Header Length. */\r\n+\t\tuint16_t vlan_tci;\r\n+\t\t/**< VLAN Tag Control Identifier (CPU order). */\r\n+\t} f;\r\n+};\r\n+\r\n+/*\r\n+ * Compare mask for vlan_macip_len.data,\r\n+ * should be in sync with rte_vlan_macip.f layout.\r\n+ * */\r\n+#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */\r\n+#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */\r\n+#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */\r\n+/**< MAC+IP  length. */\r\n+#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)\r\n+\r\n /*\r\n  * VF data which used by PF host only\r\n  */\r\ndiff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c\r\nindex 637141e..1bf3050 100644\r\n--- a/lib/librte_pmd_e1000/em_rxtx.c\r\n+++ b/lib/librte_pmd_e1000/em_rxtx.c\r\n@@ -420,7 +420,11 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\r\n \t\ttx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |\r\n \t\t\t\t\t\t\tPKT_TX_L4_MASK));\r\n \t\tif (tx_ol_req) {\r\n-\t\t\thdrlen = tx_pkt->vlan_macip;\r\n+\t\t\thdrlen = (union rte_vlan_macip){ .f = {\r\n+\t\t\t\t\t.l3_len = tx_pkt->tx_ol.l3_len,\r\n+\t\t\t\t\t.l2_len = tx_pkt->tx_ol.l2_len,\r\n+\t\t\t\t\t.vlan_tci = tx_pkt->vlan_tci0,\r\n+\t\t\t\t} };\r\n \t\t\t/* If new context to be built or reuse the exist ctx. */\r\n \t\t\tctx = what_ctx_update(txq, tx_ol_req, hdrlen);\r\n \r\n@@ -515,8 +519,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\r\n \t\t/* Set VLAN Tag offload fields. */\r\n \t\tif (ol_flags & PKT_TX_VLAN_PKT) {\r\n \t\t\tcmd_type_len |= E1000_TXD_CMD_VLE;\r\n-\t\t\tpopts_spec = tx_pkt->vlan_macip.f.vlan_tci <<\r\n-\t\t\t\tE1000_TXD_VLAN_SHIFT;\r\n+\t\t\tpopts_spec = tx_pkt->vlan_tci0 << E1000_TXD_VLAN_SHIFT;\r\n \t\t}\r\n \r\n \t\tif (tx_ol_req) {\r\n@@ -783,7 +786,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\t\t\trx_desc_error_to_pkt_flags(rxd.errors));\r\n \r\n \t\t/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */\r\n-\t\trxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);\r\n+\t\trxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);\r\n \r\n \t\t/*\r\n \t\t * Store the mbuf address into the next entry of the array\r\n@@ -1009,7 +1012,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\t\t\t\trx_desc_error_to_pkt_flags(rxd.errors));\r\n \r\n \t\t/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */\r\n-\t\trxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);\r\n+\t\trxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);\r\n \r\n \t\t/* Prefetch data of first segment, if configured to do so. 
*/\r\n \t\trte_packet_prefetch((char *)first_seg->buf_addr +\r\ndiff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c\r\nindex d3e1969..490adc1 100644\r\n--- a/lib/librte_pmd_e1000/igb_rxtx.c\r\n+++ b/lib/librte_pmd_e1000/igb_rxtx.c\r\n@@ -353,7 +353,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\r\n \tuint16_t tx_ol_req;\r\n \tuint32_t new_ctx = 0;\r\n \tuint32_t ctx = 0;\r\n-\tuint32_t vlan_macip_lens;\r\n+\tunion rte_vlan_macip vlan_macip_lens;\r\n \r\n \ttxq = tx_queue;\r\n \tsw_ring = txq->sw_ring;\r\n@@ -378,13 +378,15 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\r\n \t\ttx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);\r\n \r\n \t\tol_flags = tx_pkt->ol_flags;\r\n-\t\tvlan_macip_lens = tx_pkt->vlan_macip.data;\r\n+\t\tvlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci0;\r\n+\t\tvlan_macip_lens.f.l3_len = tx_pkt->tx_ol.l3_len;\r\n+\t\tvlan_macip_lens.f.l2_len = tx_pkt->tx_ol.l2_len;\r\n \t\ttx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);\r\n \r\n \t\t/* If a Context Descriptor need be built . */\r\n \t\tif (tx_ol_req) {\r\n \t\t\tctx = what_advctx_update(txq, tx_ol_req,\r\n-\t\t\t\tvlan_macip_lens);\r\n+\t\t\t\tvlan_macip_lens.data);\r\n \t\t\t/* Only allocate context descriptor if required*/\r\n \t\t\tnew_ctx = (ctx == IGB_CTX_NUM);\r\n \t\t\tctx = txq->ctx_curr;\r\n@@ -500,7 +502,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\r\n \t\t\t\t}\r\n \r\n \t\t\t\tigbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,\r\n-\t\t\t\t    vlan_macip_lens);\r\n+\t\t\t\t\t\tvlan_macip_lens.data);\r\n \r\n \t\t\t\ttxe->last_id = tx_last;\r\n \t\t\t\ttx_id = txe->next_id;\r\n@@ -759,11 +761,10 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\trxm->data_len = pkt_len;\r\n \t\trxm->port = rxq->port_id;\r\n \r\n-\t\trxm->hash.rss = rxd.wb.lower.hi_dword.rss;\r\n+\t\trxm->filters.rss = rxd.wb.lower.hi_dword.rss;\r\n \t\thlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);\r\n \t\t/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */\r\n-\t\trxm->vlan_macip.f.vlan_tci =\r\n-\t\t\trte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n+\t\trxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n \r\n \t\tpkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);\r\n \t\tpkt_flags = (uint16_t)(pkt_flags |\r\n@@ -993,14 +994,13 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\t *      - error flags.\r\n \t\t */\r\n \t\tfirst_seg->port = rxq->port_id;\r\n-\t\tfirst_seg->hash.rss = rxd.wb.lower.hi_dword.rss;\r\n+\t\tfirst_seg->filters.rss = rxd.wb.lower.hi_dword.rss;\r\n \r\n \t\t/*\r\n \t\t * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is\r\n \t\t * set in the pkt_flags field.\r\n \t\t */\r\n-\t\tfirst_seg->vlan_macip.f.vlan_tci =\r\n-\t\t\trte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n+\t\tfirst_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n \t\thlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);\r\n \t\tpkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);\r\n \t\tpkt_flags = (uint16_t)(pkt_flags |\r\ndiff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c\r\nindex 15e7a5d..cf86973 100644\r\n--- a/lib/librte_pmd_i40e/i40e_rxtx.c\r\n+++ b/lib/librte_pmd_i40e/i40e_rxtx.c\r\n@@ -614,16 +614,16 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)\r\n \t\t\t\tI40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;\r\n \t\t\tmb->data_len = pkt_len;\r\n \t\t\tmb->pkt_len = pkt_len;\r\n-\t\t\tmb->vlan_macip.f.vlan_tci = rx_status 
&\r\n+\t\t\tmb->vlan_tci0 = rx_status &\r\n \t\t\t\t(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?\r\n-\t\t\trte_le_to_cpu_16(\\\r\n-\t\t\t\trxdp[j].wb.qword0.lo_dword.l2tag1) : 0;\r\n+\t\t\t\trte_le_to_cpu_16(\\\r\n+\t\t\t\t\trxdp[j].wb.qword0.lo_dword.l2tag1) : 0;\r\n \t\t\tpkt_flags = i40e_rxd_status_to_pkt_flags(qword1);\r\n \t\t\tpkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);\r\n \t\t\tpkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);\r\n \t\t\tmb->ol_flags = pkt_flags;\r\n \t\t\tif (pkt_flags & PKT_RX_RSS_HASH)\r\n-\t\t\t\tmb->hash.rss = rte_le_to_cpu_32(\\\r\n+\t\t\t\tmb->filters.rss = rte_le_to_cpu_32(\\\r\n \t\t\t\t\trxdp->wb.qword0.hi_dword.rss);\r\n \t\t}\r\n \r\n@@ -851,7 +851,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\r\n \t\trxm->data_len = rx_packet_len;\r\n \t\trxm->port = rxq->port_id;\r\n \r\n-\t\trxm->vlan_macip.f.vlan_tci = rx_status &\r\n+\t\trxm->vlan_tci0 = rx_status &\r\n \t\t\t(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?\r\n \t\t\trte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;\r\n \t\tpkt_flags = i40e_rxd_status_to_pkt_flags(qword1);\r\n@@ -859,7 +859,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\r\n \t\tpkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);\r\n \t\trxm->ol_flags = pkt_flags;\r\n \t\tif (pkt_flags & PKT_RX_RSS_HASH)\r\n-\t\t\trxm->hash.rss =\r\n+\t\t\trxm->filters.rss =\r\n \t\t\t\trte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);\r\n \r\n \t\trx_pkts[nb_rx++] = rxm;\r\n@@ -1004,7 +1004,7 @@ i40e_recv_scattered_pkts(void *rx_queue,\r\n \t\t}\r\n \r\n \t\tfirst_seg->port = rxq->port_id;\r\n-\t\tfirst_seg->vlan_macip.f.vlan_tci = (rx_status &\r\n+\t\tfirst_seg->vlan_tci0 = (rx_status &\r\n \t\t\t(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?\r\n \t\t\trte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;\r\n \t\tpkt_flags = i40e_rxd_status_to_pkt_flags(qword1);\r\n@@ -1012,7 +1012,7 @@ i40e_recv_scattered_pkts(void *rx_queue,\r\n \t\tpkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);\r\n \t\tfirst_seg->ol_flags = pkt_flags;\r\n \t\tif (pkt_flags & PKT_RX_RSS_HASH)\r\n-\t\t\trxm->hash.rss =\r\n+\t\t\trxm->filters.rss =\r\n \t\t\t\trte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);\r\n \r\n \t\t/* Prefetch data of first segment, if configured to do so. */\r\n@@ -1107,8 +1107,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\r\n \t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\r\n \r\n \t\tol_flags = tx_pkt->ol_flags;\r\n-\t\tl2_len = tx_pkt->vlan_macip.f.l2_len;\r\n-\t\tl3_len = tx_pkt->vlan_macip.f.l3_len;\r\n+\t\tl2_len = tx_pkt->tx_ol.l2_len;\r\n+\t\tl3_len = tx_pkt->tx_ol.l3_len;\r\n \r\n \t\t/* Calculate the number of context descriptors needed. 
*/\r\n \t\tnb_ctx = i40e_calc_context_desc(ol_flags);\r\n@@ -1144,7 +1144,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\r\n \r\n \t\t/* Descriptor based VLAN insertion */\r\n \t\tif (ol_flags & PKT_TX_VLAN_PKT) {\r\n-\t\t\ttx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<\r\n+\t\t\ttx_flags |= tx_pkt->vlan_tci0 <<\r\n \t\t\t\t\t\tI40E_TX_FLAG_L2TAG1_SHIFT;\r\n \t\t\ttx_flags |= I40E_TX_FLAG_INSERT_VLAN;\r\n \t\t\ttd_cmd |= I40E_TX_DESC_CMD_IL2TAG1;\r\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h\r\nindex a5159e5..c2b5d2d 100644\r\n--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h\r\n+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h\r\n@@ -98,6 +98,27 @@\r\n #define IXGBE_5TUPLE_MAX_PRI            7\r\n #define IXGBE_5TUPLE_MIN_PRI            1\r\n \r\n+/** Offload features for context descriptors - previously in mbuf */\r\n+union rte_vlan_macip {\r\n+\tuint32_t data;\r\n+\tstruct {\r\n+\t\tuint16_t l3_len:9; /**< L3 (IP) Header Length. */\r\n+\t\tuint16_t l2_len:7; /**< L2 (MAC) Header Length. */\r\n+\t\tuint16_t vlan_tci;\r\n+\t\t/**< VLAN Tag Control Identifier (CPU order). */\r\n+\t} f;\r\n+};\r\n+\r\n+/*\r\n+ * Compare mask for vlan_macip_len.data,\r\n+ * should be in sync with rte_vlan_macip.f layout.\r\n+ * */\r\n+#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */\r\n+#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */\r\n+#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */\r\n+/**< MAC+IP  length. */\r\n+#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)\r\n+\r\n /*\r\n  * Information about the fdir mode.\r\n  */\r\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\r\nindex fadb9a0..cca44b8 100644\r\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\r\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\r\n@@ -599,7 +599,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\r\n \t\t * are needed for offload functionality.\r\n \t\t */\r\n \t\tol_flags = tx_pkt->ol_flags;\r\n-\t\tvlan_macip_lens = tx_pkt->vlan_macip.data;\r\n+\t\tvlan_macip_lens = (union rte_vlan_macip){ .f = {\r\n+\t\t\t\t\t.l3_len = tx_pkt->tx_ol.l3_len,\r\n+\t\t\t\t\t.l2_len = tx_pkt->tx_ol.l2_len,\r\n+\t\t\t\t\t.vlan_tci = tx_pkt->vlan_tci0,\r\n+\t\t\t\t} }.data;\r\n \r\n \t\t/* If hardware offload required */\r\n \t\ttx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);\r\n@@ -959,8 +963,8 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)\r\n \t\t\t\t\t\t\trxq->crc_len);\r\n \t\t\tmb->data_len = pkt_len;\r\n \t\t\tmb->pkt_len = pkt_len;\r\n-\t\t\tmb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;\r\n-\t\t\tmb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;\r\n+\t\t\tmb->vlan_tci0 = rxdp[j].wb.upper.vlan;\r\n+\t\t\tmb->filters.rss = rxdp[j].wb.lower.hi_dword.rss;\r\n \r\n \t\t\t/* convert descriptor fields to rte mbuf flags */\r\n \t\t\tmb->ol_flags  = rx_desc_hlen_type_rss_to_pkt_flags(\r\n@@ -1277,8 +1281,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \r\n \t\thlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);\r\n \t\t/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */\r\n-\t\trxm->vlan_macip.f.vlan_tci =\r\n-\t\t\trte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n+\t\trxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n \r\n \t\tpkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);\r\n \t\tpkt_flags = (uint16_t)(pkt_flags |\r\n@@ -1288,12 +1291,12 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n 
\t\trxm->ol_flags = pkt_flags;\r\n \r\n \t\tif (likely(pkt_flags & PKT_RX_RSS_HASH))\r\n-\t\t\trxm->hash.rss = rxd.wb.lower.hi_dword.rss;\r\n+\t\t\trxm->filters.rss = rxd.wb.lower.hi_dword.rss;\r\n \t\telse if (pkt_flags & PKT_RX_FDIR) {\r\n-\t\t\trxm->hash.fdir.hash =\r\n+\t\t\trxm->filters.fdir.hash =\r\n \t\t\t\t(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)\r\n \t\t\t\t\t   & IXGBE_ATR_HASH_MASK);\r\n-\t\t\trxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;\r\n+\t\t\trxm->filters.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;\r\n \t\t}\r\n \t\t/*\r\n \t\t * Store the mbuf address into the next entry of the array\r\n@@ -1522,8 +1525,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\t * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is\r\n \t\t * set in the pkt_flags field.\r\n \t\t */\r\n-\t\tfirst_seg->vlan_macip.f.vlan_tci =\r\n-\t\t\t\trte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n+\t\tfirst_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);\r\n \t\thlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);\r\n \t\tpkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);\r\n \t\tpkt_flags = (uint16_t)(pkt_flags |\r\n@@ -1533,12 +1535,12 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\tfirst_seg->ol_flags = pkt_flags;\r\n \r\n \t\tif (likely(pkt_flags & PKT_RX_RSS_HASH))\r\n-\t\t\tfirst_seg->hash.rss = rxd.wb.lower.hi_dword.rss;\r\n+\t\t\tfirst_seg->filters.rss = rxd.wb.lower.hi_dword.rss;\r\n \t\telse if (pkt_flags & PKT_RX_FDIR) {\r\n-\t\t\tfirst_seg->hash.fdir.hash =\r\n+\t\t\tfirst_seg->filters.fdir.hash =\r\n \t\t\t\t(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)\r\n \t\t\t\t\t   & IXGBE_ATR_HASH_MASK);\r\n-\t\t\tfirst_seg->hash.fdir.id =\r\n+\t\t\tfirst_seg->filters.fdir.id =\r\n \t\t\t\trxd.wb.lower.hi_dword.csum_ip.ip_id;\r\n \t\t}\r\n \r\ndiff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c\r\nindex 07314e9..ba9d050 100644\r\n--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c\r\n+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c\r\n@@ -287,9 +287,9 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\tstaterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);\r\n \r\n \t\t/* D.3 copy final 3,4 data to rx_pkts */\r\n-\t\t_mm_storeu_si128(rx_pkts[pos+3]->rx_descriptor_fields1,\r\n+\t\t_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,\r\n \t\t\t\tpkt_mb4);\r\n-\t\t_mm_storeu_si128(rx_pkts[pos+2]->rx_descriptor_fields1,\r\n+\t\t_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,\r\n \t\t\t\tpkt_mb3);\r\n \r\n \t\t/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */\r\n@@ -301,9 +301,9 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\r\n \t\tstaterr = _mm_packs_epi32(staterr, zero);\r\n \r\n \t\t/* D.3 copy final 1,2 data to rx_pkts */\r\n-\t\t_mm_storeu_si128(rx_pkts[pos+1]->rx_descriptor_fields1,\r\n+\t\t_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,\r\n \t\t\t\tpkt_mb2);\r\n-\t\t_mm_storeu_si128(rx_pkts[pos]->rx_descriptor_fields1,\r\n+\t\t_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,\r\n \t\t\t\tpkt_mb1);\r\n \r\n \t\t/* C.4 calc avaialbe number of desc */\r\ndiff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c\r\nindex f18fad6..e657a97 100644\r\n--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c\r\n+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c\r\n@@ -549,7 +549,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\r\n 
\t\t\t\t\t       rte_pktmbuf_mtod(rxm, void *));\r\n #endif\r\n \t\t\t\t/* Copy vlan tag in packet buffer */\r\n-\t\t\t\trxm->vlan_macip.f.vlan_tci =\r\n+\t\t\t\trxm->vlan_tci0 =\r\n \t\t\t\t\trte_le_to_cpu_16((uint16_t)rcd->tci);\r\n \r\n \t\t\t} else\r\n@@ -562,7 +562,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\r\n \t\t\trxm->pkt_len = (uint16_t)rcd->len;\r\n \t\t\trxm->data_len = (uint16_t)rcd->len;\r\n \t\t\trxm->port = rxq->port_id;\r\n-\t\t\trxm->vlan_macip.f.vlan_tci = 0;\r\n+\t\t\trxm->vlan_tci0 = 0;\r\n \t\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\r\n \r\n \t\t\trx_pkts[nb_rx++] = rxm;\r\ndiff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h\r\nindex e6bba22..11f63fc 100644\r\n--- a/lib/librte_sched/rte_sched.h\r\n+++ b/lib/librte_sched/rte_sched.h\r\n@@ -195,10 +195,12 @@ struct rte_sched_port_params {\r\n #endif\r\n };\r\n \r\n-/** Path through the scheduler hierarchy used by the scheduler enqueue operation to\r\n-identify the destination queue for the current packet. Stored in the field hash.sched\r\n-of struct rte_mbuf of each packet, typically written by the classification stage and read by\r\n-scheduler enqueue.*/\r\n+/**\r\n+ * Path through the scheduler hierarchy used by the scheduler enqueue operation\r\n+ * to identify the destination queue for the current packet. Stored in the field\r\n+ * filters.sched of struct rte_mbuf of each packet, typically written by the\r\n+ * classification stage and read by scheduler enqueue.\r\n+ */\r\n struct rte_sched_port_hierarchy {\r\n \tuint32_t queue:2;                /**< Queue ID (0 .. 3) */\r\n \tuint32_t traffic_class:2;        /**< Traffic class ID (0 .. 3)*/\r\n@@ -352,7 +354,8 @@ static inline void\r\n rte_sched_port_pkt_write(struct rte_mbuf *pkt,\r\n \tuint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)\r\n {\r\n-\tstruct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;\r\n+\tstruct rte_sched_port_hierarchy *sched =\r\n+\t\t\t(struct rte_sched_port_hierarchy *) &pkt->filters.sched;\r\n \r\n \tsched->color = (uint32_t) color;\r\n \tsched->subport = subport;\r\n@@ -381,7 +384,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,\r\n static inline void\r\n rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)\r\n {\r\n-\tstruct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;\r\n+\tstruct rte_sched_port_hierarchy *sched =\r\n+\t\t\t(struct rte_sched_port_hierarchy *) &pkt->filters.sched;\r\n \r\n \t*subport = sched->subport;\r\n \t*pipe = sched->pipe;\r\n@@ -392,7 +396,8 @@ rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint3\r\n static inline enum rte_meter_color\r\n rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)\r\n {\r\n-\tstruct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;\r\n+\tstruct rte_sched_port_hierarchy *sched =\r\n+\t\t\t(struct rte_sched_port_hierarchy *) &pkt->filters.sched;\r\n \r\n \treturn (enum rte_meter_color) sched->color;\r\n }\r\n",
    "prefixes": [
        "dpdk-dev",
        "RFC",
        "13/14"
    ]
}
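
The endpoints described at the top of this page can also be exercised programmatically. The snippet below is a minimal sketch, not part of the API output above: it assumes the Python requests library, that Patchwork token authentication is enabled and a token with maintainer rights on the dpdk project is available, and that fields such as "state" and "archived" from the GET payload shown here are writable via PATCH. Replace the placeholder token and values before use.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "replace-with-a-patchwork-api-token"  # placeholder; token auth is an assumption here

# get: show the patch (read-only, no authentication needed)
patch = requests.get(f"{BASE}/patches/134/").json()
print(patch["name"], patch["state"], patch["archived"])

# patch: partially update the patch; requires maintainer rights on the project
resp = requests.patch(
    f"{BASE}/patches/134/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded", "archived": True},  # hypothetical new values
)
resp.raise_for_status()
print(resp.json()["state"])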