From patchwork Fri Jun 19 08:14:20 2015
X-Patchwork-Submitter: "Zhang, Helin"
X-Patchwork-Id: 5558
From: Helin Zhang
To: dev@dpdk.org
Date: Fri, 19 Jun 2015 16:14:20 +0800
Message-Id: <1434701661-9943-18-git-send-email-helin.zhang@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1434701661-9943-1-git-send-email-helin.zhang@intel.com>
References: <1433144045-30847-1-git-send-email-helin.zhang@intel.com> <1434701661-9943-1-git-send-email-helin.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v7 17/18] examples/l3fwd: replace bit mask based packet type with unified packet type
List-Id: patches and discussions about DPDK

To unify packet types among all PMDs, the packet type bit masks in
'ol_flags' are replaced by the unified packet type. To avoid breaking ABI
compatibility, all of these changes are gated by RTE_NEXT_ABI, which is
disabled by default.

Signed-off-by: Helin Zhang
---
 examples/l3fwd/main.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 120 insertions(+), 3 deletions(-)

v2 changes:
* Used redefined packet types and enlarged packet_type field in mbuf.

v3 changes:
* Minor bug fixes and enhancements.

v5 changes:
* Re-worded the commit logs.

v6 changes:
* Disabled the code changes for unified packet type by default, to avoid
  breaking ABI compatibility.

v7 changes:
* Renamed RTE_UNIFIED_PKT_TYPE to RTE_NEXT_ABI.
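For reviewers, a condensed sketch of the classification change that the hunks
below apply throughout l3fwd. This is illustration only and not part of the
patch: classify_l3() is a hypothetical helper, written against the same
RTE_ETH_IS_IPV4_HDR()/RTE_ETH_IS_IPV6_HDR() helpers the patch itself uses.

#include <rte_mbuf.h>
#include <rte_ethdev.h>

/* Hypothetical helper (illustration only): returns 4 for an IPv4 packet,
 * 6 for an IPv6 packet and 0 for anything else. */
static inline int
classify_l3(const struct rte_mbuf *m)
{
#ifdef RTE_NEXT_ABI
	/* New scheme: the PMD reports RTE_PTYPE_* values in m->packet_type. */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
		return 4;
	if (RTE_ETH_IS_IPV6_HDR(m->packet_type))
		return 6;
#else
	/* Old scheme: the packet type is carried as bit masks in m->ol_flags. */
	if (m->ol_flags & PKT_RX_IPV4_HDR)
		return 4;
	if (m->ol_flags & PKT_RX_IPV6_HDR)
		return 6;
#endif
	return 0;
}

The same substitution (ol_flags & PKT_RX_IPV*_HDR tests replaced by
RTE_ETH_IS_IPV*_HDR(packet_type)) recurs in every hunk of the diff.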
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 7e4bbfd..eff9580 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -948,7 +948,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 
+#ifdef RTE_NEXT_ABI
+	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+#else
 	if (m->ol_flags & PKT_RX_IPV4_HDR) {
+#endif
 		/* Handle IPv4 headers.*/
 		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m,
 				unsigned char *) + sizeof(struct ether_hdr));
 
@@ -979,8 +983,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
 
 		send_single_packet(m, dst_port);
-
+#ifdef RTE_NEXT_ABI
+	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+#else
 	} else {
+#endif
 		/* Handle IPv6 headers.*/
 		struct ipv6_hdr *ipv6_hdr;
 
@@ -999,8 +1006,13 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
 
 		send_single_packet(m, dst_port);
+#ifdef RTE_NEXT_ABI
+	} else
+		/* Free the mbuf that contains non-IPV4/IPV6 packet */
+		rte_pktmbuf_free(m);
+#else
 	}
-
+#endif
 }
 
 #ifdef DO_RFC_1812_CHECKS
@@ -1024,12 +1036,19 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
  * to BAD_PORT value.
  */
 static inline __attribute__((always_inline)) void
+#ifdef RTE_NEXT_ABI
+rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+#else
 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t flags)
+#endif
 {
 	uint8_t ihl;
 
+#ifdef RTE_NEXT_ABI
+	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
+#else
 	if ((flags & PKT_RX_IPV4_HDR) != 0) {
-
+#endif
 		ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
 
 		ipv4_hdr->time_to_live--;
@@ -1059,11 +1078,19 @@ get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	struct ipv6_hdr *ipv6_hdr;
 	struct ether_hdr *eth_hdr;
 
+#ifdef RTE_NEXT_ABI
+	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+#else
 	if (pkt->ol_flags & PKT_RX_IPV4_HDR) {
+#endif
 		if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
 				&next_hop) != 0)
 			next_hop = portid;
+#ifdef RTE_NEXT_ABI
+	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+#else
 	} else if (pkt->ol_flags & PKT_RX_IPV6_HDR) {
+#endif
 		eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
 		ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
 		if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
@@ -1097,12 +1124,52 @@ process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	ve = val_eth[dp];
 
 	dst_port[0] = dp;
+#ifdef RTE_NEXT_ABI
+	rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
+#else
 	rfc1812_process(ipv4_hdr, dst_port, pkt->ol_flags);
+#endif
 
 	te = _mm_blend_epi16(te, ve, MASK_ETH);
 	_mm_store_si128((__m128i *)eth_hdr, te);
 }
 
+#ifdef RTE_NEXT_ABI
+/*
+ * Read packet_type and destination IPV4 addresses from 4 mbufs.
+ */
+static inline void
+processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+		__m128i *dip,
+		uint32_t *ipv4_flag)
+{
+	struct ipv4_hdr *ipv4_hdr;
+	struct ether_hdr *eth_hdr;
+	uint32_t x0, x1, x2, x3;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x0 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x1 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] &= pkt[1]->packet_type;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x2 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] &= pkt[2]->packet_type;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x3 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] &= pkt[3]->packet_type;
+
+	dip[0] = _mm_set_epi32(x3, x2, x1, x0);
+}
+#else /* RTE_NEXT_ABI */
 /*
  * Read ol_flags and destination IPV4 addresses from 4 mbufs.
  */
@@ -1135,14 +1202,24 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP], __m128i *dip, uint32_t *flag)
 
 	dip[0] = _mm_set_epi32(x3, x2, x1, x0);
 }
+#endif /* RTE_NEXT_ABI */
 
 /*
  * Lookup into LPM for destination port.
  * If lookup fails, use incoming port (portid) as destination port.
  */
 static inline void
+#ifdef RTE_NEXT_ABI
+processx4_step2(const struct lcore_conf *qconf,
+		__m128i dip,
+		uint32_t ipv4_flag,
+		uint8_t portid,
+		struct rte_mbuf *pkt[FWDSTEP],
+		uint16_t dprt[FWDSTEP])
+#else
 processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
 	uint8_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP])
+#endif /* RTE_NEXT_ABI */
 {
 	rte_xmm_t dst;
 	const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
@@ -1152,7 +1229,11 @@ processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
 	dip = _mm_shuffle_epi8(dip, bswap_mask);
 
 	/* if all 4 packets are IPV4. */
+#ifdef RTE_NEXT_ABI
+	if (likely(ipv4_flag)) {
+#else
 	if (likely(flag != 0)) {
+#endif
 		rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
 	} else {
 		dst.x = dip;
@@ -1202,6 +1283,16 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
 	_mm_store_si128(p[2], te[2]);
 	_mm_store_si128(p[3], te[3]);
 
+#ifdef RTE_NEXT_ABI
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
+		&dst_port[0], pkt[0]->packet_type);
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
+		&dst_port[1], pkt[1]->packet_type);
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
+		&dst_port[2], pkt[2]->packet_type);
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
+		&dst_port[3], pkt[3]->packet_type);
+#else /* RTE_NEXT_ABI */
 	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
 		&dst_port[0], pkt[0]->ol_flags);
 	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
@@ -1210,6 +1301,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
 		&dst_port[2], pkt[2]->ol_flags);
 	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
 		&dst_port[3], pkt[3]->ol_flags);
+#endif /* RTE_NEXT_ABI */
 }
 
 /*
@@ -1396,7 +1488,11 @@ main_loop(__attribute__((unused)) void *dummy)
 	uint16_t *lp;
 	uint16_t dst_port[MAX_PKT_BURST];
 	__m128i dip[MAX_PKT_BURST / FWDSTEP];
+#ifdef RTE_NEXT_ABI
+	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
+#else
 	uint32_t flag[MAX_PKT_BURST / FWDSTEP];
+#endif
 	uint16_t pnum[MAX_PKT_BURST + 1];
 #endif
 
@@ -1466,6 +1562,18 @@ main_loop(__attribute__((unused)) void *dummy)
 			 */
 			int32_t n = RTE_ALIGN_FLOOR(nb_rx, 4);
 			for (j = 0; j < n ; j+=4) {
+#ifdef RTE_NEXT_ABI
+				uint32_t pkt_type =
+					pkts_burst[j]->packet_type &
+					pkts_burst[j+1]->packet_type &
+					pkts_burst[j+2]->packet_type &
+					pkts_burst[j+3]->packet_type;
+				if (pkt_type & RTE_PTYPE_L3_IPV4) {
+					simple_ipv4_fwd_4pkts(
+						&pkts_burst[j], portid, qconf);
+				} else if (pkt_type &
+						RTE_PTYPE_L3_IPV6) {
+#else /* RTE_NEXT_ABI */
 				uint32_t ol_flag = pkts_burst[j]->ol_flags
 						& pkts_burst[j+1]->ol_flags
 						& pkts_burst[j+2]->ol_flags
@@ -1474,6 +1582,7 @@ main_loop(__attribute__((unused)) void *dummy)
 					simple_ipv4_fwd_4pkts(&pkts_burst[j],
 								portid, qconf);
 				} else if (ol_flag & PKT_RX_IPV6_HDR) {
+#endif /* RTE_NEXT_ABI */
 					simple_ipv6_fwd_4pkts(&pkts_burst[j],
 								portid, qconf);
 				} else {
@@ -1498,13 +1607,21 @@ main_loop(__attribute__((unused)) void *dummy)
 
 			for (j = 0; j != k; j += FWDSTEP) {
 				processx4_step1(&pkts_burst[j],
 						&dip[j / FWDSTEP],
+#ifdef RTE_NEXT_ABI
+						&ipv4_flag[j / FWDSTEP]);
+#else
 						&flag[j / FWDSTEP]);
+#endif
 			}
 
 			k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
 			for (j = 0; j != k; j += FWDSTEP) {
 				processx4_step2(qconf, dip[j / FWDSTEP],
+#ifdef RTE_NEXT_ABI
+						ipv4_flag[j / FWDSTEP], portid,
+#else
						flag[j / FWDSTEP], portid,
+#endif
 						&pkts_burst[j], &dst_port[j]);
 			}
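
Note on the vector path above: processx4_step1() and the burst loop in
main_loop() AND together the packet_type words of four mbufs so that a single
branch decides the whole group. A minimal standalone sketch of that test,
assuming RTE_NEXT_ABI is enabled so packet_type carries RTE_PTYPE_* values
(burst_is_all_ipv4() is a hypothetical name, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <rte_mbuf.h>

/* True only if all four mbufs carry an IPv4 L3 header. ANDing the
 * packet_type words first means the RTE_PTYPE_L3_IPV4 bit survives only
 * when every packet has it set, so one test covers the whole group. */
static inline bool
burst_is_all_ipv4(struct rte_mbuf *pkts[4])
{
	uint32_t ptype = pkts[0]->packet_type &
			 pkts[1]->packet_type &
			 pkts[2]->packet_type &
			 pkts[3]->packet_type;

	return (ptype & RTE_PTYPE_L3_IPV4) != 0;
}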