From patchwork Wed Sep 16 03:09:58 2020
X-Patchwork-Submitter: Junyu Jiang
X-Patchwork-Id: 77823
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Junyu Jiang
To: dev@dpdk.org
Cc: Leyi Rong, Qi Zhang, Qiming Yang, Guinan Sun, Junyu Jiang
Date: Wed, 16 Sep 2020 03:09:58 +0000
Message-Id: <20200916031002.42122-2-junyux.jiang@intel.com>
In-Reply-To: <20200916031002.42122-1-junyux.jiang@intel.com>
References: <20200826075501.50052-1-guinanx.sun@intel.com> <20200916031002.42122-1-junyux.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/5] net/ice: support flex Rx descriptor RxDID #22

This patch supports RxDID #22 by the following changes:
- add structure and macro definition for RxDID #22.
- support RxDID #22 format in normal path.
- change RSS hash parsing from RxDID #22 in AVX/SSE data path.
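In brief, RxDID #22 (the comms "OVS" profile) swaps the Hash and FlowID
fields relative to the generic comms profile: the Flow ID moves into
qword 1 and the RSS hash into qword 3. A minimal scalar sketch of the
normal-path parsing this enables, mirroring the ice_rxd_to_pkt_fields()
hunk below (the helper name is illustrative only; the struct, fields and
status bit come from this patch and the ice base code):

    /* Illustrative sketch, not part of the patch. */
    static inline void
    parse_comms_ovs_fields(struct rte_mbuf *mb,
            volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
    {
        uint16_t stat0 = rte_le_to_cpu_16(desc->status_error0);

        /* under RxDID #22 the RSS hash lives in qword 3 */
        if (stat0 & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S)) {
            mb->ol_flags |= PKT_RX_RSS_HASH;
            mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }

        /* the Flow ID lives in qword 1; 0xFFFFFFFF means "no FDIR match" */
        if (desc->flow_id != 0xFFFFFFFF) {
            mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
            mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }
    }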
Signed-off-by: Junyu Jiang
---
 drivers/net/ice/ice_ethdev.c        | 20 ++++++
 drivers/net/ice/ice_ethdev.h        |  4 ++
 drivers/net/ice/ice_rxtx.c          | 23 ++++---
 drivers/net/ice/ice_rxtx.h          | 42 +++++++++++++
 drivers/net/ice/ice_rxtx_vec_avx2.c | 98 +++++++++++++++++++++++++++--
 drivers/net/ice/ice_rxtx_vec_sse.c  | 89 +++++++++++++++++++++-----
 6 files changed, 249 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index c42581ea7..097b72023 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2147,6 +2147,24 @@ ice_rss_ctx_init(struct ice_pf *pf)
     ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
 }
 
+static uint64_t
+ice_get_supported_rxdid(struct ice_hw *hw)
+{
+    uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
+    uint32_t regval;
+    int i;
+
+    supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
+
+    for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+        regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
+        if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+            & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+            supported_rxdid |= BIT(i);
+    }
+    return supported_rxdid;
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -2298,6 +2316,8 @@ ice_dev_init(struct rte_eth_dev *dev)
         return ret;
     }
 
+    pf->supported_rxdid = ice_get_supported_rxdid(hw);
+
     return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 243a023e6..e8c9971fb 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -136,6 +136,9 @@
 #define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
 #define ICE_RXTX_BYTES_LOW(bytes)  ((bytes) & ICE_40_BIT_MASK)
 
+/* Max number of flexible descriptor rxdid */
+#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+
 /* DDP package type */
 enum ice_pkg_type {
     ICE_PKG_TYPE_UNKNOWN,
@@ -435,6 +438,7 @@ struct ice_pf {
     bool init_link_up;
     uint64_t old_rx_bytes;
     uint64_t old_tx_bytes;
+    uint64_t supported_rxdid; /* bitmap for supported RXDID */
 };
 
 #define ICE_MAX_QUEUE_NUM  2048
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index fecb13459..fef6ad454 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -63,7 +63,7 @@ static inline uint8_t
 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 {
     static uint8_t rxdid_map[] = {
-        [PROTO_XTR_NONE]      = ICE_RXDID_COMMS_GENERIC,
+        [PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
         [PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
         [PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
         [PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
@@ -73,7 +73,7 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
     };
 
     return xtr_type < RTE_DIM(rxdid_map) ?
-        rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
+        rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
 static enum ice_status
@@ -81,12 +81,13 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 {
     struct ice_vsi *vsi = rxq->vsi;
     struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+    struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
     struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
     struct ice_rlan_ctx rx_ctx;
     enum ice_status err;
     uint16_t buf_size, len;
     struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-    uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+    uint32_t rxdid = ICE_RXDID_COMMS_OVS;
     uint32_t regval;
 
     /* Set buffer size as the head split is disabled.
      */
@@ -151,6 +152,12 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
     PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
             rxq->port_id, rxq->queue_id, rxdid);
 
+    if (!(pf->supported_rxdid & BIT(rxdid))) {
+        PMD_DRV_LOG(ERR, "current DDP package doesn't support RXDID (%u)",
+                rxdid);
+        return -EINVAL;
+    }
+
     /* Enable Flexible Descriptors in the queue context which
      * allows this driver to select a specific receive descriptor format
      */
@@ -1338,7 +1345,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
 
 static void
 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
-             volatile struct ice_32b_rx_flex_desc_comms *desc)
+             volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
 {
     uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
     uint32_t metadata = 0;
@@ -1376,8 +1383,9 @@ static inline void
 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
               volatile union ice_rx_flex_desc *rxdp)
 {
-    volatile struct ice_32b_rx_flex_desc_comms *desc =
-        (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+    volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
+        (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
     uint16_t stat_err;
 
     stat_err = rte_le_to_cpu_16(desc->status_error0);
@@ -1385,13 +1393,14 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
         mb->ol_flags |= PKT_RX_RSS_HASH;
         mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
     }
+#endif
 
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
     if (desc->flow_id != 0xFFFFFFFF) {
         mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
         mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
     }
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
     if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
         ice_rxd_to_proto_xtr(mb, desc);
 #endif
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 2fdcfb7d0..e21ba152d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -38,6 +38,8 @@
 
 #define ICE_FDIR_PKT_LEN    512
 
+#define ICE_RXDID_COMMS_OVS    22
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 
@@ -135,6 +137,46 @@ union ice_tx_offload {
     };
 };
 
+/* Rx Flex Descriptor for Comms Package Profile
+ * RxDID Profile ID 22 (swap Hash and FlowID)
+ * Flex-field 0: Flow ID lower 16-bits
+ * Flex-field 1: Flow ID upper 16-bits
+ * Flex-field 2: RSS hash lower 16-bits
+ * Flex-field 3: RSS hash upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct ice_32b_rx_flex_desc_comms_ovs {
+    /* Qword 0 */
+    u8 rxdid;
+    u8 mir_id_umb_cast;
+    __le16 ptype_flexi_flags0;
+    __le16 pkt_len;
+    __le16 hdr_len_sph_flex_flags1;
+
+    /* Qword 1 */
+    __le16 status_error0;
+    __le16 l2tag1;
+    __le32 flow_id;
+
+    /* Qword 2 */
+    __le16 status_error1;
+    u8 flexi_flags2;
+    u8 ts_low;
+    __le16 l2tag2_1st;
+    __le16 l2tag2_2nd;
+
+    /* Qword 3 */
+    __le32 rss_hash;
+    union {
+        struct {
+            __le16 aux0;
+            __le16 aux1;
+        } flex;
+        __le32 ts_high;
+    } flex_ts;
+};
+
 int ice_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t queue_idx,
                uint16_t nb_desc,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index be50677c2..07d129e3f 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -191,8 +191,8 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
     const __m256i shuf_msk =
         _mm256_set_epi8
             (/* first descriptor */
-             15, 14,
-             13, 12,        /* octet 12~15, 32 bits rss */
+             0xFF, 0xFF,
+             0xFF, 0xFF,    /* rss hash parsed separately */
             11, 10,        /* octet 10~11, 16 bits vlan_macip */
             5, 4,          /* octet 4~5, 16 bits data_len */
             0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
@@ -200,8 +200,8 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
             0xFF, 0xFF,    /* pkt_type set as unknown */
             0xFF, 0xFF,    /*pkt_type set as unknown */
             /* second descriptor */
-             15, 14,
-             13, 12,        /* octet 12~15, 32 bits rss */
+             0xFF, 0xFF,
+             0xFF, 0xFF,    /* rss hash parsed separately */
             11, 10,        /* octet 10~11, 16 bits vlan_macip */
             5, 4,          /* octet 4~5, 16 bits data_len */
             0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
@@ -461,6 +461,96 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
         /* merge flags */
         const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
                 rss_vlan_flags);
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+        /**
+         * needs to load 2nd 16B of each desc for RSS hash parsing,
+         * will cause performance drop to get into this context.
+         */
+        if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+                DEV_RX_OFFLOAD_RSS_HASH) {
+            /* load bottom half of every 32B desc */
+            const __m128i raw_desc_bh7 =
+                _mm_load_si128
+                    ((void *)(&rxdp[7].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh6 =
+                _mm_load_si128
+                    ((void *)(&rxdp[6].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh5 =
+                _mm_load_si128
+                    ((void *)(&rxdp[5].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh4 =
+                _mm_load_si128
+                    ((void *)(&rxdp[4].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh3 =
+                _mm_load_si128
+                    ((void *)(&rxdp[3].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh2 =
+                _mm_load_si128
+                    ((void *)(&rxdp[2].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh1 =
+                _mm_load_si128
+                    ((void *)(&rxdp[1].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh0 =
+                _mm_load_si128
+                    ((void *)(&rxdp[0].wb.status_error1));
+
+            __m256i raw_desc_bh6_7 =
+                _mm256_inserti128_si256
+                    (_mm256_castsi128_si256(raw_desc_bh6),
+                    raw_desc_bh7, 1);
+            __m256i raw_desc_bh4_5 =
+                _mm256_inserti128_si256
+                    (_mm256_castsi128_si256(raw_desc_bh4),
+                    raw_desc_bh5, 1);
+            __m256i raw_desc_bh2_3 =
+                _mm256_inserti128_si256
+                    (_mm256_castsi128_si256(raw_desc_bh2),
+                    raw_desc_bh3, 1);
+            __m256i raw_desc_bh0_1 =
+                _mm256_inserti128_si256
+                    (_mm256_castsi128_si256(raw_desc_bh0),
+                    raw_desc_bh1, 1);
+
+            /**
+             * to shift the 32b RSS hash value to the
+             * highest 32b of each 128b before mask
+             */
+            __m256i rss_hash6_7 =
+                _mm256_slli_epi64(raw_desc_bh6_7, 32);
+            __m256i rss_hash4_5 =
+                _mm256_slli_epi64(raw_desc_bh4_5, 32);
+            __m256i rss_hash2_3 =
+                _mm256_slli_epi64(raw_desc_bh2_3, 32);
+            __m256i rss_hash0_1 =
+                _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+            __m256i rss_hash_msk =
+                _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+                         0xFFFFFFFF, 0, 0, 0);
+
+            rss_hash6_7 = _mm256_and_si256
+                    (rss_hash6_7, rss_hash_msk);
+            rss_hash4_5 = _mm256_and_si256
+                    (rss_hash4_5, rss_hash_msk);
+            rss_hash2_3 = _mm256_and_si256
+                    (rss_hash2_3, rss_hash_msk);
+            rss_hash0_1 = _mm256_and_si256
+                    (rss_hash0_1, rss_hash_msk);
+
+            mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+            mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+            mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+            mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+        } /* if() on RSS hash parsing */
+#endif
         /**
          * At this point, we have the 8 sets of flags in the low 16-bits
          * of each 32-bit value in vlan0.
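Both vector paths (the AVX2 hunk above and the SSE hunk below) use the
same shift-and-mask trick to merge the hash: the second 16B of each
descriptor is loaded starting at status_error1, so the 32-bit RSS hash
lands in bytes 8~11, i.e. the low dword of the upper 64-bit half, while
the mbuf's rx_descriptor_fields1 wants it in the top dword. A
self-contained single-lane sketch (the helper name and layout comments
are illustrative, derived from the descriptor struct added above):

    #include <emmintrin.h>

    /* Illustrative sketch: merge the RSS hash from the second 16B of a
     * flex descriptor (desc_bh, loaded from &rxdp[n].wb.status_error1)
     * into already-shuffled mbuf data whose hash slot was zeroed out.
     */
    static inline __m128i
    merge_rss_hash(__m128i pkt_mb, __m128i desc_bh)
    {
        /* keep only the top dword after the shift */
        const __m128i msk = _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
        /* within each 64-bit half: dword 2 (rss_hash) -> dword 3 */
        __m128i hash = _mm_slli_epi64(desc_bh, 32);

        hash = _mm_and_si128(hash, msk);
        return _mm_or_si128(pkt_mb, hash);
    }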
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 382ef31f3..fffb27138 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -230,7 +230,8 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
     const __m128i zero = _mm_setzero_si128();
     /* mask to shuffle from desc. to mbuf */
     const __m128i shuf_msk = _mm_set_epi8
-            (15, 14, 13, 12,  /* octet 12~15, 32 bits rss */
+            (0xFF, 0xFF,
+             0xFF, 0xFF,    /* rss hash parsed separately */
             11, 10,        /* octet 10~11, 16 bits vlan_macip */
             5, 4,          /* octet 4~5, 16 bits data_len */
             0xFF, 0xFF,    /* skip high 16 bits pkt_len, zero out */
@@ -321,7 +322,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
          pos += ICE_DESCS_PER_LOOP,
          rxdp += ICE_DESCS_PER_LOOP) {
         __m128i descs[ICE_DESCS_PER_LOOP];
-        __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+        __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
         __m128i staterr, sterr_tmp1, sterr_tmp2;
         /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
         __m128i mbp1;
@@ -367,8 +368,12 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
         rte_compiler_barrier();
 
         /* D.1 pkt 3,4 convert format from desc to pktmbuf */
-        pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
-        pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+        pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
+        pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+        /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+        pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
+        pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);
 
         /* C.1 4=>2 filter staterr info only */
         sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
@@ -378,12 +383,68 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
         ice_rx_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
 
         /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
-        pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
         pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+        pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
 
-        /* D.1 pkt 1,2 convert format from desc to pktmbuf */
-        pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
-        pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+        /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+        pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+        pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+        /**
+         * needs to load 2nd 16B of each desc for RSS hash parsing,
+         * will cause performance drop to get into this context.
+         */
+        if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+                DEV_RX_OFFLOAD_RSS_HASH) {
+            /* load bottom half of every 32B desc */
+            const __m128i raw_desc_bh3 =
+                _mm_load_si128
+                    ((void *)(&rxdp[3].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh2 =
+                _mm_load_si128
+                    ((void *)(&rxdp[2].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh1 =
+                _mm_load_si128
+                    ((void *)(&rxdp[1].wb.status_error1));
+            rte_compiler_barrier();
+            const __m128i raw_desc_bh0 =
+                _mm_load_si128
+                    ((void *)(&rxdp[0].wb.status_error1));
+
+            /**
+             * to shift the 32b RSS hash value to the
+             * highest 32b of each 128b before mask
+             */
+            __m128i rss_hash3 =
+                _mm_slli_epi64(raw_desc_bh3, 32);
+            __m128i rss_hash2 =
+                _mm_slli_epi64(raw_desc_bh2, 32);
+            __m128i rss_hash1 =
+                _mm_slli_epi64(raw_desc_bh1, 32);
+            __m128i rss_hash0 =
+                _mm_slli_epi64(raw_desc_bh0, 32);
+
+            __m128i rss_hash_msk =
+                _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
+
+            rss_hash3 = _mm_and_si128
+                    (rss_hash3, rss_hash_msk);
+            rss_hash2 = _mm_and_si128
+                    (rss_hash2, rss_hash_msk);
+            rss_hash1 = _mm_and_si128
+                    (rss_hash1, rss_hash_msk);
+            rss_hash0 = _mm_and_si128
+                    (rss_hash0, rss_hash_msk);
+
+            pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
+            pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
+            pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
+            pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
+        } /* if() on RSS hash parsing */
+#endif
 
         /* C.2 get 4 pkts staterr value */
         staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
@@ -391,14 +452,10 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
         /* D.3 copy final 3,4 data to rx_pkts */
         _mm_storeu_si128
             ((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
-             pkt_mb4);
+             pkt_mb3);
         _mm_storeu_si128
             ((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
-             pkt_mb3);
-
-        /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
-        pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
-        pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+             pkt_mb2);
 
         /* C* extract and record EOP bit */
         if (split_packet) {
@@ -422,9 +479,9 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
         /* D.3 copy final 1,2 data to rx_pkts */
         _mm_storeu_si128
             ((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
-             pkt_mb2);
+             pkt_mb1);
         _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
-                 pkt_mb1);
+                 pkt_mb0);
         ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
         /* C.4 calc avaialbe number of desc */
         var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));

From patchwork Wed Sep 16 03:09:59 2020
X-Patchwork-Submitter: Junyu Jiang
X-Patchwork-Id: 77824
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Junyu Jiang
To: dev@dpdk.org
Cc: Leyi Rong, Qi Zhang, Qiming Yang, Guinan Sun
Date: Wed, 16 Sep 2020 03:09:59 +0000
Message-Id: <20200916031002.42122-3-junyux.jiang@intel.com>
In-Reply-To: <20200916031002.42122-1-junyux.jiang@intel.com>
References: <20200826075501.50052-1-guinanx.sun@intel.com> <20200916031002.42122-1-junyux.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/5] net/ice: add flow director enabled switch value

From: Guinan Sun

This patch adds an fdir_enabled flag to indicate whether the flow
director mark ID should be parsed from the flexible Rx descriptor.

Signed-off-by: Guinan Sun
---
 drivers/net/ice/ice_ethdev.h      |  2 ++
 drivers/net/ice/ice_fdir_filter.c |  9 ++++++++-
 drivers/net/ice/ice_rxtx.h        | 30 ++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index e8c9971fb..366eee3b4 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -291,6 +291,7 @@ struct ice_fdir_filter_conf {
     uint64_t input_set;
     uint64_t outer_input_set; /* only for tunnel packets outer fields */
+    uint32_t mark_flag;
 };
 
 #define ICE_MAX_FDIR_FILTER_NUM        (1024 * 16)
@@ -471,6 +472,7 @@ struct ice_adapter {
     bool is_safe_mode;
     struct ice_devargs devargs;
     enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
+    uint16_t fdir_ref_cnt;
 };
 
 struct ice_vsi_vlan_pvid_info {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index e0ce1efb0..175abcdd5 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1318,6 +1318,9 @@ ice_fdir_create_filter(struct ice_adapter *ad,
         goto free_counter;
     }
 
+    if (filter->mark_flag == 1)
+        ice_fdir_rx_parsing_enable(ad, 1);
+
     rte_memcpy(entry, filter, sizeof(*entry));
     ret = ice_fdir_entry_insert(pf, entry, &key);
     if (ret) {
@@ -1390,6 +1393,10 @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
     }
 
     ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
+
+    if (filter->mark_flag == 1)
+        ice_fdir_rx_parsing_enable(ad, 0);
+
     flow->rule = NULL;
 
     rte_free(filter);
@@ -1562,7 +1569,7 @@ ice_fdir_parse_action(struct ice_adapter *ad,
             break;
         case RTE_FLOW_ACTION_TYPE_MARK:
             mark_num++;
-
+            filter->mark_flag = 1;
             mark_spec = actions->conf;
             filter->input.fltr_id = mark_spec->id;
             filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e21ba152d..9fa57b3b2 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -70,6 +70,7 @@ struct ice_rx_queue {
     uint8_t port_id; /* device port ID */
     uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+    uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */
     uint16_t queue_id; /* RX queue index */
     uint16_t reg_idx; /* RX queue register index */
     uint8_t drop_en; /* if not 0, set register bit */
@@ -245,4 +246,33 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 
+#define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
+    int i; \
+    for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+        struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+        if (!rxq) \
+            continue; \
+        rxq->fdir_enabled = on; \
+    } \
+    PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director parsing from Rx descriptor in data path. */
+static inline
+void ice_fdir_rx_parsing_enable(struct ice_adapter *ad, bool on)
+{
+    if (on) {
+        /* Enable flow director parsing from Rx descriptor */
+        FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
+        ad->fdir_ref_cnt++;
+    } else {
+        if (ad->fdir_ref_cnt >= 1) {
+            ad->fdir_ref_cnt--;
+
+            if (ad->fdir_ref_cnt == 0)
+                FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
+        }
+    }
+}
+
 #endif /* _ICE_RXTX_H_ */

From patchwork Wed Sep 16 03:10:00 2020
X-Patchwork-Submitter: Junyu Jiang
X-Patchwork-Id: 77825
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Junyu Jiang
To: dev@dpdk.org
Cc: Leyi Rong, Qi Zhang, Qiming Yang, Guinan Sun
Date: Wed, 16 Sep 2020 03:10:00 +0000
Message-Id: <20200916031002.42122-4-junyux.jiang@intel.com>
In-Reply-To: <20200916031002.42122-1-junyux.jiang@intel.com>
References: <20200826075501.50052-1-guinanx.sun@intel.com> <20200916031002.42122-1-junyux.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v3 3/5] net/ice: support flow mark in AVX path

From: Guinan Sun

Support flow director mark ID parsing from flexible Rx descriptor
in AVX path.
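The compare/XOR/AND sequence in the helper added below
(ice_flex_rxd_to_fdir_flags_vec_avx2()) is easiest to follow with
concrete values; a worked trace of one 32-bit lane (illustrative
numbers, using the constants from the patch):

    /*
     * match (flow_id = 0x00001234):        miss (flow_id = 0xFFFFFFFF):
     *   cmpeq 0xFFFFFFFF -> 0x00000000       cmpeq 0xFFFFFFFF -> 0xFFFFFFFF
     *   xor   0xFFFFFFFF -> 0xFFFFFFFF       xor   0xFFFFFFFF -> 0x00000000
     *   and   flag bits  -> PKT_RX_FDIR |    and   flag bits  -> 0
     *                       PKT_RX_FDIR_ID
     *
     * The XOR turns "all-ones on miss" into "all-ones on match", so the
     * final AND sets both flags exactly for matched packets.
     */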
Signed-off-by: Guinan Sun
---
 drivers/net/ice/ice_rxtx_vec_avx2.c | 64 ++++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 07d129e3f..70e4b76db 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -132,6 +132,25 @@ ice_rxq_rearm(struct ice_rx_queue *rxq)
     ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
 }
 
+static inline __m256i
+ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+    RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+    RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+    const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+            PKT_RX_FDIR_ID);
+    /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+    const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+    __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+            fdir_mis_mask);
+    /* this XOR op inverts the bits of fdir_mask */
+    fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
+    const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
+
+    return fdir_flags;
+}
+
 static inline uint16_t
 _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                 uint16_t nb_pkts, uint8_t *split_packet)
@@ -459,9 +478,51 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                 rss_vlan_flag_bits);
 
         /* merge flags */
-        const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+        __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
                 rss_vlan_flags);
 
+        if (rxq->fdir_enabled) {
+            const __m256i fdir_id4_7 =
+                _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+            const __m256i fdir_id0_3 =
+                _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+            const __m256i fdir_id0_7 =
+                _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+            const __m256i fdir_flags =
+                ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
+
+            /* merge with fdir_flags */
+            mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+            /* write to mbuf: have to use scalar store here */
+            rx_pkts[i + 0]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 3);
+
+            rx_pkts[i + 1]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 7);
+
+            rx_pkts[i + 2]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 2);
+
+            rx_pkts[i + 3]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 6);
+
+            rx_pkts[i + 4]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 1);
+
+            rx_pkts[i + 5]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 5);
+
+            rx_pkts[i + 6]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 0);
+
+            rx_pkts[i + 7]->hash.fdir.hi =
+                _mm256_extract_epi32(fdir_id0_7, 4);
+        } /* if() on fdir_enabled */
+
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
         /**
          * needs to load 2nd 16B of each desc for RSS hash parsing,
@@ -551,6 +612,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
             mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
         } /* if() on RSS hash parsing */
 #endif
+
         /**
          * At this point, we have the 8 sets of flags in the low 16-bits
          * of each 32-bit value in vlan0.
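The scrambled-looking extract indexes above (3, 7, 2, 6, 1, 5, 0, 4)
fall out of the AVX2 unpack semantics, which interleave within each
128-bit lane. A trace of the gather (derived from the intrinsic
definitions, with dX.3 denoting descriptor X's flow_id dword; worth
re-deriving if in doubt):

    /*
     * raw_descX_Y holds desc X in lane 0 and desc Y in lane 1.
     *
     * fdir_id4_7 = unpackhi_epi32(raw_desc6_7, raw_desc4_5)
     *            = [d6.2 d4.2 d6.3 d4.3 | d7.2 d5.2 d7.3 d5.3]
     * fdir_id0_3 = unpackhi_epi32(raw_desc2_3, raw_desc0_1)
     *            = [d2.2 d0.2 d2.3 d0.3 | d3.2 d1.2 d3.3 d1.3]
     * fdir_id0_7 = unpackhi_epi64(fdir_id4_7, fdir_id0_3)
     *            = [d6.3 d4.3 d2.3 d0.3 | d7.3 d5.3 d3.3 d1.3]
     *
     * so dword 3 is packet 0's flow ID, dword 7 is packet 1's, dword 2
     * is packet 2's, and so on: exactly the index pattern used by the
     * scalar stores.
     */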
From patchwork Wed Sep 16 03:10:01 2020
X-Patchwork-Submitter: Junyu Jiang
X-Patchwork-Id: 77826
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Junyu Jiang
To: dev@dpdk.org
Cc: Leyi Rong, Qi Zhang, Qiming Yang, Guinan Sun
Date: Wed, 16 Sep 2020 03:10:01 +0000
Message-Id: <20200916031002.42122-5-junyux.jiang@intel.com>
In-Reply-To: <20200916031002.42122-1-junyux.jiang@intel.com>
References: <20200826075501.50052-1-guinanx.sun@intel.com> <20200916031002.42122-1-junyux.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/5] net/ice: support flow mark in SSE path

From: Guinan Sun

Support flow director mark ID parsing from flexible Rx descriptor
in SSE path.
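The SSE gather below is simpler than the AVX2 one, since all four flow
IDs fit in a single 128-bit register. A trace of the two unpack steps
(derived from the intrinsic definitions, with dX.3 denoting descriptor
X's flow_id dword):

    /*
     * fdir_id0_1 = unpackhi_epi32(descs[0], descs[1])
     *            = [d0.2 d1.2 d0.3 d1.3]
     * fdir_id2_3 = unpackhi_epi32(descs[2], descs[3])
     *            = [d2.2 d3.2 d2.3 d3.3]
     * fdir_id0_3 = unpackhi_epi64(fdir_id0_1, fdir_id2_3)
     *            = [d0.3 d1.3 d2.3 d3.3]
     *
     * so _mm_extract_epi32(fdir_id0_3, i) is simply packet i's flow ID
     * and the scalar stores need no reordering.
     */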
Signed-off-by: Guinan Sun
---
 drivers/net/ice/ice_rxtx_vec_sse.c | 49 ++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index fffb27138..965cd8b26 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -10,6 +10,25 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
+static inline __m128i
+ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+    RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+    RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+    const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
+            PKT_RX_FDIR_ID);
+    /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+    const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+    __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+            fdir_mis_mask);
+    /* this XOR op inverts the bits of fdir_mask */
+    fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
+    const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+    return fdir_flags;
+}
+
 static inline void
 ice_rxq_rearm(struct ice_rx_queue *rxq)
 {
@@ -159,6 +178,36 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],
     /* merge the flags */
     flags = _mm_or_si128(flags, rss_vlan);
 
+    if (rxq->fdir_enabled) {
+        const __m128i fdir_id0_1 =
+            _mm_unpackhi_epi32(descs[0], descs[1]);
+
+        const __m128i fdir_id2_3 =
+            _mm_unpackhi_epi32(descs[2], descs[3]);
+
+        const __m128i fdir_id0_3 =
+            _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+        const __m128i fdir_flags =
+            ice_flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+        /* merge with fdir_flags */
+        flags = _mm_or_si128(flags, fdir_flags);
+
+        /* write fdir_id to mbuf */
+        rx_pkts[0]->hash.fdir.hi =
+            _mm_extract_epi32(fdir_id0_3, 0);
+
+        rx_pkts[1]->hash.fdir.hi =
+            _mm_extract_epi32(fdir_id0_3, 1);
+
+        rx_pkts[2]->hash.fdir.hi =
+            _mm_extract_epi32(fdir_id0_3, 2);
+
+        rx_pkts[3]->hash.fdir.hi =
+            _mm_extract_epi32(fdir_id0_3, 3);
+    } /* if() on fdir_enabled */
+
     /**
      * At this point, we have the 4 sets of flags in the low 16-bits
      * of each 32-bit value in flags.
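With mark parsing now wired into the scalar, AVX2 and SSE paths, a MARK
rule in testpmd exercises the series end to end (illustrative addresses
and IDs; the exact verbose line format may vary by testpmd version):

    testpmd> set verbose 1
    testpmd> flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end actions mark id 3 / queue index 1 / end

Packets hitting the rule should then carry PKT_RX_FDIR and
PKT_RX_FDIR_ID in ol_flags and show a line like "FDIR matched ID=0x3"
in the verbose output, which is what the final patch of the series
relies on.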
From patchwork Wed Sep 16 03:10:02 2020
X-Patchwork-Submitter: Junyu Jiang
X-Patchwork-Id: 77827
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Junyu Jiang
To: dev@dpdk.org
Cc: Leyi Rong, Qi Zhang, Qiming Yang, Guinan Sun
Date: Wed, 16 Sep 2020 03:10:02 +0000
Message-Id: <20200916031002.42122-6-junyux.jiang@intel.com>
In-Reply-To: <20200916031002.42122-1-junyux.jiang@intel.com>
References: <20200826075501.50052-1-guinanx.sun@intel.com> <20200916031002.42122-1-junyux.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v3 5/5] net/ice: remove devargs flow-mark-support

From: Guinan Sun

All data paths now support flow mark, so remove the devargs
"flow-mark-support". The FDIR matched ID is displayed in verbose
output when packets match the created rule.

Signed-off-by: Guinan Sun
---
 doc/guides/nics/ice.rst               | 12 ------------
 drivers/net/ice/ice_ethdev.c          | 10 +---------
 drivers/net/ice/ice_ethdev.h          |  1 -
 drivers/net/ice/ice_rxtx_vec_common.h |  6 ------
 4 files changed, 1 insertion(+), 28 deletions(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 314198857..25a821177 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -72,18 +72,6 @@ Runtime Config Options
 
     -w 80:00.0,pipeline-mode-support=1
 
-- ``Flow Mark Support`` (default ``0``)
-
-  This is a hint to the driver to select the data path that supports flow mark extraction
-  by default.
-  NOTE: This is an experimental devarg, it will be removed when any of below conditions
-  is ready.
-  1) all data paths support flow mark (currently vPMD does not)
-  2) a new offload like RTE_DEV_RX_OFFLOAD_FLOW_MARK be introduced as a standard way to hint.
-
-  Example::
-
-    -w 80:00.0,flow-mark-support=1
-
 - ``Protocol extraction for per queue``
 
   Configure the RX queues to do protocol extraction into mbuf for protocol
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 097b72023..248daf25d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -23,13 +23,11 @@
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
 #define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
-#define ICE_FLOW_MARK_SUPPORT_ARG "flow-mark-support"
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 
 static const char * const ice_valid_args[] = {
     ICE_SAFE_MODE_SUPPORT_ARG,
     ICE_PIPELINE_MODE_SUPPORT_ARG,
-    ICE_FLOW_MARK_SUPPORT_ARG,
     ICE_PROTO_XTR_ARG,
     NULL
 };
@@ -2006,11 +2004,6 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
     if (ret)
         goto bail;
 
-    ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
-                 &parse_bool, &ad->devargs.flow_mark_support);
-    if (ret)
-        goto bail;
-
 bail:
     rte_kvargs_free(kvlist);
     return ret;
@@ -5178,8 +5171,7 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                   ICE_PROTO_XTR_ARG "=[queue:]"
                   ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
-                  ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
-                  ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
+                  ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
 
 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 366eee3b4..37b956e2f 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -451,7 +451,6 @@ struct ice_devargs {
     int safe_mode_support;
     uint8_t proto_xtr_dflt;
     int pipe_mode_support;
-    int flow_mark_support;
     uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
 };
 
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 46e3be98a..e2019c8d6 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -270,12 +270,6 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
     int i;
     struct ice_rx_queue *rxq;
-    struct ice_adapter *ad =
-        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-    /* vPMD does not support flow mark. */
-    if (ad->devargs.flow_mark_support)
-        return -1;
 
     for (i = 0; i < dev->data->nb_rx_queues; i++) {
         rxq = dev->data->rx_queues[i];