From patchwork Tue Oct 8 01:50:11 2019
X-Patchwork-Submitter: Qi Zhang
X-Patchwork-Id: 60661
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Qi Zhang
To: wenzhuo.lu@intel.com, qiming.yang@intel.com
Cc: dev@dpdk.org, xiaolong.ye@intel.com, Qi Zhang, Dan Nowlin,
 Paul M Stillwell Jr
Date: Tue, 8 Oct 2019 09:50:11 +0800
Message-Id: <20191008015018.17086-6-qi.z.zhang@intel.com>
In-Reply-To: <20191008015018.17086-1-qi.z.zhang@intel.com>
References: <20190902035551.16852-1-qi.z.zhang@intel.com>
 <20191008015018.17086-1-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v3 05/12] net/ice/base: improvements to Flow Director masking

Currently, 3-tuple FD matching is implemented using masking. However,
this approach consumes 24 of the 32 available FD masks. This patch uses
the swap register more efficiently to implement the 3-tuple matches,
which leaves all FD masks free for other uses.

Also add IPv6 versions of the DSCP, TTL and Protocol fields for FD use.

Signed-off-by: Dan Nowlin
Signed-off-by: Paul M Stillwell Jr
Signed-off-by: Qi Zhang
Acked-by: Qiming Yang
---
 drivers/net/ice/base/ice_flex_pipe.c |  71 +++++++++------------
 drivers/net/ice/base/ice_flex_type.h |   4 +-
 drivers/net/ice/base/ice_flow.c      | 118 ++++++++++++++++++++---------------
 drivers/net/ice/base/ice_flow.h      |  10 ++-
 4 files changed, 108 insertions(+), 95 deletions(-)

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 75bb87079..8f8cab86e 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1248,25 +1248,6 @@ void ice_free_seg(struct ice_hw *hw)
 }
 
 /**
- * ice_init_fd_mask_regs - initialize Flow Director mask registers
- * @hw: pointer to the HW struct
- *
- * This function sets up the Flow Director mask registers to allow for complete
- * masking off of any of the 24 Field Vector words. After this call, mask 0 will
- * mask off all of FV index 0, mask 1 will mask off all of FV index 1, etc.
- */
-static void ice_init_fd_mask_regs(struct ice_hw *hw)
-{
-	u16 i;
-
-	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
-		wr32(hw, GLQF_FDMASK(i), i);
-		ice_debug(hw, ICE_DBG_INIT, "init fd mask(%d): %x = %x\n", i,
-			  GLQF_FDMASK(i), i);
-	}
-}
-
-/**
  * ice_init_pkg_regs - initialize additional package registers
  * @hw: pointer to the hardware structure
  */
@@ -1279,8 +1260,6 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
 	/* setup Switch block input mask, which is 48-bits in two parts */
 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
-	/* setup default flow director masks */
-	ice_init_fd_mask_regs(hw);
 }
 
 /**
@@ -2643,7 +2622,8 @@ ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
 		expect_no_mask = true;
 
 	/* Scan the enabled masks on this profile, for the specified idx */
-	for (i = 0; i < ICE_PROFILE_MASK_COUNT; i++)
+	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
+	     hw->blk[blk].masks.count; i++)
 		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
 			if (hw->blk[blk].masks.masks[i].in_use &&
 			    hw->blk[blk].masks.masks[i].idx == idx) {
@@ -2981,14 +2961,15 @@ ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
  */
 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
 {
-#define MAX_NUM_PORTS 8
-	u16 num_ports = MAX_NUM_PORTS;
+	u16 per_pf;
 	u16 i;
 
 	ice_init_lock(&hw->blk[blk].masks.lock);
 
-	hw->blk[blk].masks.count = ICE_PROFILE_MASK_COUNT / num_ports;
-	hw->blk[blk].masks.first = hw->pf_id * hw->blk[blk].masks.count;
+	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
+
+	hw->blk[blk].masks.count = per_pf;
+	hw->blk[blk].masks.first = hw->pf_id * per_pf;
 
 	ice_memset(hw->blk[blk].masks.masks, 0,
 		   sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
@@ -4241,8 +4222,6 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
 
 	ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
 
-	ice_init_fd_mask_regs(hw);
-
 	/* This code assumes that the Flow Director field vectors are assigned
 	 * from the end of the FV indexes working towards the zero index, that
 	 * only complete fields will be included and will be consecutive, and
@@ -4298,7 +4277,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
 				return ICE_ERR_OUT_OF_RANGE;
 
 			/* keep track of non-relevant fields */
-			mask_sel |= 1 << (first_free - k);
+			mask_sel |= BIT(first_free - k);
 		}
 
 		pair_start[index] = first_free;
@@ -4342,29 +4321,39 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
 		si -= indexes_used;
 	}
 
-	/* for each set of 4 swap indexes, write the appropriate register */
+	/* for each set of 4 swap and 4 inset indexes, write the appropriate
+	 * register
+	 */
 	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
-		u32 raw_entry = 0;
+		u32 raw_swap = 0;
+		u32 raw_in = 0;
 
 		for (k = 0; k < 4; k++) {
 			u8 idx;
 
 			idx = (j * 4) + k;
-			if (used[idx])
-				raw_entry |= used[idx] << (k * BITS_PER_BYTE);
+			if (used[idx] && !(mask_sel & BIT(idx))) {
+				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+			}
 		}
 
-		/* write the appropriate register set, based on HW block */
-		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_entry);
+		/* write the appropriate swap register set */
+		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
 
-		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %x\n",
"swap wr(%d, %d): %x = %x\n", - prof_id, j, GLQF_FDSWAP(prof_id, j), raw_entry); + /* write the appropriate inset register set */ + wr32(hw, GLQF_FDINSET(prof_id, j), raw_in); + + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n", + prof_id, j, GLQF_FDINSET(prof_id, j), raw_in); } - /* update the masks for this profile to be sure we ignore fields that - * are not relevant to our match criteria - */ - ice_update_fd_mask(hw, prof_id, mask_sel); + /* initially clear the mask select for this profile */ + ice_update_fd_mask(hw, prof_id, 0); return ICE_SUCCESS; } diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h index 48c1e5184..92d205ac7 100644 --- a/drivers/net/ice/base/ice_flex_type.h +++ b/drivers/net/ice/base/ice_flex_type.h @@ -668,8 +668,8 @@ struct ice_masks { struct ice_lock lock; /* lock to protect this structure */ u16 first; /* first mask owned by the PF */ u16 count; /* number of masks owned by the PF */ -#define ICE_PROFILE_MASK_COUNT 32 - struct ice_mask masks[ICE_PROFILE_MASK_COUNT]; +#define ICE_PROF_MASK_COUNT 32 + struct ice_mask masks[ICE_PROF_MASK_COUNT]; }; /* Tables per block */ diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c index 8ed3f8eb7..370ad9ba3 100644 --- a/drivers/net/ice/base/ice_flow.c +++ b/drivers/net/ice/base/ice_flow.c @@ -22,15 +22,6 @@ #define ICE_FLOW_FLD_SZ_GTP_TEID 4 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2 -/* Protocol header fields are extracted at the word boundaries as word-sized - * values. Specify the displacement value of some non-word-aligned fields needed - * to compute the offset of words containing the fields in the corresponding - * protocol headers. Displacement values are expressed in number of bits. - */ -#define ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP (-4) -#define ICE_FLOW_FLD_IPV6_TTL_PROT_DISP ((-2) * BITS_PER_BYTE) -#define ICE_FLOW_FLD_IPV6_TTL_TTL_DISP ((-1) * BITS_PER_BYTE) - /* Describe properties of a protocol header field */ struct ice_flow_field_info { enum ice_flow_seg_hdr hdr; @@ -67,18 +58,29 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN), /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE), - /* IPv4 */ - /* ICE_FLOW_FIELD_IDX_IP_DSCP */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, 1), - /* ICE_FLOW_FIELD_IDX_IP_TTL */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, 1), - /* ICE_FLOW_FIELD_IDX_IP_PROT */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT), + /* IPv4 / IPv6 */ + /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP, + 0x00fc), + /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP, + 0x0ff0), + /* ICE_FLOW_FIELD_IDX_IPV4_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, + ICE_FLOW_FLD_SZ_IP_TTL, 0xff00), + /* ICE_FLOW_FIELD_IDX_IPV4_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, + ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, + ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, + ICE_FLOW_FLD_SZ_IP_PROT, 0xff00), /* ICE_FLOW_FIELD_IDX_IPV4_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR), /* ICE_FLOW_FIELD_IDX_IPV4_DA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, 
-	/* IPv6 */
 	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
 	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
@@ -608,6 +610,7 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw,
  * @params: information about the flow to be processed
  * @seg: packet segment index of the field to be extracted
  * @fld: ID of field to be extracted
+ * @match: bitfield of all fields
  *
  * This function determines the protocol ID, offset, and size of the given
  * field. It then allocates one or more extraction sequence entries for the
@@ -615,13 +618,14 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw,
  */
 static enum ice_status
 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
-		    u8 seg, enum ice_flow_field fld)
+		    u8 seg, enum ice_flow_field fld, u64 match)
 {
 	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
 	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
 	u8 fv_words = hw->blk[params->blk].es.fvw;
 	struct ice_flow_fld_info *flds;
 	u16 cnt, ese_bits, i;
+	u16 sib_mask = 0;
 	s16 adj = 0;
 	u16 mask;
 	u16 off;
@@ -638,35 +642,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
 		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
 		break;
-	case ICE_FLOW_FIELD_IDX_IP_DSCP:
-		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
-			adj = ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP;
-		/* Fall through */
-	case ICE_FLOW_FIELD_IDX_IP_TTL:
-	case ICE_FLOW_FIELD_IDX_IP_PROT:
-		/* Some fields are located at different offsets in IPv4 and
-		 * IPv6
+	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
+		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
+		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
+	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
+		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+
+		/* TTL and PROT share the same extraction seq. entry.
+		 * Each is considered a sibling to the other in terms of sharing
+		 * the same extraction sequence entry.
 		 */
-		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) {
-			prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S :
-				ICE_PROT_IPV4_IL;
-			/* TTL and PROT share the same extraction seq. entry.
-			 * Each is considered a sibling to the other in term
-			 * sharing the same extraction sequence entry.
-			 */
-			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
-				sib = ICE_FLOW_FIELD_IDX_IP_PROT;
-			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
-				sib = ICE_FLOW_FIELD_IDX_IP_TTL;
-		} else if (params->prof->segs[seg].hdrs &
-			   ICE_FLOW_SEG_HDR_IPV6) {
-			prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S :
-				ICE_PROT_IPV6_IL;
-			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
-				adj = ICE_FLOW_FLD_IPV6_TTL_TTL_DISP;
-			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
-				adj = ICE_FLOW_FLD_IPV6_TTL_PROT_DISP;
-		}
+		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
+			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
+		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
+
+		/* If the sibling field is also included, that field's
+		 * mask needs to be included.
+		 */
+		if (match & BIT(sib))
+			sib_mask = ice_flds_info[sib].mask;
+		break;
+	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
+	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
+		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+
+		/* TTL and PROT share the same extraction seq. entry.
+		 * Each is considered a sibling to the other in terms of sharing
+		 * the same extraction sequence entry.
+		 */
+		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
+			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
+		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
+
+		/* If the sibling field is also included, that field's
+		 * mask needs to be included.
+		 */
+		if (match & BIT(sib))
+			sib_mask = ice_flds_info[sib].mask;
 		break;
 	case ICE_FLOW_FIELD_IDX_IPV4_SA:
 	case ICE_FLOW_FIELD_IDX_IPV4_DA:
@@ -733,6 +751,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 		ICE_FLOW_FV_EXTRACT_SZ;
 	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
 	flds[fld].xtrct.idx = params->es_cnt;
+	flds[fld].xtrct.mask = ice_flds_info[fld].mask;
 
 	/* Adjust the next field-entry index after accommodating the number of
 	 * entries this field consumes
@@ -742,7 +761,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 
 	/* Fill in the extraction sequence entries needed for this field */
 	off = flds[fld].xtrct.off;
-	mask = ice_flds_info[fld].mask;
+	mask = flds[fld].xtrct.mask;
 	for (i = 0; i < cnt; i++) {
 		/* Only consume an extraction sequence entry if there is no
 		 * sibling field associated with this field or the sibling entry
@@ -767,7 +786,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 
 			params->es[idx].prot_id = prot_id;
 			params->es[idx].off = off;
-			params->mask[idx] = mask;
+			params->mask[idx] = mask | sib_mask;
 			params->es_cnt++;
 		}
 
@@ -885,7 +904,8 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
 			if (match & bit) {
 				status =
 					ice_flow_xtract_fld
-					(hw, params, i, (enum ice_flow_field)j);
+					(hw, params, i, (enum ice_flow_field)j,
+					 match);
 				if (status)
 					return status;
 				match &= ~bit;
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 326ff6f81..c224e6ebf 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -114,9 +114,12 @@ enum ice_flow_field {
 	ICE_FLOW_FIELD_IDX_C_VLAN,
 	ICE_FLOW_FIELD_IDX_ETH_TYPE,
 	/* L3 */
-	ICE_FLOW_FIELD_IDX_IP_DSCP,
-	ICE_FLOW_FIELD_IDX_IP_TTL,
-	ICE_FLOW_FIELD_IDX_IP_PROT,
+	ICE_FLOW_FIELD_IDX_IPV4_DSCP,
+	ICE_FLOW_FIELD_IDX_IPV6_DSCP,
+	ICE_FLOW_FIELD_IDX_IPV4_TTL,
+	ICE_FLOW_FIELD_IDX_IPV4_PROT,
+	ICE_FLOW_FIELD_IDX_IPV6_TTL,
+	ICE_FLOW_FIELD_IDX_IPV6_PROT,
 	ICE_FLOW_FIELD_IDX_IPV4_SA,
 	ICE_FLOW_FIELD_IDX_IPV4_DA,
 	ICE_FLOW_FIELD_IDX_IPV6_SA,
@@ -232,6 +235,7 @@ struct ice_flow_seg_xtrct {
 	u16 off;	/* Starting offset of the field in header in bytes */
 	u8 idx;		/* Index of FV entry used */
 	u8 disp;	/* Displacement of field in bits fr. FV entry's start */
+	u16 mask;	/* Mask for field */
 };
 
 enum ice_flow_fld_match_type {
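
(Illustrative note, not part of the patch: the loop in ice_update_fd_swap() above packs one byte per FV word into each 32-bit GLQF_FDSWAP/GLQF_FDINSET register value. The short standalone C sketch below mirrors that packing using the patch's ICE_INSET_DFLT value; the used[] contents are made-up example indexes and the mask_sel check is omitted for brevity.)

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define ICE_INSET_DFLT 0x9f	/* default inset value from the patch */

int main(void)
{
	/* hypothetical swap targets for FV words 0..3 of one register set;
	 * 0 means the word is not swapped
	 */
	uint8_t used[4] = { 0x25, 0x26, 0x00, 0x00 };
	uint32_t raw_swap = 0;
	uint32_t raw_in = 0;
	int k;

	for (k = 0; k < 4; k++) {
		if (used[k]) {
			/* byte k of the register holds the swap index of word k */
			raw_swap |= (uint32_t)used[k] << (k * BITS_PER_BYTE);
			/* the matching inset byte gets the default inset value */
			raw_in |= (uint32_t)ICE_INSET_DFLT << (k * BITS_PER_BYTE);
		}
	}

	printf("swap value:  0x%08x\n", raw_swap);	/* prints 0x00002625 */
	printf("inset value: 0x%08x\n", raw_in);	/* prints 0x00009f9f */
	return 0;
}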