From patchwork Mon Oct 12 07:54:04 2020
X-Patchwork-Submitter: Junfeng Guo <junfeng.guo@intel.com>
X-Patchwork-Id: 80328
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, junfeng.guo@intel.com
Date: Mon, 12 Oct 2020 15:54:04 +0800
Message-Id: <20201012075404.1727593-1-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.25.1
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
Subject: [dpdk-dev] [PATCH] net/ice: refactor RSS config for potential bugs
List-Id: DPDK patches and discussions

The current implementation of the PF RSS config wrap function has some
potential bugs for GTPU, e.g., the same input set for GTPU inner and
non-tunnel packets can produce different hash values, although they
should be the same. The current AVF implementation has a better design
and does not have these bugs, so reimplement the wrap function to align
with the AVF RSS handling.
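
For reference, the wrap flow around ice_add_rss_cfg() stays the same as
today: a pre hook removes or parks any conflicting GTPU hash contexts,
the rule is programmed, and a post hook records the rule and restores
the parked contexts. The sketch below is illustrative only (simplified
from the existing ice_add_rss_cfg_wrap(), error handling reduced) and is
not part of the diff:

    int
    ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
                         uint64_t fld, uint32_t hdr, bool symm)
    {
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int ret;

        /* Remove or park GTPU hash contexts that conflict with this rule. */
        ret = ice_add_rss_cfg_pre(pf, hdr);
        if (ret)
            return -EINVAL;

        /* Program the RSS rule in hardware. */
        ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
        if (ret)
            PMD_DRV_LOG(ERR, "add rss cfg failed\n");

        /* Record the rule in its GTPU context slot and restore parked ones. */
        ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
        if (ret)
            PMD_DRV_LOG(ERR, "add rss cfg post failed\n");

        return 0;
    }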
Signed-off-by: Junfeng Guo --- drivers/net/ice/ice_ethdev.c | 615 +++++++++++++++++++++++------------ drivers/net/ice/ice_ethdev.h | 56 ++-- 2 files changed, 440 insertions(+), 231 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index d8ce09d28..0056da78a 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2087,14 +2087,7 @@ ice_reset_fxp_resource(struct ice_hw *hw) static void ice_rss_ctx_init(struct ice_pf *pf) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); - - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); - - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx)); } static uint64_t @@ -2438,234 +2431,452 @@ ice_dev_uninit(struct rte_eth_dev *dev) return 0; } +static bool +is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg) +{ + return ((cfg->hash_func >= ICE_RSS_HASH_TOEPLITZ && + cfg->hash_func <= ICE_RSS_HASH_JHASH) && + (cfg->hash_flds != 0 && cfg->addl_hdrs != 0)) ? + true : false; +} + +static void +hash_cfg_reset(struct ice_rss_hash_cfg *cfg) +{ + cfg->hash_flds = 0; + cfg->addl_hdrs = 0; + cfg->hash_func = 0; +} + static int -ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm) +ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) { + enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi = pf->main_vsi; - if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { - if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr; - pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld; - pf->gtpu_hash_ctx.ipv4_udp.symm = symm; - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr; - pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld; - pf->gtpu_hash_ctx.ipv6_udp.symm = symm; - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr; - pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld; - pf->gtpu_hash_ctx.ipv4_tcp.symm = symm; - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr; - pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld; - pf->gtpu_hash_ctx.ipv6_tcp.symm = symm; - } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { - pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr; - pf->gtpu_hash_ctx.ipv4.hash_fld = fld; - pf->gtpu_hash_ctx.ipv4.symm = symm; - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); - } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { - pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr; - pf->gtpu_hash_ctx.ipv6.hash_fld = fld; - pf->gtpu_hash_ctx.ipv6.symm = symm; - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); - } - } + if (!is_hash_cfg_valid(cfg)) + return -ENOENT; - if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | - ICE_FLOW_SEG_HDR_GTPU_UP)) { - if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) { - ice_add_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4.hash_fld, - pf->gtpu_hash_ctx.ipv4.pkt_hdr, - pf->gtpu_hash_ctx.ipv4.symm); - ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4); - } - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - if 
(ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) { - ice_add_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6.hash_fld, - pf->gtpu_hash_ctx.ipv6.pkt_hdr, - pf->gtpu_hash_ctx.ipv6.symm); - ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6); - } - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) { - ice_add_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4.hash_fld, - pf->gtpu_hash_ctx.ipv4.pkt_hdr, - pf->gtpu_hash_ctx.ipv4.symm); - ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4); - } - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) { - ice_add_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6.hash_fld, - pf->gtpu_hash_ctx.ipv6.pkt_hdr, - pf->gtpu_hash_ctx.ipv6.symm); - ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6); - } - } + status = ice_rem_rss_cfg(hw, vsi->idx, cfg->hash_flds, + cfg->addl_hdrs); + if (status && status != ICE_ERR_DOES_NOT_EXIST) { + PMD_DRV_LOG(ERR, + "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", + vsi->idx, status); + return -EBUSY; } return 0; } static int -ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) +ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) { + enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi = pf->main_vsi; + bool symm; - if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | - ICE_FLOW_SEG_HDR_GTPU_UP)) { - if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4_udp.hash_fld, - pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); - } + if (!is_hash_cfg_valid(cfg)) + return -ENOENT; - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4.hash_fld, - pf->gtpu_hash_ctx.ipv4.pkt_hdr); - ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4); - } - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6_udp.hash_fld, - pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); - } + symm = (cfg->hash_func == ICE_RSS_HASH_TOEPLITZ_SYMMETRIC) ? 
+ true : false; - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6.hash_fld, - pf->gtpu_hash_ctx.ipv6.pkt_hdr); - ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6); - } - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, - pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); - } + status = ice_add_rss_cfg(hw, vsi->idx, cfg->hash_flds, + cfg->addl_hdrs, symm); + if (status) { + PMD_DRV_LOG(ERR, + "ice_add_rss_cfg failed for VSI:%d, error:%d\n", + vsi->idx, status); + return -EBUSY; + } - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4.hash_fld, - pf->gtpu_hash_ctx.ipv4.pkt_hdr); - ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4); - } - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, - pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); - } + return 0; +} - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6.hash_fld, - pf->gtpu_hash_ctx.ipv6.pkt_hdr); - ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6); - } - } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4.hash_fld, - pf->gtpu_hash_ctx.ipv4.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); - } +static int +ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) +{ + int ret; - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4_udp.hash_fld, - pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); - } + ret = ice_hash_moveout(pf, cfg); + if (ret && (ret != -ENOENT)) + return ret; - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, - pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); - } - } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6.hash_fld, - pf->gtpu_hash_ctx.ipv6.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); - } + hash_cfg_reset(cfg); - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6_udp.hash_fld, - pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); - } + return 0; +} - if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { - ice_rem_rss_cfg(hw, vsi->idx, - pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, - pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); - } - } +static int +ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, + u8 ctx_idx) +{ + int ret; + + switch (ctx_idx) { + case ICE_HASH_GTPU_CTX_EH_IP: + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + 
&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_EH_IP_UDP: + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_EH_IP_TCP: + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_UP_IP: + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_UP_IP_UDP: + case ICE_HASH_GTPU_CTX_UP_IP_TCP: + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_DW_IP: + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_remove(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + 
&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_DW_IP_UDP: + case ICE_HASH_GTPU_CTX_DW_IP_TCP: + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveout(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + default: + break; } return 0; } +static u8 calc_gtpu_ctx_idx(uint32_t hdr) +{ + u8 eh_idx, ip_idx; + + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) + eh_idx = 0; + else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP) + eh_idx = 1; + else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN) + eh_idx = 2; + else + return ICE_HASH_GTPU_CTX_MAX; + + ip_idx = 0; + if (hdr & ICE_FLOW_SEG_HDR_UDP) + ip_idx = 1; + else if (hdr & ICE_FLOW_SEG_HDR_TCP) + ip_idx = 2; + + if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)) + return eh_idx * 3 + ip_idx; + else + return ICE_HASH_GTPU_CTX_MAX; +} + static int -ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) { - if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { - if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_UDP)) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); - } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && - (hdr & ICE_FLOW_SEG_HDR_TCP)) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); - } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); - } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { - ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); - } + u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); + + if (hdr & ICE_FLOW_SEG_HDR_IPV4) + return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4, + gtpu_ctx_idx); + else if (hdr & ICE_FLOW_SEG_HDR_IPV6) + return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6, + gtpu_ctx_idx); + + return 0; +} + +static int +ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx, + u32 hdr, u64 fld, bool symm, u8 ctx_idx) +{ + int ret; + + if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) { + ctx->ctx[ctx_idx].addl_hdrs = hdr; + ctx->ctx[ctx_idx].hash_flds = fld; + ctx->ctx[ctx_idx].hash_func = symm; + } + + switch (ctx_idx) { + case ICE_HASH_GTPU_CTX_EH_IP: + break; + case ICE_HASH_GTPU_CTX_EH_IP_UDP: + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_EH_IP_TCP: + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + 
&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + case ICE_HASH_GTPU_CTX_UP_IP: + case ICE_HASH_GTPU_CTX_UP_IP_UDP: + case ICE_HASH_GTPU_CTX_UP_IP_TCP: + case ICE_HASH_GTPU_CTX_DW_IP: + case ICE_HASH_GTPU_CTX_DW_IP_UDP: + case ICE_HASH_GTPU_CTX_DW_IP_TCP: + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]); + if (ret && (ret != -ENOENT)) + return ret; + + ret = ice_hash_moveback(pf, + &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]); + if (ret && (ret != -ENOENT)) + return ret; + + break; + default: + break; } return 0; } +static int +ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm) +{ + u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); + + if (hdr & ICE_FLOW_SEG_HDR_IPV4) + return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, hdr, + fld, symm, gtpu_ctx_idx); + else if (hdr & ICE_FLOW_SEG_HDR_IPV6) + return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, hdr, + fld, symm, gtpu_ctx_idx); + + return 0; +} + +static void +ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) +{ + u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr); + + if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX) + return; + + if (hdr & ICE_FLOW_SEG_HDR_IPV4) + hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]); + else if (hdr & ICE_FLOW_SEG_HDR_IPV6) + hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]); +} + int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, uint64_t fld, uint32_t hdr) @@ -2677,9 +2888,7 @@ ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, if (ret && ret != ICE_ERR_DOES_NOT_EXIST) PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); - ret = ice_rem_rss_cfg_post(pf, hdr); - if (ret) - PMD_DRV_LOG(ERR, "remove rss cfg post failed\n"); + ice_rem_rss_cfg_post(pf, hdr); return 0; } diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 37b956e2f..978909603 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -365,37 +365,37 @@ struct ice_fdir_info { struct ice_fdir_counter_pool_container counter; }; -#define ICE_HASH_CFG_VALID(p) \ - ((p)->hash_fld != 0 && (p)->pkt_hdr != 0) - -#define ICE_HASH_CFG_RESET(p) do { \ - (p)->hash_fld = 0; \ - (p)->pkt_hdr = 0; \ -} while (0) - -#define ICE_HASH_CFG_IS_ROTATING(p) \ - ((p)->rotate == true) - -#define ICE_HASH_CFG_ROTATE_START(p) \ - ((p)->rotate = true) - -#define ICE_HASH_CFG_ROTATE_STOP(p) \ - ((p)->rotate = false) +#define ICE_HASH_GTPU_CTX_EH_IP 0 +#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1 +#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2 +#define ICE_HASH_GTPU_CTX_UP_IP 3 +#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4 +#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5 +#define ICE_HASH_GTPU_CTX_DW_IP 6 +#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7 +#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8 +#define ICE_HASH_GTPU_CTX_MAX 9 + +enum ice_rss_hash_func { + ICE_RSS_HASH_TOEPLITZ = 0, + ICE_RSS_HASH_TOEPLITZ_SYMMETRIC = 1, + ICE_RSS_HASH_XOR = 2, + ICE_RSS_HASH_JHASH = 3, +}; -struct ice_hash_cfg { - uint32_t pkt_hdr; - uint64_t hash_fld; - bool rotate; /* rotate l3 rule after l4 rule. 
*/ - bool symm; +struct ice_rss_hash_cfg { + u32 addl_hdrs; + u64 hash_flds; + enum ice_rss_hash_func hash_func; }; struct ice_hash_gtpu_ctx { - struct ice_hash_cfg ipv4; - struct ice_hash_cfg ipv6; - struct ice_hash_cfg ipv4_udp; - struct ice_hash_cfg ipv6_udp; - struct ice_hash_cfg ipv4_tcp; - struct ice_hash_cfg ipv6_tcp; + struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; +}; + +struct ice_hash_ctx { + struct ice_hash_gtpu_ctx gtpu4; + struct ice_hash_gtpu_ctx gtpu6; }; struct ice_pf { @@ -421,7 +421,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ - struct ice_hash_gtpu_ctx gtpu_hash_ctx; + struct ice_hash_ctx hash_ctx; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset;
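
Illustrative note (not part of the patch): each per-PF GTPU hash context
holds nine ice_rss_hash_cfg slots, indexed by calc_gtpu_ctx_idx() as
eh_idx * 3 + ip_idx, where eh_idx is 0/1/2 for GTPU_EH/GTPU_UP/GTPU_DWN
and ip_idx is 0/1/2 for plain IP/UDP/TCP. A slot counts as active only
when is_hash_cfg_valid() holds, i.e. it has a known hash function plus
non-zero hash fields and headers. A minimal sketch of where an
outer-IPv4, GTPU-EH, inner-UDP rule is tracked, assuming the structures
above (fld and symm stand in for the rule's hash fields and symmetry
flag):

    /* GTPU_EH (eh_idx 0) + UDP (ip_idx 1) -> slot 0 * 3 + 1, i.e.
     * ICE_HASH_GTPU_CTX_EH_IP_UDP in the outer-IPv4 context table.
     */
    uint32_t hdr = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 |
                   ICE_FLOW_SEG_HDR_UDP;
    u8 idx = calc_gtpu_ctx_idx(hdr);   /* == ICE_HASH_GTPU_CTX_EH_IP_UDP */
    struct ice_rss_hash_cfg *cfg = &pf->hash_ctx.gtpu4.ctx[idx];

    cfg->addl_hdrs = hdr;
    cfg->hash_flds = fld;              /* e.g. the inner UDP 4-tuple fields */
    cfg->hash_func = symm ? ICE_RSS_HASH_TOEPLITZ_SYMMETRIC :
                            ICE_RSS_HASH_TOEPLITZ;

When a conflicting rule is added later, ice_hash_moveout() drops the rule
from hardware but keeps this slot so ice_hash_moveback() can restore it
afterwards, while ice_hash_remove() drops both the hardware rule and the
slot.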