get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
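
These operations can be exercised with any HTTP client; the sketch below uses Python with the requests library. It is illustrative only: the token value is a placeholder, the Authorization header assumes the usual token scheme, the "accepted" state name is just an example, and the writable fields (state, delegate, archived) are taken from the sample response further down. Write access normally requires a maintainer account for the project.

    import requests

    BASE = "http://patches.dpdk.org/api"
    TOKEN = "0123456789abcdef"  # placeholder; a real API token comes from your Patchwork account

    # get: show a patch (no authentication needed for read access)
    patch = requests.get(f"{BASE}/patches/60572/").json()
    print(patch["name"], patch["state"])

    # patch: update selected fields of a patch (maintainer permissions required)
    resp = requests.patch(
        f"{BASE}/patches/60572/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()

    # put: called the same way; PATCH changes only the fields supplied,
    # while PUT is the full-update variant.

The exchange below shows the full response returned for a single patch: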

GET /api/patches/60572/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 60572,
    "url": "http://patches.dpdk.org/api/patches/60572/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20191006031408.8633-6-qi.z.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191006031408.8633-6-qi.z.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191006031408.8633-6-qi.z.zhang@intel.com",
    "date": "2019-10-06T03:14:01",
    "name": "[v2,05/12] net/ice/base: improvements to Flow Director masking",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "61aea033a80ec5b361ff8f17e8db81a19b7b1265",
    "submitter": {
        "id": 504,
        "url": "http://patches.dpdk.org/api/people/504/?format=api",
        "name": "Qi Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "delegate": {
        "id": 31221,
        "url": "http://patches.dpdk.org/api/users/31221/?format=api",
        "username": "yexl",
        "first_name": "xiaolong",
        "last_name": "ye",
        "email": "xiaolong.ye@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20191006031408.8633-6-qi.z.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 6704,
            "url": "http://patches.dpdk.org/api/series/6704/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6704",
            "date": "2019-10-06T03:13:56",
            "name": "[v2,01/12] net/ice/base: fix for adding PPPoE switch rule",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/6704/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/60572/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/60572/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 98E421D15E;\n\tSun,  6 Oct 2019 05:11:29 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 153F51C43E\n\tfor <dev@dpdk.org>; Sun,  6 Oct 2019 05:11:24 +0200 (CEST)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t05 Oct 2019 20:11:24 -0700",
            "from dpdk51.sh.intel.com ([10.67.110.245])\n\tby orsmga007.jf.intel.com with ESMTP; 05 Oct 2019 20:11:22 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.67,261,1566889200\"; d=\"scan'208\";a=\"183095223\"",
        "From": "Qi Zhang <qi.z.zhang@intel.com>",
        "To": "wenzhuo.lu@intel.com,\n\tqiming.yang@intel.com",
        "Cc": "dev@dpdk.org, xiaolong.ye@intel.com, Qi Zhang <qi.z.zhang@intel.com>,\n\tDan Nowlin <dan.nowlin@intel.com>,\n\tPaul M Stillwell Jr <paul.m.stillwell.jr@intel.com>",
        "Date": "Sun,  6 Oct 2019 11:14:01 +0800",
        "Message-Id": "<20191006031408.8633-6-qi.z.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.13.6",
        "In-Reply-To": "<20191006031408.8633-1-qi.z.zhang@intel.com>",
        "References": "<20190902035551.16852-1-qi.z.zhang@intel.com>\n\t<20191006031408.8633-1-qi.z.zhang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 05/12] net/ice/base: improvements to Flow\n\tDirector masking",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Currently, 3-tuple FD matching is implemented using masking. However,\nthis is using up twenty-four of the thirty-two FD masks available. This\npatch uses the swap register more efficiently to implement the 3-tuple\nmatches, which saves all FD masks for other uses.\n\nAdded IPV6 versions of DSCP, TTL and Protocol fields for FD use.\n\nSigned-off-by: Dan Nowlin <dan.nowlin@intel.com>\nSigned-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>\nSigned-off-by: Qi Zhang <qi.z.zhang@intel.com>\nAcked-by: Qiming Yang <qiming.yang@intel.com>\n---\n drivers/net/ice/base/ice_flex_pipe.c |  71 +++++++++------------\n drivers/net/ice/base/ice_flex_type.h |   4 +-\n drivers/net/ice/base/ice_flow.c      | 118 ++++++++++++++++++++---------------\n drivers/net/ice/base/ice_flow.h      |  10 ++-\n 4 files changed, 108 insertions(+), 95 deletions(-)",
    "diff": "diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c\nindex 75bb87079..8f8cab86e 100644\n--- a/drivers/net/ice/base/ice_flex_pipe.c\n+++ b/drivers/net/ice/base/ice_flex_pipe.c\n@@ -1248,25 +1248,6 @@ void ice_free_seg(struct ice_hw *hw)\n }\n \n /**\n- * ice_init_fd_mask_regs - initialize Flow Director mask registers\n- * @hw: pointer to the HW struct\n- *\n- * This function sets up the Flow Director mask registers to allow for complete\n- * masking off of any of the 24 Field Vector words. After this call, mask 0 will\n- * mask off all of FV index 0, mask 1 will mask off all of FV index 1, etc.\n- */\n-static void ice_init_fd_mask_regs(struct ice_hw *hw)\n-{\n-\tu16 i;\n-\n-\tfor (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {\n-\t\twr32(hw, GLQF_FDMASK(i), i);\n-\t\tice_debug(hw, ICE_DBG_INIT, \"init fd mask(%d): %x = %x\\n\", i,\n-\t\t\t  GLQF_FDMASK(i), i);\n-\t}\n-}\n-\n-/**\n  * ice_init_pkg_regs - initialize additional package registers\n  * @hw: pointer to the hardware structure\n  */\n@@ -1279,8 +1260,6 @@ static void ice_init_pkg_regs(struct ice_hw *hw)\n \t/* setup Switch block input mask, which is 48-bits in two parts */\n \twr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);\n \twr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);\n-\t/* setup default flow director masks */\n-\tice_init_fd_mask_regs(hw);\n }\n \n /**\n@@ -2643,7 +2622,8 @@ ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,\n \t\texpect_no_mask = true;\n \n \t/* Scan the enabled masks on this profile, for the specified idx */\n-\tfor (i = 0; i < ICE_PROFILE_MASK_COUNT; i++)\n+\tfor (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +\n+\t     hw->blk[blk].masks.count; i++)\n \t\tif (hw->blk[blk].es.mask_ena[prof] & BIT(i))\n \t\t\tif (hw->blk[blk].masks.masks[i].in_use &&\n \t\t\t    hw->blk[blk].masks.masks[i].idx == idx) {\n@@ -2981,14 +2961,15 @@ ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,\n  */\n static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)\n {\n-#define MAX_NUM_PORTS    8\n-\tu16 num_ports = MAX_NUM_PORTS;\n+\tu16 per_pf;\n \tu16 i;\n \n \tice_init_lock(&hw->blk[blk].masks.lock);\n \n-\thw->blk[blk].masks.count = ICE_PROFILE_MASK_COUNT / num_ports;\n-\thw->blk[blk].masks.first = hw->pf_id * hw->blk[blk].masks.count;\n+\tper_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;\n+\n+\thw->blk[blk].masks.count = per_pf;\n+\thw->blk[blk].masks.first = hw->pf_id * per_pf;\n \n \tice_memset(hw->blk[blk].masks.masks, 0,\n \t\t   sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);\n@@ -4241,8 +4222,6 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)\n \n \tice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);\n \n-\tice_init_fd_mask_regs(hw);\n-\n \t/* This code assumes that the Flow Director field vectors are assigned\n \t * from the end of the FV indexes working towards the zero index, that\n \t * only complete fields will be included and will be consecutive, and\n@@ -4298,7 +4277,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)\n \t\t\t\t\treturn ICE_ERR_OUT_OF_RANGE;\n \n \t\t\t\t/* keep track of non-relevant fields */\n-\t\t\t\tmask_sel |= 1 << (first_free - k);\n+\t\t\t\tmask_sel |= BIT(first_free - k);\n \t\t\t}\n \n \t\t\tpair_start[index] = first_free;\n@@ -4342,29 +4321,39 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)\n \t\tsi -= indexes_used;\n 
\t}\n \n-\t/* for each set of 4 swap indexes, write the appropriate register */\n+\t/* for each set of 4 swap and 4 inset indexes, write the appropriate\n+\t * register\n+\t */\n \tfor (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {\n-\t\tu32 raw_entry = 0;\n+\t\tu32 raw_swap = 0;\n+\t\tu32 raw_in = 0;\n \n \t\tfor (k = 0; k < 4; k++) {\n \t\t\tu8 idx;\n \n \t\t\tidx = (j * 4) + k;\n-\t\t\tif (used[idx])\n-\t\t\t\traw_entry |= used[idx] << (k * BITS_PER_BYTE);\n+\t\t\tif (used[idx] && !(mask_sel & BIT(idx))) {\n+\t\t\t\traw_swap |= used[idx] << (k * BITS_PER_BYTE);\n+#define ICE_INSET_DFLT 0x9f\n+\t\t\t\traw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);\n+\t\t\t}\n \t\t}\n \n-\t\t/* write the appropriate register set, based on HW block */\n-\t\twr32(hw, GLQF_FDSWAP(prof_id, j), raw_entry);\n+\t\t/* write the appropriate swap register set */\n+\t\twr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);\n+\n+\t\tice_debug(hw, ICE_DBG_INIT, \"swap wr(%d, %d): %x = %08x\\n\",\n+\t\t\t  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);\n \n-\t\tice_debug(hw, ICE_DBG_INIT, \"swap wr(%d, %d): %x = %x\\n\",\n-\t\t\t  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_entry);\n+\t\t/* write the appropriate inset register set */\n+\t\twr32(hw, GLQF_FDINSET(prof_id, j), raw_in);\n+\n+\t\tice_debug(hw, ICE_DBG_INIT, \"inset wr(%d, %d): %x = %08x\\n\",\n+\t\t\t  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);\n \t}\n \n-\t/* update the masks for this profile to be sure we ignore fields that\n-\t * are not relevant to our match criteria\n-\t */\n-\tice_update_fd_mask(hw, prof_id, mask_sel);\n+\t/* initially clear the mask select for this profile */\n+\tice_update_fd_mask(hw, prof_id, 0);\n \n \treturn ICE_SUCCESS;\n }\ndiff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h\nindex 48c1e5184..92d205ac7 100644\n--- a/drivers/net/ice/base/ice_flex_type.h\n+++ b/drivers/net/ice/base/ice_flex_type.h\n@@ -668,8 +668,8 @@ struct ice_masks {\n \tstruct ice_lock lock;  /* lock to protect this structure */\n \tu16 first;\t/* first mask owned by the PF */\n \tu16 count;\t/* number of masks owned by the PF */\n-#define ICE_PROFILE_MASK_COUNT 32\n-\tstruct ice_mask masks[ICE_PROFILE_MASK_COUNT];\n+#define ICE_PROF_MASK_COUNT 32\n+\tstruct ice_mask masks[ICE_PROF_MASK_COUNT];\n };\n \n /* Tables per block */\ndiff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c\nindex 8ed3f8eb7..370ad9ba3 100644\n--- a/drivers/net/ice/base/ice_flow.c\n+++ b/drivers/net/ice/base/ice_flow.c\n@@ -22,15 +22,6 @@\n #define ICE_FLOW_FLD_SZ_GTP_TEID\t4\n #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2\n \n-/* Protocol header fields are extracted at the word boundaries as word-sized\n- * values. Specify the displacement value of some non-word-aligned fields needed\n- * to compute the offset of words containing the fields in the corresponding\n- * protocol headers. 
Displacement values are expressed in number of bits.\n- */\n-#define ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP\t(-4)\n-#define ICE_FLOW_FLD_IPV6_TTL_PROT_DISP\t((-2) * BITS_PER_BYTE)\n-#define ICE_FLOW_FLD_IPV6_TTL_TTL_DISP\t((-1) * BITS_PER_BYTE)\n-\n /* Describe properties of a protocol header field */\n struct ice_flow_field_info {\n \tenum ice_flow_seg_hdr hdr;\n@@ -67,18 +58,29 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),\n \t/* ICE_FLOW_FIELD_IDX_ETH_TYPE */\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),\n-\t/* IPv4 */\n-\t/* ICE_FLOW_FIELD_IDX_IP_DSCP */\n-\tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, 1),\n-\t/* ICE_FLOW_FIELD_IDX_IP_TTL */\n-\tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, 1),\n-\t/* ICE_FLOW_FIELD_IDX_IP_PROT */\n-\tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT),\n+\t/* IPv4 / IPv6 */\n+\t/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */\n+\tICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,\n+\t\t\t      0x00fc),\n+\t/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */\n+\tICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,\n+\t\t\t      0x0ff0),\n+\t/* ICE_FLOW_FIELD_IDX_IPV4_TTL */\n+\tICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,\n+\t\t\t      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),\n+\t/* ICE_FLOW_FIELD_IDX_IPV4_PROT */\n+\tICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,\n+\t\t\t      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),\n+\t/* ICE_FLOW_FIELD_IDX_IPV6_TTL */\n+\tICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,\n+\t\t\t      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),\n+\t/* ICE_FLOW_FIELD_IDX_IPV6_PROT */\n+\tICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,\n+\t\t\t      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),\n \t/* ICE_FLOW_FIELD_IDX_IPV4_SA */\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),\n \t/* ICE_FLOW_FIELD_IDX_IPV4_DA */\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),\n-\t/* IPv6 */\n \t/* ICE_FLOW_FIELD_IDX_IPV6_SA */\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),\n \t/* ICE_FLOW_FIELD_IDX_IPV6_DA */\n@@ -608,6 +610,7 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw,\n  * @params: information about the flow to be processed\n  * @seg: packet segment index of the field to be extracted\n  * @fld: ID of field to be extracted\n+ * @match: bitfield of all fields\n  *\n  * This function determines the protocol ID, offset, and size of the given\n  * field. It then allocates one or more extraction sequence entries for the\n@@ -615,13 +618,14 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw,\n  */\n static enum ice_status\n ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,\n-\t\t    u8 seg, enum ice_flow_field fld)\n+\t\t    u8 seg, enum ice_flow_field fld, u64 match)\n {\n \tenum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;\n \tenum ice_prot_id prot_id = ICE_PROT_ID_INVAL;\n \tu8 fv_words = hw->blk[params->blk].es.fvw;\n \tstruct ice_flow_fld_info *flds;\n \tu16 cnt, ese_bits, i;\n+\tu16 sib_mask = 0;\n \ts16 adj = 0;\n \tu16 mask;\n \tu16 off;\n@@ -638,35 +642,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,\n \tcase ICE_FLOW_FIELD_IDX_ETH_TYPE:\n \t\tprot_id = seg == 0 ? 
ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;\n \t\tbreak;\n-\tcase ICE_FLOW_FIELD_IDX_IP_DSCP:\n-\t\tif (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)\n-\t\t\tadj = ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP;\n-\t\t/* Fall through */\n-\tcase ICE_FLOW_FIELD_IDX_IP_TTL:\n-\tcase ICE_FLOW_FIELD_IDX_IP_PROT:\n-\t\t/* Some fields are located at different offsets in IPv4 and\n-\t\t * IPv6\n+\tcase ICE_FLOW_FIELD_IDX_IPV4_DSCP:\n+\t\tprot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;\n+\t\tbreak;\n+\tcase ICE_FLOW_FIELD_IDX_IPV6_DSCP:\n+\t\tprot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;\n+\t\tbreak;\n+\tcase ICE_FLOW_FIELD_IDX_IPV4_TTL:\n+\tcase ICE_FLOW_FIELD_IDX_IPV4_PROT:\n+\t\tprot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;\n+\n+\t\t/* TTL and PROT share the same extraction seq. entry.\n+\t\t * Each is considered a sibling to the other in terms of sharing\n+\t\t * the same extraction sequence entry.\n \t\t */\n-\t\tif (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) {\n-\t\t\tprot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S :\n-\t\t\t\tICE_PROT_IPV4_IL;\n-\t\t\t/* TTL and PROT share the same extraction seq. entry.\n-\t\t\t * Each is considered a sibling to the other in term\n-\t\t\t * sharing the same extraction sequence entry.\n-\t\t\t */\n-\t\t\tif (fld == ICE_FLOW_FIELD_IDX_IP_TTL)\n-\t\t\t\tsib = ICE_FLOW_FIELD_IDX_IP_PROT;\n-\t\t\telse if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)\n-\t\t\t\tsib = ICE_FLOW_FIELD_IDX_IP_TTL;\n-\t\t} else if (params->prof->segs[seg].hdrs &\n-\t\t\t   ICE_FLOW_SEG_HDR_IPV6) {\n-\t\t\tprot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S :\n-\t\t\t\tICE_PROT_IPV6_IL;\n-\t\t\tif (fld == ICE_FLOW_FIELD_IDX_IP_TTL)\n-\t\t\t\tadj = ICE_FLOW_FLD_IPV6_TTL_TTL_DISP;\n-\t\t\telse if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)\n-\t\t\t\tadj = ICE_FLOW_FLD_IPV6_TTL_PROT_DISP;\n-\t\t}\n+\t\tif (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)\n+\t\t\tsib = ICE_FLOW_FIELD_IDX_IPV4_PROT;\n+\t\telse if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)\n+\t\t\tsib = ICE_FLOW_FIELD_IDX_IPV4_TTL;\n+\n+\t\t/* If the sibling field is also included, that field's\n+\t\t * mask needs to be included.\n+\t\t */\n+\t\tif (match & BIT(sib))\n+\t\t\tsib_mask = ice_flds_info[sib].mask;\n+\t\tbreak;\n+\tcase ICE_FLOW_FIELD_IDX_IPV6_TTL:\n+\tcase ICE_FLOW_FIELD_IDX_IPV6_PROT:\n+\t\tprot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;\n+\n+\t\t/* TTL and PROT share the same extraction seq. 
entry.\n+\t\t * Each is considered a sibling to the other in terms of sharing\n+\t\t * the same extraction sequence entry.\n+\t\t */\n+\t\tif (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)\n+\t\t\tsib = ICE_FLOW_FIELD_IDX_IPV6_PROT;\n+\t\telse if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)\n+\t\t\tsib = ICE_FLOW_FIELD_IDX_IPV6_TTL;\n+\n+\t\t/* If the sibling field is also included, that field's\n+\t\t * mask needs to be included.\n+\t\t */\n+\t\tif (match & BIT(sib))\n+\t\t\tsib_mask = ice_flds_info[sib].mask;\n \t\tbreak;\n \tcase ICE_FLOW_FIELD_IDX_IPV4_SA:\n \tcase ICE_FLOW_FIELD_IDX_IPV4_DA:\n@@ -733,6 +751,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,\n \t\tICE_FLOW_FV_EXTRACT_SZ;\n \tflds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);\n \tflds[fld].xtrct.idx = params->es_cnt;\n+\tflds[fld].xtrct.mask = ice_flds_info[fld].mask;\n \n \t/* Adjust the next field-entry index after accommodating the number of\n \t * entries this field consumes\n@@ -742,7 +761,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,\n \n \t/* Fill in the extraction sequence entries needed for this field */\n \toff = flds[fld].xtrct.off;\n-\tmask = ice_flds_info[fld].mask;\n+\tmask = flds[fld].xtrct.mask;\n \tfor (i = 0; i < cnt; i++) {\n \t\t/* Only consume an extraction sequence entry if there is no\n \t\t * sibling field associated with this field or the sibling entry\n@@ -767,7 +786,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,\n \n \t\t\tparams->es[idx].prot_id = prot_id;\n \t\t\tparams->es[idx].off = off;\n-\t\t\tparams->mask[idx] = mask;\n+\t\t\tparams->mask[idx] = mask | sib_mask;\n \t\t\tparams->es_cnt++;\n \t\t}\n \n@@ -885,7 +904,8 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,\n \n \t\t\tif (match & bit) {\n \t\t\t\tstatus = ice_flow_xtract_fld\n-\t\t\t\t\t(hw, params, i, (enum ice_flow_field)j);\n+\t\t\t\t\t(hw, params, i, (enum ice_flow_field)j,\n+\t\t\t\t\t match);\n \t\t\t\tif (status)\n \t\t\t\t\treturn status;\n \t\t\t\tmatch &= ~bit;\ndiff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h\nindex 326ff6f81..c224e6ebf 100644\n--- a/drivers/net/ice/base/ice_flow.h\n+++ b/drivers/net/ice/base/ice_flow.h\n@@ -114,9 +114,12 @@ enum ice_flow_field {\n \tICE_FLOW_FIELD_IDX_C_VLAN,\n \tICE_FLOW_FIELD_IDX_ETH_TYPE,\n \t/* L3 */\n-\tICE_FLOW_FIELD_IDX_IP_DSCP,\n-\tICE_FLOW_FIELD_IDX_IP_TTL,\n-\tICE_FLOW_FIELD_IDX_IP_PROT,\n+\tICE_FLOW_FIELD_IDX_IPV4_DSCP,\n+\tICE_FLOW_FIELD_IDX_IPV6_DSCP,\n+\tICE_FLOW_FIELD_IDX_IPV4_TTL,\n+\tICE_FLOW_FIELD_IDX_IPV4_PROT,\n+\tICE_FLOW_FIELD_IDX_IPV6_TTL,\n+\tICE_FLOW_FIELD_IDX_IPV6_PROT,\n \tICE_FLOW_FIELD_IDX_IPV4_SA,\n \tICE_FLOW_FIELD_IDX_IPV4_DA,\n \tICE_FLOW_FIELD_IDX_IPV6_SA,\n@@ -232,6 +235,7 @@ struct ice_flow_seg_xtrct {\n \tu16 off;\t/* Starting offset of the field in header in bytes */\n \tu8 idx;\t\t/* Index of FV entry used */\n \tu8 disp;\t/* Displacement of field in bits fr. FV entry's start */\n+\tu16 mask;\t/* Mask for field */\n };\n \n enum ice_flow_fld_match_type {\n",
    "prefixes": [
        "v2",
        "05/12"
    ]
}
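
The hyperlinked fields in the response point at related resources: "mbox" is the raw patch email, each entry under "series" carries an mbox for the whole patch set, and "comments" and "checks" list the follow-up mail and CI results. Below is a short sketch of pulling the series mbox for use with git am and printing the check results; the output file name is arbitrary and the per-check field names are assumptions about the checks endpoint.

    import requests

    patch = requests.get("http://patches.dpdk.org/api/patches/60572/").json()

    # Fetch the mbox covering the whole 12-patch series and save it; the file
    # can then be applied to a DPDK tree with `git am v2-series.mbox`.
    mbox = requests.get(patch["series"][0]["mbox"]).text
    with open("v2-series.mbox", "w") as f:
        f.write(mbox)

    # Walk the per-test CI results behind "checks" (summarised as "success" above).
    for check in requests.get(patch["checks"]).json():
        print(check.get("context"), check.get("state"))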