From patchwork Fri Apr 17 16:19:09 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68790 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9FF82A0597; Fri, 17 Apr 2020 18:19:59 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 636241EA05; Fri, 17 Apr 2020 18:19:33 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 6D8001E9D9 for ; Fri, 17 Apr 2020 18:19:26 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id B6CAE30C1D0; Fri, 17 Apr 2020 09:08:04 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com B6CAE30C1D0 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139684; bh=4p6JgPsZazS/IOTY7pl6eF+JM7AZG6gO0usHev9OEGU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Cx9JSWoWwH6enRW5wJjnVIP9TSjZID2a+PX82TX9Jg/FOCv4XOsyIsnq4whku0UGR Xpxk9HEZtyGWjwCDvEys5/TVBe7cbukzat4frapuk+lt2LBAhbuGDJeTfLVHx98oUM LgIGi7pdwf4vtpwn1sKy46n4WWMvyigS1RCVOUnw= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 0761514008D; Fri, 17 Apr 2020 09:19:24 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Mike Baucom , Venkat Duvvuru , Kishore Padmanabha Date: Fri, 17 Apr 2020 09:19:09 -0700 Message-Id: <20200417161920.85858-2-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: 
<1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 01/12] net/bnxt: add SVIF changes for dpdk port id X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Mike Baucom Modification of the parser to get the SVIF from the driver for matches on port_id, pf, and phy_port. Signed-off-by: Mike Baucom Signed-off-by: Venkat Duvvuru Reviewed-by: Kishore Padmanabha Reviewed-by: Ajit Khaparde --- drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 31 ++++---- drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 92 +++++++++++++++++++----- drivers/net/bnxt/tf_ulp/ulp_rte_parser.h | 5 ++ 3 files changed, 90 insertions(+), 38 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 49588953c..6203a495c 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -72,14 +72,14 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, struct ulp_rte_act_bitmap act_bitmap; struct ulp_rte_act_prop act_prop; enum ulp_direction_type dir = ULP_DIR_INGRESS; - uint32_t class_id, act_tmpl; - uint32_t app_priority; - int ret; struct bnxt_ulp_context *ulp_ctx = NULL; - uint32_t vnic; - uint8_t svif; + uint32_t class_id, act_tmpl; struct rte_flow *flow_id; + uint32_t app_priority; uint32_t fid; + uint8_t *buffer; + uint32_t vnic; + int ret; if (bnxt_ulp_flow_validate_args(attr, pattern, actions, @@ -100,19 +100,15 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, memset(&act_bitmap, 0, sizeof(act_bitmap)); memset(&act_prop, 0, sizeof(act_prop)); - svif = bnxt_get_svif(dev->data->port_id, false); - BNXT_TF_DBG(ERR, "SVIF for port[%d][port]=0x%08x\n", - dev->data->port_id, svif); + if (attr->egress) + dir = ULP_DIR_EGRESS; - 
hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif); - hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec[0] = svif; - hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask[0] = -1; - ULP_BITMAP_SET(hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF); + /* copy the device port id and direction in svif for further process */ + buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + rte_memcpy(buffer, &dev->data->port_id, sizeof(uint16_t)); + rte_memcpy(buffer + sizeof(uint16_t), &dir, sizeof(uint32_t)); - /* - * VNIC is being pushed as 32bit and the pop will take care of - * proper size - */ + /* Set the implicit vnic in the action property */ vnic = (uint32_t)bnxt_get_vnic_id(dev->data->port_id); vnic = htonl(vnic); rte_memcpy(&act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], @@ -132,9 +128,6 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; - if (attr->egress) - dir = ULP_DIR_EGRESS; - ret = ulp_matcher_pattern_match(dir, &hdr_bitmap, hdr_field, &act_bitmap, &class_id); diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c index 7a31b4360..4339032a7 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -85,6 +85,8 @@ bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], } item++; } + /* update the implied SVIF */ + (void)ulp_rte_parser_svif_process(hdr_bitmap, hdr_field); return BNXT_TF_RC_SUCCESS; } @@ -132,9 +134,12 @@ static int32_t ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap, struct ulp_rte_hdr_field *hdr_field, enum rte_flow_item_type proto, - uint32_t svif, - uint32_t mask) + uint32_t dir, + uint16_t svif, + uint16_t mask) { + uint16_t port_id = svif; + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) { BNXT_TF_DBG(ERR, "SVIF already set," @@ -142,21 +147,51 @@ ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap, return BNXT_TF_RC_ERROR; } - /* TBD: Check for any mapping errors 
for svif */ /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */ ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF); - if (proto != RTE_FLOW_ITEM_TYPE_PF) { - memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, - &svif, sizeof(svif)); - memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask, - &mask, sizeof(mask)); - hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif); + if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) { + /* perform the conversion from dpdk port to svif */ + if (dir == ULP_DIR_EGRESS) + svif = bnxt_get_svif(port_id, true); + else + svif = bnxt_get_svif(port_id, false); } + memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, + &svif, sizeof(svif)); + memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask, + &mask, sizeof(mask)); + hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif); return BNXT_TF_RC_SUCCESS; } +/* Function to handle the parsing of the RTE port id + */ +int32_t +ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap *hdr_bitmap, + struct ulp_rte_hdr_field *hdr_field) +{ + uint16_t port_id = 0; + uint32_t dir = 0; + uint8_t *buffer; + uint16_t svif_mask = 0xFFFF; + + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) + return BNXT_TF_RC_SUCCESS; + + /* SVIF not set. So get the port id and direction */ + buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + memcpy(&port_id, buffer, sizeof(port_id)); + memcpy(&dir, buffer + sizeof(port_id), sizeof(dir)); + memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0, + RTE_PARSER_FLOW_HDR_FIELD_SIZE); + + return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, + RTE_FLOW_ITEM_TYPE_PORT_ID, + dir, port_id, svif_mask); +} + /* Function to handle the parsing of RTE Flow item PF Header. 
*/ int32_t ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, @@ -165,8 +200,20 @@ ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, uint32_t *field_idx __rte_unused, uint32_t *vlan_idx __rte_unused) { + uint16_t port_id = 0; + uint32_t dir = 0; + uint8_t *buffer; + uint16_t svif_mask = 0xFFFF; + + buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + memcpy(&port_id, buffer, sizeof(port_id)); + memcpy(&dir, buffer + sizeof(port_id), sizeof(dir)); + memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0, + RTE_PARSER_FLOW_HDR_FIELD_SIZE); + return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, 0, 0); + item->type, + dir, port_id, svif_mask); } /* Function to handle the parsing of RTE Flow item VF Header. */ @@ -178,7 +225,7 @@ ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, uint32_t *vlan_idx __rte_unused) { const struct rte_flow_item_vf *vf_spec, *vf_mask; - uint32_t svif = 0, mask = 0; + uint16_t svif = 0, mask = 0; vf_spec = item->spec; vf_mask = item->mask; @@ -188,12 +235,12 @@ ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, * header fields. */ if (vf_spec) - svif = vf_spec->id; + svif = (uint16_t)vf_spec->id; if (vf_mask) - mask = vf_mask->id; + mask = (uint16_t)vf_mask->id; return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, svif, mask); + item->type, 0, svif, mask); } /* Function to handle the parsing of RTE Flow item port id Header. */ @@ -205,7 +252,9 @@ ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, uint32_t *vlan_idx __rte_unused) { const struct rte_flow_item_port_id *port_spec, *port_mask; - uint32_t svif = 0, mask = 0; + uint16_t svif = 0, mask = 0; + uint32_t dir; + uint8_t *buffer; port_spec = item->spec; port_mask = item->mask; @@ -215,12 +264,15 @@ ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, * header fields. 
*/ if (port_spec) - svif = port_spec->id; + svif = (uint16_t)port_spec->id; if (port_mask) - mask = port_mask->id; + mask = (uint16_t)port_mask->id; + + buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + memcpy(&dir, buffer + sizeof(uint16_t), sizeof(uint16_t)); return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, svif, mask); + item->type, dir, svif, mask); } /* Function to handle the parsing of RTE Flow item phy port Header. */ @@ -244,7 +296,7 @@ ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item, mask = port_mask->index; return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, svif, mask); + item->type, 0, svif, mask); } /* Function to handle the parsing of RTE Flow item Ethernet Header. */ @@ -1124,6 +1176,8 @@ ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused, /* copy the PF of the current device into VNIC Property */ svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; ulp_util_field_int_read(svif_buf, &svif); + svif = (uint32_t)bnxt_get_vnic_id(svif); + svif = htonl(svif); vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; ulp_util_field_int_write(vnic_buffer, svif); diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h index 0ab43d2a6..0d571bbba 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h @@ -20,6 +20,11 @@ #define BNXT_ULP_ENCAP_IPV6_SIZE 8 #define BNXT_ULP_ENCAP_UDP_SIZE 4 +/* Function to handle the parsing of the RTE port id. */ +int32_t +ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap *hdr_bitmap, + struct ulp_rte_hdr_field *hdr_field); + /* * Function to handle the parsing of RTE Flows and placing * the RTE flow items into the ulp structures. 
From patchwork Fri Apr 17 16:19:10 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68789 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id E7811A0597; Fri, 17 Apr 2020 18:19:46 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 0131F1E9F5; Fri, 17 Apr 2020 18:19:32 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 734DB1E9DE for ; Fri, 17 Apr 2020 18:19:26 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 1365D30C1D1; Fri, 17 Apr 2020 09:08:05 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 1365D30C1D1 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139685; bh=Skf3cD4vLeBZLMhX/KEI0O4AeYfzUodeq6TLOLkVxt0=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Iiq72UrW0hWJa65wJvhCt5W8QvhJzB1nibNH+FqTZgNoJEhvpjpszfpDy9/6aCAOH Z95ag+seT371LyWuKx6EQ5cyhtM3ZwkW3TPPfx2kvZL8Ug8qr7pjhs+c2bEVyLjpTg NGY6Qc60ASyCrfb1LnWa5wU56rpvewTXl0YqT/+0= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 45E4514008F; Fri, 17 Apr 2020 09:19:24 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Mike Baucom , Venkat Duvvuru , Kishore Padmanabha Date: Fri, 17 Apr 2020 09:19:10 -0700 Message-Id: <20200417161920.85858-3-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: 
<1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 02/12] net/bnxt: allow usage of more resources in flow db X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Mike Baucom Allow the flow db resources to be more effectively utilized. Signed-off-by: Mike Baucom Signed-off-by: Venkat Duvvuru Reviewed-by: Kishore Padmanabha Reviewed-by: Ajit Khaparde --- drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c index aed50785c..e99e94ab7 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c @@ -331,6 +331,10 @@ int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, BNXT_TF_DBG(ERR, "Flow database has reached max flows\n"); return -ENOMEM; } + if (flow_tbl->tail_index <= (flow_tbl->head_index + 1)) { + BNXT_TF_DBG(ERR, "Flow database has reached max resources\n"); + return -ENOMEM; + } *fid = flow_tbl->flow_tbl_stack[flow_tbl->head_index]; flow_tbl->head_index++; ulp_flow_db_active_flow_set(flow_tbl, *fid, 1); @@ -385,7 +389,7 @@ int32_t ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt, } /* check for max resource */ - if ((flow_tbl->num_flows + 1) >= flow_tbl->tail_index) { + if ((flow_tbl->head_index + 1) >= flow_tbl->tail_index) { BNXT_TF_DBG(ERR, "Flow db has reached max resources\n"); return -ENOMEM; } From patchwork Fri Apr 17 16:19:11 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68788 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: 
patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1044CA0597; Fri, 17 Apr 2020 18:19:39 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5E37F1E9E6; Fri, 17 Apr 2020 18:19:30 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 6AF5C1E9D1 for ; Fri, 17 Apr 2020 18:19:26 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 6241930C1DA; Fri, 17 Apr 2020 09:08:05 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 6241930C1DA DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139685; bh=rf5r6a12evlfNwtlFrtP3DwPyhPUUZW8XXSVK1zKWf4=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Jz1P6OWWqxA0SG3LLAM+dI+2NBeUMM83Onxy8B3zQfleerxdf1RX67ZuBHgv5unTg t5L2vY3eYn+cZKXa0WWNxsd02PQVQBfaCMiqueGxoIlW0bHLXiPkLOst47inoOaLKo 6e3SDQDKKFrEVZxJC5qVG/tXrJC+iYc4nBc+iEVo= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 92F4B14008C; Fri, 17 Apr 2020 09:19:24 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Kishore Padmanabha , Michael Baucom , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:11 -0700 Message-Id: <20200417161920.85858-4-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 03/12] net/bnxt: add flow database resource iteration API X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and 
discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Kishore Padmanabha This API can be used to iterate individual resource functions in the flow database. Reviewed-by: Michael Baucom Reviewed-by: Ajit Khaparde Signed-off-by: Kishore Padmanabha Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 66 ++++++++++++++++++++++++++- drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 17 +++++++ 2 files changed, 82 insertions(+), 1 deletion(-) diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c index e99e94ab7..9e7f9f5e3 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c @@ -560,7 +560,71 @@ int32_t ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt, return 0; } -/** Get the flow database entry iteratively +/* + * Get the flow database entry details + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * nxt_idx [in/out] the index to the next entry + * params [out] The contents to be copied into params. + * + * returns 0 on success and negative on failure. 
+ */ +int32_t ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + uint32_t *nxt_idx, + struct ulp_flow_db_res_params *params) +{ + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + struct ulp_fdb_resource_info *nxt_resource, *fid_resource; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + if (tbl_idx >= BNXT_ULP_FLOW_TABLE_MAX) { + BNXT_TF_DBG(ERR, "Invalid table index\n"); + return -EINVAL; + } + + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + /* check for limits of fid */ + if (fid >= flow_tbl->num_flows || !fid) { + BNXT_TF_DBG(ERR, "Invalid flow index\n"); + return -EINVAL; + } + + /* check if the flow is active or not */ + if (!ulp_flow_db_active_flow_is_set(flow_tbl, fid)) { + BNXT_TF_DBG(ERR, "flow does not exist\n"); + return -EINVAL; + } + + if (!*nxt_idx) { + fid_resource = &flow_tbl->flow_resources[fid]; + ulp_flow_db_res_info_to_params(fid_resource, params); + ULP_FLOW_DB_RES_NXT_SET(*nxt_idx, + fid_resource->nxt_resource_idx); + } else { + nxt_resource = &flow_tbl->flow_resources[*nxt_idx]; + ulp_flow_db_res_info_to_params(nxt_resource, params); + *nxt_idx = 0; + ULP_FLOW_DB_RES_NXT_SET(*nxt_idx, + nxt_resource->nxt_resource_idx); + } + + /* all good, return success */ + return 0; +} + +/* + * Get the flow database entry iteratively * * flow_tbl [in] Ptr to flow table * fid [in/out] The index to the flow entry diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h index 543541565..5361dd025 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h +++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h @@ -142,6 +142,23 @@ int32_t ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt, enum bnxt_ulp_flow_db_tables tbl_idx, uint32_t fid); +/* + *Get the flow database entry details + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is 
regular or default flow + * fid [in] The index to the flow entry + * nxt_idx [in/out] the index to the next entry + * params [out] The contents to be copied into params. + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + uint32_t *nxt_idx, + struct ulp_flow_db_res_params *params); + /* * Flush all flows in the flow database. * From patchwork Fri Apr 17 16:19:12 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68791 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5521EA0597; Fri, 17 Apr 2020 18:20:10 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id D32981EA15; Fri, 17 Apr 2020 18:19:34 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 6308A1E9BF for ; Fri, 17 Apr 2020 18:19:26 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 4892430C1D6; Fri, 17 Apr 2020 09:08:05 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 4892430C1D6 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139685; bh=wg0ao+L0LwY1H65cb3JI6YGTFfXB8YcuXfVjfuV8mcU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=D2GenPwnTnI4GaHhSzeqgd9Ym9dBrZ7ei7OQm8uZr9begLEQAGFlSWSTBQf2MQsE/ LwxHuOBYw4bH8tv9UwMY5meNTp17eJcBi+4K2m2S6PrEovmp4mMCHPcsi5Fs63leFJ UyKM8Njx6lpgPJU2+DvcbpPCBFjl2bdSyao6RDUk= Received: from localhost.localdomain (unknown [10.230.185.215]) by 
mail-irv-17.broadcom.com (Postfix) with ESMTP id D7FFA14008D; Fri, 17 Apr 2020 09:19:24 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Mike Baucom , Kishore Padmanabha , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:12 -0700 Message-Id: <20200417161920.85858-5-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 04/12] net/bnxt: add resource name type to debug messages X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Mike Baucom Added the name of the resource to the index/result and key/mask common builder functions. Reviewed-by: Kishore Padmanabha Reviewed-by: Ajit Khaparde Signed-off-by: Mike Baucom Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/ulp_mapper.c | 61 ++++++++++++++++------------ 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c index ca4dd4562..f787c6e4d 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -365,7 +365,8 @@ ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, static int32_t ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, struct bnxt_ulp_mapper_result_field_info *fld, - struct ulp_blob *blob) + struct ulp_blob *blob, + const char *name) { uint16_t idx, size_idx; uint8_t *val = NULL; @@ -376,20 +377,20 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, case BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT: val = fld->result_operand; if (!ulp_blob_push(blob, val, fld->field_bit_size)) { - BNXT_TF_DBG(ERR, 
"Failed to add field\n"); + BNXT_TF_DBG(ERR, "%s failed to add field\n", name); return -EINVAL; } break; case BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP: if (!ulp_operand_read(fld->result_operand, (uint8_t *)&idx, sizeof(uint16_t))) { - BNXT_TF_DBG(ERR, "operand read failed\n"); + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); return -EINVAL; } idx = tfp_be_to_cpu_16(idx); if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { - BNXT_TF_DBG(ERR, "act_prop[%d] oob\n", idx); + BNXT_TF_DBG(ERR, "%s act_prop[%d] oob\n", name, idx); return -EINVAL; } val = &parms->act_prop->act_details[idx]; @@ -400,20 +401,20 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, val += field_size; } if (!ulp_blob_push(blob, val, fld->field_bit_size)) { - BNXT_TF_DBG(ERR, "push field failed\n"); + BNXT_TF_DBG(ERR, "%s push field failed\n", name); return -EINVAL; } break; case BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP_SZ: if (!ulp_operand_read(fld->result_operand, (uint8_t *)&idx, sizeof(uint16_t))) { - BNXT_TF_DBG(ERR, "operand read failed\n"); + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); return -EINVAL; } idx = tfp_be_to_cpu_16(idx); if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { - BNXT_TF_DBG(ERR, "act_prop[%d] oob\n", idx); + BNXT_TF_DBG(ERR, "%s act_prop[%d] oob\n", name, idx); return -EINVAL; } val = &parms->act_prop->act_details[idx]; @@ -421,7 +422,7 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, /* get the size index next */ if (!ulp_operand_read(&fld->result_operand[sizeof(uint16_t)], (uint8_t *)&size_idx, sizeof(uint16_t))) { - BNXT_TF_DBG(ERR, "operand read failed\n"); + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); return -EINVAL; } size_idx = tfp_be_to_cpu_16(size_idx); @@ -439,20 +440,21 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, case BNXT_ULP_RESULT_OPC_SET_TO_REGFILE: if (!ulp_operand_read(fld->result_operand, (uint8_t *)&idx, sizeof(uint16_t))) { - BNXT_TF_DBG(ERR, "operand read failed\n"); + BNXT_TF_DBG(ERR, 
"%s operand read failed\n", name); return -EINVAL; } idx = tfp_be_to_cpu_16(idx); /* Uninitialized regfile entries return 0 */ if (!ulp_regfile_read(parms->regfile, idx, ®val)) { - BNXT_TF_DBG(ERR, "regfile[%d] read oob\n", idx); + BNXT_TF_DBG(ERR, "%s regfile[%d] read oob\n", + name, idx); return -EINVAL; } val = ulp_blob_push_64(blob, ®val, fld->field_bit_size); if (!val) { - BNXT_TF_DBG(ERR, "push field failed\n"); + BNXT_TF_DBG(ERR, "%s push field failed\n", name); return -EINVAL; } break; @@ -468,7 +470,8 @@ static int32_t ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, struct bnxt_ulp_mapper_class_key_field_info *f, struct ulp_blob *blob, - uint8_t is_key) + uint8_t is_key, + const char *name) { uint64_t regval; uint16_t idx, bitlen; @@ -493,13 +496,13 @@ ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, case BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT: val = operand; if (!ulp_blob_push(blob, val, bitlen)) { - BNXT_TF_DBG(ERR, "push to key blob failed\n"); + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); return -EINVAL; } break; case BNXT_ULP_SPEC_OPC_ADD_PAD: if (!ulp_blob_pad_push(blob, bitlen)) { - BNXT_TF_DBG(ERR, "Pad too large for blob\n"); + BNXT_TF_DBG(ERR, "%s pad too large for blob\n", name); return -EINVAL; } @@ -507,7 +510,7 @@ ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, case BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD: if (!ulp_operand_read(operand, (uint8_t *)&idx, sizeof(uint16_t))) { - BNXT_TF_DBG(ERR, "key operand read failed.\n"); + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); return -EINVAL; } idx = tfp_be_to_cpu_16(idx); @@ -527,27 +530,27 @@ ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, } if (!ulp_blob_push(blob, val, bitlen)) { - BNXT_TF_DBG(ERR, "push to key blob failed\n"); + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); return -EINVAL; } break; case BNXT_ULP_SPEC_OPC_SET_TO_REGFILE: if (!ulp_operand_read(operand, (uint8_t *)&idx, 
sizeof(uint16_t))) { - BNXT_TF_DBG(ERR, "key operand read failed.\n"); + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); return -EINVAL; } idx = tfp_be_to_cpu_16(idx); if (!ulp_regfile_read(regfile, idx, ®val)) { - BNXT_TF_DBG(ERR, "regfile[%d] read failed.\n", - idx); + BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", + name, idx); return -EINVAL; } val = ulp_blob_push_64(blob, ®val, bitlen); if (!val) { - BNXT_TF_DBG(ERR, "push to key blob failed\n"); + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); return -EINVAL; } default: @@ -715,7 +718,8 @@ ulp_mapper_action_info_process(struct bnxt_ulp_mapper_parms *parms, fld = &flds[i]; rc = ulp_mapper_result_field_process(parms, fld, - &blob); + &blob, + "Action"); if (rc) { BNXT_TF_DBG(ERR, "Action field failed\n"); return rc; @@ -777,7 +781,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < num_kflds; i++) { /* Setup the key */ rc = ulp_mapper_keymask_field_process(parms, &kflds[i], - &key, 1); + &key, 1, "TCAM Key"); if (rc) { BNXT_TF_DBG(ERR, "Key field set failed.\n"); return rc; @@ -785,7 +789,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, /* Setup the mask */ rc = ulp_mapper_keymask_field_process(parms, &kflds[i], - &mask, 0); + &mask, 0, "TCAM Mask"); if (rc) { BNXT_TF_DBG(ERR, "Mask field set failed.\n"); return rc; @@ -852,7 +856,8 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < num_dflds; i++) { rc = ulp_mapper_result_field_process(parms, &dflds[i], - &data); + &data, + "TCAM Result"); if (rc) { BNXT_TF_DBG(ERR, "Failed to set data fields\n"); goto error; @@ -955,7 +960,7 @@ ulp_mapper_em_tbl_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < num_kflds; i++) { /* Setup the key */ rc = ulp_mapper_keymask_field_process(parms, &kflds[i], - &key, 1); + &key, 1, "EM Key"); if (rc) { BNXT_TF_DBG(ERR, "Key field set failed.\n"); return rc; @@ -981,7 +986,8 @@ ulp_mapper_em_tbl_process(struct 
bnxt_ulp_mapper_parms *parms, rc = ulp_mapper_result_field_process(parms, fld, - &data); + &data, + "EM Result"); if (rc) { BNXT_TF_DBG(ERR, "Failed to set data fields.\n"); return rc; @@ -1130,7 +1136,8 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < num_flds; i++) { rc = ulp_mapper_result_field_process(parms, &flds[i], - &data); + &data, + "Indexed Result"); if (rc) { BNXT_TF_DBG(ERR, "data field failed\n"); return rc; From patchwork Fri Apr 17 16:19:13 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68793 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id C3B9CA0597; Fri, 17 Apr 2020 18:20:41 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 9A1FC1EA39; Fri, 17 Apr 2020 18:19:38 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 7A4641E9BF for ; Fri, 17 Apr 2020 18:19:27 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 89B4A30C1DD; Fri, 17 Apr 2020 09:08:06 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 89B4A30C1DD DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139686; bh=dNRk0lrXhENSRNhan+9FNZ9/jwifa3IA0ZwuSxbVoDY=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=R81QHNoR7B4u1DQA9ECGCjEarf3eEdw+LFI4Aek/9+3Q+2pd9+UNzPkJOtt/nxYqp wW+dMecX1XSqKjX5nLjnhIzpWxG7bKKb+nv6FbOKJsGyCrNAwT0j20l76BvIqYjNyz Ku/ka4Zo8mOlgmIbKS5izpIjj92H03zLW5zmtIjE= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com 
(Postfix) with ESMTP id 1A81914008E; Fri, 17 Apr 2020 09:19:25 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Kishore Padmanabha , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:13 -0700 Message-Id: <20200417161920.85858-6-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 05/12] net/bnxt: aggregate ulp rte parser arguments X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Kishore Padmanabha The changes are to the ulp rte parser, the API are changed to take the parser param structure instead of individual fields. Reviewed-by: Venkat Duvvuru Reviewed-by: Ajit Khaparde Signed-off-by: Kishore Padmanabha Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 75 +- drivers/net/bnxt/tf_ulp/ulp_matcher.c | 20 +- drivers/net/bnxt/tf_ulp/ulp_matcher.h | 12 +- drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 882 +++++++++--------- drivers/net/bnxt/tf_ulp/ulp_rte_parser.h | 150 +-- drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 42 +- 6 files changed, 560 insertions(+), 621 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 6203a495c..026f33f66 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -67,11 +67,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct ulp_rte_hdr_bitmap hdr_bitmap; - struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX]; - struct ulp_rte_act_bitmap act_bitmap; - struct ulp_rte_act_prop 
act_prop; - enum ulp_direction_type dir = ULP_DIR_INGRESS; + struct ulp_rte_parser_params params; struct bnxt_ulp_context *ulp_ctx = NULL; uint32_t class_id, act_tmpl; struct rte_flow *flow_id; @@ -94,47 +90,38 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, return NULL; } - /* clear the header bitmap and field structure */ - memset(&hdr_bitmap, 0, sizeof(struct ulp_rte_hdr_bitmap)); - memset(hdr_field, 0, sizeof(hdr_field)); - memset(&act_bitmap, 0, sizeof(act_bitmap)); - memset(&act_prop, 0, sizeof(act_prop)); + /* Initialize the parser params */ + memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); if (attr->egress) - dir = ULP_DIR_EGRESS; + params.dir = ULP_DIR_EGRESS; - /* copy the device port id and direction in svif for further process */ - buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + /* copy the device port id and direction for further processing */ + buffer = params.hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; rte_memcpy(buffer, &dev->data->port_id, sizeof(uint16_t)); - rte_memcpy(buffer + sizeof(uint16_t), &dir, sizeof(uint32_t)); /* Set the implicit vnic in the action property */ vnic = (uint32_t)bnxt_get_vnic_id(dev->data->port_id); vnic = htonl(vnic); - rte_memcpy(&act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + rte_memcpy(¶ms.act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], &vnic, BNXT_ULP_ACT_PROP_SZ_VNIC); /* Parse the rte flow pattern */ - ret = bnxt_ulp_rte_parser_hdr_parse(pattern, - &hdr_bitmap, - hdr_field); + ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; /* Parse the rte flow action */ - ret = bnxt_ulp_rte_parser_act_parse(actions, - &act_bitmap, - &act_prop); + ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; - ret = ulp_matcher_pattern_match(dir, &hdr_bitmap, hdr_field, - &act_bitmap, &class_id); + ret = ulp_matcher_pattern_match(¶ms, &class_id); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; - ret = 
ulp_matcher_action_match(dir, &act_bitmap, &act_tmpl); + ret = ulp_matcher_action_match(¶ms, &act_tmpl); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; @@ -142,10 +129,10 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, /* call the ulp mapper to create the flow in the hardware */ ret = ulp_mapper_flow_create(ulp_ctx, app_priority, - &hdr_bitmap, - hdr_field, - &act_bitmap, - &act_prop, + ¶ms.hdr_bitmap, + params.hdr_field, + ¶ms.act_bitmap, + ¶ms.act_prop, class_id, act_tmpl, &fid); @@ -168,11 +155,7 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct ulp_rte_hdr_bitmap hdr_bitmap; - struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX]; - struct ulp_rte_act_bitmap act_bitmap; - struct ulp_rte_act_prop act_prop; - enum ulp_direction_type dir = ULP_DIR_INGRESS; + struct ulp_rte_parser_params params; uint32_t class_id, act_tmpl; int ret; @@ -183,36 +166,28 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, return -EINVAL; } - /* clear the header bitmap and field structure */ - memset(&hdr_bitmap, 0, sizeof(struct ulp_rte_hdr_bitmap)); - memset(hdr_field, 0, sizeof(hdr_field)); - memset(&act_bitmap, 0, sizeof(act_bitmap)); - memset(&act_prop, 0, sizeof(act_prop)); + /* Initialize the parser params */ + memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + + if (attr->egress) + params.dir = ULP_DIR_EGRESS; /* Parse the rte flow pattern */ - ret = bnxt_ulp_rte_parser_hdr_parse(pattern, - &hdr_bitmap, - hdr_field); + ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; /* Parse the rte flow action */ - ret = bnxt_ulp_rte_parser_act_parse(actions, - &act_bitmap, - &act_prop); + ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; - if (attr->egress) - dir = ULP_DIR_EGRESS; - - ret = ulp_matcher_pattern_match(dir, &hdr_bitmap, hdr_field, - &act_bitmap, &class_id); + 
ret = ulp_matcher_pattern_match(¶ms, &class_id); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; - ret = ulp_matcher_action_match(dir, &act_bitmap, &act_tmpl); + ret = ulp_matcher_action_match(¶ms, &act_tmpl); if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; diff --git a/drivers/net/bnxt/tf_ulp/ulp_matcher.c b/drivers/net/bnxt/tf_ulp/ulp_matcher.c index ec4121d5c..e04bfa094 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_matcher.c +++ b/drivers/net/bnxt/tf_ulp/ulp_matcher.c @@ -67,11 +67,8 @@ ulp_matcher_hdr_fields_normalize(struct ulp_rte_hdr_bitmap *hdr1, * the pattern masks against the flow templates. */ int32_t -ulp_matcher_pattern_match(enum ulp_direction_type dir, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - struct ulp_rte_act_bitmap *act_bitmap, - uint32_t *class_id) +ulp_matcher_pattern_match(struct ulp_rte_parser_params *params, + uint32_t *class_id) { struct bnxt_ulp_header_match_info *sel_hdr_match; uint32_t hdr_num, idx, jdx; @@ -80,9 +77,12 @@ ulp_matcher_pattern_match(enum ulp_direction_type dir, uint32_t start_idx; struct ulp_rte_hdr_field *m_field; struct bnxt_ulp_matcher_field_info *sf; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + struct ulp_rte_act_bitmap *act_bitmap = ¶ms->act_bitmap; + struct ulp_rte_hdr_field *hdr_field = params->hdr_field; /* Select the ingress or egress template to match against */ - if (dir == ULP_DIR_INGRESS) { + if (params->dir == ULP_DIR_INGRESS) { sel_hdr_match = ulp_ingress_hdr_match_list; hdr_num = BNXT_ULP_INGRESS_HDR_MATCH_SZ; } else { @@ -156,15 +156,15 @@ ulp_matcher_pattern_match(enum ulp_direction_type dir, * the action against the flow templates. 
*/ int32_t -ulp_matcher_action_match(enum ulp_direction_type dir, - struct ulp_rte_act_bitmap *act_bitmap, - uint32_t *act_id) +ulp_matcher_action_match(struct ulp_rte_parser_params *params, + uint32_t *act_id) { struct bnxt_ulp_action_match_info *sel_act_match; uint32_t act_num, idx; + struct ulp_rte_act_bitmap *act_bitmap = ¶ms->act_bitmap; /* Select the ingress or egress template to match against */ - if (dir == ULP_DIR_INGRESS) { + if (params->dir == ULP_DIR_INGRESS) { sel_act_match = ulp_ingress_act_match_list; act_num = BNXT_ULP_INGRESS_ACT_MATCH_SZ; } else { diff --git a/drivers/net/bnxt/tf_ulp/ulp_matcher.h b/drivers/net/bnxt/tf_ulp/ulp_matcher.h index c818bbe31..fc197830f 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_matcher.h +++ b/drivers/net/bnxt/tf_ulp/ulp_matcher.h @@ -17,19 +17,15 @@ * the pattern masks against the flow templates. */ int32_t -ulp_matcher_pattern_match(enum ulp_direction_type dir, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - struct ulp_rte_act_bitmap *act_bitmap, - uint32_t *class_id); +ulp_matcher_pattern_match(struct ulp_rte_parser_params *params, + uint32_t *class_id); /* * Function to handle the matching of RTE Flows and validating * the action against the flow templates. 
*/ int32_t -ulp_matcher_action_match(enum ulp_direction_type dir, - struct ulp_rte_act_bitmap *act_bitmap, - uint32_t *act_id); +ulp_matcher_action_match(struct ulp_rte_parser_params *params, + uint32_t *act_id); #endif /* ULP_MATCHER_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c index 4339032a7..2980e03b4 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -45,39 +45,56 @@ ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment) return 0; } +/* Utility function to copy field spec items */ +static struct ulp_rte_hdr_field * +ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field, + const void *buffer, + uint32_t size) +{ + field->size = size; + memcpy(field->spec, buffer, field->size); + field++; + return field; +} + +/* Utility function to copy field masks items */ +static void +ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params, + uint32_t *idx, + const void *buffer, + uint32_t size) +{ + struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx]; + + memcpy(field->mask, buffer, size); + *idx = *idx + 1; +} + /* * Function to handle the parsing of RTE Flows and placing * the RTE flow items into the ulp structures. 
*/ int32_t bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field) + struct ulp_rte_parser_params *params) { const struct rte_flow_item *item = pattern; - uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST; - uint32_t vlan_idx = 0; struct bnxt_ulp_rte_hdr_info *hdr_info; + params->field_idx = BNXT_ULP_HDR_FIELD_LAST; /* Parse all the items in the pattern */ while (item && item->type != RTE_FLOW_ITEM_TYPE_END) { /* get the header information from the flow_hdr_info table */ hdr_info = &ulp_hdr_info[item->type]; - if (hdr_info->hdr_type == - BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) { + if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) { BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n", item->type); return BNXT_TF_RC_PARSE_ERR; - } else if (hdr_info->hdr_type == - BNXT_ULP_HDR_TYPE_SUPPORTED) { + } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) { /* call the registered callback handler */ if (hdr_info->proto_hdr_func) { - if (hdr_info->proto_hdr_func(item, - hdr_bitmap, - hdr_field, - &field_idx, - &vlan_idx) != + if (hdr_info->proto_hdr_func(item, params) != BNXT_TF_RC_SUCCESS) { return BNXT_TF_RC_ERROR; } @@ -86,7 +103,7 @@ bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], item++; } /* update the implied SVIF */ - (void)ulp_rte_parser_svif_process(hdr_bitmap, hdr_field); + (void)ulp_rte_parser_svif_process(params); return BNXT_TF_RC_SUCCESS; } @@ -96,8 +113,7 @@ bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], */ int32_t bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], - struct ulp_rte_act_bitmap *act_bitmap, - struct ulp_rte_act_prop *act_prop) + struct ulp_rte_parser_params *params) { const struct rte_flow_action *action_item = actions; struct bnxt_ulp_rte_act_info *hdr_info; @@ -117,8 +133,7 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], /* call the registered callback 
handler */ if (hdr_info->proto_act_func) { if (hdr_info->proto_act_func(action_item, - act_bitmap, - act_prop) != + params) != BNXT_TF_RC_SUCCESS) { return BNXT_TF_RC_ERROR; } @@ -131,16 +146,15 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], /* Function to handle the parsing of RTE Flow item PF Header. */ static int32_t -ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, +ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params, enum rte_flow_item_type proto, - uint32_t dir, uint16_t svif, uint16_t mask) { uint16_t port_id = svif; + uint32_t dir = 0; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) { + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) { BNXT_TF_DBG(ERR, "SVIF already set," " multiple sources not supported\n"); @@ -148,116 +162,91 @@ ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap, } /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */ - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF); + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF); if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) { /* perform the conversion from dpdk port to svif */ + dir = params->dir; if (dir == ULP_DIR_EGRESS) svif = bnxt_get_svif(port_id, true); else svif = bnxt_get_svif(port_id, false); } - memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, + memcpy(params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, &svif, sizeof(svif)); - memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask, + memcpy(params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask, &mask, sizeof(mask)); - hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif); + params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of the RTE port id */ int32_t -ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field) 
+ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params) { uint16_t port_id = 0; - uint32_t dir = 0; uint8_t *buffer; uint16_t svif_mask = 0xFFFF; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) return BNXT_TF_RC_SUCCESS; /* SVIF not set. So get the port id and direction */ - buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + buffer = params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; memcpy(&port_id, buffer, sizeof(port_id)); - memcpy(&dir, buffer + sizeof(port_id), sizeof(dir)); - memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0, - RTE_PARSER_FLOW_HDR_FIELD_SIZE); + memset(buffer, 0, RTE_PARSER_FLOW_HDR_FIELD_SIZE); - return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, + return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID, - dir, port_id, svif_mask); + port_id, svif_mask); } /* Function to handle the parsing of RTE Flow item PF Header. */ int32_t ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx __rte_unused, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { uint16_t port_id = 0; - uint32_t dir = 0; uint8_t *buffer; uint16_t svif_mask = 0xFFFF; - buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; + buffer = params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; memcpy(&port_id, buffer, sizeof(port_id)); - memcpy(&dir, buffer + sizeof(port_id), sizeof(dir)); - memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0, - RTE_PARSER_FLOW_HDR_FIELD_SIZE); + memset(buffer, 0, RTE_PARSER_FLOW_HDR_FIELD_SIZE); - return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, + return ulp_rte_parser_svif_set(params, item->type, - dir, port_id, svif_mask); + port_id, svif_mask); } /* Function to handle the parsing of RTE Flow item VF Header. 
*/ int32_t ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx __rte_unused, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_vf *vf_spec, *vf_mask; + const struct rte_flow_item_vf *vf_spec = item->spec; + const struct rte_flow_item_vf *vf_mask = item->mask; uint16_t svif = 0, mask = 0; - vf_spec = item->spec; - vf_mask = item->mask; - - /* - * Copy the rte_flow_item for eth into hdr_field using ethernet - * header fields. - */ + /* Get VF rte_flow_item for Port details */ if (vf_spec) svif = (uint16_t)vf_spec->id; if (vf_mask) mask = (uint16_t)vf_mask->id; - return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, 0, svif, mask); + return ulp_rte_parser_svif_set(params, item->type, svif, mask); } /* Function to handle the parsing of RTE Flow item port id Header. */ int32_t ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx __rte_unused, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_port_id *port_spec, *port_mask; + const struct rte_flow_item_port_id *port_spec = item->spec; + const struct rte_flow_item_port_id *port_mask = item->mask; uint16_t svif = 0, mask = 0; - uint32_t dir; - uint8_t *buffer; - - port_spec = item->spec; - port_mask = item->mask; /* * Copy the rte_flow_item for Port into hdr_field using port id @@ -268,92 +257,82 @@ ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, if (port_mask) mask = (uint16_t)port_mask->id; - buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; - memcpy(&dir, buffer + sizeof(uint16_t), sizeof(uint16_t)); - - return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, dir, svif, mask); + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, 
item->type, svif, mask); } /* Function to handle the parsing of RTE Flow item phy port Header. */ int32_t ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx __rte_unused, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_phy_port *port_spec, *port_mask; + const struct rte_flow_item_phy_port *port_spec = item->spec; + const struct rte_flow_item_phy_port *port_mask = item->mask; uint32_t svif = 0, mask = 0; - port_spec = item->spec; - port_mask = item->mask; - /* Copy the rte_flow_item for phy port into hdr_field */ if (port_spec) svif = port_spec->index; if (port_mask) mask = port_mask->index; - return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field, - item->type, 0, svif, mask); + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, item->type, svif, mask); } /* Function to handle the parsing of RTE Flow item Ethernet Header. 
*/ int32_t ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_eth *eth_spec, *eth_mask; - uint32_t idx = *field_idx; - uint32_t mdx = *field_idx; + const struct rte_flow_item_eth *eth_spec = item->spec; + const struct rte_flow_item_eth *eth_mask = item->mask; + struct ulp_rte_hdr_field *field; + uint32_t idx = params->field_idx; uint64_t set_flag = 0; - - eth_spec = item->spec; - eth_mask = item->mask; + uint32_t size; /* * Copy the rte_flow_item for eth into hdr_field using ethernet * header fields */ if (eth_spec) { - hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes); - memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes, - sizeof(eth_spec->dst.addr_bytes)); - hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes); - memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes, - sizeof(eth_spec->src.addr_bytes)); - hdr_field[idx].size = sizeof(eth_spec->type); - memcpy(hdr_field[idx++].spec, ð_spec->type, - sizeof(eth_spec->type)); - } else { - idx += BNXT_ULP_PROTO_HDR_ETH_NUM; + size = sizeof(eth_spec->dst.addr_bytes); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + eth_spec->dst.addr_bytes, + size); + size = sizeof(eth_spec->src.addr_bytes); + field = ulp_rte_parser_fld_copy(field, + eth_spec->src.addr_bytes, + size); + field = ulp_rte_parser_fld_copy(field, + ð_spec->type, + sizeof(eth_spec->type)); } - if (eth_mask) { - memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes, - sizeof(eth_mask->dst.addr_bytes)); - memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes, - sizeof(eth_mask->src.addr_bytes)); - memcpy(hdr_field[mdx++].mask, ð_mask->type, - sizeof(eth_mask->type)); + ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes, + sizeof(eth_mask->dst.addr_bytes)); + ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes, + 
sizeof(eth_mask->src.addr_bytes)); + ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type, + sizeof(eth_mask->type)); } /* Add number of vlan header elements */ - *field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM; - *vlan_idx = idx; + params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM; + params->vlan_idx = params->field_idx; + params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM; /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */ - set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH); + set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ETH); if (set_flag) - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH); + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH); else - ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH); + ULP_BITMAP_RESET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_I_ETH); /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */ - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH); + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); return BNXT_TF_RC_SUCCESS; } @@ -361,23 +340,20 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, /* Function to handle the parsing of RTE Flow item Vlan Header. 
*/ int32_t ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx __rte_unused, - uint32_t *vlan_idx) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; - uint32_t idx = *vlan_idx; - uint32_t mdx = *vlan_idx; + const struct rte_flow_item_vlan *vlan_spec = item->spec; + const struct rte_flow_item_vlan *vlan_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap; + uint32_t idx = params->vlan_idx; uint16_t vlan_tag, priority; uint32_t outer_vtag_num = 0, inner_vtag_num = 0; - uint8_t *outer_tag_buffer; - uint8_t *inner_tag_buffer; + uint8_t *outer_tag_buff; + uint8_t *inner_tag_buff; - vlan_spec = item->spec; - vlan_mask = item->mask; - outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec; - inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec; + outer_tag_buff = params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec; + inner_tag_buff = params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec; /* * Copy the rte_flow_item for vlan into hdr_field using Vlan @@ -389,15 +365,15 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, vlan_tag &= 0xfff; vlan_tag = htons(vlan_tag); - hdr_field[idx].size = sizeof(priority); - memcpy(hdr_field[idx++].spec, &priority, sizeof(priority)); - hdr_field[idx].size = sizeof(vlan_tag); - memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag)); - hdr_field[idx].size = sizeof(vlan_spec->inner_type); - memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type, - sizeof(vlan_spec->inner_type)); - } else { - idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &priority, + sizeof(priority)); + field = ulp_rte_parser_fld_copy(field, + &vlan_tag, + sizeof(vlan_tag)); + field = ulp_rte_parser_fld_copy(field, + &vlan_spec->inner_type, + sizeof(vlan_spec->inner_type)); } if (vlan_mask) { 
@@ -406,26 +382,29 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, vlan_tag &= 0xfff; vlan_tag = htons(vlan_tag); - memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority)); - memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag)); - memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type, - sizeof(vlan_mask->inner_type)); + field = ¶ms->hdr_field[idx]; + memcpy(field->mask, &priority, field->size); + field++; + memcpy(field->mask, &vlan_tag, field->size); + field++; + memcpy(field->mask, &vlan_mask->inner_type, field->size); } /* Set the vlan index to new incremented value */ - *vlan_idx = idx; + params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; /* Get the outer tag and inner tag counts */ - ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num); - ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num); + ulp_util_field_int_read(outer_tag_buff, &outer_vtag_num); + ulp_util_field_int_read(inner_tag_buff, &inner_vtag_num); /* Update the hdr_bitmap of the vlans */ + hdr_bitmap = ¶ms->hdr_bitmap; if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) && !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) { /* Set the outer vlan bit and update the vlan tag num */ ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN); outer_vtag_num++; - ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num); - hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size = + ulp_util_field_int_write(outer_tag_buff, outer_vtag_num); + params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size = sizeof(uint32_t); } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) && ULP_BITMAP_ISSET(hdr_bitmap->bits, @@ -435,8 +414,8 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, /* Set the outer vlan bit and update the vlan tag num */ ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN); outer_vtag_num++; - ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num); - hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size = + 
ulp_util_field_int_write(outer_tag_buff, outer_vtag_num); + params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size = sizeof(uint32_t); } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) && @@ -451,8 +430,8 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, /* Set the inner vlan bit and update the vlan tag num */ ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN); inner_vtag_num++; - ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num); - hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size = + ulp_util_field_int_write(inner_tag_buff, inner_vtag_num); + params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size = sizeof(uint32_t); } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) && @@ -469,8 +448,8 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, /* Set the inner vlan bit and update the vlan tag num */ ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN); inner_vtag_num++; - ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num); - hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size = + ulp_util_field_int_write(inner_tag_buff, inner_vtag_num); + params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size = sizeof(uint32_t); } else { BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n"); @@ -482,17 +461,14 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, /* Function to handle the parsing of RTE Flow item IPV4 Header. 
*/ int32_t ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; - uint32_t idx = *field_idx; - uint32_t mdx = *field_idx; - - ipv4_spec = item->spec; - ipv4_mask = item->mask; + const struct rte_flow_item_ipv4 *ipv4_spec = item->spec; + const struct rte_flow_item_ipv4 *ipv4_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) { BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n"); @@ -504,63 +480,81 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, * header fields */ if (ipv4_spec) { - hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl, - sizeof(ipv4_spec->hdr.version_ihl)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service, - sizeof(ipv4_spec->hdr.type_of_service)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length, - sizeof(ipv4_spec->hdr.total_length)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id, - sizeof(ipv4_spec->hdr.packet_id)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset, - sizeof(ipv4_spec->hdr.fragment_offset)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live, - sizeof(ipv4_spec->hdr.time_to_live)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id); - 
memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id, - sizeof(ipv4_spec->hdr.next_proto_id)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum, - sizeof(ipv4_spec->hdr.hdr_checksum)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr, - sizeof(ipv4_spec->hdr.src_addr)); - hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr); - memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr, - sizeof(ipv4_spec->hdr.dst_addr)); - } else { - idx += BNXT_ULP_PROTO_HDR_IPV4_NUM; + size = sizeof(ipv4_spec->hdr.version_ihl); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &ipv4_spec->hdr.version_ihl, + size); + size = sizeof(ipv4_spec->hdr.type_of_service); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.type_of_service, + size); + size = sizeof(ipv4_spec->hdr.total_length); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.total_length, + size); + size = sizeof(ipv4_spec->hdr.packet_id); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.packet_id, + size); + size = sizeof(ipv4_spec->hdr.fragment_offset); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.fragment_offset, + size); + size = sizeof(ipv4_spec->hdr.time_to_live); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.time_to_live, + size); + size = sizeof(ipv4_spec->hdr.next_proto_id); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.next_proto_id, + size); + size = sizeof(ipv4_spec->hdr.hdr_checksum); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.hdr_checksum, + size); + size = sizeof(ipv4_spec->hdr.src_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.src_addr, + size); + size = sizeof(ipv4_spec->hdr.dst_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.dst_addr, + size); } - if (ipv4_mask) { - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl, - 
sizeof(ipv4_mask->hdr.version_ihl)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service, - sizeof(ipv4_mask->hdr.type_of_service)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length, - sizeof(ipv4_mask->hdr.total_length)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id, - sizeof(ipv4_mask->hdr.packet_id)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset, - sizeof(ipv4_mask->hdr.fragment_offset)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live, - sizeof(ipv4_mask->hdr.time_to_live)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id, - sizeof(ipv4_mask->hdr.next_proto_id)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum, - sizeof(ipv4_mask->hdr.hdr_checksum)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr, - sizeof(ipv4_mask->hdr.src_addr)); - memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr, - sizeof(ipv4_mask->hdr.dst_addr)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.version_ihl, + sizeof(ipv4_mask->hdr.version_ihl)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.type_of_service, + sizeof(ipv4_mask->hdr.type_of_service)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.total_length, + sizeof(ipv4_mask->hdr.total_length)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.packet_id, + sizeof(ipv4_mask->hdr.packet_id)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.fragment_offset, + sizeof(ipv4_mask->hdr.fragment_offset)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.time_to_live, + sizeof(ipv4_mask->hdr.time_to_live)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.next_proto_id, + sizeof(ipv4_mask->hdr.next_proto_id)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.hdr_checksum, + sizeof(ipv4_mask->hdr.hdr_checksum)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.src_addr, + sizeof(ipv4_mask->hdr.src_addr)); + ulp_rte_prsr_mask_copy(params, &idx, + 
&ipv4_mask->hdr.dst_addr, + sizeof(ipv4_mask->hdr.dst_addr)); } - *field_idx = idx; /* Number of ipv4 header elements */ + /* Add the number of ipv4 header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM; /* Set the ipv4 header bitmap and computed l3 header bitmaps */ if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) || @@ -578,17 +572,14 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, /* Function to handle the parsing of RTE Flow item IPV6 Header */ int32_t ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; - uint32_t idx = *field_idx; - uint32_t mdx = *field_idx; - - ipv6_spec = item->spec; - ipv6_mask = item->mask; + const struct rte_flow_item_ipv6 *ipv6_spec = item->spec; + const struct rte_flow_item_ipv6 *ipv6_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) { BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n"); @@ -600,43 +591,53 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, * header fields */ if (ipv6_spec) { - hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow); - memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow, - sizeof(ipv6_spec->hdr.vtc_flow)); - hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len); - memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len, - sizeof(ipv6_spec->hdr.payload_len)); - hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto); - memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto, - sizeof(ipv6_spec->hdr.proto)); - hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits); - memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits, - 
sizeof(ipv6_spec->hdr.hop_limits)); - hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr); - memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr, - sizeof(ipv6_spec->hdr.src_addr)); - hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr); - memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr, - sizeof(ipv6_spec->hdr.dst_addr)); - } else { - idx += BNXT_ULP_PROTO_HDR_IPV6_NUM; + size = sizeof(ipv6_spec->hdr.vtc_flow); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &ipv6_spec->hdr.vtc_flow, + size); + size = sizeof(ipv6_spec->hdr.payload_len); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.payload_len, + size); + size = sizeof(ipv6_spec->hdr.proto); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.proto, + size); + size = sizeof(ipv6_spec->hdr.hop_limits); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.hop_limits, + size); + size = sizeof(ipv6_spec->hdr.src_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.src_addr, + size); + size = sizeof(ipv6_spec->hdr.dst_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.dst_addr, + size); } - if (ipv6_mask) { - memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow, - sizeof(ipv6_mask->hdr.vtc_flow)); - memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len, - sizeof(ipv6_mask->hdr.payload_len)); - memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto, - sizeof(ipv6_mask->hdr.proto)); - memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits, - sizeof(ipv6_mask->hdr.hop_limits)); - memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr, - sizeof(ipv6_mask->hdr.src_addr)); - memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr, - sizeof(ipv6_mask->hdr.dst_addr)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.vtc_flow, + sizeof(ipv6_mask->hdr.vtc_flow)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.payload_len, + sizeof(ipv6_mask->hdr.payload_len)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.proto, + 
sizeof(ipv6_mask->hdr.proto)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.hop_limits, + sizeof(ipv6_mask->hdr.hop_limits)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.src_addr, + sizeof(ipv6_mask->hdr.src_addr)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.dst_addr, + sizeof(ipv6_mask->hdr.dst_addr)); } - *field_idx = idx; /* add number of ipv6 header elements */ + /* add number of ipv6 header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM; /* Set the ipv6 header bitmap and computed l3 header bitmaps */ if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) || @@ -654,17 +655,14 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, /* Function to handle the parsing of RTE Flow item UDP Header. */ int32_t ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_udp *udp_spec, *udp_mask; - uint32_t idx = *field_idx; - uint32_t mdx = *field_idx; - - udp_spec = item->spec; - udp_mask = item->mask; + const struct rte_flow_item_udp *udp_spec = item->spec; + const struct rte_flow_item_udp *udp_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) { BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n"); @@ -676,33 +674,40 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, * header fields */ if (udp_spec) { - hdr_field[idx].size = sizeof(udp_spec->hdr.src_port); - memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port, - sizeof(udp_spec->hdr.src_port)); - hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port); - memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port, - sizeof(udp_spec->hdr.dst_port)); - 
hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len); - memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len, - sizeof(udp_spec->hdr.dgram_len)); - hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum); - memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum, - sizeof(udp_spec->hdr.dgram_cksum)); - } else { - idx += BNXT_ULP_PROTO_HDR_UDP_NUM; + size = sizeof(udp_spec->hdr.src_port); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &udp_spec->hdr.src_port, + size); + size = sizeof(udp_spec->hdr.dst_port); + field = ulp_rte_parser_fld_copy(field, + &udp_spec->hdr.dst_port, + size); + size = sizeof(udp_spec->hdr.dgram_len); + field = ulp_rte_parser_fld_copy(field, + &udp_spec->hdr.dgram_len, + size); + size = sizeof(udp_spec->hdr.dgram_cksum); + field = ulp_rte_parser_fld_copy(field, + &udp_spec->hdr.dgram_cksum, + size); } - if (udp_mask) { - memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port, - sizeof(udp_mask->hdr.src_port)); - memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port, - sizeof(udp_mask->hdr.dst_port)); - memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len, - sizeof(udp_mask->hdr.dgram_len)); - memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum, - sizeof(udp_mask->hdr.dgram_cksum)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.src_port, + sizeof(udp_mask->hdr.src_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.dst_port, + sizeof(udp_mask->hdr.dst_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.dgram_len, + sizeof(udp_mask->hdr.dgram_len)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.dgram_cksum, + sizeof(udp_mask->hdr.dgram_cksum)); } - *field_idx = idx; /* Add number of UDP header elements */ + + /* Add number of UDP header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM; /* Set the udp header bitmap and computed l4 header bitmaps */ if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) || @@ -720,17 +725,14 @@ ulp_rte_udp_hdr_handler(const struct 
rte_flow_item *item, /* Function to handle the parsing of RTE Flow item TCP Header. */ int32_t ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; - uint32_t idx = *field_idx; - uint32_t mdx = *field_idx; - - tcp_spec = item->spec; - tcp_mask = item->mask; + const struct rte_flow_item_tcp *tcp_spec = item->spec; + const struct rte_flow_item_tcp *tcp_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) { BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n"); @@ -742,58 +744,77 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, * header fields */ if (tcp_spec) { - hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port, - sizeof(tcp_spec->hdr.src_port)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port, - sizeof(tcp_spec->hdr.dst_port)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq, - sizeof(tcp_spec->hdr.sent_seq)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack, - sizeof(tcp_spec->hdr.recv_ack)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off, - sizeof(tcp_spec->hdr.data_off)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags, - sizeof(tcp_spec->hdr.tcp_flags)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win); - memcpy(hdr_field[idx++].spec, 
&tcp_spec->hdr.rx_win, - sizeof(tcp_spec->hdr.rx_win)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum, - sizeof(tcp_spec->hdr.cksum)); - hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp); - memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp, - sizeof(tcp_spec->hdr.tcp_urp)); + size = sizeof(tcp_spec->hdr.src_port); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &tcp_spec->hdr.src_port, + size); + size = sizeof(tcp_spec->hdr.dst_port); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.dst_port, + size); + size = sizeof(tcp_spec->hdr.sent_seq); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.sent_seq, + size); + size = sizeof(tcp_spec->hdr.recv_ack); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.recv_ack, + size); + size = sizeof(tcp_spec->hdr.data_off); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.data_off, + size); + size = sizeof(tcp_spec->hdr.tcp_flags); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.tcp_flags, + size); + size = sizeof(tcp_spec->hdr.rx_win); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.rx_win, + size); + size = sizeof(tcp_spec->hdr.cksum); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.cksum, + size); + size = sizeof(tcp_spec->hdr.tcp_urp); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.tcp_urp, + size); } else { idx += BNXT_ULP_PROTO_HDR_TCP_NUM; } if (tcp_mask) { - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port, - sizeof(tcp_mask->hdr.src_port)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port, - sizeof(tcp_mask->hdr.dst_port)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq, - sizeof(tcp_mask->hdr.sent_seq)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack, - sizeof(tcp_mask->hdr.recv_ack)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off, - sizeof(tcp_mask->hdr.data_off)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags, - 
sizeof(tcp_mask->hdr.tcp_flags)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win, - sizeof(tcp_mask->hdr.rx_win)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum, - sizeof(tcp_mask->hdr.cksum)); - memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp, - sizeof(tcp_mask->hdr.tcp_urp)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.src_port, + sizeof(tcp_mask->hdr.src_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.dst_port, + sizeof(tcp_mask->hdr.dst_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.sent_seq, + sizeof(tcp_mask->hdr.sent_seq)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.recv_ack, + sizeof(tcp_mask->hdr.recv_ack)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.data_off, + sizeof(tcp_mask->hdr.data_off)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.tcp_flags, + sizeof(tcp_mask->hdr.tcp_flags)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.rx_win, + sizeof(tcp_mask->hdr.rx_win)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.cksum, + sizeof(tcp_mask->hdr.cksum)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.tcp_urp, + sizeof(tcp_mask->hdr.tcp_urp)); } - *field_idx = idx; /* add number of TCP header elements */ + /* add number of TCP header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM; /* Set the udp header bitmap and computed l4 header bitmaps */ if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) || @@ -811,63 +832,63 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, /* Function to handle the parsing of RTE Flow item Vxlan Header. 
*/ int32_t ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdrbitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params) { - const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; - uint32_t idx = *field_idx; - uint32_t mdx = *field_idx; - - vxlan_spec = item->spec; - vxlan_mask = item->mask; + const struct rte_flow_item_vxlan *vxlan_spec = item->spec; + const struct rte_flow_item_vxlan *vxlan_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; /* * Copy the rte_flow_item for vxlan into hdr_field using vxlan * header fields */ if (vxlan_spec) { - hdr_field[idx].size = sizeof(vxlan_spec->flags); - memcpy(hdr_field[idx++].spec, &vxlan_spec->flags, - sizeof(vxlan_spec->flags)); - hdr_field[idx].size = sizeof(vxlan_spec->rsvd0); - memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0, - sizeof(vxlan_spec->rsvd0)); - hdr_field[idx].size = sizeof(vxlan_spec->vni); - memcpy(hdr_field[idx++].spec, &vxlan_spec->vni, - sizeof(vxlan_spec->vni)); - hdr_field[idx].size = sizeof(vxlan_spec->rsvd1); - memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1, - sizeof(vxlan_spec->rsvd1)); - } else { - idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM; + size = sizeof(vxlan_spec->flags); + field = ulp_rte_parser_fld_copy(&params->hdr_field[idx], + &vxlan_spec->flags, + size); + size = sizeof(vxlan_spec->rsvd0); + field = ulp_rte_parser_fld_copy(field, + &vxlan_spec->rsvd0, + size); + size = sizeof(vxlan_spec->vni); + field = ulp_rte_parser_fld_copy(field, + &vxlan_spec->vni, + size); + size = sizeof(vxlan_spec->rsvd1); + field = ulp_rte_parser_fld_copy(field, + &vxlan_spec->rsvd1, + size); } - if (vxlan_mask) { - memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags, - sizeof(vxlan_mask->flags)); - memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0, - sizeof(vxlan_mask->rsvd0)); 
- memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni, - sizeof(vxlan_mask->vni)); - memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1, - sizeof(vxlan_mask->rsvd1)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->flags, + sizeof(vxlan_mask->flags)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->rsvd0, + sizeof(vxlan_mask->rsvd0)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->vni, + sizeof(vxlan_mask->vni)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->rsvd1, + sizeof(vxlan_mask->rsvd1)); } - *field_idx = idx; /* Add number of vxlan header elements */ + /* Add number of vxlan header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM; /* Update the hdr_bitmap with vxlan */ - ULP_BITMAP_SET(hdrbitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN); + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of RTE Flow item void Header */ int32_t ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused, - struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused, - struct ulp_rte_hdr_field *hdr_field __rte_unused, - uint32_t *field_idx __rte_unused, - uint32_t *vlan_idx __rte_unused) + struct ulp_rte_parser_params *params __rte_unused) { return BNXT_TF_RC_SUCCESS; } @@ -875,8 +896,7 @@ ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused, /* Function to handle the parsing of RTE Flow action void Header. */ int32_t ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused, - struct ulp_rte_act_bitmap *act __rte_unused, - struct ulp_rte_act_prop *act_prop __rte_unused) + struct ulp_rte_parser_params *params __rte_unused) { return BNXT_TF_RC_SUCCESS; } @@ -884,16 +904,16 @@ ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused, /* Function to handle the parsing of RTE Flow action Mark Header. 
*/ int32_t ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop) + struct ulp_rte_parser_params *param) { const struct rte_flow_action_mark *mark; - uint32_t mark_id = 0; + struct ulp_rte_act_bitmap *act = &param->act_bitmap; + uint32_t mark_id; mark = action_item->conf; if (mark) { mark_id = tfp_cpu_to_be_32(mark->id); - memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK], + memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK], &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK); /* Update the hdr_bitmap with vxlan */ @@ -907,15 +927,13 @@ ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, /* Function to handle the parsing of RTE Flow action RSS Header. */ int32_t ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop __rte_unused) + struct ulp_rte_parser_params *param) { - const struct rte_flow_action_rss *rss; + const struct rte_flow_action_rss *rss = action_item->conf; - rss = action_item->conf; if (rss) { /* Update the hdr_bitmap with vxlan */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS); + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS); return BNXT_TF_RC_SUCCESS; } BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n"); @@ -925,8 +943,7 @@ ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, /* Function to handle the parsing of RTE Flow action vxlan_encap Header. 
*/ int32_t ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *ap) + struct ulp_rte_parser_params *params) { const struct rte_flow_action_vxlan_encap *vxlan_encap; const struct rte_flow_item *item; @@ -939,8 +956,10 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, uint32_t vxlan_size = 0; uint8_t *buff; /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */ - const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00, + const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11}; + struct ulp_rte_act_bitmap *act = &params->act_bitmap; + struct ulp_rte_act_prop *ap = &params->act_prop; vxlan_encap = action_item->conf; if (!vxlan_encap) { @@ -1115,33 +1134,32 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, int32_t ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item __rte_unused, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop __rte_unused) + struct ulp_rte_parser_params *params) { /* update the hdr_bitmap with vxlan */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP); + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_ACTION_BIT_VXLAN_DECAP); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of RTE Flow action drop Header. */ int32_t ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop __rte_unused) + struct ulp_rte_parser_params *params) { /* Update the hdr_bitmap with drop */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of RTE Flow action count. 
*/ int32_t ulp_rte_count_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop __rte_unused) + struct ulp_rte_parser_params *params) { const struct rte_flow_action_count *act_count; + struct ulp_rte_act_prop *act_prop = &params->act_prop; act_count = action_item->conf; if (act_count) { @@ -1156,29 +1174,28 @@ ulp_rte_count_act_handler(const struct rte_flow_action *action_item, } /* Update the hdr_bitmap with count */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of RTE Flow action PF. */ int32_t ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop) + struct ulp_rte_parser_params *param) { uint8_t *svif_buf; uint8_t *vnic_buffer; uint32_t svif; /* Update the hdr_bitmap with vnic bit */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC); + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); /* copy the PF of the current device into VNIC Property */ - svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; + svif_buf = &param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; ulp_util_field_int_read(svif_buf, &svif); svif = (uint32_t)bnxt_get_vnic_id(svif); svif = htonl(svif); - vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; + vnic_buffer = &param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; ulp_util_field_int_write(vnic_buffer, svif); return BNXT_TF_RC_SUCCESS; @@ -1187,8 +1204,7 @@ ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused, /* Function to handle the parsing of RTE Flow action VF. 
*/ int32_t ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop) + struct ulp_rte_parser_params *param) { const struct rte_flow_action_vf *vf_action; @@ -1200,21 +1216,20 @@ ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, return BNXT_TF_RC_PARSE_ERR; } /* TBD: Update the computed VNIC using VF conversion */ - memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], &vf_action->id, BNXT_ULP_ACT_PROP_SZ_VNIC); } /* Update the hdr_bitmap with count */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC); + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of RTE Flow action port_id. */ int32_t ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop) + struct ulp_rte_parser_params *param) { const struct rte_flow_action_port_id *port_id; @@ -1226,21 +1241,20 @@ ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, return BNXT_TF_RC_PARSE_ERR; } /* TBD: Update the computed VNIC using port conversion */ - memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], &port_id->id, BNXT_ULP_ACT_PROP_SZ_VNIC); } /* Update the hdr_bitmap with count */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC); + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); return BNXT_TF_RC_SUCCESS; } /* Function to handle the parsing of RTE Flow action phy_port. 
*/ int32_t ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop) + struct ulp_rte_parser_params *prm) { const struct rte_flow_action_phy_port *phy_port; @@ -1251,12 +1265,12 @@ ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, "Parse Err:Port Original not supported\n"); return BNXT_TF_RC_PARSE_ERR; } - memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT], + memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT], &phy_port->index, BNXT_ULP_ACT_PROP_SZ_VPORT); } /* Update the hdr_bitmap with count */ - ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT); + ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT); return BNXT_TF_RC_SUCCESS; } diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h index 0d571bbba..4cc9dcc4e 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h @@ -22,8 +22,7 @@ /* Function to handle the parsing of the RTE port id. */ int32_t -ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field); +ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params); /* * Function to handle the parsing of RTE Flows and placing @@ -31,178 +30,129 @@ ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap *hdr_bitmap, */ int32_t bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field); + struct ulp_rte_parser_params *params); /* * Function to handle the parsing of RTE Flows and placing * the RTE flow actions into the ulp structures. 
*/ int32_t -bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], - struct ulp_rte_act_bitmap *act_bitmap, - struct ulp_rte_act_prop *act_prop); +bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item PF Header. */ int32_t -ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item VF Header. */ int32_t -ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item port id Header. */ int32_t -ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item port Header. */ int32_t -ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the RTE item Ethernet Header. 
*/ int32_t -ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item Vlan Header. */ int32_t -ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item IPV4 Header. */ int32_t -ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item IPV6 Header. */ int32_t -ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item UDP Header. */ int32_t -ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item TCP Header. 
*/ int32_t -ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item Vxlan Header. */ int32_t -ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdrbitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow item void Header. */ int32_t -ulp_rte_void_hdr_handler(const struct rte_flow_item *item, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); +ulp_rte_void_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action void Header. */ int32_t -ulp_rte_void_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_void_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action RSS Header. */ int32_t -ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action Mark Header. 
*/ int32_t -ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */ int32_t -ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */ int32_t -ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action drop Header. */ int32_t -ulp_rte_drop_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_drop_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action count. */ int32_t -ulp_rte_count_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_count_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action PF. */ int32_t -ulp_rte_pf_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_pf_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action VF. 
*/ int32_t -ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action port_id. */ int32_t -ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_p); +ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, + struct ulp_rte_parser_params *params); /* Function to handle the parsing of RTE Flow action phy_port. */ int32_t -ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop); +ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); #endif /* _ULP_RTE_PARSER_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h index 47c0dd852..8adbf7a24 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h @@ -41,15 +41,32 @@ struct ulp_rte_hdr_field { uint32_t size; }; +struct ulp_rte_act_bitmap { + uint64_t bits; +}; + +/* Structure to hold the action property details. 
*/ +struct ulp_rte_act_prop { + uint8_t act_details[BNXT_ULP_ACT_PROP_IDX_LAST]; +}; + +/* Structure to be used for passing all the parser functions */ +struct ulp_rte_parser_params { + struct ulp_rte_hdr_bitmap hdr_bitmap; + struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX]; + uint32_t field_idx; + uint32_t vlan_idx; + struct ulp_rte_act_bitmap act_bitmap; + struct ulp_rte_act_prop act_prop; + uint32_t dir; +}; + /* Flow Parser Header Information Structure */ struct bnxt_ulp_rte_hdr_info { enum bnxt_ulp_hdr_type hdr_type; /* Flow Parser Protocol Header Function Prototype */ int (*proto_hdr_func)(const struct rte_flow_item *item_list, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - uint32_t *field_idx, - uint32_t *vlan_idx); + struct ulp_rte_parser_params *params); }; /* Flow Parser Header Information Structure Array defined in template source*/ @@ -60,26 +77,13 @@ struct bnxt_ulp_matcher_field_info { enum bnxt_ulp_fmf_spec spec_opcode; }; -struct ulp_rte_act_bitmap { - uint64_t bits; -}; - -/* - * Structure to hold the action property details. - * It is a array of 128 bytes. 
- */ -struct ulp_rte_act_prop { - uint8_t act_details[BNXT_ULP_ACT_PROP_IDX_LAST]; -}; - /* Flow Parser Action Information Structure */ struct bnxt_ulp_rte_act_info { enum bnxt_ulp_act_type act_type; /* Flow Parser Protocol Action Function Prototype */ int32_t (*proto_act_func) - (const struct rte_flow_action *action_item, - struct ulp_rte_act_bitmap *act_bitmap, - struct ulp_rte_act_prop *act_prop); + (const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); }; /* Flow Parser Action Information Structure Array defined in template source*/ From patchwork Fri Apr 17 16:19:14 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68792 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4A1A9A0597; Fri, 17 Apr 2020 18:20:27 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 616561EA32; Fri, 17 Apr 2020 18:19:37 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 84D101E9CF for ; Fri, 17 Apr 2020 18:19:27 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id D353D30C1E3; Fri, 17 Apr 2020 09:08:06 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com D353D30C1E3 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139686; bh=6M9c+e0TuvP2gMfcz/Gjel93WvHqNlmieEWZ4bTBiIY=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=DY2AWEdD0cN5YTJjOfBN0gdvqPJNC2WYOcv1bxaxLvO7HWFKhXHO10M9RV4OpTP6T 686UzYiYO0EkpXolhcNeYOY0LTUEfejC1T16Sw1I5sGLRF2OwQDZ4a06v0M67S9k7/ 
7YaMX1/2KQSgAC9jrcggjueNxgjmFonGsmeGhzLY= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 0D3B314008C; Fri, 17 Apr 2020 09:19:26 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Mike Baucom , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:14 -0700 Message-Id: <20200417161920.85858-7-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 06/12] net/bnxt: aggregate ulp mapper create arguments X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Mike Baucom The changes are to the ulp mapper flow_create, the API changed to take the bnxt_ulp_mapper_create_parms structure instead of individual fields. 
Reviewed-by: Venkat Duvvuru Reviewed-by: Ajit Khaparde Signed-off-by: Mike Baucom Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 19 +++++++------- drivers/net/bnxt/tf_ulp/ulp_mapper.c | 33 +++++++++++-------------- drivers/net/bnxt/tf_ulp/ulp_mapper.h | 20 +++++++++------ 3 files changed, 37 insertions(+), 35 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 026f33f66..9326401b4 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -67,11 +67,11 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { + struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 }; struct ulp_rte_parser_params params; struct bnxt_ulp_context *ulp_ctx = NULL; uint32_t class_id, act_tmpl; struct rte_flow *flow_id; - uint32_t app_priority; uint32_t fid; uint8_t *buffer; uint32_t vnic; @@ -125,16 +125,17 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; - app_priority = attr->priority; + mapper_cparms.app_priority = attr->priority; + mapper_cparms.hdr_bitmap = ¶ms.hdr_bitmap; + mapper_cparms.hdr_field = params.hdr_field; + mapper_cparms.act = ¶ms.act_bitmap; + mapper_cparms.act_prop = ¶ms.act_prop; + mapper_cparms.class_tid = class_id; + mapper_cparms.act_tid = act_tmpl; + /* call the ulp mapper to create the flow in the hardware */ ret = ulp_mapper_flow_create(ulp_ctx, - app_priority, - ¶ms.hdr_bitmap, - params.hdr_field, - ¶ms.act_bitmap, - ¶ms.act_prop, - class_id, - act_tmpl, + &mapper_cparms, &fid); if (!ret) { flow_id = (struct rte_flow *)((uintptr_t)fid); diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c index f787c6e4d..f70afa47f 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -1411,26 +1411,23 @@ ulp_mapper_flow_destroy(struct bnxt_ulp_context 
*ulp_ctx, uint32_t fid) */ int32_t ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, - uint32_t app_priority __rte_unused, - struct ulp_rte_hdr_bitmap *hdr_bitmap __rte_unused, - struct ulp_rte_hdr_field *hdr_field, - struct ulp_rte_act_bitmap *act_bitmap, - struct ulp_rte_act_prop *act_prop, - uint32_t class_tid, - uint32_t act_tid, - uint32_t *flow_id) + struct bnxt_ulp_mapper_create_parms *cparms, + uint32_t *flowid) { - struct ulp_regfile regfile; - struct bnxt_ulp_mapper_parms parms; - struct bnxt_ulp_device_params *device_params; - int32_t rc, trc; + struct bnxt_ulp_device_params *device_params; + struct bnxt_ulp_mapper_parms parms; + struct ulp_regfile regfile; + int32_t rc, trc; + + if (!ulp_ctx || !cparms) + return -EINVAL; /* Initialize the parms structure */ memset(&parms, 0, sizeof(parms)); - parms.act_prop = act_prop; - parms.act_bitmap = act_bitmap; + parms.act_prop = cparms->act_prop; + parms.act_bitmap = cparms->act; parms.regfile = ®file; - parms.hdr_field = hdr_field; + parms.hdr_field = cparms->hdr_field; parms.tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); parms.ulp_ctx = ulp_ctx; @@ -1441,7 +1438,7 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, } /* Get the action table entry from device id and act context id */ - parms.act_tid = act_tid; + parms.act_tid = cparms->act_tid; parms.atbls = ulp_mapper_action_tbl_list_get(parms.dev_id, parms.act_tid, &parms.num_atbls); @@ -1452,7 +1449,7 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, } /* Get the class table entry from device id and act context id */ - parms.class_tid = class_tid; + parms.class_tid = cparms->class_tid; parms.ctbls = ulp_mapper_class_tbl_list_get(parms.dev_id, parms.class_tid, &parms.num_ctbls); @@ -1506,7 +1503,7 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, goto flow_error; } - *flow_id = parms.fid; + *flowid = parms.fid; return rc; diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h index 
5f3d46eff..24727a32d 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h @@ -38,20 +38,24 @@ struct bnxt_ulp_mapper_parms { enum bnxt_ulp_flow_db_tables tbl_idx; }; +struct bnxt_ulp_mapper_create_parms { + uint32_t app_priority; + struct ulp_rte_hdr_bitmap *hdr_bitmap; + struct ulp_rte_hdr_field *hdr_field; + struct ulp_rte_act_bitmap *act; + struct ulp_rte_act_prop *act_prop; + uint32_t class_tid; + uint32_t act_tid; +}; + /* * Function to handle the mapping of the Flow to be compatible * with the underlying hardware. */ int32_t ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, - uint32_t app_priority, - struct ulp_rte_hdr_bitmap *hdr_bitmap, - struct ulp_rte_hdr_field *hdr_field, - struct ulp_rte_act_bitmap *act, - struct ulp_rte_act_prop *act_prop, - uint32_t class_tid, - uint32_t act_tid, - uint32_t *flow_id); + struct bnxt_ulp_mapper_create_parms *parms, + uint32_t *flowid); /* Function that frees all resources associated with the flow. 
*/ int32_t From patchwork Fri Apr 17 16:19:15 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68796 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 36114A0597; Fri, 17 Apr 2020 18:21:18 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E70491EA69; Fri, 17 Apr 2020 18:19:42 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 36CBD1E9E1 for ; Fri, 17 Apr 2020 18:19:28 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 835C730C1CD; Fri, 17 Apr 2020 09:08:07 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 835C730C1CD DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139687; bh=3LGvzVTOWjxtw1YIkPpPCVIKHrrqDQg3YeegoRXtlJg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=e2G8NEnJ/PZOJ+l18/+6Rbh8C5fYZizfzOWLtAreiJZtHoCc7vIMVPEyQRgnAXyOF Az+D0eQ37BVGTKwo57CZdFVtg+alfjd1kRclYcVPL4Ft+xYEZdwsIqy4OMxX8nSiNu B+lNgI7uMRtBx2oE11yNx3F8J3uxUxCEckzA6AY4= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 57EA514008D; Fri, 17 Apr 2020 09:19:26 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Kishore Padmanabha , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:15 -0700 Message-Id: <20200417161920.85858-8-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> 
<20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 07/12] net/bnxt: use hashing for flow template match X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Kishore Padmanabha Currently, all the flow templates are sequentially searched to find out whether there is a matching template for the incoming RTE_FLOW offload request. As sequential search will have performance concerns, this patch will address it by using hash algorithm to find out the flow template. This change resulted in creation of computed fields to remove the fields that do not participate in the hash calculations. The field bitmap is created for this purpose. Reviewed-by: Venkat Duvvuru Reviewed-by: Ajit Khaparde Signed-off-by: Kishore Padmanabha Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 23 +- drivers/net/bnxt/tf_ulp/ulp_mapper.c | 4 +- drivers/net/bnxt/tf_ulp/ulp_matcher.c | 214 +++++------- drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 282 ++++++++-------- drivers/net/bnxt/tf_ulp/ulp_rte_parser.h | 4 + drivers/net/bnxt/tf_ulp/ulp_template_db.c | 307 +++++++----------- drivers/net/bnxt/tf_ulp/ulp_template_db.h | 280 ++++++++++------ .../net/bnxt/tf_ulp/ulp_template_field_db.h | 171 +++------- drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 50 ++- drivers/net/bnxt/tf_ulp/ulp_utils.c | 33 ++ drivers/net/bnxt/tf_ulp/ulp_utils.h | 17 +- 11 files changed, 686 insertions(+), 699 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 9326401b4..7f7aa24e6 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -61,11 +61,11 @@ bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr, /* Function to create the rte flow. 
*/ static struct rte_flow * -bnxt_ulp_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +bnxt_ulp_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 }; struct ulp_rte_parser_params params; @@ -73,8 +73,6 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, uint32_t class_id, act_tmpl; struct rte_flow *flow_id; uint32_t fid; - uint8_t *buffer; - uint32_t vnic; int ret; if (bnxt_ulp_flow_validate_args(attr, @@ -97,14 +95,9 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, params.dir = ULP_DIR_EGRESS; /* copy the device port id and direction for further processing */ - buffer = params.hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; - rte_memcpy(buffer, &dev->data->port_id, sizeof(uint16_t)); - - /* Set the implicit vnic in the action property */ - vnic = (uint32_t)bnxt_get_vnic_id(dev->data->port_id); - vnic = htonl(vnic); - rte_memcpy(¶ms.act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], - &vnic, BNXT_ULP_ACT_PROP_SZ_VNIC); + ULP_UTIL_CHF_IDX_WR(¶ms, BNXT_ULP_CHF_IDX_INCOMING_IF, + dev->data->port_id); + ULP_UTIL_CHF_IDX_WR(¶ms, BNXT_ULP_CHF_IDX_DIRECTION, params.dir); /* Parse the rte flow pattern */ ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c index f70afa47f..a0aba403f 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -357,7 +357,7 @@ ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, (void)tf_free_identifier(tfp, &free_parms); BNXT_TF_DBG(ERR, "Ident process failed for %s:%s\n", - ident->name, + ident->description, (tbl->direction == TF_DIR_RX) ? 
"RX" : "TX"); return rc; } @@ -405,7 +405,7 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, return -EINVAL; } break; - case BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP_SZ: + case BNXT_ULP_RESULT_OPC_SET_TO_ENCAP_ACT_PROP_SZ: if (!ulp_operand_read(fld->result_operand, (uint8_t *)&idx, sizeof(uint16_t))) { BNXT_TF_DBG(ERR, "%s operand read failed\n", name); diff --git a/drivers/net/bnxt/tf_ulp/ulp_matcher.c b/drivers/net/bnxt/tf_ulp/ulp_matcher.c index e04bfa094..e5f23ef27 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_matcher.c +++ b/drivers/net/bnxt/tf_ulp/ulp_matcher.c @@ -6,40 +6,32 @@ #include "ulp_matcher.h" #include "ulp_utils.h" -/* Utility function to check if bitmap is zero */ -static inline -int ulp_field_mask_is_zero(uint8_t *bitmap, uint32_t size) +/* Utility function to calculate the class matcher hash */ +static uint32_t +ulp_matcher_class_hash_calculate(uint64_t hi_sig, uint64_t lo_sig) { - while (size-- > 0) { - if (*bitmap != 0) - return 0; - bitmap++; - } - return 1; -} + uint64_t hash; -/* Utility function to check if bitmap is all ones */ -static inline int -ulp_field_mask_is_ones(uint8_t *bitmap, uint32_t size) -{ - while (size-- > 0) { - if (*bitmap != 0xFF) - return 0; - bitmap++; - } - return 1; + hi_sig |= ((hi_sig % BNXT_ULP_CLASS_HID_HIGH_PRIME) << + BNXT_ULP_CLASS_HID_SHFTL); + lo_sig |= ((lo_sig % BNXT_ULP_CLASS_HID_LOW_PRIME) << + (BNXT_ULP_CLASS_HID_SHFTL + 2)); + hash = hi_sig ^ lo_sig; + hash = (hash >> BNXT_ULP_CLASS_HID_SHFTR) & BNXT_ULP_CLASS_HID_MASK; + return (uint32_t)hash; } -/* Utility function to check if bitmap is non zero */ -static inline int -ulp_field_mask_notzero(uint8_t *bitmap, uint32_t size) +/* Utility function to calculate the action matcher hash */ +static uint32_t +ulp_matcher_action_hash_calculate(uint64_t hi_sig) { - while (size-- > 0) { - if (*bitmap != 0) - return 1; - bitmap++; - } - return 0; + uint64_t hash; + + hi_sig |= ((hi_sig % BNXT_ULP_ACT_HID_HIGH_PRIME) << + BNXT_ULP_ACT_HID_SHFTL); + 
hash = hi_sig; + hash = (hash >> BNXT_ULP_ACT_HID_SHFTR) & BNXT_ULP_ACT_HID_MASK; + return (uint32_t)hash; } /* Utility function to mask the computed and internal proto headers. */ @@ -56,10 +48,6 @@ ulp_matcher_hdr_fields_normalize(struct ulp_rte_hdr_bitmap *hdr1, ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_OI_VLAN); ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_IO_VLAN); ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_II_VLAN); - ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_O_L3); - ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_O_L4); - ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_I_L3); - ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_I_L4); } /* @@ -70,82 +58,55 @@ int32_t ulp_matcher_pattern_match(struct ulp_rte_parser_params *params, uint32_t *class_id) { - struct bnxt_ulp_header_match_info *sel_hdr_match; - uint32_t hdr_num, idx, jdx; - uint32_t match = 0; - struct ulp_rte_hdr_bitmap hdr_bitmap_masked; - uint32_t start_idx; - struct ulp_rte_hdr_field *m_field; - struct bnxt_ulp_matcher_field_info *sf; - struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; - struct ulp_rte_act_bitmap *act_bitmap = ¶ms->act_bitmap; - struct ulp_rte_hdr_field *hdr_field = params->hdr_field; - - /* Select the ingress or egress template to match against */ - if (params->dir == ULP_DIR_INGRESS) { - sel_hdr_match = ulp_ingress_hdr_match_list; - hdr_num = BNXT_ULP_INGRESS_HDR_MATCH_SZ; + struct ulp_rte_hdr_bitmap hdr_bitmap_masked; + struct bnxt_ulp_class_match_info *class_match; + uint32_t class_hid; + uint8_t vf_to_vf; + uint16_t tmpl_id; + + /* Remove the hdr bit maps that are internal or computed */ + ulp_matcher_hdr_fields_normalize(¶ms->hdr_bitmap, + &hdr_bitmap_masked); + + /* determine vf to vf flow */ + if (params->dir == ULP_DIR_EGRESS && + ULP_BITMAP_ISSET(params->act_bitmap.bits, + BNXT_ULP_ACTION_BIT_VNIC)) { + vf_to_vf = 1; } else { - sel_hdr_match = ulp_egress_hdr_match_list; - hdr_num = BNXT_ULP_EGRESS_HDR_MATCH_SZ; + vf_to_vf = 0; } - /* Remove the hdr bit maps 
that are internal or computed */ - ulp_matcher_hdr_fields_normalize(hdr_bitmap, &hdr_bitmap_masked); - - /* Loop through the list of class templates to find the match */ - for (idx = 0; idx < hdr_num; idx++, sel_hdr_match++) { - if (ULP_BITSET_CMP(&sel_hdr_match->hdr_bitmap, - &hdr_bitmap_masked)) { - /* no match found */ - BNXT_TF_DBG(DEBUG, "Pattern Match failed template=%d\n", - idx); - continue; - } - match = ULP_BITMAP_ISSET(act_bitmap->bits, - BNXT_ULP_ACTION_BIT_VNIC); - if (match != sel_hdr_match->act_vnic) { - /* no match found */ - BNXT_TF_DBG(DEBUG, "Vnic Match failed template=%d\n", - idx); - continue; - } else { - match = 1; - } - - /* Found a matching hdr bitmap, match the fields next */ - start_idx = sel_hdr_match->start_idx; - for (jdx = 0; jdx < sel_hdr_match->num_entries; jdx++) { - m_field = &hdr_field[jdx + BNXT_ULP_HDR_FIELD_LAST - 1]; - sf = &ulp_field_match[start_idx + jdx]; - switch (sf->mask_opcode) { - case BNXT_ULP_FMF_MASK_ANY: - match &= ulp_field_mask_is_zero(m_field->mask, - m_field->size); - break; - case BNXT_ULP_FMF_MASK_EXACT: - match &= ulp_field_mask_is_ones(m_field->mask, - m_field->size); - break; - case BNXT_ULP_FMF_MASK_WILDCARD: - match &= ulp_field_mask_notzero(m_field->mask, - m_field->size); - break; - case BNXT_ULP_FMF_MASK_IGNORE: - default: - break; - } - if (!match) - break; - } - if (match) { - BNXT_TF_DBG(DEBUG, - "Found matching pattern template %d\n", - sel_hdr_match->class_tmpl_id); - *class_id = sel_hdr_match->class_tmpl_id; - return BNXT_TF_RC_SUCCESS; - } + /* calculate the hash of the given flow */ + class_hid = ulp_matcher_class_hash_calculate(hdr_bitmap_masked.bits, + params->fld_bitmap.bits); + + /* validate the calculate hash values */ + if (class_hid >= BNXT_ULP_CLASS_SIG_TBL_MAX_SZ) + goto error; + tmpl_id = ulp_class_sig_tbl[class_hid]; + if (!tmpl_id) + goto error; + + class_match = &ulp_class_match_list[tmpl_id]; + if (ULP_BITMAP_CMP(&hdr_bitmap_masked, &class_match->hdr_sig)) { + BNXT_TF_DBG(DEBUG, 
"Proto Header does not match\n"); + goto error; + } + if (ULP_BITMAP_CMP(¶ms->fld_bitmap, &class_match->field_sig)) { + BNXT_TF_DBG(DEBUG, "Field signature does not match\n"); + goto error; } + if (vf_to_vf != class_match->act_vnic) { + BNXT_TF_DBG(DEBUG, "Vnic Match failed\n"); + goto error; + } + BNXT_TF_DBG(DEBUG, "Found matching pattern template %d\n", + class_match->class_tid); + *class_id = class_match->class_tid; + return BNXT_TF_RC_SUCCESS; + +error: BNXT_TF_DBG(DEBUG, "Did not find any matching template\n"); *class_id = 0; return BNXT_TF_RC_ERROR; @@ -159,29 +120,30 @@ int32_t ulp_matcher_action_match(struct ulp_rte_parser_params *params, uint32_t *act_id) { - struct bnxt_ulp_action_match_info *sel_act_match; - uint32_t act_num, idx; - struct ulp_rte_act_bitmap *act_bitmap = ¶ms->act_bitmap; - - /* Select the ingress or egress template to match against */ - if (params->dir == ULP_DIR_INGRESS) { - sel_act_match = ulp_ingress_act_match_list; - act_num = BNXT_ULP_INGRESS_ACT_MATCH_SZ; - } else { - sel_act_match = ulp_egress_act_match_list; - act_num = BNXT_ULP_EGRESS_ACT_MATCH_SZ; - } + uint32_t act_hid; + uint16_t tmpl_id; + struct bnxt_ulp_act_match_info *act_match; + + /* calculate the hash of the given flow action */ + act_hid = ulp_matcher_action_hash_calculate(params->act_bitmap.bits); - /* Loop through the list of action templates to find the match */ - for (idx = 0; idx < act_num; idx++, sel_act_match++) { - if (!ULP_BITSET_CMP(&sel_act_match->act_bitmap, - act_bitmap)) { - *act_id = sel_act_match->act_tmpl_id; - BNXT_TF_DBG(DEBUG, "Found matching act template %u\n", - *act_id); - return BNXT_TF_RC_SUCCESS; - } + /* validate the calculate hash values */ + if (act_hid >= BNXT_ULP_ACT_SIG_TBL_MAX_SZ) + goto error; + tmpl_id = ulp_act_sig_tbl[act_hid]; + if (!tmpl_id) + goto error; + + act_match = &ulp_act_match_list[tmpl_id]; + if (ULP_BITMAP_CMP(¶ms->act_bitmap, &act_match->act_sig)) { + BNXT_TF_DBG(DEBUG, "Action Header does not match\n"); + goto 
error; } + *act_id = act_match->act_tid; + BNXT_TF_DBG(DEBUG, "Found matching action template %u\n", *act_id); + return BNXT_TF_RC_SUCCESS; + +error: BNXT_TF_DBG(DEBUG, "Did not find any matching action template\n"); *act_id = 0; return BNXT_TF_RC_ERROR; diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c index 2980e03b4..873f86494 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -11,25 +11,6 @@ #include "ulp_utils.h" #include "tfp.h" -/* Inline Func to read integer that is stored in big endian format */ -static inline void ulp_util_field_int_read(uint8_t *buffer, - uint32_t *val) -{ - uint32_t temp_val; - - memcpy(&temp_val, buffer, sizeof(uint32_t)); - *val = rte_be_to_cpu_32(temp_val); -} - -/* Inline Func to write integer that is stored in big endian format */ -static inline void ulp_util_field_int_write(uint8_t *buffer, - uint32_t val) -{ - uint32_t temp_val = rte_cpu_to_be_32(val); - - memcpy(buffer, &temp_val, sizeof(uint32_t)); -} - /* Utility function to skip the void items. 
*/ static inline int32_t ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment) @@ -45,6 +26,25 @@ ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment) return 0; } +/* Utility function to update the field_bitmap */ +static void +ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params, + uint32_t idx) +{ + struct ulp_rte_hdr_field *field; + + field = ¶ms->hdr_field[idx]; + if (ulp_bitmap_notzero(field->mask, field->size)) { + ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx); + /* Not exact match */ + if (!ulp_bitmap_is_ones(field->mask, field->size)) + ULP_BITMAP_SET(params->fld_bitmap.bits, + BNXT_ULP_MATCH_TYPE_BITMASK_WM); + } else { + ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx); + } +} + /* Utility function to copy field spec items */ static struct ulp_rte_hdr_field * ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field, @@ -64,9 +64,10 @@ ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params, const void *buffer, uint32_t size) { - struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx]; + struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx]; memcpy(field->mask, buffer, size); + ulp_rte_parser_field_bitmap_update(params, *idx); *idx = *idx + 1; } @@ -81,7 +82,11 @@ bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], const struct rte_flow_item *item = pattern; struct bnxt_ulp_rte_hdr_info *hdr_info; - params->field_idx = BNXT_ULP_HDR_FIELD_LAST; + params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM; + if (params->dir == ULP_DIR_EGRESS) + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + /* Parse all the items in the pattern */ while (item && item->type != RTE_FLOW_ITEM_TYPE_END) { /* get the header information from the flow_hdr_info table */ @@ -141,6 +146,8 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], } action_item++; } + /* update the implied VNIC */ + ulp_rte_parser_vnic_process(params); return 
BNXT_TF_RC_SUCCESS; } @@ -153,69 +160,78 @@ ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params, { uint16_t port_id = svif; uint32_t dir = 0; + struct ulp_rte_hdr_field *hdr_field; if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) { BNXT_TF_DBG(ERR, - "SVIF already set," - " multiple sources not supported\n"); + "SVIF already set,multiple source not support'd\n"); return BNXT_TF_RC_ERROR; } - /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */ + /*update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF */ ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF); if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) { - /* perform the conversion from dpdk port to svif */ - dir = params->dir; + dir = ULP_UTIL_CHF_IDX_RD(params, + BNXT_ULP_CHF_IDX_DIRECTION); + /* perform the conversion from dpdk port to bnxt svif */ if (dir == ULP_DIR_EGRESS) svif = bnxt_get_svif(port_id, true); else svif = bnxt_get_svif(port_id, false); } - - memcpy(params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, - &svif, sizeof(svif)); - memcpy(params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask, - &mask, sizeof(mask)); - params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif); + hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX]; + memcpy(hdr_field->spec, &svif, sizeof(svif)); + memcpy(hdr_field->mask, &mask, sizeof(mask)); + hdr_field->size = sizeof(svif); return BNXT_TF_RC_SUCCESS; } -/* Function to handle the parsing of the RTE port id - */ +/* Function to handle the parsing of the RTE port id */ int32_t ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params) { uint16_t port_id = 0; - uint8_t *buffer; uint16_t svif_mask = 0xFFFF; if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) return BNXT_TF_RC_SUCCESS; - /* SVIF not set. 
So get the port id and direction */ - buffer = params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; - memcpy(&port_id, buffer, sizeof(port_id)); - memset(buffer, 0, RTE_PARSER_FLOW_HDR_FIELD_SIZE); + /* SVIF not set. So get the port id */ + port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF); - return ulp_rte_parser_svif_set(params, - RTE_FLOW_ITEM_TYPE_PORT_ID, + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID, port_id, svif_mask); } +/* Function to handle the implicit VNIC RTE port id */ +int32_t +ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params) +{ + struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap; + + if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) || + ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT)) + return BNXT_TF_RC_SUCCESS; + + /* Update the vnic details */ + ulp_rte_pf_act_handler(NULL, params); + return BNXT_TF_RC_SUCCESS; +} + /* Function to handle the parsing of RTE Flow item PF Header. 
*/ int32_t ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, struct ulp_rte_parser_params *params) { uint16_t port_id = 0; - uint8_t *buffer; uint16_t svif_mask = 0xFFFF; - buffer = params->hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec; - memcpy(&port_id, buffer, sizeof(port_id)); - memset(buffer, 0, RTE_PARSER_FLOW_HDR_FIELD_SIZE); + /* Get the port id */ + port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF); + /* Update the SVIF details */ return ulp_rte_parser_svif_set(params, item->type, port_id, svif_mask); @@ -345,15 +361,11 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, const struct rte_flow_item_vlan *vlan_spec = item->spec; const struct rte_flow_item_vlan *vlan_mask = item->mask; struct ulp_rte_hdr_field *field; - struct ulp_rte_hdr_bitmap *hdr_bitmap; + struct ulp_rte_hdr_bitmap *hdr_bit; uint32_t idx = params->vlan_idx; uint16_t vlan_tag, priority; - uint32_t outer_vtag_num = 0, inner_vtag_num = 0; - uint8_t *outer_tag_buff; - uint8_t *inner_tag_buff; - - outer_tag_buff = params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec; - inner_tag_buff = params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec; + uint32_t outer_vtag_num; + uint32_t inner_vtag_num; /* * Copy the rte_flow_item for vlan into hdr_field using Vlan @@ -393,64 +405,53 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; /* Get the outer tag and inner tag counts */ - ulp_util_field_int_read(outer_tag_buff, &outer_vtag_num); - ulp_util_field_int_read(inner_tag_buff, &inner_vtag_num); + outer_vtag_num = ULP_UTIL_CHF_IDX_RD(params, + BNXT_ULP_CHF_IDX_O_VTAG_NUM); + inner_vtag_num = ULP_UTIL_CHF_IDX_RD(params, + BNXT_ULP_CHF_IDX_I_VTAG_NUM); /* Update the hdr_bitmap of the vlans */ - hdr_bitmap = ¶ms->hdr_bitmap; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) && - !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) { + hdr_bit = ¶ms->hdr_bitmap; + if 
(ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) { /* Set the outer vlan bit and update the vlan tag num */ - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN); + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN); outer_vtag_num++; - ulp_util_field_int_write(outer_tag_buff, outer_vtag_num); - params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size = - sizeof(uint32_t); - } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_OO_VLAN) && - !ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_OI_VLAN)) { + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_PRESENT, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN)) { /* Set the outer vlan bit and update the vlan tag num */ - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN); + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN); outer_vtag_num++; - ulp_util_field_int_write(outer_tag_buff, outer_vtag_num); - params->hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size = - sizeof(uint32_t); - } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_O_ETH) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_OO_VLAN) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_OI_VLAN) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_I_ETH) && - !ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_IO_VLAN)) { + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_TWO_VTAGS, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) && + 
ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN)) { /* Set the inner vlan bit and update the vlan tag num */ - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN); + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN); inner_vtag_num++; - ulp_util_field_int_write(inner_tag_buff, inner_vtag_num); - params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size = - sizeof(uint32_t); - } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_O_ETH) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_OO_VLAN) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_OI_VLAN) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_I_ETH) && - ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_IO_VLAN) && - !ULP_BITMAP_ISSET(hdr_bitmap->bits, - BNXT_ULP_HDR_BIT_II_VLAN)) { + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_PRESENT, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN)) { /* Set the inner vlan bit and update the vlan tag num */ - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN); + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN); inner_vtag_num++; - ulp_util_field_int_write(inner_tag_buff, inner_vtag_num); - params->hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size = - sizeof(uint32_t); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_TWO_VTAGS, 1); } else { BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found 
withtout eth\n"); return BNXT_TF_RC_ERROR; @@ -469,8 +470,10 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; uint32_t idx = params->field_idx; uint32_t size; + uint32_t inner_l3, outer_l3; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) { + inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3); + if (inner_l3) { BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n"); return BNXT_TF_RC_ERROR; } @@ -557,14 +560,17 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM; /* Set the ipv4 header bitmap and computed l3 header bitmaps */ - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) || + outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3); + if (outer_l3 || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3); + inner_l3++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, inner_l3); } else { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3); + outer_l3++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, outer_l3); } return BNXT_TF_RC_SUCCESS; } @@ -580,8 +586,10 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; uint32_t idx = params->field_idx; uint32_t size; + uint32_t inner_l3, outer_l3; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) { + inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3); + if (inner_l3) { BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n"); return BNXT_TF_RC_ERROR; } @@ -640,14 +648,15 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM; /* 
Set the ipv6 header bitmap and computed l3 header bitmaps */ - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) || + outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3); + if (outer_l3 || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, 1); } else { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, 1); } return BNXT_TF_RC_SUCCESS; } @@ -663,8 +672,10 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; uint32_t idx = params->field_idx; uint32_t size; + uint32_t inner_l4, outer_l4; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) { + inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4); + if (inner_l4) { BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n"); return BNXT_TF_RC_ERROR; } @@ -710,14 +721,15 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM; /* Set the udp header bitmap and computed l4 header bitmaps */ - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) || + outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4); + if (outer_l4 || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1); } else { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4); + ULP_UTIL_CHF_IDX_WR(params, 
BNXT_ULP_CHF_IDX_O_L4, 1); } return BNXT_TF_RC_SUCCESS; } @@ -733,8 +745,10 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; uint32_t idx = params->field_idx; uint32_t size; + uint32_t inner_l4, outer_l4; - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) { + inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4); + if (inner_l4) { BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n"); return BNXT_TF_RC_ERROR; } @@ -817,14 +831,15 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM; /* Set the udp header bitmap and computed l4 header bitmaps */ - if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) || + outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4); + if (outer_l4 || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1); } else { ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP); - ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1); } return BNXT_TF_RC_SUCCESS; } @@ -1181,22 +1196,19 @@ ulp_rte_count_act_handler(const struct rte_flow_action *action_item, /* Function to handle the parsing of RTE Flow action PF. 
*/ int32_t ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused, - struct ulp_rte_parser_params *param) + struct ulp_rte_parser_params *params) { - uint8_t *svif_buf; - uint8_t *vnic_buffer; uint32_t svif; /* Update the hdr_bitmap with vnic bit */ - ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); /* copy the PF of the current device into VNIC Property */ - svif_buf = ¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; - ulp_util_field_int_read(svif_buf, &svif); - svif = (uint32_t)bnxt_get_vnic_id(svif); - svif = htonl(svif); - vnic_buffer = ¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC]; - ulp_util_field_int_write(vnic_buffer, svif); + svif = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF); + svif = bnxt_get_vnic_id(svif); + svif = rte_cpu_to_be_32(svif); + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + &svif, BNXT_ULP_ACT_PROP_SZ_VNIC); return BNXT_TF_RC_SUCCESS; } @@ -1207,6 +1219,7 @@ ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, struct ulp_rte_parser_params *param) { const struct rte_flow_action_vf *vf_action; + uint32_t pid; vf_action = action_item->conf; if (vf_action) { @@ -1216,9 +1229,10 @@ ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, return BNXT_TF_RC_PARSE_ERR; } /* TBD: Update the computed VNIC using VF conversion */ + pid = bnxt_get_vnic_id(vf_action->id); + pid = rte_cpu_to_be_32(pid); memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], - &vf_action->id, - BNXT_ULP_ACT_PROP_SZ_VNIC); + &pid, BNXT_ULP_ACT_PROP_SZ_VNIC); } /* Update the hdr_bitmap with count */ @@ -1232,6 +1246,7 @@ ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, struct ulp_rte_parser_params *param) { const struct rte_flow_action_port_id *port_id; + uint32_t pid; port_id = act_item->conf; if (port_id) { @@ -1241,9 +1256,10 @@ ulp_rte_port_id_act_handler(const struct 
rte_flow_action *act_item, return BNXT_TF_RC_PARSE_ERR; } /* TBD: Update the computed VNIC using port conversion */ + pid = bnxt_get_vnic_id(port_id->id); + pid = rte_cpu_to_be_32(pid); memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], - &port_id->id, - BNXT_ULP_ACT_PROP_SZ_VNIC); + &pid, BNXT_ULP_ACT_PROP_SZ_VNIC); } /* Update the hdr_bitmap with count */ @@ -1257,6 +1273,7 @@ ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, struct ulp_rte_parser_params *prm) { const struct rte_flow_action_phy_port *phy_port; + uint32_t pid; phy_port = action_item->conf; if (phy_port) { @@ -1265,9 +1282,10 @@ ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, "Parse Err:Port Original not supported\n"); return BNXT_TF_RC_PARSE_ERR; } + pid = bnxt_get_vnic_id(phy_port->index); + pid = rte_cpu_to_be_32(pid); memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT], - &phy_port->index, - BNXT_ULP_ACT_PROP_SZ_VPORT); + &pid, BNXT_ULP_ACT_PROP_SZ_VPORT); } /* Update the hdr_bitmap with count */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h index 4cc9dcc4e..cbc8a43de 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h @@ -24,6 +24,10 @@ int32_t ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params); +/* Function to handle the implicit VNIC RTE port id */ +int32_t +ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params); + /* * Function to handle the parsing of RTE Flows and placing * the RTE flow items into the ulp structures. diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db.c b/drivers/net/bnxt/tf_ulp/ulp_template_db.c index 411f1e3df..25a558a4e 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_template_db.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2014-2019 Broadcom + * Copyright(c) 2014-2020 Broadcom * All rights reserved. 
*/ @@ -30,6 +30,8 @@ uint32_t ulp_act_prop_map_table[] = { BNXT_ULP_ACT_PROP_SZ_MPLS_POP_NUM, [BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM] = BNXT_ULP_ACT_PROP_SZ_MPLS_PUSH_NUM, + [BNXT_ULP_ACT_PROP_IDX_PORT_ID] = + BNXT_ULP_ACT_PROP_SZ_PORT_ID, [BNXT_ULP_ACT_PROP_IDX_VNIC] = BNXT_ULP_ACT_PROP_SZ_VNIC, [BNXT_ULP_ACT_PROP_IDX_VPORT] = @@ -505,8 +507,55 @@ struct bnxt_ulp_rte_hdr_info ulp_hdr_info[] = { } }; +uint32_t bnxt_ulp_encap_vtag_map[] = { + [0] = BNXT_ULP_ENCAP_VTAG_ENCODING_NOP, + [1] = BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_ECAP_PRI, + [2] = BNXT_ULP_ENCAP_VTAG_ENCODING_DTAG_ECAP_PRI +}; + +uint16_t ulp_class_sig_tbl[BNXT_ULP_CLASS_SIG_TBL_MAX_SZ] = { + [BNXT_ULP_CLASS_HID_0092] = 1 +}; + +struct bnxt_ulp_class_match_info ulp_class_match_list[] = { + [1] = { + .class_hid = BNXT_ULP_CLASS_HID_0092, + .hdr_sig = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_sig = { .bits = + BNXT_ULP_HF0_BITMASK_O_IPV4_SRC_ADDR | + BNXT_ULP_HF0_BITMASK_O_IPV4_DST_ADDR | + BNXT_ULP_HF0_BITMASK_O_UDP_SRC_PORT | + BNXT_ULP_HF0_BITMASK_O_UDP_DST_PORT | + BNXT_ULP_MATCH_TYPE_BITMASK_EM }, + .class_tid = 0, + .act_vnic = 0, + .wc_pri = 0 + } +}; + +uint16_t ulp_act_sig_tbl[BNXT_ULP_ACT_SIG_TBL_MAX_SZ] = { + [BNXT_ULP_ACT_HID_0029] = 1 +}; + +struct bnxt_ulp_act_match_info ulp_act_match_list[] = { + [1] = { + .act_hid = BNXT_ULP_ACT_HID_0029, + .act_sig = { .bits = + BNXT_ULP_ACTION_BIT_MARK | + BNXT_ULP_ACTION_BIT_RSS | + BNXT_ULP_ACTION_BIT_VNIC | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .act_tid = 0 + } +}; + struct bnxt_ulp_mapper_tbl_list_info ulp_class_tmpl_list[] = { - [((0 << BNXT_ULP_LOG2_MAX_NUM_DEV) | BNXT_ULP_DEVICE_ID_WH_PLUS)] = { + [((0 << BNXT_ULP_LOG2_MAX_NUM_DEV) | + BNXT_ULP_DEVICE_ID_WH_PLUS)] = { .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, .num_tbls = 3, .start_tbl_idx = 0 @@ -528,7 +577,7 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .result_bit_size = 64, 
.result_num_fields = 13, .ident_start_idx = 0, - .ident_nums = 1, + .ident_nums = 2, .mark_enable = BNXT_ULP_MARK_ENABLE_NO, .critical_resource = 0, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED @@ -546,7 +595,7 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .result_start_idx = 13, .result_bit_size = 38, .result_num_fields = 8, - .ident_start_idx = 1, + .ident_start_idx = 2, .ident_nums = 1, .mark_enable = BNXT_ULP_MARK_ENABLE_NO, .critical_resource = 0, @@ -560,12 +609,11 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, .key_start_idx = 55, .blob_key_bit_size = 448, - .key_bit_size = 197, + .key_bit_size = 448, .key_num_fields = 11, .result_start_idx = 21, .result_bit_size = 64, .result_num_fields = 9, - .ident_start_idx = 2, .ident_nums = 0, .mark_enable = BNXT_ULP_MARK_ENABLE_YES, .critical_resource = 1, @@ -595,24 +643,22 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { { .field_bit_size = 48, .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, - .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_O_ETH_DMAC >> 8) & 0xff, - BNXT_ULP_HF0_O_ETH_DMAC & 0xff, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 8, .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_HDR_FIELD, - .mask_operand = {(BNXT_ULP_HF0_SVIF_INDEX >> 8) & 0xff, - BNXT_ULP_HF0_SVIF_INDEX & 0xff, + .mask_operand = {(BNXT_ULP_HF0_IDX_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_HF0_IDX_SVIF_INDEX & 0xff, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_SVIF_INDEX >> 8) & 0xff, - BNXT_ULP_HF0_SVIF_INDEX & 0xff, + .spec_operand = {(BNXT_ULP_HF0_IDX_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_HF0_IDX_SVIF_INDEX & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -859,8 +905,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .spec_operand = {BNXT_ULP_SYM_TUN_HDR_TYPE_NONE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, @@ -895,8 +942,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .spec_operand = {BNXT_ULP_SYM_TL4_HDR_TYPE_UDP, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, @@ -949,14 +997,15 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .spec_operand = {BNXT_ULP_SYM_TL3_HDR_TYPE_IPV4, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, - .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} @@ -967,7 +1016,7 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { @@ -1003,8 +1052,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .spec_operand = {BNXT_ULP_SYM_TL2_HDR_TYPE_DIX, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, @@ -1012,7 +1062,7 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { @@ -1038,9 +1088,11 @@ struct bnxt_ulp_mapper_class_key_field_info 
ulp_class_key_field_list[] = { .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x40, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, + .spec_operand = {(BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, @@ -1103,8 +1155,8 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_O_UDP_DST_PORT >> 8) & 0xff, - BNXT_ULP_HF0_O_UDP_DST_PORT & 0xff, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_UDP_DST_PORT & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1114,8 +1166,8 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_O_UDP_SRC_PORT >> 8) & 0xff, - BNXT_ULP_HF0_O_UDP_SRC_PORT & 0xff, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_UDP_SRC_PORT & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1135,8 +1187,8 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = 
BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_O_IPV4_DST_ADDR >> 8) & 0xff, - BNXT_ULP_HF0_O_IPV4_DST_ADDR & 0xff, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_IPV4_DST_ADDR & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1146,8 +1198,8 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_O_IPV4_SRC_ADDR >> 8) & 0xff, - BNXT_ULP_HF0_O_IPV4_SRC_ADDR & 0xff, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_IPV4_SRC_ADDR & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1156,11 +1208,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, - .spec_operand = {(BNXT_ULP_HF0_O_ETH_SMAC >> 8) & 0xff, - BNXT_ULP_HF0_O_ETH_SMAC & 0xff, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 24, @@ -1187,124 +1237,11 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, - .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00} - } -}; - -struct bnxt_ulp_header_match_info ulp_ingress_hdr_match_list[] = { - { - .hdr_bitmap = { .bits = - BNXT_ULP_HDR_BIT_O_ETH | - BNXT_ULP_HDR_BIT_O_IPV4 | - BNXT_ULP_HDR_BIT_O_UDP }, - .start_idx = 0, - .num_entries = 24, - .class_tmpl_id = 0, - .act_vnic = 0 - } -}; - -struct bnxt_ulp_header_match_info ulp_egress_hdr_match_list[] = { -}; - -struct bnxt_ulp_matcher_field_info ulp_field_match[] = { - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_EXACT, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_EXACT, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_ANY, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_ANY, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = 
BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_EXACT, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_EXACT, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_EXACT, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_EXACT, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE - }, - { - .mask_opcode = BNXT_ULP_FMF_MASK_IGNORE, - .spec_opcode = BNXT_ULP_FMF_SPEC_IGNORE + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, + .spec_operand = {(BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} } }; @@ -1319,9 +1256,11 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { }, { .field_bit_size = 7, - .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, - .result_operand = {0x40, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, @@ -1410,8 +1349,8 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { { .field_bit_size = 10, .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, - .result_operand = {(0x00fd >> 8) & 0xff, - 0x00fd & 0xff, + .result_operand = {(0x00f9 >> 8) & 0xff, + 0x00f9 & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1423,9 +1362,11 @@ struct bnxt_ulp_mapper_result_field_info 
ulp_class_result_field_list[] = { }, { .field_bit_size = 8, - .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, - .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { .field_bit_size = 1, @@ -1462,7 +1403,7 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { { .field_bit_size = 5, .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, - .result_operand = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + .result_operand = {0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, { @@ -1500,6 +1441,13 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { }; struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = { + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_PROF_FUNC, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0, + .ident_bit_size = 7, + .ident_bit_pos = 47 + }, { .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, .ident_type = TF_IDENT_TYPE_L2_CTXT, @@ -1516,20 +1464,9 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = { } }; -struct bnxt_ulp_action_match_info ulp_ingress_act_match_list[] = { - { - .act_bitmap = { .bits = - BNXT_ULP_ACTION_BIT_MARK | - BNXT_ULP_ACTION_BIT_RSS }, - .act_tmpl_id = 0 - } -}; - -struct bnxt_ulp_action_match_info ulp_egress_act_match_list[] = { -}; - struct bnxt_ulp_mapper_tbl_list_info ulp_act_tmpl_list[] = { - [((0 << BNXT_ULP_LOG2_MAX_NUM_DEV) | BNXT_ULP_DEVICE_ID_WH_PLUS)] = { + [((0 << BNXT_ULP_LOG2_MAX_NUM_DEV) | + BNXT_ULP_DEVICE_ID_WH_PLUS)] = { .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, .num_tbls = 1, .start_tbl_idx = 0 diff --git 
a/drivers/net/bnxt/tf_ulp/ulp_template_db.h b/drivers/net/bnxt/tf_ulp/ulp_template_db.h index dfab266d6..94d425315 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_db.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_db.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2014-2019 Broadcom + * Copyright(c) 2014-2020 Broadcom * All rights reserved. */ @@ -11,12 +11,23 @@ #ifndef ULP_TEMPLATE_DB_H_ #define ULP_TEMPLATE_DB_H_ +#define BNXT_ULP_REGFILE_MAX_SZ 15 #define BNXT_ULP_MAX_NUM_DEVICES 4 #define BNXT_ULP_LOG2_MAX_NUM_DEV 2 -#define BNXT_ULP_INGRESS_HDR_MATCH_SZ 2 -#define BNXT_ULP_EGRESS_HDR_MATCH_SZ 1 -#define BNXT_ULP_INGRESS_ACT_MATCH_SZ 2 -#define BNXT_ULP_EGRESS_ACT_MATCH_SZ 1 +#define BNXT_ULP_CLASS_SIG_TBL_MAX_SZ 256 +#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 2 +#define BNXT_ULP_CLASS_HID_LOW_PRIME 7919 +#define BNXT_ULP_CLASS_HID_HIGH_PRIME 7919 +#define BNXT_ULP_CLASS_HID_SHFTR 0 +#define BNXT_ULP_CLASS_HID_SHFTL 23 +#define BNXT_ULP_CLASS_HID_MASK 255 +#define BNXT_ULP_ACT_SIG_TBL_MAX_SZ 256 +#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 2 +#define BNXT_ULP_ACT_HID_LOW_PRIME 7919 +#define BNXT_ULP_ACT_HID_HIGH_PRIME 7919 +#define BNXT_ULP_ACT_HID_SHFTR 0 +#define BNXT_ULP_ACT_HID_SHFTL 23 +#define BNXT_ULP_ACT_HID_MASK 255 enum bnxt_ulp_action_bit { BNXT_ULP_ACTION_BIT_MARK = 0x0000000000000001, @@ -54,24 +65,20 @@ enum bnxt_ulp_hdr_bit { BNXT_ULP_HDR_BIT_O_ETH = 0x0000000000000002, BNXT_ULP_HDR_BIT_OO_VLAN = 0x0000000000000004, BNXT_ULP_HDR_BIT_OI_VLAN = 0x0000000000000008, - BNXT_ULP_HDR_BIT_O_L3 = 0x0000000000000010, - BNXT_ULP_HDR_BIT_O_IPV4 = 0x0000000000000020, - BNXT_ULP_HDR_BIT_O_IPV6 = 0x0000000000000040, - BNXT_ULP_HDR_BIT_O_L4 = 0x0000000000000080, - BNXT_ULP_HDR_BIT_O_TCP = 0x0000000000000100, - BNXT_ULP_HDR_BIT_O_UDP = 0x0000000000000200, - BNXT_ULP_HDR_BIT_T_VXLAN = 0x0000000000000400, - BNXT_ULP_HDR_BIT_T_GRE = 0x0000000000000800, - BNXT_ULP_HDR_BIT_I_ETH = 0x0000000000001000, - BNXT_ULP_HDR_BIT_IO_VLAN = 
0x0000000000002000, - BNXT_ULP_HDR_BIT_II_VLAN = 0x0000000000004000, - BNXT_ULP_HDR_BIT_I_L3 = 0x0000000000008000, - BNXT_ULP_HDR_BIT_I_IPV4 = 0x0000000000010000, - BNXT_ULP_HDR_BIT_I_IPV6 = 0x0000000000020000, - BNXT_ULP_HDR_BIT_I_L4 = 0x0000000000040000, - BNXT_ULP_HDR_BIT_I_TCP = 0x0000000000080000, - BNXT_ULP_HDR_BIT_I_UDP = 0x0000000000100000, - BNXT_ULP_HDR_BIT_LAST = 0x0000000000200000 + BNXT_ULP_HDR_BIT_O_IPV4 = 0x0000000000000010, + BNXT_ULP_HDR_BIT_O_IPV6 = 0x0000000000000020, + BNXT_ULP_HDR_BIT_O_TCP = 0x0000000000000040, + BNXT_ULP_HDR_BIT_O_UDP = 0x0000000000000080, + BNXT_ULP_HDR_BIT_T_VXLAN = 0x0000000000000100, + BNXT_ULP_HDR_BIT_T_GRE = 0x0000000000000200, + BNXT_ULP_HDR_BIT_I_ETH = 0x0000000000000400, + BNXT_ULP_HDR_BIT_IO_VLAN = 0x0000000000000800, + BNXT_ULP_HDR_BIT_II_VLAN = 0x0000000000001000, + BNXT_ULP_HDR_BIT_I_IPV4 = 0x0000000000002000, + BNXT_ULP_HDR_BIT_I_IPV6 = 0x0000000000004000, + BNXT_ULP_HDR_BIT_I_TCP = 0x0000000000008000, + BNXT_ULP_HDR_BIT_I_UDP = 0x0000000000010000, + BNXT_ULP_HDR_BIT_LAST = 0x0000000000020000 }; enum bnxt_ulp_act_type { @@ -82,30 +89,42 @@ enum bnxt_ulp_act_type { }; enum bnxt_ulp_byte_order { - BNXT_ULP_BYTE_ORDER_BE, - BNXT_ULP_BYTE_ORDER_LE, - BNXT_ULP_BYTE_ORDER_LAST + BNXT_ULP_BYTE_ORDER_BE = 0, + BNXT_ULP_BYTE_ORDER_LE = 1, + BNXT_ULP_BYTE_ORDER_LAST = 2 }; -enum bnxt_ulp_device_id { - BNXT_ULP_DEVICE_ID_WH_PLUS, - BNXT_ULP_DEVICE_ID_THOR, - BNXT_ULP_DEVICE_ID_STINGRAY, - BNXT_ULP_DEVICE_ID_STINGRAY2, - BNXT_ULP_DEVICE_ID_LAST +enum bnxt_ulp_chf_idx { + BNXT_ULP_CHF_IDX_MPLS_TAG_NUM = 0, + BNXT_ULP_CHF_IDX_O_VTAG_NUM = 1, + BNXT_ULP_CHF_IDX_O_VTAG_PRESENT = 2, + BNXT_ULP_CHF_IDX_O_TWO_VTAGS = 3, + BNXT_ULP_CHF_IDX_I_VTAG_NUM = 4, + BNXT_ULP_CHF_IDX_I_VTAG_PRESENT = 5, + BNXT_ULP_CHF_IDX_I_TWO_VTAGS = 6, + BNXT_ULP_CHF_IDX_INCOMING_IF = 7, + BNXT_ULP_CHF_IDX_DIRECTION = 8, + BNXT_ULP_CHF_IDX_SVIF = 9, + BNXT_ULP_CHF_IDX_O_L3 = 10, + BNXT_ULP_CHF_IDX_I_L3 = 11, + BNXT_ULP_CHF_IDX_O_L4 = 12, + 
BNXT_ULP_CHF_IDX_I_L4 = 13, + BNXT_ULP_CHF_IDX_LAST = 14 }; -enum bnxt_ulp_fmf_mask { - BNXT_ULP_FMF_MASK_IGNORE, - BNXT_ULP_FMF_MASK_ANY, - BNXT_ULP_FMF_MASK_EXACT, - BNXT_ULP_FMF_MASK_WILDCARD, - BNXT_ULP_FMF_MASK_LAST +enum bnxt_ulp_device_id { + BNXT_ULP_DEVICE_ID_WH_PLUS = 0, + BNXT_ULP_DEVICE_ID_THOR = 1, + BNXT_ULP_DEVICE_ID_STINGRAY = 2, + BNXT_ULP_DEVICE_ID_STINGRAY2 = 3, + BNXT_ULP_DEVICE_ID_LAST = 4 }; -enum bnxt_ulp_fmf_spec { - BNXT_ULP_FMF_SPEC_IGNORE = 0, - BNXT_ULP_FMF_SPEC_LAST = 1 +enum bnxt_ulp_hdr_type { + BNXT_ULP_HDR_TYPE_NOT_SUPPORTED = 0, + BNXT_ULP_HDR_TYPE_SUPPORTED = 1, + BNXT_ULP_HDR_TYPE_END = 2, + BNXT_ULP_HDR_TYPE_LAST = 3 }; enum bnxt_ulp_mark_enable { @@ -114,21 +133,6 @@ enum bnxt_ulp_mark_enable { BNXT_ULP_MARK_ENABLE_LAST = 2 }; -enum bnxt_ulp_hdr_field { - BNXT_ULP_HDR_FIELD_MPLS_TAG_NUM = 0, - BNXT_ULP_HDR_FIELD_O_VTAG_NUM = 1, - BNXT_ULP_HDR_FIELD_I_VTAG_NUM = 2, - BNXT_ULP_HDR_FIELD_SVIF_INDEX = 3, - BNXT_ULP_HDR_FIELD_LAST = 4 -}; - -enum bnxt_ulp_hdr_type { - BNXT_ULP_HDR_TYPE_NOT_SUPPORTED = 0, - BNXT_ULP_HDR_TYPE_SUPPORTED = 1, - BNXT_ULP_HDR_TYPE_END = 2, - BNXT_ULP_HDR_TYPE_LAST = 3 -}; - enum bnxt_ulp_mask_opc { BNXT_ULP_MASK_OPC_SET_TO_CONSTANT = 0, BNXT_ULP_MASK_OPC_SET_TO_HDR_FIELD = 1, @@ -137,6 +141,12 @@ enum bnxt_ulp_mask_opc { BNXT_ULP_MASK_OPC_LAST = 4 }; +enum bnxt_ulp_match_type { + BNXT_ULP_MATCH_TYPE_EM = 0, + BNXT_ULP_MATCH_TYPE_WC = 1, + BNXT_ULP_MATCH_TYPE_LAST = 2 +}; + enum bnxt_ulp_priority { BNXT_ULP_PRIORITY_LEVEL_0 = 0, BNXT_ULP_PRIORITY_LEVEL_1 = 1, @@ -151,20 +161,22 @@ enum bnxt_ulp_priority { }; enum bnxt_ulp_regfile_index { - BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 = 0, - BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_1 = 1, - BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 = 2, - BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_1 = 3, - BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 = 4, - BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_1 = 5, - BNXT_ULP_REGFILE_INDEX_WC_PROFILE_ID_0 = 6, - BNXT_ULP_REGFILE_INDEX_WC_PROFILE_ID_1 = 7, - 
BNXT_ULP_REGFILE_INDEX_ACTION_PTR_MAIN = 8, - BNXT_ULP_REGFILE_INDEX_ACTION_PTR_0 = 9, - BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_0 = 10, - BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_1 = 11, - BNXT_ULP_REGFILE_INDEX_NOT_USED = 12, - BNXT_ULP_REGFILE_INDEX_LAST = 13 + BNXT_ULP_REGFILE_INDEX_CLASS_TID = 0, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 = 1, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_1 = 2, + BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 = 3, + BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_1 = 4, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 = 5, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_1 = 6, + BNXT_ULP_REGFILE_INDEX_WC_PROFILE_ID_0 = 7, + BNXT_ULP_REGFILE_INDEX_WC_PROFILE_ID_1 = 8, + BNXT_ULP_REGFILE_INDEX_ACTION_PTR_MAIN = 9, + BNXT_ULP_REGFILE_INDEX_ACTION_PTR_0 = 10, + BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_0 = 11, + BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_1 = 12, + BNXT_ULP_REGFILE_INDEX_CRITICAL_RESOURCE = 13, + BNXT_ULP_REGFILE_INDEX_NOT_USED = 14, + BNXT_ULP_REGFILE_INDEX_LAST = 15 }; enum bnxt_ulp_resource_func { @@ -179,7 +191,7 @@ enum bnxt_ulp_resource_func { enum bnxt_ulp_result_opc { BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT = 0, BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP = 1, - BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP_SZ = 2, + BNXT_ULP_RESULT_OPC_SET_TO_ENCAP_ACT_PROP_SZ = 2, BNXT_ULP_RESULT_OPC_SET_TO_REGFILE = 3, BNXT_ULP_RESULT_OPC_LAST = 4 }; @@ -198,6 +210,45 @@ enum bnxt_ulp_spec_opc { BNXT_ULP_SPEC_OPC_LAST = 4 }; +enum bnxt_ulp_encap_vtag_encoding { + BNXT_ULP_ENCAP_VTAG_ENCODING_DTAG_ECAP_PRI = 4, + BNXT_ULP_ENCAP_VTAG_ENCODING_DTAG_REMAP_DIFFSERV = 5, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_ECAP_PRI = 6, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_DIFFSERV = 7, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_0 = 8, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_1 = 9, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_2 = 10, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_3 = 11, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_4 = 12, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_5 = 13, + 
BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_6 = 14, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_7 = 15, + BNXT_ULP_ENCAP_VTAG_ENCODING_NOP = 0, + BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_ECAP_PRI = 1, + BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_IVLAN_PRI = 2, + BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_REMAP_DIFFSERV = 3 +}; + +enum bnxt_ulp_fdb_resource_flags { + BNXT_ULP_FDB_RESOURCE_FLAGS_DIR_EGR = 0x01, + BNXT_ULP_FDB_RESOURCE_FLAGS_DIR_INGR = 0x00 +}; + +enum bnxt_ulp_fdb_type { + BNXT_ULP_FDB_TYPE_DEFAULT = 1, + BNXT_ULP_FDB_TYPE_REGULAR = 0 +}; + +enum bnxt_ulp_flow_dir_bitmask { + BNXT_ULP_FLOW_DIR_BITMASK_EGR = 0x8000000000000000, + BNXT_ULP_FLOW_DIR_BITMASK_ING = 0x0000000000000000 +}; + +enum bnxt_ulp_match_type_bitmask { + BNXT_ULP_MATCH_TYPE_BITMASK_EM = 0x0000000000000000, + BNXT_ULP_MATCH_TYPE_BITMASK_WM = 0x0000000000000001 +}; + enum bnxt_ulp_sym { BNXT_ULP_SYM_BIG_ENDIAN = 0, BNXT_ULP_SYM_DECAP_FUNC_NONE = 0, @@ -208,6 +259,10 @@ enum bnxt_ulp_sym { BNXT_ULP_SYM_DECAP_FUNC_THRU_TL3 = 8, BNXT_ULP_SYM_DECAP_FUNC_THRU_TL4 = 9, BNXT_ULP_SYM_DECAP_FUNC_THRU_TUN = 10, + BNXT_ULP_SYM_ECV_CUSTOM_EN_NO = 0, + BNXT_ULP_SYM_ECV_CUSTOM_EN_YES = 1, + BNXT_ULP_SYM_ECV_L2_EN_NO = 0, + BNXT_ULP_SYM_ECV_L2_EN_YES = 1, BNXT_ULP_SYM_ECV_L3_TYPE_IPV4 = 4, BNXT_ULP_SYM_ECV_L3_TYPE_IPV6 = 5, BNXT_ULP_SYM_ECV_L3_TYPE_MPLS_8847 = 6, @@ -224,6 +279,8 @@ enum bnxt_ulp_sym { BNXT_ULP_SYM_ECV_TUN_TYPE_NONE = 0, BNXT_ULP_SYM_ECV_TUN_TYPE_NVGRE = 4, BNXT_ULP_SYM_ECV_TUN_TYPE_VXLAN = 2, + BNXT_ULP_SYM_ECV_VALID_NO = 0, + BNXT_ULP_SYM_ECV_VALID_YES = 1, BNXT_ULP_SYM_IP_PROTO_UDP = 17, BNXT_ULP_SYM_L2_HDR_TYPE_DIX = 0, BNXT_ULP_SYM_L2_HDR_TYPE_LLC = 2, @@ -244,8 +301,15 @@ enum bnxt_ulp_sym { BNXT_ULP_SYM_L4_HDR_TYPE_UPAR1 = 3, BNXT_ULP_SYM_L4_HDR_TYPE_UPAR2 = 4, BNXT_ULP_SYM_LITTLE_ENDIAN = 1, + BNXT_ULP_SYM_MATCH_TYPE_EM = 0, + BNXT_ULP_SYM_MATCH_TYPE_WM = 1, BNXT_ULP_SYM_NO = 0, BNXT_ULP_SYM_PKT_TYPE_L2 = 0, + BNXT_ULP_SYM_POP_VLAN_NO = 0, + BNXT_ULP_SYM_POP_VLAN_YES = 1, + 
BNXT_ULP_SYM_STINGRAY2_LOOPBACK_PORT = 3, + BNXT_ULP_SYM_STINGRAY_LOOPBACK_PORT = 3, + BNXT_ULP_SYM_THOR_LOOPBACK_PORT = 3, BNXT_ULP_SYM_TL2_HDR_TYPE_DIX = 0, BNXT_ULP_SYM_TL3_HDR_TYPE_IPV4 = 0, BNXT_ULP_SYM_TL3_HDR_TYPE_IPV6 = 1, @@ -262,6 +326,7 @@ enum bnxt_ulp_sym { BNXT_ULP_SYM_TUN_HDR_TYPE_UPAR1 = 8, BNXT_ULP_SYM_TUN_HDR_TYPE_UPAR2 = 9, BNXT_ULP_SYM_TUN_HDR_TYPE_VXLAN = 0, + BNXT_ULP_SYM_WH_PLUS_LOOPBACK_PORT = 3, BNXT_ULP_SYM_YES = 1 }; @@ -274,6 +339,7 @@ enum bnxt_ulp_act_prop_sz { BNXT_ULP_ACT_PROP_SZ_ENCAP_L3_TYPE = 4, BNXT_ULP_ACT_PROP_SZ_MPLS_POP_NUM = 4, BNXT_ULP_ACT_PROP_SZ_MPLS_PUSH_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_PORT_ID = 4, BNXT_ULP_ACT_PROP_SZ_VNIC = 4, BNXT_ULP_ACT_PROP_SZ_VPORT = 4, BNXT_ULP_ACT_PROP_SZ_MARK = 4, @@ -317,38 +383,46 @@ enum bnxt_ulp_act_prop_idx { BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE = 20, BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM = 24, BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM = 28, - BNXT_ULP_ACT_PROP_IDX_VNIC = 32, - BNXT_ULP_ACT_PROP_IDX_VPORT = 36, - BNXT_ULP_ACT_PROP_IDX_MARK = 40, - BNXT_ULP_ACT_PROP_IDX_COUNT = 44, - BNXT_ULP_ACT_PROP_IDX_METER = 48, - BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC = 52, - BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST = 60, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_VLAN = 68, - BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_PCP = 72, - BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_VID = 76, - BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC = 80, - BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST = 84, - BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC = 88, - BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST = 104, - BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC = 120, - BNXT_ULP_ACT_PROP_IDX_SET_TP_DST = 124, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0 = 128, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1 = 132, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2 = 136, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3 = 140, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4 = 144, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5 = 148, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6 = 152, - BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7 = 156, - BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC = 160, - 
BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC = 166, - BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG = 172, - BNXT_ULP_ACT_PROP_IDX_ENCAP_IP = 180, - BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC = 212, - BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP = 228, - BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN = 232, - BNXT_ULP_ACT_PROP_IDX_LAST = 264 + BNXT_ULP_ACT_PROP_IDX_PORT_ID = 32, + BNXT_ULP_ACT_PROP_IDX_VNIC = 36, + BNXT_ULP_ACT_PROP_IDX_VPORT = 40, + BNXT_ULP_ACT_PROP_IDX_MARK = 44, + BNXT_ULP_ACT_PROP_IDX_COUNT = 48, + BNXT_ULP_ACT_PROP_IDX_METER = 52, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC = 56, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST = 64, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_VLAN = 72, + BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_PCP = 76, + BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_VID = 80, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC = 84, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST = 88, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC = 92, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST = 108, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC = 124, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST = 128, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0 = 132, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1 = 136, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2 = 140, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3 = 144, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4 = 148, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5 = 152, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6 = 156, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7 = 160, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC = 164, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC = 170, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG = 176, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP = 184, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC = 216, + BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP = 232, + BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN = 236, + BNXT_ULP_ACT_PROP_IDX_LAST = 268 +}; +enum bnxt_ulp_class_hid { + BNXT_ULP_CLASS_HID_0092 = 0x0092 +}; + +enum bnxt_ulp_act_hid { + BNXT_ULP_ACT_HID_0029 = 0x0029 }; #endif /* _ULP_TEMPLATE_DB_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h b/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h index 1bc4449ab..587de8a83 100644 --- 
a/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h @@ -1,130 +1,63 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2014-2020 Broadcom - * All rights reserved_ + * All rights reserved. */ -/* date: Mon Mar 9 02:37:53 2020 - * version: 0_0 - */ - -#ifndef _ULP_HDR_FIELD_ENUMS_H_ -#define _ULP_HDR_FIELD_ENUMS_H_ +#ifndef ULP_HDR_FIELD_ENUMS_H_ +#define ULP_HDR_FIELD_ENUMS_H_ enum bnxt_ulp_hf0 { - BNXT_ULP_HF0_MPLS_TAG_NUM = 0, - BNXT_ULP_HF0_O_VTAG_NUM = 1, - BNXT_ULP_HF0_I_VTAG_NUM = 2, - BNXT_ULP_HF0_SVIF_INDEX = 3, - BNXT_ULP_HF0_O_ETH_DMAC = 4, - BNXT_ULP_HF0_O_ETH_SMAC = 5, - BNXT_ULP_HF0_O_ETH_TYPE = 6, - BNXT_ULP_HF0_OO_VLAN_CFI_PRI = 7, - BNXT_ULP_HF0_OO_VLAN_VID = 8, - BNXT_ULP_HF0_OO_VLAN_TYPE = 9, - BNXT_ULP_HF0_OI_VLAN_CFI_PRI = 10, - BNXT_ULP_HF0_OI_VLAN_VID = 11, - BNXT_ULP_HF0_OI_VLAN_TYPE = 12, - BNXT_ULP_HF0_O_IPV4_VER = 13, - BNXT_ULP_HF0_O_IPV4_TOS = 14, - BNXT_ULP_HF0_O_IPV4_LEN = 15, - BNXT_ULP_HF0_O_IPV4_FRAG_ID = 16, - BNXT_ULP_HF0_O_IPV4_FRAG_OFF = 17, - BNXT_ULP_HF0_O_IPV4_TTL = 18, - BNXT_ULP_HF0_O_IPV4_NEXT_PID = 19, - BNXT_ULP_HF0_O_IPV4_CSUM = 20, - BNXT_ULP_HF0_O_IPV4_SRC_ADDR = 21, - BNXT_ULP_HF0_O_IPV4_DST_ADDR = 22, - BNXT_ULP_HF0_O_UDP_SRC_PORT = 23, - BNXT_ULP_HF0_O_UDP_DST_PORT = 24, - BNXT_ULP_HF0_O_UDP_LENGTH = 25, - BNXT_ULP_HF0_O_UDP_CSUM = 26, - BNXT_ULP_HF0_T_VXLAN_FLAGS = 27, - BNXT_ULP_HF0_T_VXLAN_RSVD0 = 28, - BNXT_ULP_HF0_T_VXLAN_VNI = 29, - BNXT_ULP_HF0_T_VXLAN_RSVD1 = 30, - BNXT_ULP_HF0_I_ETH_DMAC = 31, - BNXT_ULP_HF0_I_ETH_SMAC = 32, - BNXT_ULP_HF0_I_ETH_TYPE = 33, - BNXT_ULP_HF0_IO_VLAN_CFI_PRI = 34, - BNXT_ULP_HF0_IO_VLAN_VID = 35, - BNXT_ULP_HF0_IO_VLAN_TYPE = 36, - BNXT_ULP_HF0_II_VLAN_CFI_PRI = 37, - BNXT_ULP_HF0_II_VLAN_VID = 38, - BNXT_ULP_HF0_II_VLAN_TYPE = 39, - BNXT_ULP_HF0_I_IPV4_VER = 40, - BNXT_ULP_HF0_I_IPV4_TOS = 41, - BNXT_ULP_HF0_I_IPV4_LEN = 42, - BNXT_ULP_HF0_I_IPV4_FRAG_ID = 43, - BNXT_ULP_HF0_I_IPV4_FRAG_OFF = 44, - 
BNXT_ULP_HF0_I_IPV4_TTL = 45, - BNXT_ULP_HF0_I_IPV4_NEXT_PID = 46, - BNXT_ULP_HF0_I_IPV4_CSUM = 47, - BNXT_ULP_HF0_I_IPV4_SRC_ADDR = 48, - BNXT_ULP_HF0_I_IPV4_DST_ADDR = 49, - BNXT_ULP_HF0_I_UDP_SRC_PORT = 50, - BNXT_ULP_HF0_I_UDP_DST_PORT = 51, - BNXT_ULP_HF0_I_UDP_LENGTH = 52, - BNXT_ULP_HF0_I_UDP_CSUM = 53 -}; - -enum bnxt_ulp_hf1 { - BNXT_ULP_HF1_MPLS_TAG_NUM = 0, - BNXT_ULP_HF1_O_VTAG_NUM = 1, - BNXT_ULP_HF1_I_VTAG_NUM = 2, - BNXT_ULP_HF1_SVIF_INDEX = 3, - BNXT_ULP_HF1_O_ETH_DMAC = 4, - BNXT_ULP_HF1_O_ETH_SMAC = 5, - BNXT_ULP_HF1_O_ETH_TYPE = 6, - BNXT_ULP_HF1_OO_VLAN_CFI_PRI = 7, - BNXT_ULP_HF1_OO_VLAN_VID = 8, - BNXT_ULP_HF1_OO_VLAN_TYPE = 9, - BNXT_ULP_HF1_OI_VLAN_CFI_PRI = 10, - BNXT_ULP_HF1_OI_VLAN_VID = 11, - BNXT_ULP_HF1_OI_VLAN_TYPE = 12, - BNXT_ULP_HF1_O_IPV4_VER = 13, - BNXT_ULP_HF1_O_IPV4_TOS = 14, - BNXT_ULP_HF1_O_IPV4_LEN = 15, - BNXT_ULP_HF1_O_IPV4_FRAG_ID = 16, - BNXT_ULP_HF1_O_IPV4_FRAG_OFF = 17, - BNXT_ULP_HF1_O_IPV4_TTL = 18, - BNXT_ULP_HF1_O_IPV4_NEXT_PID = 19, - BNXT_ULP_HF1_O_IPV4_CSUM = 20, - BNXT_ULP_HF1_O_IPV4_SRC_ADDR = 21, - BNXT_ULP_HF1_O_IPV4_DST_ADDR = 22, - BNXT_ULP_HF1_O_UDP_SRC_PORT = 23, - BNXT_ULP_HF1_O_UDP_DST_PORT = 24, - BNXT_ULP_HF1_O_UDP_LENGTH = 25, - BNXT_ULP_HF1_O_UDP_CSUM = 26 + BNXT_ULP_HF0_IDX_SVIF_INDEX = 0, + BNXT_ULP_HF0_IDX_O_ETH_DMAC = 1, + BNXT_ULP_HF0_IDX_O_ETH_SMAC = 2, + BNXT_ULP_HF0_IDX_O_ETH_TYPE = 3, + BNXT_ULP_HF0_IDX_OO_VLAN_CFI_PRI = 4, + BNXT_ULP_HF0_IDX_OO_VLAN_VID = 5, + BNXT_ULP_HF0_IDX_OO_VLAN_TYPE = 6, + BNXT_ULP_HF0_IDX_OI_VLAN_CFI_PRI = 7, + BNXT_ULP_HF0_IDX_OI_VLAN_VID = 8, + BNXT_ULP_HF0_IDX_OI_VLAN_TYPE = 9, + BNXT_ULP_HF0_IDX_O_IPV4_VER = 10, + BNXT_ULP_HF0_IDX_O_IPV4_TOS = 11, + BNXT_ULP_HF0_IDX_O_IPV4_LEN = 12, + BNXT_ULP_HF0_IDX_O_IPV4_FRAG_ID = 13, + BNXT_ULP_HF0_IDX_O_IPV4_FRAG_OFF = 14, + BNXT_ULP_HF0_IDX_O_IPV4_TTL = 15, + BNXT_ULP_HF0_IDX_O_IPV4_NEXT_PID = 16, + BNXT_ULP_HF0_IDX_O_IPV4_CSUM = 17, + BNXT_ULP_HF0_IDX_O_IPV4_SRC_ADDR = 18, + BNXT_ULP_HF0_IDX_O_IPV4_DST_ADDR = 19, + 
BNXT_ULP_HF0_IDX_O_UDP_SRC_PORT = 20, + BNXT_ULP_HF0_IDX_O_UDP_DST_PORT = 21, + BNXT_ULP_HF0_IDX_O_UDP_LENGTH = 22, + BNXT_ULP_HF0_IDX_O_UDP_CSUM = 23 }; -enum bnxt_ulp_hf2 { - BNXT_ULP_HF2_MPLS_TAG_NUM = 0, - BNXT_ULP_HF2_O_VTAG_NUM = 1, - BNXT_ULP_HF2_I_VTAG_NUM = 2, - BNXT_ULP_HF2_SVIF_INDEX = 3, - BNXT_ULP_HF2_O_ETH_DMAC = 4, - BNXT_ULP_HF2_O_ETH_SMAC = 5, - BNXT_ULP_HF2_O_ETH_TYPE = 6, - BNXT_ULP_HF2_OO_VLAN_CFI_PRI = 7, - BNXT_ULP_HF2_OO_VLAN_VID = 8, - BNXT_ULP_HF2_OO_VLAN_TYPE = 9, - BNXT_ULP_HF2_OI_VLAN_CFI_PRI = 10, - BNXT_ULP_HF2_OI_VLAN_VID = 11, - BNXT_ULP_HF2_OI_VLAN_TYPE = 12, - BNXT_ULP_HF2_O_IPV4_VER = 13, - BNXT_ULP_HF2_O_IPV4_TOS = 14, - BNXT_ULP_HF2_O_IPV4_LEN = 15, - BNXT_ULP_HF2_O_IPV4_FRAG_ID = 16, - BNXT_ULP_HF2_O_IPV4_FRAG_OFF = 17, - BNXT_ULP_HF2_O_IPV4_TTL = 18, - BNXT_ULP_HF2_O_IPV4_NEXT_PID = 19, - BNXT_ULP_HF2_O_IPV4_CSUM = 20, - BNXT_ULP_HF2_O_IPV4_SRC_ADDR = 21, - BNXT_ULP_HF2_O_IPV4_DST_ADDR = 22, - BNXT_ULP_HF2_O_UDP_SRC_PORT = 23, - BNXT_ULP_HF2_O_UDP_DST_PORT = 24, - BNXT_ULP_HF2_O_UDP_LENGTH = 25, - BNXT_ULP_HF2_O_UDP_CSUM = 26 +enum bnxt_ulp_hf_bitmask0 { + BNXT_ULP_HF0_BITMASK_SVIF_INDEX = 0x8000000000000000, + BNXT_ULP_HF0_BITMASK_O_ETH_DMAC = 0x4000000000000000, + BNXT_ULP_HF0_BITMASK_O_ETH_SMAC = 0x2000000000000000, + BNXT_ULP_HF0_BITMASK_O_ETH_TYPE = 0x1000000000000000, + BNXT_ULP_HF0_BITMASK_OO_VLAN_CFI_PRI = 0x0800000000000000, + BNXT_ULP_HF0_BITMASK_OO_VLAN_VID = 0x0400000000000000, + BNXT_ULP_HF0_BITMASK_OO_VLAN_TYPE = 0x0200000000000000, + BNXT_ULP_HF0_BITMASK_OI_VLAN_CFI_PRI = 0x0100000000000000, + BNXT_ULP_HF0_BITMASK_OI_VLAN_VID = 0x0080000000000000, + BNXT_ULP_HF0_BITMASK_OI_VLAN_TYPE = 0x0040000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_VER = 0x0020000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_TOS = 0x0010000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_LEN = 0x0008000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_FRAG_ID = 0x0004000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_FRAG_OFF = 0x0002000000000000, + 
BNXT_ULP_HF0_BITMASK_O_IPV4_TTL = 0x0001000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_NEXT_PID = 0x0000800000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_CSUM = 0x0000400000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_SRC_ADDR = 0x0000200000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_DST_ADDR = 0x0000100000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_SRC_PORT = 0x0000080000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_DST_PORT = 0x0000040000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_LENGTH = 0x0000020000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_CSUM = 0x0000010000000000 }; #endif /* _ULP_HDR_FIELD_ENUMS_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h index 8adbf7a24..c2d3ccb3e 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h @@ -28,11 +28,16 @@ #define BNXT_ULP_PROTO_HDR_TCP_NUM 9 #define BNXT_ULP_PROTO_HDR_VXLAN_NUM 4 #define BNXT_ULP_PROTO_HDR_MAX 128 +#define BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX 0 struct ulp_rte_hdr_bitmap { uint64_t bits; }; +struct ulp_rte_field_bitmap { + uint64_t bits; +}; + /* Structure to store the protocol fields */ #define RTE_PARSER_FLOW_HDR_FIELD_SIZE 16 struct ulp_rte_hdr_field { @@ -53,7 +58,9 @@ struct ulp_rte_act_prop { /* Structure to be used for passing all the parser functions */ struct ulp_rte_parser_params { struct ulp_rte_hdr_bitmap hdr_bitmap; + struct ulp_rte_field_bitmap fld_bitmap; struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX]; + uint32_t comp_fld[BNXT_ULP_CHF_IDX_LAST]; uint32_t field_idx; uint32_t vlan_idx; struct ulp_rte_act_bitmap act_bitmap; @@ -72,11 +79,6 @@ struct bnxt_ulp_rte_hdr_info { /* Flow Parser Header Information Structure Array defined in template source*/ extern struct bnxt_ulp_rte_hdr_info ulp_hdr_info[]; -struct bnxt_ulp_matcher_field_info { - enum bnxt_ulp_fmf_mask mask_opcode; - enum bnxt_ulp_fmf_spec spec_opcode; -}; - /* Flow Parser Action Information Structure */ struct bnxt_ulp_rte_act_info { enum 
bnxt_ulp_act_type act_type; @@ -98,12 +100,22 @@ struct bnxt_ulp_header_match_info { uint32_t act_vnic; }; -/* Flow Matcher templates Structure Array defined in template source*/ -extern struct bnxt_ulp_header_match_info ulp_ingress_hdr_match_list[]; -extern struct bnxt_ulp_header_match_info ulp_egress_hdr_match_list[]; +struct ulp_rte_bitmap { + uint64_t bits; +}; + +struct bnxt_ulp_class_match_info { + struct ulp_rte_bitmap hdr_sig; + struct ulp_rte_bitmap field_sig; + uint32_t class_hid; + uint32_t class_tid; + uint8_t act_vnic; + uint8_t wc_pri; +}; -/* Flow field match Information Structure Array defined in template source*/ -extern struct bnxt_ulp_matcher_field_info ulp_field_match[]; +/* Flow Matcher templates Structure for class entries */ +extern uint16_t ulp_class_sig_tbl[]; +extern struct bnxt_ulp_class_match_info ulp_class_match_list[]; /* Flow Matcher Action structures */ struct bnxt_ulp_action_match_info { @@ -111,9 +123,15 @@ struct bnxt_ulp_action_match_info { uint32_t act_tmpl_id; }; -/* Flow Matcher templates Structure Array defined in template source */ -extern struct bnxt_ulp_action_match_info ulp_ingress_act_match_list[]; -extern struct bnxt_ulp_action_match_info ulp_egress_act_match_list[]; +struct bnxt_ulp_act_match_info { + struct ulp_rte_bitmap act_sig; + uint32_t act_hid; + uint32_t act_tid; +}; + +/* Flow Matcher templates Structure for action entries */ +extern uint16_t ulp_act_sig_tbl[]; +extern struct bnxt_ulp_act_match_info ulp_act_match_list[]; /* Device specific parameters */ struct bnxt_ulp_device_params { @@ -179,7 +197,7 @@ struct bnxt_ulp_mapper_act_tbl_info { }; struct bnxt_ulp_mapper_class_key_field_info { - uint8_t name[64]; + uint8_t description[64]; enum bnxt_ulp_mask_opc mask_opcode; enum bnxt_ulp_spec_opc spec_opcode; uint16_t field_bit_size; @@ -188,14 +206,14 @@ struct bnxt_ulp_mapper_class_key_field_info { }; struct bnxt_ulp_mapper_result_field_info { - uint8_t name[64]; + uint8_t description[64]; enum 
bnxt_ulp_result_opc result_opcode; uint16_t field_bit_size; uint8_t result_operand[16]; }; struct bnxt_ulp_mapper_ident_info { - uint8_t name[64]; + uint8_t description[64]; uint32_t resource_func; uint16_t ident_type; diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.c b/drivers/net/bnxt/tf_ulp/ulp_utils.c index e1d652864..0150c1d49 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_utils.c +++ b/drivers/net/bnxt/tf_ulp/ulp_utils.c @@ -519,3 +519,36 @@ int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size) { return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1); } + +/* Function to check if bitmap is zero.Return 1 on success */ +uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size) +{ + while (size-- > 0) { + if (*bitmap != 0) + return 0; + bitmap++; + } + return 1; +} + +/* Function to check if bitmap is ones. Return 1 on success */ +uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size) +{ + while (size-- > 0) { + if (*bitmap != 0xFF) + return 0; + bitmap++; + } + return 1; +} + +/* Function to check if bitmap is not zero. 
Return 1 on success */ +uint32_t ulp_bitmap_notzero(uint8_t *bitmap, int32_t size) +{ + while (size-- > 0) { + if (*bitmap != 0) + return 1; + bitmap++; + } + return 0; +} diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.h b/drivers/net/bnxt/tf_ulp/ulp_utils.h index f729ff681..5db393398 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_utils.h +++ b/drivers/net/bnxt/tf_ulp/ulp_utils.h @@ -16,7 +16,7 @@ #define ULP_BITMAP_SET(bitmap, val) ((bitmap) |= (val)) #define ULP_BITMAP_RESET(bitmap, val) ((bitmap) &= ~(val)) #define ULP_BITMAP_ISSET(bitmap, val) ((bitmap) & (val)) -#define ULP_BITSET_CMP(b1, b2) memcmp(&(b1)->bits, \ +#define ULP_BITMAP_CMP(b1, b2) memcmp(&(b1)->bits, \ &(b2)->bits, sizeof((b1)->bits)) /* * Macros for bitmap sets and gets @@ -50,6 +50,12 @@ /* Macro to convert bits to bytes with no round off*/ #define ULP_BITS_2_BYTE_NR(bits_x) ((bits_x) / 8) +/* Macros to read the computed fields */ +#define ULP_UTIL_CHF_IDX_RD(params, idx) \ + rte_be_to_cpu_32((params)->comp_fld[(idx)]) + +#define ULP_UTIL_CHF_IDX_WR(params, idx, val) \ + ((params)->comp_fld[(idx)] = rte_cpu_to_be_32((val))) /* * Making the blob statically sized to 128 bytes for now. * The blob must be initialized with ulp_blob_init prior to using. @@ -276,4 +282,13 @@ ulp_encap_buffer_copy(uint8_t *dst, */ int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size); +/* Function to check if bitmap is zero.Return 1 on success */ +uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size); + +/* Function to check if bitmap is ones. Return 1 on success */ +uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size); + +/* Function to check if bitmap is not zero. 
Return 1 on success */ +uint32_t ulp_bitmap_notzero(uint8_t *bitmap, int32_t size); + #endif /* _ULP_UTILS_H_ */ From patchwork Fri Apr 17 16:19:16 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68794 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id C24D6A0597; Fri, 17 Apr 2020 18:20:54 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id F40A01EA49; Fri, 17 Apr 2020 18:19:39 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 26FF51E9DE for ; Fri, 17 Apr 2020 18:19:28 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id BEAD330C1E8; Fri, 17 Apr 2020 09:08:07 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com BEAD330C1E8 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139687; bh=gaj0H8cUZSIn8694koqZ7uAN+BCbI+BjUdqq8OvbqbE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=tx4skEAPVcMld2D8ergkpfHfH+dKHfB9O28rjjl8+36Tn0zacSEXIjLntBa4uQ6ow qxkCvY6LyNxh2Vt/xAhWp1/I6whUVd2kyN0m4cXf8h9XHP2cGwTPOgL2h0hAgWk2E8 cf0XaRnc6ta3w5jBmUOvvIEZsKMpqwRZ6tzar6XA= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 068DE14008C; Fri, 17 Apr 2020 09:19:27 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Kishore Padmanabha , Venkat Duvvuru , Michael Baucom Date: Fri, 17 Apr 2020 09:19:16 -0700 Message-Id: <20200417161920.85858-9-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: 
<20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 08/12] net/bnxt: add session and function flow flush X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Kishore Padmanabha The ulp flow flush has been extended to support session flow flush and function flow flush. The session flow flush is called when the device is the sole owner of the session, and it deletes all the flows associated with that session. The function flow flush is called if the device function is not the sole owner of the session; it deletes all the flows that are associated with that device function. Reviewed-by: Venkat Duvvuru Reviewed-by: Michael Baucom Reviewed-by: Ajit Khaparde Signed-off-by: Kishore Padmanabha Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/bnxt.h | 1 + drivers/net/bnxt/bnxt_ethdev.c | 12 ++ drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 8 +- drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 31 +++-- drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 149 ++++++++++++++++++++++-- drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 46 +++++++- drivers/net/bnxt/tf_ulp/ulp_mapper.c | 2 +- drivers/net/bnxt/tf_ulp/ulp_mapper.h | 1 + 8 files changed, 224 insertions(+), 26 deletions(-) diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index 00a4d0b3e..941eee6cd 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -755,6 +755,7 @@ void bnxt_ulp_deinit(struct bnxt *bp); uint16_t bnxt_get_vnic_id(uint16_t port); uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif); +uint16_t bnxt_get_fw_func_id(uint16_t port); void bnxt_cancel_fc_thread(struct bnxt *bp); void bnxt_flow_cnt_alarm_cb(void *arg); diff --git 
a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index bd2c3fcb6..3fa0daffd 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -4901,6 +4901,18 @@ bnxt_get_vnic_id(uint16_t port) return vnic->fw_vnic_id; } +uint16_t +bnxt_get_fw_func_id(uint16_t port) +{ + struct rte_eth_dev *eth_dev; + struct bnxt *bp; + + eth_dev = &rte_eth_devices[port]; + bp = eth_dev->data->dev_private; + + return bp->fw_fid; +} + static int bnxt_init_fw(struct bnxt *bp) { uint16_t mtu; diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c index 56e08f233..c67da6d76 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c @@ -659,10 +659,8 @@ int32_t bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx, struct bnxt_ulp_flow_db *flow_db) { - if (!ulp_ctx || !ulp_ctx->cfg_data) { - BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + if (!ulp_ctx || !ulp_ctx->cfg_data) return -EINVAL; - } ulp_ctx->cfg_data->flow_db = flow_db; return 0; @@ -672,10 +670,8 @@ bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx, struct bnxt_ulp_flow_db * bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx) { - if (!ulp_ctx || !ulp_ctx->cfg_data) { - BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + if (!ulp_ctx || !ulp_ctx->cfg_data) return NULL; - } return ulp_ctx->cfg_data->flow_db; } diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 7f7aa24e6..f0c812ced 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -125,6 +125,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, mapper_cparms.act_prop = ¶ms.act_prop; mapper_cparms.class_tid = class_id; mapper_cparms.act_tid = act_tmpl; + mapper_cparms.func_id = bnxt_get_fw_func_id(dev->data->port_id); /* call the ulp mapper to create the flow in the hardware */ ret = ulp_mapper_flow_create(ulp_ctx, @@ -202,7 +203,8 @@ 
bnxt_ulp_flow_destroy(struct rte_eth_dev *dev, { int ret = 0; struct bnxt_ulp_context *ulp_ctx; - uint32_t fid; + uint32_t flow_id; + uint16_t func_id; ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); if (!ulp_ctx) { @@ -213,9 +215,19 @@ bnxt_ulp_flow_destroy(struct rte_eth_dev *dev, return -EINVAL; } - fid = (uint32_t)(uintptr_t)flow; + flow_id = (uint32_t)(uintptr_t)flow; + func_id = bnxt_get_fw_func_id(dev->data->port_id); - ret = ulp_mapper_flow_destroy(ulp_ctx, fid); + if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) == + false) { + BNXT_TF_DBG(ERR, "Incorrect device params\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + return -EINVAL; + } + + ret = ulp_mapper_flow_destroy(ulp_ctx, flow_id); if (ret) rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -230,8 +242,9 @@ bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) { struct bnxt_ulp_context *ulp_ctx; - int32_t ret; + int32_t ret = 0; struct bnxt *bp; + uint16_t func_id; ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); if (!ulp_ctx) { @@ -244,10 +257,12 @@ bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev, bp = eth_dev->data->dev_private; /* Free the resources for the last device */ - if (!ulp_ctx_deinit_allowed(bp)) - return 0; - - ret = ulp_flow_db_flush_flows(ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE); + if (ulp_ctx_deinit_allowed(bp)) { + ret = ulp_flow_db_session_flow_flush(ulp_ctx); + } else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) { + func_id = bnxt_get_fw_func_id(eth_dev->data->port_id); + ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id); + } if (ret) rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c index 9e7f9f5e3..35a7f868a 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c @@ -209,6 +209,27 @@ 
ulp_flow_db_dealloc_resource(struct bnxt_ulp_flow_db *flow_db, } } +/* + * Helper function to add function id to the flow table + * + * flow_db [in] Ptr to flow table + * flow_id [in] The flow id of the flow + * func_id [in] The func_id to be set, for reset pass zero + * + * returns none + */ +static void +ulp_flow_db_func_id_set(struct bnxt_ulp_flow_db *flow_db, + uint32_t flow_id, + uint32_t func_id) +{ + /* set the function id in the function table */ + if (flow_id < flow_db->func_id_tbl_size) + flow_db->func_id_tbl[flow_id] = func_id; + else /* This should never happen */ + BNXT_TF_DBG(ERR, "Invalid flow id, flowdb corrupt\n"); +} + /* * Initialize the flow database. Memory is allocated in this * call and assigned to the flow database. @@ -241,7 +262,7 @@ int32_t ulp_flow_db_init(struct bnxt_ulp_context *ulp_ctxt) if (!flow_db) { BNXT_TF_DBG(ERR, "Failed to allocate memory for flow table ptr\n"); - goto error_free; + return -ENOMEM; } /* Attach the flow database to the ulp context. */ @@ -265,6 +286,17 @@ int32_t ulp_flow_db_init(struct bnxt_ulp_context *ulp_ctxt) if (ulp_flow_db_alloc_resource(flow_db, BNXT_ULP_DEFAULT_FLOW_TABLE)) goto error_free; + /* add 1 since we are not using index 0 for flow id */ + flow_db->func_id_tbl_size = dparms->num_flows + 1; + /* Allocate the function Id table */ + flow_db->func_id_tbl = rte_zmalloc("bnxt_ulp_flow_db_func_id_table", + flow_db->func_id_tbl_size * + sizeof(uint16_t), 0); + if (!flow_db->func_id_tbl) { + BNXT_TF_DBG(ERR, + "Failed to allocate mem for flow table func id\n"); + goto error_free; + } /* All good so return. */ return 0; error_free: @@ -297,6 +329,7 @@ int32_t ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt) /* Free up all the memory. 
*/ ulp_flow_db_dealloc_resource(flow_db, BNXT_ULP_REGULAR_FLOW_TABLE); ulp_flow_db_dealloc_resource(flow_db, BNXT_ULP_DEFAULT_FLOW_TABLE); + rte_free(flow_db->func_id_tbl); rte_free(flow_db); return 0; @@ -311,12 +344,13 @@ int32_t ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt) * * returns 0 on success and negative on failure. */ -int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, - enum bnxt_ulp_flow_db_tables tbl_idx, - uint32_t *fid) +int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint16_t func_id, + uint32_t *fid) { - struct bnxt_ulp_flow_db *flow_db; - struct bnxt_ulp_flow_tbl *flow_tbl; + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; *fid = 0; /* Initialize fid to invalid value */ flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); @@ -339,6 +373,10 @@ int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, flow_tbl->head_index++; ulp_flow_db_active_flow_set(flow_tbl, *fid, 1); + /* The function id update is only valid for regular flow table */ + if (tbl_idx == BNXT_ULP_REGULAR_FLOW_TABLE) + ulp_flow_db_func_id_set(flow_db, *fid, func_id); + /* all good, return success */ return 0; } @@ -555,6 +593,8 @@ int32_t ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt, } flow_tbl->flow_tbl_stack[flow_tbl->head_index] = fid; ulp_flow_db_active_flow_set(flow_tbl, fid, 0); + if (tbl_idx == BNXT_ULP_REGULAR_FLOW_TABLE) + ulp_flow_db_func_id_set(flow_db, fid, 0); /* all good, return success */ return 0; @@ -636,19 +676,29 @@ ulp_flow_db_next_entry_get(struct bnxt_ulp_flow_tbl *flowtbl, uint32_t *fid) { uint32_t lfid = *fid; - uint32_t idx; + uint32_t idx, s_idx, mod_fid; uint64_t bs; do { + /* increment the flow id to find the next valid flow id */ lfid++; if (lfid >= flowtbl->num_flows) return -ENOENT; idx = lfid / ULP_INDEX_BITMAP_SIZE; + mod_fid = lfid % ULP_INDEX_BITMAP_SIZE; + s_idx = idx; while (!(bs = flowtbl->active_flow_tbl[idx])) { idx++; if 
((idx * ULP_INDEX_BITMAP_SIZE) >= flowtbl->num_flows) return -ENOENT; } + /* + * remove the previous bits in the bitset bs to find the + * next non zero bit in the bitset. This needs to be done + * only if the idx is same as he one you started. + */ + if (s_idx == idx) + bs &= (-1UL >> mod_fid); lfid = (idx * ULP_INDEX_BITMAP_SIZE) + __builtin_clzl(bs); if (*fid >= lfid) { BNXT_TF_DBG(ERR, "Flow Database is corrupt\n"); @@ -688,7 +738,90 @@ int32_t ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, } flow_tbl = &flow_db->flow_tbl[idx]; while (!ulp_flow_db_next_entry_get(flow_tbl, &fid)) - (void)ulp_mapper_resources_free(ulp_ctx, fid, idx); + ulp_mapper_resources_free(ulp_ctx, fid, idx); return 0; } + +/* + * Flush all flows in the flow database that belong to a device function. + * + * ulp_ctxt [in] Ptr to ulp context + * tbl_idx [in] The index to table + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_function_flow_flush(struct bnxt_ulp_context *ulp_ctx, + uint16_t func_id) +{ + uint32_t flow_id = 0; + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + + if (!ulp_ctx || !func_id) { + BNXT_TF_DBG(ERR, "Invalid Argument\n"); + return -EINVAL; + } + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Flow database not found\n"); + return -EINVAL; + } + flow_tbl = &flow_db->flow_tbl[BNXT_ULP_REGULAR_FLOW_TABLE]; + while (!ulp_flow_db_next_entry_get(flow_tbl, &flow_id)) { + if (flow_db->func_id_tbl[flow_id] == func_id) + ulp_mapper_resources_free(ulp_ctx, flow_id, + BNXT_ULP_REGULAR_FLOW_TABLE); + } + + return 0; +} + +/* + * Flush all flows in the flow database that are associated with the session. 
+ * + * ulp_ctxt [in] Ptr to ulp context + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_session_flow_flush(struct bnxt_ulp_context *ulp_ctx) +{ + /* + * TBD: Tf core implementation of FW session flush shall change this + * implementation. + */ + return ulp_flow_db_flush_flows(ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE); +} + +/* + * Check that flow id matches the function id or not + * + * ulp_ctxt [in] Ptr to ulp context + * flow_db [in] Ptr to flow table + * func_id [in] The func_id to be set, for reset pass zero. + * + * returns true on success or false on failure + */ +bool +ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx, + uint32_t flow_id, + uint32_t func_id) +{ + struct bnxt_ulp_flow_db *flow_db; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Flow database not found\n"); + return false; + } + + /* set the function id in the function table */ + if (flow_id < flow_db->func_id_tbl_size && func_id && + flow_db->func_id_tbl[flow_id] == func_id) + return true; + + return false; +} diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h index 5361dd025..ebca84947 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h +++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h @@ -51,6 +51,8 @@ enum bnxt_ulp_flow_db_tables { /* Structure for the flow database resource information. */ struct bnxt_ulp_flow_db { struct bnxt_ulp_flow_tbl flow_tbl[BNXT_ULP_FLOW_TABLE_MAX]; + uint16_t *func_id_tbl; + uint32_t func_id_tbl_size; }; /* flow db resource params to add resources */ @@ -88,13 +90,15 @@ int32_t ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt); * * ulp_ctxt [in] Ptr to ulp_context * tbl_idx [in] Specify it is regular or default flow + * func_id [in] The function id of the device.Valid only for regular flows. * fid [out] The index to the flow entry * * returns 0 on success and negative on failure. 
*/ -int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, - enum bnxt_ulp_flow_db_tables tbl_idx, - uint32_t *fid); +int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint16_t func_id, + uint32_t *fid); /* * Allocate the flow database entry. @@ -170,4 +174,40 @@ int32_t ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, int32_t ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, uint32_t idx); +/* + * Flush all flows in the flow database that belong to a device function. + * + * ulp_ctxt [in] Ptr to ulp context + * tbl_idx [in] The index to table + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_function_flow_flush(struct bnxt_ulp_context *ulp_ctx, + uint16_t func_id); + +/* + * Flush all flows in the flow database that are associated with the session. + * + * ulp_ctxt [in] Ptr to ulp context + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_session_flow_flush(struct bnxt_ulp_context *ulp_ctx); + +/* + * Check that flow id matches the function id or not + * + * ulp_ctxt [in] Ptr to ulp context + * flow_db [in] Ptr to flow table + * func_id [in] The func_id to be set, for reset pass zero. 
+ * + * returns true on success or false on failure + */ +bool +ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx, + uint32_t flow_id, + uint32_t func_id); + #endif /* _ULP_FLOW_DB_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c index a0aba403f..94899c005 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -461,7 +461,6 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, default: return -EINVAL; } - return 0; } @@ -1481,6 +1480,7 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, */ rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE, + cparms->func_id, &parms.fid); if (rc) { BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n"); diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h index 24727a32d..2fa6ffce2 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h @@ -46,6 +46,7 @@ struct bnxt_ulp_mapper_create_parms { struct ulp_rte_act_prop *act_prop; uint32_t class_tid; uint32_t act_tid; + uint16_t func_id; }; /* From patchwork Fri Apr 17 16:19:17 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68797 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id AE500A0597; Fri, 17 Apr 2020 18:21:30 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5B6111EA76; Fri, 17 Apr 2020 18:19:44 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id C8FF01E9E5 for ; Fri, 17 Apr 2020 18:19:28 +0200 (CEST) Received: from mail-irv-17.broadcom.com 
(mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 1C30730C1EE; Fri, 17 Apr 2020 09:08:08 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 1C30730C1EE DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139688; bh=4xqveZnwMirhRARIvQnJ5xZCtzmLyrXKsvYq6HoqC70=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=n/BTfhv+BRyB26DGWf40+l/XXAEjsbc5nBvaOGnkztU1PpghQX4ynJBynC329tyzk NCJ+W7a7wwUABECMyd54TXWM3ROWsH2Feaox/mAe20AGCsDSWdsEmJxw8Jtat9hBDk CyJpzsoXCEFQsjR8RBWtIqJ5eWgkQBFl5mX3PWi4= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 4152614008E; Fri, 17 Apr 2020 09:19:27 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Mike Baucom , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:17 -0700 Message-Id: <20200417161920.85858-10-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 09/12] net/bnxt: add default identifiers to ulp mapper X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Mike Baucom - Added ulp_mapper_init/deinit to allocate/deallocate mapper data for storing the default identifiers - Modified the template_db to include the new opcode for accessing the default ids. 
- Modified the result and key field builders to use the new opcode for writing the default ids into blobs Reviewed-by: Venkat Duvvuru Reviewed-by: Ajit Khaparde Signed-off-by: Mike Baucom Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 34 +++ drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 10 + drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 8 +- drivers/net/bnxt/tf_ulp/ulp_mapper.c | 251 +++++++++++++++++- drivers/net/bnxt/tf_ulp/ulp_mapper.h | 22 +- drivers/net/bnxt/tf_ulp/ulp_template_db.c | 35 +-- drivers/net/bnxt/tf_ulp/ulp_template_db.h | 25 +- drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 11 + 8 files changed, 364 insertions(+), 32 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c index c67da6d76..202b4a529 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c @@ -19,6 +19,7 @@ #include "ulp_template_struct.h" #include "ulp_mark_mgr.h" #include "ulp_flow_db.h" +#include "ulp_mapper.h" /* Linked list of all TF sessions. 
*/ STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list = @@ -485,6 +486,12 @@ bnxt_ulp_init(struct bnxt *bp) goto jump_to_error; } + rc = ulp_mapper_init(&bp->ulp_ctx); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n"); + goto jump_to_error; + } + return rc; jump_to_error: @@ -529,6 +536,9 @@ bnxt_ulp_deinit(struct bnxt *bp) /* Delete the Mark database */ ulp_mark_db_deinit(&bp->ulp_ctx); + /* cleanup the ulp mapper */ + ulp_mapper_deinit(&bp->ulp_ctx); + /* Delete the ulp context and tf session */ ulp_ctx_detach(bp, session); @@ -689,3 +699,27 @@ bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev) } return &bp->ulp_ctx; } + +int32_t +bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, + void *mapper_data) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) { + BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + return -EINVAL; + } + + ulp_ctx->cfg_data->mapper_data = mapper_data; + return 0; +} + +void * +bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) { + BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + return NULL; + } + + return ulp_ctx->cfg_data->mapper_data; +} diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h index b3e9e960d..d2ca17857 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h @@ -18,6 +18,7 @@ struct bnxt_ulp_data { uint32_t dev_id; /* Hardware device id */ uint32_t ref_cnt; struct bnxt_ulp_flow_db *flow_db; + void *mapper_data; }; struct bnxt_ulp_context { @@ -107,4 +108,13 @@ bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx); struct bnxt_ulp_context * bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev); +/* Function to add the ulp mapper data to the ulp context */ +int32_t +bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, + void *mapper_data); + +/* Function to get the ulp mapper data from the ulp context */ +void * 
+bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx); + #endif /* _BNXT_ULP_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index f0c812ced..7783f85d9 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -110,7 +110,6 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, goto parse_error; ret = ulp_matcher_pattern_match(¶ms, &class_id); - if (ret != BNXT_TF_RC_SUCCESS) goto parse_error; @@ -126,11 +125,10 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, mapper_cparms.class_tid = class_id; mapper_cparms.act_tid = act_tmpl; mapper_cparms.func_id = bnxt_get_fw_func_id(dev->data->port_id); + mapper_cparms.dir = params.dir; - /* call the ulp mapper to create the flow in the hardware */ - ret = ulp_mapper_flow_create(ulp_ctx, - &mapper_cparms, - &fid); + /* Call the ulp mapper to create the flow in the hardware. */ + ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms, &fid); if (!ret) { flow_id = (struct rte_flow *)((uintptr_t)fid); return flow_id; diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c index 94899c005..c67a9e186 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -4,6 +4,7 @@ */ #include +#include #include "bnxt.h" #include "ulp_template_db.h" #include "ulp_template_struct.h" @@ -16,6 +17,55 @@ #include "ulp_flow_db.h" #include "ulp_mapper.h" +static struct bnxt_ulp_def_ident_info * +ulp_mapper_def_ident_info_list_get(uint32_t *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ; + return ulp_def_ident_tbl; +} + +/* + * Read a default identifier from the mapper regfile. + * + * The regval is always returned in big-endian. 
+ * + * returns 0 on success + */ +static int32_t +ulp_mapper_def_regfile_read(struct bnxt_ulp_mapper_data *mapper_data, + enum tf_dir dir, + uint16_t idx, + uint64_t *regval) +{ + if (!mapper_data || !regval || + dir >= TF_DIR_MAX || idx >= BNXT_ULP_DEF_REGFILE_INDEX_LAST) + return -EINVAL; + *regval = mapper_data->dflt_ids[dir][idx].ident; + return 0; +} + +/* + * Write a default identifier to the mapper regfile + * + * The regval value must be in big-endian. + * + * return 0 on success. + */ +static int32_t +ulp_mapper_def_regfile_write(struct bnxt_ulp_mapper_data *mapper_data, + enum tf_dir dir, + uint16_t idx, + uint64_t regval) +{ + if (!mapper_data || dir >= TF_DIR_MAX || + idx >= BNXT_ULP_DEF_REGFILE_INDEX_LAST) + return -EINVAL; + mapper_data->dflt_ids[dir][idx].ident = regval; + return 0; +} + /* * Get the size of the action property for a given index. * @@ -364,6 +414,7 @@ ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, static int32_t ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, struct bnxt_ulp_mapper_result_field_info *fld, struct ulp_blob *blob, const char *name) @@ -458,6 +509,27 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, return -EINVAL; } break; + case BNXT_ULP_RESULT_OPC_SET_TO_DEF_REGFILE: + if (!ulp_operand_read(fld->result_operand, + (uint8_t *)&idx, + sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + if (ulp_mapper_def_regfile_read(parms->mapper_data, + dir, + idx, ®val)) { + BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", + name, idx); + return -EINVAL; + } + val = ulp_blob_push_64(blob, ®val, fld->field_bit_size); + if (!val) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; default: return -EINVAL; } @@ -467,12 +539,13 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, /* Function to alloc action record and set 
the table. */ static int32_t ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, struct bnxt_ulp_mapper_class_key_field_info *f, struct ulp_blob *blob, uint8_t is_key, const char *name) { - uint64_t regval; + uint64_t val64; uint16_t idx, bitlen; uint32_t opcode; uint8_t *operand; @@ -541,17 +614,38 @@ ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, } idx = tfp_be_to_cpu_16(idx); - if (!ulp_regfile_read(regfile, idx, ®val)) { + if (!ulp_regfile_read(regfile, idx, &val64)) { BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", name, idx); return -EINVAL; } - val = ulp_blob_push_64(blob, ®val, bitlen); + val = ulp_blob_push_64(blob, &val64, bitlen); + if (!val) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE: + if (!ulp_operand_read(operand, (uint8_t *)&idx, + sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + if (ulp_mapper_def_regfile_read(parms->mapper_data, + dir, + idx, &val64)) { + BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", + name, idx); + return -EINVAL; + } + val = ulp_blob_push_64(blob, &val64, bitlen); if (!val) { BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); return -EINVAL; } + break; default: break; } @@ -716,6 +810,7 @@ ulp_mapper_action_info_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < (num_flds + encap_flds); i++) { fld = &flds[i]; rc = ulp_mapper_result_field_process(parms, + tbl->direction, fld, &blob, "Action"); @@ -779,7 +874,8 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, */ for (i = 0; i < num_kflds; i++) { /* Setup the key */ - rc = ulp_mapper_keymask_field_process(parms, &kflds[i], + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], &key, 1, "TCAM Key"); if (rc) { BNXT_TF_DBG(ERR, "Key field set failed.\n"); @@ -787,7 +883,8 @@ 
ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, } /* Setup the mask */ - rc = ulp_mapper_keymask_field_process(parms, &kflds[i], + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], &mask, 0, "TCAM Mask"); if (rc) { BNXT_TF_DBG(ERR, "Mask field set failed.\n"); @@ -854,6 +951,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < num_dflds; i++) { rc = ulp_mapper_result_field_process(parms, + tbl->direction, &dflds[i], &data, "TCAM Result"); @@ -958,7 +1056,8 @@ ulp_mapper_em_tbl_process(struct bnxt_ulp_mapper_parms *parms, /* create the key */ for (i = 0; i < num_kflds; i++) { /* Setup the key */ - rc = ulp_mapper_keymask_field_process(parms, &kflds[i], + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], &key, 1, "EM Key"); if (rc) { BNXT_TF_DBG(ERR, "Key field set failed.\n"); @@ -984,6 +1083,7 @@ ulp_mapper_em_tbl_process(struct bnxt_ulp_mapper_parms *parms, fld = &dflds[i]; rc = ulp_mapper_result_field_process(parms, + tbl->direction, fld, &data, "EM Result"); @@ -1134,6 +1234,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms, for (i = 0; i < num_flds; i++) { rc = ulp_mapper_result_field_process(parms, + tbl->direction, &flds[i], &data, "Indexed Result"); @@ -1436,6 +1537,17 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, return -EINVAL; } + /* + * Get the mapper data for dynamic mapper data such as default + * ids. 
+ */ + parms.mapper_data = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!parms.mapper_data) { + BNXT_TF_DBG(ERR, "Failed to get the ulp mapper data\n"); + return -EINVAL; + } + /* Get the action table entry from device id and act context id */ parms.act_tid = cparms->act_tid; parms.atbls = ulp_mapper_action_tbl_list_get(parms.dev_id, @@ -1515,3 +1627,130 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, return rc; } + +int32_t +ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx) +{ + struct tf_alloc_identifier_parms iparms; + struct bnxt_ulp_mapper_data *data; + struct bnxt_ulp_def_ident_info *dflt_ids; + uint32_t i, num_dflt_ids, reg_idx; + uint64_t regval; + struct tf *tfp; + int32_t rc; + + if (!ulp_ctx) + return -EINVAL; + + tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); + if (!tfp) + return -EINVAL; + + data = rte_zmalloc("ulp_mapper_data", + sizeof(struct bnxt_ulp_mapper_data), 0); + if (!data) { + BNXT_TF_DBG(ERR, "Failed to allocate the mapper data\n"); + return -ENOMEM; + } + + if (bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, data)) { + BNXT_TF_DBG(ERR, "Failed to set mapper data in context\n"); + /* Don't call deinit since the prof_func wasn't allocated. */ + rte_free(data); + return -ENOMEM; + } + + /* Allocate the default ids. */ + dflt_ids = ulp_mapper_def_ident_info_list_get(&num_dflt_ids); + for (i = 0; i < num_dflt_ids; i++) { + iparms.ident_type = dflt_ids[i].ident_type; + iparms.dir = dflt_ids[i].direction; + + rc = tf_alloc_identifier(tfp, &iparms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to alloc dflt " + "identifier [%s][%d]\n", + (iparms.dir == TF_DIR_RX) ? "RX" : "TX", + iparms.ident_type); + goto error; + } + reg_idx = dflt_ids[i].def_regfile_index; + /* All regfile entries are stored as 64bit big-endian values. 
*/ + regval = tfp_cpu_to_be_64((uint64_t)iparms.id); + if (ulp_mapper_def_regfile_write(data, + iparms.dir, + reg_idx, + regval)) { + BNXT_TF_DBG(ERR, "Failed to write to default " + "regfile.\n"); + goto error; + } + } + + return 0; +error: + /* Ignore the return code in favor of returning the original error. */ + ulp_mapper_deinit(ulp_ctx); + return rc; +} + +void +ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx) +{ + struct tf_free_identifier_parms free_parms; + struct bnxt_ulp_def_ident_info *dflt_ids; + struct bnxt_ulp_mapper_data *data; + uint32_t i, num_dflt_ids, reg_idx; + enum tf_dir dir; + uint64_t regval; + struct tf *tfp; + + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, + "Failed to acquire ulp context, so data may " + "not be released.\n"); + return; + } + + data = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!data) { + /* Go ahead and return since there is no allocated data. */ + BNXT_TF_DBG(ERR, "No data appears to have been allocated.\n"); + return; + } + + tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); + if (!tfp) { + BNXT_TF_DBG(ERR, "Failed to acquire tfp.\n"); + /* Free the mapper data regardless of errors. */ + goto free_mapper_data; + } + + /* Free the default prof func ids per direction. */ + dflt_ids = ulp_mapper_def_ident_info_list_get(&num_dflt_ids); + for (i = 0; i < num_dflt_ids; i++) { + reg_idx = dflt_ids[i].def_regfile_index; + dir = dflt_ids[i].direction; + free_parms.ident_type = dflt_ids[i].ident_type; + free_parms.dir = dir; + if (ulp_mapper_def_regfile_read(data, dir, reg_idx, ®val)) { + BNXT_TF_DBG(ERR, "Failed to read def regfile to free " + "identifier.\n"); + continue; + } + /* + * All regfile entries are stored as 64bit big-endian. Need + * to convert the value to cpu before calling tf. + */ + regval = tfp_be_to_cpu_64(regval); + free_parms.id = (uint16_t)regval; + /* Ignore errors and free the remaining identifers. 
*/ + tf_free_identifier(tfp, &free_parms); + } + +free_mapper_data: + rte_free(data); + /* Reset the data pointer within the ulp_ctx. */ + bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, NULL); +} diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h index 2fa6ffce2..fb47f1c27 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h @@ -6,10 +6,10 @@ #ifndef _ULP_MAPPER_H_ #define _ULP_MAPPER_H_ -#include #include #include #include +#include "tf_core.h" #include "ulp_template_db.h" #include "ulp_template_struct.h" #include "bnxt_ulp.h" @@ -17,6 +17,16 @@ #define ULP_SZ_BITS2BYTES(x) (((x) + 7) / 8) +struct bnxt_ulp_mapper_def_id_entry { + enum tf_identifier_type ident_type; + uint64_t ident; +}; + +struct bnxt_ulp_mapper_data { + struct bnxt_ulp_mapper_def_id_entry + dflt_ids[TF_DIR_MAX][BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ]; +}; + /* Internal Structure for passing the arguments around */ struct bnxt_ulp_mapper_parms { uint32_t dev_id; @@ -36,6 +46,7 @@ struct bnxt_ulp_mapper_parms { uint8_t encap_byte_swap; uint32_t fid; enum bnxt_ulp_flow_db_tables tbl_idx; + struct bnxt_ulp_mapper_data *mapper_data; }; struct bnxt_ulp_mapper_create_parms { @@ -47,8 +58,17 @@ struct bnxt_ulp_mapper_create_parms { uint32_t class_tid; uint32_t act_tid; uint16_t func_id; + enum ulp_direction_type dir; }; +/* Function to initialize any dynamic mapper data. */ +int32_t +ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx); + +/* Function to release all dynamic mapper data. */ +void +ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx); + /* * Function to handle the mapping of the Flow to be compatible * with the underlying hardware. 
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db.c b/drivers/net/bnxt/tf_ulp/ulp_template_db.c index 25a558a4e..cd3f65f7a 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_template_db.c @@ -297,6 +297,15 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = { } }; +struct bnxt_ulp_def_ident_info ulp_def_ident_tbl[] = { + [0] = { + .ident_type = TF_IDENT_TYPE_PROF_FUNC, + .def_regfile_index = + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID, + .direction = TF_DIR_RX + } +}; + struct bnxt_ulp_device_params ulp_device_params[] = { [BNXT_ULP_DEVICE_ID_WH_PLUS] = { .global_fid_enable = BNXT_ULP_SYM_YES, @@ -577,7 +586,7 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .result_bit_size = 64, .result_num_fields = 13, .ident_start_idx = 0, - .ident_nums = 2, + .ident_nums = 1, .mark_enable = BNXT_ULP_MARK_ENABLE_NO, .critical_resource = 0, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED @@ -595,7 +604,7 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .result_start_idx = 13, .result_bit_size = 38, .result_num_fields = 8, - .ident_start_idx = 2, + .ident_start_idx = 1, .ident_nums = 1, .mark_enable = BNXT_ULP_MARK_ENABLE_NO, .critical_resource = 0, @@ -614,6 +623,7 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .result_start_idx = 21, .result_bit_size = 64, .result_num_fields = 9, + .ident_start_idx = 2, .ident_nums = 0, .mark_enable = BNXT_ULP_MARK_ENABLE_YES, .critical_resource = 1, @@ -1088,9 +1098,10 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, - .spec_operand = {(BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 >> 8) & 0xff, - BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 & 0xff, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE, + .spec_operand = { + 
(BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1256,9 +1267,10 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { }, { .field_bit_size = 7, - .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, - .result_operand = {(BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 >> 8) & 0xff, - BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 & 0xff, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_DEF_REGFILE, + .result_operand = { + (BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID & 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, @@ -1441,13 +1453,6 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { }; struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = { - { - .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, - .ident_type = TF_IDENT_TYPE_PROF_FUNC, - .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0, - .ident_bit_size = 7, - .ident_bit_pos = 47 - }, { .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, .ident_type = TF_IDENT_TYPE_L2_CTXT, diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db.h b/drivers/net/bnxt/tf_ulp/ulp_template_db.h index 94d425315..cf4ff9f39 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_db.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_db.h @@ -28,6 +28,7 @@ #define BNXT_ULP_ACT_HID_SHFTR 0 #define BNXT_ULP_ACT_HID_SHFTL 23 #define BNXT_ULP_ACT_HID_MASK 255 +#define BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ 1 enum bnxt_ulp_action_bit { BNXT_ULP_ACTION_BIT_MARK = 0x0000000000000001, @@ -112,6 +113,11 @@ enum bnxt_ulp_chf_idx { BNXT_ULP_CHF_IDX_LAST = 14 }; +enum bnxt_ulp_def_regfile_index { + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID = 0, + BNXT_ULP_DEF_REGFILE_INDEX_LAST = 1 +}; + enum bnxt_ulp_device_id { BNXT_ULP_DEVICE_ID_WH_PLUS = 0, BNXT_ULP_DEVICE_ID_THOR = 1, 
@@ -120,6 +126,12 @@ enum bnxt_ulp_device_id { BNXT_ULP_DEVICE_ID_LAST = 4 }; +enum bnxt_ulp_direction { + BNXT_ULP_DIRECTION_INGRESS = 0, + BNXT_ULP_DIRECTION_EGRESS = 1, + BNXT_ULP_DIRECTION_LAST = 2 +}; + enum bnxt_ulp_hdr_type { BNXT_ULP_HDR_TYPE_NOT_SUPPORTED = 0, BNXT_ULP_HDR_TYPE_SUPPORTED = 1, @@ -137,8 +149,9 @@ enum bnxt_ulp_mask_opc { BNXT_ULP_MASK_OPC_SET_TO_CONSTANT = 0, BNXT_ULP_MASK_OPC_SET_TO_HDR_FIELD = 1, BNXT_ULP_MASK_OPC_SET_TO_REGFILE = 2, - BNXT_ULP_MASK_OPC_ADD_PAD = 3, - BNXT_ULP_MASK_OPC_LAST = 4 + BNXT_ULP_MASK_OPC_SET_TO_DEF_REGFILE = 3, + BNXT_ULP_MASK_OPC_ADD_PAD = 4, + BNXT_ULP_MASK_OPC_LAST = 5 }; enum bnxt_ulp_match_type { @@ -193,7 +206,8 @@ enum bnxt_ulp_result_opc { BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP = 1, BNXT_ULP_RESULT_OPC_SET_TO_ENCAP_ACT_PROP_SZ = 2, BNXT_ULP_RESULT_OPC_SET_TO_REGFILE = 3, - BNXT_ULP_RESULT_OPC_LAST = 4 + BNXT_ULP_RESULT_OPC_SET_TO_DEF_REGFILE = 4, + BNXT_ULP_RESULT_OPC_LAST = 5 }; enum bnxt_ulp_search_before_alloc { @@ -206,8 +220,9 @@ enum bnxt_ulp_spec_opc { BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT = 0, BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD = 1, BNXT_ULP_SPEC_OPC_SET_TO_REGFILE = 2, - BNXT_ULP_SPEC_OPC_ADD_PAD = 3, - BNXT_ULP_SPEC_OPC_LAST = 4 + BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE = 3, + BNXT_ULP_SPEC_OPC_ADD_PAD = 4, + BNXT_ULP_SPEC_OPC_LAST = 5 }; enum bnxt_ulp_encap_vtag_encoding { diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h index c2d3ccb3e..476d5b9bb 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h @@ -222,6 +222,12 @@ struct bnxt_ulp_mapper_ident_info { enum bnxt_ulp_regfile_index regfile_wr_idx; }; +struct bnxt_ulp_def_ident_info { + enum tf_dir direction; + enum tf_identifier_type ident_type; + enum bnxt_ulp_def_regfile_index def_regfile_index; +}; + /* * Flow Mapper Static Data Externs: * Access to the below static data should be done through access functions and @@ -285,4 +291,9 @@ 
extern struct bnxt_ulp_mapper_ident_info ulp_ident_list[]; */ extern uint32_t ulp_act_prop_map_table[]; +/* + * The ulp_def_ident_tbl provides the list of default identifiers that need to + * be initialized and where to store them. + */ +extern struct bnxt_ulp_def_ident_info ulp_def_ident_tbl[]; #endif /* _ULP_TEMPLATE_STRUCT_H_ */ From patchwork Fri Apr 17 16:19:18 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68795 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2F4E6A0597; Fri, 17 Apr 2020 18:21:06 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 58CD21EA5A; Fri, 17 Apr 2020 18:19:41 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id BEE661E9BF for ; Fri, 17 Apr 2020 18:19:28 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 6010E30C1F3; Fri, 17 Apr 2020 09:08:08 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 6010E30C1F3 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139688; bh=k6yIn5soQFkaRHU/v5Gl3nKXzDeC6QDoakdzJIGclTQ=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=AMetHod+mw2/POkTnNPLxKc3jg/hHymfZx0fJHFT+rvRfSUgqbas6bxhc3OFKCJsa NHLcHSEsQRaro0ja9wxh7U5CjjgYRNkRc9hmbMS05hDInBZr5u86wsm66jpb12UWny uoXa/k1/AJod0EAJJaxlZr3LlKpIWMm2FwBgCNjA= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 933A3140090; Fri, 17 Apr 2020 09:19:27 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Mike Baucom 
, Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:18 -0700 Message-Id: <20200417161920.85858-11-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 10/12] net/bnxt: add cache table type for TCAM lookup X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Mike Baucom In order to re-use allocated resources and reduce search complexity for simple keys, a generic software cache table was added for the TCAM. The implementation is specifically only for keys that can be compressed to less than 16 bits. The keys are generated using the same mechanisms as other search tables, but the table type is set to a cache that mirrors the actual TCAM table. The allocated result fields are stored in the cache entry and can be used for subsequent searches in future tables. 
Reviewed-by: Venkat Duvvuru Reviewed-by: Ajit Khaparde Signed-off-by: Mike Baucom Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/tf_ulp/ulp_mapper.c | 460 ++++++++++++++++-- drivers/net/bnxt/tf_ulp/ulp_mapper.h | 29 ++ drivers/net/bnxt/tf_ulp/ulp_template_db.c | 155 +++++- drivers/net/bnxt/tf_ulp/ulp_template_db.h | 24 +- drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 13 + 5 files changed, 616 insertions(+), 65 deletions(-) diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c index c67a9e186..1b270d3a5 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -66,6 +66,16 @@ ulp_mapper_def_regfile_write(struct bnxt_ulp_mapper_data *mapper_data, return 0; } +/* Retrieve the cache initialization parameters for the tbl_idx */ +static struct bnxt_ulp_cache_tbl_params * +ulp_mapper_cache_tbl_params_get(uint32_t tbl_idx) +{ + if (tbl_idx >= BNXT_ULP_CACHE_TBL_MAX_SZ) + return NULL; + + return &ulp_cache_tbl_params[tbl_idx]; +} + /* * Get the size of the action property for a given index. * @@ -256,6 +266,126 @@ ulp_mapper_ident_fields_get(struct bnxt_ulp_mapper_class_tbl_info *tbl, return &ulp_ident_list[idx]; } +static struct bnxt_ulp_mapper_cache_entry * +ulp_mapper_cache_entry_get(struct bnxt_ulp_context *ulp, + enum bnxt_ulp_cache_tbl_id id, + uint16_t key) +{ + struct bnxt_ulp_mapper_data *mapper_data; + + mapper_data = bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp); + if (!mapper_data || !mapper_data->cache_tbl[id]) { + BNXT_TF_DBG(ERR, "Unable to acquire the cache tbl (%d)\n", id); + return NULL; + } + + return &mapper_data->cache_tbl[id][key]; +} + +/* + * Concatenates the tbl_type and tbl_id into a 32bit value for storing in the + * resource_type. This is done to conserve memory since both the tbl_type and + * tbl_id are 16bit. 
+ */ +static inline void +ulp_mapper_cache_res_type_set(struct ulp_flow_db_res_params *res, + uint16_t tbl_type, + uint16_t tbl_id) +{ + res->resource_type = + ((uint32_t)tbl_id << ULP_MAPPER_CACHE_RES_TBL_ID_SHFT) | + ((uint32_t)tbl_type << ULP_MAPPER_CACHE_RES_TBL_TYPE_SHFT); +} + +/* Extracts the tbl_type and tbl_id from the 32bit resource type. */ +static inline void +ulp_mapper_cache_res_type_get(struct ulp_flow_db_res_params *res, + uint16_t *tbl_type, + uint16_t *tbl_id) +{ + *tbl_type = (uint16_t)((res->resource_type >> + ULP_MAPPER_CACHE_RES_TBL_TYPE_SHFT) & + ULP_MAPPER_CACHE_RES_TBL_MASK); + *tbl_id = (uint16_t)((res->resource_type >> + ULP_MAPPER_CACHE_RES_TBL_ID_SHFT) & + ULP_MAPPER_CACHE_RES_TBL_MASK); +} + +static int32_t +ulp_mapper_cache_entry_free(struct bnxt_ulp_context *ulp, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + struct bnxt_ulp_mapper_cache_entry *cache_entry; + struct tf_free_identifier_parms ident_parms; + struct tf_free_tcam_entry_parms tcam_parms; + uint16_t table_id, table_type; + int32_t rc, trc, i; + + /* + * The table id, used for cache, and table_type, used for tcam, are + * both encoded within the resource. We must first extract them to + * formulate the args for tf calls. + */ + ulp_mapper_cache_res_type_get(res, &table_type, &table_id); + + cache_entry = ulp_mapper_cache_entry_get(ulp, table_id, + (uint16_t)res->resource_hndl); + if (!cache_entry || !cache_entry->ref_count) { + BNXT_TF_DBG(ERR, "Cache entry (%d:%d) not valid on free.\n", + table_id, (uint16_t)res->resource_hndl); + return -EINVAL; + } + + /* + * See if we need to delete the entry. The tcam and identifiers are all + * tracked by the cached entries reference count. All are deleted when + * the reference count hit zero. + */ + cache_entry->ref_count--; + if (cache_entry->ref_count) + return 0; + + /* + * Need to delete the tcam entry and the allocated identifiers. 
+ * In the event of a failure, need to try to delete the remaining + * resources before returning error. + */ + tcam_parms.dir = res->direction; + tcam_parms.tcam_tbl_type = table_type; + tcam_parms.idx = cache_entry->tcam_idx; + rc = tf_free_tcam_entry(tfp, &tcam_parms); + if (rc) + BNXT_TF_DBG(ERR, "Failed to free tcam [%d][%s][0x%04x] rc=%d\n", + table_type, + (res->direction == TF_DIR_RX) ? "RX" : "TX", + tcam_parms.idx, rc); + + /* + * Free the identifiers associated with the tcam entry. Entries with + * negative one are considered uninitialized. + */ + for (i = 0; i < BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM; i++) { + if (cache_entry->idents[i] == ULP_IDENTS_INVALID) + continue; + + ident_parms.dir = res->direction; + ident_parms.ident_type = cache_entry->ident_types[i]; + ident_parms.id = cache_entry->idents[i]; + trc = tf_free_identifier(tfp, &ident_parms); + if (trc) { + BNXT_TF_DBG(ERR, "Failed to free identifier " + "[%d][%s][0x%04x] rc=%d\n", + ident_parms.ident_type, + (res->direction == TF_DIR_RX) ? "RX" : "TX", + ident_parms.id, trc); + rc = trc; + } + } + + return rc; +} + static inline int32_t ulp_mapper_tcam_entry_free(struct bnxt_ulp_context *ulp __rte_unused, struct tf *tfp, @@ -337,10 +467,16 @@ ulp_mapper_mark_free(struct bnxt_ulp_context *ulp, 0); } +/* + * Process the identifier instruction and either store it in the flow database + * or return it in the val (if not NULL) on success. If val is NULL, the + * identifier is to be stored in the flow database. 
+ */ static int32_t ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, struct bnxt_ulp_mapper_class_tbl_info *tbl, - struct bnxt_ulp_mapper_ident_info *ident) + struct bnxt_ulp_mapper_ident_info *ident, + uint16_t *val) { struct ulp_flow_db_res_params fid_parms; uint64_t id = 0; @@ -378,22 +514,26 @@ ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, } /* Link the resource to the flow in the flow db */ - memset(&fid_parms, 0, sizeof(fid_parms)); - fid_parms.direction = tbl->direction; - fid_parms.resource_func = ident->resource_func; - fid_parms.resource_type = ident->ident_type; - fid_parms.resource_hndl = iparms.id; - fid_parms.critical_resource = 0; + if (!val) { + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = ident->resource_func; + fid_parms.resource_type = ident->ident_type; + fid_parms.resource_hndl = iparms.id; + fid_parms.critical_resource = 0; - rc = ulp_flow_db_resource_add(parms->ulp_ctx, - parms->tbl_idx, - parms->fid, - &fid_parms); - if (rc) { - BNXT_TF_DBG(ERR, "Failed to link resource to flow rc = %d\n", - rc); - /* Need to free the identifier, so goto error */ - goto error; + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to link res to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + } else { + *val = iparms.id; } return 0; @@ -848,6 +988,12 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, uint32_t hit = 0; uint16_t tmplen = 0; + /* Skip this if was handled by the cache. 
*/ + if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP) { + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; + return 0; + } + tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx); if (!tfp) { BNXT_TF_DBG(ERR, "Failed to get truflow pointer\n"); @@ -930,15 +1076,22 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, struct bnxt_ulp_mapper_ident_info *idents; uint32_t num_dflds, num_idents; - /* Alloc identifiers */ - idents = ulp_mapper_ident_fields_get(tbl, &num_idents); - - for (i = 0; i < num_idents; i++) { - rc = ulp_mapper_ident_process(parms, tbl, &idents[i]); - - /* Already logged the error, just return */ - if (rc) - goto error; + /* + * Since the cache entry is responsible for allocating + * identifiers when in use, allocate the identifiers only + * during normal processing. + */ + if (parms->tcam_tbl_opc == + BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) { + idents = ulp_mapper_ident_fields_get(tbl, &num_idents); + + for (i = 0; i < num_idents; i++) { + rc = ulp_mapper_ident_process(parms, tbl, + &idents[i], NULL); + /* Already logged the error, just return */ + if (rc) + goto error; + } } /* Create the result data blob */ @@ -986,32 +1139,57 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, sparms.idx); goto error; } + + /* Update cache with TCAM index if the was cache allocated. 
*/ + if (parms->tcam_tbl_opc == + BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) { + if (!parms->cache_ptr) { + BNXT_TF_DBG(ERR, "Unable to update cache"); + rc = -EINVAL; + goto error; + } + parms->cache_ptr->tcam_idx = aparms.idx; + } + } else { BNXT_TF_DBG(ERR, "Not supporting search before alloc now\n"); rc = -EINVAL; goto error; } - /* Link the resource to the flow in the flow db */ - fid_parms.direction = tbl->direction; - fid_parms.resource_func = tbl->resource_func; - fid_parms.resource_type = tbl->table_type; - fid_parms.critical_resource = tbl->critical_resource; - fid_parms.resource_hndl = aparms.idx; - - rc = ulp_flow_db_resource_add(parms->ulp_ctx, - parms->tbl_idx, - parms->fid, - &fid_parms); - if (rc) { - BNXT_TF_DBG(ERR, "Failed to link resource to flow rc = %d\n", - rc); - /* Need to free the identifier, so goto error */ - goto error; + /* + * Only link the entry to the flow db in the event that cache was not + * used. + */ + if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) { + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->table_type; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_hndl = aparms.idx; + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to link resource to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + } else { + /* + * Reset the tcam table opcode to normal in case the next tcam + * entry does not use cache. 
+ */ + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; + parms->cache_ptr = NULL; } return 0; error: + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; free_parms.dir = tbl->direction; free_parms.tcam_tbl_type = tbl->table_type; free_parms.idx = aparms.idx; @@ -1324,6 +1502,153 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms, return rc; } +static int32_t +ulp_mapper_cache_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_class_tbl_info *tbl) +{ + struct bnxt_ulp_mapper_class_key_field_info *kflds; + struct bnxt_ulp_mapper_cache_entry *cache_entry; + struct bnxt_ulp_mapper_ident_info *idents; + uint32_t i, num_kflds = 0, num_idents = 0; + struct ulp_flow_db_res_params fid_parms; + struct tf_free_identifier_parms fparms; + uint16_t tmplen, tmp_ident; + struct ulp_blob key; + uint8_t *cache_key; + uint64_t regval; + uint16_t *ckey; + int32_t rc; + + /* Get the key fields list and build the key. */ + kflds = ulp_mapper_key_fields_get(tbl, &num_kflds); + if (!kflds || !num_kflds) { + BNXT_TF_DBG(ERR, "Failed to get key fields\n"); + return -EINVAL; + } + if (!ulp_blob_init(&key, tbl->key_bit_size, parms->order)) { + BNXT_TF_DBG(ERR, "Failed to alloc blob\n"); + return -EINVAL; + } + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], + &key, 1, "Cache Key"); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to create key for Cache rc=%d\n", + rc); + return -EINVAL; + } + } + + /* + * Perform the lookup in the cache table with constructed key. The + * cache_key is a byte array of tmplen, it needs to be converted to a + * index for the cache table. + */ + cache_key = ulp_blob_data_get(&key, &tmplen); + ckey = (uint16_t *)cache_key; + cache_entry = ulp_mapper_cache_entry_get(parms->ulp_ctx, + tbl->cache_tbl_id, + *ckey); + + /* + * Get the identifier list for processing by both the hit and miss + * processing. 
+ */ + idents = ulp_mapper_ident_fields_get(tbl, &num_idents); + + if (!cache_entry->ref_count) { + /* Initialize the cache entry */ + cache_entry->tcam_idx = 0; + cache_entry->ref_count = 0; + for (i = 0; i < BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM; i++) + cache_entry->idents[i] = ULP_IDENTS_INVALID; + + /* Need to allocate identifiers for storing in the cache. */ + for (i = 0; i < num_idents; i++) { + /* + * Since we are using the cache, the identifier does not + * get added to the flow db. Pass in the pointer to the + * tmp_ident. + */ + rc = ulp_mapper_ident_process(parms, tbl, + &idents[i], &tmp_ident); + if (rc) + goto error; + + cache_entry->ident_types[i] = idents[i].ident_type; + cache_entry->idents[i] = tmp_ident; + } + + /* Tell the TCAM processor to alloc an entry */ + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC; + /* Store the cache key for use by the tcam process code */ + parms->cache_ptr = cache_entry; + } else { + /* Cache hit, get values from result. */ + for (i = 0; i < num_idents; i++) { + regval = (uint64_t)cache_entry->idents[i]; + if (!ulp_regfile_write(parms->regfile, + idents[i].regfile_wr_idx, + tfp_cpu_to_be_64(regval))) { + BNXT_TF_DBG(ERR, + "Failed to write to regfile\n"); + return -EINVAL; + } + } + /* + * The cached entry is being used, so let the tcam processing + * know not to process this table. + */ + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP; + } + + /* Made through the cache processing, increment the reference count. */ + cache_entry->ref_count++; + + /* Link the cache to the flow db. */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + + /* + * Cache resource type is composed of both table_type and cache_tbl_id + * need to set it appropriately via setter. 
+ */ + ulp_mapper_cache_res_type_set(&fid_parms, + tbl->table_type, + tbl->cache_tbl_id); + fid_parms.resource_hndl = (uint64_t)*ckey; + fid_parms.critical_resource = tbl->critical_resource; + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) + BNXT_TF_DBG(ERR, "Failed to add cache to flow db.\n"); + + return rc; +error: + /* + * This error handling only gets called when the idents are being + * allocated for the cache on misses. Using the num_idents that was + * previously set. + */ + for (i = 0; i < num_idents; i++) { + if (cache_entry->idents[i] == ULP_IDENTS_INVALID) + continue; + + fparms.dir = tbl->direction; + fparms.ident_type = idents[i].ident_type; + fparms.id = cache_entry->idents[i]; + tf_free_identifier(parms->tfp, &fparms); + } + + return rc; +} + /* * Function to process the action template. Iterate through the list * action info templates and process it. @@ -1378,6 +1703,9 @@ ulp_mapper_class_tbls_process(struct bnxt_ulp_mapper_parms *parms) case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: rc = ulp_mapper_index_tbl_process(parms, tbl); break; + case BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE: + rc = ulp_mapper_cache_tbl_process(parms, tbl); + break; default: BNXT_TF_DBG(ERR, "Unexpected class resource %d\n", tbl->resource_func); @@ -1413,6 +1741,9 @@ ulp_mapper_resource_free(struct bnxt_ulp_context *ulp, } switch (res->resource_func) { + case BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE: + rc = ulp_mapper_cache_entry_free(ulp, tfp, res); + break; case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: rc = ulp_mapper_tcam_entry_free(ulp, tfp, res); break; @@ -1530,6 +1861,7 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, parms.hdr_field = cparms->hdr_field; parms.tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); parms.ulp_ctx = ulp_ctx; + parms.tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; /* Get the device id from the ulp context */ if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &parms.dev_id)) { @@ -1586,6 +1918,14 @@ 
ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, return -EINVAL; } + rc = ulp_regfile_write(parms.regfile, + BNXT_ULP_REGFILE_INDEX_CLASS_TID, + tfp_cpu_to_be_64((uint64_t)parms.class_tid)); + if (!rc) { + BNXT_TF_DBG(ERR, "Unable to write template ID to regfile\n"); + return -EINVAL; + } + /* Allocate a Flow ID for attaching all resources for the flow to. * Once allocated, all errors have to walk the list of resources and * free each of them. @@ -1631,13 +1971,14 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, int32_t ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx) { + struct bnxt_ulp_cache_tbl_params *tbl; struct tf_alloc_identifier_parms iparms; struct bnxt_ulp_mapper_data *data; struct bnxt_ulp_def_ident_info *dflt_ids; uint32_t i, num_dflt_ids, reg_idx; uint64_t regval; struct tf *tfp; - int32_t rc; + int32_t rc, csize; if (!ulp_ctx) return -EINVAL; @@ -1677,16 +2018,37 @@ ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx) reg_idx = dflt_ids[i].def_regfile_index; /* All regfile entries are stored as 64bit big-endian values. */ regval = tfp_cpu_to_be_64((uint64_t)iparms.id); - if (ulp_mapper_def_regfile_write(data, - iparms.dir, - reg_idx, - regval)) { + rc = ulp_mapper_def_regfile_write(data, iparms.dir, + reg_idx, regval); + if (rc) { BNXT_TF_DBG(ERR, "Failed to write to default " "regfile.\n"); goto error; } } + /* Allocate the ulp cache tables. */ + for (i = 0; i < BNXT_ULP_CACHE_TBL_MAX_SZ; i++) { + tbl = ulp_mapper_cache_tbl_params_get(i); + if (!tbl) { + BNXT_TF_DBG(ERR, "Failed to get cache table parms (%d)", + i); + goto error; + } + if (tbl->num_entries != 0) { + csize = sizeof(struct bnxt_ulp_mapper_cache_entry) * + tbl->num_entries; + data->cache_tbl[i] = rte_zmalloc("ulp mapper cache tbl", + csize, 0); + if (!data->cache_tbl[i]) { + BNXT_TF_DBG(ERR, "Failed to allocate Cache " + "table %d.\n", i); + rc = -ENOMEM; + goto error; + } + } + } + return 0; error: /* Ignore the return code in favor of returning the original error. 
*/ @@ -1750,6 +2112,12 @@ ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx) } free_mapper_data: + /* Free the ulp cache tables */ + for (i = 0; i < BNXT_ULP_CACHE_TBL_MAX_SZ; i++) { + rte_free(data->cache_tbl[i]); + data->cache_tbl[i] = NULL; + } + rte_free(data); /* Reset the data pointer within the ulp_ctx. */ bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, NULL); diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h index fb47f1c27..c50d555a4 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h +++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h @@ -16,6 +16,31 @@ #include "ulp_utils.h" #define ULP_SZ_BITS2BYTES(x) (((x) + 7) / 8) +#define ULP_IDENTS_INVALID ((uint16_t)0xffff) +#define ULP_MAPPER_CACHE_RES_TBL_ID_SHFT 16 +#define ULP_MAPPER_CACHE_RES_TBL_TYPE_SHFT 0 +#define ULP_MAPPER_CACHE_RES_TBL_MASK ((uint32_t)0x0000ffff) + +/* + * The cache table opcode is used to convey information from the cache handler + * to the tcam handler. The opcodes do the following: + * NORMAL - tcam should process all instructions as normal + * SKIP - tcam is using the cached entry and doesn't need to process the + * instruction. 
+ * ALLOC - tcam needs to allocate the tcam index and store in the cache entry + */ +enum bnxt_ulp_cache_table_opc { + BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL, + BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP, + BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC +}; + +struct bnxt_ulp_mapper_cache_entry { + uint32_t ref_count; + uint16_t tcam_idx; + uint16_t idents[BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM]; + uint8_t ident_types[BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM]; +}; struct bnxt_ulp_mapper_def_id_entry { enum tf_identifier_type ident_type; @@ -25,6 +50,8 @@ struct bnxt_ulp_mapper_def_id_entry { struct bnxt_ulp_mapper_data { struct bnxt_ulp_mapper_def_id_entry dflt_ids[TF_DIR_MAX][BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ]; + struct bnxt_ulp_mapper_cache_entry + *cache_tbl[BNXT_ULP_CACHE_TBL_MAX_SZ]; }; /* Internal Structure for passing the arguments around */ @@ -47,6 +74,8 @@ struct bnxt_ulp_mapper_parms { uint32_t fid; enum bnxt_ulp_flow_db_tables tbl_idx; struct bnxt_ulp_mapper_data *mapper_data; + enum bnxt_ulp_cache_table_opc tcam_tbl_opc; + struct bnxt_ulp_mapper_cache_entry *cache_ptr; }; struct bnxt_ulp_mapper_create_parms { diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db.c b/drivers/net/bnxt/tf_ulp/ulp_template_db.c index cd3f65f7a..86384169f 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_template_db.c @@ -297,6 +297,21 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = { } }; +struct bnxt_ulp_cache_tbl_params ulp_cache_tbl_params[] = { + [BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_INGRESS] = { + .num_entries = 16384 + }, + [BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_EGRESS] = { + .num_entries = 16384 + }, + [BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_INGRESS] = { + .num_entries = 16384 + }, + [BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_EGRESS] = { + .num_entries = 16384 + } +}; + struct bnxt_ulp_def_ident_info ulp_def_ident_tbl[] = { [0] = { .ident_type = TF_IDENT_TYPE_PROF_FUNC, @@ -566,29 +581,70 @@ struct bnxt_ulp_mapper_tbl_list_info ulp_class_tmpl_list[] = { [((0 << 
BNXT_ULP_LOG2_MAX_NUM_DEV) | BNXT_ULP_DEVICE_ID_WH_PLUS)] = { .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, - .num_tbls = 3, + .num_tbls = 5, .start_tbl_idx = 0 } }; struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE, + .table_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_NOT_USED, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 0, + .blob_key_bit_size = 12, + .key_bit_size = 12, + .key_num_fields = 2, + .result_start_idx = 0, + .result_bit_size = 10, + .result_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 1, + .mark_enable = BNXT_ULP_MARK_ENABLE_NO, + .critical_resource = 0, + .cache_tbl_id = BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_INGRESS, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + }, { .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, .table_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM, .direction = TF_DIR_RX, .priority = BNXT_ULP_PRIORITY_LEVEL_0, .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, - .key_start_idx = 0, + .key_start_idx = 2, .blob_key_bit_size = 167, .key_bit_size = 167, .key_num_fields = 13, - .result_start_idx = 0, + .result_start_idx = 1, .result_bit_size = 64, .result_num_fields = 13, - .ident_start_idx = 0, + .ident_start_idx = 1, + .ident_nums = 0, + .mark_enable = BNXT_ULP_MARK_ENABLE_NO, + .critical_resource = 0, + .cache_tbl_id = 0, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + }, + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE, + .table_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_NOT_USED, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 15, + .blob_key_bit_size = 16, + .key_bit_size = 16, + .key_num_fields = 3, + .result_start_idx = 14, + .result_bit_size = 10, + .result_num_fields = 1, + .ident_start_idx = 1, .ident_nums = 1, .mark_enable = BNXT_ULP_MARK_ENABLE_NO, .critical_resource = 0, + 
.cache_tbl_id = BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_INGRESS, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED }, { @@ -597,17 +653,18 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .direction = TF_DIR_RX, .priority = BNXT_ULP_PRIORITY_LEVEL_0, .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, - .key_start_idx = 13, + .key_start_idx = 18, .blob_key_bit_size = 81, .key_bit_size = 81, .key_num_fields = 42, - .result_start_idx = 13, + .result_start_idx = 15, .result_bit_size = 38, .result_num_fields = 8, - .ident_start_idx = 1, - .ident_nums = 1, + .ident_start_idx = 2, + .ident_nums = 0, .mark_enable = BNXT_ULP_MARK_ENABLE_NO, .critical_resource = 0, + .cache_tbl_id = 0, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED }, { @@ -616,22 +673,44 @@ struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { .direction = TF_DIR_RX, .priority = BNXT_ULP_PRIORITY_NOT_USED, .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, - .key_start_idx = 55, + .key_start_idx = 60, .blob_key_bit_size = 448, .key_bit_size = 448, .key_num_fields = 11, - .result_start_idx = 21, + .result_start_idx = 23, .result_bit_size = 64, .result_num_fields = 9, .ident_start_idx = 2, .ident_nums = 0, .mark_enable = BNXT_ULP_MARK_ENABLE_YES, .critical_resource = 1, + .cache_tbl_id = 0, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED } }; struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_HF0_IDX_SVIF_INDEX & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TUN_HDR_TYPE_NONE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, { .field_bit_size = 12, .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, @@ -754,6 +833,39 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, + /* class template id: 0, wh_plus, table: profile_tcam_cache_0 */ + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 7, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE, + .spec_operand = { + (BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, + .spec_operand = {(BNXT_ULP_REGFILE_INDEX_CLASS_TID >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_CLASS_TID & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, { .field_bit_size = 1, .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, @@ 
-1257,6 +1369,14 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { }; struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, { .field_bit_size = 10, .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, @@ -1340,6 +1460,15 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, + + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, { .field_bit_size = 4, .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, @@ -1458,14 +1587,14 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = { .ident_type = TF_IDENT_TYPE_L2_CTXT, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0, .ident_bit_size = 10, - .ident_bit_pos = 54 + .ident_bit_pos = 0 }, { .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, .ident_type = TF_IDENT_TYPE_EM_PROF, .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0, - .ident_bit_size = 8, - .ident_bit_pos = 2 + .ident_bit_size = 10, + .ident_bit_pos = 0 } }; diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db.h b/drivers/net/bnxt/tf_ulp/ulp_template_db.h index cf4ff9f39..a5606bdc4 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_db.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_db.h @@ -11,9 +11,10 @@ #ifndef ULP_TEMPLATE_DB_H_ #define ULP_TEMPLATE_DB_H_ -#define BNXT_ULP_REGFILE_MAX_SZ 15 +#define BNXT_ULP_REGFILE_MAX_SZ 16 
#define BNXT_ULP_MAX_NUM_DEVICES 4 #define BNXT_ULP_LOG2_MAX_NUM_DEV 2 +#define BNXT_ULP_CACHE_TBL_MAX_SZ 4 #define BNXT_ULP_CLASS_SIG_TBL_MAX_SZ 256 #define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 2 #define BNXT_ULP_CLASS_HID_LOW_PRIME 7919 @@ -28,6 +29,7 @@ #define BNXT_ULP_ACT_HID_SHFTR 0 #define BNXT_ULP_ACT_HID_SHFTL 23 #define BNXT_ULP_ACT_HID_MASK 255 +#define BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM 2 #define BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ 1 enum bnxt_ulp_action_bit { @@ -95,6 +97,14 @@ enum bnxt_ulp_byte_order { BNXT_ULP_BYTE_ORDER_LAST = 2 }; +enum bnxt_ulp_cache_tbl_id { + BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_INGRESS = 0, + BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_EGRESS = 1, + BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_INGRESS = 2, + BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_EGRESS = 3, + BNXT_ULP_CACHE_TBL_ID_LAST = 4 +}; + enum bnxt_ulp_chf_idx { BNXT_ULP_CHF_IDX_MPLS_TAG_NUM = 0, BNXT_ULP_CHF_IDX_O_VTAG_NUM = 1, @@ -188,17 +198,19 @@ enum bnxt_ulp_regfile_index { BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_0 = 11, BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_1 = 12, BNXT_ULP_REGFILE_INDEX_CRITICAL_RESOURCE = 13, - BNXT_ULP_REGFILE_INDEX_NOT_USED = 14, - BNXT_ULP_REGFILE_INDEX_LAST = 15 + BNXT_ULP_REGFILE_INDEX_CACHE_ENTRY_PTR = 14, + BNXT_ULP_REGFILE_INDEX_NOT_USED = 15, + BNXT_ULP_REGFILE_INDEX_LAST = 16 }; enum bnxt_ulp_resource_func { BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE = 0, BNXT_ULP_RESOURCE_FUNC_EM_TABLE = 1, BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE = 2, - BNXT_ULP_RESOURCE_FUNC_IDENTIFIER = 3, - BNXT_ULP_RESOURCE_FUNC_HW_FID = 4, - BNXT_ULP_RESOURCE_FUNC_LAST = 5 + BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE = 3, + BNXT_ULP_RESOURCE_FUNC_IDENTIFIER = 4, + BNXT_ULP_RESOURCE_FUNC_HW_FID = 5, + BNXT_ULP_RESOURCE_FUNC_LAST = 6 }; enum bnxt_ulp_result_opc { diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h index 476d5b9bb..1bef5ab0e 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h @@ -181,6 +181,8 @@ 
struct bnxt_ulp_mapper_class_tbl_info { uint8_t mark_enable; enum bnxt_ulp_regfile_index regfile_wr_idx; + + enum bnxt_ulp_cache_tbl_id cache_tbl_id; }; struct bnxt_ulp_mapper_act_tbl_info { @@ -228,6 +230,10 @@ struct bnxt_ulp_def_ident_info { enum bnxt_ulp_def_regfile_index def_regfile_index; }; +struct bnxt_ulp_cache_tbl_params { + uint16_t num_entries; +}; + /* * Flow Mapper Static Data Externs: * Access to the below static data should be done through access functions and @@ -296,4 +302,11 @@ extern uint32_t ulp_act_prop_map_table[]; * be initialized and where to store them. */ extern struct bnxt_ulp_def_ident_info ulp_def_ident_tbl[]; + +/* + * The ulp_cache_tbl_parms table provides the sizes of the cache tables the + * mapper must dynamically allocate during initialization. + */ +extern struct bnxt_ulp_cache_tbl_params ulp_cache_tbl_params[]; + #endif /* _ULP_TEMPLATE_STRUCT_H_ */ From patchwork Fri Apr 17 16:19:19 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68798 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 81F80A0597; Fri, 17 Apr 2020 18:21:44 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8D4371EA86; Fri, 17 Apr 2020 18:19:46 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 49A081E9E6 for ; Fri, 17 Apr 2020 18:19:29 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 77CA430C1F6; Fri, 17 Apr 2020 09:08:08 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 77CA430C1F6 DKIM-Signature: v=1; 
a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139688; bh=SR/nTXKTc++U5Iq7Po7gzc4z6lCScn2RVIZRlaBaAhg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=aFt6Xc7lmfQPU4JTQXH0ytXMZuUJEtqF/WZMjoT2A8iPOV9CdnRM2af4HkH027BpB zuJJ9cRlBc/XU7XtaYDNQfBlMDM0CEN1Ccmqc07pFetjcq1/axsIT9zx6SLBPtFn+T 3d1MJ/Lwf4MW8D8oDl39Lz0+u6rLjyTiXf/eVaeM= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id E471E14008C; Fri, 17 Apr 2020 09:19:27 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: Kishore Padmanabha , Mike Baucom , Venkat Duvvuru Date: Fri, 17 Apr 2020 09:19:19 -0700 Message-Id: <20200417161920.85858-12-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 11/12] net/bnxt: add port database X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Kishore Padmanabha The port database is a repository of the port details it is used by the ulp code to query any port related details. 
Reviewed-by: Mike Baucom Reviewed-by: Ajit Khaparde Signed-off-by: Kishore Padmanabha Signed-off-by: Venkat Duvvuru --- drivers/net/bnxt/Makefile | 1 + drivers/net/bnxt/meson.build | 1 + drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 47 ++++ drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 10 + drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 13 +- drivers/net/bnxt/tf_ulp/ulp_port_db.c | 263 ++++++++++++++++++ drivers/net/bnxt/tf_ulp/ulp_port_db.h | 134 +++++++++ drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 16 +- drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 1 + 9 files changed, 480 insertions(+), 6 deletions(-) create mode 100644 drivers/net/bnxt/tf_ulp/ulp_port_db.c create mode 100644 drivers/net/bnxt/tf_ulp/ulp_port_db.h diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile index 5ed33ccbf..2a39ed139 100644 --- a/drivers/net/bnxt/Makefile +++ b/drivers/net/bnxt/Makefile @@ -66,6 +66,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_mapper.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_matcher.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_rte_parser.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/bnxt_ulp_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_port_db.c # # Export include files diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build index d75f887d1..59dda6932 100644 --- a/drivers/net/bnxt/meson.build +++ b/drivers/net/bnxt/meson.build @@ -41,6 +41,7 @@ sources = files('bnxt_cpr.c', 'tf_ulp/ulp_matcher.c', 'tf_ulp/ulp_rte_parser.c', 'tf_ulp/bnxt_ulp_flow.c', + 'tf_ulp/ulp_port_db.c', 'rte_pmd_bnxt.c') diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c index 202b4a529..f8047f0d6 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c @@ -20,6 +20,7 @@ #include "ulp_mark_mgr.h" #include "ulp_flow_db.h" #include "ulp_mapper.h" +#include "ulp_port_db.h" /* Linked list of all TF sessions. 
*/ STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list = @@ -454,6 +455,13 @@ bnxt_ulp_init(struct bnxt *bp) if (rc) { BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n"); + return rc; + } + /* update the port database */ + rc = ulp_port_db_dev_port_intf_update(&bp->ulp_ctx, bp); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to update port database\n"); } return rc; } @@ -465,6 +473,20 @@ bnxt_ulp_init(struct bnxt *bp) goto jump_to_error; } + /* create the port database */ + rc = ulp_port_db_init(&bp->ulp_ctx); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to create the port database\n"); + goto jump_to_error; + } + + /* update the port database */ + rc = ulp_port_db_dev_port_intf_update(&bp->ulp_ctx, bp); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to update port database\n"); + goto jump_to_error; + } + /* Create the Mark database. */ rc = ulp_mark_db_init(&bp->ulp_ctx); if (rc) { @@ -539,6 +561,9 @@ bnxt_ulp_deinit(struct bnxt *bp) /* cleanup the ulp mapper */ ulp_mapper_deinit(&bp->ulp_ctx); + /* Delete the Port database */ + ulp_port_db_deinit(&bp->ulp_ctx); + /* Delete the ulp context and tf session */ ulp_ctx_detach(bp, session); @@ -723,3 +748,25 @@ bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx) return ulp_ctx->cfg_data->mapper_data; } + +/* Function to set the port database to the ulp context. */ +int32_t +bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->port_db = port_db; + return 0; +} + +/* Function to get the port database from the ulp context. 
*/ +struct bnxt_ulp_port_db * +bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->port_db; +} diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h index d2ca17857..eecc09cea 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h @@ -19,6 +19,7 @@ struct bnxt_ulp_data { uint32_t ref_cnt; struct bnxt_ulp_flow_db *flow_db; void *mapper_data; + struct bnxt_ulp_port_db *port_db; }; struct bnxt_ulp_context { @@ -117,4 +118,13 @@ bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, void * bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx); +/* Function to set the port database to the ulp context. */ +int32_t +bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db); + +/* Function to get the port database from the ulp context. */ +struct bnxt_ulp_port_db * +bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx); + #endif /* _BNXT_ULP_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c index 7783f85d9..dbec8cecf 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -69,7 +69,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, { struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 }; struct ulp_rte_parser_params params; - struct bnxt_ulp_context *ulp_ctx = NULL; + struct bnxt_ulp_context *ulp_ctx; uint32_t class_id, act_tmpl; struct rte_flow *flow_id; uint32_t fid; @@ -90,6 +90,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, /* Initialize the parser params */ memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + params.ulp_ctx = ulp_ctx; if (attr->egress) params.dir = ULP_DIR_EGRESS; @@ -142,7 +143,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev, /* Function to validate the rte flow. 
*/ static int -bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, +bnxt_ulp_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -151,6 +152,7 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, struct ulp_rte_parser_params params; uint32_t class_id, act_tmpl; int ret; + struct bnxt_ulp_context *ulp_ctx; if (bnxt_ulp_flow_validate_args(attr, pattern, actions, @@ -159,8 +161,15 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev __rte_unused, return -EINVAL; } + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + return -EINVAL; + } + /* Initialize the parser params */ memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + params.ulp_ctx = ulp_ctx; if (attr->egress) params.dir = ULP_DIR_EGRESS; diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c new file mode 100644 index 000000000..e3b924289 --- /dev/null +++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt.h" +#include "bnxt_vnic.h" +#include "bnxt_tf_common.h" +#include "ulp_port_db.h" + +static uint32_t +ulp_port_db_allocate_ifindex(struct bnxt_ulp_port_db *port_db) +{ + uint32_t idx = 1; + + while (idx < port_db->ulp_intf_list_size && + port_db->ulp_intf_list[idx].type != BNXT_ULP_INTF_TYPE_INVALID) + idx++; + + if (idx >= port_db->ulp_intf_list_size) { + BNXT_TF_DBG(ERR, "Port DB interface list is full\n"); + return 0; + } + return idx; +} + +/* + * Initialize the port database. Memory is allocated in this + * call and assigned to the port database. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success or negative number on failure. 
+ */ +int32_t ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = rte_zmalloc("bnxt_ulp_port_db", + sizeof(struct bnxt_ulp_port_db), 0); + if (!port_db) { + BNXT_TF_DBG(ERR, + "Failed to allocate memory for port db\n"); + return -ENOMEM; + } + + /* Attach the port database to the ulp context. */ + bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, port_db); + + /* index 0 is not being used hence add 1 to size */ + port_db->ulp_intf_list_size = BNXT_PORT_DB_MAX_INTF_LIST + 1; + /* Allocate the port tables */ + port_db->ulp_intf_list = rte_zmalloc("bnxt_ulp_port_db_intf_list", + port_db->ulp_intf_list_size * + sizeof(struct ulp_interface_info), + 0); + if (!port_db->ulp_intf_list) { + BNXT_TF_DBG(ERR, + "Failed to allocate mem for port interface list\n"); + goto error_free; + } + return 0; + +error_free: + ulp_port_db_deinit(ulp_ctxt); + return -ENOMEM; +} + +/* + * Deinitialize the port database. Memory is deallocated in + * this call. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success. + */ +int32_t ulp_port_db_deinit(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + /* Detach the flow database from the ulp context. */ + bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, NULL); + + /* Free up all the memory. */ + rte_free(port_db->ulp_intf_list); + rte_free(port_db); + return 0; +} + +/* + * Update the port database.This api is called when the port + * details are available during the startup. + * + * ulp_ctxt [in] Ptr to ulp context + * bp [in]. ptr to the device function. + * + * Returns 0 on success or negative number on failure. 
+ */ +int32_t ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt, + struct bnxt *bp) +{ + struct bnxt_ulp_port_db *port_db; + uint32_t port_id = bp->eth_dev->data->port_id; + uint32_t ifindex; + struct ulp_interface_info *intf; + int32_t rc; + struct bnxt_vnic_info *vnic; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + rc = ulp_port_db_dev_port_to_ulp_index(ulp_ctxt, port_id, &ifindex); + if (rc == -ENOENT) { + /* port not found, allocate one */ + ifindex = ulp_port_db_allocate_ifindex(port_db); + if (!ifindex) + return -ENOMEM; + port_db->dev_port_list[port_id] = ifindex; + } else if (rc == -EINVAL) { + return -EINVAL; + } + + /* update the interface details */ + intf = &port_db->ulp_intf_list[ifindex]; + if (BNXT_PF(bp) || BNXT_VF(bp)) { + if (BNXT_PF(bp)) { + intf->type = BNXT_ULP_INTF_TYPE_PF; + intf->port_svif = bp->port_svif; + } else { + intf->type = BNXT_ULP_INTF_TYPE_VF; + } + intf->func_id = bp->fw_fid; + intf->func_svif = bp->func_svif; + vnic = BNXT_GET_DEFAULT_VNIC(bp); + if (vnic) + intf->default_vnic = vnic->fw_vnic_id; + intf->bp = bp; + memcpy(intf->mac_addr, bp->mac_addr, sizeof(intf->mac_addr)); + } else { + BNXT_TF_DBG(ERR, "Invalid interface type\n"); + } + + return 0; +} + +/* + * Api to get the ulp ifindex for a given device port. + * + * ulp_ctxt [in] Ptr to ulp context + * port_id [in].device port id + * ifindex [out] ulp ifindex + * + * Returns 0 on success or negative number on failure. 
+ */ +int32_t +ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt, + uint32_t port_id, + uint32_t *ifindex) +{ + struct bnxt_ulp_port_db *port_db; + + *ifindex = 0; + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || port_id >= RTE_MAX_ETHPORTS) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + if (!port_db->dev_port_list[port_id]) + return -ENOENT; + + *ifindex = port_db->dev_port_list[port_id]; + return 0; +} + +/* + * Api to get the function id for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * func_id [out] the function id of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *func_id) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + *func_id = port_db->ulp_intf_list[ifindex].func_id; + return 0; +} + +/* + * Api to get the svid for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * dir [in] the direction for the flow. + * svif [out] the svif of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_svif_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint32_t dir, + uint16_t *svif) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + if (dir == ULP_DIR_EGRESS) + *svif = port_db->ulp_intf_list[ifindex].func_svif; + else + *svif = port_db->ulp_intf_list[ifindex].port_svif; + return 0; +} + +/* + * Api to get the vnic id for a given ulp ifindex. 
+ * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * vnic [out] the vnic of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_default_vnic_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *vnic) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + *vnic = port_db->ulp_intf_list[ifindex].default_vnic; + return 0; +} diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h new file mode 100644 index 000000000..271c29a47 --- /dev/null +++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_PORT_DB_H_ +#define _ULP_PORT_DB_H_ + +#include "bnxt_ulp.h" + +#define BNXT_PORT_DB_MAX_INTF_LIST 256 + +/* enumeration of the interface types */ +enum bnxt_ulp_intf_type { + BNXT_ULP_INTF_TYPE_INVALID = 0, + BNXT_ULP_INTF_TYPE_PF = 1, + BNXT_ULP_INTF_TYPE_VF, + BNXT_ULP_INTF_TYPE_PF_REP, + BNXT_ULP_INTF_TYPE_VF_REP, + BNXT_ULP_INTF_TYPE_LAST +}; + +/* Structure for the Port database resource information. */ +struct ulp_interface_info { + enum bnxt_ulp_intf_type type; + uint16_t func_id; + uint16_t func_svif; + uint16_t port_svif; + uint16_t default_vnic; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + /* back pointer to the bnxt driver, it is null for rep ports */ + struct bnxt *bp; +}; + +/* Structure for the Port database */ +struct bnxt_ulp_port_db { + struct ulp_interface_info *ulp_intf_list; + uint32_t ulp_intf_list_size; + + /* dpdk device external port list */ + uint16_t dev_port_list[RTE_MAX_ETHPORTS]; +}; + +/* + * Initialize the port database. Memory is allocated in this + * call and assigned to the port database. 
+ * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Deinitialize the port database. Memory is deallocated in + * this call. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success. + */ +int32_t ulp_port_db_deinit(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Update the port database.This api is called when the port + * details are available during the startup. + * + * ulp_ctxt [in] Ptr to ulp context + * bp [in]. ptr to the device function. + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt, + struct bnxt *bp); + +/* + * Api to get the ulp ifindex for a given device port. + * + * ulp_ctxt [in] Ptr to ulp context + * port_id [in].device port id + * ifindex [out] ulp ifindex + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt, + uint32_t port_id, + uint32_t *ifindex); + +/* + * Api to get the function id for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * func_id [out] the function id of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *func_id); + +/* + * Api to get the svid for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * dir [in] the direction for the flow. + * svif [out] the svif of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_svif_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint32_t dir, + uint16_t *svif); + +/* + * Api to get the vnic id for a given ulp ifindex. 
+ * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * vnic [out] the vnic of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_default_vnic_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *vnic); + +#endif /* _ULP_PORT_DB_H_ */ diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c index 873f86494..ace5fad97 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -10,6 +10,7 @@ #include "ulp_rte_parser.h" #include "ulp_utils.h" #include "tfp.h" +#include "ulp_port_db.h" /* Utility function to skip the void items. */ static inline int32_t @@ -161,6 +162,8 @@ ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params, uint16_t port_id = svif; uint32_t dir = 0; struct ulp_rte_hdr_field *hdr_field; + uint32_t ifindex; + int32_t rc; if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) { BNXT_TF_DBG(ERR, @@ -175,10 +178,15 @@ ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params, dir = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_DIRECTION); /* perform the conversion from dpdk port to bnxt svif */ - if (dir == ULP_DIR_EGRESS) - svif = bnxt_get_svif(port_id, true); - else - svif = bnxt_get_svif(port_id, false); + rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id, + &ifindex); + if (rc) { + BNXT_TF_DBG(ERR, + "Invalid port id\n"); + return BNXT_TF_RC_ERROR; + } + ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif); + svif = rte_cpu_to_be_16(svif); } hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX]; memcpy(hdr_field->spec, &svif, sizeof(svif)); diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h index 1bef5ab0e..0e0d02ff4 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h +++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h @@ -66,6 +66,7 @@ struct ulp_rte_parser_params 
{ struct ulp_rte_act_bitmap act_bitmap; struct ulp_rte_act_prop act_prop; uint32_t dir; + struct bnxt_ulp_context *ulp_ctx; }; /* Flow Parser Header Information Structure */ From patchwork Fri Apr 17 16:19:20 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 68799 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D9A55A0597; Fri, 17 Apr 2020 18:21:54 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 197E51EA99; Fri, 17 Apr 2020 18:19:48 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 51C0E1E9E9 for ; Fri, 17 Apr 2020 18:19:29 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id E9E8B30C1F8 for ; Fri, 17 Apr 2020 09:08:08 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com E9E8B30C1F8 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1587139688; bh=gBYz8pvzbWs1e6u/y/M0Vgfql0EJdO6g2hX6faBqPo4=; h=From:To:Subject:Date:In-Reply-To:References:From; b=AXqTiDYsdl+RQHuwEollc1ZUtl85KCCwM9T85cjxpKmSotoSDKxN2S5PY63vLrysf p0VGPooGDPk4Zg6OtP5V8d+xxgUWCtf1hyeE+yfGa8LdBBaqNDDmifHJ7uC0TsX3Lz cj2BhQ4Sd6uiKyjqaeSUBUVKQ+j7leCWHHmA+Fj4= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 4393514008D for ; Fri, 17 Apr 2020 09:19:28 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Date: Fri, 17 Apr 2020 09:19:20 -0700 Message-Id: <20200417161920.85858-13-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: 
<20200417161920.85858-1-ajit.khaparde@broadcom.com> References: <1586962156-11179-1-git-send-email-venkatkumar.duvvuru@broadcom.com> <20200417161920.85858-1-ajit.khaparde@broadcom.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 12/12] net/bnxt: remove redefinition of page size X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" PAGE_SIZE is a common macro and cause redefinition errors. Replace such occurances. Signed-off-by: Ajit Khaparde --- drivers/net/bnxt/tf_core/tf_tbl.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h index cb7ce9d17..5d3ea71f4 100644 --- a/drivers/net/bnxt/tf_core/tf_tbl.h +++ b/drivers/net/bnxt/tf_core/tf_tbl.h @@ -79,30 +79,30 @@ struct tf_tbl_scope_cb { /** Hardware Page sizes supported for EEM: 4K, 8K, 64K, 256K, 1M, 2M, 4M, 1G. * Round-down other page sizes to the lower hardware page size supported. 
*/ -#define PAGE_SHIFT 22 /** 2M */ +#define BNXT_PAGE_SHIFT 22 /** 2M */ -#if (PAGE_SHIFT < 12) /** < 4K >> 4K */ +#if (BNXT_PAGE_SHIFT < 12) /** < 4K >> 4K */ #define TF_EM_PAGE_SHIFT 12 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K -#elif (PAGE_SHIFT <= 13) /** 4K, 8K */ +#elif (BNXT_PAGE_SHIFT <= 13) /** 4K, 8K */ #define TF_EM_PAGE_SHIFT 13 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K -#elif (PAGE_SHIFT < 16) /** 16K, 32K >> 8K */ +#elif (BNXT_PAGE_SHIFT < 16) /** 16K, 32K >> 8K */ #define TF_EM_PAGE_SHIFT 15 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_32K -#elif (PAGE_SHIFT <= 17) /** 64K, 128K >> 64K */ +#elif (BNXT_PAGE_SHIFT <= 17) /** 64K, 128K >> 64K */ #define TF_EM_PAGE_SHIFT 16 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K -#elif (PAGE_SHIFT <= 19) /** 256K, 512K >> 256K */ +#elif (BNXT_PAGE_SHIFT <= 19) /** 256K, 512K >> 256K */ #define TF_EM_PAGE_SHIFT 18 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K -#elif (PAGE_SHIFT <= 21) /** 1M */ +#elif (BNXT_PAGE_SHIFT <= 21) /** 1M */ #define TF_EM_PAGE_SHIFT 20 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M -#elif (PAGE_SHIFT <= 22) /** 2M, 4M */ +#elif (BNXT_PAGE_SHIFT <= 22) /** 2M, 4M */ #define TF_EM_PAGE_SHIFT 21 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M -#elif (PAGE_SHIFT <= 29) /** 8M ... 512M >> 4M */ +#elif (BNXT_PAGE_SHIFT <= 29) /** 8M ... 512M >> 4M */ #define TF_EM_PAGE_SHIFT 22 #define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M #else /** >= 1G >> 1G */